Merge git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild

* git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild: (33 commits)
  xtensa: use DATA_DATA in xtensa
  powerpc: add missing DATA_DATA to powerpc
  cris: use DATA_DATA in cris
  kallsyms: remove usage of memmem and _GNU_SOURCE from scripts/kallsyms.c
  kbuild: use -fno-optimize-sibling-calls unconditionally
  kconfig: reset generated values only if Kconfig and .config agree.
  kbuild: fix the warning when running make tags
  kconfig: strip 'CONFIG_' automatically in kernel configuration search
  kbuild: use POSIX BRE in headers install target
  Whitelist references from __dbe_table to .init
  modpost white list pattern adjustment
  kbuild: do section mismatch check on full vmlinux
  kbuild: whitelist references from variables named _timer to .init.text
  kbuild: remove hardcoded _logo names from modpost
  kbuild: remove hardcoded apic_es7000 from modpost
  kbuild: warn about references from .init.text to .exit.text
  kbuild: consolidate section checks
  kbuild: refactor code in modpost to improve maintainability
  kbuild: ignore section mismatch warnings originating from .note section
  kbuild: .paravirtprobe section is obsolete, so modpost doesn't need to handle it
  ...
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index a667eb1..7f1730f 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -633,12 +633,27 @@
 
 Kernel developers like to be seen as literate. Do mind the spelling
 of kernel messages to make a good impression. Do not use crippled
-words like "dont" and use "do not" or "don't" instead.
+words like "dont"; use "do not" or "don't" instead.  Make the messages
+concise, clear, and unambiguous.
 
 Kernel messages do not have to be terminated with a period.
 
 Printing numbers in parentheses (%d) adds no value and should be avoided.
 
+There are a number of driver model diagnostic macros in <linux/device.h>
+which you should use to make sure messages are matched to the right device
+and driver, and are tagged with the right level:  dev_err(), dev_warn(),
+dev_info(), and so forth.  For messages that aren't associated with a
+particular device, <linux/kernel.h> defines pr_debug() and pr_info().
+
+Coming up with good debugging messages can be quite a challenge; and once
+you have them, they can be a huge help for remote troubleshooting.  Such
+messages should be compiled out when the DEBUG symbol is not defined (that
+is, by default they are not included).  When you use dev_dbg() or pr_debug(),
+that's automatic.  Many subsystems have Kconfig options to turn on -DDEBUG.
+A related convention uses VERBOSE_DEBUG to add dev_vdbg() messages to the
+ones already enabled by DEBUG.
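+
+A hypothetical example, assuming "dev" is your driver's usual struct
+device pointer (the messages themselves are made up):
+
+	dev_dbg(dev, "resetting controller\n");	/* compiled out unless DEBUG */
+	dev_err(dev, "firmware load failed\n");
+	pr_info("no device found, using defaults\n");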
+
 
 		Chapter 14: Allocating memory
 
@@ -790,4 +805,5 @@
 http://www.kroah.com/linux/talks/ols_2002_kernel_codingstyle_talk/html/
 
 --
-Last updated on 2006-December-06.
+Last updated on 2007-July-13.
+
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 6fd1646..08687e4 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -15,11 +15,11 @@
 
 ###
 # The build process is as follows (targets):
-#              (xmldocs)
-# file.tmpl --> file.xml +--> file.ps   (psdocs)
-#                        +--> file.pdf  (pdfdocs)
-#                        +--> DIR=file  (htmldocs)
-#                        +--> man/      (mandocs)
+#              (xmldocs) [by docproc]
+# file.tmpl --> file.xml +--> file.ps   (psdocs)   [by db2ps or xmlto]
+#                        +--> file.pdf  (pdfdocs)  [by db2pdf or xmlto]
+#                        +--> DIR=file  (htmldocs) [by xmlto]
+#                        +--> man/      (mandocs)  [by xmlto]
 
 
 # for PDF and PS output you can choose between xmlto and docbook-utils tools
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 46bcff2..eb42bf9 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -139,8 +139,10 @@
 !Elib/cmdline.c
      </sect1>
 
-     <sect1><title>CRC Functions</title>
+     <sect1 id="crc"><title>CRC Functions</title>
+!Elib/crc7.c
 !Elib/crc16.c
+!Elib/crc-itu-t.c
 !Elib/crc32.c
 !Elib/crc-ccitt.c
      </sect1>
@@ -157,7 +159,6 @@
 !Earch/i386/lib/usercopy.c
      </sect1>
      <sect1><title>More Memory Management Functions</title>
-!Iinclude/linux/rmap.h
 !Emm/readahead.c
 !Emm/filemap.c
 !Emm/memory.c
@@ -406,6 +407,10 @@
 !Edrivers/pnp/manager.c
 !Edrivers/pnp/support.c
      </sect1>
+     <sect1><title>Userspace IO devices</title>
+!Edrivers/uio/uio.c
+!Iinclude/linux/uio_driver.h
+     </sect1>
   </chapter>
 
   <chapter id="blkdev">
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
new file mode 100644
index 0000000..e3bb29a
--- /dev/null
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -0,0 +1,611 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" []>
+
+<book id="index">
+<bookinfo>
+<title>The Userspace I/O HOWTO</title>
+
+<author>
+      <firstname>Hans-Jürgen</firstname>
+      <surname>Koch</surname>
+      <authorblurb><para>Linux developer, Linutronix</para></authorblurb>
+	<affiliation>
+	<orgname>
+		<ulink url="http://www.linutronix.de">Linutronix</ulink>
+	</orgname>
+
+	<address>
+	   <email>hjk@linutronix.de</email>
+	</address>
+    </affiliation>
+</author>
+
+<pubdate>2006-12-11</pubdate>
+
+<abstract>
+	<para>This HOWTO describes concept and usage of Linux kernel's
+		Userspace I/O system.</para>
+</abstract>
+
+<revhistory>
+	<revision>
+	<revnumber>0.3</revnumber>
+	<date>2007-04-29</date>
+	<authorinitials>hjk</authorinitials>
+	<revremark>Added section about userspace drivers.</revremark>
+	</revision>
+	<revision>
+	<revnumber>0.2</revnumber>
+	<date>2007-02-13</date>
+	<authorinitials>hjk</authorinitials>
+	<revremark>Update after multiple mappings were added.</revremark>
+	</revision>
+	<revision>
+	<revnumber>0.1</revnumber>
+	<date>2006-12-11</date>
+	<authorinitials>hjk</authorinitials>
+	<revremark>First draft.</revremark>
+	</revision>
+</revhistory>
+</bookinfo>
+
+<chapter id="aboutthisdoc">
+<?dbhtml filename="about.html"?>
+<title>About this document</title>
+
+<sect1 id="copyright">
+<?dbhtml filename="copyright.html"?>
+<title>Copyright and License</title>
+<para>
+      Copyright (c) 2006 by Hans-Jürgen Koch.</para>
+<para>
+This documentation is Free Software licensed under the terms of the
+GPL version 2.
+</para>
+</sect1>
+
+<sect1 id="translations">
+<?dbhtml filename="translations.html"?>
+<title>Translations</title>
+
+<para>If you know of any translations for this document, or you are
+interested in translating it, please email me
+<email>hjk@linutronix.de</email>.
+</para>
+</sect1>
+
+<sect1 id="preface">
+<title>Preface</title>
+	<para>
+	For many types of devices, creating a Linux kernel driver is
+	overkill.  All that is really needed is some way to handle an
+	interrupt and provide access to the memory space of the
+	device.  The logic of controlling the device does not
+	necessarily have to be within the kernel, as the device does
+	not need to take advantage of any other resources that the
+	kernel provides.  One common class of devices like this is
+	industrial I/O cards.
+	</para>
+	<para>
+	To address this situation, the userspace I/O system (UIO) was
+	designed.  For typical industrial I/O cards, only a very small
+	kernel module is needed. The main part of the driver will run in
+	user space. This simplifies development and reduces the risk of
+	serious bugs within a kernel module.
+	</para>
+</sect1>
+
+<sect1 id="thanks">
+<title>Acknowledgments</title>
+	<para>I'd like to thank Thomas Gleixner and Benedikt Spranger of
+	Linutronix, who have not only written most of the UIO code, but also
+	helped greatly writing this HOWTO by giving me all kinds of background
+	information.</para>
+</sect1>
+
+<sect1 id="feedback">
+<title>Feedback</title>
+	<para>Find something wrong with this document? (Or perhaps something
+	right?) I would love to hear from you. Please email me at
+	<email>hjk@linutronix.de</email>.</para>
+</sect1>
+</chapter>
+
+<chapter id="about">
+<?dbhtml filename="about.html"?>
+<title>About UIO</title>
+
+<para>If you use UIO for your card's driver, here's what you get:</para>
+
+<itemizedlist>
+<listitem>
+	<para>only one small kernel module to write and maintain.</para>
+</listitem>
+<listitem>
+	<para>develop the main part of your driver in user space,
+	with all the tools and libraries you're used to.</para>
+</listitem>
+<listitem>
+	<para>bugs in your driver won't crash the kernel.</para>
+</listitem>
+<listitem>
+	<para>updates of your driver can take place without recompiling
+	the kernel.</para>
+</listitem>
+<listitem>
+	<para>if you need to keep some parts of your driver closed source,
+	you can do so without violating the GPL license on the kernel.</para>
+</listitem>
+</itemizedlist>
+
+<sect1 id="how_uio_works">
+<title>How UIO works</title>
+	<para>
+	Each UIO device is accessed through a device file and several
+	sysfs attribute files. The device file will be called
+	<filename>/dev/uio0</filename> for the first device, and
+	<filename>/dev/uio1</filename>, <filename>/dev/uio2</filename>
+	and so on for subsequent devices.
+	</para>
+
+	<para><filename>/dev/uioX</filename> is used to access the
+	address space of the card. Just use
+	<function>mmap()</function> to access registers or RAM
+	locations of your card.
+	</para>
+
+	<para>
+	Interrupts are handled by reading from
+	<filename>/dev/uioX</filename>. A blocking
+	<function>read()</function> from
+	<filename>/dev/uioX</filename> will return as soon as an
+	interrupt occurs. You can also use
+	<function>select()</function> on
+	<filename>/dev/uioX</filename> to wait for an interrupt. The
+	integer value read from <filename>/dev/uioX</filename>
+	represents the total interrupt count. You can use this number
+	to figure out if you missed some interrupts.
+	</para>
+
+	<para>
+	To handle interrupts properly, your custom kernel module can
+	provide its own interrupt handler. It will automatically be
+	called by the built-in handler.
+	</para>
+
+	<para>
+	For cards that don't generate interrupts but need to be
+	polled, there is the possibility to set up a timer that
+	triggers the interrupt handler at configurable time intervals.
+	See <filename>drivers/uio/uio_dummy.c</filename> for an
+	example of this technique.
+	</para>
+
+	<para>
+	Each driver provides attributes that are used to read or write
+	variables. These attributes are accessible through sysfs
+	files.  A custom kernel driver module can add its own
+	attributes to the device owned by the uio driver, but cannot add
+	them to the UIO device itself at this time.  This might change
+	in the future if it turns out to be useful.
+	</para>
+
+	<para>
+	The following standard attributes are provided by the UIO
+	framework:
+	</para>
+<itemizedlist>
+<listitem>
+	<para>
+	<filename>name</filename>: The name of your device. It is
+	recommended to use the name of your kernel module for this.
+	</para>
+</listitem>
+<listitem>
+	<para>
+	<filename>version</filename>: A version string defined by your
+	driver. This allows the user space part of your driver to deal
+	with different versions of the kernel module.
+	</para>
+</listitem>
+<listitem>
+	<para>
+	<filename>event</filename>: The total number of interrupts
+	handled by the driver since the last time the device node was
+	read.
+	</para>
+</listitem>
+</itemizedlist>
+<para>
+	These attributes appear under the
+	<filename>/sys/class/uio/uioX</filename> directory.  Please
+	note that this directory might be a symlink, and not a real
+	directory.  Any userspace code that accesses it must be able
+	to handle this.
+</para>
+<para>
+	Each UIO device can make one or more memory regions available for
+	memory mapping. This is necessary because some industrial I/O cards
+	require access to more than one PCI memory region in a driver.
+</para>
+<para>
+	Each mapping has its own directory in sysfs, the first mapping
+	appears as <filename>/sys/class/uio/uioX/maps/map0/</filename>.
+	Subsequent mappings create directories <filename>map1/</filename>,
+	<filename>map2/</filename>, and so on. These directories will only
+	appear if the size of the mapping is not 0.
+</para>
+<para>
+	Each <filename>mapX/</filename> directory contains two read-only files
+	that show start address and size of the memory:
+</para>
+<itemizedlist>
+<listitem>
+	<para>
+	<filename>addr</filename>: The address of memory that can be mapped.
+	</para>
+</listitem>
+<listitem>
+	<para>
+	<filename>size</filename>: The size, in bytes, of the memory
+	pointed to by addr.
+	</para>
+</listitem>
+</itemizedlist>
+
+<para>
+	From userspace, the different mappings are distinguished by adjusting
+	the <varname>offset</varname> parameter of the
+	<function>mmap()</function> call. To map the memory of mapping N, you
+	have to use N times the page size as your offset:
+</para>
+<programlisting format="linespecific">
+offset = N * getpagesize();
+</programlisting>
+
+</sect1>
+</chapter>
+
+<chapter id="using-uio_dummy" xreflabel="Using uio_dummy">
+<?dbhtml filename="using-uio_dummy.html"?>
+<title>Using uio_dummy</title>
+	<para>
+	Well, there is no real use for uio_dummy. Its only purpose is
+	to test most parts of the UIO system (everything except
+	hardware interrupts), and to serve as an example for the
+	kernel module that you will have to write yourself.
+	</para>
+
+<sect1 id="what_uio_dummy_does">
+<title>What uio_dummy does</title>
+	<para>
+	The kernel module <filename>uio_dummy.ko</filename> creates a
+	device that uses a timer to generate periodic interrupts. The
+	interrupt handler does nothing but increment a counter. The
+	driver adds two custom attributes, <varname>count</varname>
+	and <varname>freq</varname>, that appear under
+	<filename>/sys/devices/platform/uio_dummy/</filename>.
+	</para>
+
+	<para>
+	The attribute <varname>count</varname> can be read and
+	written.  The associated file
+	<filename>/sys/devices/platform/uio_dummy/count</filename>
+	appears as a normal text file and contains the total number of
+	timer interrupts. If you look at it (e.g. using
+	<function>cat</function>), you'll notice it is slowly counting
+	up.
+	</para>
+
+	<para>
+	The attribute <varname>freq</varname> can be read and written.
+	The content of
+	<filename>/sys/devices/platform/uio_dummy/freq</filename>
+	represents the number of system timer ticks between two timer
+	interrupts. The default value of <varname>freq</varname> is
+	the value of the kernel variable <varname>HZ</varname>, which
+	gives you an interval of one second. Lower values will
+	increase the frequency. Try the following:
+	</para>
+<programlisting format="linespecific">
+cd /sys/devices/platform/uio_dummy/
+echo 100 > freq
+</programlisting>
+	<para>
+	Use <function>cat count</function> to see how the interrupt
+	frequency changes.
+	</para>
+</sect1>
+</chapter>
+
+<chapter id="custom_kernel_module" xreflabel="Writing your own kernel module">
+<?dbhtml filename="custom_kernel_module.html"?>
+<title>Writing your own kernel module</title>
+	<para>
+	Please have a look at <filename>uio_dummy.c</filename> as an
+	example. The following paragraphs explain the different
+	sections of this file.
+	</para>
+
+<sect1 id="uio_info">
+<title>struct uio_info</title>
+	<para>
+	This structure tells the framework the details of your driver.
+	Some of the members are required, others are optional.
+	</para>
+
+<itemizedlist>
+<listitem><para>
+<varname>char *name</varname>: Required. The name of your driver as
+it will appear in sysfs. I recommend using the name of your module for this.
+</para></listitem>
+
+<listitem><para>
+<varname>char *version</varname>: Required. This string appears in
+<filename>/sys/class/uio/uioX/version</filename>.
+</para></listitem>
+
+<listitem><para>
+<varname>struct uio_mem mem[ MAX_UIO_MAPS ]</varname>: Required if you
+have memory that can be mapped with <function>mmap()</function>. For each
+mapping you need to fill one of the <varname>uio_mem</varname> structures.
+See the description below for details.
+</para></listitem>
+
+<listitem><para>
+<varname>long irq</varname>: Required. If your hardware generates an
+interrupt, it's your module's task to determine the irq number during
+initialization. If you don't have a hardware generated interrupt but
+want to trigger the interrupt handler in some other way, set
+<varname>irq</varname> to <varname>UIO_IRQ_CUSTOM</varname>. The
+uio_dummy module does this as it triggers the event mechanism in a timer
+routine. If you had no interrupt at all, you could set
+<varname>irq</varname> to <varname>UIO_IRQ_NONE</varname>, though this
+rarely makes sense.
+</para></listitem>
+
+<listitem><para>
+<varname>unsigned long irq_flags</varname>: Required if you've set
+<varname>irq</varname> to a hardware interrupt number. The flags given
+here will be used in the call to <function>request_irq()</function>.
+</para></listitem>
+
+<listitem><para>
+<varname>int (*mmap)(struct uio_info *info, struct vm_area_struct
+*vma)</varname>: Optional. If you need a special
+<function>mmap()</function> function, you can set it here. If this
+pointer is not NULL, your <function>mmap()</function> will be called
+instead of the built-in one.
+</para></listitem>
+
+<listitem><para>
+<varname>int (*open)(struct uio_info *info, struct inode *inode)
+</varname>: Optional. You might want to have your own
+<function>open()</function>, e.g. to enable interrupts only when your
+device is actually used.
+</para></listitem>
+
+<listitem><para>
+<varname>int (*release)(struct uio_info *info, struct inode *inode)
+</varname>: Optional. If you define your own
+<function>open()</function>, you will probably also want a custom
+<function>release()</function> function.
+</para></listitem>
+</itemizedlist>
+
+<para>
+Usually, your device will have one or more memory regions that can be mapped
+to user space. For each region, you have to set up a
+<varname>struct uio_mem</varname> in the <varname>mem[]</varname> array.
+Here's a description of the fields of <varname>struct uio_mem</varname>:
+</para>
+
+<itemizedlist>
+<listitem><para>
+<varname>int memtype</varname>: Required if the mapping is used. Set this to
+<varname>UIO_MEM_PHYS</varname> if you have physical memory on your
+card to be mapped. Use <varname>UIO_MEM_LOGICAL</varname> for logical
+memory (e.g. allocated with <function>kmalloc()</function>). There's also
+<varname>UIO_MEM_VIRTUAL</varname> for virtual memory.
+</para></listitem>
+
+<listitem><para>
+<varname>unsigned long addr</varname>: Required if the mapping is used.
+Fill in the address of your memory block. This address is the one that
+appears in sysfs.
+</para></listitem>
+
+<listitem><para>
+<varname>unsigned long size</varname>: Fill in the size of the
+memory block that <varname>addr</varname> points to. If <varname>size</varname>
+is zero, the mapping is considered unused. Note that you
+<emphasis>must</emphasis> initialize <varname>size</varname> with zero for
+all unused mappings.
+</para></listitem>
+
+<listitem><para>
+<varname>void *internal_addr</varname>: If you have to access this memory
+region from within your kernel module, you will want to map it internally by
+using something like <function>ioremap()</function>. Addresses
+returned by this function cannot be mapped to user space, so you must not
+store them in <varname>addr</varname>. Use <varname>internal_addr</varname>
+instead to remember such an address.
+</para></listitem>
+</itemizedlist>
+
+<para>
+Please do not touch the <varname>kobj</varname> element of
+<varname>struct uio_mem</varname>! It is used by the UIO framework
+to set up sysfs files for this mapping. Simply leave it alone.
+</para>
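+<para>
+As a rough sketch, this is how the members described above might be filled
+in for a hypothetical card with one logical memory region and a hardware
+interrupt.  All names and numbers below are made up, and the registration
+of the structure with the UIO framework is not shown:
+</para>
+<programlisting format="linespecific">
+static struct uio_info my_uio_info = {
+	.name      = "my_uio_driver",   /* shows up in /sys/class/uio/uioX/name */
+	.version   = "0.1",             /* shows up in .../version */
+	.irq       = 42,                /* determined during initialization */
+	.irq_flags = 0,                 /* passed to request_irq() */
+};
+
+static void my_uio_setup_mem(void *buf)
+{
+	/* mapping 0: a kmalloc()ed buffer, hence UIO_MEM_LOGICAL */
+	my_uio_info.mem[0].memtype = UIO_MEM_LOGICAL;
+	my_uio_info.mem[0].addr    = (unsigned long)buf;
+	my_uio_info.mem[0].size    = 4096;
+	/* all other mem[] entries keep size == 0, i.e. unused */
+}
+</programlisting>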
+</sect1>
+
+<sect1 id="adding_irq_handler">
+<title>Adding an interrupt handler</title>
+	<para>
+	What you need to do in your interrupt handler depends on your
+	hardware and on how you want to handle it. You should try to
+	keep the amount of code in your kernel interrupt handler low.
+	If your hardware requires no action that you
+	<emphasis>have</emphasis> to perform after each interrupt,
+	then your handler can be empty.</para> <para>If, on the other
+	hand, your hardware <emphasis>needs</emphasis> some action to
+	be performed after each interrupt, then you
+	<emphasis>must</emphasis> do it in your kernel module. Note
+	that you cannot rely on the userspace part of your driver. Your
+	userspace program can terminate at any time, possibly leaving
+	your hardware in a state where proper interrupt handling is
+	still required.
+	</para>
+
+	<para>
+	There might also be applications where you want to read data
+	from your hardware at each interrupt and buffer it in a piece
+	of kernel memory you've allocated for that purpose.  With this
+	technique you could avoid loss of data if your userspace
+	program misses an interrupt.
+	</para>
+
+	<para>
+	A note on shared interrupts: Your driver should support
+	interrupt sharing whenever this is possible. It is possible if
+	and only if your driver can detect whether your hardware has
+	triggered the interrupt or not. This is usually done by looking
+	at an interrupt status register. If your driver sees that the
+	IRQ bit is actually set, it will perform its actions, and the
+	handler returns IRQ_HANDLED. If the driver detects that it was
+	not your hardware that caused the interrupt, it will do nothing
+	and return IRQ_NONE, allowing the kernel to call the next
+	possible interrupt handler.
+	</para>
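+	<para>
+	The fragment below only sketches that decision.  The handler
+	signature is an assumption (check
+	<filename>include/linux/uio_driver.h</filename> for what the UIO
+	core really expects), and the status register access is a made-up
+	placeholder for whatever your hardware provides:
+	</para>
+<programlisting format="linespecific">
+static irqreturn_t my_handler(int irq, struct uio_info *dev_info)
+{
+	u32 status = my_read_irq_status(dev_info);  /* hypothetical helper */
+
+	if (!status)
+		return IRQ_NONE;     /* not our card, let others handle it */
+
+	my_ack_irq(dev_info);        /* whatever your hardware requires */
+	return IRQ_HANDLED;
+}
+</programlisting>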
+
+	<para>
+	If you decide not to support shared interrupts, your card
+	won't work in computers with no free interrupts. As this
+	frequently happens on the PC platform, you can save yourself a
+	lot of trouble by supporting interrupt sharing.
+	</para>
+</sect1>
+
+</chapter>
+
+<chapter id="userspace_driver" xreflabel="Writing a driver in user space">
+<?dbhtml filename="userspace_driver.html"?>
+<title>Writing a driver in userspace</title>
+	<para>
+	Once you have a working kernel module for your hardware, you can
+	write the userspace part of your driver. You don't need any special
+	libraries, your driver can be written in any reasonable language,
+	you can use floating point numbers and so on. In short, you can
+	use all the tools and libraries you'd normally use for writing a
+	userspace application.
+	</para>
+
+<sect1 id="getting_uio_information">
+<title>Getting information about your UIO device</title>
+	<para>
+	Information about all UIO devices is available in sysfs. The
+	first thing you should do in your driver is check
+	<varname>name</varname> and <varname>version</varname> to
+	make sure your talking to the right device and that its kernel
+	make sure you're talking to the right device and that its kernel
+	</para>
+	<para>
+	You should also make sure that the memory mapping you need
+	exists and has the size you expect.
+	</para>
+	<para>
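+	<para>
+	A trivial way to check these attributes from C might look like
+	this (<filename>uio0</filename> is just an example; pick the
+	device you actually want to use):
+	</para>
+<programlisting format="linespecific">
+char name[64];
+FILE *f = fopen("/sys/class/uio/uio0/name", "r");
+
+if (f) {
+	fscanf(f, "%63s", name);
+	fclose(f);
+	/* compare name against what your kernel module registered */
+}
+</programlisting>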
+	There is a tool called <varname>lsuio</varname> that lists
+	UIO devices and their attributes. It is available here:
+	</para>
+	<para>
+	<ulink url="http://www.osadl.org/projects/downloads/UIO/user/">
+		http://www.osadl.org/projects/downloads/UIO/user/</ulink>
+	</para>
+	<para>
+	With <varname>lsuio</varname> you can quickly check if your
+	kernel module is loaded and which attributes it exports.
+	Have a look at the manpage for details.
+	</para>
+	<para>
+	The source code of <varname>lsuio</varname> can serve as an
+	example for getting information about a UIO device.
+	The file <filename>uio_helper.c</filename> contains a lot of
+	functions you could use in your userspace driver code.
+	</para>
+</sect1>
+
+<sect1 id="mmap_device_memory">
+<title>mmap() device memory</title>
+	<para>
+	After you have made sure you've got the right device with the
+	memory mappings you need, all you have to do is call
+	<function>mmap()</function> to map the device's memory
+	to userspace.
+	</para>
+	<para>
+	The parameter <varname>offset</varname> of the
+	<function>mmap()</function> call has a special meaning
+	for UIO devices: It is used to select which mapping of
+	your device you want to map. To map the memory of
+	mapping N, you have to use N times the page size as
+	your offset:
+	</para>
+<programlisting format="linespecific">
+	offset = N * getpagesize();
+</programlisting>
+	<para>
+	N starts from zero, so if you've got only one memory
+	range to map, set <varname>offset = 0</varname>.
+	A drawback of this technique is that memory is always
+	mapped beginning with its start address.
+	</para>
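+	<para>
+	As a minimal sketch, mapping the first memory region of the
+	first UIO device (the device is opened read-write so the mapping
+	can be written; the 4096 byte size is just an example, a real
+	driver would take it from the corresponding sysfs
+	<filename>size</filename> attribute):
+	</para>
+<programlisting format="linespecific">
+int fd = open("/dev/uio0", O_RDWR);
+
+/* mapping N = 0, so offset = 0 * getpagesize() = 0 */
+void *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+</programlisting>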
+</sect1>
+
+<sect1 id="wait_for_interrupts">
+<title>Waiting for interrupts</title>
+	<para>
+	After you have successfully mapped your device's memory, you
+	can access it like an ordinary array. Usually, you will
+	perform some initialization. After that, your hardware
+	starts working and will generate an interrupt as soon
+	as it's finished, has some data available, or needs your
+	attention because an error occurred.
+	</para>
+	<para>
+	<filename>/dev/uioX</filename> is a read-only file. A
+	<function>read()</function> will always block until an
+	interrupt occurs. There is only one legal value for the
+	<varname>count</varname> parameter of
+	<function>read()</function>, and that is the size of a
+	signed 32 bit integer (4). Any other value for
+	<varname>count</varname> causes <function>read()</function>
+	to fail. The signed 32 bit integer read is the interrupt
+	count of your device. If the value is one more than the value
+	you read the last time, everything is OK. If the difference
+	is greater than one, you missed interrupts.
+	</para>
+	<para>
+	You can also use <function>select()</function> on
+	<filename>/dev/uioX</filename>.
+	</para>
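+	<para>
+	A rough sketch of the resulting main loop, with error handling
+	and the actual device work omitted:
+	</para>
+<programlisting format="linespecific">
+int fd = open("/dev/uio0", O_RDONLY);
+int icount;                     /* the signed 32 bit interrupt count */
+
+while (read(fd, &amp;icount, 4) == 4) {
+	/* read() returned, so at least one interrupt has occurred;
+	   compare icount with the previous value to detect missed ones */
+	handle_my_device();     /* hypothetical: do the actual work */
+}
+</programlisting>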
+</sect1>
+
+</chapter>
+
+<appendix id="app1">
+<title>Further information</title>
+<itemizedlist>
+	<listitem><para>
+			<ulink url="http://www.osadl.org">
+				OSADL homepage.</ulink>
+		</para></listitem>
+	<listitem><para>
+		<ulink url="http://www.linutronix.de">
+		 Linutronix homepage.</ulink>
+		</para></listitem>
+</itemizedlist>
+</appendix>
+
+</book>
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 98e2701..f8cc3f8 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -249,6 +249,9 @@
     release a new -rc kernel every week.
   - Process continues until the kernel is considered "ready", the
     process should last around 6 weeks.
+  - A list of known regressions present in each -rc release is
+    tracked at the following URI:
+    http://kernelnewbies.org/known_regressions
 
 It is worth mentioning what Andrew Morton wrote on the linux-kernel
 mailing list about kernel releases:
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index debf681..866b761 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -253,7 +253,7 @@
 
 	The first of these two routines is invoked after map_vm_area()
 	has installed the page table entries.  The second is invoked
-	before unmap_vm_area() deletes the page table entries.
+	before unmap_kernel_range() deletes the page table entries.
 
 There exists another whole class of cpu cache issues which currently
 require a whole different set of interfaces to handle properly.
diff --git a/Documentation/connector/cn_test.c b/Documentation/connector/cn_test.c
index 3e73231..be7af14 100644
--- a/Documentation/connector/cn_test.c
+++ b/Documentation/connector/cn_test.c
@@ -124,9 +124,8 @@
 	struct cn_msg *m;
 	char data[32];
 
-	m = kmalloc(sizeof(*m) + sizeof(data), GFP_ATOMIC);
+	m = kzalloc(sizeof(*m) + sizeof(data), GFP_ATOMIC);
 	if (m) {
-		memset(m, 0, sizeof(*m) + sizeof(data));
 
 		memcpy(&m->id, &cn_test_id, sizeof(m->id));
 		m->seq = cn_test_timer_counter;
diff --git a/Documentation/console/console.txt b/Documentation/console/console.txt
index d3e1744..877a1b2 100644
--- a/Documentation/console/console.txt
+++ b/Documentation/console/console.txt
@@ -29,7 +29,7 @@
 
 If sysfs is enabled, the contents of /sys/class/vtconsole can be
 examined. This shows the console backends currently registered by the
-system which are named vtcon<n> where <n> is an integer fro 0 to 15. Thus:
+system which are named vtcon<n> where <n> is an integer from 0 to 15. Thus:
 
        ls /sys/class/vtconsole
        .  ..  vtcon0  vtcon1
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 6c8d8f2..8569072 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -207,7 +207,7 @@
 resource allocations already do the job.
 
 For an example of single-instance devres type, read pcim_iomap_table()
-in lib/iomap.c.
+in lib/devres.c.
 
 All devres interface functions can be called without context if the
 right gfp mask is given.
diff --git a/Documentation/drivers/edac/edac.txt b/Documentation/drivers/edac/edac.txt
index 3c5a9e4..a5c36842e 100644
--- a/Documentation/drivers/edac/edac.txt
+++ b/Documentation/drivers/edac/edac.txt
@@ -2,22 +2,42 @@
 
 EDAC - Error Detection And Correction
 
-Written by Doug Thompson <norsk5@xmission.com>
+Written by Doug Thompson <dougthompson@xmission.com>
 7 Dec 2005
+17 Jul 2007	Updated
 
 
-EDAC was written by:
-	Thayne Harbaugh,
-	modified by Dave Peterson, Doug Thompson, et al,
-	from the bluesmoke.sourceforge.net project.
+EDAC is maintained and written by:
 
+	Doug Thompson, Dave Jiang, Dave Peterson et al,
+	original author: Thayne Harbaugh,
+
+Contact:
+	website:	bluesmoke.sourceforge.net
+	mailing list:	bluesmoke-devel@lists.sourceforge.net
+
+"bluesmoke" was the name for this device driver when it was "out-of-tree"
+and maintained at sourceforge.net.  When it was pushed into 2.6.16 for the
+first time, it was renamed to 'EDAC'.
+
+The bluesmoke project at sourceforge.net is now utilized as a 'staging area'
+for EDAC development, before it is sent upstream to kernel.org.
+
+At the bluesmoke/EDAC project site there is a series of quilt patches against
+recent kernels, stored in an SVN repository. For easier downloading, there
+is also a tarball snapshot available.
 
 ============================================================================
 EDAC PURPOSE
 
 The 'edac' kernel module goal is to detect and report errors that occur
-within the computer system. In the initial release, memory Correctable Errors
-(CE) and Uncorrectable Errors (UE) are the primary errors being harvested.
+within the computer system running under Linux.
+
+MEMORY
+
+In the initial release, memory Correctable Errors (CE) and Uncorrectable
+Errors (UE) are the primary errors being harvested. These types of errors
+are harvested by the 'edac_mc' class of device.
 
 Detecting CE events, then harvesting those events and reporting them,
 CAN be a predictor of future UE events.  With CE events, the system can
@@ -25,9 +45,27 @@
 proactive part replacement of memory DIMMs exhibiting CEs can reduce
 the likelihood of the dreaded UE events and system 'panics'.
 
+NON-MEMORY
+
+A new feature for EDAC, the edac_device class of device, was added in
+the 2.6.23 version of the kernel.
+
+This new device type allows for non-memory type of ECC hardware detectors
+to have their states harvested and presented to userspace via the sysfs
+interface.
+
+Some architectures have ECC detectors for L1, L2 and L3 caches, along with DMA
+engines, fabric switches, main data path switches, interconnections,
+and various other hardware data paths. If the hardware reports it, then
+an edac_device device probably can be constructed to harvest and present
+that to userspace.
+
+
+PCI BUS SCANNING
 
 In addition, PCI Bus Parity and SERR Errors are scanned for on PCI devices
 in order to determine if errors are occurring on data transfers.
+
 The presence of PCI Parity errors must be examined with a grain of salt.
 There are several add-in adapters that do NOT follow the PCI specification
 with regards to Parity generation and reporting. The specification says
@@ -35,11 +73,17 @@
 to generate parity.  Some vendors do not do this, and thus the parity bit
 can "float" giving false positives.
 
-[There are patches in the kernel queue which will allow for storage of
-quirks of PCI devices reporting false parity positives. The 2.6.18
-kernel should have those patches included. When that becomes available,
-then EDAC will be patched to utilize that information to "skip" such
-devices.]
+In the kernel there is a pci device attribute located in sysfs that is
+checked by the EDAC PCI scanning code. If that attribute is set,
+PCI parity/error scanning is skipped for that device. The attribute
+is:
+
+	broken_parity_status
+
+and is located in the /sys/devices/pci<XXX>/0000:XX:YY.Z directories for
+PCI devices.
+
+FUTURE HARDWARE SCANNING
 
 EDAC will have future error detectors that will be integrated with
 EDAC or added to it, in the following list:
@@ -57,13 +101,14 @@
 ============================================================================
 EDAC VERSIONING
 
-EDAC is composed of a "core" module (edac_mc.ko) and several Memory
+EDAC is composed of a "core" module (edac_core.ko) and several Memory
 Controller (MC) driver modules. On a given system, the CORE
 is loaded and one MC driver will be loaded. Both the CORE and
-the MC driver have individual versions that reflect current release
-level of their respective modules.  Thus, to "report" on what version
-a system is running, one must report both the CORE's and the
-MC driver's versions.
+the MC driver (or edac_device driver) have individual versions that reflect
+current release level of their respective modules.
+
+Thus, to "report" on what version a system is running, one must report both
+the CORE's and the MC driver's versions.
 
 
 LOADING
@@ -88,8 +133,9 @@
 EDAC presents a 'sysfs' interface for control, reporting and attribute
 reporting purposes.
 
-EDAC lives in the /sys/devices/system/edac directory. Within this directory
-there currently reside 2 'edac' components:
+EDAC lives in the /sys/devices/system/edac directory.
+
+Within this directory there currently reside 2 'edac' components:
 
 	mc	memory controller(s) system
 	pci	PCI control and status system
@@ -188,7 +234,7 @@
 
 Panic on UE control file:
 
-	'panic_on_ue'
+	'edac_mc_panic_on_ue'
 
 	An uncorrectable error will cause a machine panic.  This is usually
 	desirable.  It is a bad idea to continue when an uncorrectable error
@@ -199,12 +245,12 @@
 
 	LOAD TIME: module/kernel parameter: panic_on_ue=[0|1]
 
-	RUN TIME:  echo "1" >/sys/devices/system/edac/mc/panic_on_ue
+	RUN TIME:  echo "1" >/sys/devices/system/edac/mc/edac_mc_panic_on_ue
 
 
 Log UE control file:
 
-	'log_ue'
+	'edac_mc_log_ue'
 
 	Generate kernel messages describing uncorrectable errors.  These errors
 	are reported through the system message log system.  UE statistics
@@ -212,12 +258,12 @@
 
 	LOAD TIME: module/kernel parameter: log_ue=[0|1]
 
-	RUN TIME: echo "1" >/sys/devices/system/edac/mc/log_ue
+	RUN TIME: echo "1" >/sys/devices/system/edac/mc/edac_mc_log_ue
 
 
 Log CE control file:
 
-	'log_ce'
+	'edac_mc_log_ce'
 
 	Generate kernel messages describing correctable errors.  These
 	errors are reported through the system message log system.
@@ -225,12 +271,12 @@
 
 	LOAD TIME: module/kernel parameter: log_ce=[0|1]
 
-	RUN TIME: echo "1" >/sys/devices/system/edac/mc/log_ce
+	RUN TIME: echo "1" >/sys/devices/system/edac/mc/edac_mc_log_ce
 
 
 Polling period control file:
 
-	'poll_msec'
+	'edac_mc_poll_msec'
 
 	The time period, in milliseconds, for polling for error information.
 	Too small a value wastes resources.  Too large a value might delay
@@ -241,7 +287,7 @@
 
 	LOAD TIME: module/kernel parameter: poll_msec=[0|1]
 
-	RUN TIME: echo "1000" >/sys/devices/system/edac/mc/poll_msec
+	RUN TIME: echo "1000" >/sys/devices/system/edac/mc/edac_mc_poll_msec
 
 
 ============================================================================
@@ -587,3 +633,95 @@
 
 
 =======================================================================
+
+
+EDAC_DEVICE type of device
+
+In the header file, edac_core.h, there is a series of edac_device structures
+and APIs for the EDAC_DEVICE.
+
+User space access to an edac_device is through the sysfs interface.
+
+At the location /sys/devices/system/edac (sysfs) new edac_device devices will
+appear.
+
+There is a three level tree beneath the above 'edac' directory. For example,
+the 'test_device_edac' device (found at the bluesmoke.sourceforge.net website)
+installs itself as:
+
+	/sys/devices/system/edac/test-instance
+
+In this directory are various controls, a symlink, and one or more 'instance'
+directories.
+
+The standard default controls are:
+
+	log_ce		boolean to log CE events
+	log_ue		boolean to log UE events
+	panic_on_ue	boolean to 'panic' the system if an UE is encountered
+			(default off, can be set true via startup script)
+	poll_msec	time period between POLL cycles for events
+
+The test_device_edac device adds at least one custom control of its own:
+
+	test_bits	which in the current test driver does nothing but
+			show how it is installed. A ported driver can
+			add one or more such controls and/or attributes
+			for specific uses.
+			One out-of-tree driver uses controls here to allow
+			for ERROR INJECTION operations to hardware
+			injection registers
+
+The symlink points to the 'struct dev' that is registered for this edac_device.
+
+INSTANCES
+
+One or more instance directories are present. For the 'test_device_edac' case:
+
+	test-instance0
+
+
+In this directory there are two default counter attributes, which are totals of
+the counters in deeper subdirectories.
+
+	ce_count	total of CE events of subdirectories
+	ue_count	total of UE events of subdirectories
+
+BLOCKS
+
+At the lowest directory level is the 'block' directory. There can be 0, 1
+or more blocks specified in each instance.
+
+	test-block0
+
+
+In this directory the default attributes are:
+
+	ce_count	the count of CE events for this 'block'
+			of hardware being monitored
+	ue_count	the count of UE events for this 'block'
+			of hardware being monitored
+
+
+The 'test_device_edac' device adds 4 attributes and 1 control:
+
+	test-block-bits-0	for every POLL cycle this counter
+				is incremented
+	test-block-bits-1	every 10 cycles, this counter is bumped once,
+				and test-block-bits-0 is set to 0
+	test-block-bits-2	every 100 cycles, this counter is bumped once,
+				and test-block-bits-1 is set to 0
+	test-block-bits-3	every 1000 cycles, this counter is bumped once,
+				and test-block-bits-2 is set to 0
+
+
+	reset-counters		writing ANY thing to this control will
+				reset all the above counters.
+
+
+Use of the 'test_device_edac' driver should enable others to create their own
+unique drivers for their hardware systems.
+
+The 'test_device_edac' sample driver is located at the
+bluesmoke.sourceforge.net project site for EDAC.
+
diff --git a/Documentation/dvb/bt8xx.txt b/Documentation/dvb/bt8xx.txt
index 4e7614e..ecb47ad 100644
--- a/Documentation/dvb/bt8xx.txt
+++ b/Documentation/dvb/bt8xx.txt
@@ -9,19 +9,29 @@
 Please see Documentation/dvb/cards.txt => o Cards based on the Conexant Bt8xx PCI bridge:
 
 Compiling kernel please enable:
-a.)"Device drivers" => "Multimedia devices" => "Video For Linux" => "BT848 Video For Linux"
-b.)"Device drivers" => "Multimedia devices" => "Digital Video Broadcasting Devices"
- => "DVB for Linux" "DVB Core Support" "Bt8xx based PCI Cards"
+a.)"Device drivers" => "Multimedia devices" => "Video For Linux" => "Enable Video for Linux API 1 (DEPRECATED)"
+b.)"Device drivers" => "Multimedia devices" => "Video For Linux" => "Video Capture Adapters" => "BT848 Video For Linux"
+c.)"Device drivers" => "Multimedia devices" => "Digital Video Broadcasting Devices" => "DVB for Linux" "DVB Core Support" "Bt8xx based PCI Cards"
+
+Please use the following options with care as deselection of drivers which are in fact necessary
+may result in DVB devices that cannot be tuned due to lack of driver support:
+You can save RAM by deselecting every frontend module that your DVB card does not need.
+
+First please remove the static dependency of DVB card drivers on all frontend modules for all possible card variants by enabling:
+d.) "Device drivers" => "Multimedia devices" => "Digital Video Broadcasting Devices"
+ => "DVB for Linux" "DVB Core Support" "Load and attach frontend modules as needed"
+
+If you know the frontend driver that your card needs please enable:
+e.)"Device drivers" => "Multimedia devices" => "Digital Video Broadcasting Devices"
+ => "DVB for Linux" "DVB Core Support" "Customise DVB Frontends" => "Customise the frontend modules to build"
+ Then please select your card-specific frontend module.
 
 2) Loading Modules
 ==================
 
-In default cases bttv is loaded automatically.
-To load the backend either place dvb-bt8xx in etc/modules, or apply manually:
-
-	$ modprobe dvb-bt8xx
-
-All frontends will be loaded automatically.
+Regular case: If the bttv driver detects a bt8xx-based DVB card, all frontend and backend modules will be loaded automatically.
+Exceptions are:
+- Old TwinHan DST cards or clones with or without CA slot and not containing an EEPROM.
 People running udev please see Documentation/dvb/udev.txt.
 
 In the following cases overriding the PCI type detection for dvb-bt8xx might be necessary:
@@ -30,7 +40,6 @@
 ------------------------------
 
 	$ modprobe bttv card=113
-	$ modprobe dvb-bt8xx
 	$ modprobe dst
 
 Useful parameters for verbosity level and debugging the dst module:
@@ -65,10 +74,9 @@
 Notice: The order of the card ID should be uprising:
 Example:
 	$ modprobe bttv card=113 card=135
-	$ modprobe dvb-bt8xx
 
 For a full list of card ID's please see Documentation/video4linux/CARDLIST.bttv.
-In case of further problems send questions to the mailing list: www.linuxdvb.org.
+In case of further problems please subscribe and send questions to the mailing list: linux-dvb@linuxtv.org.
 
 Authors: Richard Walker,
 	 Jamie Honan,
diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
index 4820366..b4d306a 100644
--- a/Documentation/dvb/get_dvb_firmware
+++ b/Documentation/dvb/get_dvb_firmware
@@ -24,7 +24,8 @@
 @components = ( "sp8870", "sp887x", "tda10045", "tda10046",
 		"tda10046lifeview", "av7110", "dec2000t", "dec2540t",
 		"dec3000s", "vp7041", "dibusb", "nxt2002", "nxt2004",
-		"or51211", "or51132_qam", "or51132_vsb", "bluebird");
+		"or51211", "or51132_qam", "or51132_vsb", "bluebird",
+		"opera1");
 
 # Check args
 syntax() if (scalar(@ARGV) != 1);
@@ -56,7 +57,7 @@
 
 sub sp8870 {
     my $sourcefile = "tt_Premium_217g.zip";
-    my $url = "http://www.technotrend.de/new/217g/$sourcefile";
+    my $url = "http://www.softwarepatch.pl/9999ccd06a4813cb827dbb0005071c71/$sourcefile";
     my $hash = "53970ec17a538945a6d8cb608a7b3899";
     my $outfile = "dvb-fe-sp8870.fw";
     my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1);
@@ -210,6 +211,45 @@
 
     $outfile;
 }
+sub opera1{
+	my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 0);
+
+	checkstandard();
+	my $fwfile1="dvb-usb-opera1-fpga-01.fw";
+	my $fwfile2="dvb-usb-opera-01.fw";
+	extract("2830SCap2.sys", 0x62e8, 55024, "$tmpdir/opera1-fpga.fw");
+	extract("2830SLoad2.sys",0x3178,0x3685-0x3178,"$tmpdir/fw1part1");
+	extract("2830SLoad2.sys",0x0980,0x3150-0x0980,"$tmpdir/fw1part2");
+	delzero("$tmpdir/fw1part1","$tmpdir/fw1part1-1");
+	delzero("$tmpdir/fw1part2","$tmpdir/fw1part2-1");
+	verify("$tmpdir/fw1part1-1","5e0909858fdf0b5b09ad48b9fe622e70");
+	verify("$tmpdir/fw1part2-1","d6e146f321427e931df2c6fcadac37a1");
+	verify("$tmpdir/opera1-fpga.fw","0f8133f5e9051f5f3c1928f7e5a1b07d");
+
+	my $RES1="\x01\x92\x7f\x00\x01\x00";
+	my $RES0="\x01\x92\x7f\x00\x00\x00";
+	my $DAT1="\x01\x00\xe6\x00\x01\x00";
+	my $DAT0="\x01\x00\xe6\x00\x00\x00";
+	open FW,">$tmpdir/opera.fw";
+	print FW "$RES1";
+	print FW "$DAT1";
+	print FW "$RES1";
+	print FW "$DAT1";
+	appendfile(FW,"$tmpdir/fw1part1-1");
+	print FW "$RES0";
+	print FW "$DAT0";
+	print FW "$RES1";
+	print FW "$DAT1";
+	appendfile(FW,"$tmpdir/fw1part2-1");
+	print FW "$RES1";
+	print FW "$DAT1";
+	print FW "$RES0";
+	print FW "$DAT0";
+	copy ("$tmpdir/opera1-fpga.fw",$fwfile1);
+	copy ("$tmpdir/opera.fw",$fwfile2);
+
+	$fwfile1.",".$fwfile2;
+}
 
 sub vp7041 {
     my $sourcefile = "2.422.zip";
@@ -440,6 +480,25 @@
     close(INFILE);
 }
 
+sub delzero{
+	my ($infile,$outfile) =@_;
+
+	open INFILE,"<$infile";
+	open OUTFILE,">$outfile";
+	while (1){
+		$rcount=sysread(INFILE,$buf,22);
+		$len=ord(substr($buf,0,1));
+		print OUTFILE substr($buf,0,1);
+		print OUTFILE substr($buf,2,$len+3);
+	last if ($rcount<1);
+	printf OUTFILE "%c",0;
+#print $len." ".length($buf)."\n";
+
+	}
+	close(INFILE);
+	close(OUTFILE);
+}
+
 sub syntax() {
     print STDERR "syntax: get_dvb_firmware <component>\n";
     print STDERR "Supported components:\n";
diff --git a/Documentation/dvb/opera-firmware.txt b/Documentation/dvb/opera-firmware.txt
new file mode 100644
index 0000000..93e784c
--- /dev/null
+++ b/Documentation/dvb/opera-firmware.txt
@@ -0,0 +1,27 @@
+To extract the firmware for the Opera DVB-S1 USB-Box
+you need to copy the files:
+
+2830SCap2.sys
+2830SLoad2.sys
+
+from the windriver disk into this directory.
+
+Then run
+
+./get_dvb_firmware opera1
+
+and after that you have 2 files:
+
+dvb-usb-opera-01.fw
+dvb-usb-opera1-fpga-01.fw
+
+in here.
+
+Copy them into /lib/firmware/ .
+
+After that the driver can load the firmware
+(if you have enabled firmware loading
+in kernel config and have hotplug running).
+
+
+Marco Gittler <g.marco@freenet.de>
\ No newline at end of file
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 18bd2dd..a5cb783 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -26,9 +26,7 @@
 
 ---------------------------
 
-What:	/sys/devices/.../power/state
-	dev->power.power_state
-	dpm_runtime_{suspend,resume)()
+What:	dev->power.power_state
 When:	July 2007
 Why:	Broken design for runtime control over driver power states, confusing
 	driver-internal runtime power management with:  mechanisms to support
@@ -53,6 +51,7 @@
 What:	Video4Linux API 1 ioctls and video_decoder.h from Video devices.
 When:	December 2006
 Files:	include/linux/video_decoder.h
+Check:	include/linux/video_decoder.h
 Why:	V4L1 AP1 was replaced by V4L2 API. during migration from 2.4 to 2.6
 	series. The old API have lots of drawbacks and don't provide enough
 	means to work with all video and audio standards. The newer API is
@@ -86,7 +85,7 @@
 What:	remove EXPORT_SYMBOL(kernel_thread)
 When:	August 2006
 Files:	arch/*/kernel/*_ksyms.c
-Funcs:	kernel_thread
+Check:	kernel_thread
 Why:	kernel_thread is a low-level implementation detail.  Drivers should
         use the <linux/kthread.h> API instead which shields them from
 	implementation details and provides a higherlevel interface that
@@ -137,6 +136,15 @@
 
 ---------------------------
 
+What:	vm_ops.nopage
+When:	Soon, provided in-kernel callers have been converted
+Why:	This interface is replaced by vm_ops.fault, but it has been around
+	forever, is used by a lot of drivers, and doesn't cost much to
+	maintain.
+Who:	Nick Piggin <npiggin@suse.de>
+
+---------------------------
+
 What:	Interrupt only SA_* flags
 When:	September 2007
 Why:	The interrupt related SA_* flags are replaced by IRQF_* to move them
@@ -156,15 +164,6 @@
 
 ---------------------------
 
-What:	i2c-isa
-When:	December 2006
-Why:	i2c-isa is a non-sense and doesn't fit in the device driver
-	model. Drivers relying on it are better implemented as platform
-	drivers.
-Who:	Jean Delvare <khali@linux-fr.org>
-
----------------------------
-
 What:	i2c_adapter.list
 When:	July 2007
 Why:	Superfluous, this list duplicates the one maintained by the driver
@@ -297,3 +296,26 @@
 Who:	Patrick McHardy <kaber@trash.net>
 
 ---------------------------
+
+What: The arch/ppc and include/asm-ppc directories
+When: Jun 2008
+Why:  The arch/powerpc tree is the merged architecture for ppc32 and ppc64
+      platforms.  Currently there are efforts underway to port the remaining
+      arch/ppc platforms to the merged tree.  New submissions to the arch/ppc
+      tree have been frozen with the 2.6.22 kernel release and that tree will
+      remain in bug-fix only mode until its scheduled removal.  Platforms
+      that are not ported by June 2008 will be removed due to the lack of an
+      interested maintainer.
+Who:  linuxppc-dev@ozlabs.org
+
+---------------------------
+
+What:	mthca driver's MSI support
+When:	January 2008
+Files:	drivers/infiniband/hw/mthca/*.[ch]
+Why:	All mthca hardware also supports MSI-X, which provides
+	strictly more functionality than MSI.  So there is no point in
+	having both MSI-X and MSI support in the driver.
+Who:	Roland Dreier <rolandd@cisco.com>
+
+---------------------------
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index d866551..f0f8258 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -510,13 +510,24 @@
 prototypes:
 	void (*open)(struct vm_area_struct*);
 	void (*close)(struct vm_area_struct*);
+	int (*fault)(struct vm_area_struct*, struct vm_fault *);
 	struct page *(*nopage)(struct vm_area_struct*, unsigned long, int *);
+	int (*page_mkwrite)(struct vm_area_struct *, struct page *);
 
 locking rules:
-		BKL	mmap_sem
+		BKL	mmap_sem	PageLocked(page)
 open:		no	yes
 close:		no	yes
+fault:		no	yes
 nopage:		no	yes
+page_mkwrite:	no	yes		no
+
+	->page_mkwrite() is called when a previously read-only page is
+about to become writeable. The file system is responsible for
+protecting against truncate races. Once appropriate action has been
+taken to lock out truncate, the page range should be verified to be
+within i_size. The page mapping should also be checked to ensure that
+it is not NULL.
 
 ================================================================================
 			Dubious stuff
diff --git a/Documentation/filesystems/configfs/configfs_example.c b/Documentation/filesystems/configfs/configfs_example.c
index e56d492..25151fd 100644
--- a/Documentation/filesystems/configfs/configfs_example.c
+++ b/Documentation/filesystems/configfs/configfs_example.c
@@ -277,11 +277,10 @@
 {
 	struct simple_child *simple_child;
 
-	simple_child = kmalloc(sizeof(struct simple_child), GFP_KERNEL);
+	simple_child = kzalloc(sizeof(struct simple_child), GFP_KERNEL);
 	if (!simple_child)
 		return NULL;
 
-	memset(simple_child, 0, sizeof(struct simple_child));
 
 	config_item_init_type_name(&simple_child->item, name,
 				   &simple_child_type);
@@ -364,12 +363,11 @@
 {
 	struct simple_children *simple_children;
 
-	simple_children = kmalloc(sizeof(struct simple_children),
+	simple_children = kzalloc(sizeof(struct simple_children),
 				  GFP_KERNEL);
 	if (!simple_children)
 		return NULL;
 
-	memset(simple_children, 0, sizeof(struct simple_children));
 
 	config_group_init_type_name(&simple_children->group, name,
 				    &simple_children_type);
diff --git a/Documentation/ecryptfs.txt b/Documentation/filesystems/ecryptfs.txt
similarity index 100%
rename from Documentation/ecryptfs.txt
rename to Documentation/filesystems/ecryptfs.txt
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 460b892..4a37e25 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -42,6 +42,7 @@
   2.12	/proc/<pid>/oom_adj - Adjust the oom-killer score
   2.13	/proc/<pid>/oom_score - Display current oom-killer score
   2.14	/proc/<pid>/io - Display the IO accounting fields
+  2.15	/proc/<pid>/coredump_filter - Core dump filtering settings
 
 ------------------------------------------------------------------------------
 Preface
@@ -1065,6 +1066,13 @@
 resume it  if we have a value of 3 or more percent; consider information about
 the amount of free space valid for 30 seconds
 
+audit_argv_kb
+-------------
+
+The file contains a single value denoting the limit on the argv array size
+for execve (in KiB). This limit is only applied when system call auditing for
+execve is enabled, otherwise the value is ignored.
+
 ctrl-alt-del
 ------------
 
@@ -1348,6 +1356,21 @@
 hugetlb_shm_group contains group id that is allowed to create SysV shared
 memory segment using hugetlb page.
 
+hugepages_treat_as_movable
+--------------------------
+
+This parameter is only useful when kernelcore= is specified at boot time to
+create ZONE_MOVABLE for pages that may be reclaimed or migrated. Huge pages
+are not movable so are not normally allocated from ZONE_MOVABLE. A non-zero
+value written to hugepages_treat_as_movable allows huge pages to be allocated
+from ZONE_MOVABLE.
+
+Once enabled, the ZONE_MOVABLE is treated as an area of memory the huge
+pages pool can easily grow or shrink within. Assuming that applications are
+not running that mlock() a lot of memory, it is likely the huge pages pool
+can grow to the size of ZONE_MOVABLE by repeatedly entering the desired value
+into nr_hugepages and triggering page reclaim.
+
 laptop_mode
 -----------
 
@@ -2162,4 +2185,41 @@
 More information about this can be found within the taskstats documentation in
 Documentation/accounting.
 
+2.15 /proc/<pid>/coredump_filter - Core dump filtering settings
+---------------------------------------------------------------
+When a process is dumped, all anonymous memory is written to a core file as
+long as the size of the core file isn't limited. But sometimes we don't want
+to dump some memory segments, for example, huge shared memory. Conversely,
+sometimes we want to save file-backed memory segments into a core file, not
+only the individual files.
+
+/proc/<pid>/coredump_filter allows you to customize which memory segments
+will be dumped when the <pid> process is dumped. coredump_filter is a bitmask
+of memory types. If a bit of the bitmask is set, memory segments of the
+corresponding memory type are dumped, otherwise they are not dumped.
+
+The following 4 memory types are supported:
+  - (bit 0) anonymous private memory
+  - (bit 1) anonymous shared memory
+  - (bit 2) file-backed private memory
+  - (bit 3) file-backed shared memory
+
+  Note that MMIO pages such as frame buffer are never dumped and vDSO pages
+  are always dumped regardless of the bitmask status.
+
+Default value of coredump_filter is 0x3; this means all anonymous memory
+segments are dumped.
+
+If you don't want to dump all shared memory segments attached to pid 1234,
+write 1 to the process's proc file.
+
+  $ echo 0x1 > /proc/1234/coredump_filter
+
+When a new process is created, the process inherits the bitmask status from its
+parent. It is useful to set up coredump_filter before the program runs.
+For example:
+
+  $ echo 0x7 > /proc/self/coredump_filter
+  $ ./some_program
+
 ------------------------------------------------------------------------------
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index 36af58e..218a865 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -75,6 +75,9 @@
 If you stick to this convention then it'll be easier for other developers to
 see what your code is doing, and help maintain it.
 
+Note that these operations include I/O barriers on platforms which need to
+use them; drivers don't need to add them explicitly.
+
 
 Identifying GPIOs
 -----------------
diff --git a/Documentation/hwmon/abituguru b/Documentation/hwmon/abituguru
index b2c0d61..87ffa0f 100644
--- a/Documentation/hwmon/abituguru
+++ b/Documentation/hwmon/abituguru
@@ -2,7 +2,7 @@
 =======================
 
 Supported chips:
-  * Abit uGuru revision 1-3 (Hardware Monitor part only)
+  * Abit uGuru revision 1 & 2 (Hardware Monitor part only)
     Prefix: 'abituguru'
     Addresses scanned: ISA 0x0E0
     Datasheet: Not available, this driver is based on reverse engineering.
@@ -20,8 +20,8 @@
 	uGuru 2.1.0.0 ~ 2.1.2.8 (AS8, AV8, AA8, AG8, AA8XE, AX8)
 	uGuru 2.2.0.0 ~ 2.2.0.6 (AA8 Fatal1ty)
 	uGuru 2.3.0.0 ~ 2.3.0.9 (AN8)
-	uGuru 3.0.0.0 ~ 3.0.1.2 (AW8, AL8, NI8)
-	uGuru 4.xxxxx?          (AT8 32X) (2)
+	uGuru 3.0.0.0 ~ 3.0.x.x (AW8, AL8, AT8, NI8 SLI, AT8 32X, AN8 32X,
+				 AW9D-MAX) (2)
 	1) For revisions 2 and 3 uGuru's the driver can autodetect the
 	   sensortype (Volt or Temp) for bank1 sensors, for revision 1 uGuru's
 	   this doesnot always work. For these uGuru's the autodection can
@@ -30,8 +30,9 @@
 	   bank1_types=1,1,0,0,0,0,0,2,0,0,0,0,2,0,0,1
 	   You may also need to specify the fan_sensors option for these boards
 	   fan_sensors=5
-	2) The current version of the abituguru driver is known to NOT work
-	   on these Motherboards
+	2) There is a separate abituguru3 driver for these motherboards;
+	   the abituguru (without the 3!) driver will not work on these
+	   motherboards (and vice versa)!
 
 Authors:
 	Hans de Goede <j.w.r.degoede@hhs.nl>,
@@ -43,8 +44,10 @@
 -----------------
 
 * force: bool		Force detection. Note this parameter only causes the
-			detection to be skipped, if the uGuru can't be read
-			the module initialization (insmod) will still fail.
+			detection to be skipped, and thus the insmod to
+			succeed. If the uGuru can't be read the actual hwmon
+			driver will not load and thus no hwmon device will get
+			registered.
 * bank1_types: int[]	Bank1 sensortype autodetection override:
 			  -1 autodetect (default)
 			   0 volt sensor
@@ -69,13 +72,15 @@
 Description
 -----------
 
-This driver supports the hardware monitoring features of the Abit uGuru chip
-found on Abit uGuru featuring motherboards (most modern Abit motherboards).
+This driver supports the hardware monitoring features of the first and
+second revision of the Abit uGuru chip found on Abit uGuru featuring
+motherboards (most modern Abit motherboards).
 
-The uGuru chip in reality is a Winbond W83L950D in disguise (despite Abit
-claiming it is "a new microprocessor designed by the ABIT Engineers").
-Unfortunatly this doesn't help since the W83L950D is a generic
-microcontroller with a custom Abit application running on it.
+The first and second revisions of the uGuru chip are in reality a Winbond
+W83L950D in disguise (despite Abit claiming it is "a new microprocessor
+designed by the ABIT Engineers"). Unfortunately this doesn't help since the
+W83L950D is a generic microcontroller with a custom Abit application running
+on it.
 
 Despite Abit not releasing any information regarding the uGuru, Olle
 Sandberg <ollebull@gmail.com> has managed to reverse engineer the sensor part
diff --git a/Documentation/hwmon/abituguru3 b/Documentation/hwmon/abituguru3
new file mode 100644
index 0000000..fa598aa
--- /dev/null
+++ b/Documentation/hwmon/abituguru3
@@ -0,0 +1,65 @@
+Kernel driver abituguru3
+========================
+
+Supported chips:
+  * Abit uGuru revision 3 (Hardware Monitor part, reading only)
+    Prefix: 'abituguru3'
+    Addresses scanned: ISA 0x0E0
+    Datasheet: Not available, this driver is based on reverse engineering.
+    Note:
+	The uGuru is a microcontroller with onboard firmware which programs
+	it to behave as a hwmon IC. There are many different revisions of the
+	firmware and thus effectively many different revisions of the uGuru.
+	Below is an incomplete list of which revisions are used for which
+	motherboards:
+	uGuru 1.00    ~ 1.24    (AI7, KV8-MAX3, AN7)
+	uGuru 2.0.0.0 ~ 2.0.4.2 (KV8-PRO)
+	uGuru 2.1.0.0 ~ 2.1.2.8 (AS8, AV8, AA8, AG8, AA8XE, AX8)
+	uGuru 2.3.0.0 ~ 2.3.0.9 (AN8)
+	uGuru 3.0.0.0 ~ 3.0.x.x (AW8, AL8, AT8, NI8 SLI, AT8 32X, AN8 32X,
+				 AW9D-MAX)
+	The abituguru3 driver is only for revision 3.0.x.x motherboards;
+	it will not work on older motherboards. For older
+	motherboards use the abituguru (without the 3!) driver.
+
+Authors:
+	Hans de Goede <j.w.r.degoede@hhs.nl>,
+	(Initial reverse engineering done by Louis Kruger)
+
+
+Module Parameters
+-----------------
+
+* force: bool		Force detection. Note this parameter only causes the
+			detection to be skipped, and thus the insmod to
+			succeed. If the uGuru can't be read the actual hwmon
+			driver will not load and thus no hwmon device will get
+			registered.
+* verbose: bool		Should the driver be verbose?
+			0/off/false  normal output
+			1/on/true    + verbose error reporting (default)
+			Default: 1 (the driver is still in the testing phase)
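+
+For example, to skip detection and keep verbose error reporting enabled, the
+module might be loaded as follows (illustrative values only):
+
+	# modprobe abituguru3 force=1 verbose=1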
+
+Description
+-----------
+
+This driver supports the hardware monitoring features of the third revision of
+the Abit uGuru chip, found on recent Abit uGuru featuring motherboards.
+
+The 3rd revision of the uGuru chip is in reality a Winbond W83L951G.
+Unfortunately this doesn't help since the W83L951G is a generic
+microcontroller with a custom Abit application running on it.
+
+Despite Abit not releasing any information regarding the uGuru revision 3,
+Louis Kruger has managed to reverse engineer the sensor part of the uGuru.
+Without his work this driver would not have been possible.
+
+Known Issues
+------------
+
+The voltage and frequency control parts of the Abit uGuru are not supported;
+neither is writing any of the sensor settings or writing / reading the
+fanspeed control registers (FanEQ).
+
+If you encounter any problems please mail me <j.w.r.degoede@hhs.nl> and
+include the output of: "dmesg | grep abituguru"
diff --git a/Documentation/hwmon/dme1737 b/Documentation/hwmon/dme1737
new file mode 100644
index 0000000..1a0f3d6
--- /dev/null
+++ b/Documentation/hwmon/dme1737
@@ -0,0 +1,257 @@
+Kernel driver dme1737
+=====================
+
+Supported chips:
+  * SMSC DME1737 and compatibles (like Asus A8000)
+    Prefix: 'dme1737'
+    Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+    Datasheet: Provided by SMSC upon request and under NDA
+
+Authors:
+    Juerg Haefliger <juergh@gmail.com>
+
+
+Module Parameters
+-----------------
+
+* force_start: bool	Enables the monitoring of voltage, fan and temp inputs
+			and PWM output control functions. Using this parameter
+			shouldn't be required since the BIOS usually takes care
+			of this.
+
+Note that there is no need to use this parameter if the driver loads without
+complaining. The driver will say so if it is necessary.
+
+
+Description
+-----------
+
+This driver implements support for the hardware monitoring capabilities of the
+SMSC DME1737 and Asus A8000 (which are the same) Super-I/O chips. This chip
+features monitoring of 3 temp sensors temp[1-3] (2 remote diodes and 1
+internal), 7 voltages in[0-6] (6 external and 1 internal) and 6 fan speeds
+fan[1-6]. Additionally, the chip implements 5 PWM outputs pwm[1-3,5-6] for
+controlling fan speeds both manually and automatically.
+
+Fan[3-6] and pwm[3,5-6] are optional features and their availability is
+dependent on the configuration of the chip. The driver will detect which
+features are present during initialization and create the sysfs attributes
+accordingly.
+
+
+Voltage Monitoring
+------------------
+
+The voltage inputs are sampled with 12-bit resolution and have internal
+scaling resistors. The values returned by the driver therefore reflect true
+millivolts and don't need scaling. The voltage inputs are mapped as follows
+(the last column indicates the input ranges):
+
+	in0: +5VTR	(+5V standby)		0V - 6.64V
+	in1: Vccp	(processor core)	0V - 3V
+	in2: VCC	(internal +3.3V)	0V - 4.38V
+	in3: +5V				0V - 6.64V
+	in4: +12V				0V - 16V
+	in5: VTR	(+3.3V standby)		0V - 4.38V
+	in6: Vbat	(+3.0V)			0V - 4.38V
+
+Each voltage input has associated min and max limits which trigger an alarm
+when crossed.
+
+
+Temperature Monitoring
+----------------------
+
+Temperatures are measured with 12-bit resolution and reported in millidegree
+Celsius. The chip also features offsets for all 3 temperature inputs which -
+when programmed - get added to the input readings. The chip does all the
+scaling by itself and the driver therefore reports true temperatures that don't
+need any user-space adjustments. The temperature inputs are mapped as follows
+(the last column indicates the input ranges):
+
+	temp1: Remote diode 1 (3904 type) temperature	-127C - +127C
+	temp2: DME1737 internal temperature		-127C - +127C
+	temp3: Remote diode 2 (3904 type) temperature	-127C - +127C
+
+Each temperature input has associated min and max limits which trigger an alarm
+when crossed. Additionally, each temperature input has a fault attribute that
+returns 1 when a faulty diode or an unconnected input is detected and 0
+otherwise.
+
+
+Fan Monitoring
+--------------
+
+Fan RPMs are measured with 16-bit resolution. The chip provides inputs for 6
+fan tachometers. All 6 inputs have an associated min limit which triggers an
+alarm when crossed. Fan inputs 1-4 provide type attributes that need to be set
+to the number of pulses per fan revolution that the connected tachometer
+generates. Supported values are 1, 2, and 4. Fan inputs 5-6 only support fans
+that generate 2 pulses per revolution. Fan inputs 5-6 also provide a max
+attribute that needs to be set to the maximum attainable RPM (fan at 100% duty-
+cycle) of the input. The chip adjusts the sampling rate based on this value.
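+
+For example, for a fan on input 1 that generates 2 pulses per revolution, and
+a fan on input 5 whose maximum speed is around 3600 RPM, one might write
+(illustrative values only, from the driver's sysfs directory):
+
+	echo 2 > fan1_type
+	echo 3600 > fan5_max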
+
+
+PWM Output Control
+------------------
+
+This chip features 5 PWM outputs. PWM outputs 1-3 are associated with fan
+inputs 1-3 and PWM outputs 5-6 are associated with fan inputs 5-6. PWM outputs
+1-3 can be configured to operate either in manual or automatic mode by setting
+the appropriate enable attribute accordingly. PWM outputs 5-6 can only operate
+in manual mode, their enable attributes are therefore read-only. When set to
+manual mode, the fan speed is set by writing the duty-cycle value to the
+appropriate PWM attribute. In automatic mode, the PWM attribute returns the
+current duty-cycle as set by the fan controller in the chip. All PWM outputs
+support the setting of the output frequency via the freq attribute.
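+
+For example, to switch pwm1 to manual mode and drive it at roughly 50%
+duty-cycle (illustrative values only, from the driver's sysfs directory):
+
+	echo 1 > pwm1_enable
+	echo 128 > pwm1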
+
+In automatic mode, the chip supports the setting of the PWM ramp rate which
+defines how fast the PWM output is adjusting to changes of the associated
+temperature input. Associating PWM outputs to temperature inputs is done via
+temperature zones. The chip features 3 zones whose assignments to temperature
+inputs is static and determined during initialization. These assignments can
+be retrieved via the zone[1-3]_auto_channels_temp attributes. Each PWM output
+is assigned to one (or hottest of multiple) temperature zone(s) through the
+pwm[1-3]_auto_channels_zone attributes. Each PWM output has 3 distinct output
+duty-cycles: full, low, and min. Full is internally hard-wired to 255 (100%)
+and low and min can be programmed via pwm[1-3]_auto_point1_pwm and
+pwm[1-3]_auto_pwm_min, respectively. The thermal thresholds of the zones are
+programmed via zone[1-3]_auto_point[1-3]_temp and
+zone[1-3]_auto_point1_temp_hyst:
+
+	pwm[1-3]_auto_point2_pwm	full-speed duty-cycle (255, i.e., 100%)
+	pwm[1-3]_auto_point1_pwm	low-speed duty-cycle
+	pwm[1-3]_auto_pwm_min		min-speed duty-cycle
+
+	zone[1-3]_auto_point3_temp	full-speed temp (all outputs)
+	zone[1-3]_auto_point2_temp	full-speed temp
+	zone[1-3]_auto_point1_temp	low-speed temp
+	zone[1-3]_auto_point1_temp_hyst	min-speed temp
+
+The chip adjusts the output duty-cycle linearly in the range of auto_point1_pwm
+to auto_point2_pwm if the temperature of the associated zone is between
+auto_point1_temp and auto_point2_temp. If the temperature drops below the
+auto_point1_temp_hyst value, the output duty-cycle is set to the auto_pwm_min
+value which only supports two values: 0 or auto_point1_pwm. That means that the
+fan either turns completely off or keeps spinning with the low-speed
+duty-cycle. If any of the temperatures rise above the auto_point3_temp value,
+all PWM outputs are set to 100% duty-cycle.
+
+Following is another representation of how the chip sets the output duty-cycle
+based on the temperature of the associated thermal zone:
+
+			Duty-Cycle	Duty-Cycle
+	Temperature	Rising Temp	Falling Temp
+	-----------	-----------	------------
+	full-speed	full-speed	full-speed
+
+			< linearly adjusted duty-cycle >
+
+	low-speed	low-speed	low-speed
+			min-speed	low-speed
+	min-speed	min-speed	min-speed
+			min-speed	min-speed
+
+
+Sysfs Attributes
+----------------
+
+Following is a list of all sysfs attributes that the driver provides, their
+permissions and a short description:
+
+Name				Perm	Description
+----				----	-----------
+cpu0_vid			RO	CPU core reference voltage in
+					millivolts.
+vrm				RW	Voltage regulator module version
+					number.
+
+in[0-6]_input			RO	Measured voltage in millivolts.
+in[0-6]_min			RW	Low limit for voltage input.
+in[0-6]_max			RW	High limit for voltage input.
+in[0-6]_alarm			RO	Voltage input alarm. Returns 1 if
+					voltage input is or went outside the
+					associated min-max range, 0 otherwise.
+
+temp[1-3]_input			RO	Measured temperature in millidegree
+					Celsius.
+temp[1-3]_min			RW	Low limit for temp input.
+temp[1-3]_max			RW	High limit for temp input.
+temp[1-3]_offset		RW	Offset for temp input. This value will
+					be added by the chip to the measured
+					temperature.
+temp[1-3]_alarm			RO	Alarm for temp input. Returns 1 if temp
+					input is or went outside the associated
+					min-max range, 0 otherwise.
+temp[1-3]_fault			RO	Temp input fault. Returns 1 if the chip
+					detects a faulty thermal diode or an
+					unconnected temp input, 0 otherwise.
+
+zone[1-3]_auto_channels_temp	RO	Temperature zone to temperature input
+					mapping. This attribute is a bitfield
+					and supports the following values:
+						1: temp1
+						2: temp2
+						4: temp3
+zone[1-3]_auto_point1_temp_hyst	RW	Auto PWM temp point1 hysteresis. The
+					output of the corresponding PWM is set
+					to the pwm_auto_min value if the temp
+					falls below the auto_point1_temp_hyst
+					value.
+zone[1-3]_auto_point[1-3]_temp	RW	Auto PWM temp points. Auto_point1 is
+					the low-speed temp, auto_point2 is the
+					full-speed temp, and auto_point3 is the
+					temp at which all PWM outputs are set
+					to full-speed (100% duty-cycle).
+
+fan[1-6]_input			RO	Measured fan speed in RPM.
+fan[1-6]_min			RW	Low limit for fan input.
+fan[1-6]_alarm			RO	Alarm for fan input. Returns 1 if fan
+					input is or went below the associated
+					min value, 0 otherwise.
+fan[1-4]_type			RW	Type of attached fan. Expressed in
+					number of pulses per revolution that
+					the fan generates. Supported values are
+					1, 2, and 4.
+fan[5-6]_max			RW	Max attainable RPM at 100% duty-cycle.
+					Required for chip to adjust the
+					sampling rate accordingly.
+
+pwm[1-3,5-6]			RO/RW	Duty-cycle of PWM output. Supported
+					values are 0-255 (0%-100%). Only
+					writeable if the associated PWM is in
+					manual mode.
+pwm[1-3]_enable			RW	Enable of PWM outputs 1-3. Supported
+					values are:
+						 0: turned off (output @ 100%)
+						 1: manual mode
+						 2: automatic mode
+pwm[5-6]_enable			RO	Enable of PWM outputs 5-6. Always
+					returns 1 since these 2 outputs are
+					hard-wired to manual mode.
+pwm[1-3,5-6]_freq		RW	Frequency of PWM output. Supported
+					values are in the range 11Hz-30000Hz
+					(default is 25000Hz).
+pwm[1-3]_ramp_rate		RW	Ramp rate of PWM output. Determines how
+					fast the PWM duty-cycle will change
+					when the PWM is in automatic mode.
+					Expressed in ms per PWM step. Supported
+					values are in the range 0ms-206ms
+					(default is 0, which means the duty-
+					cycle changes instantly).
+pwm[1-3]_auto_channels_zone	RW	PWM output to temperature zone mapping.
+					This attribute is a bitfield and
+					supports the following values:
+						1: zone1
+						2: zone2
+						4: zone3
+						6: highest of zone[2-3]
+						7: highest of zone[1-3]
+pwm[1-3]_auto_pwm_min		RW	Auto PWM min pwm. Minimum PWM duty-
+					cycle. Supported values are 0 or
+					auto_point1_pwm.
+pwm[1-3]_auto_point1_pwm	RW	Auto PWM pwm point. Auto_point1 is the
+					low-speed duty-cycle.
+pwm[1-3]_auto_point2_pwm	RO	Auto PWM pwm point. Auto_point2 is the
+					full-speed duty-cycle which is hard-
+					wired to 255 (100% duty-cycle).
diff --git a/Documentation/hwmon/f71805f b/Documentation/hwmon/f71805f
index bfd0f15..94e0d2c 100644
--- a/Documentation/hwmon/f71805f
+++ b/Documentation/hwmon/f71805f
@@ -5,11 +5,11 @@
   * Fintek F71805F/FG
     Prefix: 'f71805f'
     Addresses scanned: none, address read from Super I/O config space
-    Datasheet: Provided by Fintek on request
+    Datasheet: Available from the Fintek website
   * Fintek F71872F/FG
     Prefix: 'f71872f'
     Addresses scanned: none, address read from Super I/O config space
-    Datasheet: Provided by Fintek on request
+    Datasheet: Available from the Fintek website
 
 Author: Jean Delvare <khali@linux-fr.org>
 
@@ -128,7 +128,9 @@
 When the PWM method is used, you can select the operating frequency,
 from 187.5 kHz (default) to 31 Hz. The best frequency depends on the
 fan model. As a rule of thumb, lower frequencies seem to give better
-control, but may generate annoying high-pitch noise. Fintek recommends
+control, but may generate annoying high-pitch noise. So a frequency just
+above the audible range, such as 25 kHz, may be a good choice; if this
+doesn't give you good linear control, try reducing it. Fintek recommends
 not going below 1 kHz, as the fan tachometers get confused by lower
 frequencies as well.
 
@@ -136,16 +138,23 @@
 corresponds to a pwm value of 106 for the driver. The driver doesn't
 enforce this limit though.
 
-Three different fan control modes are supported:
+Three different fan control modes are supported; the mode number is written
+to the pwm<n>_enable file.
 
-* Manual mode
-  You ask for a specific PWM duty cycle or DC voltage.
+* 1: Manual mode
+  You ask for a specific PWM duty cycle or DC voltage by writing to the
+  pwm<n> file.
 
-* Fan speed mode
-  You ask for a specific fan speed. This mode assumes that pwm1
-  corresponds to fan1, pwm2 to fan2 and pwm3 to fan3.
+* 2: Temperature mode
+  You define 3 temperature/fan speed trip points using the
+  pwm<n>_auto_point<m>_temp and _fan files. These define a staircase
+  relationship between temperature and fan speed with two additional points
+  interpolated between the values that you define. When the temperature
+  is below auto_point1_temp the fan is switched off.
 
-* Temperature mode
-  You define 3 temperature/fan speed trip points, and the fan speed is
-  adjusted depending on the measured temperature, using interpolation.
-  This mode is not yet supported by the driver.
+* 3: Fan speed mode
+  You ask for a specific fan speed by writing to the fan<n>_target file.
+
+Both of the automatic modes require that pwm1 corresponds to fan1, pwm2 to
+fan2 and pwm3 to fan3. Temperature mode also requires that temp1 corresponds
+to pwm1 and fan1, etc.
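+
+For example (illustrative values only, from the driver's sysfs directory):
+
+  echo 1 > pwm1_enable      (manual mode)
+  echo 128 > pwm1           (about 50% duty cycle)
+
+  echo 3 > pwm1_enable      (fan speed mode)
+  echo 1800 > fan1_target   (request roughly 1800 RPM on fan1)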
diff --git a/Documentation/hwmon/it87 b/Documentation/hwmon/it87
index c0528d6..81ecc7e 100644
--- a/Documentation/hwmon/it87
+++ b/Documentation/hwmon/it87
@@ -12,11 +12,12 @@
     Addresses scanned: from Super I/O config space (8 I/O ports)
     Datasheet: Publicly available at the ITE website
                http://www.ite.com.tw/
-  * IT8716F
+  * IT8716F/IT8726F
     Prefix: 'it8716'
     Addresses scanned: from Super I/O config space (8 I/O ports)
     Datasheet: Publicly available at the ITE website
                http://www.ite.com.tw/product_info/file/pc/IT8716F_V0.3.ZIP
+               http://www.ite.com.tw/product_info/file/pc/IT8726F_V0.3.pdf
   * IT8718F
     Prefix: 'it8718'
     Addresses scanned: from Super I/O config space (8 I/O ports)
@@ -68,7 +69,7 @@
 -----------
 
 This driver implements support for the IT8705F, IT8712F, IT8716F,
-IT8718F and SiS950 chips.
+IT8718F, IT8726F and SiS950 chips.
 
 These chips are 'Super I/O chips', supporting floppy disks, infrared ports,
 joysticks and other miscellaneous stuff. For hardware monitoring, they
@@ -97,6 +98,10 @@
 revisions. For now, the driver only uses the 16-bit mode on the
 IT8716F and IT8718F.
 
+The IT8726F is just a slightly enhanced IT8716F with additional hardware
+for AMD power sequencing. Therefore the chip will appear as an IT8716F
+to userspace applications.
+
 Temperatures are measured in degrees Celsius. An alarm is triggered once
 when the Overtemperature Shutdown limit is crossed.
 
diff --git a/Documentation/hwmon/lm90 b/Documentation/hwmon/lm90
index 438cb24..aa4a0ec 100644
--- a/Documentation/hwmon/lm90
+++ b/Documentation/hwmon/lm90
@@ -48,6 +48,18 @@
     Addresses scanned: I2C 0x4c, 0x4d (unsupported 0x4e)
     Datasheet: Publicly available at the Maxim website
                http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
+  * Maxim MAX6680
+    Prefix: 'max6680'
+    Addresses scanned: I2C 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
+                           0x4c, 0x4d and 0x4e
+    Datasheet: Publicly available at the Maxim website
+               http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3370
+  * Maxim MAX6681
+    Prefix: 'max6680'
+    Addresses scanned: I2C 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
+                           0x4c, 0x4d and 0x4e
+    Datasheet: Publicly available at the Maxim website
+               http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3370
 
 
 Author: Jean Delvare <khali@linux-fr.org>
@@ -59,11 +71,15 @@
 The LM90 is a digital temperature sensor. It senses its own temperature as
 well as the temperature of up to one external diode. It is compatible
 with many other devices such as the LM86, the LM89, the LM99, the ADM1032,
-the MAX6657, MAX6658 and the MAX6659 all of which are supported by this driver.
-Note that there is no easy way to differentiate between the last three
-variants. The extra address and features of the MAX6659 are not supported by
-this driver. Additionally, the ADT7461 is supported if found in ADM1032
-compatibility mode.
+the MAX6657, MAX6658, MAX6659, MAX6680 and the MAX6681 all of which are
+supported by this driver.
+
+Note that there is no easy way to differentiate between the MAX6657,
+MAX6658 and MAX6659 variants. The extra address and features of the
+MAX6659 are not supported by this driver. The MAX6680 and MAX6681 only
+differ in their pinout, therefore they obviously can't (and don't need to)
+be distinguished. Additionally, the ADT7461 is supported if found in
+ADM1032 compatibility mode.
 
 The specificity of this family of chipsets over the ADM1021/LM84
 family is that it features critical limits with hysteresis, and an
@@ -93,18 +109,22 @@
   * ALERT is triggered by open remote sensor.
   * SMBus PEC support for Write Byte and Receive Byte transactions.
 
-ADT7461
+ADT7461:
   * Extended temperature range (breaks compatibility)
   * Lower resolution for remote temperature
 
 MAX6657 and MAX6658:
   * Remote sensor type selection
 
-MAX6659
+MAX6659:
   * Selectable address
   * Second critical temperature limit
   * Remote sensor type selection
 
+MAX6680 and MAX6681:
+  * Selectable address
+  * Remote sensor type selection
+
 All temperature values are given in degrees Celsius. Resolution
 is 1.0 degree for the local temperature, 0.125 degree for the remote
 temperature.
@@ -141,7 +161,7 @@
 Additionally, the ADM1032 doesn't support SMBus Send Byte with PEC.
 Instead, it will try to write the PEC value to the register (because the
 SMBus Send Byte transaction with PEC is similar to a Write Byte transaction
-without PEC), which is not what we want. Thus, PEC is explicitely disabled
+without PEC), which is not what we want. Thus, PEC is explicitly disabled
 on SMBus Send Byte transactions in the lm90 driver.
 
 PEC on byte data transactions represents a significant increase in bandwidth
diff --git a/Documentation/hwmon/lm93 b/Documentation/hwmon/lm93
new file mode 100644
index 0000000..4e4a1dc
--- /dev/null
+++ b/Documentation/hwmon/lm93
@@ -0,0 +1,412 @@
+Kernel driver lm93
+==================
+
+Supported chips:
+  * National Semiconductor LM93
+    Prefix: 'lm93'
+    Addresses scanned: I2C 0x2c-0x2e
+    Datasheet: http://www.national.com/ds.cgi/LM/LM93.pdf
+
+Author:
+	Mark M. Hoffman <mhoffman@lightlink.com>
+	Ported to 2.6 by Eric J. Bowersox <ericb@aspsys.com>
+	Adapted to 2.6.20 by Carsten Emde <ce@osadl.org>
+	Modified for mainline integration by Hans J. Koch <hjk@linutronix.de>
+
+Module Parameters
+-----------------
+
+(specific to LM93)
+* init: integer
+  Set to non-zero to force some initializations (default is 0).
+* disable_block: integer
+  A "0" allows SMBus block data transactions if the host supports them.  A "1"
+  disables SMBus block data transactions.  The default is 0.
+* vccp_limit_type: integer array (2)
+  Configures in7 and in8 limit type, where 0 means absolute and non-zero
+  means relative.  "Relative" here refers to "Dynamic Vccp Monitoring using
+  VID" from the datasheet.  It greatly simplifies the interface to allow
+  only one set of limits (absolute or relative) to be in operation at a
+  time (even though the hardware is capable of enabling both).  There's
+  not a compelling use case for enabling both at once, anyway.  The default
+  is "0,0".
+* vid_agtl: integer
+  A "0" configures the VID pins for V(ih) = 2.1V min, V(il) = 0.8V max.
+  A "1" configures the VID pins for V(ih) = 0.8V min, V(il) = 0.4V max.
+  (The latter setting is referred to as AGTL+ Compatible in the datasheet.)
+  I.e. this parameter controls the VID pin input thresholds; if your VID
+  inputs are not working, try changing this.  The default value is "0".
+
+(common among sensor drivers)
+* force: short array (min = 1, max = 48)
+  List of adapter,address pairs to assume to be present.  Autodetection
+  of the target device will still be attempted.  Use one of the more
+  specific force directives below if this doesn't detect the device.
+* force_lm93: short array (min = 1, max = 48)
+  List of adapter,address pairs which are unquestionably assumed to contain
+  a 'lm93' chip
+* ignore: short array (min = 1, max = 48)
+  List of adapter,address pairs not to scan
+* ignore_range: short array (min = 1, max = 48)
+  List of adapter,start-addr,end-addr triples not to scan
+* probe: short array (min = 1, max = 48)
+  List of adapter,address pairs to scan additionally
+* probe_range: short array (min = 1, max = 48)
+  List of adapter,start-addr,end-addr triples to scan additionally
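+
+For example, to disable SMBus block transactions and use relative ("Dynamic
+Vccp Monitoring using VID") limits on both Vccp channels, the module might be
+loaded as follows (illustrative values only):
+
+	# modprobe lm93 disable_block=1 vccp_limit_type=1,1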
+
+
+Hardware Description
+--------------------
+
+(from the datasheet)
+
+The LM93 hardware monitor has a two-wire digital interface compatible with
+SMBus 2.0. Using an 8-bit ADC, the LM93 measures the temperature of two remote
+diode connected transistors as well as its own die and 16 power supply
+voltages. To set fan speed, the LM93 has two PWM outputs that are each
+controlled by up to four temperature zones. The fan control algorithm is lookup
+table based. The LM93 includes a digital filter that can be invoked to smooth
+temperature readings for better control of fan speed. The LM93 has four
+tachometer inputs to measure fan speed. Limit and status registers for all
+measured values are included. The LM93 builds upon the functionality of
+previous motherboard management ASICs and uses some of the LM85's features
+(i.e. smart tachometer mode). It also adds measurement and control support
+for dynamic Vccp monitoring and PROCHOT. It is designed to monitor a dual
+processor Xeon class motherboard with a minimum of external components.
+
+
+Driver Description
+------------------
+
+This driver implements support for the National Semiconductor LM93.
+
+
+User Interface
+--------------
+
+#PROCHOT:
+
+The LM93 can monitor two #PROCHOT signals.  The results are found in the
+sysfs files prochot1, prochot2, prochot1_avg, prochot2_avg, prochot1_max,
+and prochot2_max.  prochot1_max and prochot2_max contain the user limits
+for #PROCHOT1 and #PROCHOT2, respectively.  prochot1 and prochot2 contain
+the current readings for the most recent complete time interval.  The
+value of prochot1_avg and prochot2_avg is something like a 2 period
+exponential moving average (but not quite - check the datasheet). Note
+that this third value is calculated by the chip itself.  All values range
+from 0-255 where 0 indicates no throttling, and 255 indicates > 99.6%.
+
+The monitoring intervals for the two #PROCHOT signals are also configurable.
+These intervals can be found in the sysfs files prochot1_interval and
+prochot2_interval.  The values in these files specify the intervals for
+#P1_PROCHOT and #P2_PROCHOT, respectively.  Selecting a value not in this
+list will cause the driver to use the next largest interval.  The available
+intervals are:
+
+#PROCHOT intervals: 0.73, 1.46, 2.9, 5.8, 11.7, 23.3, 46.6, 93.2, 186, 372
+
+It is possible to configure the LM93 to logically short the two #PROCHOT
+signals.  I.e. when #P1_PROCHOT is asserted, the LM93 will automatically
+assert #P2_PROCHOT, and vice-versa.  This mode is enabled by writing a
+non-zero integer to the sysfs file prochot_short.
+
+The LM93 can also override the #PROCHOT pins by driving a PWM signal onto
+one or both of them.  When overridden, the signal has a period of 3.56 mS,
+a minimum pulse width of 5 clocks (at 22.5kHz => 6.25% duty cycle), and
+a maximum pulse width of 80 clocks (at 22.5kHz => 99.88% duty cycle).
+
+The sysfs files prochot1_override and prochot2_override contain boolean
+integers which enable or disable the override function for #P1_PROCHOT and
+#P2_PROCHOT, respectively.  The sysfs file prochot_override_duty_cycle
+contains a value controlling the duty cycle for the PWM signal used when
+the override function is enabled.  This value ranges from 0 to 15, with 0
+indicating minimum duty cycle and 15 indicating maximum.
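+
+For example, to override #P1_PROCHOT with a PWM signal at roughly half the
+maximum duty cycle (illustrative values only, from the chip's sysfs
+directory):
+
+	echo 1 > prochot1_override
+	echo 8 > prochot_override_duty_cycle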
+
+#VRD_HOT:
+
+The LM93 can monitor two #VRD_HOT signals. The results are found in the
+sysfs files vrdhot1 and vrdhot2. There is one value per file: a boolean for
+which 1 indicates #VRD_HOT is asserted and 0 indicates it is negated. These
+files are read-only.
+
+Smart Tach Mode:
+
+(from the datasheet)
+
+	If a fan is driven using a low-side drive PWM, the tachometer
+	output of the fan is corrupted. The LM93 includes smart tachometer
+	circuitry that allows an accurate tachometer reading to be
+	achieved despite the signal corruption.  In smart tach mode all
+	four signals are measured within 4 seconds.
+
+Smart tach mode is enabled by the driver by writing 1 or 2 (associating the
+fan tachometer with a pwm) to the sysfs file fan<n>_smart_tach.  A zero
+will disable the function for that fan.  Note that Smart tach mode cannot be
+enabled if the PWM output frequency is 22500 Hz (see below).
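+
+For example, to associate the fan1 tachometer with pwm1 (illustrative value
+only):
+
+	echo 1 > fan1_smart_tach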
+
+Manual PWM:
+
+The LM93 has a fixed or override mode for the two PWM outputs (although, there
+are still some conditions that will override even this mode - see section
+15.10.6 of the datasheet for details.)  The sysfs files pwm1_override
+and pwm2_override are used to enable this mode; each is a boolean integer
+where 0 disables and 1 enables the manual control mode.  The sysfs files pwm1
+and pwm2 are used to set the manual duty cycle; each is an integer (0-255)
+where 0 is 0% duty cycle, and 255 is 100%.  Note that the duty cycle values
+are constrained by the hardware. Selecting a value which is not available
+will cause the driver to use the next largest value.  Also note: when manual
+PWM mode is disabled, the value of pwm1 and pwm2 indicates the current duty
+cycle chosen by the h/w.
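+
+For example, to take manual control of pwm1 at roughly 50% duty cycle
+(illustrative values only; the driver rounds to the next available hardware
+value):
+
+	echo 1 > pwm1_override
+	echo 128 > pwm1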
+
+PWM Output Frequency:
+
+The LM93 supports several different frequencies for the PWM output channels.
+The sysfs files pwm1_freq and pwm2_freq are used to select the frequency. The
+frequency values are constrained by the hardware.  Selecting a value which is
+not available will cause the driver to use the next largest value.  Also note
+that this parameter has implications for the Smart Tach Mode (see above).
+
+PWM Output Frequencies: 12, 36, 48, 60, 72, 84, 96, 22500 (h/w default)
+
+Automatic PWM:
+
+The LM93 is capable of complex automatic fan control, with many different
+points of configuration.  To start, each PWM output can be bound to any
+combination of eight control sources.  The final PWM is the largest of all
+individual control sources to which the PWM output is bound.
+
+The eight control sources are: temp1-temp4 (aka "zones" in the datasheet),
+#PROCHOT 1 & 2, and #VRDHOT 1 & 2.  The bindings are expressed as a bitmask
+in the sysfs files pwm<n>_auto_channels, where a "1" enables the binding, and
+a "0" disables it. The h/w default is 0x0f (all temperatures bound).
+
+	0x01 - Temp 1
+	0x02 - Temp 2
+	0x04 - Temp 3
+	0x08 - Temp 4
+	0x10 - #PROCHOT 1
+	0x20 - #PROCHOT 2
+	0x40 - #VRDHOT 1
+	0x80 - #VRDHOT 2
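+
+For example, to bind pwm1 to Temp 1 and #PROCHOT 1 only, write the decimal
+equivalent of the 0x11 bitmask (illustrative value only):
+
+	echo 17 > pwm1_auto_channels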
+
+The function y = f(x) takes a source temperature x to a PWM output y.  This
+function of the LM93 is derived from a base temperature and a table of 12
+temperature offsets.  The base temperature is expressed in degrees C in the
+sysfs files temp<n>_auto_base.  The offsets are expressed in cumulative
+degrees C, with the value of offset <i> for temperature value <n> being
+contained in the file temp<n>_auto_offset<i>.  E.g. if the base temperature
+is 40C:
+
+     offset #	temp<n>_auto_offset<i>	range		pwm
+	 1		0		-		 25.00%
+	 2		0		-		 28.57%
+	 3		1		40C - 41C	 32.14%
+	 4		1		41C - 42C	 35.71%
+	 5		2		42C - 44C	 39.29%
+	 6		2		44C - 46C	 42.86%
+	 7		2		46C - 48C	 46.43%
+	 8		2		48C - 50C	 50.00%
+	 9		2		50C - 52C	 53.57%
+	10		2		52C - 54C	 57.14%
+	11		2		54C - 56C	 71.43%
+	12		2		56C - 58C	 85.71%
+					> 58C		100.00%
+
+Valid offsets are in the range 0C <= x <= 7.5C in 0.5C increments.
+
+There is an independent base temperature for each temperature channel. Note,
+however, there are only two tables of offsets: one each for temp[12] and
+temp[34].  Therefore, any change to e.g. temp1_auto_offset<i> will also
+affect temp2_auto_offset<i>.
+
+The LM93 can also apply hysteresis to the offset table, to prevent unwanted
+oscillation between two steps in the offsets table.  These values are found in
+the sysfs files temp<n>_auto_offset_hyst.  The value in this file has the
+same representation as in temp<n>_auto_offset<i>.
+
+If a temperature reading falls below the base value for that channel, the LM93
+will use the minimum PWM value.  These values are found in the sysfs files
+temp<n>_auto_pwm_min.  Note, there are only two minimums: one each for temp[12]
+and temp[34].  Therefore, any change to e.g. temp1_auto_pwm_min will also
+affect temp2_auto_pwm_min.
+
+PWM Spin-Up Cycle:
+
+A spin-up cycle occurs when a PWM output is commanded from 0% duty cycle to
+some value > 0%.  The LM93 supports a minimum duty cycle during spin-up.  These
+values are found in the sysfs files pwm<n>_auto_spinup_min. The value in this
+file has the same representation as other PWM duty cycle values. The
+duration of the spin-up cycle is also configurable.  These values are found in
+the sysfs files pwm<n>_auto_spinup_time. The value in this file is
+the spin-up time in seconds.  The available spin-up times are constrained by
+the hardware.  Selecting a value which is not available will cause the driver
+to use the next largest value.
+
+Spin-up Durations: 0 (disabled, h/w default), 0.1, 0.25, 0.4, 0.7, 1.0,
+		   2.0, 4.0
+
+#PROCHOT and #VRDHOT PWM Ramping:
+
+If the #PROCHOT or #VRDHOT signals are asserted while bound to a PWM output
+channel, the LM93 will ramp the PWM output up to 100% duty cycle in discrete
+steps. The duration of each step is configurable. There are two files, with
+one value each in seconds: pwm_auto_prochot_ramp and pwm_auto_vrdhot_ramp.
+The available ramp times are constrained by the hardware.  Selecting a value
+which is not available will cause the driver to use the next largest value.
+
+Ramp Times: 0 (disabled, h/w default) to 0.75 in 0.05 second intervals
+
+Fan Boost:
+
+For each temperature channel, there is a boost temperature: if the channel
+exceeds this limit, the LM93 will immediately drive both PWM outputs to 100%.
+This limit is expressed in degrees C in the sysfs files temp<n>_auto_boost.
+There is also a hysteresis temperature for this function: after the boost
+limit is reached, the temperature channel must drop below this value before
+the boost function is disabled.  This temperature is also expressed in degrees
+C in the sysfs files temp<n>_auto_boost_hyst.
+
+GPIO Pins:
+
+The LM93 can monitor the logic level of four dedicated GPIO pins as well as the
+four tach input pins.  GPIO0-GPIO3 correspond to (fan) tach 1-4, respectively.
+All eight GPIOs are read by reading the bitmask in the sysfs file gpio.  The
+LSB is GPIO0, and the MSB is GPIO7.
+
+
+LM93 Unique sysfs Files
+-----------------------
+
+	file			description
+	-------------------------------------------------------------
+
+	prochot<n>		current #PROCHOT %
+
+	prochot<n>_avg		moving average #PROCHOT %
+
+	prochot<n>_max		limit #PROCHOT %
+
+	prochot_short		enable or disable logical #PROCHOT pin short
+
+	prochot<n>_override	force #PROCHOT assertion as PWM
+
+	prochot_override_duty_cycle
+				duty cycle for the PWM signal used when
+				#PROCHOT is overridden
+
+	prochot<n>_interval	#PROCHOT PWM sampling interval
+
+	vrdhot<n>		0 means negated, 1 means asserted
+
+	fan<n>_smart_tach	enable or disable smart tach mode
+
+	pwm<n>_auto_channels	select control sources for PWM outputs
+
+	pwm<n>_auto_spinup_min	minimum duty cycle during spin-up
+
+	pwm<n>_auto_spinup_time	duration of spin-up
+
+	pwm_auto_prochot_ramp	ramp time per step when #PROCHOT asserted
+
+	pwm_auto_vrdhot_ramp	ramp time per step when #VRDHOT asserted
+
+	temp<n>_auto_base	temperature channel base
+
+	temp<n>_auto_offset[1-12]
+				temperature channel offsets
+
+	temp<n>_auto_offset_hyst
+				temperature channel offset hysteresis
+
+	temp<n>_auto_boost	temperature channel boost (PWMs to 100%) limit
+
+	temp<n>_auto_boost_hyst	temperature channel boost hysteresis
+
+	gpio			input state of 8 GPIO pins; read-only
+
+
+Sample Configuration File
+-------------------------
+
+Here is a sample LM93 chip config for sensors.conf:
+
+---------- cut here ----------
+chip "lm93-*"
+
+# VOLTAGE INPUTS
+
+	# labels and scaling based on datasheet recommendations
+	label in1	"+12V1"
+	compute in1	@ * 12.945, @ / 12.945
+	set in1_min	12 * 0.90
+	set in1_max	12 * 1.10
+
+	label in2	"+12V2"
+	compute in2	@ * 12.945, @ / 12.945
+	set in2_min	12 * 0.90
+	set in2_max	12 * 1.10
+
+	label in3	"+12V3"
+	compute in3	@ * 12.945, @ / 12.945
+	set in3_min	12 * 0.90
+	set in3_max	12 * 1.10
+
+	label in4	"FSB_Vtt"
+
+	label in5	"3GIO"
+
+	label in6	"ICH_Core"
+
+	label in7	"Vccp1"
+
+	label in8	"Vccp2"
+
+	label in9	"+3.3V"
+	set in9_min	3.3 * 0.90
+	set in9_max	3.3 * 1.10
+
+	label in10	"+5V"
+	set in10_min	5.0 * 0.90
+	set in10_max	5.0 * 1.10
+
+	label in11	"SCSI_Core"
+
+	label in12	"Mem_Core"
+
+	label in13	"Mem_Vtt"
+
+	label in14	"Gbit_Core"
+
+	# Assuming R1/R2 = 4.1143, and 3.3V reference
+	# -12V = (4.1143 + 1) * (@ - 3.3) + 3.3
+	label in15	"-12V"
+	compute in15 @ * 5.1143 - 13.57719, (@ + 13.57719) / 5.1143
+	set in15_min	-12 * 0.90
+	set in15_max	-12 * 1.10
+
+	label in16	"+3.3VSB"
+	set in16_min	3.3 * 0.90
+	set in16_max	3.3 * 1.10
+
+# TEMPERATURE INPUTS
+
+	label temp1	"CPU1"
+	label temp2	"CPU2"
+	label temp3	"LM93"
+
+# TACHOMETER INPUTS
+
+	label fan1	"Fan1"
+	set fan1_min	3000
+	label fan2	"Fan2"
+	set fan2_min	3000
+	label fan3	"Fan3"
+	set fan3_min	3000
+	label fan4	"Fan4"
+	set fan4_min	3000
+
+# PWM OUTPUTS
+
+	label pwm1	"CPU1"
+	label pwm2	"CPU2"
+
diff --git a/Documentation/hwmon/smsc47b397 b/Documentation/hwmon/smsc47b397
index 20682f1..3a43b69 100644
--- a/Documentation/hwmon/smsc47b397
+++ b/Documentation/hwmon/smsc47b397
@@ -4,6 +4,7 @@
 Supported chips:
   * SMSC LPC47B397-NC
   * SMSC SCH5307-NS
+  * SMSC SCH5317
     Prefix: 'smsc47b397'
     Addresses scanned: none, address read from Super I/O config space
     Datasheet: In this file
@@ -18,8 +19,8 @@
 provided by Craig Kelly (In-Store Broadcast Network) and edited/corrected
 by Mark M. Hoffman <mhoffman@lightlink.com>.
 
-[1] And SMSC SCH5307-NS, which has a different device ID but is otherwise
-compatible.
+[1] And SMSC SCH5307-NS and SCH5317, which have different device IDs but are
+otherwise compatible.
 
 * * * * *
 
@@ -131,7 +132,7 @@
 The registers of interest for identifying the SIO on the dc7100 are Device ID
 (0x20) and Device Rev  (0x21).
 
-The Device ID will read 0x6F (for SCH5307-NS, 0x81)
+The Device ID will read 0x6F (0x81 for SCH5307-NS, and 0x85 for SCH5317)
 The Device Rev currently reads 0x01
 
 Obtaining the HWM Base Address.
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index a9a18ad..b3a9e1b 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -172,11 +172,10 @@
 		255 is max or 100%.
 
 pwm[1-*]_enable
-		Switch PWM on and off.
-		Not always present even if pwmN is.
-		0: turn off
-		1: turn on in manual mode
-		2+: turn on in automatic mode
+		Fan speed control method:
+		0: no fan speed control (i.e. fan at full speed)
+		1: manual fan speed control enabled (using pwm[1-*])
+		2+: automatic fan speed control enabled
 		Check individual chip documentation files for automatic mode
 		details.
 		RW
@@ -343,9 +342,9 @@
 supports it. When this boolean has value 1, the measurement for that
 channel should not be trusted.
 
-in[0-*]_input_fault
-fan[1-*]_input_fault
-temp[1-*]_input_fault
+in[0-*]_fault
+fan[1-*]_fault
+temp[1-*]_fault
 		Input fault condition
 		0: no fault occurred
 		1: fault condition
diff --git a/Documentation/hwmon/w83627ehf b/Documentation/hwmon/w83627ehf
index 030fac6..ccc2bcb 100644
--- a/Documentation/hwmon/w83627ehf
+++ b/Documentation/hwmon/w83627ehf
@@ -22,9 +22,9 @@
 W83627DHG super I/O chips. We will refer to them collectively as Winbond chips.
 
 The chips implement three temperature sensors, five fan rotation
-speed sensors, ten analog voltage sensors (only nine for the 627DHG), alarms
-with beep warnings (control unimplemented), and some automatic fan regulation
-strategies (plus manual fan control mode).
+speed sensors, ten analog voltage sensors (only nine for the 627DHG), one
+VID (6 pins), alarms with beep warnings (control unimplemented), and
+some automatic fan regulation strategies (plus manual fan control mode).
 
 Temperatures are measured in degrees Celsius and measurement resolution is 1
 degC for temp1 and 0.5 degC for temp2 and temp3. An alarm is triggered when
diff --git a/Documentation/ja_JP/HOWTO b/Documentation/ja_JP/HOWTO
new file mode 100644
index 0000000..b2446a0
--- /dev/null
+++ b/Documentation/ja_JP/HOWTO
@@ -0,0 +1,650 @@
+NOTE:
+This is a Japanese translation of "Documentation/HOWTO".
+It is maintained by Tsugikazu Shibata <tshibata@ab.jp.nec.com>
+and the JF Project team <www.linux.or.jp/JF>.
+If you find any differences from the original file or problems in the
+translation, please contact the maintainer of this file or the JF project.
+
+Please also note that the purpose of this file is to make it easier to read
+for non-native English speakers; it is not intended to be a fork. So, if you
+have any comments or updates for this file, please try to update the
+original (English) file first.
+
+Last Updated: 2007/06/04
+==================================
+これは、
+linux-2.6.21/Documentation/HOWTO
+の和訳です。
+
+翻訳団体: JF プロジェクト < http://www.linux.or.jp/JF/ >
+翻訳日: 2007/06/04
+翻訳者: Tsugikazu Shibata <tshibata at ab dot jp dot nec dot com>
+校正者: 松倉さん <nbh--mats at nifty dot com>
+         小林 雅典さん (Masanori Kobayasi) <zap03216 at nifty dot ne dot jp>
+         武井伸光さん、<takei at webmasters dot gr dot jp>
+         かねこさん (Seiji Kaneko) <skaneko at a2 dot mbn dot or dot jp>
+         野口さん (Kenji Noguchi) <tokyo246 at gmail dot com>
+         河内さん (Takayoshi Kochi) <t-kochi at bq dot jp dot nec dot com>
+         岩本さん (iwamoto) <iwamoto.kn at ncos dot nec dot co dot jp>
+==================================
+
+Linux カーネル開発のやり方
+-------------------------------
+
+これは上のトピック( Linux カーネル開発のやり方)の重要な事柄を網羅した
+ドキュメントです。ここには Linux カーネル開発者になるための方法と
+Linux カーネル開発コミュニティと共に活動するやり方を学ぶ方法が含まれて
+います。カーネルプログラミングに関する技術的な項目に関することは何も含
+めないようにしていますが、カーネル開発者となるための正しい方向に向かう
+手助けになります。
+
+もし、このドキュメントのどこかが古くなっていた場合には、このドキュメン
+トの最後にリストしたメンテナーにパッチを送ってください。
+
+はじめに
+---------
+
+あなたは Linux カーネルの開発者になる方法を学びたいのでしょうか? そ
+れともあなたは上司から「このデバイスの Linux ドライバを書くように」と
+言われているのでしょうか? 
+この文書の目的は、あなたが踏むべき手順と、コミュニティと一緒にうまく働
+くヒントを書き下すことで、あなたが知るべき全てのことを教えることです。
+また、このコミュニティがなぜ今うまくまわっているのかという理由の一部も
+説明しようと試みています。
+
+カーネルは 少量のアーキテクチャ依存部分がアセンブリ言語で書かれている
+以外は大部分は C 言語で書かれています。C言語をよく理解していることはカー
+ネル開発者には必要です。アーキテクチャ向けの低レベル部分の開発をするの
+でなければ、(どんなアーキテクチャでも)アセンブリ(訳注: 言語)は必要あり
+ません。以下の本は、C 言語の十分な知識や何年もの経験に取って代わるもの
+ではありませんが、少なくともリファレンスとしてはいい本です。
+ - "The C Programming Language" by Kernighan and Ritchie [Prentice Hall]
+ -『プログラミング言語C第2版』(B.W. カーニハン/D.M. リッチー著 石田晴久訳) [共立出版]
+ - "Practical C Programming" by Steve Oualline [O'Reilly]
+ - 『C実践プログラミング第3版』(Steve Oualline著 望月康司監訳 谷口功訳) [オライリージャパン]
+ - "C:  A Reference Manual" by Harbison and Steele [Prentice Hall]
+ - 『新・詳説 C 言語 H&S リファレンス』
+       (サミュエル P ハービソン/ガイ L スティール共著 斉藤 信男監訳)[ソフトバンク]
+
+カーネルは GNU C と GNU ツールチェインを使って書かれています。カーネル
+は ISO C89 仕様に準拠して書く一方で、標準には無い言語拡張を多く使って
+います。カーネルは標準 C ライブラリとは関係がないといった、C 言語フリー
+スタンディング環境です。そのため、C の標準で使えないものもあります。任
+意の long long の除算や浮動小数点は使えません。
+ときどき、カーネルがツールチェインや C 言語拡張に置いている前提がどう
+なっているのかわかりにくいことがあり、また、残念なことに決定的なリファ
+レンスは存在しません。情報を得るには、gcc の info ページ( info gcc )を
+みてください。
+
+あなたは既存の開発コミュニティと一緒に作業する方法を学ぼうとしているこ
+とに留意してください。そのコミュニティは、コーディング、スタイル、
+開発手順について高度な標準を持つ、多様な人の集まりです。
+地理的に分散した大規模なチームに対してもっともうまくいくとわかったこと
+をベースにしながら、これらの標準は長い時間をかけて築かれてきました。
+これらはきちんと文書化されていますから、事前にこれらの標準についてでき
+るだけたくさん学んでください。また皆があなたやあなたの会社のやり方に合わ
+せてくれると思わないでください。
+
+法的問題
+------------
+
+Linux カーネルのソースコードは GPL ライセンスの下でリリースされていま
+す。ライセンスの詳細については、ソースツリーのメインディレクトリに存在
+する、COPYING のファイルをみてください。もしライセンスについてさらに質
+問があれば、Linux Kernel メーリングリストに質問するのではなく、どうぞ
+法律家に相談してください。メーリングリストの人達は法律家ではなく、法的
+問題については彼らの声明はあてにするべきではありません。
+
+GPL に関する共通の質問や回答については、以下を参照してください。
+	http://www.gnu.org/licenses/gpl-faq.html
+
+ドキュメント
+------------
+
+Linux カーネルソースツリーは幅広い範囲のドキュメントを含んでおり、それ
+らはカーネルコミュニティと会話する方法を学ぶのに非常に貴重なものです。
+新しい機能がカーネルに追加される場合、その機能の使い方について説明した
+新しいドキュメントファイルも追加することを勧めます。
+カーネルの変更が、カーネルがユーザ空間に公開しているインターフェイスの
+変更を引き起こす場合、その変更を説明するマニュアルページのパッチや情報
+をマニュアルページのメンテナ mtk-manpages@gmx.net に送ることを勧めます。
+
+以下はカーネルソースツリーに含まれている読んでおくべきファイルの一覧で
+す-
+
+  README
+    このファイルは Linuxカーネルの簡単な背景とカーネルを設定(訳注
+    configure )し、生成(訳注 build )するために必要なことは何かが書かれ
+    ています。カーネルに関して初めての人はここからスタートするとよいで
+    しょう。
+
+  Documentation/Changes
+     このファイルはカーネルをうまく生成(訳注 build )し、走らせるのに最
+     小限のレベルで必要な数々のソフトウェアパッケージの一覧を示してい
+     ます。
+
+  Documentation/CodingStyle
+    これは Linux カーネルのコーディングスタイルと背景にある理由を記述
+    しています。全ての新しいコードはこのドキュメントにあるガイドライン
+    に従っていることを期待されています。大部分のメンテナーはこれらのルー
+    ルに従っているものだけを受け付け、多くの人は正しいスタイルのコード
+    だけをレビューします。
+
+  Documentation/SubmittingPatches
+  Documentation/SubmittingDrivers
+     これらのファイルには、どうやってうまくパッチを作って投稿するかに
+     ついて非常に詳しく書かれており、以下を含みます(これだけに限らない
+     けれども)
+        - Email に含むこと
+        - Email の形式
+        - だれに送るか
+     これらのルールに従えばうまくいくことを保証することではありません
+     が (すべてのパッチは内容とスタイルについて精査を受けるので)、
+     ルールに従わなければ間違いなくうまくいかないでしょう。
+     この他にパッチを作る方法についてのよくできた記述は-
+
+	"The Perfect Patch"
+		http://www.zip.com.au/~akpm/linux/patches/stuff/tpp.txt
+	"Linux kernel patch submission format"
+		http://linux.yyz.us/patch-format.html
+
+  Documentation/stable_api_nonsense.txt
+     このファイルはカーネルの中に不変のAPIを持たないことにした意識的な
+     決断の背景にある理由について書かれています。以下のようなことを含
+     んでいます-
+       - サブシステムとの間に層を作ること(コンパチビリティのため?)
+       - オペレーティングシステム間のドライバの移植性
+       - カーネルソースツリーの素早い変更を遅らせる(もしくは素早い変更
+         を妨げる)
+     このドキュメントは Linux 開発の思想を理解するのに非常に重要です。
+     そして、他のOSでの開発者が Linux に移る時にとても重要です。
+
+  Documentation/SecurityBugs
+    もし Linux カーネルでセキュリティ問題を発見したように思ったら、こ
+    のドキュメントのステップに従ってカーネル開発者に連絡し、問題解決を
+    支援してください。
+
+  Documentation/ManagementStyle
+    このドキュメントは Linux カーネルのメンテナー達がどう行動するか、
+    彼らの手法の背景にある共有されている精神について記述しています。こ
+    れはカーネル開発の初心者なら(もしくは、単に興味があるだけの人でも)
+    重要です。なぜならこのドキュメントは、カーネルメンテナー達の独特な
+    行動についての多くの誤解や混乱を解消するからです。
+
+  Documentation/stable_kernel_rules.txt
+    このファイルはどのように stable カーネルのリリースが行われるかのルー
+    ルが記述されています。そしてこれらのリリースの中のどこかで変更を取
+    り入れてもらいたい場合に何をすればいいかが示されています。
+
+  Documentation/kernel-docs.txt
+  カーネル開発に付随する外部ドキュメントのリストです。もしあなたが
+    探しているものがカーネル内のドキュメントでみつからなかった場合、
+    このリストをあたってみてください。
+
+  Documentation/applying-patches.txt
+    パッチとはなにか、パッチをどうやって様々なカーネルの開発ブランチに
+    適用するのかについて正確に記述した良い入門書です。
+
+カーネルはソースコードから自動的に生成可能な多数のドキュメントを自分自
+身でもっています。これにはカーネル内 API のすべての記述や、どう正しく
+ロックをかけるかの規則が含まれます。このドキュメントは
+Documentation/DocBook/ ディレクトリに作られ、以下のように
+	make pdfdocs
+	make psdocs
+	make htmldocs
+	make mandocs
+コマンドを実行するとメインカーネルのソースディレクトリから
+それぞれ、PDF, Postscript, HTML, man page の形式で生成されます。
+
+カーネル開発者になるには
+---------------------------
+
+もしあなたが、Linux カーネル開発について何も知らないならば、
+KernelNewbies プロジェクトを見るべきです
+	http://kernelnewbies.org
+
+このサイトには役に立つメーリングリストがあり、基本的なカーネル開発に関
+するほとんどどんな種類の質問もできます (既に回答されているようなことを
+聞く前にまずはアーカイブを調べてください)。
+またここには、リアルタイムで質問を聞くことができる IRC チャネルや、Linux
+カーネルの開発に関して学ぶのに便利なたくさんの役に立つドキュメントがあ
+ります。
+
+web サイトには、コードの構成、サブシステム、現在存在するプロジェクト(ツ
+リーにあるもの無いものの両方)の基本的な管理情報があります。
+ここには、また、カーネルのコンパイルのやり方やパッチの当て方などの間接
+的な基本情報も記述されています。
+
+あなたがどこからスタートしてよいかわからないが、Linux カーネル開発コミュ
+ニティに参加して何かすることをさがしている場合には、Linux kernel
+Janitor's プロジェクトにいけばよいでしょう -
+	http://janitor.kernelnewbies.org/
+ここはそのようなスタートをするのにうってつけの場所です。ここには、
+Linux カーネルソースツリーの中に含まれる、きれいにし、修正しなければな
+らない、単純な問題のリストが記述されています。このプロジェクトに関わる
+開発者と一緒に作業することで、あなたのパッチを Linuxカーネルツリーに入
+れるための基礎を学ぶことができ、そしてもしあなたがまだアイディアを持っ
+ていない場合には、次にやる仕事の方向性が見えてくるかもしれません。
+
+もしあなたが、すでにひとまとまりコードを書いていて、カーネルツリーに入
+れたいと思っていたり、それに関する適切な支援を求めたい場合、カーネル
+メンターズプロジェクトはそのような皆さんを助けるためにできました。
+ここにはメーリングリストがあり、以下から参照できます
+	http://selenic.com/mailman/listinfo/kernel-mentors
+
+実際に Linux カーネルのコードについて修正を加える前に、どうやってその
+コードが動作するのかを理解することが必要です。そのためには、特別なツー
+ルの助けを借りてでも、それを直接よく読むことが最良の方法です(ほとんど
+のトリッキーな部分は十分にコメントしてありますから)。そういうツールで
+特におすすめなのは、Linux クロスリファレンスプロジェクトです。これは、
+自己参照方式で、索引がついた web 形式で、ソースコードを参照することが
+できます。この最新の素晴しいカーネルコードのリポジトリは以下で見つかり
+ます-
+	http://sosdg.org/~coywolf/lxr/
+
+開発プロセス
+-----------------------
+
+Linux カーネルの開発プロセスは現在幾つかの異なるメインカーネル「ブラン
+チ」と多数のサブシステム毎のカーネルブランチから構成されます。
+これらのブランチとは-
+  - メインの 2.6.x カーネルツリー
+  - 2.6.x.y -stable カーネルツリー
+  - 2.6.x -git カーネルパッチ
+  - 2.6.x -mm カーネルパッチ
+  - サブシステム毎のカーネルツリーとパッチ
+
+2.6.x カーネルツリー
+-----------------
+
+2.6.x カーネルは Linus Torvalds によってメンテナンスされ、kernel.org
+の pub/linux/kernel/v2.6/ ディレクトリに存在します。この開発プロセスは
+以下のとおり-
+
+  - 新しいカーネルがリリースされた直後に、2週間の特別期間が設けられ、
+    この期間中に、メンテナー達は Linus に大きな差分を送ることができま
+    す。このような差分は通常 -mm カーネルに数週間含まれてきたパッチで
+    す。 大きな変更は git(カーネルのソース管理ツール、詳細は
+    http://git.or.cz/  参照) を使って送るのが好ましいやり方ですが、パッ
+    チファイルの形式のまま送るのでも十分です。
+
+  - 2週間後、-rc1 カーネルがリリースされ、この後にはカーネル全体の安定
+    性に影響をあたえるような新機能は含まない類のパッチしか取り込むこと
+    はできません。新しいドライバ(もしくはファイルシステム)のパッチは
+    -rc1 の後で受け付けられることもあることを覚えておいてください。な
+    ぜなら、変更が独立していて、追加されたコードの外の領域に影響を与え
+    ない限り、退行のリスクは無いからです。-rc1 がリリースされた後、
+    Linus へパッチを送付するのに git を使うこともできますが、パッチは
+    レビューのために、パブリックなメーリングリストへも同時に送る必要が
+    あります。
+
+  - 新しい -rc は Linus が、最新の git ツリーがテスト目的であれば十分
+    に安定した状態にあると判断したときにリリースされます。目標は毎週新
+    しい -rc カーネルをリリースすることです。
+
+  - このプロセスはカーネルが 「準備ができた」と考えられるまで継続しま
+    す。このプロセスはだいたい 6週間継続します。
+
+Andrew Morton が Linux-kernel メーリングリストにカーネルリリースについ
+て書いたことをここで言っておくことは価値があります-
+  「カーネルがいつリリースされるかは誰も知りません。なぜなら、これは現
+  実に認識されたバグの状況によりリリースされるのであり、前もって決めら
+  れた計画によってリリースされるものではないからです。」
+
+2.6.x.y -stable カーネルツリー
+---------------------------
+
+バージョンに4つ目の数字がついたカーネルは -stable カーネルです。これに
+は、2.6.x カーネルで見つかったセキュリティ問題や重大な後戻りに対する比
+較的小さい重要な修正が含まれます。
+
+これは、開発/実験的バージョンのテストに協力することに興味が無く、
+最新の安定したカーネルを使いたいユーザに推奨するブランチです。
+
+もし、2.6.x.y カーネルが存在しない場合には、番号が一番大きい 2.6.x
+が最新の安定版カーネルです。
+
+2.6.x.y は "stable" チーム <stable@kernel.org> でメンテされており、だ
+いたい隔週でリリースされています。
+
+カーネルツリーに入っている、Documentation/stable_kernel_rules.txt ファ
+イルにはどのような種類の変更が -stable ツリーに受け入れ可能か、またリ
+リースプロセスがどう動くかが記述されています。
+
+2.6.x -git パッチ
+------------------
+
+git リポジトリで管理されているLinus のカーネルツリーの毎日のスナップ
+ショットがあります。(だから -git という名前がついています)。これらのパッ
+チはおおむね毎日リリースされており、Linus のツリーの現状を表します。こ
+れは -rc カーネルと比べて、パッチが大丈夫かどうかも確認しないで自動的
+に生成されるので、より実験的です。
+
+2.6.x -mm カーネルパッチ
+------------------------
+
+Andrew Morton によってリリースされる実験的なカーネルパッチ群です。
+Andrew は個別のサブシステムカーネルツリーとパッチを全て集めてきて
+linux-kernel メーリングリストで収集された多数のパッチと同時に一つにま
+とめます。
+このツリーは新機能とパッチが検証される場となります。ある期間の間パッチ
+が -mm に入って価値を証明されたら、Andrew やサブシステムメンテナが、メ
+インラインへ入れるように Linus にプッシュします。
+
+メインカーネルツリーに含めるために Linus に送る前に、すべての新しいパッ
+チが -mm ツリーでテストされることが強く推奨されます。
+
+これらのカーネルは安定して動作すべきシステムとして使うのには適切ではあ
+りませんし、カーネルブランチの中でももっとも動作にリスクが高いものです。
+
+もしあなたが、カーネル開発プロセスの支援をしたいと思っているのであれば、
+どうぞこれらのカーネルリリースをテストに使ってみて、そしてもし問題があ
+れば、またもし全てが正しく動作したとしても、linux-kernel メーリングリ
+ストにフィードバックを提供してください。
+
+すべての他の実験的パッチに加えて、これらのカーネルは通常リリース時点で
+メインラインの -git カーネルに含まれる全ての変更も含んでいます。
+
+-mm カーネルは決まったスケジュールではリリースされません、しかし通常幾
+つかの -mm カーネル (1 から 3 が普通)が各-rc カーネルの間にリリースさ
+れます。
+
+サブシステム毎のカーネルツリーとパッチ
+-------------------------------------------
+
+カーネルの様々な領域で何が起きているかを見られるようにするため、多くの
+カーネルサブシステム開発者は彼らの開発ツリーを公開しています。これらの
+ツリーは説明したように -mm カーネルリリースに入れ込まれます。
+
+以下はさまざまなカーネルツリーの中のいくつかのリスト-
+
+  git ツリー-
+    - Kbuild の開発ツリー、Sam Ravnborg <sam@ravnborg.org>
+	kernel.org:/pub/scm/linux/kernel/git/sam/kbuild.git
+
+    - ACPI の開発ツリー、 Len Brown <len.brown@intel.com>
+	kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
+
+    - Block の開発ツリー、Jens Axboe <axboe@suse.de>
+	kernel.org:/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git
+
+    - DRM の開発ツリー、Dave Airlie <airlied@linux.ie>
+	kernel.org:/pub/scm/linux/kernel/git/airlied/drm-2.6.git
+
+    - ia64 の開発ツリー、Tony Luck <tony.luck@intel.com>
+	kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6.git
+
+    - ieee1394 の開発ツリー、Jody McIntyre <scjody@modernduck.com>
+	kernel.org:/pub/scm/linux/kernel/git/scjody/ieee1394.git
+
+    - infiniband, Roland Dreier <rolandd@cisco.com>
+	kernel.org:/pub/scm/linux/kernel/git/roland/infiniband.git
+
+    - libata, Jeff Garzik <jgarzik@pobox.com>
+	kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev.git
+
+    - ネットワークドライバ, Jeff Garzik <jgarzik@pobox.com>
+	kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6.git
+
+    - pcmcia, Dominik Brodowski <linux@dominikbrodowski.net>
+	kernel.org:/pub/scm/linux/kernel/git/brodo/pcmcia-2.6.git
+
+    - SCSI, James Bottomley <James.Bottomley@SteelEye.com>
+	kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
+
+  その他の git カーネルツリーは http://kernel.org/git に一覧表がありま
+  す。
+
+  quilt ツリー-
+    - USB, PCI ドライバコアと I2C, Greg Kroah-Hartman <gregkh@suse.de>
+	kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+
+バグレポート
+-------------
+
+bugzilla.kernel.org は Linux カーネル開発者がカーネルのバグを追跡する
+場所です。ユーザは見つけたバグの全てをこのツールで報告すべきです。
+どう kernel bugzilla を使うかの詳細は、以下を参照してください-
+	http://test.kernel.org/bugzilla/faq.html
+
+メインカーネルソースディレクトリにあるファイル REPORTING-BUGS はカーネ
+ルバグらしいものについてどうレポートするかの良いテンプレートであり、問
+題の追跡を助けるためにカーネル開発者にとってどんな情報が必要なのかの詳
+細が書かれています。
+
+メーリングリスト
+-------------
+
+上のいくつかのドキュメントで述べていますが、コアカーネル開発者の大部分
+は Linux kernel メーリングリストに参加しています。このリストの登録/脱
+退の方法については以下を参照してください-
+	http://vger.kernel.org/vger-lists.html#linux-kernel
+
+このメーリングリストのアーカイブは web 上の多数の場所に存在します。こ
+れらのアーカイブを探すにはサーチエンジンを使いましょう。例えば-
+	http://dir.gmane.org/gmane.linux.kernel
+
+リストに投稿する前にすでにその話題がアーカイブに存在するかどうかを検索
+することを是非やってください。多数の事がすでに詳細に渡って議論されて
+おり、アーカイブにのみ記録されています。
+
+大部分のカーネルサブシステムも自分の個別の開発を実施するメーリングリス
+トを持っています。個々のグループがどんなリストを持っているかは、
+MAINTAINERS ファイルにリストがありますので参照してください。
+
+多くのリストは kernel.org でホストされています。これらの情報は以下にあ
+ります-
+	http://vger.kernel.org/vger-lists.html
+
+メーリングリストを使う場合、良い行動習慣に従うようにしましょう。
+少し安っぽいが、以下の URL は上のリスト(や他のリスト)で会話する場合の
+シンプルなガイドラインを示しています-
+	http://www.albion.com/netiquette/
+
+もし複数の人があなたのメールに返事をした場合、CC: で受ける人のリストは
+だいぶ多くなるでしょう。良い理由がない場合、CC: リストから誰かを削除を
+しないように、また、メーリングリストのアドレスだけにリプライすることの
+ないようにしましょう。1つは送信者から、もう1つはリストからのように、メー
+ルを2回受けることになってもそれに慣れ、しゃれたメールヘッダーを追加し
+てこの状態を変えようとしないように。人々はそのようなことは好みません。
+
+今までのメールでのやりとりとその間のあなたの発言はそのまま残し、
+"John Kernlehacker wrote ...:" の行をあなたのリプライの先頭行にして、
+メールの先頭でなく、各引用行の間にあなたの言いたいことを追加するべきで
+す。
+
+もしパッチをメールに付ける場合は、Documentation/SubmittingPatches に提
+示されているように、それは プレーンな可読テキストにすることを忘れない
+ようにしましょう。カーネル開発者は 添付や圧縮したパッチを扱いたがりま
+せん-
+彼らはあなたのパッチの行毎にコメントを入れたいので、そのためにはそうす
+るしかありません。あなたのメールプログラムが空白やタブを圧縮しないよう
+に確認した方がいいです。最初の良いテストとしては、自分にメールを送って
+みて、そのパッチを自分で当ててみることです。もしそれがうまく行かないな
+ら、あなたのメールプログラムを直してもらうか、正しく動くように変えるべ
+きです。
+
+とりわけ、他の登録者に対する尊敬を表すようにすることを覚えておいてくだ
+さい。
+
+コミュニティと共に働くこと
+--------------------------
+
+カーネルコミュニティのゴールは可能なかぎり最高のカーネルを提供すること
+です。あなたがパッチを受け入れてもらうために投稿した場合、それは、技術
+的メリットだけがレビューされます。その際、あなたは何を予想すべきでしょ
+うか?
+  - 批判
+  - コメント
+  - 変更の要求
+  - パッチの正当性の証明要求
+  - 沈黙
+
+思い出してください、ここはあなたのパッチをカーネルに入れる話です。あ
+なたは、あなたのパッチに対する批判とコメントを受け入れるべきで、それら
+を技術的レベルで評価して、パッチを再作成するか、なぜそれらの変更をすべ
+きでないかを明確で簡潔な理由の説明を提供してください。
+もし、あなたのパッチに何も反応がない場合、たまにはメールの山に埋もれて
+見逃され、あなたの投稿が忘れられてしまうこともあるので、数日待って再度
+投稿してください。
+
+あなたがやるべきでないものは?
+  - 質問なしにあなたのパッチが受け入れられると想像すること
+  - 守りに入ること
+  - コメントを無視すること
+  - 要求された変更を何もしないでパッチを出し直すこと
+
+可能な限り最高の技術的解決を求めているコミュニティでは、パッチがどのく
+らい有益なのかについては常に異なる意見があります。あなたは協調的である
+べきですし、また、あなたのアイディアをカーネルに対してうまく合わせるよ
+うにすることが望まれています。もしくは、最低限あなたのアイディアがそれ
+だけの価値があるとすすんで証明するようにしなければなりません。
+正しい解決に向かって進もうという意志がある限り、間違うことがあっても許
+容されることを忘れないでください。
+
+あなたの最初のパッチに単に 1ダースもの修正を求めるリストの返答になるこ
+とも普通のことです。これはあなたのパッチが受け入れられないということで
+は *ありません*、そしてあなた自身に反対することを意味するのでも *ありま
+せん*。単に自分のパッチに対して指摘された問題を全て修正して再送すれば
+いいのです。
+
+カーネルコミュニティと企業組織のちがい
+-----------------------------------------------------------------
+
+カーネルコミュニティは大部分の伝統的な会社の開発環境とは異ったやり方で
+動いています。以下は問題を避けるためにできるとよいことののリストです-
+
+  あなたの提案する変更について言うときのうまい言い方:
+
+    - "これは複数の問題を解決します"
+    - "これは2000行のコードを削除します"
+    - "以下のパッチは、私が言おうとしていることを説明するものです"
+    - "私はこれを5つの異なるアーキテクチャでテストしたのですが..."
+    - "以下は一連の小さなパッチ群ですが..."
+    - "これは典型的なマシンでの性能を向上させます.."
+
+  やめた方がいい悪い言い方:
+
+    - このやり方で AIX/ptx/Solaris ではできたので、できるはずだ
+    - 私はこれを20年もの間やってきた、だから
+    - これは、私の会社が金儲けをするために必要だ
+    - これは我々のエンタープライズ向け商品ラインのためである
+    - これは 私が自分のアイディアを記述した、1000ページの設計資料である
+    - 私はこれについて、6ケ月作業している。
+    - 以下は ... に関する5000行のパッチです
+    - 私は現在のぐちゃぐちゃを全部書き直した、それが以下です...
+    - 私は〆切がある、そのためこのパッチは今すぐ適用される必要がある
+
+カーネルコミュニティが大部分の伝統的なソフトウェアエンジニアリングの労
+働環境と異なるもう一つの点は、やりとりに顔を合わせないということです。
+email と irc を第一のコミュニケーションの形とする一つの利点は、性別や
+民族の差別がないことです。Linux カーネルの職場環境は女性や少数民族を受
+容します。なぜなら、email アドレスによってのみあなたが認識されるからで
+す。
+国際的な側面からも活動領域を均等にするようにします。なぜならば、あなた
+は人の名前で性別を想像できないからです。ある男性が アンドレアという名
+前で、女性の名前は パット かもしれません (訳注 Andrea は米国では女性、
+それ以外(欧州など)では男性名として使われることが多い。同様に、Pat は
+Patricia (主に女性名)や Patrick (主に男性名)の略称)。
+Linux カーネルの活動をして、意見を表明したことがある大部分の女性は、前
+向きな経験をもっています。
+
+言葉の壁は英語が得意でない一部の人には問題になります。
+メーリングリストの中できちんとアイディアを交換するには、相当うまく英語
+を操れる必要があることもあります。そのため、あなたは自分のメール
+を送る前に英語で意味が通じているかをチェックすることをお薦めします。
+
+変更を分割する
+---------------------
+
+Linux カーネルコミュニティは、一度に大量のコードの塊を喜んで受容するこ
+とはありません。変更は正確に説明される必要があり、議論され、小さい、個
+別の部分に分割する必要があります。これはこれまで多くの会社がやり慣れて
+きたことと全く正反対のことです。あなたのプロポーザルは、開発プロセスのと
+ても早い段階から紹介されるべきです。そうすれば あなたは自分のやってい
+ることにフィードバックを得られます。これは、コミュニティからみれば、あ
+なたが彼らと一緒にやっているように感じられ、単にあなたの提案する機能の
+ゴミ捨て場として使っているのではない、と感じられるでしょう。
+しかし、一度に 50 もの email をメーリングリストに送りつけるようなことは
+やってはいけません、あなたのパッチ群はいつもどんな時でもそれよりは小さ
+くなければなりません。
+
+パッチを分割する理由は以下です-
+
+1) 小さいパッチはあなたのパッチが適用される見込みを大きくします、カー
+   ネルの人達はパッチが正しいかどうかを確認する時間や労力をかけないか
+   らです。5行のパッチはメンテナがたった1秒見るだけで適用できます。し
+   かし、500行のパッチは、正しいことをレビューするのに数時間かかるかも
+   しれません(時間はパッチのサイズなどにより指数関数に比例してかかりま
+   す)
+   小さいパッチは何かあったときにデバッグもとても簡単になります。パッ
+   チを1個1個取り除くのは、とても大きなパッチを当てた後に(かつ、何かお
+   かしくなった後で)解剖するのに比べればとても簡単です。
+
+2) 小さいパッチを送るだけでなく、送るまえに、書き直して、シンプルにす
+   る(もしくは、単に順番を変えるだけでも)ことも、とても重要です。
+
+以下はカーネル開発者の Al Viro のたとえ話しです:
+
+        "生徒の数学の宿題を採点する先生のことを考えてみてください、先
+        生は生徒が解に到達するまでの試行錯誤をみたいとは思わないでしょ
+        う。先生は簡潔な最高の解をみたいのです。良い生徒はこれを知って
+        おり、そして最終解の前の中間作業を提出することは決してないので
+        す"
+        カーネル開発でもこれは同じです。メンテナー達とレビューア達は、
+        問題を解決する解の背後になる思考プロセスをみたいとは思いません。
+        彼らは単純であざやかな解決方法をみたいのです。
+
+あざやかな解を説明するのと、コミュニティと共に仕事をし、未解決の仕事を
+議論することのバランスをキープするのは難しいかもしれません。
+ですから、開発プロセスの早期段階で改善のためのフィードバックをもらうよ
+うにするのもいいですが、変更点を小さい部分に分割して全体ではまだ完成し
+ていない仕事を(部分的に)取り込んでもらえるようにすることもいいことです。
+
+また、でき上がっていないものや、"将来直す" ようなパッチを、本流に含め
+てもらうように送っても、それは受け付けられないことを理解してください。
+
+あなたの変更を正当化する
+-------------------
+
+あなたのパッチを分割するのと同時に、なぜその変更を追加しなければならな
+いかを Linux コミュニティに知らせることはとても重要です。新機能は必要
+性と有用性で正当化されなければなりません。
+
+あなたの変更の説明
+--------------------
+
+あなたのパッチを送付する場合には、メールの中のテキストで何を言うかにつ
+いて、特別に注意を払ってください。この情報はパッチの ChangeLog に使わ
+れ、いつも皆がみられるように保管されます。これは次のような項目を含め、
+パッチを完全に記述するべきです-
+
+  - なぜ変更が必要か
+  - パッチ全体の設計アプローチ
+  - 実装の詳細
+  - テスト結果
+
+これについて全てがどのようにあるべきかについての詳細は、以下のドキュメ
+ントの ChangeLog セクションをみてください-
+  "The Perfect Patch"
+      http://www.zip.com.au/~akpm/linux/patches/stuff/tpp.txt
+
+これらのどれもが、時にはとても困難です。これらの慣例を完璧に実施するに
+は数年かかるかもしれません。これは継続的な改善のプロセスであり、そのた
+めには多数の忍耐と決意を必要とするものです。でも、諦めないで、これは可
+能なことです。多数の人がすでにできていますし、彼らも皆最初はあなたと同
+じところからスタートしたのですから。
+
+Paolo Ciarrocchi に感謝、彼は彼の書いた "Development Process"
+(http://linux.tar.bz/articles/2.6-development_process)セクショ
+ンをこのテキストの原型にすることを許可してくれました。
+Randy Dunlap と Gerrit Huizenga はメーリングリストでやるべきこととやっ
+てはいけないことのリストを提供してくれました。
+以下の人々のレビュー、コメント、貢献に感謝。
+Pat Mochel, Hanna Linder, Randy Dunlap, Kay Sievers,
+Vojtech Pavlik, Jan Kara, Josh Boyer, Kees Cook, Andrew Morton, Andi
+Kleen, Vadim Lobanov, Jesper Juhl, Adrian Bunk, Keri Harris, Frans Pop,
+David A. Wheeler, Junio Hamano, Michael Kerrisk, と Alex Shepard
+彼らの支援なしでは、このドキュメントはできなかったでしょう。
+
+Maintainer: Greg Kroah-Hartman <greg@kroah.com>
diff --git a/Documentation/ja_JP/stable_api_nonsense.txt b/Documentation/ja_JP/stable_api_nonsense.txt
new file mode 100644
index 0000000..b3f2b27
--- /dev/null
+++ b/Documentation/ja_JP/stable_api_nonsense.txt
@@ -0,0 +1,263 @@
+NOTE:
+This is a Japanese translation of
+"Documentation/stable_api_nonsense.txt".
+It is maintained by
+IKEDA, Munehiro <m-ikeda@ds.jp.nec.com>
+and the JF Project team <http://www.linux.or.jp/JF/>.
+If you find any difference from the original file or any problem in the
+translation, please contact the maintainer of this file or the JF project.
+
+Please also note that the purpose of this file is to make the original
+easier to read for readers whose first language is not English; it is not
+intended as a fork.  So, if you have any comments or updates for this
+file, please try to update the original (English) file first.
+
+==================================
+これは、
+linux-2.6.22-rc4/Documentation/stable_api_nonsense.txt の和訳
+です。
+翻訳団体: JF プロジェクト < http://www.linux.or.jp/JF/ >
+翻訳日 : 2007/06/11
+原著作者: Greg Kroah-Hartman < greg at kroah dot com >
+翻訳者 : 池田 宗広 < m-ikeda at ds dot jp dot nec dot com >
+校正者 : Masanori Kobayashi さん < zap03216 at nifty dot ne dot jp >
+          Seiji Kaneko さん < skaneko at a2 dot mbn dot or dot jp >
+==================================
+
+
+
+Linux カーネルのドライバインターフェース
+(あなたの質問すべてに対する回答とその他諸々)
+
+Greg Kroah-Hartman <greg at kroah dot com>
+
+
+この文書は、なぜ Linux ではバイナリカーネルインターフェースが定義
+されていないのか、またはなぜ不変のカーネルインターフェースを持たな
+いのか、ということを説明するために書かれた。ここでの話題は「カーネ
+ル内部の」インターフェースについてであり、ユーザー空間とのインター
+フェースではないことを理解してほしい。カーネルとユーザー空間とのイ
+ンターフェースとはアプリケーションプログラムが使用するものであり、
+つまりシステムコールのインターフェースがこれに当たる。これは今まで
+長きに渡り、かつ今後も「まさしく」不変である。私は確か 0.9 か何か
+より前のカーネルを使ってビルドした古いプログラムを持っているが、そ
+れは最新の 2.6 カーネルでもきちんと動作する。ユーザー空間とのイン
+ターフェースは、ユーザーとアプリケーションプログラマが不変性を信頼
+してよいものの一つである。
+
+
+要旨
+----
+
+あなたは不変のカーネルインターフェースが必要だと考えているかもしれ
+ないが、実際のところはそうではない。あなたは必要としているものが分
+かっていない。あなたが必要としているものは安定して動作するドライバ
+であり、それはドライバがメインのカーネルツリーに含まれる場合のみ得
+ることができる。ドライバがメインのカーネルツリーに含まれていると、
+他にも多くの良いことがある。それは、Linux をより強固で、安定な、成
+熟したオペレーティングシステムにすることができるということだ。これ
+こそ、そもそもあなたが Linux を使う理由のはずだ。
+
+
+はじめに
+--------
+
+カーネル内部のインターフェース変更を心配しなければならないドライバ
+を書きたいなどというのは、変わり者だけだ。この世界のほとんどの人は、
+そのようなドライバがどんなインターフェースを使っているかなど知らな
+いし、そんなドライバのことなど全く気にもかけていない。
+
+
+まず初めに、クローズソースとか、ソースコードの隠蔽とか、バイナリの
+みが配布される使い物にならない代物[訳注(1)]とか、実体はバイナリ
+コードでそれを読み込むためのラッパー部分のみソースコードが公開され
+ているとか、その他用語は何であれ GPL の下にソースコードがリリース
+されていないカーネルドライバに関する法的な問題について、私は「いか
+なる議論も」行うつもりがない。法的な疑問があるのならば、プログラマ
+である私ではなく、弁護士に相談して欲しい。ここでは単に、技術的な問
+題について述べることにする。(法的な問題を軽視しているわけではない。
+それらは実際に存在するし、あなたはそれをいつも気にかけておく必要が
+ある)
+
+訳注(1)
+「使い物にならない代物」の原文は "blob"
+
+
+さてここでは、バイナリカーネルインターフェースについてと、ソースレ
+ベルでのインターフェースの不変性について、という二つの話題を取り上
+げる。この二つは互いに依存する関係にあるが、まずはバイナリインター
+フェースについて議論を行いやっつけてしまおう。
+
+
+バイナリカーネルインターフェース
+--------------------------------
+
+もしソースレベルでのインターフェースが不変ならば、バイナリインター
+フェースも当然のように不変である、というのは正しいだろうか?正しく
+ない。Linux カーネルに関する以下の事実を考えてみてほしい。
+  - あなたが使用するCコンパイラのバージョンによって、カーネル内部
+    の構造体の配置構造は異なったものになる。また、関数は異なった方
+    法でカーネルに含まれることになるかもしれない(例えばインライン
+    関数として扱われたり、扱われなかったりする)。個々の関数がどの
+    ようにコンパイルされるかはそれほど重要ではないが、構造体のパデ
+    ィングが異なるというのは非常に重要である。
+  - あなたがカーネルのビルドオプションをどのように設定するかによっ
+    て、カーネルには広い範囲で異なった事態が起こり得る。
+      - データ構造は異なるデータフィールドを持つかもしれない
+      - いくつかの関数は全く実装されていない状態になり得る
+        (例:SMP向けではないビルドでは、いくつかのロックは中身が
+          カラにコンパイルされる)
+      - カーネル内のメモリは、異なった方法で配置され得る。これはビ
+        ルドオプションに依存している。
+  - Linux は様々な異なるプロセッサアーキテクチャ上で動作する。
+    あるアーキテクチャ用のバイナリドライバを、他のアーキテクチャで
+    正常に動作させる方法はない。
+
+
+ある特定のカーネル設定を使用し、カーネルをビルドしたのと正確に同じ
+Cコンパイラを使用して単にカーネルモジュールをコンパイルするだけで
+も、あなたはこれらいくつもの問題に直面することになる。ある特定の
+Linux ディストリビューションの、ある特定のリリースバージョン用にモ
+ジュールを提供しようと思っただけでも、これらの問題を引き起こすには
+十分である。にも関わらず Linux ディストリビューションの数と、サ
+ポートするディストリビューションのリリース数を掛け算し、それら一つ
+一つについてビルドを行ったとしたら、今度はリリースごとのビルドオプ
+ションの違いという悪夢にすぐさま悩まされることになる。また、ディス
+トリビューションの各リリースバージョンには、異なるハードウェア(プ
+ロセッサタイプや種々のオプション)に対応するため、何種類かのカーネ
+ルが含まれているということも理解して欲しい。従って、ある一つのリ
+リースバージョンだけのためにモジュールを作成する場合でも、あなたは
+何バージョンものモジュールを用意しなければならない。
+
+
+信じて欲しい。このような方法でサポートを続けようとするなら、あなた
+はいずれ正気を失うだろう。遠い昔、私はそれがいかに困難なことか、身
+をもって学んだのだ・・・
+
+
+不変のカーネルソースレベルインターフェース
+------------------------------------------
+
+メインカーネルツリーに含まれていない Linux カーネルドライバを継続
+してサポートしていこうとしている人たちとの議論においては、これは極
+めて「引火性の高い」話題である。[訳注(2)]
+
+訳注(2)
+「引火性の高い」の原文は "volatile"。
+volatile には「揮発性の」「爆発しやすい」という意味の他、「変わり
+やすい」「移り気な」という意味がある。
+「(この話題は)爆発的に激しい論争を巻き起こしかねない」ということ
+を、「(カーネルのソースレベルインターフェースは)移ろい行くもので
+ある」ということを連想させる "volatile" という単語で表現している。
+
+
+Linux カーネルの開発は継続的に速いペースで行われ、決して歩みを緩め
+ることがない。その中でカーネル開発者達は、現状のインターフェースに
+あるバグを見つけ、より良い方法を考え出す。彼らはやがて、現状のイン
+ターフェースがより正しく動作するように修正を行う。その過程で関数の
+名前は変更されるかもしれず、構造体は大きく、または小さくなるかもし
+れず、関数の引数は検討しなおされるかもしれない。そのような場合、引
+き続き全てが正常に動作するよう、カーネル内でこれらのインターフェー
+スを使用している個所も全て同時に修正される。
+
+
+具体的な例として、カーネル内の USB インターフェースを挙げる。USB
+サブシステムはこれまでに少なくとも3回の書き直しが行われ、その結果
+インターフェースが変更された。これらの書き直しはいくつかの異なった
+問題を修正するために行われた。
+  - 同期的データストリームが非同期に変更された。これにより多数のド
+    ライバを単純化でき、全てのドライバのスループットが向上した。今
+    やほとんど全ての USB デバイスは、考えられる最高の速度で動作し
+    ている。
+  - USB ドライバが USB サブシステムのコアから行う、データパケット
+    用のメモリ確保方法が変更された。これに伴い、いくつもの文書化さ
+    れたデッドロック条件を回避するため、全ての USB ドライバはより
+    多くの情報を USB コアに提供しなければならないようになっている。
+
+
+このできごとは、数多く存在するクローズソースのオペレーティングシス
+テムとは全く対照的だ。それらは長期に渡り古い USB インターフェース
+をメンテナンスしなければならない。古いインターフェースが残ることで、
+新たな開発者が偶然古いインターフェースを使い、正しくない方法で開発
+を行ってしまう可能性が生じる。これによりシステムの安定性は危険にさ
+らされることになる。
+
+
+上に挙げたどちらの例においても、開発者達はその変更が重要かつ必要で
+あることに合意し、比較的楽にそれを実行した。もし Linux がソースレ
+ベルでインターフェースの不変性を保証しなければならないとしたら、新
+しいインターフェースを作ると同時に、古い、問題のある方を今後ともメ
+ンテナンスするという余計な仕事を USB の開発者にさせなければならな
+い。Linux の USB 開発者は、自分の時間を使って仕事をしている。よっ
+て、価値のない余計な仕事を報酬もなしに実行しろと言うことはできない。
+
+
+セキュリティ問題も、Linux にとっては非常に重要である。ひとたびセキ
+ュリティに関する問題が発見されれば、それは極めて短期間のうちに修正
+される。セキュリティ問題の発生を防ぐための修正は、カーネルの内部イ
+ンターフェースの変更を何度も引き起こしてきた。その際同時に、変更さ
+れたインターフェースを使用する全てのドライバもまた変更された。これ
+により問題が解消し、将来偶然に問題が再発してしまわないことが保証さ
+れる。もし内部インターフェースの変更が許されないとしたら、このよう
+にセキュリティ問題を修正し、将来再発しないことを保証することなど不
+可能なのだ。
+
+
+カーネルのインターフェースは時が経つにつれクリーンナップを受ける。
+誰も使っていないインターフェースは削除される。これにより、可能な限
+りカーネルが小さく保たれ、現役の全てのインターフェースが可能な限り
+テストされることを保証しているのだ。(使われていないインターフェー
+スの妥当性をテストすることは不可能と言っていいだろう)
+
+
+
+これから何をすべきか
+-----------------------
+
+では、もしメインのカーネルツリーに含まれない Linux カーネルドライ
+バがあったとして、あなたは、つまり開発者は何をするべきだろうか?全
+てのディストリビューションの全てのカーネルバージョン向けにバイナリ
+のドライバを供給することは悪夢であり、カーネルインターフェースの変
+更を追いかけ続けることもまた過酷な仕事だ。
+
+
+答えは簡単。そのドライバをメインのカーネルツリーに入れてしまえばよ
+い。(ここで言及しているのは、GPL に従って公開されるドライバのこと
+だということに注意してほしい。あなたのコードがそれに該当しないなら
+ば、さよなら。幸運を祈ります。ご自分で何とかしてください。Andrew
+と Linus からのコメント<Andrew と Linus のコメントへのリンクをこ
+こに置く>をどうぞ)ドライバがメインツリーに入れば、カーネルのイン
+ターフェースが変更された場合、変更を行った開発者によってドライバも
+修正されることになるだろう。あなたはほとんど労力を払うことなしに、
+常にビルド可能できちんと動作するドライバを手に入れることができる。
+
+
+ドライバをメインのカーネルツリーに入れると、非常に好ましい以下の効
+果がある。
+  - ドライバの品質が向上する一方で、(元の開発者にとっての)メンテ
+    ナンスコストは下がる。
+  - あなたのドライバに他の開発者が機能を追加してくれる。
+  - 誰かがあなたのドライバにあるバグを見つけ、修正してくれる。
+  - 誰かがあなたのドライバにある改善点を見つけてくれる。
+  - 外部インターフェースが変更されドライバの更新が必要になった場合、
+    誰かがあなたの代わりに更新してくれる。
+  - ドライバを入れてくれとディストロに頼まなくても、そのドライバは
+    全ての Linux ディストリビューションに自動的に含まれてリリース
+    される。
+
+
+Linux では、他のどのオペレーティングシステムよりも数多くのデバイス
+が「そのまま」使用できるようになった。また Linux は、どのオペレー
+ティングシステムよりも数多くのプロセッサアーキテクチャ上でそれらの
+デバイスを使用することができるようにもなった。このように、Linux の
+開発モデルは実証されており、今後も間違いなく正しい方向へと進んでい
+くだろう。:)
+
+
+
+------
+
+この文書の初期の草稿に対し、Randy Dunlap, Andrew Morton, David
+Brownell, Hanna Linder, Robert Love, Nishanth Aravamudan から査読
+と助言を頂きました。感謝申し上げます。
+
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8363ad3..9a54148 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -817,6 +817,32 @@
 	js=		[HW,JOY] Analog joystick
 			See Documentation/input/joystick.txt.
 
+	kernelcore=nn[KMG]	[KNL,IA-32,IA-64,PPC,X86-64] This parameter
+			specifies the amount of memory usable by the kernel
+			for non-movable allocations.  The requested amount is
+			spread evenly throughout all nodes in the system. The
+			remaining memory in each node is used for Movable
+			pages. In the event that a node is too small to have both
+			kernelcore and Movable pages, kernelcore pages will
+			take priority and other nodes will have a larger number
+			of kernelcore pages.  The Movable zone is used for the
+			allocation of pages that may be reclaimed or moved
+			by the page migration subsystem.  This means that
+			HugeTLB pages may not be allocated from this zone.
+			Note that allocations like PTEs-from-HighMem still
+			use the HighMem zone if it exists, and the Normal
+			zone if it does not.
+
+	movablecore=nn[KMG]	[KNL,IA-32,IA-64,PPC,X86-64] This parameter
+			is similar to kernelcore except it specifies the
+			amount of memory used for migratable allocations.
+			If both kernelcore and movablecore are specified,
+			then kernelcore will be at *least* the specified
+			value but may be more. If movablecore on its own
+			is specified, the administrator must be careful
+			that the amount of memory usable for all allocations
+			is not too small.
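+
+			Illustrative example only (the sizes are arbitrary):
+			booting with kernelcore=512M reserves about 512MB in
+			total, spread across the nodes, for non-movable kernel
+			allocations and leaves the rest of each node for
+			Movable pages; movablecore=2G instead requests about
+			2GB in total for Movable pages.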
+
 	keepinitrd	[HW,ARM]
 
 	kstack=N	[IA-32,X86-64] Print N words from the kernel stack
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index da5404ab..cb12ae1 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -247,12 +247,6 @@
 fastcall, or anything else that affects how args are passed, the
 handler's declaration must match.
 
-NOTE: A macro JPROBE_ENTRY is provided to handle architecture-specific
-aliasing of jp->entry. In the interest of portability, it is advised
-to use:
-
-	jp->entry = JPROBE_ENTRY(handler);
-
 register_jprobe() returns 0 on success, or a negative errno otherwise.
 
 4.3 register_kretprobe
@@ -518,7 +512,7 @@
 }
 
 static struct jprobe my_jprobe = {
-	.entry = JPROBE_ENTRY(jdo_fork)
+	.entry = jdo_fork
 };
 
 static int __init jprobe_init(void)
diff --git a/Documentation/lguest/Makefile b/Documentation/lguest/Makefile
new file mode 100644
index 0000000..b9b9427
--- /dev/null
+++ b/Documentation/lguest/Makefile
@@ -0,0 +1,27 @@
+# This creates the demonstration utility "lguest" which runs a Linux guest.
+
+# For those people that have a separate object dir, look there for .config
+KBUILD_OUTPUT := ../..
+ifdef O
+  ifeq ("$(origin O)", "command line")
+    KBUILD_OUTPUT := $(O)
+  endif
+endif
+# We rely on CONFIG_PAGE_OFFSET to know where to put lguest binary.
+include $(KBUILD_OUTPUT)/.config
+LGUEST_GUEST_TOP := ($(CONFIG_PAGE_OFFSET) - 0x08000000)
+
+CFLAGS:=-Wall -Wmissing-declarations -Wmissing-prototypes -O3 \
+	-static -DLGUEST_GUEST_TOP="$(LGUEST_GUEST_TOP)" -Wl,-T,lguest.lds
+LDLIBS:=-lz
+
+all: lguest.lds lguest
+
+# The linker script on x86 is so complex the only way of creating one
+# which will link our binary in the right place is to mangle the
+# default one.
+lguest.lds:
+	$(LD) --verbose | awk '/^==========/ { PRINT=1; next; } /SIZEOF_HEADERS/ { gsub(/0x[0-9A-F]*/, "$(LGUEST_GUEST_TOP)") } { if (PRINT) print $$0; }' > $@
+
+clean:
+	rm -f lguest.lds lguest
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
new file mode 100644
index 0000000..1432b50
--- /dev/null
+++ b/Documentation/lguest/lguest.c
@@ -0,0 +1,1012 @@
+/* Simple program to layout "physical" memory for new lguest guest.
+ * Linked high to avoid likely physical memory.  */
+#define _LARGEFILE64_SOURCE
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <err.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <elf.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <ctype.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <time.h>
+#include <netinet/in.h>
+#include <net/if.h>
+#include <linux/sockios.h>
+#include <linux/if_tun.h>
+#include <sys/uio.h>
+#include <termios.h>
+#include <getopt.h>
+#include <zlib.h>
+typedef unsigned long long u64;
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+#include "../../include/linux/lguest_launcher.h"
+#include "../../include/asm-i386/e820.h"
+
+#define PAGE_PRESENT 0x7 	/* Present, RW, Execute */
+#define NET_PEERNUM 1
+#define BRIDGE_PFX "bridge:"
+#ifndef SIOCBRADDIF
+#define SIOCBRADDIF	0x89a2		/* add interface to bridge      */
+#endif
+
+static bool verbose;
+#define verbose(args...) \
+	do { if (verbose) printf(args); } while(0)
+static int waker_fd;
+
+struct device_list
+{
+	fd_set infds;
+	int max_infd;
+
+	struct device *dev;
+	struct device **lastdev;
+};
+
+struct device
+{
+	struct device *next;
+	struct lguest_device_desc *desc;
+	void *mem;
+
+	/* Watch this fd if handle_input non-NULL. */
+	int fd;
+	bool (*handle_input)(int fd, struct device *me);
+
+	/* Watch DMA to this key if handle_input non-NULL. */
+	unsigned long watch_key;
+	u32 (*handle_output)(int fd, const struct iovec *iov,
+			     unsigned int num, struct device *me);
+
+	/* Device-specific data. */
+	void *priv;
+};
+
+static int open_or_die(const char *name, int flags)
+{
+	int fd = open(name, flags);
+	if (fd < 0)
+		err(1, "Failed to open %s", name);
+	return fd;
+}
+
+static void *map_zeroed_pages(unsigned long addr, unsigned int num)
+{
+	static int fd = -1;
+
+	if (fd == -1)
+		fd = open_or_die("/dev/zero", O_RDONLY);
+
+	if (mmap((void *)addr, getpagesize() * num,
+		 PROT_READ|PROT_WRITE|PROT_EXEC, MAP_FIXED|MAP_PRIVATE, fd, 0)
+	    != (void *)addr)
+		err(1, "Mmaping %u pages of /dev/zero @%p", num, (void *)addr);
+	return (void *)addr;
+}
+
+/* Find magic string marking entry point, return entry point. */
+static unsigned long entry_point(void *start, void *end,
+				 unsigned long page_offset)
+{
+	void *p;
+
+	for (p = start; p < end; p++)
+		if (memcmp(p, "GenuineLguest", strlen("GenuineLguest")) == 0)
+			return (long)p + strlen("GenuineLguest") + page_offset;
+
+	err(1, "Is this image a genuine lguest?");
+}
+
+/* Returns the entry point */
+static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr,
+			     unsigned long *page_offset)
+{
+	void *addr;
+	Elf32_Phdr phdr[ehdr->e_phnum];
+	unsigned int i;
+	unsigned long start = -1UL, end = 0;
+
+	/* Sanity checks. */
+	if (ehdr->e_type != ET_EXEC
+	    || ehdr->e_machine != EM_386
+	    || ehdr->e_phentsize != sizeof(Elf32_Phdr)
+	    || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr))
+		errx(1, "Malformed elf header");
+
+	if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0)
+		err(1, "Seeking to program headers");
+	if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr))
+		err(1, "Reading program headers");
+
+	*page_offset = 0;
+	/* We map the loadable segments at virtual addresses corresponding
+	 * to their physical addresses (our virtual == guest physical). */
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		if (phdr[i].p_type != PT_LOAD)
+			continue;
+
+		verbose("Section %i: size %i addr %p\n",
+			i, phdr[i].p_memsz, (void *)phdr[i].p_paddr);
+
+		/* We expect linear address space. */
+		if (!*page_offset)
+			*page_offset = phdr[i].p_vaddr - phdr[i].p_paddr;
+		else if (*page_offset != phdr[i].p_vaddr - phdr[i].p_paddr)
+			errx(1, "Page offset of section %i different", i);
+
+		if (phdr[i].p_paddr < start)
+			start = phdr[i].p_paddr;
+		if (phdr[i].p_paddr + phdr[i].p_filesz > end)
+			end = phdr[i].p_paddr + phdr[i].p_filesz;
+
+		/* We map everything private, writable. */
+		addr = mmap((void *)phdr[i].p_paddr,
+			    phdr[i].p_filesz,
+			    PROT_READ|PROT_WRITE|PROT_EXEC,
+			    MAP_FIXED|MAP_PRIVATE,
+			    elf_fd, phdr[i].p_offset);
+		if (addr != (void *)phdr[i].p_paddr)
+			err(1, "Mmaping vmlinux seg %i gave %p not %p",
+			    i, addr, (void *)phdr[i].p_paddr);
+	}
+
+	return entry_point((void *)start, (void *)end, *page_offset);
+}
+
+/* This is amazingly reliable. */
+static unsigned long intuit_page_offset(unsigned char *img, unsigned long len)
+{
+	unsigned int i, possibilities[256] = { 0 };
+
+	for (i = 0; i + 4 < len; i++) {
+		/* mov 0xXXXXXXXX,%eax */
+		if (img[i] == 0xA1 && ++possibilities[img[i+4]] > 3)
+			return (unsigned long)img[i+4] << 24;
+	}
+	errx(1, "could not determine page offset");
+}
+
+static unsigned long unpack_bzimage(int fd, unsigned long *page_offset)
+{
+	gzFile f;
+	int ret, len = 0;
+	void *img = (void *)0x100000;
+
+	f = gzdopen(fd, "rb");
+	while ((ret = gzread(f, img + len, 65536)) > 0)
+		len += ret;
+	if (ret < 0)
+		err(1, "reading image from bzImage");
+
+	verbose("Unpacked size %i addr %p\n", len, img);
+	*page_offset = intuit_page_offset(img, len);
+
+	return entry_point(img, img + len, *page_offset);
+}
+
+static unsigned long load_bzimage(int fd, unsigned long *page_offset)
+{
+	unsigned char c;
+	int state = 0;
+
+	/* Ugly brute force search for gzip header. */
+	while (read(fd, &c, 1) == 1) {
+		switch (state) {
+		case 0:
+			if (c == 0x1F)
+				state++;
+			break;
+		case 1:
+			if (c == 0x8B)
+				state++;
+			else
+				state = 0;
+			break;
+		case 2 ... 8:
+			state++;
+			break;
+		case 9:
+			lseek(fd, -10, SEEK_CUR);
+			if (c != 0x03) /* Compressed under UNIX. */
+				state = -1;
+			else
+				return unpack_bzimage(fd, page_offset);
+		}
+	}
+	errx(1, "Could not find kernel in bzImage");
+}
+
+static unsigned long load_kernel(int fd, unsigned long *page_offset)
+{
+	Elf32_Ehdr hdr;
+
+	if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
+		err(1, "Reading kernel");
+
+	if (memcmp(hdr.e_ident, ELFMAG, SELFMAG) == 0)
+		return map_elf(fd, &hdr, page_offset);
+
+	return load_bzimage(fd, page_offset);
+}
+
+static inline unsigned long page_align(unsigned long addr)
+{
+	return ((addr + getpagesize()-1) & ~(getpagesize()-1));
+}
+
+/* initrd gets loaded at top of memory: return length. */
+static unsigned long load_initrd(const char *name, unsigned long mem)
+{
+	int ifd;
+	struct stat st;
+	unsigned long len;
+	void *iaddr;
+
+	ifd = open_or_die(name, O_RDONLY);
+	if (fstat(ifd, &st) < 0)
+		err(1, "fstat() on initrd '%s'", name);
+
+	len = page_align(st.st_size);
+	iaddr = mmap((void *)mem - len, st.st_size,
+		     PROT_READ|PROT_EXEC|PROT_WRITE,
+		     MAP_FIXED|MAP_PRIVATE, ifd, 0);
+	if (iaddr != (void *)mem - len)
+		err(1, "Mmaping initrd '%s' returned %p not %p",
+		    name, iaddr, (void *)mem - len);
+	close(ifd);
+	verbose("mapped initrd %s size=%lu @ %p\n", name, st.st_size, iaddr);
+	return len;
+}
+
+static unsigned long setup_pagetables(unsigned long mem,
+				      unsigned long initrd_size,
+				      unsigned long page_offset)
+{
+	u32 *pgdir, *linear;
+	unsigned int mapped_pages, i, linear_pages;
+	unsigned int ptes_per_page = getpagesize()/sizeof(u32);
+
+	/* If we can map all of memory above page_offset, we do so. */
+	if (mem <= -page_offset)
+		mapped_pages = mem/getpagesize();
+	else
+		mapped_pages = -page_offset/getpagesize();
+
+	/* Each linear PTE page can map ptes_per_page pages. */
+	linear_pages = (mapped_pages + ptes_per_page-1)/ptes_per_page;
+
+	/* We lay out top-level then linear mapping immediately below initrd */
+	pgdir = (void *)mem - initrd_size - getpagesize();
+	linear = (void *)pgdir - linear_pages*getpagesize();
+
+	for (i = 0; i < mapped_pages; i++)
+		linear[i] = ((i * getpagesize()) | PAGE_PRESENT);
+
+	/* Now set up pgd so that this memory is at page_offset */
+	for (i = 0; i < mapped_pages; i += ptes_per_page) {
+		pgdir[(i + page_offset/getpagesize())/ptes_per_page]
+			= (((u32)linear + i*sizeof(u32)) | PAGE_PRESENT);
+	}
+
+	verbose("Linear mapping of %u pages in %u pte pages at %p\n",
+		mapped_pages, linear_pages, linear);
+
+	return (unsigned long)pgdir;
+}
+
+static void concat(char *dst, char *args[])
+{
+	unsigned int i, len = 0;
+
+	for (i = 0; args[i]; i++) {
+		strcpy(dst+len, args[i]);
+		strcat(dst+len, " ");
+		len += strlen(args[i]) + 1;
+	}
+	/* In case it's empty. */
+	dst[len] = '\0';
+}
+
+static int tell_kernel(u32 pgdir, u32 start, u32 page_offset)
+{
+	u32 args[] = { LHREQ_INITIALIZE,
+		       LGUEST_GUEST_TOP/getpagesize(), /* Just below us */
+		       pgdir, start, page_offset };
+	int fd;
+
+	fd = open_or_die("/dev/lguest", O_RDWR);
+	if (write(fd, args, sizeof(args)) < 0)
+		err(1, "Writing to /dev/lguest");
+	return fd;
+}
+
+static void set_fd(int fd, struct device_list *devices)
+{
+	FD_SET(fd, &devices->infds);
+	if (fd > devices->max_infd)
+		devices->max_infd = fd;
+}
+
+/* When input arrives, we tell the kernel to kick lguest out with -EAGAIN. */
+static void wake_parent(int pipefd, int lguest_fd, struct device_list *devices)
+{
+	set_fd(pipefd, devices);
+
+	for (;;) {
+		fd_set rfds = devices->infds;
+		u32 args[] = { LHREQ_BREAK, 1 };
+
+		select(devices->max_infd+1, &rfds, NULL, NULL, NULL);
+		if (FD_ISSET(pipefd, &rfds)) {
+			int ignorefd;
+			if (read(pipefd, &ignorefd, sizeof(ignorefd)) == 0)
+				exit(0);
+			FD_CLR(ignorefd, &devices->infds);
+		} else
+			write(lguest_fd, args, sizeof(args));
+	}
+}
+
+static int setup_waker(int lguest_fd, struct device_list *device_list)
+{
+	int pipefd[2], child;
+
+	pipe(pipefd);
+	child = fork();
+	if (child == -1)
+		err(1, "forking");
+
+	if (child == 0) {
+		close(pipefd[1]);
+		wake_parent(pipefd[0], lguest_fd, device_list);
+	}
+	close(pipefd[0]);
+
+	return pipefd[1];
+}
+
+static void *_check_pointer(unsigned long addr, unsigned int size,
+			    unsigned int line)
+{
+	if (addr >= LGUEST_GUEST_TOP || addr + size >= LGUEST_GUEST_TOP)
+		errx(1, "%s:%i: Invalid address %li", __FILE__, line, addr);
+	return (void *)addr;
+}
+#define check_pointer(addr,size) _check_pointer(addr, size, __LINE__)
+
+/* Returns pointer to dma->used_len */
+static u32 *dma2iov(unsigned long dma, struct iovec iov[], unsigned *num)
+{
+	unsigned int i;
+	struct lguest_dma *udma;
+
+	udma = check_pointer(dma, sizeof(*udma));
+	for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) {
+		if (!udma->len[i])
+			break;
+
+		iov[i].iov_base = check_pointer(udma->addr[i], udma->len[i]);
+		iov[i].iov_len = udma->len[i];
+	}
+	*num = i;
+	return &udma->used_len;
+}
+
+static u32 *get_dma_buffer(int fd, void *key,
+			   struct iovec iov[], unsigned int *num, u32 *irq)
+{
+	u32 buf[] = { LHREQ_GETDMA, (u32)key };
+	unsigned long udma;
+	u32 *res;
+
+	udma = write(fd, buf, sizeof(buf));
+	if (udma == (unsigned long)-1)
+		return NULL;
+
+	/* Kernel stashes irq in ->used_len. */
+	res = dma2iov(udma, iov, num);
+	*irq = *res;
+	return res;
+}
+
+static void trigger_irq(int fd, u32 irq)
+{
+	u32 buf[] = { LHREQ_IRQ, irq };
+	if (write(fd, buf, sizeof(buf)) != 0)
+		err(1, "Triggering irq %i", irq);
+}
+
+static void discard_iovec(struct iovec *iov, unsigned int *num)
+{
+	static char discard_buf[1024];
+	*num = 1;
+	iov->iov_base = discard_buf;
+	iov->iov_len = sizeof(discard_buf);
+}
+
+static struct termios orig_term;
+static void restore_term(void)
+{
+	tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
+}
+
+struct console_abort
+{
+	int count;
+	struct timeval start;
+};
+
+/* We DMA input to buffer bound at start of console page. */
+static bool handle_console_input(int fd, struct device *dev)
+{
+	u32 irq = 0, *lenp;
+	int len;
+	unsigned int num;
+	struct iovec iov[LGUEST_MAX_DMA_SECTIONS];
+	struct console_abort *abort = dev->priv;
+
+	lenp = get_dma_buffer(fd, dev->mem, iov, &num, &irq);
+	if (!lenp) {
+		warn("console: no dma buffer!");
+		discard_iovec(iov, &num);
+	}
+
+	len = readv(dev->fd, iov, num);
+	if (len <= 0) {
+		warnx("Failed to get console input, ignoring console.");
+		len = 0;
+	}
+
+	if (lenp) {
+		*lenp = len;
+		trigger_irq(fd, irq);
+	}
+
+	/* Three ^C within one second?  Exit. */
+	if (len == 1 && ((char *)iov[0].iov_base)[0] == 3) {
+		if (!abort->count++)
+			gettimeofday(&abort->start, NULL);
+		else if (abort->count == 3) {
+			struct timeval now;
+			gettimeofday(&now, NULL);
+			if (now.tv_sec <= abort->start.tv_sec+1) {
+				/* Make sure waker is not blocked in BREAK */
+				u32 args[] = { LHREQ_BREAK, 0 };
+				close(waker_fd);
+				write(fd, args, sizeof(args));
+				exit(2);
+			}
+			abort->count = 0;
+		}
+	} else
+		abort->count = 0;
+
+	if (!len) {
+		restore_term();
+		return false;
+	}
+	return true;
+}
+
+static u32 handle_console_output(int fd, const struct iovec *iov,
+				 unsigned num, struct device*dev)
+{
+	return writev(STDOUT_FILENO, iov, num);
+}
+
+static u32 handle_tun_output(int fd, const struct iovec *iov,
+			     unsigned num, struct device *dev)
+{
+	/* Now we've seen output, we should warn if we can't get buffers. */
+	*(bool *)dev->priv = true;
+	return writev(dev->fd, iov, num);
+}
+
+static unsigned long peer_offset(unsigned int peernum)
+{
+	return 4 * peernum;
+}
+
+static bool handle_tun_input(int fd, struct device *dev)
+{
+	u32 irq = 0, *lenp;
+	int len;
+	unsigned num;
+	struct iovec iov[LGUEST_MAX_DMA_SECTIONS];
+
+	lenp = get_dma_buffer(fd, dev->mem+peer_offset(NET_PEERNUM), iov, &num,
+			      &irq);
+	if (!lenp) {
+		if (*(bool *)dev->priv)
+			warn("network: no dma buffer!");
+		discard_iovec(iov, &num);
+	}
+
+	len = readv(dev->fd, iov, num);
+	if (len <= 0)
+		err(1, "reading network");
+	if (lenp) {
+		*lenp = len;
+		trigger_irq(fd, irq);
+	}
+	verbose("tun input packet len %i [%02x %02x] (%s)\n", len,
+		((u8 *)iov[0].iov_base)[0], ((u8 *)iov[0].iov_base)[1],
+		lenp ? "sent" : "discarded");
+	return true;
+}
+
+static u32 handle_block_output(int fd, const struct iovec *iov,
+			       unsigned num, struct device *dev)
+{
+	struct lguest_block_page *p = dev->mem;
+	u32 irq, *lenp;
+	unsigned int len, reply_num;
+	struct iovec reply[LGUEST_MAX_DMA_SECTIONS];
+	off64_t device_len, off = (off64_t)p->sector * 512;
+
+	device_len = *(off64_t *)dev->priv;
+
+	if (off >= device_len)
+		err(1, "Bad offset %llu vs %llu", off, device_len);
+	if (lseek64(dev->fd, off, SEEK_SET) != off)
+		err(1, "Bad seek to sector %i", p->sector);
+
+	verbose("Block: %s at offset %llu\n", p->type ? "WRITE" : "READ", off);
+
+	lenp = get_dma_buffer(fd, dev->mem, reply, &reply_num, &irq);
+	if (!lenp)
+		err(1, "Block request didn't give us a dma buffer");
+
+	if (p->type) {
+		len = writev(dev->fd, iov, num);
+		if (off + len > device_len) {
+			ftruncate(dev->fd, device_len);
+			errx(1, "Write past end %llu+%u", off, len);
+		}
+		*lenp = 0;
+	} else {
+		len = readv(dev->fd, reply, reply_num);
+		*lenp = len;
+	}
+
+	p->result = 1 + (p->bytes != len);
+	trigger_irq(fd, irq);
+	return 0;
+}
+
+static void handle_output(int fd, unsigned long dma, unsigned long key,
+			  struct device_list *devices)
+{
+	struct device *i;
+	u32 *lenp;
+	struct iovec iov[LGUEST_MAX_DMA_SECTIONS];
+	unsigned num = 0;
+
+	lenp = dma2iov(dma, iov, &num);
+	for (i = devices->dev; i; i = i->next) {
+		if (i->handle_output && key == i->watch_key) {
+			*lenp = i->handle_output(fd, iov, num, i);
+			return;
+		}
+	}
+	warnx("Pending dma %p, key %p", (void *)dma, (void *)key);
+}
+
+static void handle_input(int fd, struct device_list *devices)
+{
+	struct timeval poll = { .tv_sec = 0, .tv_usec = 0 };
+
+	for (;;) {
+		struct device *i;
+		fd_set fds = devices->infds;
+
+		if (select(devices->max_infd+1, &fds, NULL, NULL, &poll) == 0)
+			break;
+
+		for (i = devices->dev; i; i = i->next) {
+			if (i->handle_input && FD_ISSET(i->fd, &fds)) {
+				if (!i->handle_input(fd, i)) {
+					FD_CLR(i->fd, &devices->infds);
+					/* Tell waker to ignore it too... */
+					write(waker_fd, &i->fd, sizeof(i->fd));
+				}
+			}
+		}
+	}
+}
+
+static struct lguest_device_desc *new_dev_desc(u16 type, u16 features,
+					       u16 num_pages)
+{
+	static unsigned long top = LGUEST_GUEST_TOP;
+	struct lguest_device_desc *desc;
+
+	desc = malloc(sizeof(*desc));
+	desc->type = type;
+	desc->num_pages = num_pages;
+	desc->features = features;
+	desc->status = 0;
+	if (num_pages) {
+		top -= num_pages*getpagesize();
+		map_zeroed_pages(top, num_pages);
+		desc->pfn = top / getpagesize();
+	} else
+		desc->pfn = 0;
+	return desc;
+}
+
+static struct device *new_device(struct device_list *devices,
+				 u16 type, u16 num_pages, u16 features,
+				 int fd,
+				 bool (*handle_input)(int, struct device *),
+				 unsigned long watch_off,
+				 u32 (*handle_output)(int,
+						      const struct iovec *,
+						      unsigned,
+						      struct device *))
+{
+	struct device *dev = malloc(sizeof(*dev));
+
+	/* Append to device list. */
+	*devices->lastdev = dev;
+	dev->next = NULL;
+	devices->lastdev = &dev->next;
+
+	dev->fd = fd;
+	if (handle_input)
+		set_fd(dev->fd, devices);
+	dev->desc = new_dev_desc(type, features, num_pages);
+	dev->mem = (void *)(dev->desc->pfn * getpagesize());
+	dev->handle_input = handle_input;
+	dev->watch_key = (unsigned long)dev->mem + watch_off;
+	dev->handle_output = handle_output;
+	return dev;
+}
+
+static void setup_console(struct device_list *devices)
+{
+	struct device *dev;
+
+	if (tcgetattr(STDIN_FILENO, &orig_term) == 0) {
+		struct termios term = orig_term;
+		term.c_lflag &= ~(ISIG|ICANON|ECHO);
+		tcsetattr(STDIN_FILENO, TCSANOW, &term);
+		atexit(restore_term);
+	}
+
+	/* We don't currently require a page for the console. */
+	dev = new_device(devices, LGUEST_DEVICE_T_CONSOLE, 0, 0,
+			 STDIN_FILENO, handle_console_input,
+			 LGUEST_CONSOLE_DMA_KEY, handle_console_output);
+	dev->priv = malloc(sizeof(struct console_abort));
+	((struct console_abort *)dev->priv)->count = 0;
+	verbose("device %p: console\n",
+		(void *)(dev->desc->pfn * getpagesize()));
+}
+
+static void setup_block_file(const char *filename, struct device_list *devices)
+{
+	int fd;
+	struct device *dev;
+	off64_t *device_len;
+	struct lguest_block_page *p;
+
+	fd = open_or_die(filename, O_RDWR|O_LARGEFILE|O_DIRECT);
+	dev = new_device(devices, LGUEST_DEVICE_T_BLOCK, 1,
+			 LGUEST_DEVICE_F_RANDOMNESS,
+			 fd, NULL, 0, handle_block_output);
+	device_len = dev->priv = malloc(sizeof(*device_len));
+	*device_len = lseek64(fd, 0, SEEK_END);
+	p = dev->mem;
+
+	p->num_sectors = *device_len/512;
+	verbose("device %p: block %i sectors\n",
+		(void *)(dev->desc->pfn * getpagesize()), p->num_sectors);
+}
+
+/* We use fcntl locks to reserve network slots (autocleanup!) */
+static unsigned int find_slot(int netfd, const char *filename)
+{
+	struct flock fl;
+
+	fl.l_type = F_WRLCK;
+	fl.l_whence = SEEK_SET;
+	fl.l_len = 1;
+	for (fl.l_start = 0;
+	     fl.l_start < getpagesize()/sizeof(struct lguest_net);
+	     fl.l_start++) {
+		if (fcntl(netfd, F_SETLK, &fl) == 0)
+			return fl.l_start;
+	}
+	errx(1, "No free slots in network file %s", filename);
+}
+
+static void setup_net_file(const char *filename,
+			   struct device_list *devices)
+{
+	int netfd;
+	struct device *dev;
+
+	netfd = open(filename, O_RDWR, 0);
+	if (netfd < 0) {
+		if (errno == ENOENT) {
+			netfd = open(filename, O_RDWR|O_CREAT, 0600);
+			if (netfd >= 0) {
+				char page[getpagesize()];
+				memset(page, 0, sizeof(page));
+				write(netfd, page, sizeof(page));
+			}
+		}
+		if (netfd < 0)
+			err(1, "cannot open net file '%s'", filename);
+	}
+
+	dev = new_device(devices, LGUEST_DEVICE_T_NET, 1,
+			 find_slot(netfd, filename)|LGUEST_NET_F_NOCSUM,
+			 -1, NULL, 0, NULL);
+
+	/* We overwrite the /dev/zero mapping with the actual file. */
+	if (mmap(dev->mem, getpagesize(), PROT_READ|PROT_WRITE,
+			 MAP_FIXED|MAP_SHARED, netfd, 0) != dev->mem)
+			err(1, "could not mmap '%s'", filename);
+	verbose("device %p: shared net %s, peer %i\n",
+		(void *)(dev->desc->pfn * getpagesize()), filename,
+		dev->desc->features & ~LGUEST_NET_F_NOCSUM);
+}
+
+static u32 str2ip(const char *ipaddr)
+{
+	unsigned int byte[4];
+
+	sscanf(ipaddr, "%u.%u.%u.%u", &byte[0], &byte[1], &byte[2], &byte[3]);
+	return (byte[0] << 24) | (byte[1] << 16) | (byte[2] << 8) | byte[3];
+}
+
+/* adapted from libbridge */
+static void add_to_bridge(int fd, const char *if_name, const char *br_name)
+{
+	int ifidx;
+	struct ifreq ifr;
+
+	if (!*br_name)
+		errx(1, "must specify bridge name");
+
+	ifidx = if_nametoindex(if_name);
+	if (!ifidx)
+		errx(1, "interface %s does not exist!", if_name);
+
+	strncpy(ifr.ifr_name, br_name, IFNAMSIZ);
+	ifr.ifr_ifindex = ifidx;
+	if (ioctl(fd, SIOCBRADDIF, &ifr) < 0)
+		err(1, "can't add %s to bridge %s", if_name, br_name);
+}
+
+static void configure_device(int fd, const char *devname, u32 ipaddr,
+			     unsigned char hwaddr[6])
+{
+	struct ifreq ifr;
+	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
+
+	memset(&ifr, 0, sizeof(ifr));
+	strcpy(ifr.ifr_name, devname);
+	sin->sin_family = AF_INET;
+	sin->sin_addr.s_addr = htonl(ipaddr);
+	if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
+		err(1, "Setting %s interface address", devname);
+	ifr.ifr_flags = IFF_UP;
+	if (ioctl(fd, SIOCSIFFLAGS, &ifr) != 0)
+		err(1, "Bringing interface %s up", devname);
+
+	if (ioctl(fd, SIOCGIFHWADDR, &ifr) != 0)
+		err(1, "getting hw address for %s", devname);
+
+	memcpy(hwaddr, ifr.ifr_hwaddr.sa_data, 6);
+}
+
+static void setup_tun_net(const char *arg, struct device_list *devices)
+{
+	struct device *dev;
+	struct ifreq ifr;
+	int netfd, ipfd;
+	u32 ip;
+	const char *br_name = NULL;
+
+	netfd = open_or_die("/dev/net/tun", O_RDWR);
+	memset(&ifr, 0, sizeof(ifr));
+	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
+	strcpy(ifr.ifr_name, "tap%d");
+	if (ioctl(netfd, TUNSETIFF, &ifr) != 0)
+		err(1, "configuring /dev/net/tun");
+	ioctl(netfd, TUNSETNOCSUM, 1);
+
+	/* You will be peer 1: we should create enough jitter to randomize */
+	dev = new_device(devices, LGUEST_DEVICE_T_NET, 1,
+			 NET_PEERNUM|LGUEST_DEVICE_F_RANDOMNESS, netfd,
+			 handle_tun_input, peer_offset(0), handle_tun_output);
+	dev->priv = malloc(sizeof(bool));
+	*(bool *)dev->priv = false;
+
+	ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
+	if (ipfd < 0)
+		err(1, "opening IP socket");
+
+	if (!strncmp(BRIDGE_PFX, arg, strlen(BRIDGE_PFX))) {
+		ip = INADDR_ANY;
+		br_name = arg + strlen(BRIDGE_PFX);
+		add_to_bridge(ipfd, ifr.ifr_name, br_name);
+	} else
+		ip = str2ip(arg);
+
+	/* We are peer 0, ie. first slot. */
+	configure_device(ipfd, ifr.ifr_name, ip, dev->mem);
+
+	/* Set "promisc" bit: we want every single packet. */
+	*((u8 *)dev->mem) |= 0x1;
+
+	close(ipfd);
+
+	verbose("device %p: tun net %u.%u.%u.%u\n",
+		(void *)(dev->desc->pfn * getpagesize()),
+		(u8)(ip>>24), (u8)(ip>>16), (u8)(ip>>8), (u8)ip);
+	if (br_name)
+		verbose("attached to bridge: %s\n", br_name);
+}
+
+/* Now we know how much memory we have, we copy in device descriptors */
+static void map_device_descriptors(struct device_list *devs, unsigned long mem)
+{
+	struct device *i;
+	unsigned int num;
+	struct lguest_device_desc *descs;
+
+	/* Device descriptor array sits just above top of normal memory */
+	descs = map_zeroed_pages(mem, 1);
+
+	for (i = devs->dev, num = 0; i; i = i->next, num++) {
+		if (num == LGUEST_MAX_DEVICES)
+			errx(1, "too many devices");
+		verbose("Device %i: %s\n", num,
+			i->desc->type == LGUEST_DEVICE_T_NET ? "net"
+			: i->desc->type == LGUEST_DEVICE_T_CONSOLE ? "console"
+			: i->desc->type == LGUEST_DEVICE_T_BLOCK ? "block"
+			: "unknown");
+		descs[num] = *i->desc;
+		free(i->desc);
+		i->desc = &descs[num];
+	}
+}
+
+static void __attribute__((noreturn))
+run_guest(int lguest_fd, struct device_list *device_list)
+{
+	for (;;) {
+		u32 args[] = { LHREQ_BREAK, 0 };
+		unsigned long arr[2];
+		int readval;
+
+		/* We read from the /dev/lguest device to run the Guest. */
+		readval = read(lguest_fd, arr, sizeof(arr));
+
+		if (readval == sizeof(arr)) {
+			handle_output(lguest_fd, arr[0], arr[1], device_list);
+			continue;
+		} else if (errno == ENOENT) {
+			char reason[1024] = { 0 };
+			read(lguest_fd, reason, sizeof(reason)-1);
+			errx(1, "%s", reason);
+		} else if (errno != EAGAIN)
+			err(1, "Running guest failed");
+		handle_input(lguest_fd, device_list);
+		if (write(lguest_fd, args, sizeof(args)) < 0)
+			err(1, "Resetting break");
+	}
+}
+
+static struct option opts[] = {
+	{ "verbose", 0, NULL, 'v' },
+	{ "sharenet", 1, NULL, 's' },
+	{ "tunnet", 1, NULL, 't' },
+	{ "block", 1, NULL, 'b' },
+	{ "initrd", 1, NULL, 'i' },
+	{ NULL },
+};
+static void usage(void)
+{
+	errx(1, "Usage: lguest [--verbose] "
+	     "[--sharenet=<filename>|--tunnet=(<ipaddr>|bridge:<bridgename>)\n"
+	     "|--block=<filename>|--initrd=<filename>]...\n"
+	     "<mem-in-mb> vmlinux [args...]");
+}
+
+int main(int argc, char *argv[])
+{
+	unsigned long mem, pgdir, start, page_offset, initrd_size = 0;
+	int c, lguest_fd;
+	struct device_list device_list;
+	void *boot = (void *)0;
+	const char *initrd_name = NULL;
+
+	device_list.max_infd = -1;
+	device_list.dev = NULL;
+	device_list.lastdev = &device_list.dev;
+	FD_ZERO(&device_list.infds);
+
+	while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) {
+		switch (c) {
+		case 'v':
+			verbose = true;
+			break;
+		case 's':
+			setup_net_file(optarg, &device_list);
+			break;
+		case 't':
+			setup_tun_net(optarg, &device_list);
+			break;
+		case 'b':
+			setup_block_file(optarg, &device_list);
+			break;
+		case 'i':
+			initrd_name = optarg;
+			break;
+		default:
+			warnx("Unknown argument %s", argv[optind]);
+			usage();
+		}
+	}
+	if (optind + 2 > argc)
+		usage();
+
+	/* We need a console device */
+	setup_console(&device_list);
+
+	/* First we map /dev/zero over all of guest-physical memory. */
+	mem = atoi(argv[optind]) * 1024 * 1024;
+	map_zeroed_pages(0, mem / getpagesize());
+
+	/* Now we load the kernel */
+	start = load_kernel(open_or_die(argv[optind+1], O_RDONLY),
+			    &page_offset);
+
+	/* Write the device descriptors into memory. */
+	map_device_descriptors(&device_list, mem);
+
+	/* Map the initrd image if requested */
+	if (initrd_name) {
+		initrd_size = load_initrd(initrd_name, mem);
+		*(unsigned long *)(boot+0x218) = mem - initrd_size;
+		*(unsigned long *)(boot+0x21c) = initrd_size;
+		*(unsigned char *)(boot+0x210) = 0xFF;
+	}
+
+	/* Set up the initial linear pagetables. */
+	pgdir = setup_pagetables(mem, initrd_size, page_offset);
+
+	/* E820 memory map: ours is a simple, single region. */
+	*(char*)(boot+E820NR) = 1;
+	*((struct e820entry *)(boot+E820MAP))
+		= ((struct e820entry) { 0, mem, E820_RAM });
+	/* Command line pointer and command line (at 4096) */
+	*(void **)(boot + 0x228) = boot + 4096;
+	concat(boot + 4096, argv+optind+2);
+	/* Paravirt type: 1 == lguest */
+	*(int *)(boot + 0x23c) = 1;
+
+	lguest_fd = tell_kernel(pgdir, start, page_offset);
+	waker_fd = setup_waker(lguest_fd, &device_list);
+
+	run_guest(lguest_fd, &device_list);
+}
diff --git a/Documentation/lguest/lguest.txt b/Documentation/lguest/lguest.txt
new file mode 100644
index 0000000..821617b
--- /dev/null
+++ b/Documentation/lguest/lguest.txt
@@ -0,0 +1,129 @@
+Rusty's Remarkably Unreliable Guide to Lguest
+	- or, A Young Coder's Illustrated Hypervisor
+http://lguest.ozlabs.org
+
+Lguest is designed to be a minimal hypervisor for the Linux kernel, for
+Linux developers and users to experiment with virtualization with the
+minimum of complexity.  Nonetheless, it should have sufficient
+features to make it useful for specific tasks, and, of course, you are
+encouraged to fork and enhance it.
+
+Features:
+
+- Kernel module which runs in a normal kernel.
+- Simple I/O model for communication.
+- Simple program to create new guests.
+- Logo contains cute puppies: http://lguest.ozlabs.org
+
+Developer features:
+
+- Fun to hack on.
+- No ABI: being tied to a specific kernel anyway, you can change anything.
+- Many opportunities for improvement or feature implementation.
+
+Running Lguest:
+
+- Lguest runs the same kernel as guest and host.  You can configure
+  them differently, but usually it's easiest not to.
+
+  You will need to configure your kernel with the following options:
+
+  CONFIG_HIGHMEM64G=n ("High Memory Support" "64GB")[1]
+  CONFIG_TUN=y/m ("Universal TUN/TAP device driver support")
+  CONFIG_EXPERIMENTAL=y ("Prompt for development and/or incomplete code/drivers")
+  CONFIG_PARAVIRT=y ("Paravirtualization support (EXPERIMENTAL)")
+  CONFIG_LGUEST=y/m ("Linux hypervisor example code")
+
+  and I recommend:
+  CONFIG_HZ=100 ("Timer frequency")[2]
+
+- A tool called "lguest" is available in this directory: type "make"
+  to build it.  If you didn't build your kernel in-tree, use "make
+  O=<builddir>".
+
+- Create or find a root disk image.  There are several useful ones
+  around, such as the xm-test tiny root image at
+	  http://xm-test.xensource.com/ramdisks/initrd-1.1-i386.img
+
+  For more serious work, I usually use a distribution ISO image and
+  install it under qemu, then make multiple copies:
+
+	  dd if=/dev/zero of=rootfile bs=1M count=2048
+	  qemu -cdrom image.iso -hda rootfile -net user -net nic -boot d
+
+- "modprobe lg" if you built it as a module.
+
+- Run an lguest as root:
+
+      Documentation/lguest/lguest 64m vmlinux --tunnet=192.168.19.1 --block=rootfile root=/dev/lgba
+
+   Explanation:
+    64m: the amount of memory to use.
+
+    vmlinux: the kernel image found in the top of your build directory.  You
+       can also use a standard bzImage.
+
+    --tunnet=192.168.19.1: configures a "tap" device for networking with this
+       IP address.
+
+    --block=rootfile: a file or block device which becomes /dev/lgba
+       inside the guest.
+
+    root=/dev/lgba: this (and anything else on the command line) are
+       kernel boot parameters.
+
+- Configuring networking.  I usually have the host masquerade, using
+  "iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE" and "echo 1 >
+  /proc/sys/net/ipv4/ip_forward".  In this example, I would configure
+  eth0 inside the guest at 192.168.19.2.
+
+  Another method is to bridge the tap device to an external interface
+  using --tunnet=bridge:<bridgename>, and perhaps run dhcp on the guest
+  to obtain an IP address.  The bridge needs to be configured first:
+  this option simply adds the tap interface to it.
+
+  A simple example on my system:
+
+    ifconfig eth0 0.0.0.0
+    brctl addbr lg0
+    ifconfig lg0 up
+    brctl addif lg0 eth0
+    dhclient lg0
+
+  Then use --tunnet=bridge:lg0 when launching the guest.
+
+  See http://linux-net.osdl.org/index.php/Bridge for general information
+  on how to get bridging working.
+
+- You can also create an inter-guest network using
+  "--sharenet=<filename>": any two guests using the same file are on
+  the same network.  This file is created if it does not exist.
+
+Lguest I/O model:
+
+Lguest uses a simplified DMA model plus shared memory for I/O.  Guests
+can communicate with each other if they share underlying memory
+(usually by the lguest program mmaping the same file), but they can
+use any non-shared memory to communicate with the lguest process.
+
+Guests can register DMA buffers at any key (must be a valid physical
+address) using the LHCALL_BIND_DMA(key, dmabufs, num<<8|irq)
+hypercall.  "dmabufs" is the physical address of an array of "num"
+"struct lguest_dma": each contains a used_len, and an array of
+physical addresses and lengths.  When a transfer occurs, the
+"used_len" field of one of the buffers which has used_len 0 will be
+set to the length transferred and the irq will fire.
+
+Using an irq value of 0 unbinds the dma buffers.
+
+To send DMA, the LHCALL_SEND_DMA(key, dma_physaddr) hypercall is used,
+and the number of bytes transferred is written to the used_len field.
+This can be 0 if no one else has bound a DMA buffer to that key, or if
+some other error occurred.  DMA buffers bound by the same guest are
+ignored.
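+
+As a purely illustrative guest-side sketch of the calls described above
+(hcall() stands in for whatever hypercall mechanism the guest uses, and
+key, recv_buf_phys, send_buf_phys and msg_len are placeholder values):
+
+	struct lguest_dma rcv = { 0 }, snd = { 0 };
+
+	rcv.len[0]  = PAGE_SIZE;
+	rcv.addr[0] = recv_buf_phys;	/* guest-physical receive buffer */
+
+	/* Offer one (num = 1) receive buffer at "key"; raise irq 13 when
+	 * someone DMAs into it (rcv.used_len then holds the length). */
+	hcall(LHCALL_BIND_DMA, key, __pa(&rcv), (1 << 8) | 13);
+
+	/* Send one buffer to whoever is bound at "key"; afterwards
+	 * snd.used_len holds the number of bytes actually transferred. */
+	snd.len[0]  = msg_len;
+	snd.addr[0] = send_buf_phys;
+	hcall(LHCALL_SEND_DMA, key, __pa(&snd), 0);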
+
+Cheers!
+Rusty Russell rusty@rustcorp.com.au.
+
+[1] These are on various places on the TODO list, waiting for you to
+    get annoyed enough at the limitation to fix it.
+[2] Lguest is not yet tickless when idle.  See [1].
diff --git a/Documentation/oops-tracing.txt b/Documentation/oops-tracing.txt
index 23e6dde..7f60dfe 100644
--- a/Documentation/oops-tracing.txt
+++ b/Documentation/oops-tracing.txt
@@ -251,6 +251,8 @@
   7: 'U' if a user or user application specifically requested that the
      Tainted flag be set, ' ' otherwise.
 
+  8: 'D' if the kernel has died recently, i.e. there was an OOPS or BUG.
+
 The primary reason for the 'Tainted: ' string is to tell kernel
 debuggers if this is a clean kernel or if anything unusual has
 occurred.  Tainting is permanent: even if an offending module is
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
new file mode 100644
index 0000000..af1a282
--- /dev/null
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -0,0 +1,160 @@
+Freezing of tasks
+	(C) 2007 Rafael J. Wysocki <rjw@sisk.pl>, GPL
+
+I. What is the freezing of tasks?
+
+The freezing of tasks is a mechanism by which user space processes and some
+kernel threads are controlled during hibernation or system-wide suspend (on some
+architectures).
+
+II. How does it work?
+
+There are four per-task flags used for that, PF_NOFREEZE, PF_FROZEN, TIF_FREEZE
+and PF_FREEZER_SKIP (the last one is auxiliary).  The tasks that have
+PF_NOFREEZE unset (all user space processes and some kernel threads) are
+regarded as 'freezable' and treated in a special way before the system enters a
+suspend state as well as before a hibernation image is created (in what follows
+we only consider hibernation, but the description also applies to suspend).
+
+Namely, as the first step of the hibernation procedure the function
+freeze_processes() (defined in kernel/power/process.c) is called.  It executes
+try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
+sends a fake signal to each of them.  A task that receives such a signal and has
+TIF_FREEZE set should react to it by calling the refrigerator() function
+(defined in kernel/power/process.c), which sets the task's PF_FROZEN flag,
+changes its state to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is
+cleared for it.  Then, we say that the task is 'frozen' and therefore the set of
+functions handling this mechanism is called 'the freezer' (these functions are
+defined in kernel/power/process.c and include/linux/freezer.h).  User space
+processes are generally frozen before kernel threads.
+
+It is not recommended to call refrigerator() directly.  Instead, it is
+recommended to use the try_to_freeze() function (defined in
+include/linux/freezer.h), which checks the task's TIF_FREEZE flag and makes the
+task enter refrigerator() if the flag is set.
+
+For user space processes try_to_freeze() is called automatically from the
+signal-handling code, but the freezable kernel threads need to call it
+explicitly in suitable places.  The code to do this may look like the following:
+
+	do {
+		hub_events();
+		wait_event_interruptible(khubd_wait,
+					!list_empty(&hub_event_list));
+		try_to_freeze();
+	} while (!signal_pending(current));
+
+(from drivers/usb/core/hub.c::hub_thread()).
+
+If a freezable kernel thread fails to call try_to_freeze() after the freezer has
+set TIF_FREEZE for it, the freezing of tasks will fail and the entire
+hibernation operation will be cancelled.  For this reason, freezable kernel
+threads must call try_to_freeze() somewhere.
+
+After the system memory state has been restored from a hibernation image and
+devices have been reinitialized, the function thaw_processes() is called in
+order to clear the PF_FROZEN flag for each frozen task.  Then, the tasks that
+have been frozen leave refrigerator() and continue running.
+
+III. Which kernel threads are freezable?
+
+Kernel threads are not freezable by default.  However, a kernel thread may clear
+PF_NOFREEZE for itself by calling set_freezable() (clearing PF_NOFREEZE
+directly is strongly discouraged).  From this point on it is regarded as freezable
+and must call try_to_freeze() in a suitable place.
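+
+A minimal sketch of such a thread (set_freezable(), try_to_freeze() and the
+kthread helpers are the interfaces named above; the thread function, wait
+queue and work helpers are made-up names):
+
+	static int my_thread_fn(void *data)
+	{
+		set_freezable();	/* opt in: clears PF_NOFREEZE */
+
+		while (!kthread_should_stop()) {
+			do_pending_work();	/* whatever the thread exists for */
+			wait_event_interruptible(my_wait_queue,
+						 work_is_pending() ||
+						 kthread_should_stop());
+			try_to_freeze();	/* enter refrigerator() if TIF_FREEZE is set */
+		}
+		return 0;
+	}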
+
+IV. Why do we do that?
+
+Generally speaking, there are a couple of reasons to use the freezing of tasks:
+
+1. The principal reason is to prevent filesystems from being damaged after
+hibernation.  At the moment we have no simple means of checkpointing
+filesystems, so if there are any modifications made to filesystem data and/or
+metadata on disks, we cannot bring them back to the state from before the
+modifications.  At the same time each hibernation image contains some
+filesystem-related information that must be consistent with the state of the
+on-disk data and metadata after the system memory state has been restored from
+the image (otherwise the filesystems will be damaged in a nasty way, usually
+making them almost impossible to repair).  We therefore freeze tasks that might
+cause the on-disk filesystems' data and metadata to be modified after the
+hibernation image has been created and before the system is finally powered off.
+The majority of these are user space processes, but if any of the kernel threads
+may cause something like this to happen, they have to be freezable.
+
+2. The second reason is to prevent user space processes and some kernel threads
+from interfering with the suspending and resuming of devices.  A user space
+process running on a second CPU while we are suspending devices may, for
+example, be troublesome and without the freezing of tasks we would need some
+safeguards against race conditions that might occur in such a case.
+
+Although Linus Torvalds doesn't like the freezing of tasks, he said this in one
+of the discussions on LKML (http://lkml.org/lkml/2007/4/27/608):
+
+"RJW:> Why we freeze tasks at all or why we freeze kernel threads?
+
+Linus: In many ways, 'at all'.
+
+I _do_ realize the IO request queue issues, and that we cannot actually do
+s2ram with some devices in the middle of a DMA.  So we want to be able to
+avoid *that*, there's no question about that.  And I suspect that stopping
+user threads and then waiting for a sync is practically one of the easier
+ways to do so.
+
+So in practice, the 'at all' may become a 'why freeze kernel threads?' and
+freezing user threads I don't find really objectionable."
+
+Still, there are kernel threads that may want to be freezable.  For example, if
+a kernel thread that belongs to a device driver accesses the device directly, it in
+principle needs to know when the device is suspended, so that it doesn't try to
+access it at that time.  However, if the kernel thread is freezable, it will be
+frozen before the driver's .suspend() callback is executed and it will be
+thawed after the driver's .resume() callback has run, so it won't be accessing
+the device while it's suspended.
+
+3. Another reason for freezing tasks is to prevent user space processes from
+realizing that a hibernation (or suspend) operation is taking place.  Ideally, user
+space processes should not notice that such a system-wide operation has occurred
+and should continue running without any problems after the restore (or resume
+from suspend).  Unfortunately, in the most general case this is quite difficult
+to achieve without the freezing of tasks.  Consider, for example, a process
+that depends on all CPUs being online while it's running.  Since we need to
+disable nonboot CPUs during the hibernation, if this process is not frozen, it
+may notice that the number of CPUs has changed and may start to work incorrectly
+because of that.
+
+V. Are there any problems related to the freezing of tasks?
+
+Yes, there are.
+
+First of all, the freezing of kernel threads may be tricky if they depend on
+one another.  For example, if kernel thread A waits for a completion (in the
+TASK_UNINTERRUPTIBLE state) that needs to be done by freezable kernel thread B
+and B is frozen in the meantime, then A will be blocked until B is thawed, which
+may be undesirable.  That's why kernel threads are not freezable by default.
+
+Second, there are the following two problems related to the freezing of user
+space processes:
+1. Putting processes into an uninterruptible sleep distorts the load average.
+2. Now that we have FUSE, plus the framework for doing device drivers in
+userspace, it gets even more complicated because some userspace processes are
+now doing the sorts of things that kernel threads do
+(https://lists.linux-foundation.org/pipermail/linux-pm/2007-May/012309.html).
+
+Problem 1 seems to be fixable, although it hasn't been fixed so far.  The
+other one is more serious, but it seems that we can work around it by using
+hibernation (and suspend) notifiers (in that case, though, we won't be able to
+avoid the realization by the user space processes that the hibernation is taking
+place).
+
+There are also problems that the freezing of tasks tends to expose, although
+they are not directly related to it.  For example, if request_firmware() is
+called from a device driver's .resume() routine, it will time out and eventually
+fail, because the user land process that should respond to the request is frozen
+at this point.  So, seemingly, the failure is due to the freezing of tasks.
+Suppose, however, that the firmware file is located on a filesystem accessible
+only through another device that hasn't been resumed yet.  In that case,
+request_firmware() will fail regardless of whether or not the freezing of tasks
+is used.  Consequently, the problem is not really related to the freezing of
+tasks, since it generally exists anyway.  [The solution to this particular
+problem is to keep the firmware in memory after it's loaded for the first time
+and upload it from memory to the device whenever necessary.]
diff --git a/Documentation/power/kernel_threads.txt b/Documentation/power/kernel_threads.txt
deleted file mode 100644
index fb57784..0000000
--- a/Documentation/power/kernel_threads.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-KERNEL THREADS
-
-
-Freezer
-
-Upon entering a suspended state the system will freeze all
-tasks. This is done by delivering pseudosignals. This affects
-kernel threads, too. To successfully freeze a kernel thread
-the thread has to check for the pseudosignal and enter the
-refrigerator. Code to do this looks like this:
-
-	do {
-		hub_events();
-		wait_event_interruptible(khubd_wait, !list_empty(&hub_event_list));
-		try_to_freeze();
-	} while (!signal_pending(current));
-
-from drivers/usb/core/hub.c::hub_thread()
-
-
-The Unfreezable
-
-Some kernel threads however, must not be frozen. The kernel must
-be able to finish pending IO operations and later on be able to
-write the memory image to disk. Kernel threads needed to do IO
-must stay awake. Such threads must mark themselves unfreezable
-like this:
-
-	/*
-	 * This thread doesn't need any user-level access,
-	 * so get rid of all our resources.
-	 */
-	daemonize("usb-storage");
-
-	current->flags |= PF_NOFREEZE;
-
-from drivers/usb/storage/usb.c::usb_stor_control_thread()
-
-Such drivers are themselves responsible for staying quiet during
-the actual snapshotting.
diff --git a/Documentation/power/notifiers.txt b/Documentation/power/notifiers.txt
new file mode 100644
index 0000000..9293e4b
--- /dev/null
+++ b/Documentation/power/notifiers.txt
@@ -0,0 +1,50 @@
+Suspend notifiers
+	(C) 2007 Rafael J. Wysocki <rjw@sisk.pl>, GPL
+
+There are some operations that device drivers may want to carry out in their
+.suspend() routines, but shouldn't, because they can cause the hibernation or
+suspend to fail. For example, a driver may want to allocate a substantial amount
+of memory (like 50 MB) in .suspend(), but that shouldn't be done after
+swsusp's memory shrinker has run.
+
+Also, there may be some operations that subsystems want to carry out before a
+hibernation/suspend or after a restore/resume, requiring the system to be fully
+functional, so the drivers' .suspend() and .resume() routines are not suitable
+for this purpose.  For example, device drivers may want to upload firmware to
+their devices after a restore from a hibernation image, but they cannot do it by
+calling request_firmware() from their .resume() routines (user land processes
+are frozen at this point).  The solution may be to load the firmware into
+memory before processes are frozen and upload it from there in the .resume()
+routine.  Of course, a hibernation notifier may be used for this purpose.
+
+The subsystems that have such needs can register suspend notifiers that will be
+called upon the following events by the suspend core:
+
+PM_HIBERNATION_PREPARE	The system is going to hibernate or suspend, tasks will
+			be frozen immediately.
+
+PM_POST_HIBERNATION	The system memory state has been restored from a
+			hibernation image or an error occurred during the
+			hibernation.  Device drivers' .resume() callbacks have
+			been executed and tasks have been thawed.
+
+PM_SUSPEND_PREPARE	The system is preparing for a suspend.
+
+PM_POST_SUSPEND		The system has just resumed or an error occurred during
+			the suspend.  Device drivers' .resume() callbacks have
+			been executed and tasks have been thawed.
+
+It is generally assumed that whatever the notifiers do for
+PM_HIBERNATION_PREPARE should be undone for PM_POST_HIBERNATION.  Analogously,
+operations performed for PM_SUSPEND_PREPARE should be reversed for
+PM_POST_SUSPEND.  Additionally, all of the notifiers are called for
+PM_POST_HIBERNATION if one of them fails for PM_HIBERNATION_PREPARE, and
+all of the notifiers are called for PM_POST_SUSPEND if one of them fails for
+PM_SUSPEND_PREPARE.
+
+The hibernation and suspend notifiers are called with pm_mutex held.  They are
+defined in the usual way, but their last argument is meaningless (it is always
+NULL).  To register and/or unregister a suspend notifier use the functions
+register_pm_notifier() and unregister_pm_notifier(), respectively, defined in
+include/linux/suspend.h.  If you don't need to unregister the notifier, you can
+also use the pm_notifier() macro defined in include/linux/suspend.h.
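
As a concrete illustration of the notifier interface described above, here is
a minimal hedged sketch of a driver module that preloads firmware on
PM_HIBERNATION_PREPARE / PM_SUSPEND_PREPARE (while user space is still able to
service request_firmware()) and releases it on the corresponding POST events.
The my_* identifiers and the firmware file name are hypothetical:

	#include <linux/module.h>
	#include <linux/suspend.h>
	#include <linux/notifier.h>
	#include <linux/firmware.h>

	/* Hypothetical driver state. */
	static const struct firmware *my_fw;
	static struct device *my_dev;	/* set up elsewhere by the driver */

	static int my_pm_callback(struct notifier_block *nb,
				  unsigned long event, void *unused)
	{
		switch (event) {
		case PM_HIBERNATION_PREPARE:
		case PM_SUSPEND_PREPARE:
			/* User space is not frozen yet, so request_firmware()
			 * can be serviced; keep the image for .resume(). */
			if (request_firmware(&my_fw, "my_device.fw", my_dev))
				return NOTIFY_BAD;
			return NOTIFY_OK;
		case PM_POST_HIBERNATION:
		case PM_POST_SUSPEND:
			/* Undo whatever was done for the *_PREPARE event. */
			release_firmware(my_fw);
			my_fw = NULL;
			return NOTIFY_OK;
		default:
			return NOTIFY_DONE;
		}
	}

	static struct notifier_block my_pm_nb = {
		.notifier_call = my_pm_callback,
	};

	static int __init my_init(void)
	{
		return register_pm_notifier(&my_pm_nb);
	}

	static void __exit my_exit(void)
	{
		unregister_pm_notifier(&my_pm_nb);
	}

	module_init(my_init);
	module_exit(my_exit);

Returning NOTIFY_BAD from the *_PREPARE cases causes the transition to be
aborted, matching the failure handling described in the notifier text above.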
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index 152b510..aea7e92 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -140,21 +140,11 @@
 website, and not to the Linux Kernel Mailing List. We are working
 toward merging suspend2 into the mainline kernel.
 
-Q: A kernel thread must voluntarily freeze itself (call 'refrigerator').
-I found some kernel threads that don't do it, and they don't freeze
-so the system can't sleep. Is this a known behavior?
+Q: What is the freezing of tasks and why are we using it?
 
-A: All such kernel threads need to be fixed, one by one. Select the
-place where the thread is safe to be frozen (no kernel semaphores
-should be held at that point and it must be safe to sleep there), and
-add:
-
-       try_to_freeze();
-
-If the thread is needed for writing the image to storage, you should
-instead set the PF_NOFREEZE process flag when creating the thread (and
-be very careful).
-
+A: The freezing of tasks is a mechanism by which user space processes and some
+kernel threads are controlled during hibernation or system-wide suspend (on some
+architectures).  See freezing-of-tasks.txt for details.
 
 Q: What is the difference between "platform" and "shutdown"?
 
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index d42d981..76733a3 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -42,15 +42,16 @@
     1) Defining child nodes of an SOC
     2) Representing devices without a current OF specification
       a) MDIO IO device
-      c) PHY nodes
       b) Gianfar-compatible ethernet nodes
+      c) PHY nodes
       d) Interrupt controllers
       e) I2C
       f) Freescale SOC USB controllers
       g) Freescale SOC SEC Security Engines
       h) Board Control and Status (BCSR)
       i) Freescale QUICC Engine module (QE)
-      g) Flash chip nodes
+      j) Flash chip nodes
+      k) Global Utilities Block
 
   VII - Specifying interrupt information for devices
     1) interrupts property
@@ -626,6 +627,14 @@
 a list of properties, a list of child nodes, and an end token. Every
 child node is a full node structure itself as defined above.
 
+NOTE: The above definition requires that all property definitions for
+a particular node MUST precede any subnode definitions for that node.
+Although the structure would not be ambiguous if properties and
+subnodes were intermingled, the kernel parser requires that the
+properties come first (up until at least 2.6.22).  Any tools
+manipulating a flattened tree must take care to preserve this
+constraint.
+
 4) Device tree "strings" block
 
 In order to save space, property names, which are generally redundant,
@@ -1241,6 +1250,12 @@
       network device.  This is used by the bootwrapper to interpret
       MAC addresses passed by the firmware when no information other
       than indices is available to associate an address with a device.
+    - phy-connection-type : a string naming the controller/PHY interface type,
+      i.e., "mii" (default), "rmii", "gmii", "rgmii", "rgmii-id", "sgmii",
+      "tbi", or "rtbi".  This property is only really needed if the connection
+      is of type "rgmii-id", as all other connection types are detected by
+      hardware.
+
 
   Example:
 
@@ -1782,6 +1797,33 @@
  		partition-names = "fs\0firmware";
  	};
 
+   k) Global Utilities Block
+
+   The global utilities block controls power management, I/O device
+   enabling, power-on-reset configuration monitoring, general-purpose
+   I/O signal configuration, alternate function selection for multiplexed
+   signals, and clock control.
+
+   Required properties:
+
+    - compatible : Should define the compatible device type for
+      global-utilities.
+    - reg : Offset and length of the register set for the device.
+
+  Recommended properties:
+
+    - fsl,has-rstcr : Indicates that the global utilities register set
+      contains a functioning "reset control register" (i.e. the board
+      is wired to reset upon setting the HRESET_REQ bit in this register).
+
+    Example:
+
+	global-utilities@e0000 {	/* global utilities block */
+		compatible = "fsl,mpc8548-guts";
+		reg = <e0000 1000>;
+		fsl,has-rstcr;
+	};
+
    More devices will be defined as this spec matures.
 
 VII - Specifying interrupt information for devices
diff --git a/Documentation/rtc.txt b/Documentation/rtc.txt
index 7c701b8..c931d61 100644
--- a/Documentation/rtc.txt
+++ b/Documentation/rtc.txt
@@ -385,7 +385,7 @@
 		/* not all RTCs support periodic IRQs */
 		if (errno == ENOTTY) {
 			fprintf(stderr, "\nNo periodic IRQ support\n");
-			return 0;
+			goto done;
 		}
 		perror("RTC_IRQP_READ ioctl");
 		exit(errno);
diff --git a/Documentation/spi/spi-lm70llp b/Documentation/spi/spi-lm70llp
new file mode 100644
index 0000000..154bd02
--- /dev/null
+++ b/Documentation/spi/spi-lm70llp
@@ -0,0 +1,69 @@
+spi_lm70llp :  LM70-LLP parport-to-SPI adapter
+==============================================
+
+Supported board/chip:
+  * National Semiconductor LM70 LLP evaluation board
+    Datasheet: http://www.national.com/pf/LM/LM70.html
+
+Author:
+        Kaiwan N Billimoria <kaiwan@designergraphix.com>
+
+Description
+-----------
+This driver provides glue code connecting a National Semiconductor LM70 LLP
+temperature sensor evaluation board to the kernel's SPI core subsystem.
+
+In effect, this driver turns the parallel port interface on the eval board
+into a SPI bus with a single device, which will be driven by the generic
+LM70 driver (drivers/hwmon/lm70.c).
+
+The hardware interfacing on the LM70 LLP eval board is as follows:
+
+   Parallel                 LM70 LLP
+     Port      Direction   JP2 Header
+   ----------- --------- ----------------
+      D0     2      -         -
+      D1     3     -->      V+   5
+      D2     4     -->      V+   5
+      D3     5     -->      V+   5
+      D4     6     -->      V+   5
+      D5     7     -->      nCS  8
+      D6     8     -->      SCLK 3
+      D7     9     -->      SI/O 5
+     GND    25      -       GND  7
+    Select  13     <--      SI/O 1
+   ----------- --------- ----------------
+
+Note that since the LM70 uses a "3-wire" variant of SPI, the SI/SO pin
+is connected to both pin D7 (as Master Out) and Select (as Master In)
+using an arrangement that lets either the parport or the LM70 pull the
+pin low.  This can't be shared with true SPI devices, but other 3-wire
+devices might share the same SI/SO pin.
+
+The bitbanger routine in this driver (lm70_txrx) is called back from
+the bound "hwmon/lm70" protocol driver through its sysfs hook, using a
+spi_write_then_read() call.  It performs Mode 0 (SPI/Microwire) bitbanging.
+The lm70 driver then interprets the resulting digital temperature value
+and exports it through sysfs.
+
+A "gotcha": National Semiconductor's LM70 LLP eval board circuit schematic
+shows that the SI/O line from the LM70 chip is connected to the base of a
+transistor Q1 (and also a pullup, and a zener diode to D7); while the
+collector is tied to VCC.
+
+Interpreting this circuit, when the LM70 SI/O line is High (or tristate
+and not grounded by the host via D7), the transistor conducts and switches
+the collector to zero, which is reflected on pin 13 of the DB25 parport
+connector.  When SI/O is Low (driven by the LM70 or the host) on the other
+hand, the transistor is cut off and the voltage tied to its collector is
+reflected on pin 13 as a High level.
+
+So: the getmiso inline routine in this driver takes this fact into account,
+inverting the value read at pin 13.
+
+
+Thanks to
+---------
+o David Brownell for mentoring the SPI-side driver development.
+o Dr.Craig Hollabaugh for the (early) "manual" bitbanging driver version.
+o Nadir Billimoria for help interpreting the circuit schematic.
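
As a hedged illustration of the getmiso inversion described in the spi-lm70llp
text above (not the actual driver code), the read-back of the LM70 SI/O level
through DB25 pin 13 could look like this, assuming the generic parport status
helpers, where pin 13 is reported by PARPORT_STATUS_SELECT:

	#include <linux/parport.h>

	/*
	 * Read the LM70 SI/O level back through the parport Select status
	 * line (DB25 pin 13).  The transistor Q1 on the eval board inverts
	 * the signal, so the value read from the status register has to be
	 * inverted again to recover the level actually driven on SI/O.
	 */
	static inline int getmiso(struct parport *pp)
	{
		return !(parport_read_status(pp) & PARPORT_STATUS_SELECT);
	}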
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index df3ff20..a0ccc5b 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -38,7 +38,8 @@
 
 dirty_ratio, dirty_background_ratio, dirty_expire_centisecs,
 dirty_writeback_centisecs, vfs_cache_pressure, laptop_mode,
-block_dump, swap_token_timeout, drop-caches:
+block_dump, swap_token_timeout, drop-caches,
+hugepages_treat_as_movable:
 
 See Documentation/filesystems/proc.txt
 
diff --git a/Documentation/video4linux/CARDLIST.bttv b/Documentation/video4linux/CARDLIST.bttv
index b606391..177159c 100644
--- a/Documentation/video4linux/CARDLIST.bttv
+++ b/Documentation/video4linux/CARDLIST.bttv
@@ -66,7 +66,7 @@
  65 -> Lifeview FlyVideo 2000S LR90
  66 -> Terratec TValueRadio                                [153b:1135,153b:ff3b]
  67 -> IODATA GV-BCTV4/PCI                                 [10fc:4050]
- 68 -> 3Dfx VoodooTV FM (Euro), VoodooTV 200 (USA)         [121a:3000,10b4:2637]
+ 68 -> 3Dfx VoodooTV FM (Euro)                             [10b4:2637]
  69 -> Active Imaging AIMMS
  70 -> Prolink Pixelview PV-BT878P+ (Rev.4C,8E)
  71 -> Lifeview FlyVideo 98EZ (capture only) LR51          [1851:1851]
@@ -145,3 +145,5 @@
 144 -> MagicTV
 145 -> SSAI Security Video Interface                       [4149:5353]
 146 -> SSAI Ultrasound Video Interface                     [414a:5353]
+147 -> VoodooTV 200 (USA)                                  [121a:3000]
+148 -> DViCO FusionHDTV 2                                  [dbc0:d200]
diff --git a/Documentation/video4linux/CARDLIST.cx88 b/Documentation/video4linux/CARDLIST.cx88
index 60f838b..82ac825 100644
--- a/Documentation/video4linux/CARDLIST.cx88
+++ b/Documentation/video4linux/CARDLIST.cx88
@@ -55,3 +55,4 @@
  54 -> Norwood Micro TV Tuner
  55 -> Shenzhen Tungsten Ages Tech TE-DTV-250 / Swann OEM  [c180:c980]
  56 -> Hauppauge WinTV-HVR1300 DVB-T/Hybrid MPEG Encoder   [0070:9600,0070:9601,0070:9602]
+ 57 -> ADS Tech Instant Video PCI                          [1421:0390]
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index 712e8c8..3f8aeab 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -114,3 +114,4 @@
 113 -> Elitegroup ECS TVP3XP FM1246 Tuner Card (PAL,FM) [1019:4cb6]
 114 -> KWorld DVB-T 210                         [17de:7250]
 115 -> Sabrent PCMCIA TV-PCB05                  [0919:2003]
+116 -> 10MOONS TM300 TV Card                    [1131:2304]
diff --git a/Documentation/video4linux/CARDLIST.tuner b/Documentation/video4linux/CARDLIST.tuner
index 44134f0..a88c02d 100644
--- a/Documentation/video4linux/CARDLIST.tuner
+++ b/Documentation/video4linux/CARDLIST.tuner
@@ -40,7 +40,7 @@
 tuner=39 - LG NTSC (newer TAPC series)
 tuner=40 - HITACHI V7-J180AT
 tuner=41 - Philips PAL_MK (FI1216 MK)
-tuner=42 - Philips 1236D ATSC/NTSC dual in
+tuner=42 - Philips FCV1236D ATSC/NTSC dual in
 tuner=43 - Philips NTSC MK3 (FM1236MK3 or FM1236/F)
 tuner=44 - Philips 4 in 1 (ATI TV Wonder Pro/Conexant)
 tuner=45 - Microtune 4049 FM5
@@ -72,3 +72,4 @@
 tuner=71 - Xceive xc3028
 tuner=72 - Thomson FE6600
 tuner=73 - Samsung TCPG 6121P30A
+tuner=75 - Philips TEA5761 FM Radio
diff --git a/Documentation/video4linux/sn9c102.txt b/Documentation/video4linux/sn9c102.txt
index 279717c..1ffad19 100644
--- a/Documentation/video4linux/sn9c102.txt
+++ b/Documentation/video4linux/sn9c102.txt
@@ -436,7 +436,7 @@
 HV7131R    Hynix Semiconductor     | No          Yes      Yes      Yes
 MI-0343    Micron Technology       | Yes         No       No       No
 MI-0360    Micron Technology       | No          Yes      Yes      Yes
-OV7630     OmniVision Technologies | Yes         Yes      No       No
+OV7630     OmniVision Technologies | Yes         Yes      Yes      Yes
 OV7660     OmniVision Technologies | No          No       Yes      Yes
 PAS106B    PixArt Imaging          | Yes         No       No       No
 PAS202B    PixArt Imaging          | Yes         Yes      No       No
@@ -583,6 +583,7 @@
 - Bertrik Sikken, who reverse-engineered and documented the Huffman compression
   algorithm used in the SN9C101, SN9C102 and SN9C103 controllers and
   implemented the first decoder;
+- Ronny Standke for the donation of a webcam;
 - Mizuno Takafumi for the donation of a webcam;
 - an "anonymous" donator (who didn't want his name to be revealed) for the
   donation of a webcam.
diff --git a/Documentation/video4linux/zr364xx.txt b/Documentation/video4linux/zr364xx.txt
index c76992d..4d9a0c3 100644
--- a/Documentation/video4linux/zr364xx.txt
+++ b/Documentation/video4linux/zr364xx.txt
@@ -62,4 +62,4 @@
 0x0784  0x0040   Traveler        Slimline X5
 0x06d6  0x0034   Trust           Powerc@m 750
 0x0a17  0x0062   Pentax          Optio 50L
-
+0x06d6  0x003b   Trust           Powerc@m 970Z
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index df812b0..d17f324 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -127,13 +127,20 @@
 
 Here is a sample of slub debug output:
 
-*** SLUB kmalloc-8: Redzone Active@0xc90f6d20 slab 0xc528c530 offset=3360 flags=0x400000c3 inuse=61 freelist=0xc90f6d58
-  Bytes b4 0xc90f6d10:  00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
-    Object 0xc90f6d20:  31 30 31 39 2e 30 30 35                         1019.005
-   Redzone 0xc90f6d28:  00 cc cc cc                                     .
-FreePointer 0xc90f6d2c -> 0xc90f6d58
-Last alloc: get_modalias+0x61/0xf5 jiffies_ago=53 cpu=1 pid=554
-Filler 0xc90f6d50:  5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
+====================================================================
+BUG kmalloc-8: Redzone overwritten
+--------------------------------------------------------------------
+
+INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
+INFO: Slab 0xc528c530 flags=0x400000c3 inuse=61 fp=0xc90f6d58
+INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
+INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
+
+Bytes b4 0xc90f6d10:  00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+  Object 0xc90f6d20:  31 30 31 39 2e 30 30 35                         1019.005
+ Redzone 0xc90f6d28:  00 cc cc cc                                     .
+ Padding 0xc90f6d50:  5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
+
   [<c010523d>] dump_trace+0x63/0x1eb
   [<c01053df>] show_trace_log_lvl+0x1a/0x2f
   [<c010601d>] show_trace+0x12/0x14
@@ -155,74 +162,108 @@
   [<c0104112>] sysenter_past_esp+0x5f/0x99
   [<b7f7b410>] 0xb7f7b410
   =======================
-@@@ SLUB kmalloc-8: Restoring redzone (0xcc) from 0xc90f6d28-0xc90f6d2b
 
+FIX kmalloc-8: Restoring Redzone 0xc90f6d28-0xc90f6d2b=0xcc
 
+If SLUB encounters a corrupted object (full detection requires the kernel
+to be booted with slub_debug) then the following output will be dumped
+into the syslog:
 
-If SLUB encounters a corrupted object then it will perform the following
-actions:
-
-1. Isolation and report of the issue
+1. Description of the problem encountered
 
 This will be a message in the system log starting with
 
-*** SLUB <slab cache affected>: <What went wrong>@<object address>
-offset=<offset of object into slab> flags=<slabflags>
-inuse=<objects in use in this slab> freelist=<first free object in slab>
+===============================================
+BUG <slab cache affected>: <What went wrong>
+-----------------------------------------------
 
-2. Report on how the problem was dealt with in order to ensure the continued
-operation of the system.
+INFO: <corruption start>-<corruption_end> <more info>
+INFO: Slab <address> <slab information>
+INFO: Object <address> <object information>
+INFO: Allocated in <kernel function> age=<jiffies since alloc> cpu=<allocated by
+	cpu> pid=<pid of the process>
+INFO: Freed in <kernel function> age=<jiffies since free> cpu=<freed by cpu>
+	 pid=<pid of the process>
 
-These are messages in the system log beginning with
+(Object allocation / free information is only available if SLAB_STORE_USER is
+set for the slab. slub_debug sets that option)
 
-@@@ SLUB <slab cache affected>: <corrective action taken>
+2. The object contents if an object was involved.
 
-
-In the above sample SLUB found that the Redzone of an active object has
-been overwritten. Here a string of 8 characters was written into a slab that
-has the length of 8 characters. However, a 8 character string needs a
-terminating 0. That zero has overwritten the first byte of the Redzone field.
-After reporting the details of the issue encountered the @@@ SLUB message
-tell us that SLUB has restored the redzone to its proper value and then
-system operations continue.
-
-Various types of lines can follow the @@@ SLUB line:
+Various types of lines can follow the BUG SLUB line:
 
 Bytes b4 <address> : <bytes>
-	Show a few bytes before the object where the problem was detected.
+	Shows a few bytes before the object where the problem was detected.
 	Can be useful if the corruption does not stop with the start of the
 	object.
 
 Object <address> : <bytes>
 	The bytes of the object. If the object is inactive then the bytes
-	typically contain poisoning values. Any non-poison value shows a
+	typically contain poison values. Any non-poison value shows a
 	corruption by a write after free.
 
 Redzone <address> : <bytes>
-	The redzone following the object. The redzone is used to detect
+	The Redzone following the object. The Redzone is used to detect
 	writes after the object. All bytes should always have the same
 	value. If there is any deviation then it is due to a write after
 	the object boundary.
 
-Freepointer
-	The pointer to the next free object in the slab. May become
-	corrupted if overwriting continues after the red zone.
+	(Redzone information is only available if SLAB_RED_ZONE is set.
+	slub_debug sets that option)
 
-Last alloc:
-Last free:
-	Shows the address from which the object was allocated/freed last.
-	We note the pid, the time and the CPU that did so. This is usually
-	the most useful information to figure out where things went wrong.
-	Here get_modalias() did an kmalloc(8) instead of a kmalloc(9).
-
-Filler <address> : <bytes>
+Padding <address> : <bytes>
 	Unused data to fill up the space in order to get the next object
 	properly aligned. In the debug case we make sure that there are
-	at least 4 bytes of filler. This allow for the detection of writes
+	at least 4 bytes of padding. This allows the detection of writes
 	before the object.
 
-Following the filler will be a stackdump. That stackdump describes the
-location where the error was detected. The cause of the corruption is more
-likely to be found by looking at the information about the last alloc / free.
+3. A stackdump
 
-Christoph Lameter, <clameter@sgi.com>, May 23, 2007
+The stackdump describes the location where the error was detected. The cause
+of the corruption is more likely to be found by looking at the function that
+allocated or freed the object.
+
+4. Report on how the problem was dealt with in order to ensure the continued
+operation of the system.
+
+These are messages in the system log beginning with
+
+FIX <slab cache affected>: <corrective action taken>
+
+In the above sample SLUB found that the Redzone of an active object has
+been overwritten. Here a string of 8 characters was written into a slab that
+has the length of 8 characters. However, an 8 character string needs a
+terminating 0. That zero has overwritten the first byte of the Redzone field.
+After reporting the details of the issue encountered, the FIX SLUB message
+tells us that SLUB has restored the Redzone to its proper value and then
+system operations continue.
+
+Emergency operations:
+---------------------
+
+Minimal debugging (sanity checks alone) can be enabled by booting with
+
+	slub_debug=F
+
+This will generally be enough to enable the resiliency features of slub,
+which will keep the system running even if a bad kernel component keeps
+corrupting objects. This may be important for production systems.
+Performance will be impacted by the sanity checks and there will be a
+continual stream of error messages to the syslog but no additional memory
+will be used (unlike full debugging).
+
+No guarantees. The kernel component still needs to be fixed. Performance
+may be optimized further by locating the slab that experiences corruption
+and enabling debugging only for that cache.
+
+For example:
+
+	slub_debug=F,dentry
+
+If the corruption occurs by writing after the end of the object then it
+may be advisable to enable a Redzone to avoid corrupting the beginning
+of other objects.
+
+	slub_debug=FZ,dentry
+
+Christoph Lameter, <clameter@sgi.com>, May 30, 2007
diff --git a/Documentation/zh_CN/HOWTO b/Documentation/zh_CN/HOWTO
new file mode 100644
index 0000000..48fc67b
--- /dev/null
+++ b/Documentation/zh_CN/HOWTO
@@ -0,0 +1,536 @@
+Chinese translated version of Documentation/HOWTO
+
+If you have any comments or updates to the content, please contact the
+original document maintainer directly.  However, if you have problems
+communicating in English you can also ask the Chinese maintainer for
+help.  Contact the Chinese maintainer if this translation is outdated
+or there is a problem with the translation.
+
+Maintainer: Greg Kroah-Hartman <greg@kroah.com>
+Chinese maintainer: Li Yang <leoli@freescale.com>
+---------------------------------------------------------------------
+Documentation/HOWTO 的中文翻译
+
+如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
+交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
+译存在问题,请联系中文版维护者。
+
+英文版维护者: Greg Kroah-Hartman <greg@kroah.com>
+中文版维护者: 李阳  Li Yang <leoli@freescale.com>
+中文版翻译者: 李阳  Li Yang <leoli@freescale.com>
+中文版校译者: 钟宇  TripleX Chung <xxx.phy@gmail.com>
+               陈琦  Maggie Chen <chenqi@beyondsoft.com>
+               王聪  Wang Cong <xiyou.wangcong@gmail.com>
+
+以下为正文
+---------------------------------------------------------------------
+
+如何参与Linux内核开发
+---------------------
+
+这是一篇将如何参与Linux内核开发的相关问题一网打尽的终极秘笈。它将指导你
+成为一名Linux内核开发者,并且学会如何同Linux内核开发社区合作。它尽可能不
+包括任何关于内核编程的技术细节,但会给你指引一条获得这些知识的正确途径。
+
+如果这篇文章中的任何内容不再适用,请给文末列出的文件维护者发送补丁。
+
+
+入门
+----
+
+你想了解如何成为一名Linux内核开发者?或者老板吩咐你“给这个设备写个Linux
+驱动程序”?这篇文章的目的就是教会你达成这些目标的全部诀窍,它将描述你需
+要经过的流程以及给出如何同内核社区合作的一些提示。它还将试图解释内核社区
+为何这样运作。
+
+Linux内核大部分是由C语言写成的,一些体系结构相关的代码用到了汇编语言。要
+参与内核开发,你必须精通C语言。除非你想为某个架构开发底层代码,否则你并
+不需要了解(任何体系结构的)汇编语言。下面列举的书籍虽然不能替代扎实的C
+语言教育和多年的开发经验,但如果需要的话,做为参考还是不错的:
+ - "The C Programming Language" by Kernighan and Ritchie [Prentice Hall]
+   《C程序设计语言(第2版·新版)》(徐宝文 李志 译)[机械工业出版社]
+ - "Practical C Programming" by Steve Oualline [O'Reilly]
+   《实用C语言编程(第三版)》(郭大海 译)[中国电力出版社]
+ - "C:  A Reference Manual" by Harbison and Steele [Prentice Hall]
+   《C语言参考手册(原书第5版)》(邱仲潘 等译)[机械工业出版社]
+
+Linux内核使用GNU C和GNU工具链开发。虽然它遵循ISO C89标准,但也用到了一些
+标准中没有定义的扩展。内核是自给自足的C环境,不依赖于标准C库的支持,所以
+并不支持C标准中的部分定义。比如long long类型的大数除法和浮点运算就不允许
+使用。有时候确实很难弄清楚内核对工具链的要求和它所使用的扩展,不幸的是目
+前还没有明确的参考资料可以解释它们。请查阅gcc信息页(使用“info gcc”命令
+显示)获得一些这方面信息。
+
+请记住你是在学习怎么和已经存在的开发社区打交道。它由一群形形色色的人组成,
+他们对代码、风格和过程有着很高的标准。这些标准是在长期实践中总结出来的,
+适应于地理上分散的大型开发团队。它们已经被很好得整理成档,建议你在开发
+之前尽可能多的学习这些标准,而不要期望别人来适应你或者你公司的行为方式。
+
+
+法律问题
+--------
+
+Linux内核源代码都是在GPL(通用公共许可证)的保护下发布的。要了解这种许可
+的细节请查看源代码主目录下的COPYING文件。如果你对它还有更深入问题请联系
+律师,而不要在Linux内核邮件组上提问。因为邮件组里的人并不是律师,不要期
+望他们的话有法律效力。
+
+对于GPL的常见问题和解答,请访问以下链接:
+	http://www.gnu.org/licenses/gpl-faq.html
+
+
+文档
+----
+
+Linux内核代码中包含有大量的文档。这些文档对于学习如何与内核社区互动有着
+不可估量的价值。当一个新的功能被加入内核,最好把解释如何使用这个功能的文
+档也放进内核。当内核的改动导致面向用户空间的接口发生变化时,最好将相关信
+息或手册页(manpages)的补丁发到mtk-manpages@gmx.net,以向手册页(manpages)
+的维护者解释这些变化。
+
+以下是内核代码中需要阅读的文档:
+  README
+    文件简要介绍了Linux内核的背景,并且描述了如何配置和编译内核。内核的
+    新用户应该从这里开始。
+
+  Documentation/Changes
+    文件给出了用来编译和使用内核所需要的最小软件包列表。
+
+  Documentation/CodingStyle
+    描述Linux内核的代码风格和理由。所有新代码需要遵守这篇文档中定义的规
+    范。大多数维护者只会接收符合规定的补丁,很多人也只会帮忙检查符合风格
+    的代码。
+
+  Documentation/SubmittingPatches
+  Documentation/SubmittingDrivers
+    这两份文档明确描述如何创建和发送补丁,其中包括(但不仅限于):
+       - 邮件内容
+       - 邮件格式
+       - 选择收件人
+    遵守这些规定并不能保证提交成功(因为所有补丁需要通过严格的内容和风格
+    审查),但是忽视他们几乎就意味着失败。
+
+    其他关于如何正确地生成补丁的优秀文档包括:
+    "The Perfect Patch"
+        http://www.zip.com.au/~akpm/linux/patches/stuff/tpp.txt
+    "Linux kernel patch submission format"
+        http://linux.yyz.us/patch-format.html
+
+  Documentation/stable_api_nonsense.txt
+    论证内核为什么特意不包括稳定的内核内部API,也就是说不包括像这样的特
+    性:
+       - 子系统中间层(为了兼容性?)
+       - 在不同操作系统间易于移植的驱动程序
+       - 减缓(甚至阻止)内核代码的快速变化
+    这篇文档对于理解Linux的开发哲学至关重要。对于将开发平台从其他操作系
+    统转移到Linux的人来说也很重要。
+
+  Documentation/SecurityBugs
+    如果你认为自己发现了Linux内核的安全性问题,请根据这篇文档中的步骤来
+    提醒其他内核开发者并帮助解决这个问题。
+
+  Documentation/ManagementStyle
+    描述内核维护者的工作方法及其共有特点。这对于刚刚接触内核开发(或者对
+    它感到好奇)的人来说很重要,因为它解释了很多对于内核维护者独特行为的
+    普遍误解与迷惑。
+
+  Documentation/stable_kernel_rules.txt
+    解释了稳定版内核发布的规则,以及如何将改动放入这些版本的步骤。
+
+  Documentation/kernel-docs.txt
+    有助于内核开发的外部文档列表。如果你在内核自带的文档中没有找到你想找
+    的内容,可以查看这些文档。
+
+  Documentation/applying-patches.txt
+    关于补丁是什么以及如何将它打在不同内核开发分支上的好介绍
+
+内核还拥有大量从代码自动生成的文档。它包含内核内部API的全面介绍以及如何
+妥善处理加锁的规则。生成的文档会放在 Documentation/DocBook/目录下。在内
+核源码的主目录中使用以下不同命令将会分别生成PDF、Postscript、HTML和手册
+页等不同格式的文档:
+    make pdfdocs
+    make psdocs
+    make htmldocs
+    make mandocs
+
+
+如何成为内核开发者
+------------------
+如果你对Linux内核开发一无所知,你应该访问“Linux内核新手”计划:
+	http://kernelnewbies.org
+它拥有一个可以问各种最基本的内核开发问题的邮件列表(在提问之前一定要记得
+查找已往的邮件,确认是否有人已经回答过相同的问题)。它还拥有一个可以获得
+实时反馈的IRC聊天频道,以及大量对于学习Linux内核开发相当有帮助的文档。
+
+网站简要介绍了源代码组织结构、子系统划分以及目前正在进行的项目(包括内核
+中的和单独维护的)。它还提供了一些基本的帮助信息,比如如何编译内核和打补
+丁。
+
+如果你想加入内核开发社区并协助完成一些任务,却找不到从哪里开始,可以访问
+“Linux内核房管员”计划:
+	http://janitor.kernelnewbies.org/
+这是极佳的起点。它提供一个相对简单的任务列表,列出内核代码中需要被重新
+整理或者改正的地方。通过和负责这个计划的开发者们一同工作,你会学到将补丁
+集成进内核的基本原理。如果还没有决定下一步要做什么的话,你还可能会得到方
+向性的指点。
+
+如果你已经有一些现成的代码想要放到内核中,但是需要一些帮助来使它们拥有正
+确的格式。请访问“内核导师”计划。这个计划就是用来帮助你完成这个目标的。它
+是一个邮件列表,地址如下:
+	http://selenic.com/mailman/listinfo/kernel-mentors
+
+在真正动手修改内核代码之前,理解要修改的代码如何运作是必需的。要达到这个
+目的,没什么办法比直接读代码更有效了(大多数花招都会有相应的注释),而且
+一些特制的工具还可以提供帮助。例如,“Linux代码交叉引用”项目就是一个值得
+特别推荐的帮助工具,它将源代码显示在有编目和索引的网页上。其中一个更新及
+时的内核源码库,可以通过以下地址访问:
+	http://sosdg.org/~coywolf/lxr/
+
+
+开发流程
+--------
+
+目前Linux内核开发流程包括几个“主内核分支”和很多子系统相关的内核分支。这
+些分支包括:
+  - 2.6.x主内核源码树
+  - 2.6.x.y -stable内核源码树
+  - 2.6.x -git内核补丁集
+  - 2.6.x -mm内核补丁集
+  - 子系统相关的内核源码树和补丁集
+
+
+2.6.x内核主源码树
+-----------------
+2.6.x内核是由Linus Torvalds(Linux的创造者)亲自维护的。你可以在
+kernel.org网站的pub/linux/kernel/v2.6/目录下找到它。它的开发遵循以下步
+骤:
+  - 每当一个新版本的内核被发布,为期两周的集成窗口将被打开。在这段时间里
+    维护者可以向Linus提交大段的修改,通常这些修改已经被放到-mm内核中几个
+    星期了。提交大量修改的首选方式是使用git工具(内核的代码版本管理工具
+    ,更多的信息可以在http://git.or.cz/获取),不过使用普通补丁也是可以
+    的。
+  - 两个星期以后-rc1版本内核发布。之后只有不包含可能影响整个内核稳定性的
+    新功能的补丁才可能被接受。请注意一个全新的驱动程序(或者文件系统)有
+    可能在-rc1后被接受是因为这样的修改完全独立,不会影响其他的代码,所以
+    没有造成内核退步的风险。在-rc1以后也可以用git向Linus提交补丁,不过所
+    有的补丁需要同时被发送到相应的公众邮件列表以征询意见。
+  - 当Linus认为当前的git源码树已经达到一个合理健全的状态足以发布供人测试
+    时,一个新的-rc版本就会被发布。计划是每周都发布新的-rc版本。
+  - 这个过程一直持续下去直到内核被认为达到足够稳定的状态,持续时间大概是
+    6个星期。
+
+关于内核发布,值得一提的是Andrew Morton在linux-kernel邮件列表中如是说:
+	“没有人知道新内核何时会被发布,因为发布是根据已知bug的情况来决定
+	的,而不是根据一个事先制定好的时间表。”
+
+
+2.6.x.y -stable(稳定版)内核源码树
+-----------------------------------
+由4个数字组成的内核版本号说明此内核是-stable版本。它们包含基于2.6.x版本
+内核的相对较小且至关重要的修补,这些修补针对安全性问题或者严重的内核退步。
+
+这种版本的内核适用于那些期望获得最新的稳定版内核并且不想参与测试开发版或
+者实验版的用户。
+
+如果没有2.6.x.y版本内核存在,那么最新的2.6.x版本内核就相当于是当前的稳定
+版内核。
+
+2.6.x.y版本由“稳定版”小组(邮件地址<stable@kernel.org>)维护,一般隔周发
+布新版本。
+
+内核源码中的Documentation/stable_kernel_rules.txt文件具体描述了可被稳定
+版内核接受的修改类型以及发布的流程。
+
+
+2.6.x -git补丁集
+----------------
+Linus的内核源码树的每日快照,这个源码树是由git工具管理的(由此得名)。这
+些补丁通常每天更新以反映Linus的源码树的最新状态。它们比-rc版本的内核源码
+树更具试验性质,因为这个补丁集是全自动生成的,没有任何人来确认其是否真正
+健全。
+
+
+2.6.x -mm补丁集
+---------------
+这是由Andrew Morton维护的试验性内核补丁集。Andrew将所有子系统的内核源码
+和补丁拼凑到一起,并且加入了大量从linux-kernel邮件列表中采集的补丁。这个
+源码树是新功能和补丁的试炼场。当补丁在-mm补丁集里证明了其价值以后Andrew
+或者相应子系统的维护者会将补丁发给Linus以便集成进主内核源码树。
+
+在将所有新补丁发给Linus以集成到主内核源码树之前,我们非常鼓励先把这些补
+丁放在-mm版内核源码树中进行测试。
+
+这些内核版本不适合在需要稳定运行的系统上运行,因为运行它们比运行任何其他
+内核分支都更具有风险。
+
+如果你想为内核开发进程提供帮助,请尝试并使用这些内核版本,并在
+linux-kernel邮件列表中提供反馈,告诉大家你遇到了问题还是一切正常。
+
+通常-mm版补丁集不光包括这些额外的试验性补丁,还包括发布时-git版主源码树
+中的改动。
+
+-mm版内核没有固定的发布周期,但是通常在每两个-rc版内核发布之间都会有若干
+个-mm版内核发布(一般是1至3个)。
+
+
+子系统相关内核源码树和补丁集
+----------------------------
+相当一部分内核子系统开发者会公开他们自己的开发源码树,以便其他人能了解内
+核的不同领域正在发生的事情。如上所述,这些源码树会被集成到-mm版本内核中。
+
+下面是目前可用的一些内核源码树的列表:
+  通过git管理的源码树:
+    - Kbuild开发源码树, Sam Ravnborg <sam@ravnborg.org>
+	git.kernel.org:/pub/scm/linux/kernel/git/sam/kbuild.git
+
+    - ACPI开发源码树, Len Brown <len.brown@intel.com>
+	git.kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
+
+    - 块设备开发源码树, Jens Axboe <axboe@suse.de>
+	git.kernel.org:/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git
+
+    - DRM开发源码树, Dave Airlie <airlied@linux.ie>
+	git.kernel.org:/pub/scm/linux/kernel/git/airlied/drm-2.6.git
+
+    - ia64开发源码树, Tony Luck <tony.luck@intel.com>
+	git.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6.git
+
+    - ieee1394开发源码树, Jody McIntyre <scjody@modernduck.com>
+	git.kernel.org:/pub/scm/linux/kernel/git/scjody/ieee1394.git
+
+    - infiniband开发源码树, Roland Dreier <rolandd@cisco.com>
+	git.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband.git
+
+    - libata开发源码树, Jeff Garzik <jgarzik@pobox.com>
+	git.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev.git
+
+    - 网络驱动程序开发源码树, Jeff Garzik <jgarzik@pobox.com>
+	git.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6.git
+
+    - pcmcia开发源码树, Dominik Brodowski <linux@dominikbrodowski.net>
+	git.kernel.org:/pub/scm/linux/kernel/git/brodo/pcmcia-2.6.git
+
+    - SCSI开发源码树, James Bottomley <James.Bottomley@SteelEye.com>
+	git.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
+
+  使用quilt管理的补丁集:
+    - USB, PCI, 驱动程序核心和I2C, Greg Kroah-Hartman <gregkh@suse.de>
+	kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+    - x86-64, 部分i386, Andi Kleen <ak@suse.de>
+	ftp.firstfloor.org:/pub/ak/x86_64/quilt/
+
+  其他内核源码树可以在http://git.kernel.org的列表中和MAINTAINERS文件里
+  找到。
+
+报告bug
+-------
+
+bugzilla.kernel.org是Linux内核开发者们用来跟踪内核Bug的网站。我们鼓励用
+户在这个工具中报告找到的所有bug。如何使用内核bugzilla的细节请访问:
+	http://test.kernel.org/bugzilla/faq.html
+
+内核源码主目录中的REPORTING-BUGS文件里有一个很好的模板。它指导用户如何报
+告可能的内核bug以及需要提供哪些信息来帮助内核开发者们找到问题的根源。
+
+
+利用bug报告
+-----------
+
+练习内核开发技能的最好办法就是修改其他人报告的bug。你不光可以帮助内核变
+得更加稳定,还可以学会如何解决实际问题从而提高自己的技能,并且让其他开发
+者感受到你的存在。修改bug是赢得其他开发者赞誉的最好办法,因为并不是很多
+人都喜欢浪费时间去修改别人报告的bug。
+
+要尝试修改已知的bug,请访问http://bugzilla.kernel.org网址。如果你想获得
+最新bug的通知,可以订阅bugme-new邮件列表(只有新的bug报告会被寄到这里)
+或者订阅bugme-janitor邮件列表(所有bugzilla的变动都会被寄到这里)。
+
+	http://lists.osdl.org/mailman/listinfo/bugme-new
+	http://lists.osdl.org/mailman/listinfo/bugme-janitors
+
+
+邮件列表
+--------
+
+正如上面的文档所描述,大多数的骨干内核开发者都加入了Linux Kernel邮件列
+表。如何订阅和退订列表的细节可以在这里找到:
+	http://vger.kernel.org/vger-lists.html#linux-kernel
+网上很多地方都有这个邮件列表的存档(archive)。可以使用搜索引擎来找到这些
+存档。比如:
+	http://dir.gmane.org/gmane.linux.kernel
+在发信之前,我们强烈建议你先在存档中搜索你想要讨论的问题。很多已经被详细
+讨论过的问题只在邮件列表的存档中可以找到。
+
+大多数内核子系统也有自己独立的邮件列表来协调各自的开发工作。从
+MAINTAINERS文件中可以找到不同话题对应的邮件列表。
+
+很多邮件列表架设在kernel.org服务器上。这些列表的信息可以在这里找到:
+	http://vger.kernel.org/vger-lists.html
+
+在使用这些邮件列表时,请记住保持良好的行为习惯。下面的链接提供了与这些列
+表(或任何其它邮件列表)交流的一些简单规则,虽然内容有点滥竽充数。
+	http://www.albion.com/netiquette/
+
+当有很多人回复你的邮件时,邮件的抄送列表会变得很长。请不要将任何人从抄送
+列表中删除,除非你有足够的理由这么做。也不要只回复到邮件列表。请习惯于同
+一封邮件接收两次(一封来自发送者一封来自邮件列表),而不要试图通过添加一
+些奇特的邮件头来解决这个问题,人们不会喜欢的。
+
+记住保留你所回复内容的上下文和源头。在你回复邮件的顶部保留“某某某说到……”
+这几行。将你的评论加在被引用的段落之间而不要放在邮件的顶部。
+
+如果你在邮件中附带补丁,请确认它们是可以直接阅读的纯文本(如
+Documentation/SubmittingPatches文档中所述)。内核开发者们不希望遇到附件
+或者被压缩了的补丁。只有这样才能保证他们可以直接评论你的每行代码。请确保
+你使用的邮件发送程序不会修改空格和制表符。一个防范性的测试方法是先将邮件
+发送给自己,然后自己尝试是否可以顺利地打上收到的补丁。如果测试不成功,请
+调整或者更换你的邮件发送程序直到它正确工作为止。
+
+总而言之,请尊重其他的邮件列表订阅者。
+
+
+同内核社区合作
+----------------
+
+内核社区的目标就是提供尽善尽美的内核。所以当你提交补丁期望被接受进内核的
+时候,它的技术价值以及其他方面都将被评审。那么你可能会得到什么呢?
+  - 批评
+  - 评论
+  - 要求修改
+  - 要求证明修改的必要性
+  - 沉默
+
+要记住,这些是把补丁放进内核的正常情况。你必须学会听取对补丁的批评和评论,
+从技术层面评估它们,然后要么重写你的补丁要么简明扼要地论证修改是不必要
+的。如果你发的邮件没有得到任何回应,请过几天后再试一次,因为有时信件会湮
+没在茫茫信海中。
+
+你不应该做的事情:
+  - 期望自己的补丁不受任何质疑就直接被接受
+  - 翻脸
+  - 忽略别人的评论
+  - 没有按照别人的要求做任何修改就重新提交
+
+在一个努力追寻最好技术方案的社区里,对于一个补丁有多少好处总会有不同的见
+解。你必须要抱着合作的态度,愿意改变自己的观点来适应内核的风格。或者至少
+愿意去证明你的想法是有价值的。记住,犯错误是允许的,只要你愿意朝着正确的
+方案去努力。
+
+如果你的第一个补丁换来的是一堆修改建议,这是很正常的。这并不代表你的补丁
+不会被接受,也不意味着有人和你作对。你只需要改正所有提出的问题然后重新发
+送你的补丁。
+
+内核社区和公司文化的差异
+------------------------
+
+内核社区的工作模式同大多数传统公司开发队伍的工作模式并不相同。下面这些例
+子,可以帮助你避免某些可能发生问题:
+  用这些话介绍你的修改提案会有好处:
+    - 它同时解决了多个问题
+    - 它删除了2000行代码
+    - 这是补丁,它已经解释了我想要说明的
+    - 我在5种不同的体系结构上测试过它……
+    - 这是一系列小补丁用来……
+    - 这个修改提高了普通机器的性能……
+
+  应该避免如下的说法:
+    - 我们在AIX/ptx/Solaris就是这么做的,所以这么做肯定是好的……
+    - 我做这行已经20年了,所以……
+    - 为了我们公司赚钱考虑必须这么做
+    - 这是我们的企业产品线所需要的
+    - 这里是描述我观点的1000页设计文档
+    - 这是一个5000行的补丁用来……
+    - 我重写了现在乱七八糟的代码,这就是……
+    - 我被规定了最后期限,所以这个补丁需要立刻被接受
+
+另外一个内核社区与大部分传统公司的软件开发队伍不同的地方是无法面对面地交
+流。使用电子邮件和IRC聊天工具做为主要沟通工具的一个好处是性别和种族歧视
+将会更少。Linux内核的工作环境更能接受妇女和少数族群,因为每个人在别人眼
+里只是一个邮件地址。国际化也帮助了公平的实现,因为你无法通过姓名来判断人
+的性别。男人有可能叫李丽,女人也有可能叫王刚。大多数在Linux内核上工作过
+并表达过看法的女性对在linux上工作的经历都给出了正面的评价。
+
+对于一些不习惯使用英语的人来说,语言可能是一个引起问题的障碍。在邮件列表
+中要正确地表达想法必需良好地掌握语言,所以建议你在发送邮件之前最好检查一
+下英文写得是否正确。
+
+
+拆分修改
+--------
+
+Linux内核社区并不喜欢一下接收大段的代码。修改需要被恰当地介绍、讨论并且
+拆分成独立的小段。这几乎完全和公司中的习惯背道而驰。你的想法应该在开发最
+开始的阶段就让大家知道,这样你就可以及时获得对你正在进行的开发的反馈。这
+样也会让社区觉得你是在和他们协作,而不是仅仅把他们当作倾销新功能的对象。
+无论如何,你不要一次性地向邮件列表发送50封信,你的补丁序列应该永远用不到
+这么多。
+
+将补丁拆开的原因如下:
+
+1) 小的补丁更有可能被接受,因为它们不需要太多的时间和精力去验证其正确性。
+   一个5行的补丁,可能在维护者看了一眼以后就会被接受。而500行的补丁则
+   需要数个小时来审查其正确性(所需时间随补丁大小增加大约呈指数级增长)。
+
+   当出了问题的时候,小的补丁也会让调试变得非常容易。一个一个补丁地回溯
+   将会比仔细剖析一个被打上的大补丁(这个补丁破坏了其他东西)容易得多。
+
+2)不光发送小的补丁很重要,在提交之前重新编排、化简(或者仅仅重新排列)
+   补丁也是很重要的。
+
+这里有内核开发者Al Viro打的一个比方:
+	“想象一个老师正在给学生批改数学作业。老师并不希望看到学生为了得
+	到正确解法所进行的尝试和产生的错误。他希望看到的是最干净最优雅的
+	解答。好学生了解这点,绝不会把最终解决之前的中间方案提交上去。”
+
+	内核开发也是这样。维护者和评审者不希望看到一个人在解决问题时的思
+	考过程。他们只希望看到简单和优雅的解决方案。
+
+直接给出一流的解决方案,和社区一起协作讨论尚未完成的工作,这两者之间似乎
+很难找到一个平衡点。所以最好尽早开始收集有利于你进行改进的反馈;同时也要
+保证修改分成很多小块,这样在整个项目都准备好被包含进内核之前,其中的一部
+分可能会先被接收。
+
+必须了解这样做是不可接受的:试图将未完成的工作提交进内核,然后再找时间修
+复。
+
+
+证明修改的必要性
+----------------
+除了将补丁拆成小块,很重要的一点是让Linux社区了解他们为什么需要这样修改。
+你必须证明新功能是有人需要的并且是有用的。
+
+
+记录修改
+--------
+
+当你发送补丁的时候,需要特别留意邮件正文的内容。因为这里的信息将会做为补
+丁的修改记录(ChangeLog),会被一直保留以备大家查阅。它需要完全地描述补丁,
+包括:
+  - 为什么需要这个修改
+  - 补丁的总体设计
+  - 实现细节
+  - 测试结果
+
+想了解它具体应该看起来像什么,请查阅以下文档中的“ChangeLog”章节:
+  “The Perfect Patch”
+      http://www.zip.com.au/~akpm/linux/patches/stuff/tpp.txt
+
+
+这些事情有时候做起来很难。要在任何方面都做到完美可能需要好几年时间。这是
+一个持续提高的过程,它需要大量的耐心和决心。只要不放弃,你一定可以做到。
+很多人已经做到了,而他们都曾经和现在的你站在同样的起点上。
+
+
+---------------
+感谢Paolo Ciarrocchi允许“开发流程”部分基于他所写的文章
+(http://linux.tar.bz/articles/2.6-development_process),感谢Randy
+Dunlap和Gerrit Huizenga完善了应该说和不该说的列表。感谢Pat Mochel, Hanna
+Linder, Randy Dunlap, Kay Sievers, Vojtech Pavlik, Jan Kara, Josh Boyer,
+Kees Cook, Andrew Morton, Andi Kleen, Vadim Lobanov, Jesper Juhl, Adrian
+Bunk, Keri Harris, Frans Pop, David A. Wheeler, Junio Hamano, Michael
+Kerrisk和Alex Shepard的评审、建议和贡献。没有他们的帮助,这篇文档是不可
+能完成的。
+
+
+
+英文版维护者: Greg Kroah-Hartman <greg@kroah.com>
diff --git a/Documentation/zh_CN/stable_api_nonsense.txt b/Documentation/zh_CN/stable_api_nonsense.txt
new file mode 100644
index 0000000..c26a27d
--- /dev/null
+++ b/Documentation/zh_CN/stable_api_nonsense.txt
@@ -0,0 +1,157 @@
+Chinese translated version of Documentation/stable_api_nonsense.txt
+
+If you have any comments or updates to the content, please contact the
+original document maintainer directly.  However, if you have problems
+communicating in English you can also ask the Chinese maintainer for help.
+Contact the Chinese maintainer if this translation is outdated or there
+is a problem with the translation.
+
+Maintainer: Greg Kroah-Hartman <greg@kroah.com>
+Chinese maintainer: TripleX Chung <zhongyu@18mail.cn>
+---------------------------------------------------------------------
+Documentation/stable_api_nonsense.txt 的中文翻译
+
+如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
+交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
+译存在问题,请联系中文版维护者。
+
+英文版维护者: Greg Kroah-Hartman <greg@kroah.com>
+中文版维护者: 钟宇  TripleX Chung <zhongyu@18mail.cn>
+中文版翻译者: 钟宇  TripleX Chung <zhongyu@18mail.cn>
+中文版校译者: 李阳  Li Yang <leoli@freescale.com>
+以下为正文
+---------------------------------------------------------------------
+
+写作本文档的目的,是为了解释为什么Linux既没有二进制内核接口,也没有稳定
+的内核接口。这里所说的内核接口,是指内核里的接口,而不是内核和用户空间
+的接口。内核到用户空间的接口,是提供给应用程序使用的系统调用,系统调用
+在历史上几乎没有过变化,将来也不会有变化。我有一些老应用程序是在0.9版本
+或者更早版本的内核上编译的,在使用2.6版本内核的Linux发布上依然用得很好
+。用户和应用程序作者可以将这个接口看成是稳定的。
+
+
+执行纲要
+--------
+
+你也许以为自己想要稳定的内核接口,但是你不清楚你要的实际上不是它。你需
+要的其实是稳定的驱动程序,而你只有将驱动程序放到公版内核的源代码树里,
+才有可能达到这个目的。而且这样做还有很多其它好处,正是因为这些好处使得
+Linux能成为强壮,稳定,成熟的操作系统,这也是你最开始选择Linux的原因。
+
+
+入门
+-----
+
+只有那些写驱动程序的“怪人”才会担心内核接口的改变,对广大用户来说,既
+看不到内核接口,也不需要去关心它。
+
+首先,我不打算讨论关于任何非GPL许可的内核驱动的法律问题,这些非GPL许可
+的驱动程序包括不公开源代码,隐藏源代码,二进制或者是用源代码包装,或者
+是其它任何形式的不能以GPL许可公开源代码的驱动程序。如果有法律问题,请咨
+询律师,我只是一个程序员,所以我只打算探讨技术问题(不是小看法律问题,
+法律问题很实际,并且需要一直关注)。
+
+既然只谈技术问题,我们就有了下面两个主题:二进制内核接口和稳定的内核源
+代码接口。这两个问题是互相关联的,让我们先解决掉二进制接口的问题。
+
+
+二进制内核接口
+--------------
+假如我们有一个稳定的内核源代码接口,那么自然而然的,我们就拥有了稳定的
+二进制接口,是这样的吗?错。让我们看看关于Linux内核的几点事实:
+    - 取决于所用的C编译器的版本,不同的内核数据结构里的结构体的对齐方
+式会有差别,代码中不同函数的表现形式也不一样(函数是不是被inline编译取
+决于编译器行为)。不同的函数的表现形式并不重要,但是数据结构内部的对齐
+方式很关键。
+    - 取决于内核的配置选项,不同的选项会让内核的很多东西发生改变:
+      - 同一个结构体可能包含不同的成员变量
+      - 有的函数可能根本不会被实现(比如编译的时候没有选择SMP支持
+,一些锁函数就会被定义成空函数)。
+      - 内核使用的内存会以不同的方式对齐,这取决于不同的内核配置选
+项。
+    - Linux可以在很多的不同体系结构的处理器上运行。在某个体系结构上编
+译好的二进制驱动程序,不可能在另外一个体系结构上正确的运行。
+
+对于一个特定的内核,满足这些条件并不难,使用同一个C编译器和同样的内核配
+置选项来编译驱动程序模块就可以了。这对于给一个特定Linux发布的特定版本提
+供驱动程序,是完全可以满足需求的。但是如果你要给不同发布的不同版本都发
+布一个驱动程序,就需要在每个发布上用不同的内核设置参数都编译一次内核,
+这简直跟噩梦一样。而且还要注意到,每个Linux发布还提供不同的Linux内核,
+这些内核都针对不同的硬件类型进行了优化(有很多种不同的处理器,还有不同
+的内核设置选项)。所以每发布一次驱动程序,都需要提供很多不同版本的内核
+模块。
+
+相信我,如果你真的要采取这种发布方式,一定会慢慢疯掉,我很久以前就有过
+深刻的教训...
+
+
+稳定的内核源代码接口
+--------------------
+
+如果有人不将他的内核驱动程序,放入公版内核的源代码树,而又想让驱动程序
+一直保持在最新的内核中可用,那么这个话题将会变得没完没了。
+ 内核开发是持续而且快节奏的,从来都不会慢下来。内核开发人员在当前接口中
+找到bug,或者找到更好的实现方式。一旦发现这些,他们就很快会去修改当前的
+接口。修改接口意味着,函数名可能会改变,结构体可能被扩充或者删减,函数
+的参数也可能发生改变。一旦接口被修改,内核中使用这些接口的地方需要同时
+修正,这样才能保证所有的东西继续工作。
+
+举一个例子,内核的USB驱动程序接口在USB子系统的整个生命周期中,至少经历
+了三次重写。这些重写解决以下问题:
+    - 把数据流从同步模式改成非同步模式,这个改动减少了一些驱动程序的
+复杂度,提高了所有USB驱动程序的吞吐率,这样几乎所有的USB设备都能以最大
+速率工作了。
+    - 修改了USB核心代码中为USB驱动分配数据包内存的方式,所有的驱动都
+需要提供更多的参数给USB核心,以修正了很多已经被记录在案的死锁。
+
+这和一些封闭源代码的操作系统形成鲜明的对比,在那些操作系统上,不得不额
+外的维护旧的USB接口。这导致了一个可能性,新的开发者依然会不小心使用旧的
+接口,以不恰当的方式编写代码,进而影响到操作系统的稳定性。
+ 在上面的例子中,所有的开发者都同意这些重要的改动,在这样的情况下修改代
+价很低。如果Linux保持一个稳定的内核源代码接口,那么就得创建一个新的接口
+;旧的,有问题的接口必须一直维护,给Linux USB开发者带来额外的工作。既然
+所有的Linux USB驱动的作者都是利用自己的时间工作,那么要求他们去做毫无意
+义的免费额外工作,是不可能的。
+ 安全问题对Linux来说十分重要。一个安全问题被发现,就会在短时间内得到修
+正。在很多情况下,这将导致Linux内核中的一些接口被重写,以从根本上避免安
+全问题。一旦接口被重写,所有使用这些接口的驱动程序,必须同时得到修正,
+以确定安全问题已经得到修复并且不可能在未来还有同样的安全问题。如果内核
+内部接口不允许改变,那么就不可能修复这样的安全问题,也不可能确认这样的
+安全问题以后不会发生。
+开发者一直在清理内核接口。如果一个接口没有人在使用了,它就会被删除。这
+样可以确保内核尽可能的小,而且所有潜在的接口都会得到尽可能完整的测试
+(没有人使用的接口是不可能得到良好的测试的)。
+
+
+要做什么
+-------
+
+如果你写了一个Linux内核驱动,但是它还不在Linux源代码树里,作为一个开发
+者,你应该怎么做?为每个发布的每个版本提供一个二进制驱动,那简直是一个
+噩梦,要跟上永远处于变化之中的内核接口,也是一件辛苦活。
+很简单,让你的驱动进入内核源代码树(要记得我们在谈论的是以GPL许可发行
+的驱动,如果你的代码不符合GPL,那么祝你好运,你只能自己解决这个问题了,
+你这个吸血鬼<把Andrew和Linus对吸血鬼的定义链接到这里>)。当你的代码加入
+公版内核源代码树之后,如果一个内核接口改变,你的驱动会直接被修改接口的
+那个人修改。保证你的驱动永远都可以编译通过,并且一直工作,你几乎不需要
+做什么事情。
+
+把驱动放到内核源代码树里会有很多的好处:
+    - 驱动的质量会提升,而维护成本(对原始作者来说)会下降。
+    - 其他人会给驱动添加新特性。
+    - 其他人会找到驱动中的bug并修复。
+    - 其他人会在驱动中找到性能优化的机会。
+    - 当外部的接口的改变需要修改驱动程序的时候,其他人会修改驱动程序
+。
+    - 不需要联系任何发行商,这个驱动会自动的随着所有的Linux发布一起发
+布。
+
+和别的操作系统相比,Linux为更多不同的设备提供现成的驱动,而且能在更多不
+同体系结构的处理器上支持这些设备。这个经过考验的开发模式,必然是错不了
+的 :)
+
+-------------
+感谢 Randy Dunlap, Andrew Morton, David Brownell, Hanna Linder,
+Robert Love, and Nishanth Aravamudan 对于本文档早期版本的评审和建议。
+
+英文版维护者: Greg Kroah-Hartman <greg@kroah.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 5abec14..fbe0dca 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -607,6 +607,12 @@
 W:	http://xf.iksaif.net/acpi4asus
 S:	Maintained
 
+ASUS ASB100 HARDWARE MONITOR DRIVER
+P:	Mark M. Hoffman
+M:	mhoffman@lightlink.com
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+
 ASUS LAPTOP EXTRAS DRIVER
 P:	Corentin Chary
 M:	corentincj@iksaif.net
@@ -732,6 +738,13 @@
 W:	http://blackfin.uclinux.org
 S:	Supported
 
+BLACKFIN EMAC DRIVER
+P:	Bryan Wu
+M:	bryan.wu@analog.com
+L:	uclinux-dist-devel@blackfin.uclinux.org (subscribers-only)
+W:	http://blackfin.uclinux.org
+S:	Supported
+
 BLACKFIN RTC DRIVER
 P:	Mike Frysinger
 M:	michael.frysinger@analog.com
@@ -913,6 +926,12 @@
 L:	netdev@vger.kernel.org
 S:	Supported
 
+BSG (block layer generic sg v4 driver)
+P:	FUJITA Tomonori
+M:	fujita.tomonori@lab.ntt.co.jp
+L:	linux-scsi@vger.kernel.org
+S:	Supported
+
 BTTV VIDEO4LINUX DRIVER
 P:	Mauro Carvalho Chehab
 M:	mchehab@infradead.org
@@ -1260,6 +1279,12 @@
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 
+DME1737 HARDWARE MONITOR DRIVER
+P:	Juerg Haefliger
+M:	juergh@gmail.com
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+
 DOCBOOK FOR DOCUMENTATION
 P:	Randy Dunlap
 M:	rdunlap@xenotime.net
@@ -1339,21 +1364,60 @@
 
 EDAC-CORE
 P:	Doug Thompson
-M:	norsk5@xmission.com
+M:	dougthompson@xmission.com
 L:	bluesmoke-devel@lists.sourceforge.net
 W:	bluesmoke.sourceforge.net
 S:	Supported
 
 EDAC-E752X
 P:	Mark Gross
+P:	Doug Thompson
 M:	mark.gross@intel.com
+M:	dougthompson@xmission.com
 L:	bluesmoke-devel@lists.sourceforge.net
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 
 EDAC-E7XXX
 P:	Doug Thompson
-M:	norsk5@xmission.com
+M:	dougthompson@xmission.com
+L:	bluesmoke-devel@lists.sourceforge.net
+W:	bluesmoke.sourceforge.net
+S:	Maintained
+
+EDAC-I82443BXGX
+P:	Tim Small
+M:	tim@buttersideup.com
+L:	bluesmoke-devel@lists.sourceforge.net
+W:	bluesmoke.sourceforge.net
+S:	Maintained
+
+EDAC-I3000
+P:	Jason Uhlenkott
+M:	juhlenko@akamai.com
+L:	bluesmoke-devel@lists.sourceforge.net
+W:	bluesmoke.sourceforge.net
+S:	Maintained
+
+EDAC-I5000
+P:	Doug Thompson
+M:	dougthompson@xmission.com
+L:	bluesmoke-devel@lists.sourceforge.net
+W:	bluesmoke.sourceforge.net
+S:	Maintained
+
+EDAC-I82975X
+P:	Ranganathan Desikan
+P:	Arvind R.
+M:	rdesikan@jetzbroadband.com
+M:	arvind@acarlab.com
+L:	bluesmoke-devel@lists.sourceforge.net
+W:	bluesmoke.sourceforge.net
+S:	Maintained
+
+EDAC-PASEMI
+P:	Egor Martovetsky
+M:	egor@pasemi.com
 L:	bluesmoke-devel@lists.sourceforge.net
 W:	bluesmoke.sourceforge.net
 S:	Maintained
@@ -1571,11 +1635,11 @@
 S:	Maintained
 
 HARDWARE MONITORING
-P:	Jean Delvare
-M:	khali@linux-fr.org
+P:	Mark M. Hoffman
+M:	mhoffman@lightlink.com
 L:	lm-sensors@lm-sensors.org
 W:	http://www.lm-sensors.org/
-T:	quilt http://khali.linux-fr.org/devel/linux-2.6/jdelvare-hwmon/
+T:	git lm-sensors.org:/kernel/mhoffman/hwmon-2.6.git
 S:	Maintained
 
 HARDWARE RANDOM NUMBER GENERATOR CORE
@@ -1711,6 +1775,12 @@
 M:	wli@holomorphy.com
 S:	Maintained
 
+I2C/SMBUS STUB DRIVER
+P:	Mark M. Hoffman
+M:	mhoffman@lightlink.com
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+
 I2C SUBSYSTEM
 P:	Jean Delvare
 M:	khali@linux-fr.org
@@ -1734,6 +1804,7 @@
 i386 SETUP CODE / CPU ERRATA WORKAROUNDS
 P:	H. Peter Anvin
 M:	hpa@zytor.com
+T:	git.kernel.org:/pub/scm/linux/kernel/git/hpa/linux-2.6-x86setup.git
 S:	Maintained
 
 IA64 (Itanium) PLATFORM
@@ -2387,7 +2458,7 @@
 M:	dedekind@infradead.org
 W:	http://www.linux-mtd.infradead.org/
 L:	linux-mtd@lists.infradead.org
-T:	git git://git.infradead.org/ubi-2.6.git
+T:	git git://git.infradead.org/~dedekind/ubi-2.6.git
 S:	Maintained
 
 MICROTEK X6 SCANNER
@@ -2843,8 +2914,8 @@
 S:	Maintained
 
 POSIX CLOCKS and TIMERS
-P:	George Anzinger
-M:	george@mvista.com
+P:	Thomas Gleixner
+M:	tglx@linutronix.de
 L:	linux-kernel@vger.kernel.org
 S:	Supported
 
@@ -3239,6 +3310,12 @@
 L:	netdev@vger.kernel.org
 S:	Maintained
 
+SIS 96X I2C/SMBUS DRIVER
+P:	Mark M. Hoffman
+M:	mhoffman@lightlink.com
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+
 SIS FRAMEBUFFER DRIVER
 P:	Thomas Winischhofer
 M:	thomas@winischhofer.net
@@ -3256,6 +3333,12 @@
 M:	nico@cam.org
 S:	Maintained
 
+SMSC47B397 HARDWARE MONITOR DRIVER
+P:	Mark M. Hoffman
+M:	mhoffman@lightlink.com
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+
 SOFTMAC LAYER (IEEE 802.11)
 P:	Johannes Berg
 M:	johannes@sipsolutions.net
@@ -3275,9 +3358,19 @@
 L:	linux-raid@vger.kernel.org
 S:	Supported
 
-SOFTWARE SUSPEND:
+HIBERNATION (aka Software Suspend, aka swsusp):
 P:	Pavel Machek
 M:	pavel@suse.cz
+P:	Rafael J. Wysocki
+M:	rjw@sisk.pl
+L:	linux-pm@lists.linux-foundation.org
+S:	Supported
+
+SUSPEND TO RAM:
+P:	Pavel Machek
+M:	pavel@suse.cz
+P:	Rafael J. Wysocki
+M:	rjw@sisk.pl
 L:	linux-pm@lists.linux-foundation.org
 S:	Maintained
 
@@ -4104,6 +4197,11 @@
 W:	http://www.polyware.nl/~middelin/hobbies.html
 S:	Maintained
 
+ZS DECSTATION Z85C30 SERIAL DRIVER
+P:	Maciej W. Rozycki
+M:	macro@linux-mips.org
+S:	Maintained
+
 THE REST
 P:	Linus Torvalds
 S:	Buried alive in reporters
diff --git a/Makefile b/Makefile
index 7d25129..284d072 100644
--- a/Makefile
+++ b/Makefile
@@ -514,6 +514,12 @@
 # disable pointer signed / unsigned warnings in gcc 4.0
 CFLAGS += $(call cc-option,-Wno-pointer-sign,)
 
+# Use --build-id when available.
+LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
+			      $(call ld-option, -Wl$(comma)--build-id,))
+LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID)
+LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID)
+
 # Default kernel image to build when no specific target is given.
 # KBUILD_IMAGE may be overruled on the command line or
 # set in the environment
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index bd03dc9..026ba9a 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -119,8 +119,7 @@
 	}
 
 	nsyms = symtab->sh_size / sizeof(Elf64_Sym);
-	chains = kmalloc(nsyms * sizeof(struct got_entry), GFP_KERNEL);
-	memset(chains, 0, nsyms * sizeof(struct got_entry));
+	chains = kcalloc(nsyms, sizeof(struct got_entry), GFP_KERNEL);
 
 	got->sh_size = 0;
 	got->sh_addralign = 8;
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index 0cd0605..83a7818 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -315,9 +315,7 @@
 	/* When I and D space are separate, this will have to be fixed.  */
 	case PTRACE_POKETEXT: /* write the word at location addr. */
 	case PTRACE_POKEDATA:
-		tmp = data;
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1);
-		ret = (copied == sizeof(tmp)) ? 0 : -EIO;
+		ret = generic_ptrace_pokedata(child, addr, data);
 		break;
 
 	case PTRACE_POKEUSR: /* write the specified register */
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 80cfb75..b287314 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -65,7 +65,7 @@
 };
 
 /* Set to a secondary's cpuid when it comes online.  */
-static int smp_secondary_alive __initdata = 0;
+static int smp_secondary_alive __devinitdata = 0;
 
 /* Which cpus ids came online.  */
 cpumask_t cpu_online_map;
@@ -173,7 +173,7 @@
 }
 
 /* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
-static int __init
+static int __devinit
 wait_for_txrdy (unsigned long cpumask)
 {
 	unsigned long timeout;
@@ -358,7 +358,7 @@
 /*
  * Bring one cpu online.
  */
-static int __init
+static int __devinit
 smp_boot_one_cpu(int cpuid)
 {
 	struct task_struct *idle;
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index d6e665d..ec0f05e 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -184,6 +184,7 @@
 #endif
 	printk("%s(%d): %s %ld\n", current->comm, current->pid, str, err);
 	dik_show_regs(regs, r9_15);
+	add_taint(TAINT_DIE);
 	dik_show_trace((unsigned long *)(regs+1));
 	dik_show_code((unsigned int *)regs->pc);
 
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 449e76f..fe13daa 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -3,7 +3,7 @@
 OUTPUT_FORMAT("elf64-alpha")
 OUTPUT_ARCH(alpha)
 ENTRY(__start)
-PHDRS { kernel PT_LOAD ; }
+PHDRS { kernel PT_LOAD; note PT_NOTE; }
 jiffies = jiffies_64;
 SECTIONS
 {
@@ -28,6 +28,9 @@
   __ex_table : { *(__ex_table) }
   __stop___ex_table = .;
 
+  NOTES :kernel :note
+  .dummy : { *(.dummy) } :kernel
+
   RODATA
 
   /* Will be freed after init */
@@ -69,10 +72,7 @@
   . = ALIGN(8);
   SECURITY_INIT
 
-  . = ALIGN(8192);
-  __per_cpu_start = .;
-  .data.percpu : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(8192)
 
   . = ALIGN(2*8192);
   __init_end = .;
diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c
index ab3761c..8698e07 100644
--- a/arch/alpha/lib/checksum.c
+++ b/arch/alpha/lib/checksum.c
@@ -69,6 +69,7 @@
 	result = (result & 0xffffffff) + (result >> 32);
 	return (__force __wsum)result;
 }
+EXPORT_SYMBOL(csum_tcpudp_nofold);
 
 /*
  * Do a 64-bit checksum on an arbitrary memory area..
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index f586279..a0e18da 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -148,21 +148,17 @@
 	   the fault.  */
 	fault = handle_mm_fault(mm, vma, address, cause > 0);
 	up_read(&mm->mmap_sem);
-
-	switch (fault) {
-	      case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	      case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	      case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	      case VM_FAULT_OOM:
-		goto out_of_memory;
-	      default:
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	return;
 
 	/* Something tried to access memory that isn't in our memory map.
diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig
index 821865f..b2bbf21 100644
--- a/arch/arm/configs/badge4_defconfig
+++ b/arch/arm/configs/badge4_defconfig
@@ -708,7 +708,6 @@
 # I2C Hardware Bus support
 #
 CONFIG_I2C_ELEKTOR=m
-# CONFIG_I2C_ISA is not set
 # CONFIG_I2C_PARPORT is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_STUB is not set
diff --git a/arch/arm/configs/clps7500_defconfig b/arch/arm/configs/clps7500_defconfig
index af9ae538..49e9f9d 100644
--- a/arch/arm/configs/clps7500_defconfig
+++ b/arch/arm/configs/clps7500_defconfig
@@ -536,7 +536,6 @@
 # I2C Hardware Bus support
 #
 # CONFIG_I2C_ELEKTOR is not set
-# CONFIG_I2C_ISA is not set
 # CONFIG_I2C_PARPORT is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_PCA_ISA is not set
diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig
index 2a612d2..299dc22 100644
--- a/arch/arm/configs/footbridge_defconfig
+++ b/arch/arm/configs/footbridge_defconfig
@@ -748,7 +748,6 @@
 # CONFIG_I2C_ELEKTOR is not set
 # CONFIG_I2C_I801 is not set
 # CONFIG_I2C_I810 is not set
-# CONFIG_I2C_ISA is not set
 # CONFIG_I2C_NFORCE2 is not set
 # CONFIG_I2C_PARPORT is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
diff --git a/arch/arm/configs/neponset_defconfig b/arch/arm/configs/neponset_defconfig
index e86794a..92ccdc6 100644
--- a/arch/arm/configs/neponset_defconfig
+++ b/arch/arm/configs/neponset_defconfig
@@ -698,7 +698,6 @@
 # I2C Hardware Bus support
 #
 # CONFIG_I2C_ELEKTOR is not set
-# CONFIG_I2C_ISA is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_STUB is not set
 # CONFIG_I2C_PCA_ISA is not set
diff --git a/arch/arm/configs/picotux200_defconfig b/arch/arm/configs/picotux200_defconfig
index 339c489..3c0c4f1 100644
--- a/arch/arm/configs/picotux200_defconfig
+++ b/arch/arm/configs/picotux200_defconfig
@@ -735,7 +735,6 @@
 # I2C Hardware Bus support
 #
 CONFIG_I2C_AT91=m
-CONFIG_I2C_ISA=m
 # CONFIG_I2C_OCORES is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_STUB is not set
diff --git a/arch/arm/configs/rpc_defconfig b/arch/arm/configs/rpc_defconfig
index bc09126..8452dc8 100644
--- a/arch/arm/configs/rpc_defconfig
+++ b/arch/arm/configs/rpc_defconfig
@@ -558,7 +558,6 @@
 #
 # I2C Hardware Bus support
 #
-# CONFIG_I2C_ISA is not set
 # CONFIG_I2C_PARPORT is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_STUB is not set
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index a850da3..1d5150e 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -826,7 +826,6 @@
 # I2C Hardware Bus support
 #
 # CONFIG_I2C_ELEKTOR is not set
-CONFIG_I2C_ISA=m
 # CONFIG_I2C_OCORES is not set
 # CONFIG_I2C_PARPORT is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 6f2f46c..78c9f1a 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -657,7 +657,6 @@
 
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
-	unsigned long tmp;
 	int ret;
 
 	switch (request) {
@@ -666,12 +665,7 @@
 		 */
 		case PTRACE_PEEKTEXT:
 		case PTRACE_PEEKDATA:
-			ret = access_process_vm(child, addr, &tmp,
-						sizeof(unsigned long), 0);
-			if (ret == sizeof(unsigned long))
-				ret = put_user(tmp, (unsigned long __user *) data);
-			else
-				ret = -EIO;
+			ret = generic_ptrace_peekdata(child, addr, data);
 			break;
 
 		case PTRACE_PEEKUSR:
@@ -683,12 +677,7 @@
 		 */
 		case PTRACE_POKETEXT:
 		case PTRACE_POKEDATA:
-			ret = access_process_vm(child, addr, &data,
-						sizeof(unsigned long), 1);
-			if (ret == sizeof(unsigned long))
-				ret = 0;
-			else
-				ret = -EIO;
+			ret = generic_ptrace_pokedata(child, addr, data);
 			break;
 
 		case PTRACE_POKEUSR:
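The ptrace conversions in this series drop the open-coded access_process_vm()/put_user() sequences in favour of common helpers.  A minimal sketch of what generic_ptrace_peekdata() and generic_ptrace_pokedata() are assumed to do (the real definitions belong in kernel/ptrace.c and are not part of these hunks):

	int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
	{
		unsigned long tmp;
		int copied;

		/* Read one word from the traced task's address space... */
		copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
		if (copied != sizeof(tmp))
			return -EIO;
		/* ...and hand it back to the tracer through 'data'. */
		return put_user(tmp, (unsigned long __user *)data);
	}

	int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
	{
		/* Write one word into the traced task's address space. */
		int copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);

		return (copied == sizeof(data)) ? 0 : -EIO;
	}

Each per-arch PEEKTEXT/PEEKDATA and POKETEXT/POKEDATA case then collapses to a single call, as the arm hunk above and the arm26, avr32 and cris hunks below show.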
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 237f499..f2114bc 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -249,6 +249,7 @@
 	bust_spinlocks(1);
 	__die(str, err, thread, regs);
 	bust_spinlocks(0);
+	add_taint(TAINT_DIE);
 	spin_unlock_irq(&die_lock);
 
 	if (in_interrupt())
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 2b7a8f5..5ff5406 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -66,6 +66,7 @@
 		. = ALIGN(4096);
 		__per_cpu_start = .;
 			*(.data.percpu)
+			*(.data.percpu.shared_aligned)
 		__per_cpu_end = .;
 #ifndef CONFIG_XIP_KERNEL
 		__init_begin = _stext;
diff --git a/arch/arm/mach-at91/board-csb337.c b/arch/arm/mach-at91/board-csb337.c
index e18a41e..dde0899 100644
--- a/arch/arm/mach-at91/board-csb337.c
+++ b/arch/arm/mach-at91/board-csb337.c
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/i2c.h>
 #include <linux/spi/spi.h>
 #include <linux/mtd/physmap.h>
 
@@ -83,6 +84,13 @@
 	.pullup_pin	= AT91_PIN_PA24,
 };
 
+static struct i2c_board_info __initdata csb337_i2c_devices[] = {
+	{ I2C_BOARD_INFO("rtc-ds1307", 0x68),
+	  .type = "ds1307",
+	},
+};
+
+
 static struct at91_cf_data __initdata csb337_cf_data = {
 	/*
 	 * connector P4 on the CSB 337 mates to
@@ -161,6 +169,8 @@
 	at91_add_device_udc(&csb337_udc_data);
 	/* I2C */
 	at91_add_device_i2c();
+	i2c_register_board_info(0, csb337_i2c_devices,
+			ARRAY_SIZE(csb337_i2c_devices));
 	/* Compact Flash */
 	at91_set_gpio_input(AT91_PIN_PB22, 1);		/* IOIS16 */
 	at91_add_device_cf(&csb337_cf_data);
diff --git a/arch/arm/mach-iop13xx/pci.c b/arch/arm/mach-iop13xx/pci.c
index 9d63d7f..99d94cb 100644
--- a/arch/arm/mach-iop13xx/pci.c
+++ b/arch/arm/mach-iop13xx/pci.c
@@ -1002,11 +1002,10 @@
 	if (nr > 1)
 		return 0;
 
-	res = kmalloc(sizeof(struct resource) * 2, GFP_KERNEL);
+	res = kcalloc(2, sizeof(struct resource), GFP_KERNEL);
 	if (!res)
 		panic("PCI: unable to alloc resources");
 
-	memset(res, 0, sizeof(struct resource) * 2);
 
 	/* 'nr' assumptions:
 	 * ATUX is always 0
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index 390a97d..1873bd8 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -25,6 +25,7 @@
 #include <linux/serial_core.h>
 #include <linux/serial_8250.h>
 #include <linux/mtd/physmap.h>
+#include <linux/i2c.h>
 #include <linux/platform_device.h>
 #include <linux/reboot.h>
 #include <asm/hardware.h>
@@ -199,6 +200,12 @@
 	.resource	= &n2100_uart_resource,
 };
 
+static struct i2c_board_info __initdata n2100_i2c_devices[] = {
+	{
+		I2C_BOARD_INFO("rtc-rs5c372", 0x32),
+		.type = "rs5c372b",
+	},
+};
 
 /*
  * Pull PCA9532 GPIO #8 low to power off the machine.
@@ -248,6 +255,9 @@
 	platform_device_register(&iop3xx_dma_0_channel);
 	platform_device_register(&iop3xx_dma_1_channel);
 
+	i2c_register_board_info(0, n2100_i2c_devices,
+		ARRAY_SIZE(n2100_i2c_devices));
+
 	pm_power_off = n2100_power_off;
 
 	init_timer(&power_button_poll_timer);
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 75d4914..c04124a 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -183,20 +183,20 @@
 	 */
 survive:
 	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
-
-	/*
-	 * Handle the "normal" cases first - successful and sigbus
-	 */
-	switch (fault) {
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		return fault;
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-	case VM_FAULT_SIGBUS:
-		return fault;
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			return fault;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
+	return fault;
 
+out_of_memory:
 	if (!is_init(tsk))
 		goto out;
 
@@ -249,7 +249,7 @@
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
 	 */
-	if (fault >= VM_FAULT_MINOR)
+	if (likely(!(fault & VM_FAULT_ERROR)))
 		return 0;
 
 	/*
@@ -259,8 +259,7 @@
 	if (!user_mode(regs))
 		goto no_context;
 
-	switch (fault) {
-	case VM_FAULT_OOM:
+	if (fault & VM_FAULT_OOM) {
 		/*
 		 * We ran out of memory, or some other thing
 		 * happened to us that made us unable to handle
@@ -269,17 +268,15 @@
 		printk("VM: killing process %s\n", tsk->comm);
 		do_exit(SIGKILL);
 		return 0;
-
-	case VM_FAULT_SIGBUS:
+	}
+	if (fault & VM_FAULT_SIGBUS) {
 		/*
 		 * We had some memory, but were unable to
 		 * successfully fix up this page fault.
 		 */
 		sig = SIGBUS;
 		code = BUS_ADRERR;
-		break;
-
-	default:
+	} else {
 		/*
 		 * Something tried to access memory that
 		 * isn't in our memory map..
@@ -287,7 +284,6 @@
 		sig = SIGSEGV;
 		code = fault == VM_FAULT_BADACCESS ?
 			SEGV_ACCERR : SEGV_MAPERR;
-		break;
 	}
 
 	__do_user_fault(tsk, addr, fsr, sig, code, regs);
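The fault-handling rework follows from handle_mm_fault() now returning a bitmask rather than a plain enumeration, so callers test individual bits instead of switching on exact return values.  A sketch of the assumed flag layout (the authoritative definitions live in include/linux/mm.h, not in this section):

	#define VM_FAULT_OOM		0x0001
	#define VM_FAULT_SIGBUS		0x0002
	#define VM_FAULT_MAJOR		0x0004
	#define VM_FAULT_WRITE		0x0008	/* set when a COW break was performed */

	#define VM_FAULT_ERROR		(VM_FAULT_OOM | VM_FAULT_SIGBUS)

With this layout a successful fault has no error bits set, which is why the old "fault >= VM_FAULT_MINOR" check becomes "!(fault & VM_FAULT_ERROR)", and why the major/minor accounting can simply test VM_FAULT_MAJOR and fall back to min_flt otherwise.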
diff --git a/arch/arm26/kernel/ptrace.c b/arch/arm26/kernel/ptrace.c
index 4169279..0fefb869 100644
--- a/arch/arm26/kernel/ptrace.c
+++ b/arch/arm26/kernel/ptrace.c
@@ -531,7 +531,6 @@
 
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
-	unsigned long tmp;
 	int ret;
 
 	switch (request) {
@@ -540,12 +539,7 @@
 		 */
 		case PTRACE_PEEKTEXT:
 		case PTRACE_PEEKDATA:
-			ret = access_process_vm(child, addr, &tmp,
-						sizeof(unsigned long), 0);
-			if (ret == sizeof(unsigned long))
-				ret = put_user(tmp, (unsigned long *) data);
-			else
-				ret = -EIO;
+			ret = generic_ptrace_peekdata(child, addr, data);
 			break;
 
 		case PTRACE_PEEKUSR:
@@ -557,12 +551,7 @@
 		 */
 		case PTRACE_POKETEXT:
 		case PTRACE_POKEDATA:
-			ret = access_process_vm(child, addr, &data,
-						sizeof(unsigned long), 1);
-			if (ret == sizeof(unsigned long))
-				ret = 0;
-			else
-				ret = -EIO;
+			ret = generic_ptrace_pokedata(child, addr, data);
 			break;
 
 		case PTRACE_POKEUSR:
diff --git a/arch/arm26/kernel/traps.c b/arch/arm26/kernel/traps.c
index d594fb5..2911e2e 100644
--- a/arch/arm26/kernel/traps.c
+++ b/arch/arm26/kernel/traps.c
@@ -185,6 +185,7 @@
 	printk("Internal error: %s: %x\n", str, err);
 	printk("CPU: %d\n", smp_processor_id());
 	show_regs(regs);
+	add_taint(TAINT_DIE);
 	printk("Process %s (pid: %d, stack limit = 0x%p)\n",
 		current->comm, current->pid, end_of_stack(tsk));
 
diff --git a/arch/arm26/mm/fault.c b/arch/arm26/mm/fault.c
index 93c0cee..dec638a 100644
--- a/arch/arm26/mm/fault.c
+++ b/arch/arm26/mm/fault.c
@@ -170,20 +170,20 @@
 	 */
 survive:
 	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, DO_COW(fsr));
-
-	/*
-	 * Handle the "normal" cases first - successful and sigbus
-	 */
-	switch (fault) {
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		return fault;
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-	case VM_FAULT_SIGBUS:
-		return fault;
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			return fault;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
+	return fault;
 
+out_of_memory:
 	fault = -3; /* out of memory */
 	if (!is_init(tsk))
 		goto out;
@@ -225,13 +225,11 @@
 	/*
 	 * Handle the "normal" case first
 	 */
-	switch (fault) {
-	case VM_FAULT_MINOR:
-	case VM_FAULT_MAJOR:
+	if (likely(!(fault & VM_FAULT_ERROR)))
 		return 0;
-	case VM_FAULT_SIGBUS:
+	if (fault & VM_FAULT_SIGBUS)
 		goto do_sigbus;
-	}
+	/* else VM_FAULT_OOM */
 
 	/*
 	 * If we are in kernel mode at this point, we
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 3ec7658..d12346a 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -113,6 +113,10 @@
 	bool "ATNGW100 Network Gateway"
 endchoice
 
+if BOARD_ATSTK1000
+source "arch/avr32/boards/atstk1000/Kconfig"
+endif
+
 choice
 	prompt "Boot loader type"
 	default LOADER_U_BOOT
@@ -185,6 +189,27 @@
 
 endmenu
 
+menu "Power management options"
+
+menu "CPU Frequency scaling"
+
+source "drivers/cpufreq/Kconfig"
+
+config CPU_FREQ_AT32AP
+	bool "CPU frequency driver for AT32AP"
+	depends on CPU_FREQ && PLATFORM_AT32AP
+	default n
+	help
+	  This enables the CPU frequency driver for AT32AP processors.
+
+	  For details, take a look in <file:Documentation/cpu-freq>.
+
+	  If in doubt, say N.
+
+endmenu
+
+endmenu
+
 menu "Bus options"
 
 config PCI
diff --git a/arch/avr32/boards/atstk1000/Kconfig b/arch/avr32/boards/atstk1000/Kconfig
new file mode 100644
index 0000000..71bc7d3
--- /dev/null
+++ b/arch/avr32/boards/atstk1000/Kconfig
@@ -0,0 +1,53 @@
+# STK1000 customization
+
+if BOARD_ATSTK1002
+
+config BOARD_ATSTK1002_CUSTOM
+	bool "Non-default STK-1002 jumper settings"
+	help
+	  You will normally leave the jumpers on the CPU card at their
+	  default settings.  If you need to use certain peripherals,
+	  you will need to change some of those jumpers.
+
+if BOARD_ATSTK1002_CUSTOM
+
+config BOARD_ATSTK1002_SW1_CUSTOM
+	bool "SW1: use SSC1 (not SPI0)"
+	help
+	  This also prevents using the external DAC as an audio interface,
+	  and means you can't initialize the on-board QVGA display.
+
+config BOARD_ATSTK1002_SW2_CUSTOM
+	bool "SW2: use IRDA or TIMER0 (not UART-A, MMC/SD, and PS2-A)"
+	help
+	  If you change this, you'll want an updated boot loader that puts
+	  the console on UART-C, not UART-A.
+
+config BOARD_ATSTK1002_SW3_CUSTOM
+	bool "SW3: use TIMER1 (not SSC0 and GCLK)"
+	help
+	  This also prevents using the external DAC as an audio interface.
+
+config BOARD_ATSTK1002_SW4_CUSTOM
+	bool "SW4: use ISI/Camera (not GPIOs, SPI1, and PS2-B)"
+	help
+	  To use the camera interface you'll need a custom card (on the
+	  PCI-format connector) connecting a video sensor.
+
+config BOARD_ATSTK1002_SW5_CUSTOM
+	bool "SW5: use MACB1 (not LCDC)"
+
+config BOARD_ATSTK1002_SW6_CUSTOM
+	bool "SW6: more GPIOs (not MACB0)"
+
+endif	# custom
+
+config BOARD_ATSTK1002_SPI1
+	bool "Configure SPI1 controller"
+	depends on !BOARD_ATSTK1002_SW4_CUSTOM
+	help
+	  All the signals for the second SPI controller are available on
+	  GPIO lines and accessed through the J1 jumper block.  Say "y"
+	  here to configure that SPI controller.
+
+endif	# stk 1002
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index e253e86..cb93eab 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -27,15 +27,27 @@
 
 #include "atstk1000.h"
 
-#define	SW2_DEFAULT		/* MMCI and UART_A available */
 
 struct eth_addr {
 	u8 addr[6];
 };
 
 static struct eth_addr __initdata hw_addr[2];
-static struct eth_platform_data __initdata eth_data[2];
+static struct eth_platform_data __initdata eth_data[2] = {
+	{
+		/*
+		 * The MDIO pullups on STK1000 are a bit too weak for
+		 * the autodetection to work properly, so we have to
+		 * mask out everything but the correct address.
+		 */
+		.phy_mask	= ~(1U << 16),
+	},
+	{
+		.phy_mask	= ~(1U << 17),
+	},
+};
 
+#ifndef CONFIG_BOARD_ATSTK1002_SW1_CUSTOM
 static struct spi_board_info spi0_board_info[] __initdata = {
 	{
 		/* QVGA display */
@@ -45,6 +57,13 @@
 		.mode		= SPI_MODE_3,
 	},
 };
+#endif
+
+#ifdef CONFIG_BOARD_ATSTK1002_SPI1
+static struct spi_board_info spi1_board_info[] __initdata = { {
+	/* patch in custom entries here */
+} };
+#endif
 
 /*
  * The next two functions should go away as the boot loader is
@@ -103,10 +122,10 @@
 
 void __init setup_board(void)
 {
-#ifdef	SW2_DEFAULT
-	at32_map_usart(1, 0);	/* USART 1/A: /dev/ttyS0, DB9 */
-#else
+#ifdef	CONFIG_BOARD_ATSTK1002_SW2_CUSTOM
 	at32_map_usart(0, 1);	/* USART 0/B: /dev/ttyS1, IRDA */
+#else
+	at32_map_usart(1, 0);	/* USART 1/A: /dev/ttyS0, DB9 */
 #endif
 	/* USART 2/unused: expansion connector */
 	at32_map_usart(3, 2);	/* USART 3/C: /dev/ttyS2, DB9 */
@@ -140,18 +159,31 @@
 
 	at32_add_system_devices();
 
-#ifdef	SW2_DEFAULT
-	at32_add_device_usart(0);
-#else
+#ifdef	CONFIG_BOARD_ATSTK1002_SW2_CUSTOM
 	at32_add_device_usart(1);
+#else
+	at32_add_device_usart(0);
 #endif
 	at32_add_device_usart(2);
 
+#ifndef CONFIG_BOARD_ATSTK1002_SW6_CUSTOM
 	set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
-
+#endif
+#ifndef CONFIG_BOARD_ATSTK1002_SW1_CUSTOM
 	at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info));
+#endif
+#ifdef CONFIG_BOARD_ATSTK1002_SPI1
+	at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info));
+#endif
+#ifdef CONFIG_BOARD_ATSTK1002_SW5_CUSTOM
+	set_hw_addr(at32_add_device_eth(1, &eth_data[1]));
+#else
 	at32_add_device_lcdc(0, &atstk1000_lcdc_data,
 			     fbmem_start, fbmem_size);
+#endif
+#ifndef CONFIG_BOARD_ATSTK1002_SW3_CUSTOM
+	at32_add_device_ssc(0, ATMEL_SSC_TX);
+#endif
 
 	return 0;
 }
diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c
index 3c36c2d..39060cb 100644
--- a/arch/avr32/kernel/ptrace.c
+++ b/arch/avr32/kernel/ptrace.c
@@ -153,7 +153,6 @@
 
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
-	unsigned long tmp;
 	int ret;
 
 	pr_debug("arch_ptrace(%ld, %d, %#lx, %#lx)\n",
@@ -166,11 +165,7 @@
 	/* Read the word at location addr in the child process */
 	case PTRACE_PEEKTEXT:
 	case PTRACE_PEEKDATA:
-		ret = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		if (ret == sizeof(tmp))
-			ret = put_user(tmp, (unsigned long __user *)data);
-		else
-			ret = -EIO;
+		ret = generic_ptrace_peekdata(child, addr, data);
 		break;
 
 	case PTRACE_PEEKUSR:
@@ -181,11 +176,7 @@
 	/* Write the word in data at location addr */
 	case PTRACE_POKETEXT:
 	case PTRACE_POKEDATA:
-		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
-		if (ret == sizeof(data))
-			ret = 0;
-		else
-			ret = -EIO;
+		ret = generic_ptrace_pokedata(child, addr, data);
 		break;
 
 	case PTRACE_POKEUSR:
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
index b279d66..d08b0bc 100644
--- a/arch/avr32/kernel/setup.c
+++ b/arch/avr32/kernel/setup.c
@@ -313,7 +313,7 @@
 
 static int __init parse_tag_rdimg(struct tag *tag)
 {
-#ifdef CONFIG_INITRD
+#ifdef CONFIG_BLK_DEV_INITRD
 	struct tag_mem_range *mem = &tag->u.mem_range;
 	int ret;
 
@@ -323,7 +323,7 @@
 		return 0;
 	}
 
-	ret = add_reserved_region(mem->start, mem->start + mem->size - 1,
+	ret = add_reserved_region(mem->addr, mem->addr + mem->size - 1,
 				  "initrd");
 	if (ret) {
 		printk(KERN_WARNING
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
index aaa7928..9a73ce7 100644
--- a/arch/avr32/kernel/traps.c
+++ b/arch/avr32/kernel/traps.c
@@ -56,6 +56,7 @@
 	show_regs_log_lvl(regs, KERN_EMERG);
 	show_stack_log_lvl(current, regs->sp, regs, KERN_EMERG);
 	bust_spinlocks(0);
+	add_taint(TAINT_DIE);
 	spin_unlock_irq(&die_lock);
 
 	if (in_interrupt())
diff --git a/arch/avr32/mach-at32ap/Makefile b/arch/avr32/mach-at32ap/Makefile
index f1d3957..a8b4450 100644
--- a/arch/avr32/mach-at32ap/Makefile
+++ b/arch/avr32/mach-at32ap/Makefile
@@ -1,3 +1,4 @@
 obj-y				+= at32ap.o clock.o intc.o extint.o pio.o hsmc.o
 obj-$(CONFIG_CPU_AT32AP7000)	+= at32ap7000.o
 obj-$(CONFIG_CPU_AT32AP7000)	+= time-tc.o
+obj-$(CONFIG_CPU_FREQ_AT32AP)	+= cpufreq.o
diff --git a/arch/avr32/mach-at32ap/at32ap.c b/arch/avr32/mach-at32ap/at32ap.c
index 90f207e..7c4987f 100644
--- a/arch/avr32/mach-at32ap/at32ap.c
+++ b/arch/avr32/mach-at32ap/at32ap.c
@@ -11,41 +11,10 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 
-#include <asm/io.h>
-
 #include <asm/arch/init.h>
-#include <asm/arch/sm.h>
-
-struct at32_sm system_manager;
-
-static int __init at32_sm_init(void)
-{
-	struct resource *regs;
-	struct at32_sm *sm = &system_manager;
-	int ret = -ENXIO;
-
-	regs = platform_get_resource(&at32_sm_device, IORESOURCE_MEM, 0);
-	if (!regs)
-		goto fail;
-
-	spin_lock_init(&sm->lock);
-	sm->pdev = &at32_sm_device;
-
-	ret = -ENOMEM;
-	sm->regs = ioremap(regs->start, regs->end - regs->start + 1);
-	if (!sm->regs)
-		goto fail;
-
-	return 0;
-
-fail:
-	printk(KERN_ERR "Failed to initialize System Manager: %d\n", ret);
-	return ret;
-}
 
 void __init setup_platform(void)
 {
-	at32_sm_init();
 	at32_clock_init();
 	at32_portmux_init();
 }
diff --git a/arch/avr32/mach-at32ap/at32ap7000.c b/arch/avr32/mach-at32ap/at32ap7000.c
index 4dda42d..64cc558 100644
--- a/arch/avr32/mach-at32ap/at32ap7000.c
+++ b/arch/avr32/mach-at32ap/at32ap7000.c
@@ -17,14 +17,20 @@
 #include <asm/arch/at32ap7000.h>
 #include <asm/arch/board.h>
 #include <asm/arch/portmux.h>
-#include <asm/arch/sm.h>
 
 #include <video/atmel_lcdc.h>
 
 #include "clock.h"
 #include "hmatrix.h"
 #include "pio.h"
-#include "sm.h"
+#include "pm.h"
+
+/*
+ * We can reduce the code size a bit by using a constant here. Since
+ * this file is completely chip-specific, it's safe to not use
+ * ioremap. Generic drivers should of course never do this.
+ */
+#define AT32_PM_BASE	0xfff00000
 
 #define PBMEM(base)					\
 	{						\
@@ -88,6 +94,8 @@
 	.index		= _index,				\
 }
 
+static DEFINE_SPINLOCK(pm_lock);
+
 unsigned long at32ap7000_osc_rates[3] = {
 	[0] = 32768,
 	/* FIXME: these are ATSTK1002-specific */
@@ -104,11 +112,11 @@
 {
 	unsigned long div, mul, rate;
 
-	if (!(control & SM_BIT(PLLEN)))
+	if (!(control & PM_BIT(PLLEN)))
 		return 0;
 
-	div = SM_BFEXT(PLLDIV, control) + 1;
-	mul = SM_BFEXT(PLLMUL, control) + 1;
+	div = PM_BFEXT(PLLDIV, control) + 1;
+	mul = PM_BFEXT(PLLMUL, control) + 1;
 
 	rate = clk->parent->get_rate(clk->parent);
 	rate = (rate + div / 2) / div;
@@ -121,7 +129,7 @@
 {
 	u32 control;
 
-	control = sm_readl(&system_manager, PM_PLL0);
+	control = pm_readl(PLL0);
 
 	return pll_get_rate(clk, control);
 }
@@ -130,7 +138,7 @@
 {
 	u32 control;
 
-	control = sm_readl(&system_manager, PM_PLL1);
+	control = pm_readl(PLL1);
 
 	return pll_get_rate(clk, control);
 }
@@ -187,108 +195,139 @@
 
 static void cpu_clk_mode(struct clk *clk, int enabled)
 {
-	struct at32_sm *sm = &system_manager;
 	unsigned long flags;
 	u32 mask;
 
-	spin_lock_irqsave(&sm->lock, flags);
-	mask = sm_readl(sm, PM_CPU_MASK);
+	spin_lock_irqsave(&pm_lock, flags);
+	mask = pm_readl(CPU_MASK);
 	if (enabled)
 		mask |= 1 << clk->index;
 	else
 		mask &= ~(1 << clk->index);
-	sm_writel(sm, PM_CPU_MASK, mask);
-	spin_unlock_irqrestore(&sm->lock, flags);
+	pm_writel(CPU_MASK, mask);
+	spin_unlock_irqrestore(&pm_lock, flags);
 }
 
 static unsigned long cpu_clk_get_rate(struct clk *clk)
 {
 	unsigned long cksel, shift = 0;
 
-	cksel = sm_readl(&system_manager, PM_CKSEL);
-	if (cksel & SM_BIT(CPUDIV))
-		shift = SM_BFEXT(CPUSEL, cksel) + 1;
+	cksel = pm_readl(CKSEL);
+	if (cksel & PM_BIT(CPUDIV))
+		shift = PM_BFEXT(CPUSEL, cksel) + 1;
 
 	return bus_clk_get_rate(clk, shift);
 }
 
+static long cpu_clk_set_rate(struct clk *clk, unsigned long rate, int apply)
+{
+	u32 control;
+	unsigned long parent_rate, child_div, actual_rate, div;
+
+	parent_rate = clk->parent->get_rate(clk->parent);
+	control = pm_readl(CKSEL);
+
+	if (control & PM_BIT(HSBDIV))
+		child_div = 1 << (PM_BFEXT(HSBSEL, control) + 1);
+	else
+		child_div = 1;
+
+	if (rate > 3 * (parent_rate / 4) || child_div == 1) {
+		actual_rate = parent_rate;
+		control &= ~PM_BIT(CPUDIV);
+	} else {
+		unsigned int cpusel;
+		div = (parent_rate + rate / 2) / rate;
+		if (div > child_div)
+			div = child_div;
+		cpusel = (div > 1) ? (fls(div) - 2) : 0;
+		control = PM_BIT(CPUDIV) | PM_BFINS(CPUSEL, cpusel, control);
+		actual_rate = parent_rate / (1 << (cpusel + 1));
+	}
+
+	pr_debug("clk %s: new rate %lu (actual rate %lu)\n",
+			clk->name, rate, actual_rate);
+
+	if (apply)
+		pm_writel(CKSEL, control);
+
+	return actual_rate;
+}
+
 static void hsb_clk_mode(struct clk *clk, int enabled)
 {
-	struct at32_sm *sm = &system_manager;
 	unsigned long flags;
 	u32 mask;
 
-	spin_lock_irqsave(&sm->lock, flags);
-	mask = sm_readl(sm, PM_HSB_MASK);
+	spin_lock_irqsave(&pm_lock, flags);
+	mask = pm_readl(HSB_MASK);
 	if (enabled)
 		mask |= 1 << clk->index;
 	else
 		mask &= ~(1 << clk->index);
-	sm_writel(sm, PM_HSB_MASK, mask);
-	spin_unlock_irqrestore(&sm->lock, flags);
+	pm_writel(HSB_MASK, mask);
+	spin_unlock_irqrestore(&pm_lock, flags);
 }
 
 static unsigned long hsb_clk_get_rate(struct clk *clk)
 {
 	unsigned long cksel, shift = 0;
 
-	cksel = sm_readl(&system_manager, PM_CKSEL);
-	if (cksel & SM_BIT(HSBDIV))
-		shift = SM_BFEXT(HSBSEL, cksel) + 1;
+	cksel = pm_readl(CKSEL);
+	if (cksel & PM_BIT(HSBDIV))
+		shift = PM_BFEXT(HSBSEL, cksel) + 1;
 
 	return bus_clk_get_rate(clk, shift);
 }
 
 static void pba_clk_mode(struct clk *clk, int enabled)
 {
-	struct at32_sm *sm = &system_manager;
 	unsigned long flags;
 	u32 mask;
 
-	spin_lock_irqsave(&sm->lock, flags);
-	mask = sm_readl(sm, PM_PBA_MASK);
+	spin_lock_irqsave(&pm_lock, flags);
+	mask = pm_readl(PBA_MASK);
 	if (enabled)
 		mask |= 1 << clk->index;
 	else
 		mask &= ~(1 << clk->index);
-	sm_writel(sm, PM_PBA_MASK, mask);
-	spin_unlock_irqrestore(&sm->lock, flags);
+	pm_writel(PBA_MASK, mask);
+	spin_unlock_irqrestore(&pm_lock, flags);
 }
 
 static unsigned long pba_clk_get_rate(struct clk *clk)
 {
 	unsigned long cksel, shift = 0;
 
-	cksel = sm_readl(&system_manager, PM_CKSEL);
-	if (cksel & SM_BIT(PBADIV))
-		shift = SM_BFEXT(PBASEL, cksel) + 1;
+	cksel = pm_readl(CKSEL);
+	if (cksel & PM_BIT(PBADIV))
+		shift = PM_BFEXT(PBASEL, cksel) + 1;
 
 	return bus_clk_get_rate(clk, shift);
 }
 
 static void pbb_clk_mode(struct clk *clk, int enabled)
 {
-	struct at32_sm *sm = &system_manager;
 	unsigned long flags;
 	u32 mask;
 
-	spin_lock_irqsave(&sm->lock, flags);
-	mask = sm_readl(sm, PM_PBB_MASK);
+	spin_lock_irqsave(&pm_lock, flags);
+	mask = pm_readl(PBB_MASK);
 	if (enabled)
 		mask |= 1 << clk->index;
 	else
 		mask &= ~(1 << clk->index);
-	sm_writel(sm, PM_PBB_MASK, mask);
-	spin_unlock_irqrestore(&sm->lock, flags);
+	pm_writel(PBB_MASK, mask);
+	spin_unlock_irqrestore(&pm_lock, flags);
 }
 
 static unsigned long pbb_clk_get_rate(struct clk *clk)
 {
 	unsigned long cksel, shift = 0;
 
-	cksel = sm_readl(&system_manager, PM_CKSEL);
-	if (cksel & SM_BIT(PBBDIV))
-		shift = SM_BFEXT(PBBSEL, cksel) + 1;
+	cksel = pm_readl(CKSEL);
+	if (cksel & PM_BIT(PBBDIV))
+		shift = PM_BFEXT(PBBSEL, cksel) + 1;
 
 	return bus_clk_get_rate(clk, shift);
 }
@@ -296,6 +335,7 @@
 static struct clk cpu_clk = {
 	.name		= "cpu",
 	.get_rate	= cpu_clk_get_rate,
+	.set_rate	= cpu_clk_set_rate,
 	.users		= 1,
 };
 static struct clk hsb_clk = {
@@ -327,12 +367,12 @@
 {
 	u32 control;
 
-	control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
+	control = pm_readl(GCCTRL(clk->index));
 	if (enabled)
-		control |= SM_BIT(CEN);
+		control |= PM_BIT(CEN);
 	else
-		control &= ~SM_BIT(CEN);
-	sm_writel(&system_manager, PM_GCCTRL + 4 * clk->index, control);
+		control &= ~PM_BIT(CEN);
+	pm_writel(GCCTRL(clk->index), control);
 }
 
 static unsigned long genclk_get_rate(struct clk *clk)
@@ -340,9 +380,9 @@
 	u32 control;
 	unsigned long div = 1;
 
-	control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
-	if (control & SM_BIT(DIVEN))
-		div = 2 * (SM_BFEXT(DIV, control) + 1);
+	control = pm_readl(GCCTRL(clk->index));
+	if (control & PM_BIT(DIVEN))
+		div = 2 * (PM_BFEXT(DIV, control) + 1);
 
 	return clk->parent->get_rate(clk->parent) / div;
 }
@@ -353,23 +393,22 @@
 	unsigned long parent_rate, actual_rate, div;
 
 	parent_rate = clk->parent->get_rate(clk->parent);
-	control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
+	control = pm_readl(GCCTRL(clk->index));
 
 	if (rate > 3 * parent_rate / 4) {
 		actual_rate = parent_rate;
-		control &= ~SM_BIT(DIVEN);
+		control &= ~PM_BIT(DIVEN);
 	} else {
 		div = (parent_rate + rate) / (2 * rate) - 1;
-		control = SM_BFINS(DIV, div, control) | SM_BIT(DIVEN);
+		control = PM_BFINS(DIV, div, control) | PM_BIT(DIVEN);
 		actual_rate = parent_rate / (2 * (div + 1));
 	}
 
-	printk("clk %s: new rate %lu (actual rate %lu)\n",
-	       clk->name, rate, actual_rate);
+	dev_dbg(clk->dev, "clk %s: new rate %lu (actual rate %lu)\n",
+		clk->name, rate, actual_rate);
 
 	if (apply)
-		sm_writel(&system_manager, PM_GCCTRL + 4 * clk->index,
-			  control);
+		pm_writel(GCCTRL(clk->index), control);
 
 	return actual_rate;
 }
@@ -378,24 +417,24 @@
 {
 	u32 control;
 
-	printk("clk %s: new parent %s (was %s)\n",
-	       clk->name, parent->name, clk->parent->name);
+	dev_dbg(clk->dev, "clk %s: new parent %s (was %s)\n",
+		clk->name, parent->name, clk->parent->name);
 
-	control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
+	control = pm_readl(GCCTRL(clk->index));
 
 	if (parent == &osc1 || parent == &pll1)
-		control |= SM_BIT(OSCSEL);
+		control |= PM_BIT(OSCSEL);
 	else if (parent == &osc0 || parent == &pll0)
-		control &= ~SM_BIT(OSCSEL);
+		control &= ~PM_BIT(OSCSEL);
 	else
 		return -EINVAL;
 
 	if (parent == &pll0 || parent == &pll1)
-		control |= SM_BIT(PLLSEL);
+		control |= PM_BIT(PLLSEL);
 	else
-		control &= ~SM_BIT(PLLSEL);
+		control &= ~PM_BIT(PLLSEL);
 
-	sm_writel(&system_manager, PM_GCCTRL + 4 * clk->index, control);
+	pm_writel(GCCTRL(clk->index), control);
 	clk->parent = parent;
 
 	return 0;
@@ -408,11 +447,11 @@
 
 	BUG_ON(clk->index > 7);
 
-	control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
-	if (control & SM_BIT(OSCSEL))
-		parent = (control & SM_BIT(PLLSEL)) ? &pll1 : &osc1;
+	control = pm_readl(GCCTRL(clk->index));
+	if (control & PM_BIT(OSCSEL))
+		parent = (control & PM_BIT(PLLSEL)) ? &pll1 : &osc1;
 	else
-		parent = (control & SM_BIT(PLLSEL)) ? &pll0 : &osc0;
+		parent = (control & PM_BIT(PLLSEL)) ? &pll0 : &osc0;
 
 	clk->parent = parent;
 }
@@ -420,21 +459,53 @@
 /* --------------------------------------------------------------------
  *  System peripherals
  * -------------------------------------------------------------------- */
-static struct resource sm_resource[] = {
-	PBMEM(0xfff00000),
-	NAMED_IRQ(19, "eim"),
-	NAMED_IRQ(20, "pm"),
-	NAMED_IRQ(21, "rtc"),
+static struct resource at32_pm0_resource[] = {
+	{
+		.start	= 0xfff00000,
+		.end	= 0xfff0007f,
+		.flags	= IORESOURCE_MEM,
+	},
+	IRQ(20),
 };
-struct platform_device at32_sm_device = {
-	.name		= "sm",
-	.id		= 0,
-	.resource	= sm_resource,
-	.num_resources	= ARRAY_SIZE(sm_resource),
+
+static struct resource at32ap700x_rtc0_resource[] = {
+	{
+		.start	= 0xfff00080,
+		.end	= 0xfff000af,
+		.flags	= IORESOURCE_MEM,
+	},
+	IRQ(21),
 };
-static struct clk at32_sm_pclk = {
+
+static struct resource at32_wdt0_resource[] = {
+	{
+		.start	= 0xfff000b0,
+		.end	= 0xfff000bf,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct resource at32_eic0_resource[] = {
+	{
+		.start	= 0xfff00100,
+		.end	= 0xfff0013f,
+		.flags	= IORESOURCE_MEM,
+	},
+	IRQ(19),
+};
+
+DEFINE_DEV(at32_pm, 0);
+DEFINE_DEV(at32ap700x_rtc, 0);
+DEFINE_DEV(at32_wdt, 0);
+DEFINE_DEV(at32_eic, 0);
+
+/*
+ * Peripheral clock for PM, RTC, WDT and EIC. PM will ensure that this
+ * is always running.
+ */
+static struct clk at32_pm_pclk = {
 	.name		= "pclk",
-	.dev		= &at32_sm_device.dev,
+	.dev		= &at32_pm0_device.dev,
 	.parent		= &pbb_clk,
 	.mode		= pbb_clk_mode,
 	.get_rate	= pbb_clk_get_rate,
@@ -583,10 +654,11 @@
 
 void __init at32_add_system_devices(void)
 {
-	system_manager.eim_first_irq = EIM_IRQ_BASE;
-
-	platform_device_register(&at32_sm_device);
+	platform_device_register(&at32_pm0_device);
 	platform_device_register(&at32_intc0_device);
+	platform_device_register(&at32ap700x_rtc0_device);
+	platform_device_register(&at32_wdt0_device);
+	platform_device_register(&at32_eic0_device);
 	platform_device_register(&smc0_device);
 	platform_device_register(&pdc_device);
 
@@ -1013,6 +1085,89 @@
 }
 
 /* --------------------------------------------------------------------
+ *  SSC
+ * -------------------------------------------------------------------- */
+static struct resource ssc0_resource[] = {
+	PBMEM(0xffe01c00),
+	IRQ(10),
+};
+DEFINE_DEV(ssc, 0);
+DEV_CLK(pclk, ssc0, pba, 7);
+
+static struct resource ssc1_resource[] = {
+	PBMEM(0xffe02000),
+	IRQ(11),
+};
+DEFINE_DEV(ssc, 1);
+DEV_CLK(pclk, ssc1, pba, 8);
+
+static struct resource ssc2_resource[] = {
+	PBMEM(0xffe02400),
+	IRQ(12),
+};
+DEFINE_DEV(ssc, 2);
+DEV_CLK(pclk, ssc2, pba, 9);
+
+struct platform_device *__init
+at32_add_device_ssc(unsigned int id, unsigned int flags)
+{
+	struct platform_device *pdev;
+
+	switch (id) {
+	case 0:
+		pdev = &ssc0_device;
+		if (flags & ATMEL_SSC_RF)
+			select_peripheral(PA(21), PERIPH_A, 0);	/* RF */
+		if (flags & ATMEL_SSC_RK)
+			select_peripheral(PA(22), PERIPH_A, 0);	/* RK */
+		if (flags & ATMEL_SSC_TK)
+			select_peripheral(PA(23), PERIPH_A, 0);	/* TK */
+		if (flags & ATMEL_SSC_TF)
+			select_peripheral(PA(24), PERIPH_A, 0);	/* TF */
+		if (flags & ATMEL_SSC_TD)
+			select_peripheral(PA(25), PERIPH_A, 0);	/* TD */
+		if (flags & ATMEL_SSC_RD)
+			select_peripheral(PA(26), PERIPH_A, 0);	/* RD */
+		break;
+	case 1:
+		pdev = &ssc1_device;
+		if (flags & ATMEL_SSC_RF)
+			select_peripheral(PA(0), PERIPH_B, 0);	/* RF */
+		if (flags & ATMEL_SSC_RK)
+			select_peripheral(PA(1), PERIPH_B, 0);	/* RK */
+		if (flags & ATMEL_SSC_TK)
+			select_peripheral(PA(2), PERIPH_B, 0);	/* TK */
+		if (flags & ATMEL_SSC_TF)
+			select_peripheral(PA(3), PERIPH_B, 0);	/* TF */
+		if (flags & ATMEL_SSC_TD)
+			select_peripheral(PA(4), PERIPH_B, 0);	/* TD */
+		if (flags & ATMEL_SSC_RD)
+			select_peripheral(PA(5), PERIPH_B, 0);	/* RD */
+		break;
+	case 2:
+		pdev = &ssc2_device;
+		if (flags & ATMEL_SSC_TD)
+			select_peripheral(PB(13), PERIPH_A, 0);	/* TD */
+		if (flags & ATMEL_SSC_RD)
+			select_peripheral(PB(14), PERIPH_A, 0);	/* RD */
+		if (flags & ATMEL_SSC_TK)
+			select_peripheral(PB(15), PERIPH_A, 0);	/* TK */
+		if (flags & ATMEL_SSC_TF)
+			select_peripheral(PB(16), PERIPH_A, 0);	/* TF */
+		if (flags & ATMEL_SSC_RF)
+			select_peripheral(PB(17), PERIPH_A, 0);	/* RF */
+		if (flags & ATMEL_SSC_RK)
+			select_peripheral(PB(18), PERIPH_A, 0);	/* RK */
+		break;
+	default:
+		return NULL;
+	}
+
+	platform_device_register(pdev);
+	return pdev;
+}
+
+/* --------------------------------------------------------------------
  *  GCLK
  * -------------------------------------------------------------------- */
 static struct clk gclk0 = {
@@ -1066,7 +1221,7 @@
 	&hsb_clk,
 	&pba_clk,
 	&pbb_clk,
-	&at32_sm_pclk,
+	&at32_pm_pclk,
 	&at32_intc0_pclk,
 	&hmatrix_clk,
 	&ebi_clk,
@@ -1094,6 +1249,9 @@
 	&atmel_spi1_spi_clk,
 	&atmel_lcdfb0_hck1,
 	&atmel_lcdfb0_pixclk,
+	&ssc0_pclk,
+	&ssc1_pclk,
+	&ssc2_pclk,
 	&gclk0,
 	&gclk1,
 	&gclk2,
@@ -1113,18 +1271,20 @@
 
 void __init at32_clock_init(void)
 {
-	struct at32_sm *sm = &system_manager;
 	u32 cpu_mask = 0, hsb_mask = 0, pba_mask = 0, pbb_mask = 0;
 	int i;
 
-	if (sm_readl(sm, PM_MCCTRL) & SM_BIT(PLLSEL))
+	if (pm_readl(MCCTRL) & PM_BIT(PLLSEL)) {
 		main_clock = &pll0;
-	else
+		cpu_clk.parent = &pll0;
+	} else {
 		main_clock = &osc0;
+		cpu_clk.parent = &osc0;
+	}
 
-	if (sm_readl(sm, PM_PLL0) & SM_BIT(PLLOSC))
+	if (pm_readl(PLL0) & PM_BIT(PLLOSC))
 		pll0.parent = &osc1;
-	if (sm_readl(sm, PM_PLL1) & SM_BIT(PLLOSC))
+	if (pm_readl(PLL1) & PM_BIT(PLLOSC))
 		pll1.parent = &osc1;
 
 	genclk_init_parent(&gclk0);
@@ -1157,8 +1317,8 @@
 			pbb_mask |= 1 << clk->index;
 	}
 
-	sm_writel(sm, PM_CPU_MASK, cpu_mask);
-	sm_writel(sm, PM_HSB_MASK, hsb_mask);
-	sm_writel(sm, PM_PBA_MASK, pba_mask);
-	sm_writel(sm, PM_PBB_MASK, pbb_mask);
+	pm_writel(CPU_MASK, cpu_mask);
+	pm_writel(HSB_MASK, hsb_mask);
+	pm_writel(PBA_MASK, pba_mask);
+	pm_writel(PBB_MASK, pbb_mask);
 }
diff --git a/arch/avr32/mach-at32ap/cpufreq.c b/arch/avr32/mach-at32ap/cpufreq.c
new file mode 100644
index 0000000..235524b
--- /dev/null
+++ b/arch/avr32/mach-at32ap/cpufreq.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2004-2007 Atmel Corporation
+ *
+ * Based on MIPS implementation arch/mips/kernel/time.c
+ *   Copyright 2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*#define DEBUG*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <asm/system.h>
+
+static struct clk *cpuclk;
+
+static int at32_verify_speed(struct cpufreq_policy *policy)
+{
+	if (policy->cpu != 0)
+		return -EINVAL;
+
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+			policy->cpuinfo.max_freq);
+	return 0;
+}
+
+static unsigned int at32_get_speed(unsigned int cpu)
+{
+	/* No SMP support */
+	if (cpu)
+		return 0;
+	return (unsigned int)((clk_get_rate(cpuclk) + 500) / 1000);
+}
+
+static int at32_set_target(struct cpufreq_policy *policy,
+			  unsigned int target_freq,
+			  unsigned int relation)
+{
+	struct cpufreq_freqs freqs;
+	long freq;
+
+	/* Convert target_freq from kHz to Hz */
+	freq = clk_round_rate(cpuclk, target_freq * 1000);
+
+	/* Check if policy->min <= new_freq <= policy->max */
+	if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
+		return -EINVAL;
+
+	pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
+
+	freqs.old = at32_get_speed(0);
+	freqs.new = (freq + 500) / 1000;
+	freqs.cpu = 0;
+	freqs.flags = 0;
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	clk_set_rate(cpuclk, freq);
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+	pr_debug("cpufreq: set frequency %lu Hz\n", freq);
+
+	return 0;
+}
+
+static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
+{
+	if (policy->cpu != 0)
+		return -EINVAL;
+
+	cpuclk = clk_get(NULL, "cpu");
+	if (IS_ERR(cpuclk)) {
+		pr_debug("cpufreq: could not get CPU clk\n");
+		return PTR_ERR(cpuclk);
+	}
+
+	policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+	policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+	policy->cpuinfo.transition_latency = 0;
+	policy->cur = at32_get_speed(0);
+	policy->min = policy->cpuinfo.min_freq;
+	policy->max = policy->cpuinfo.max_freq;
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+
+	printk("cpufreq: AT32AP CPU frequency driver\n");
+
+	return 0;
+}
+
+static struct cpufreq_driver at32_driver = {
+	.name		= "at32ap",
+	.owner		= THIS_MODULE,
+	.init		= at32_cpufreq_driver_init,
+	.verify		= at32_verify_speed,
+	.target		= at32_set_target,
+	.get		= at32_get_speed,
+	.flags		= CPUFREQ_STICKY,
+};
+
+static int __init at32_cpufreq_init(void)
+{
+	return cpufreq_register_driver(&at32_driver);
+}
+
+arch_initcall(at32_cpufreq_init);
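The cpufreq core works in kHz while the clk framework works in Hz, so this driver converts at each boundary and rounds to the nearest kHz on the way out.  A small illustration of the two conversions used above (cpuclk and the rate values are illustrative only):

	struct clk *cpuclk = clk_get(NULL, "cpu");	/* the "cpu" clock registered earlier */
	unsigned long hz = clk_get_rate(cpuclk);

	/* Hz -> kHz, rounded to nearest, as in at32_get_speed() */
	unsigned int khz = (unsigned int)((hz + 500) / 1000);

	/* kHz -> Hz: scale up, then let the clk framework round to a
	 * rate it can actually generate, as in at32_set_target() */
	long rounded_hz = clk_round_rate(cpuclk, khz * 1000);

This is also why the policy limits are compared as policy->min * 1000 and policy->max * 1000 against the Hz value returned by clk_round_rate().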
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c
index 4a60ecc..8acd0109 100644
--- a/arch/avr32/mach-at32ap/extint.c
+++ b/arch/avr32/mach-at32ap/extint.c
@@ -17,42 +17,83 @@
 
 #include <asm/io.h>
 
-#include <asm/arch/sm.h>
+/* EIC register offsets */
+#define EIC_IER					0x0000
+#define EIC_IDR					0x0004
+#define EIC_IMR					0x0008
+#define EIC_ISR					0x000c
+#define EIC_ICR					0x0010
+#define EIC_MODE				0x0014
+#define EIC_EDGE				0x0018
+#define EIC_LEVEL				0x001c
+#define EIC_TEST				0x0020
+#define EIC_NMIC				0x0024
 
-#include "sm.h"
+/* Bitfields in TEST */
+#define EIC_TESTEN_OFFSET			31
+#define EIC_TESTEN_SIZE				1
 
-static void eim_ack_irq(unsigned int irq)
+/* Bitfields in NMIC */
+#define EIC_EN_OFFSET				0
+#define EIC_EN_SIZE				1
+
+/* Bit manipulation macros */
+#define EIC_BIT(name)					\
+	(1 << EIC_##name##_OFFSET)
+#define EIC_BF(name,value)				\
+	(((value) & ((1 << EIC_##name##_SIZE) - 1))	\
+	 << EIC_##name##_OFFSET)
+#define EIC_BFEXT(name,value)				\
+	(((value) >> EIC_##name##_OFFSET)		\
+	 & ((1 << EIC_##name##_SIZE) - 1))
+#define EIC_BFINS(name,value,old)			\
+	(((old) & ~(((1 << EIC_##name##_SIZE) - 1)	\
+		    << EIC_##name##_OFFSET))		\
+	 | EIC_BF(name,value))
+
+/* Register access macros */
+#define eic_readl(port,reg)				\
+	__raw_readl((port)->regs + EIC_##reg)
+#define eic_writel(port,reg,value)			\
+	__raw_writel((value), (port)->regs + EIC_##reg)
+
+struct eic {
+	void __iomem *regs;
+	struct irq_chip *chip;
+	unsigned int first_irq;
+};
+
+static void eic_ack_irq(unsigned int irq)
 {
-	struct at32_sm *sm = get_irq_chip_data(irq);
-	sm_writel(sm, EIM_ICR, 1 << (irq - sm->eim_first_irq));
+	struct eic *eic = get_irq_chip_data(irq);
+	eic_writel(eic, ICR, 1 << (irq - eic->first_irq));
 }
 
-static void eim_mask_irq(unsigned int irq)
+static void eic_mask_irq(unsigned int irq)
 {
-	struct at32_sm *sm = get_irq_chip_data(irq);
-	sm_writel(sm, EIM_IDR, 1 << (irq - sm->eim_first_irq));
+	struct eic *eic = get_irq_chip_data(irq);
+	eic_writel(eic, IDR, 1 << (irq - eic->first_irq));
 }
 
-static void eim_mask_ack_irq(unsigned int irq)
+static void eic_mask_ack_irq(unsigned int irq)
 {
-	struct at32_sm *sm = get_irq_chip_data(irq);
-	sm_writel(sm, EIM_ICR, 1 << (irq - sm->eim_first_irq));
-	sm_writel(sm, EIM_IDR, 1 << (irq - sm->eim_first_irq));
+	struct eic *eic = get_irq_chip_data(irq);
+	eic_writel(eic, ICR, 1 << (irq - eic->first_irq));
+	eic_writel(eic, IDR, 1 << (irq - eic->first_irq));
 }
 
-static void eim_unmask_irq(unsigned int irq)
+static void eic_unmask_irq(unsigned int irq)
 {
-	struct at32_sm *sm = get_irq_chip_data(irq);
-	sm_writel(sm, EIM_IER, 1 << (irq - sm->eim_first_irq));
+	struct eic *eic = get_irq_chip_data(irq);
+	eic_writel(eic, IER, 1 << (irq - eic->first_irq));
 }
 
-static int eim_set_irq_type(unsigned int irq, unsigned int flow_type)
+static int eic_set_irq_type(unsigned int irq, unsigned int flow_type)
 {
-	struct at32_sm *sm = get_irq_chip_data(irq);
+	struct eic *eic = get_irq_chip_data(irq);
 	struct irq_desc *desc;
-	unsigned int i = irq - sm->eim_first_irq;
+	unsigned int i = irq - eic->first_irq;
 	u32 mode, edge, level;
-	unsigned long flags;
 	int ret = 0;
 
 	flow_type &= IRQ_TYPE_SENSE_MASK;
@@ -60,11 +101,10 @@
 		flow_type = IRQ_TYPE_LEVEL_LOW;
 
 	desc = &irq_desc[irq];
-	spin_lock_irqsave(&sm->lock, flags);
 
-	mode = sm_readl(sm, EIM_MODE);
-	edge = sm_readl(sm, EIM_EDGE);
-	level = sm_readl(sm, EIM_LEVEL);
+	mode = eic_readl(eic, MODE);
+	edge = eic_readl(eic, EDGE);
+	level = eic_readl(eic, LEVEL);
 
 	switch (flow_type) {
 	case IRQ_TYPE_LEVEL_LOW:
@@ -89,9 +129,9 @@
 	}
 
 	if (ret == 0) {
-		sm_writel(sm, EIM_MODE, mode);
-		sm_writel(sm, EIM_EDGE, edge);
-		sm_writel(sm, EIM_LEVEL, level);
+		eic_writel(eic, MODE, mode);
+		eic_writel(eic, EDGE, edge);
+		eic_writel(eic, LEVEL, level);
 
 		if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
 			flow_type |= IRQ_LEVEL;
@@ -99,35 +139,33 @@
 		desc->status |= flow_type;
 	}
 
-	spin_unlock_irqrestore(&sm->lock, flags);
-
 	return ret;
 }
 
-struct irq_chip eim_chip = {
-	.name		= "eim",
-	.ack		= eim_ack_irq,
-	.mask		= eim_mask_irq,
-	.mask_ack	= eim_mask_ack_irq,
-	.unmask		= eim_unmask_irq,
-	.set_type	= eim_set_irq_type,
+struct irq_chip eic_chip = {
+	.name		= "eic",
+	.ack		= eic_ack_irq,
+	.mask		= eic_mask_irq,
+	.mask_ack	= eic_mask_ack_irq,
+	.unmask		= eic_unmask_irq,
+	.set_type	= eic_set_irq_type,
 };
 
-static void demux_eim_irq(unsigned int irq, struct irq_desc *desc)
+static void demux_eic_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct at32_sm *sm = desc->handler_data;
+	struct eic *eic = desc->handler_data;
 	struct irq_desc *ext_desc;
 	unsigned long status, pending;
 	unsigned int i, ext_irq;
 
-	status = sm_readl(sm, EIM_ISR);
-	pending = status & sm_readl(sm, EIM_IMR);
+	status = eic_readl(eic, ISR);
+	pending = status & eic_readl(eic, IMR);
 
 	while (pending) {
 		i = fls(pending) - 1;
 		pending &= ~(1 << i);
 
-		ext_irq = i + sm->eim_first_irq;
+		ext_irq = i + eic->first_irq;
 		ext_desc = irq_desc + ext_irq;
 		if (ext_desc->status & IRQ_LEVEL)
 			handle_level_irq(ext_irq, ext_desc);
@@ -136,51 +174,85 @@
 	}
 }
 
-static int __init eim_init(void)
+static int __init eic_probe(struct platform_device *pdev)
 {
-	struct at32_sm *sm = &system_manager;
+	struct eic *eic;
+	struct resource *regs;
 	unsigned int i;
 	unsigned int nr_irqs;
 	unsigned int int_irq;
+	int ret;
 	u32 pattern;
 
-	/*
-	 * The EIM is really the same module as SM, so register
-	 * mapping, etc. has been taken care of already.
-	 */
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	int_irq = platform_get_irq(pdev, 0);
+	if (!regs || !int_irq) {
+		dev_dbg(&pdev->dev, "missing regs and/or irq resource\n");
+		return -ENXIO;
+	}
+
+	ret = -ENOMEM;
+	eic = kzalloc(sizeof(struct eic), GFP_KERNEL);
+	if (!eic) {
+		dev_dbg(&pdev->dev, "no memory for eic structure\n");
+		goto err_kzalloc;
+	}
+
+	eic->first_irq = EIM_IRQ_BASE + 32 * pdev->id;
+	eic->regs = ioremap(regs->start, regs->end - regs->start + 1);
+	if (!eic->regs) {
+		dev_dbg(&pdev->dev, "failed to map regs\n");
+		goto err_ioremap;
+	}
 
 	/*
 	 * Find out how many interrupt lines that are actually
 	 * implemented in hardware.
 	 */
-	sm_writel(sm, EIM_IDR, ~0UL);
-	sm_writel(sm, EIM_MODE, ~0UL);
-	pattern = sm_readl(sm, EIM_MODE);
+	eic_writel(eic, IDR, ~0UL);
+	eic_writel(eic, MODE, ~0UL);
+	pattern = eic_readl(eic, MODE);
 	nr_irqs = fls(pattern);
 
 	/* Trigger on falling edge unless overridden by driver */
-	sm_writel(sm, EIM_MODE, 0UL);
-	sm_writel(sm, EIM_EDGE, 0UL);
+	eic_writel(eic, MODE, 0UL);
+	eic_writel(eic, EDGE, 0UL);
 
-	sm->eim_chip = &eim_chip;
+	eic->chip = &eic_chip;
 
 	for (i = 0; i < nr_irqs; i++) {
 		/* NOTE the handler we set here is ignored by the demux */
-		set_irq_chip_and_handler(sm->eim_first_irq + i, &eim_chip,
+		set_irq_chip_and_handler(eic->first_irq + i, &eic_chip,
 					 handle_level_irq);
-		set_irq_chip_data(sm->eim_first_irq + i, sm);
+		set_irq_chip_data(eic->first_irq + i, eic);
 	}
 
-	int_irq = platform_get_irq_byname(sm->pdev, "eim");
+	set_irq_chained_handler(int_irq, demux_eic_irq);
+	set_irq_data(int_irq, eic);
 
-	set_irq_chained_handler(int_irq, demux_eim_irq);
-	set_irq_data(int_irq, sm);
-
-	printk("EIM: External Interrupt Module at 0x%p, IRQ %u\n",
-	       sm->regs, int_irq);
-	printk("EIM: Handling %u external IRQs, starting with IRQ %u\n",
-	       nr_irqs, sm->eim_first_irq);
+	dev_info(&pdev->dev,
+		 "External Interrupt Controller at 0x%p, IRQ %u\n",
+		 eic->regs, int_irq);
+	dev_info(&pdev->dev,
+		 "Handling %u external IRQs, starting with IRQ %u\n",
+		 nr_irqs, eic->first_irq);
 
 	return 0;
+
+err_ioremap:
+	kfree(eic);
+err_kzalloc:
+	return ret;
 }
-arch_initcall(eim_init);
+
+static struct platform_driver eic_driver = {
+	.driver = {
+		.name = "at32_eic",
+	},
+};
+
+static int __init eic_init(void)
+{
+	return platform_driver_probe(&eic_driver, eic_probe);
+}
+arch_initcall(eic_init);
diff --git a/arch/avr32/mach-at32ap/pm.h b/arch/avr32/mach-at32ap/pm.h
new file mode 100644
index 0000000..a1f8ace
--- /dev/null
+++ b/arch/avr32/mach-at32ap/pm.h
@@ -0,0 +1,112 @@
+/*
+ * Register definitions for the Power Manager (PM)
+ */
+#ifndef __ARCH_AVR32_MACH_AT32AP_PM_H__
+#define __ARCH_AVR32_MACH_AT32AP_PM_H__
+
+/* PM register offsets */
+#define PM_MCCTRL				0x0000
+#define PM_CKSEL				0x0004
+#define PM_CPU_MASK				0x0008
+#define PM_HSB_MASK				0x000c
+#define PM_PBA_MASK				0x0010
+#define PM_PBB_MASK				0x0014
+#define PM_PLL0					0x0020
+#define PM_PLL1					0x0024
+#define PM_IER					0x0040
+#define PM_IDR					0x0044
+#define PM_IMR					0x0048
+#define PM_ISR					0x004c
+#define PM_ICR					0x0050
+#define PM_GCCTRL(x)				(0x0060 + 4 * (x))
+#define PM_RCAUSE				0x00c0
+
+/* Bitfields in CKSEL */
+#define PM_CPUSEL_OFFSET			0
+#define PM_CPUSEL_SIZE				3
+#define PM_CPUDIV_OFFSET			7
+#define PM_CPUDIV_SIZE				1
+#define PM_HSBSEL_OFFSET			8
+#define PM_HSBSEL_SIZE				3
+#define PM_HSBDIV_OFFSET			15
+#define PM_HSBDIV_SIZE				1
+#define PM_PBASEL_OFFSET			16
+#define PM_PBASEL_SIZE				3
+#define PM_PBADIV_OFFSET			23
+#define PM_PBADIV_SIZE				1
+#define PM_PBBSEL_OFFSET			24
+#define PM_PBBSEL_SIZE				3
+#define PM_PBBDIV_OFFSET			31
+#define PM_PBBDIV_SIZE				1
+
+/* Bitfields in PLL0 */
+#define PM_PLLEN_OFFSET				0
+#define PM_PLLEN_SIZE				1
+#define PM_PLLOSC_OFFSET			1
+#define PM_PLLOSC_SIZE				1
+#define PM_PLLOPT_OFFSET			2
+#define PM_PLLOPT_SIZE				3
+#define PM_PLLDIV_OFFSET			8
+#define PM_PLLDIV_SIZE				8
+#define PM_PLLMUL_OFFSET			16
+#define PM_PLLMUL_SIZE				8
+#define PM_PLLCOUNT_OFFSET			24
+#define PM_PLLCOUNT_SIZE			6
+#define PM_PLLTEST_OFFSET			31
+#define PM_PLLTEST_SIZE				1
+
+/* Bitfields in ICR */
+#define PM_LOCK0_OFFSET				0
+#define PM_LOCK0_SIZE				1
+#define PM_LOCK1_OFFSET				1
+#define PM_LOCK1_SIZE				1
+#define PM_WAKE_OFFSET				2
+#define PM_WAKE_SIZE				1
+#define PM_CKRDY_OFFSET				5
+#define PM_CKRDY_SIZE				1
+#define PM_MSKRDY_OFFSET			6
+#define PM_MSKRDY_SIZE				1
+
+/* Bitfields in GCCTRL0 */
+#define PM_OSCSEL_OFFSET			0
+#define PM_OSCSEL_SIZE				1
+#define PM_PLLSEL_OFFSET			1
+#define PM_PLLSEL_SIZE				1
+#define PM_CEN_OFFSET				2
+#define PM_CEN_SIZE				1
+#define PM_DIVEN_OFFSET				4
+#define PM_DIVEN_SIZE				1
+#define PM_DIV_OFFSET				8
+#define PM_DIV_SIZE				8
+
+/* Bitfields in RCAUSE */
+#define PM_POR_OFFSET				0
+#define PM_POR_SIZE				1
+#define PM_EXT_OFFSET				2
+#define PM_EXT_SIZE				1
+#define PM_WDT_OFFSET				3
+#define PM_WDT_SIZE				1
+#define PM_NTAE_OFFSET				4
+#define PM_NTAE_SIZE				1
+
+/* Bit manipulation macros */
+#define PM_BIT(name)					\
+	(1 << PM_##name##_OFFSET)
+#define PM_BF(name,value)				\
+	(((value) & ((1 << PM_##name##_SIZE) - 1))	\
+	 << PM_##name##_OFFSET)
+#define PM_BFEXT(name,value)				\
+	(((value) >> PM_##name##_OFFSET)		\
+	 & ((1 << PM_##name##_SIZE) - 1))
+#define PM_BFINS(name,value,old)\
+	(((old) & ~(((1 << PM_##name##_SIZE) - 1)	\
+		    << PM_##name##_OFFSET))		\
+	 | PM_BF(name,value))
+
+/* Register access macros */
+#define pm_readl(reg)							\
+	__raw_readl((void __iomem *)AT32_PM_BASE + PM_##reg)
+#define pm_writel(reg,value)						\
+	__raw_writel((value), (void __iomem *)AT32_PM_BASE + PM_##reg)
+
+#endif /* __ARCH_AVR32_MACH_AT32AP_PM_H__ */
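The PM_* helpers above follow the same bit/field conventions the old SM_* macros used, only with a fixed base address instead of an ioremap()ed struct.  A minimal usage sketch, assuming the register and field names defined in this header plus the AT32_PM_BASE constant from at32ap7000.c:

	u32 cksel, control;
	unsigned long cpu_shift = 0;

	/* Read the clock-select register and extract the CPU divider field. */
	cksel = pm_readl(CKSEL);
	if (cksel & PM_BIT(CPUDIV))
		cpu_shift = PM_BFEXT(CPUSEL, cksel) + 1;

	/* Rewrite the DIV field of generic clock 0 and (re)enable it. */
	control = pm_readl(GCCTRL(0));
	control = PM_BFINS(DIV, 3, control) | PM_BIT(DIVEN) | PM_BIT(CEN);
	pm_writel(GCCTRL(0), control);

Note that pm_readl()/pm_writel() expand AT32_PM_BASE directly, so they only work in chip-specific code where that constant is visible; ordinary drivers still go through their own ioremap()ed resources.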
diff --git a/arch/avr32/mach-at32ap/sm.h b/arch/avr32/mach-at32ap/sm.h
deleted file mode 100644
index cad02b5..0000000
--- a/arch/avr32/mach-at32ap/sm.h
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Register definitions for SM
- *
- * System Manager
- */
-#ifndef __ASM_AVR32_SM_H__
-#define __ASM_AVR32_SM_H__
-
-/* SM register offsets */
-#define SM_PM_MCCTRL                            0x0000
-#define SM_PM_CKSEL                             0x0004
-#define SM_PM_CPU_MASK                          0x0008
-#define SM_PM_HSB_MASK                          0x000c
-#define SM_PM_PBA_MASK                         0x0010
-#define SM_PM_PBB_MASK                         0x0014
-#define SM_PM_PLL0                              0x0020
-#define SM_PM_PLL1                              0x0024
-#define SM_PM_VCTRL                             0x0030
-#define SM_PM_VMREF                             0x0034
-#define SM_PM_VMV                               0x0038
-#define SM_PM_IER                               0x0040
-#define SM_PM_IDR                               0x0044
-#define SM_PM_IMR                               0x0048
-#define SM_PM_ISR                               0x004c
-#define SM_PM_ICR                               0x0050
-#define SM_PM_GCCTRL                            0x0060
-#define SM_RTC_CTRL                             0x0080
-#define SM_RTC_VAL                              0x0084
-#define SM_RTC_TOP                              0x0088
-#define SM_RTC_IER                              0x0090
-#define SM_RTC_IDR                              0x0094
-#define SM_RTC_IMR                              0x0098
-#define SM_RTC_ISR                              0x009c
-#define SM_RTC_ICR                              0x00a0
-#define SM_WDT_CTRL                             0x00b0
-#define SM_WDT_CLR                              0x00b4
-#define SM_WDT_EXT                              0x00b8
-#define SM_RC_RCAUSE                            0x00c0
-#define SM_EIM_IER                              0x0100
-#define SM_EIM_IDR                              0x0104
-#define SM_EIM_IMR                              0x0108
-#define SM_EIM_ISR                              0x010c
-#define SM_EIM_ICR                              0x0110
-#define SM_EIM_MODE                             0x0114
-#define SM_EIM_EDGE                             0x0118
-#define SM_EIM_LEVEL                            0x011c
-#define SM_EIM_TEST                             0x0120
-#define SM_EIM_NMIC                             0x0124
-
-/* Bitfields in PM_MCCTRL */
-
-/* Bitfields in PM_CKSEL */
-#define SM_CPUSEL_OFFSET                        0
-#define SM_CPUSEL_SIZE                          3
-#define SM_CPUDIV_OFFSET                        7
-#define SM_CPUDIV_SIZE                          1
-#define SM_HSBSEL_OFFSET                        8
-#define SM_HSBSEL_SIZE                          3
-#define SM_HSBDIV_OFFSET                        15
-#define SM_HSBDIV_SIZE                          1
-#define SM_PBASEL_OFFSET                       16
-#define SM_PBASEL_SIZE                         3
-#define SM_PBADIV_OFFSET                       23
-#define SM_PBADIV_SIZE                         1
-#define SM_PBBSEL_OFFSET                       24
-#define SM_PBBSEL_SIZE                         3
-#define SM_PBBDIV_OFFSET                       31
-#define SM_PBBDIV_SIZE                         1
-
-/* Bitfields in PM_CPU_MASK */
-
-/* Bitfields in PM_HSB_MASK */
-
-/* Bitfields in PM_PBA_MASK */
-
-/* Bitfields in PM_PBB_MASK */
-
-/* Bitfields in PM_PLL0 */
-#define SM_PLLEN_OFFSET                         0
-#define SM_PLLEN_SIZE                           1
-#define SM_PLLOSC_OFFSET                        1
-#define SM_PLLOSC_SIZE                          1
-#define SM_PLLOPT_OFFSET                        2
-#define SM_PLLOPT_SIZE                          3
-#define SM_PLLDIV_OFFSET                        8
-#define SM_PLLDIV_SIZE                          8
-#define SM_PLLMUL_OFFSET                        16
-#define SM_PLLMUL_SIZE                          8
-#define SM_PLLCOUNT_OFFSET                      24
-#define SM_PLLCOUNT_SIZE                        6
-#define SM_PLLTEST_OFFSET                       31
-#define SM_PLLTEST_SIZE                         1
-
-/* Bitfields in PM_PLL1 */
-
-/* Bitfields in PM_VCTRL */
-#define SM_VAUTO_OFFSET                         0
-#define SM_VAUTO_SIZE                           1
-#define SM_PM_VCTRL_VAL_OFFSET                  8
-#define SM_PM_VCTRL_VAL_SIZE                    7
-
-/* Bitfields in PM_VMREF */
-#define SM_REFSEL_OFFSET                        0
-#define SM_REFSEL_SIZE                          4
-
-/* Bitfields in PM_VMV */
-#define SM_PM_VMV_VAL_OFFSET                    0
-#define SM_PM_VMV_VAL_SIZE                      8
-
-/* Bitfields in PM_IER */
-
-/* Bitfields in PM_IDR */
-
-/* Bitfields in PM_IMR */
-
-/* Bitfields in PM_ISR */
-
-/* Bitfields in PM_ICR */
-#define SM_LOCK0_OFFSET                         0
-#define SM_LOCK0_SIZE                           1
-#define SM_LOCK1_OFFSET                         1
-#define SM_LOCK1_SIZE                           1
-#define SM_WAKE_OFFSET                          2
-#define SM_WAKE_SIZE                            1
-#define SM_VOK_OFFSET                           3
-#define SM_VOK_SIZE                             1
-#define SM_VMRDY_OFFSET                         4
-#define SM_VMRDY_SIZE                           1
-#define SM_CKRDY_OFFSET                         5
-#define SM_CKRDY_SIZE                           1
-
-/* Bitfields in PM_GCCTRL */
-#define SM_OSCSEL_OFFSET                        0
-#define SM_OSCSEL_SIZE                          1
-#define SM_PLLSEL_OFFSET                        1
-#define SM_PLLSEL_SIZE                          1
-#define SM_CEN_OFFSET                           2
-#define SM_CEN_SIZE                             1
-#define SM_CPC_OFFSET                           3
-#define SM_CPC_SIZE                             1
-#define SM_DIVEN_OFFSET                         4
-#define SM_DIVEN_SIZE                           1
-#define SM_DIV_OFFSET                           8
-#define SM_DIV_SIZE                             8
-
-/* Bitfields in RTC_CTRL */
-#define SM_PCLR_OFFSET                          1
-#define SM_PCLR_SIZE                            1
-#define SM_TOPEN_OFFSET                         2
-#define SM_TOPEN_SIZE                           1
-#define SM_CLKEN_OFFSET                         3
-#define SM_CLKEN_SIZE                           1
-#define SM_PSEL_OFFSET                          8
-#define SM_PSEL_SIZE                            16
-
-/* Bitfields in RTC_VAL */
-#define SM_RTC_VAL_VAL_OFFSET                   0
-#define SM_RTC_VAL_VAL_SIZE                     31
-
-/* Bitfields in RTC_TOP */
-#define SM_RTC_TOP_VAL_OFFSET                   0
-#define SM_RTC_TOP_VAL_SIZE                     32
-
-/* Bitfields in RTC_IER */
-
-/* Bitfields in RTC_IDR */
-
-/* Bitfields in RTC_IMR */
-
-/* Bitfields in RTC_ISR */
-
-/* Bitfields in RTC_ICR */
-#define SM_TOPI_OFFSET                          0
-#define SM_TOPI_SIZE                            1
-
-/* Bitfields in WDT_CTRL */
-#define SM_KEY_OFFSET                           24
-#define SM_KEY_SIZE                             8
-
-/* Bitfields in WDT_CLR */
-
-/* Bitfields in WDT_EXT */
-
-/* Bitfields in RC_RCAUSE */
-#define SM_POR_OFFSET                           0
-#define SM_POR_SIZE                             1
-#define SM_BOD_OFFSET                           1
-#define SM_BOD_SIZE                             1
-#define SM_EXT_OFFSET                           2
-#define SM_EXT_SIZE                             1
-#define SM_WDT_OFFSET                           3
-#define SM_WDT_SIZE                             1
-#define SM_NTAE_OFFSET                          4
-#define SM_NTAE_SIZE                            1
-#define SM_SERP_OFFSET                          5
-#define SM_SERP_SIZE                            1
-
-/* Bitfields in EIM_IER */
-
-/* Bitfields in EIM_IDR */
-
-/* Bitfields in EIM_IMR */
-
-/* Bitfields in EIM_ISR */
-
-/* Bitfields in EIM_ICR */
-
-/* Bitfields in EIM_MODE */
-
-/* Bitfields in EIM_EDGE */
-#define SM_INT0_OFFSET                          0
-#define SM_INT0_SIZE                            1
-#define SM_INT1_OFFSET                          1
-#define SM_INT1_SIZE                            1
-#define SM_INT2_OFFSET                          2
-#define SM_INT2_SIZE                            1
-#define SM_INT3_OFFSET                          3
-#define SM_INT3_SIZE                            1
-
-/* Bitfields in EIM_LEVEL */
-
-/* Bitfields in EIM_TEST */
-#define SM_TESTEN_OFFSET                        31
-#define SM_TESTEN_SIZE                          1
-
-/* Bitfields in EIM_NMIC */
-#define SM_EN_OFFSET                            0
-#define SM_EN_SIZE                              1
-
-/* Bit manipulation macros */
-#define SM_BIT(name)                            (1 << SM_##name##_OFFSET)
-#define SM_BF(name,value)                       (((value) & ((1 << SM_##name##_SIZE) - 1)) << SM_##name##_OFFSET)
-#define SM_BFEXT(name,value)                    (((value) >> SM_##name##_OFFSET) & ((1 << SM_##name##_SIZE) - 1))
-#define SM_BFINS(name,value,old)                (((old) & ~(((1 << SM_##name##_SIZE) - 1) << SM_##name##_OFFSET)) | SM_BF(name,value))
-
-/* Register access macros */
-#define sm_readl(port,reg)					\
-	__raw_readl((port)->regs + SM_##reg)
-#define sm_writel(port,reg,value)				\
-	__raw_writel((value), (port)->regs + SM_##reg)
-
-#endif /* __ASM_AVR32_SM_H__ */
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 4b24952..ae2d2c5 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -64,6 +64,7 @@
 	int writeaccess;
 	long signr;
 	int code;
+	int fault;
 
 	if (notify_page_fault(regs, ecr))
 		return;
@@ -132,20 +133,18 @@
 	 * fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/blackfin/mm/blackfin_sram.c b/arch/blackfin/mm/blackfin_sram.c
index 16c6169..b99ea88 100644
--- a/arch/blackfin/mm/blackfin_sram.c
+++ b/arch/blackfin/mm/blackfin_sram.c
@@ -521,10 +521,9 @@
 	struct sram_list_struct *lsl = NULL;
 	struct mm_struct *mm = current->mm;
 
-	lsl = kmalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
+	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
 	if (!lsl)
 		return NULL;
-	memset(lsl, 0, sizeof(*lsl));
 
 	if (flags & L1_INST_SRAM)
 		addr = l1_inst_sram_alloc(size);
diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c
index d47cfbf..1de0026 100644
--- a/arch/cris/arch-v10/drivers/pcf8563.c
+++ b/arch/cris/arch-v10/drivers/pcf8563.c
@@ -180,9 +180,7 @@
 void __exit
 pcf8563_exit(void)
 {
-	if (unregister_chrdev(PCF8563_MAJOR, DEVICE_NAME) < 0) {
-		printk(KERN_INFO "%s: Unable to unregister device.\n", PCF8563_NAME);
-	}
+	unregister_chrdev(PCF8563_MAJOR, DEVICE_NAME);
 }
 
 /*
diff --git a/arch/cris/arch-v10/kernel/ptrace.c b/arch/cris/arch-v10/kernel/ptrace.c
index fd2129a..f4f9db69 100644
--- a/arch/cris/arch-v10/kernel/ptrace.c
+++ b/arch/cris/arch-v10/kernel/ptrace.c
@@ -83,19 +83,9 @@
 	switch (request) {
 		/* Read word at location address. */ 
 		case PTRACE_PEEKTEXT:
-		case PTRACE_PEEKDATA: {
-			unsigned long tmp;
-			int copied;
-
-			copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-			ret = -EIO;
-			
-			if (copied != sizeof(tmp))
-				break;
-			
-			ret = put_user(tmp,datap);
+		case PTRACE_PEEKDATA:
+			ret = generic_ptrace_peekdata(child, addr, data);
 			break;
-		}
 
 		/* Read the word at location address in the USER area. */
 		case PTRACE_PEEKUSR: {
@@ -113,12 +103,7 @@
 		/* Write the word at location address. */
 		case PTRACE_POKETEXT:
 		case PTRACE_POKEDATA:
-			ret = 0;
-			
-			if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
-				break;
-			
-			ret = -EIO;
+			ret = generic_ptrace_pokedata(child, addr, data);
 			break;
  
  		/* Write the word at location address in the USER area. */
diff --git a/arch/cris/arch-v32/drivers/pcf8563.c b/arch/cris/arch-v32/drivers/pcf8563.c
index fa8d500..da479a1 100644
--- a/arch/cris/arch-v32/drivers/pcf8563.c
+++ b/arch/cris/arch-v32/drivers/pcf8563.c
@@ -193,9 +193,7 @@
 void __exit
 pcf8563_exit(void)
 {
-	if (unregister_chrdev(PCF8563_MAJOR, DEVICE_NAME) < 0) {
-		printk(KERN_INFO "%s: Unable to unregister device.\n", PCF8563_NAME);
-	}
+	unregister_chrdev(PCF8563_MAJOR, DEVICE_NAME);
 }
 
 /*
diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c
index 832fc63..66f9500 100644
--- a/arch/cris/arch-v32/drivers/pci/dma.c
+++ b/arch/cris/arch-v32/drivers/pci/dma.c
@@ -91,14 +91,12 @@
 	if (!mem_base)
 		goto out;
 
-	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
 	if (!dev->dma_mem)
 		goto out;
-	memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
-	dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
+	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 	if (!dev->dma_mem->bitmap)
 		goto free1_out;
-	memset(dev->dma_mem->bitmap, 0, bitmap_size);
 
 	dev->dma_mem->virt_base = mem_base;
 	dev->dma_mem->device_base = device_addr;
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index d4d57b7..38ece0c 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -146,12 +146,7 @@
 		/* Write the word at location address. */
 		case PTRACE_POKETEXT:
 		case PTRACE_POKEDATA:
-			ret = 0;
-
-			if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
-				break;
-
-			ret = -EIO;
+			ret = generic_ptrace_pokedata(child, addr, data);
 			break;
 
  		/* Write the word at location address in the USER area. */
diff --git a/arch/cris/arch-v32/vmlinux.lds.S b/arch/cris/arch-v32/vmlinux.lds.S
index e3a32ac..b076c13 100644
--- a/arch/cris/arch-v32/vmlinux.lds.S
+++ b/arch/cris/arch-v32/vmlinux.lds.S
@@ -91,10 +91,7 @@
 	}
 	SECURITY_INIT
 
-	. =  ALIGN (8192);
-	__per_cpu_start = .;
-	.data.percpu  : { *(.data.percpu) }
-	__per_cpu_end = .;
+	PERCPU(8192)
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	.init.ramfs : {
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index c73e91f..8672ab7 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -179,6 +179,7 @@
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	siginfo_t info;
+	int fault;
 
         D(printk("Page fault for %lX on %X at %lX, prot %d write %d\n",
                  address, smp_processor_id(), instruction_pointer(regs),
@@ -283,18 +284,18 @@
 	 * the fault.
 	 */
 
-	switch (handle_mm_fault(mm, vma, address, writeaccess & 1)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	default:
-		goto out_of_memory;
+	fault = handle_mm_fault(mm, vma, address, writeaccess & 1);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/frv/Makefile b/arch/frv/Makefile
index 038e3a8..9bf7345 100644
--- a/arch/frv/Makefile
+++ b/arch/frv/Makefile
@@ -88,7 +88,7 @@
 # make sure the .S files get compiled with debug info
 # and disable optimisations that are unhelpful whilst debugging
 ifdef CONFIG_DEBUG_INFO
-CFLAGS		+= -O1
+#CFLAGS		+= -O1
 AFLAGS		+= -Wa,--gdwarf2
 ASFLAGS		+= -Wa,--gdwarf2
 endif
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index ce88fb9..709e9bd 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -112,20 +112,12 @@
 	switch (request) {
 		/* when I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
-	case PTRACE_PEEKDATA: {
-		int copied;
-
+	case PTRACE_PEEKDATA:
 		ret = -EIO;
 		if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0)
 			break;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		if (copied != sizeof(tmp))
-			break;
-
-		ret = put_user(tmp,(unsigned long *) data);
+		ret = generic_ptrace_peekdata(child, addr, data);
 		break;
-	}
 
 		/* read the word at location addr in the USER area. */
 	case PTRACE_PEEKUSR: {
@@ -176,9 +168,7 @@
 		ret = -EIO;
 		if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0)
 			break;
-		if (access_process_vm(child, addr, &data, sizeof(data), 1) != sizeof(data))
-			break;
-		ret = 0;
+		ret = generic_ptrace_pokedata(child, addr, data);
 		break;
 
 	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index c1c32e4..a74c087 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -29,6 +29,7 @@
 #include <linux/serial.h>
 #include <linux/serial_core.h>
 #include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
 
 #include <asm/setup.h>
 #include <asm/irq.h>
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index 481dc13..3b71e0c 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -57,10 +57,7 @@
   __alt_instructions_end = .;
  .altinstr_replacement : { *(.altinstr_replacement) }
 
-  . = ALIGN(4096);
-  __per_cpu_start = .;
-  .data.percpu  : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(4096)
 
 #ifdef CONFIG_BLK_DEV_INITRD
   . = ALIGN(4096);
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 3f12296..6798fa0 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -40,6 +40,7 @@
 	pud_t *pue;
 	pte_t *pte;
 	int write;
+	int fault;
 
 #if 0
 	const char *atxc[16] = {
@@ -162,18 +163,18 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, ear0, write)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	default:
-		goto out_of_memory;
+	fault = handle_mm_fault(mm, vma, ear0, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index 8a7a991..d32bbf0 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -111,10 +111,7 @@
       /* when I and D space are separate, this will have to be fixed. */
 		case PTRACE_POKETEXT: /* write the word at location addr. */
 		case PTRACE_POKEDATA:
-			ret = 0;
-			if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
-				break;
-			ret = -EIO;
+			ret = generic_ptrace_pokedata(child, addr, data);
 			break;
 
 		case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 8531a54..7a11b90 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -222,6 +222,8 @@
 	  However, when run without a hypervisor the kernel is
 	  theoretically slower.  If in doubt, say N.
 
+source "arch/i386/xen/Kconfig"
+
 config VMI
 	bool "VMI Paravirt-ops support"
 	depends on PARAVIRT
@@ -1222,8 +1224,8 @@
 source "arch/i386/oprofile/Kconfig"
 
 config KPROBES
-	bool "Kprobes (EXPERIMENTAL)"
-	depends on KALLSYMS && EXPERIMENTAL && MODULES
+	bool "Kprobes"
+	depends on KALLSYMS && MODULES
 	help
 	  Kprobes allows you to trap at almost any kernel address and
 	  execute a callback function.  register_kprobe() establishes
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index bd28f9f..01f0ff0 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -93,6 +93,9 @@
 mcore-$(CONFIG_X86_ES7000)	:= mach-default
 core-$(CONFIG_X86_ES7000)	:= arch/i386/mach-es7000/
 
+# Xen paravirtualization support
+core-$(CONFIG_XEN)		+= arch/i386/xen/
+
 # default subarch .h files
 mflags-y += -Iinclude/asm-i386/mach-default
 
@@ -108,6 +111,7 @@
 # must be linked after kernel/
 drivers-$(CONFIG_OPROFILE)		+= arch/i386/oprofile/
 drivers-$(CONFIG_PM)			+= arch/i386/power/
+drivers-$(CONFIG_FB)                    += arch/i386/video/
 
 CFLAGS += $(mflags-y)
 AFLAGS += $(mflags-y)
diff --git a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile
index 08678a0a..93386a4 100644
--- a/arch/i386/boot/Makefile
+++ b/arch/i386/boot/Makefile
@@ -39,7 +39,7 @@
 setup-y		+= video-vga.o
 setup-y		+= video-vesa.o
 setup-y		+= video-bios.o
-
+targets		+= $(setup-y)
 hostprogs-y	:= tools/build
 
 HOSTCFLAGS_build.o := $(LINUXINCLUDE)
diff --git a/arch/i386/boot/boot.h b/arch/i386/boot/boot.h
index 0329c4f..dec70c9 100644
--- a/arch/i386/boot/boot.h
+++ b/arch/i386/boot/boot.h
@@ -56,7 +56,7 @@
 
 static inline void outl(u32 v, u16 port)
 {
-	asm volatile("outl %0,%1" : : "a" (v), "dn" (port));
+	asm volatile("outl %0,%1" : : "a" (v), "dN" (port));
 }
 static inline u32 inl(u32 port)
 {
diff --git a/arch/i386/boot/compressed/relocs.c b/arch/i386/boot/compressed/relocs.c
index ce4fda2..b0e21c3 100644
--- a/arch/i386/boot/compressed/relocs.c
+++ b/arch/i386/boot/compressed/relocs.c
@@ -31,6 +31,8 @@
 		"__kernel_rt_sigreturn",
 		"__kernel_sigreturn",
 		"SYSENTER_RETURN",
+		"xen_irq_disable_direct_reloc",
+		"xen_save_fl_direct_reloc",
 };
 
 static int is_safe_abs_reloc(const char* sym_name)
diff --git a/arch/i386/boot/cpucheck.c b/arch/i386/boot/cpucheck.c
index 8b0f447..991e8ce 100644
--- a/arch/i386/boot/cpucheck.c
+++ b/arch/i386/boot/cpucheck.c
@@ -115,8 +115,8 @@
 	    "pushfl ; "
 	    "popl %1 ; "
 	    "popfl"
-	    : "=r" (f0), "=r" (f1)
-	    : "g" (mask));
+	    : "=&r" (f0), "=&r" (f1)
+	    : "ri" (mask));
 
 	return !!((f0^f1) & mask);
 }
diff --git a/arch/i386/boot/mca.c b/arch/i386/boot/mca.c
index 9b68bd1..68222f2 100644
--- a/arch/i386/boot/mca.c
+++ b/arch/i386/boot/mca.c
@@ -26,7 +26,7 @@
 	    "setc %0 ; "
 	    "movw %%es, %1 ; "
 	    "popw %%es"
-	    : "=acdSDm" (err), "=acdSDm" (es), "=b" (bx)
+	    : "=acd" (err), "=acdSD" (es), "=b" (bx)
 	    : "a" (0xc000));
 
 	if (err)
diff --git a/arch/i386/boot/pm.c b/arch/i386/boot/pm.c
index 3fa53e1..1df025c 100644
--- a/arch/i386/boot/pm.c
+++ b/arch/i386/boot/pm.c
@@ -65,7 +65,7 @@
 			     "popw %%ds ; "
 			     "popw %%es"
 			     : "+c" (dwords)
-			     : "rm" (dst_seg), "rm" (src_seg)
+			     : "r" (dst_seg), "r" (src_seg)
 			     : "esi", "edi");
 
 		syssize -= paras;
diff --git a/arch/i386/boot/tools/build.c b/arch/i386/boot/tools/build.c
index 886f47d8..b424874 100644
--- a/arch/i386/boot/tools/build.c
+++ b/arch/i386/boot/tools/build.c
@@ -5,7 +5,7 @@
  */
 
 /*
- * This file builds a disk-image from three different files:
+ * This file builds a disk-image from two different files:
  *
  * - setup: 8086 machine code, sets up system parm
  * - system: 80386 code for actual system
diff --git a/arch/i386/boot/tty.c b/arch/i386/boot/tty.c
index a8db787..9c668aa 100644
--- a/arch/i386/boot/tty.c
+++ b/arch/i386/boot/tty.c
@@ -31,7 +31,7 @@
 
 	/* int $0x10 is known to have bugs involving touching registers
 	   it shouldn't.  Be extra conservative... */
-	asm volatile("pushal; int $0x10; popal"
+	asm volatile("pushal; pushw %%ds; int $0x10; popw %%ds; popal"
 		     : : "b" (0x0007), "c" (0x0001), "a" (0x0e00|ch));
 }
 
diff --git a/arch/i386/boot/video.c b/arch/i386/boot/video.c
index 3bb3573..958130e 100644
--- a/arch/i386/boot/video.c
+++ b/arch/i386/boot/video.c
@@ -195,7 +195,7 @@
 {
 	unsigned int font_size, rows;
 	u16 crtc;
-	u8 ov;
+	u8 pt, ov;
 
 	set_fs(0);
 	font_size = rdfs8(0x485); /* BIOS: font size (pixels) */
@@ -206,7 +206,12 @@
 
 	crtc = vga_crtc();
 
+	pt = in_idx(crtc, 0x11);
+	pt &= ~0x80;		/* Unlock CR0-7 */
+	out_idx(pt, crtc, 0x11);
+
 	out_idx((u8)rows, crtc, 0x12); /* Lower height register */
+
 	ov = in_idx(crtc, 0x07); /* Overflow register */
 	ov &= 0xbd;
 	ov |= (rows >> (8-1)) & 0x02;
@@ -411,7 +416,7 @@
 			     "1: rep;stosl ; "
 			     "popw %%es"
 			     : "+D" (dst), "+c" (npad)
-			     : "bdSm" (video_segment),
+			     : "bdS" (video_segment),
 			       "a" (0x07200720));
 	}
 
diff --git a/arch/i386/boot/video.h b/arch/i386/boot/video.h
index 29eca17..b92447d 100644
--- a/arch/i386/boot/video.h
+++ b/arch/i386/boot/video.h
@@ -117,8 +117,15 @@
  * int $0x10 is notorious for touching registers it shouldn't.
  * gcc doesn't like %ebp being clobbered, so define it as a push/pop
  * sequence here.
+ *
+ * A number of systems, including the original PC, can clobber %bp in
+ * certain circumstances, like when scrolling.  There exists at least
+ * one Trident video card which could clobber DS under a set of
+ * circumstances that we are unlikely to encounter (scrolling when
+ * using an extended graphics mode of more than 800x600 pixels), but
+ * it's cheap insurance to deal with that here.
  */
-#define INT10 "pushl %%ebp; int $0x10; popl %%ebp"
+#define INT10 "pushl %%ebp; pushw %%ds; int $0x10; popw %%ds; popl %%ebp"
 
 /* Accessing VGA indexed registers */
 static inline u8 in_idx(u16 port, u8 index)
diff --git a/arch/i386/boot/voyager.c b/arch/i386/boot/voyager.c
index 9221614..61c8fe0 100644
--- a/arch/i386/boot/voyager.c
+++ b/arch/i386/boot/voyager.c
@@ -32,7 +32,7 @@
 	    "setc %0 ; "
 	    "movw %%es, %1 ; "
 	    "popw %%es"
-	    : "=qm" (err), "=rm" (es), "=D" (di)
+	    : "=q" (err), "=r" (es), "=D" (di)
 	    : "a" (0xffc0));
 
 	if (err)
diff --git a/arch/i386/kernel/acpi/sleep.c b/arch/i386/kernel/acpi/sleep.c
index 4ee8357..c42b5ab 100644
--- a/arch/i386/kernel/acpi/sleep.c
+++ b/arch/i386/kernel/acpi/sleep.c
@@ -14,7 +14,7 @@
 
 /* address in low memory of the wakeup routine. */
 unsigned long acpi_wakeup_address = 0;
-unsigned long acpi_video_flags;
+unsigned long acpi_realmode_flags;
 extern char wakeup_start, wakeup_end;
 
 extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
@@ -68,9 +68,11 @@
 {
 	while ((str != NULL) && (*str != '\0')) {
 		if (strncmp(str, "s3_bios", 7) == 0)
-			acpi_video_flags = 1;
+			acpi_realmode_flags |= 1;
 		if (strncmp(str, "s3_mode", 7) == 0)
-			acpi_video_flags |= 2;
+			acpi_realmode_flags |= 2;
+		if (strncmp(str, "s3_beep", 7) == 0)
+			acpi_realmode_flags |= 4;
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");
@@ -80,9 +82,11 @@
 
 __setup("acpi_sleep=", acpi_sleep_setup);
 
+/* Ouch, we want to delete this. We already have a better version in userspace, in
+   s2ram from the suspend.sf.net project */
 static __init int reset_videomode_after_s3(struct dmi_system_id *d)
 {
-	acpi_video_flags |= 2;
+	acpi_realmode_flags |= 2;
 	return 0;
 }
 
diff --git a/arch/i386/kernel/acpi/wakeup.S b/arch/i386/kernel/acpi/wakeup.S
index a2295a3..ed0a0f2 100644
--- a/arch/i386/kernel/acpi/wakeup.S
+++ b/arch/i386/kernel/acpi/wakeup.S
@@ -13,6 +13,21 @@
 # cs = 0x1234, eip = 0x05
 # 
 
+#define BEEP \
+	inb	$97, %al; 	\
+	outb	%al, $0x80; 	\
+	movb	$3, %al; 	\
+	outb	%al, $97; 	\
+	outb	%al, $0x80; 	\
+	movb	$-74, %al; 	\
+	outb	%al, $67; 	\
+	outb	%al, $0x80; 	\
+	movb	$-119, %al; 	\
+	outb	%al, $66; 	\
+	outb	%al, $0x80; 	\
+	movb	$15, %al; 	\
+	outb	%al, $66;
+
 ALIGN
 	.align	4096
 ENTRY(wakeup_start)
@@ -31,6 +46,11 @@
 	movw	%cs, %ax
 	movw	%ax, %ds					# Make ds:0 point to wakeup_start
 	movw	%ax, %ss
+
+	testl   $4, realmode_flags - wakeup_code
+	jz      1f
+	BEEP
+1:
 	mov	$(wakeup_stack - wakeup_code), %sp		# Private stack is needed for ASUS board
 	movw	$0x0e00 + 'S', %fs:(0x12)
 
@@ -41,7 +61,7 @@
 	cmpl	$0x12345678, %eax
 	jne	bogus_real_magic
 
-	testl	$1, video_flags - wakeup_code
+	testl	$1, realmode_flags - wakeup_code
 	jz	1f
 	lcall   $0xc000,$3
 	movw	%cs, %ax
@@ -49,7 +69,7 @@
 	movw	%ax, %ss
 1:
 
-	testl	$2, video_flags - wakeup_code
+	testl	$2, realmode_flags - wakeup_code
 	jz	1f
 	mov	video_mode - wakeup_code, %ax
 	call	mode_set
@@ -88,7 +108,11 @@
 	cmpl	$0x12345678, %eax
 	jne	bogus_real_magic
 
-	ljmpl	$__KERNEL_CS,$wakeup_pmode_return
+	testl   $8, realmode_flags - wakeup_code
+	jz      1f
+	BEEP
+1:
+	ljmpl	$__KERNEL_CS, $wakeup_pmode_return
 
 real_save_gdt:	.word 0
 		.long 0
@@ -97,7 +121,8 @@
 real_save_cr4:	.long 0
 real_magic:	.long 0
 video_mode:	.long 0
-video_flags:	.long 0
+realmode_flags:	.long 0
+beep_flags:	.long 0
 real_efer_save_restore:	.long 0
 real_save_efer_edx: 	.long 0
 real_save_efer_eax: 	.long 0
@@ -260,8 +285,8 @@
 
 	movl	saved_videomode, %edx
 	movl	%edx, video_mode - wakeup_start (%eax)
-	movl	acpi_video_flags, %edx
-	movl	%edx, video_flags - wakeup_start (%eax)
+	movl	acpi_realmode_flags, %edx
+	movl	%edx, realmode_flags - wakeup_start (%eax)
 	movl	$0x12345678, real_magic - wakeup_start (%eax)
 	movl	$0x12345678, saved_magic
 	popl	%ebx
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 4112afe..47001d5 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -222,6 +222,7 @@
 #include <linux/capability.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
+#include <linux/freezer.h>
 #include <linux/smp.h>
 #include <linux/dmi.h>
 #include <linux/suspend.h>
@@ -2311,7 +2312,6 @@
 		remove_proc_entry("apm", NULL);
 		return err;
 	}
-	kapmd_task->flags |= PF_NOFREEZE;
 	wake_up_process(kapmd_task);
 
 	if (num_online_cpus() > 1 && !smp ) {
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
index 27a776c..7288ac8 100644
--- a/arch/i386/kernel/asm-offsets.c
+++ b/arch/i386/kernel/asm-offsets.c
@@ -17,6 +17,13 @@
 #include <asm/thread_info.h>
 #include <asm/elf.h>
 
+#include <xen/interface/xen.h>
+
+#ifdef CONFIG_LGUEST_GUEST
+#include <linux/lguest.h>
+#include "../../../drivers/lguest/lg.h"
+#endif
+
 #define DEFINE(sym, val) \
         asm volatile("\n->" #sym " %0 " #val : : "i" (val))
 
@@ -59,6 +66,7 @@
 	OFFSET(TI_addr_limit, thread_info, addr_limit);
 	OFFSET(TI_restart_block, thread_info, restart_block);
 	OFFSET(TI_sysenter_return, thread_info, sysenter_return);
+	OFFSET(TI_cpu, thread_info, cpu);
 	BLANK();
 
 	OFFSET(GDS_size, Xgt_desc_struct, size);
@@ -115,4 +123,25 @@
 	OFFSET(PARAVIRT_iret, paravirt_ops, iret);
 	OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0);
 #endif
+
+#ifdef CONFIG_XEN
+	BLANK();
+	OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
+	OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
+#endif
+
+#ifdef CONFIG_LGUEST_GUEST
+	BLANK();
+	OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
+	OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc);
+	OFFSET(LGUEST_PAGES_host_idt_desc, lguest_pages, state.host_idt_desc);
+	OFFSET(LGUEST_PAGES_host_cr3, lguest_pages, state.host_cr3);
+	OFFSET(LGUEST_PAGES_host_sp, lguest_pages, state.host_sp);
+	OFFSET(LGUEST_PAGES_guest_gdt_desc, lguest_pages,state.guest_gdt_desc);
+	OFFSET(LGUEST_PAGES_guest_idt_desc, lguest_pages,state.guest_idt_desc);
+	OFFSET(LGUEST_PAGES_guest_gdt, lguest_pages, state.guest_gdt);
+	OFFSET(LGUEST_PAGES_regs_trapnum, lguest_pages, regs.trapnum);
+	OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode);
+	OFFSET(LGUEST_PAGES_regs, lguest_pages, regs);
+#endif
 }
diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c
index 7ba7c3a..1203dc5 100644
--- a/arch/i386/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c
@@ -134,19 +134,21 @@
 	int err;
 
 	sys_dev = get_cpu_sysdev(cpu);
-	mutex_lock(&therm_cpu_lock);
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
+		mutex_lock(&therm_cpu_lock);
 		err = thermal_throttle_add_dev(sys_dev);
+		mutex_unlock(&therm_cpu_lock);
 		WARN_ON(err);
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
+		mutex_lock(&therm_cpu_lock);
 		thermal_throttle_remove_dev(sys_dev);
+		mutex_unlock(&therm_cpu_lock);
 		break;
 	}
-	mutex_unlock(&therm_cpu_lock);
 	return NOTIFY_OK;
 }
 
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index a180802..2452c6f 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -278,7 +278,7 @@
 	struct range {
 		unsigned long start;
 		unsigned long end;
-	} prev, curr;
+	} uninitialized_var(prev), curr;
 	efi_memory_desc_t *md;
 	unsigned long start, end;
 	void *p;
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 3c3c220..a714d6b 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -409,8 +409,6 @@
 1:	INTERRUPT_RETURN
 .section .fixup,"ax"
 iret_exc:
-	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushl $0			# no error code
 	pushl $do_iret_error
 	jmp error_code
@@ -1023,6 +1021,91 @@
 	CFI_ENDPROC
 ENDPROC(kernel_thread_helper)
 
+#ifdef CONFIG_XEN
+ENTRY(xen_hypervisor_callback)
+	CFI_STARTPROC
+	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
+	SAVE_ALL
+	TRACE_IRQS_OFF
+
+	/* Check to see if we got the event in the critical
+	   region in xen_iret_direct, after we've reenabled
+	   events and checked for pending events.  This simulates
+	   the iret instruction's behaviour where it delivers a
+	   pending interrupt when enabling interrupts. */
+	movl PT_EIP(%esp),%eax
+	cmpl $xen_iret_start_crit,%eax
+	jb   1f
+	cmpl $xen_iret_end_crit,%eax
+	jae  1f
+
+	call xen_iret_crit_fixup
+
+1:	mov %esp, %eax
+	call xen_evtchn_do_upcall
+	jmp  ret_from_intr
+	CFI_ENDPROC
+ENDPROC(xen_hypervisor_callback)
+
+# Hypervisor uses this for application faults while it executes.
+# We get here for two reasons:
+#  1. Fault while reloading DS, ES, FS or GS
+#  2. Fault while executing IRET
+# Category 1 we fix up by reattempting the load, and zeroing the segment
+# register if the load fails.
+# Category 2 we fix up by jumping to do_iret_error. We cannot use the
+# normal Linux return path in this case because if we use the IRET hypercall
+# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
+# We distinguish between categories by maintaining a status value in EAX.
+ENTRY(xen_failsafe_callback)
+	CFI_STARTPROC
+	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	movl $1,%eax
+1:	mov 4(%esp),%ds
+2:	mov 8(%esp),%es
+3:	mov 12(%esp),%fs
+4:	mov 16(%esp),%gs
+	testl %eax,%eax
+	popl %eax
+	CFI_ADJUST_CFA_OFFSET -4
+	lea 16(%esp),%esp
+	CFI_ADJUST_CFA_OFFSET -16
+	jz 5f
+	addl $16,%esp
+	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
+5:	pushl $0		# EAX == 0 => Category 1 (Bad segment)
+	CFI_ADJUST_CFA_OFFSET 4
+	SAVE_ALL
+	jmp ret_from_exception
+	CFI_ENDPROC
+
+.section .fixup,"ax"
+6:	xorl %eax,%eax
+	movl %eax,4(%esp)
+	jmp 1b
+7:	xorl %eax,%eax
+	movl %eax,8(%esp)
+	jmp 2b
+8:	xorl %eax,%eax
+	movl %eax,12(%esp)
+	jmp 3b
+9:	xorl %eax,%eax
+	movl %eax,16(%esp)
+	jmp 4b
+.previous
+.section __ex_table,"a"
+	.align 4
+	.long 1b,6b
+	.long 2b,7b
+	.long 3b,8b
+	.long 4b,9b
+.previous
+ENDPROC(xen_failsafe_callback)
+
+#endif	/* CONFIG_XEN */
+
 .section .rodata,"a"
 #include "syscall_table.S"
 
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index 8271466..7c52b22 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -510,7 +510,8 @@
 /*
  * BSS section
  */
-.section ".bss.page_aligned","w"
+.section ".bss.page_aligned","wa"
+	.align PAGE_SIZE_asm
 ENTRY(swapper_pg_dir)
 	.fill 1024,4,0
 ENTRY(swapper_pg_pmd)
@@ -538,6 +539,8 @@
 	.ascii "Int %d: CR2 %p  err %p  EIP %p  CS %p  flags %p\n"
 	.asciz "Stack: %p %p %p %p %p %p %p %p\n"
 
+#include "../xen/xen-head.S"
+
 /*
  * The IDT and GDT 'descriptors' are a strange 48-bit object
  * only used by the lidt and lgdt instructions. They are not
diff --git a/arch/i386/kernel/init_task.c b/arch/i386/kernel/init_task.c
index cff95d1..d26fc06 100644
--- a/arch/i386/kernel/init_task.c
+++ b/arch/i386/kernel/init_task.c
@@ -42,5 +42,5 @@
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's.
  */ 
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 7f8b7af..21db8f5 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -667,6 +667,7 @@
 		set_pending_irq(i, cpumask_of_cpu(0));
 	}
 
+	set_freezable();
 	for ( ; ; ) {
 		time_remaining = schedule_timeout_interruptible(time_remaining);
 		try_to_freeze();
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index d2daf67..ba44d40 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -21,7 +21,7 @@
 #include <asm/apic.h>
 #include <asm/uaccess.h>
 
-DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 
 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index fba121f..03b7f55 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -295,7 +295,7 @@
 	last_irq_sums [NR_CPUS],
 	alert_counter [NR_CPUS];
 
-void touch_nmi_watchdog (void)
+void touch_nmi_watchdog(void)
 {
 	if (nmi_watchdog > 0) {
 		unsigned cpu;
@@ -304,8 +304,10 @@
 		 * Just reset the alert counters, (other CPUs might be
 		 * spinning on locks we hold):
 		 */
-		for_each_present_cpu (cpu)
-			alert_counter[cpu] = 0;
+		for_each_present_cpu(cpu) {
+			if (alert_counter[cpu])
+				alert_counter[cpu] = 0;
+		}
 	}
 
 	/*
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index faab09a..53f07a8 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -228,6 +228,41 @@
 }
 core_initcall(print_banner);
 
+static struct resource reserve_ioports = {
+	.start = 0,
+	.end = IO_SPACE_LIMIT,
+	.name = "paravirt-ioport",
+	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
+};
+
+static struct resource reserve_iomem = {
+	.start = 0,
+	.end = -1,
+	.name = "paravirt-iomem",
+	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
+};
+
+/*
+ * Reserve the whole legacy IO space to prevent any legacy drivers
+ * from wasting time probing for their hardware.  This is a fairly
+ * brute-force approach to disabling all non-virtual drivers.
+ *
+ * Note that this must be called very early to have any effect.
+ */
+int paravirt_disable_iospace(void)
+{
+	int ret;
+
+	ret = request_resource(&ioport_resource, &reserve_ioports);
+	if (ret == 0) {
+		ret = request_resource(&iomem_resource, &reserve_iomem);
+		if (ret)
+			release_resource(&reserve_ioports);
+	}
+
+	return ret;
+}
+
 struct paravirt_ops paravirt_ops = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
@@ -267,7 +302,7 @@
 	.write_msr = native_write_msr_safe,
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
-	.get_scheduled_cycles = native_read_tsc,
+	.sched_clock = native_sched_clock,
 	.get_cpu_khz = native_calculate_cpu_khz,
 	.load_tr_desc = native_load_tr_desc,
 	.set_ldt = native_set_ldt,
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 0c0ceec..0c8f00e 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -164,14 +164,22 @@
 		u32 *desc;
 		unsigned long base;
 
-		down(&child->mm->context.sem);
-		desc = child->mm->context.ldt + (seg & ~7);
-		base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);
+		seg &= ~7UL;
 
-		/* 16-bit code segment? */
-		if (!((desc[1] >> 22) & 1))
-			addr &= 0xffff;
-		addr += base;
+		down(&child->mm->context.sem);
+		if (unlikely((seg >> 3) >= child->mm->context.size))
+			addr = -1L; /* bogus selector, access would fault */
+		else {
+			desc = child->mm->context.ldt + seg;
+			base = ((desc[0] >> 16) |
+				((desc[1] & 0xff) << 16) |
+				(desc[1] & 0xff000000));
+
+			/* 16-bit code segment? */
+			if (!((desc[1] >> 22) & 1))
+				addr &= 0xffff;
+			addr += base;
+		}
 		up(&child->mm->context.sem);
 	}
 	return addr;
@@ -358,17 +366,9 @@
 	switch (request) {
 	/* when I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
-	case PTRACE_PEEKDATA: {
-		unsigned long tmp;
-		int copied;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		ret = -EIO;
-		if (copied != sizeof(tmp))
-			break;
-		ret = put_user(tmp, datap);
+	case PTRACE_PEEKDATA:
+		ret = generic_ptrace_peekdata(child, addr, data);
 		break;
-	}
 
 	/* read the word at location addr in the USER area. */
 	case PTRACE_PEEKUSR: {
@@ -395,10 +395,7 @@
 	/* when I and D space are separate, this will have to be fixed. */
 	case PTRACE_POKETEXT: /* write the word at location addr. */
 	case PTRACE_POKEDATA:
-		ret = 0;
-		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
-			break;
-		ret = -EIO;
+		ret = generic_ptrace_pokedata(child, addr, data);
 		break;
 
 	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 2d61e65..74871d0 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -601,6 +601,8 @@
 	 * NOTE: at this point the bootmem allocator is fully available.
 	 */
 
+	paravirt_post_allocator_init();
+
 	dmi_scan_machine();
 
 #ifdef CONFIG_X86_GENERICARCH
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 6299c08..2d35d85 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -22,6 +22,7 @@
 
 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 #include <mach_apic.h>
 
 /*
@@ -249,13 +250,13 @@
 static DEFINE_SPINLOCK(tlbstate_lock);
 
 /*
- * We cannot call mmdrop() because we are in interrupt context, 
+ * We cannot call mmdrop() because we are in interrupt context,
  * instead update mm->cpu_vm_mask.
  *
  * We need to reload %cr3 since the page tables may be going
  * away from under us..
  */
-static inline void leave_mm (unsigned long cpu)
+void leave_mm(unsigned long cpu)
 {
 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
 		BUG();
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 0b29545..5910d3f 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -148,7 +148,7 @@
  * a given CPU
  */
 
-static void __cpuinit smp_store_cpu_info(int id)
+void __cpuinit smp_store_cpu_info(int id)
 {
 	struct cpuinfo_x86 *c = cpu_data + id;
 
@@ -308,8 +308,7 @@
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
-static inline void
-set_cpu_sibling_map(int cpu)
+void set_cpu_sibling_map(int cpu)
 {
 	int i;
 	struct cpuinfo_x86 *c = cpu_data;
@@ -1144,8 +1143,7 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void
-remove_siblinginfo(int cpu)
+void remove_siblinginfo(int cpu)
 {
 	int sibling;
 	struct cpuinfo_x86 *c = cpu_data;
diff --git a/arch/i386/kernel/smpcommon.c b/arch/i386/kernel/smpcommon.c
index 1868ae1..bbfe85a 100644
--- a/arch/i386/kernel/smpcommon.c
+++ b/arch/i386/kernel/smpcommon.c
@@ -47,7 +47,7 @@
 EXPORT_SYMBOL(smp_call_function);
 
 /**
- * smp_call_function_single - Run a function on another CPU
+ * smp_call_function_single - Run a function on a specific CPU
  * @cpu: The target CPU.  Cannot be the calling CPU.
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
@@ -66,9 +66,11 @@
 	int ret;
 	int me = get_cpu();
 	if (cpu == me) {
-		WARN_ON(1);
+		local_irq_disable();
+		func(info);
+		local_irq_enable();
 		put_cpu();
-		return -EBUSY;
+		return 0;
 	}
 
 	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index bf6adce..8344c70 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -323,3 +323,4 @@
 	.long sys_signalfd
 	.long sys_timerfd
 	.long sys_eventfd
+	.long sys_fallocate
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 28bd1c5..3e7753c 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -41,6 +41,10 @@
 #include <linux/mca.h>
 #endif
 
+#if defined(CONFIG_EDAC)
+#include <linux/edac.h>
+#endif
+
 #include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/io.h>
@@ -433,6 +437,7 @@
 
 	bust_spinlocks(0);
 	die.lock_owner = -1;
+	add_taint(TAINT_DIE);
 	spin_unlock_irqrestore(&die.lock, flags);
 
 	if (!regs)
@@ -517,10 +522,12 @@
 	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
 }
 
-#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \
 fastcall void do_##name(struct pt_regs * regs, long error_code) \
 { \
 	siginfo_t info; \
+	if (irq) \
+		local_irq_enable(); \
 	info.si_signo = signr; \
 	info.si_errno = 0; \
 	info.si_code = sicode; \
@@ -560,13 +567,13 @@
 #endif
 DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
 DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
+DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip, 0)
 DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
 DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
 DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
 DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
-DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
+DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
 
 fastcall void __kprobes do_general_protection(struct pt_regs * regs,
 					      long error_code)
@@ -635,6 +642,14 @@
 	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
 		"CPU %d.\n", reason, smp_processor_id());
 	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
+
+#if defined(CONFIG_EDAC)
+	if(edac_handler_set()) {
+		edac_atomic_assert_error();
+		return;
+	}
+#endif
+
 	if (panic_on_unrecovered_nmi)
                 panic("NMI: Not continuing");
 
@@ -1053,6 +1068,7 @@
 	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
 	tsk->fpu_counter++;
 }
+EXPORT_SYMBOL_GPL(math_state_restore);
 
 #ifndef CONFIG_MATH_EMULATION
 
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index ea63a30..debd7db 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -27,6 +27,7 @@
  * an extra value to store the TSC freq
  */
 unsigned int tsc_khz;
+EXPORT_SYMBOL_GPL(tsc_khz);
 
 int tsc_disable;
 
@@ -58,10 +59,11 @@
  */
 static int tsc_unstable;
 
-static inline int check_tsc_unstable(void)
+int check_tsc_unstable(void)
 {
 	return tsc_unstable;
 }
+EXPORT_SYMBOL_GPL(check_tsc_unstable);
 
 /* Accellerators for sched_clock()
  * convert from cycles(64bits) => nanoseconds (64bits)
@@ -84,7 +86,7 @@
  *
  *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
  */
-static unsigned long cyc2ns_scale __read_mostly;
+unsigned long cyc2ns_scale __read_mostly;
 
 #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
 
@@ -93,15 +95,10 @@
 	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
 }
 
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
-	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
-}
-
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
-unsigned long long sched_clock(void)
+unsigned long long native_sched_clock(void)
 {
 	unsigned long long this_offset;
 
@@ -118,12 +115,24 @@
 		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 
 	/* read the Time Stamp Counter: */
-	get_scheduled_cycles(this_offset);
+	rdtscll(this_offset);
 
 	/* return the value in ns */
 	return cycles_2_ns(this_offset);
 }
 
+/* We need to define a real function for sched_clock, to override the
+   weak default version */
+#ifdef CONFIG_PARAVIRT
+unsigned long long sched_clock(void)
+{
+	return paravirt_sched_clock();
+}
+#else
+unsigned long long sched_clock(void)
+	__attribute__((alias("native_sched_clock")));
+#endif
+
 unsigned long native_calculate_cpu_khz(void)
 {
 	unsigned long long start, end;
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index c12720d..72042bb 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -362,7 +362,7 @@
 }
 #endif
 
-static void vmi_allocate_pt(u32 pfn)
+static void vmi_allocate_pt(struct mm_struct *mm, u32 pfn)
 {
 	vmi_set_page_type(pfn, VMI_PAGE_L1);
 	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
@@ -891,7 +891,7 @@
 		paravirt_ops.setup_boot_clock = vmi_time_bsp_init;
 		paravirt_ops.setup_secondary_clock = vmi_time_ap_init;
 #endif
-		paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles;
+		paravirt_ops.sched_clock = vmi_sched_clock;
  		paravirt_ops.get_cpu_khz = vmi_cpu_khz;
 
 		/* We have true wallclock functions; disable CMOS clock sync */
diff --git a/arch/i386/kernel/vmiclock.c b/arch/i386/kernel/vmiclock.c
index 26a37f8..f9b845f 100644
--- a/arch/i386/kernel/vmiclock.c
+++ b/arch/i386/kernel/vmiclock.c
@@ -64,10 +64,10 @@
 	return 0;
 }
 
-/* paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles */
-unsigned long long vmi_get_sched_cycles(void)
+/* paravirt_ops.sched_clock = vmi_sched_clock */
+unsigned long long vmi_sched_clock(void)
 {
-	return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE);
+	return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE));
 }
 
 /* paravirt_ops.get_cpu_khz = vmi_cpu_khz */
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index aa87b06..7d72cce 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -60,7 +60,9 @@
   	__stop___ex_table = .;
   }
 
-  BUG_TABLE
+  NOTES :text :note
+
+  BUG_TABLE :text
 
   . = ALIGN(4);
   .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {
@@ -88,6 +90,7 @@
 
   . = ALIGN(4096);
   .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
+	*(.data.page_aligned)
 	*(.data.idt)
   }
 
@@ -180,6 +183,7 @@
   .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
 	__per_cpu_start = .;
 	*(.data.percpu)
+	*(.data.percpu.shared_aligned)
 	__per_cpu_end = .;
   }
   . = ALIGN(4096);
@@ -206,6 +210,4 @@
   STABS_DEBUG
 
   DWARF_DEBUG
-
-  NOTES
 }
diff --git a/arch/i386/kernel/vsyscall-note.S b/arch/i386/kernel/vsyscall-note.S
index d4b5be4..271f16a 100644
--- a/arch/i386/kernel/vsyscall-note.S
+++ b/arch/i386/kernel/vsyscall-note.S
@@ -3,23 +3,40 @@
  * Here we can supply some information useful to userland.
  */
 
-#include <linux/uts.h>
 #include <linux/version.h>
+#include <linux/elfnote.h>
 
-#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type)			      \
-	.section name, flags;						      \
-	.balign 4;							      \
-	.long 1f - 0f;		/* name length */			      \
-	.long 3f - 2f;		/* data length */			      \
-	.long type;		/* note type */				      \
-0:	.asciz vendor;		/* vendor name */			      \
-1:	.balign 4;							      \
-2:
-
-#define ASM_ELF_NOTE_END						      \
-3:	.balign 4;		/* pad out section */			      \
-	.previous
-
-	ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0)
+/* Ideally this would use UTS_NAME, but using a quoted string here
+   doesn't work. Remember to change this when changing the
+   kernel's name. */
+ELFNOTE_START(Linux, 0, "a")
 	.long LINUX_VERSION_CODE
-	ASM_ELF_NOTE_END
+ELFNOTE_END
+
+#ifdef CONFIG_XEN
+
+/*
+ * Add a special note telling glibc's dynamic linker a fake hardware
+ * flavor that it will use to choose the search path for libraries in the
+ * same way it uses real hardware capabilities like "mmx".
+ * We supply "nosegneg" as the fake capability, to indicate that we
+ * do not like negative offsets in instructions using segment overrides,
+ * since we implement those inefficiently.  This makes it possible to
+ * install libraries optimized to avoid those access patterns in someplace
+ * like /lib/i686/tls/nosegneg.  Note that a file in /etc/ld.so.conf.d/
+ * corresponding to the bits here is needed to make ldconfig work right.
+ * It should contain:
+ *	hwcap 1 nosegneg
+ * to match the mapping of bit to name that we give here.
+ */
+
+/* Bit used for the pseudo-hwcap for non-negative segments.  We use
+   bit 1 to avoid bugs in some versions of glibc when bit 0 is
+   used; the choice is otherwise arbitrary. */
+#define VDSO_NOTE_NONEGSEG_BIT	1
+
+ELFNOTE_START(GNU, 2, "a")
+	.long 1, 1<<VDSO_NOTE_NONEGSEG_BIT		/* ncaps, mask */
+	.byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg"	/* bit, name */
+ELFNOTE_END
+#endif
diff --git a/arch/i386/mach-voyager/voyager_thread.c b/arch/i386/mach-voyager/voyager_thread.c
index b4b24e0..f9d5953 100644
--- a/arch/i386/mach-voyager/voyager_thread.c
+++ b/arch/i386/mach-voyager/voyager_thread.c
@@ -52,7 +52,7 @@
 		NULL,
 	};
 
-	if ((ret = call_usermodehelper(argv[0], argv, envp, 1)) != 0) {
+	if ((ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC)) != 0) {
 		printk(KERN_ERR "Voyager failed to run \"%s\": %i\n",
 		       string, ret);
 	}
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 1ecb3e4..e92a101 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -303,6 +303,7 @@
 	struct vm_area_struct * vma;
 	unsigned long address;
 	int write, si_code;
+	int fault;
 
 	/* get the address */
         address = read_cr2();
@@ -422,20 +423,18 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-		case VM_FAULT_MINOR:
-			tsk->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			tsk->maj_flt++;
-			break;
-		case VM_FAULT_SIGBUS:
-			goto do_sigbus;
-		case VM_FAULT_OOM:
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
-		default:
-			BUG();
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 
 	/*
 	 * Did it hit the DOS screen memory VA from vm86 mode?
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 7135946..6a68b1a 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -87,7 +87,7 @@
 	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
 		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 
-		paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT);
+		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
 		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
 	}
@@ -473,6 +473,7 @@
 
 static int disable_nx __initdata = 0;
 u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
+EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 /*
  * noexec = on|off
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 2eb14a7..37992ff 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -60,7 +60,7 @@
 	address = __pa(address);
 	addr = address & LARGE_PAGE_MASK; 
 	pbase = (pte_t *)page_address(base);
-	paravirt_alloc_pt(page_to_pfn(base));
+	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
                                           addr == address ? prot : ref_prot));
diff --git a/arch/i386/video/Makefile b/arch/i386/video/Makefile
new file mode 100644
index 0000000..2c447c9
--- /dev/null
+++ b/arch/i386/video/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_FB)               += fbdev.o
diff --git a/arch/i386/video/fbdev.c b/arch/i386/video/fbdev.c
new file mode 100644
index 0000000..48fb38d
--- /dev/null
+++ b/arch/i386/video/fbdev.c
@@ -0,0 +1,32 @@
+/*
+ * arch/i386/video/fbdev.c - i386 Framebuffer
+ *
+ * Copyright (C) 2007 Antonino Daplas <adaplas@gmail.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ */
+#include <linux/fb.h>
+#include <linux/pci.h>
+
+int fb_is_primary_device(struct fb_info *info)
+{
+	struct device *device = info->device;
+	struct pci_dev *pci_dev = NULL;
+	struct resource *res = NULL;
+	int retval = 0;
+
+	if (device)
+		pci_dev = to_pci_dev(device);
+
+	if (pci_dev)
+		res = &pci_dev->resource[PCI_ROM_RESOURCE];
+
+	if (res && res->flags & IORESOURCE_ROM_SHADOW)
+		retval = 1;
+
+	return retval;
+}
+EXPORT_SYMBOL(fb_is_primary_device);
diff --git a/arch/i386/xen/Kconfig b/arch/i386/xen/Kconfig
new file mode 100644
index 0000000..9df99e1
--- /dev/null
+++ b/arch/i386/xen/Kconfig
@@ -0,0 +1,11 @@
+#
+# This Kconfig describes xen options
+#
+
+config XEN
+	bool "Enable support for Xen hypervisor"
+	depends on PARAVIRT && X86_CMPXCHG && X86_TSC && !NEED_MULTIPLE_NODES
+	help
+	  This is the Linux Xen port.  Enabling this will allow the
+	  kernel to boot in a paravirtualized environment under the
+	  Xen hypervisor.
diff --git a/arch/i386/xen/Makefile b/arch/i386/xen/Makefile
new file mode 100644
index 0000000..343df24
--- /dev/null
+++ b/arch/i386/xen/Makefile
@@ -0,0 +1,4 @@
+obj-y		:= enlighten.o setup.o features.o multicalls.o mmu.o \
+			events.o time.o manage.o xen-asm.o
+
+obj-$(CONFIG_SMP)	+= smp.o
diff --git a/arch/i386/xen/enlighten.c b/arch/i386/xen/enlighten.c
new file mode 100644
index 0000000..9a8c118
--- /dev/null
+++ b/arch/i386/xen/enlighten.c
@@ -0,0 +1,1144 @@
+/*
+ * Core of Xen paravirt_ops implementation.
+ *
+ * This file contains the xen_paravirt_ops structure itself, and the
+ * implementations for:
+ * - privileged instructions
+ * - interrupt flags
+ * - segment operations
+ * - booting and setup
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/preempt.h>
+#include <linux/hardirq.h>
+#include <linux/percpu.h>
+#include <linux/delay.h>
+#include <linux/start_kernel.h>
+#include <linux/sched.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/highmem.h>
+#include <linux/smp.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/physdev.h>
+#include <xen/interface/vcpu.h>
+#include <xen/interface/sched.h>
+#include <xen/features.h>
+#include <xen/page.h>
+
+#include <asm/paravirt.h>
+#include <asm/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/fixmap.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/reboot.h>
+
+#include "xen-ops.h"
+#include "mmu.h"
+#include "multicalls.h"
+
+EXPORT_SYMBOL_GPL(hypercall_page);
+
+DEFINE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
+
+DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
+DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
+DEFINE_PER_CPU(unsigned long, xen_cr3);
+
+struct start_info *xen_start_info;
+EXPORT_SYMBOL_GPL(xen_start_info);
+
+static /* __initdata */ struct shared_info dummy_shared_info;
+
+/*
+ * Point at some empty memory to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+ */
+struct shared_info *HYPERVISOR_shared_info = (void *)&dummy_shared_info;
+
+/*
+ * Flag to determine whether vcpu info placement is available on all
+ * VCPUs.  We assume it is to start with, and then set it to zero on
+ * the first failure.  This is because it can succeed on some VCPUs
+ * and not others, since it can involve hypervisor memory allocation,
+ * or because the guest failed to guarantee all the appropriate
+ * constraints on all VCPUs (i.e. the buffer can't cross a page boundary).
+ *
+ * Note that any particular CPU may be using a placed vcpu structure,
+ * but we can only optimise if they all are.
+ *
+ * 0: not available, 1: available
+ */
+static int have_vcpu_info_placement = 1;
+
+static void __init xen_vcpu_setup(int cpu)
+{
+	struct vcpu_register_vcpu_info info;
+	int err;
+	struct vcpu_info *vcpup;
+
+	per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+
+	if (!have_vcpu_info_placement)
+		return;		/* already tested, not available */
+
+	vcpup = &per_cpu(xen_vcpu_info, cpu);
+
+	info.mfn = virt_to_mfn(vcpup);
+	info.offset = offset_in_page(vcpup);
+
+	printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %x, offset %d\n",
+	       cpu, vcpup, info.mfn, info.offset);
+
+	/* Check to see if the hypervisor will put the vcpu_info
+	   structure where we want it, which allows direct access via
+	   a percpu variable. */
+	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
+
+	if (err) {
+		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
+		have_vcpu_info_placement = 0;
+	} else {
+		/* This cpu is using the registered vcpu info, even if
+		   later ones fail to. */
+		per_cpu(xen_vcpu, cpu) = vcpup;
+
+		printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
+		       cpu, vcpup);
+	}
+}
+
+static void __init xen_banner(void)
+{
+	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
+	       paravirt_ops.name);
+	printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic);
+}
+
+static void xen_cpuid(unsigned int *eax, unsigned int *ebx,
+		      unsigned int *ecx, unsigned int *edx)
+{
+	unsigned maskedx = ~0;
+
+	/*
+	 * Mask out inconvenient features, to try and disable as many
+	 * unsupported kernel subsystems as possible.
+	 */
+	if (*eax == 1)
+		maskedx = ~((1 << X86_FEATURE_APIC) |  /* disable APIC */
+			    (1 << X86_FEATURE_ACPI) |  /* disable ACPI */
+			    (1 << X86_FEATURE_ACC));   /* thermal monitoring */
+
+	asm(XEN_EMULATE_PREFIX "cpuid"
+		: "=a" (*eax),
+		  "=b" (*ebx),
+		  "=c" (*ecx),
+		  "=d" (*edx)
+		: "0" (*eax), "2" (*ecx));
+	*edx &= maskedx;
+}
+
+static void xen_set_debugreg(int reg, unsigned long val)
+{
+	HYPERVISOR_set_debugreg(reg, val);
+}
+
+static unsigned long xen_get_debugreg(int reg)
+{
+	return HYPERVISOR_get_debugreg(reg);
+}
+
+static unsigned long xen_save_fl(void)
+{
+	struct vcpu_info *vcpu;
+	unsigned long flags;
+
+	vcpu = x86_read_percpu(xen_vcpu);
+
+	/* flag has opposite sense of mask */
+	flags = !vcpu->evtchn_upcall_mask;
+
+	/* convert to IF type flag
+	   -0 -> 0x00000000
+	   -1 -> 0xffffffff
+	*/
+	return (-flags) & X86_EFLAGS_IF;
+}
+
+static void xen_restore_fl(unsigned long flags)
+{
+	struct vcpu_info *vcpu;
+
+	/* convert from IF type flag */
+	flags = !(flags & X86_EFLAGS_IF);
+
+	/* There's a one instruction preempt window here.  We need to
+	   make sure we don't switch CPUs between getting the vcpu
+	   pointer and updating the mask. */
+	preempt_disable();
+	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu->evtchn_upcall_mask = flags;
+	preempt_enable_no_resched();
+
+	/* Doesn't matter if we get preempted here, because any
+	   pending event will get dealt with anyway. */
+
+	if (flags == 0) {
+		preempt_check_resched();
+		barrier(); /* unmask then check (avoid races) */
+		if (unlikely(vcpu->evtchn_upcall_pending))
+			force_evtchn_callback();
+	}
+}
+
+static void xen_irq_disable(void)
+{
+	/* There's a one instruction preempt window here.  We need to
+	   make sure we don't switch CPUs between getting the vcpu
+	   pointer and updating the mask. */
+	preempt_disable();
+	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
+	preempt_enable_no_resched();
+}
+
+static void xen_irq_enable(void)
+{
+	struct vcpu_info *vcpu;
+
+	/* There's a one instruction preempt window here.  We need to
+	   make sure we don't switch CPUs between getting the vcpu
+	   pointer and updating the mask. */
+	preempt_disable();
+	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu->evtchn_upcall_mask = 0;
+	preempt_enable_no_resched();
+
+	/* Doesn't matter if we get preempted here, because any
+	   pending event will get dealt with anyway. */
+
+	barrier(); /* unmask then check (avoid races) */
+	if (unlikely(vcpu->evtchn_upcall_pending))
+		force_evtchn_callback();
+}
+
+static void xen_safe_halt(void)
+{
+	/* Blocking includes an implicit local_irq_enable(). */
+	if (HYPERVISOR_sched_op(SCHEDOP_block, 0) != 0)
+		BUG();
+}
+
+static void xen_halt(void)
+{
+	if (irqs_disabled())
+		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+	else
+		xen_safe_halt();
+}
+
+static void xen_set_lazy_mode(enum paravirt_lazy_mode mode)
+{
+	BUG_ON(preemptible());
+
+	switch (mode) {
+	case PARAVIRT_LAZY_NONE:
+		BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE);
+		break;
+
+	case PARAVIRT_LAZY_MMU:
+	case PARAVIRT_LAZY_CPU:
+		BUG_ON(x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE);
+		break;
+
+	case PARAVIRT_LAZY_FLUSH:
+		/* flush if necessary, but don't change state */
+		if (x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE)
+			xen_mc_flush();
+		return;
+	}
+
+	xen_mc_flush();
+	x86_write_percpu(xen_lazy_mode, mode);
+}
+
+static unsigned long xen_store_tr(void)
+{
+	return 0;
+}
+
+static void xen_set_ldt(const void *addr, unsigned entries)
+{
+	unsigned long linear_addr = (unsigned long)addr;
+	struct mmuext_op *op;
+	struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+
+	op = mcs.args;
+	op->cmd = MMUEXT_SET_LDT;
+	if (linear_addr) {
+		/* ldt may be vmalloced, use arbitrary_virt_to_machine */
+		xmaddr_t maddr;
+		maddr = arbitrary_virt_to_machine((unsigned long)addr);
+		linear_addr = (unsigned long)maddr.maddr;
+	}
+	op->arg1.linear_addr = linear_addr;
+	op->arg2.nr_ents = entries;
+
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
+static void xen_load_gdt(const struct Xgt_desc_struct *dtr)
+{
+	unsigned long *frames;
+	unsigned long va = dtr->address;
+	unsigned int size = dtr->size + 1;
+	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+	int f;
+	struct multicall_space mcs;
+
+	/* A GDT can be up to 64k in size, which corresponds to 8192
+	   8-byte entries, or 16 4k pages. */
+
+	BUG_ON(size > 65536);
+	BUG_ON(va & ~PAGE_MASK);
+
+	mcs = xen_mc_entry(sizeof(*frames) * pages);
+	frames = mcs.args;
+
+	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
+		frames[f] = virt_to_mfn(va);
+		make_lowmem_page_readonly((void *)va);
+	}
+
+	MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct));
+
+	xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
+static void load_TLS_descriptor(struct thread_struct *t,
+				unsigned int cpu, unsigned int i)
+{
+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+	xmaddr_t maddr = virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
+	struct multicall_space mc = __xen_mc_entry(0);
+
+	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
+}
+
+static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+	xen_mc_batch();
+
+	load_TLS_descriptor(t, cpu, 0);
+	load_TLS_descriptor(t, cpu, 1);
+	load_TLS_descriptor(t, cpu, 2);
+
+	xen_mc_issue(PARAVIRT_LAZY_CPU);
+
+	/*
+	 * XXX sleazy hack: If we're being called in a lazy-cpu zone,
+	 * it means we're in a context switch, and %gs has just been
+	 * saved.  This means we can zero it out to prevent faults on
+	 * exit from the hypervisor if the next process has no %gs.
+	 * Either way, it has been saved, and the new value will get
+	 * loaded properly.  This will go away as soon as Xen has been
+	 * modified to not save/restore %gs for normal hypercalls.
+	 */
+	if (xen_get_lazy_mode() == PARAVIRT_LAZY_CPU)
+		loadsegment(gs, 0);
+}
+
+static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
+				u32 low, u32 high)
+{
+	unsigned long lp = (unsigned long)&dt[entrynum];
+	xmaddr_t mach_lp = virt_to_machine(lp);
+	u64 entry = (u64)high << 32 | low;
+
+	preempt_disable();
+
+	xen_mc_flush();
+	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
+		BUG();
+
+	preempt_enable();
+}
+
+static int cvt_gate_to_trap(int vector, u32 low, u32 high,
+			    struct trap_info *info)
+{
+	u8 type, dpl;
+
+	type = (high >> 8) & 0x1f;
+	dpl = (high >> 13) & 3;
+
+	if (type != 0xf && type != 0xe)
+		return 0;
+
+	info->vector = vector;
+	info->address = (high & 0xffff0000) | (low & 0x0000ffff);
+	info->cs = low >> 16;
+	info->flags = dpl;
+	/* interrupt gates clear IF */
+	if (type == 0xe)
+		info->flags |= 4;
+
+	return 1;
+}
+
+/* Locations of each CPU's IDT */
+static DEFINE_PER_CPU(struct Xgt_desc_struct, idt_desc);
+
+/* Set an IDT entry.  If the entry is part of the current IDT, then
+   also update Xen. */
+static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
+				u32 low, u32 high)
+{
+	unsigned long p = (unsigned long)&dt[entrynum];
+	unsigned long start, end;
+
+	preempt_disable();
+
+	start = __get_cpu_var(idt_desc).address;
+	end = start + __get_cpu_var(idt_desc).size + 1;
+
+	xen_mc_flush();
+
+	write_dt_entry(dt, entrynum, low, high);
+
+	if (p >= start && (p + 8) <= end) {
+		struct trap_info info[2];
+
+		info[1].address = 0;
+
+		if (cvt_gate_to_trap(entrynum, low, high, &info[0]))
+			if (HYPERVISOR_set_trap_table(info))
+				BUG();
+	}
+
+	preempt_enable();
+}
+
+static void xen_convert_trap_info(const struct Xgt_desc_struct *desc,
+				  struct trap_info *traps)
+{
+	unsigned in, out, count;
+
+	count = (desc->size+1) / 8;
+	BUG_ON(count > 256);
+
+	for (in = out = 0; in < count; in++) {
+		const u32 *entry = (u32 *)(desc->address + in * 8);
+
+		if (cvt_gate_to_trap(in, entry[0], entry[1], &traps[out]))
+			out++;
+	}
+	traps[out].address = 0;
+}
+
+void xen_copy_trap_info(struct trap_info *traps)
+{
+	const struct Xgt_desc_struct *desc = &__get_cpu_var(idt_desc);
+
+	xen_convert_trap_info(desc, traps);
+}
+
+/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
+   hold a spinlock to protect the static traps[] array (static because
+   it avoids allocation, and saves stack space). */
+static void xen_load_idt(const struct Xgt_desc_struct *desc)
+{
+	static DEFINE_SPINLOCK(lock);
+	static struct trap_info traps[257];
+
+	spin_lock(&lock);
+
+	__get_cpu_var(idt_desc) = *desc;
+
+	xen_convert_trap_info(desc, traps);
+
+	xen_mc_flush();
+	if (HYPERVISOR_set_trap_table(traps))
+		BUG();
+
+	spin_unlock(&lock);
+}
+
+/* Write a GDT descriptor entry.  Ignore LDT descriptors, since
+   they're handled differently. */
+static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
+				u32 low, u32 high)
+{
+	preempt_disable();
+
+	switch ((high >> 8) & 0xff) {
+	case DESCTYPE_LDT:
+	case DESCTYPE_TSS:
+		/* ignore */
+		break;
+
+	default: {
+		xmaddr_t maddr = virt_to_machine(&dt[entry]);
+		u64 desc = (u64)high << 32 | low;
+
+		xen_mc_flush();
+		if (HYPERVISOR_update_descriptor(maddr.maddr, desc))
+			BUG();
+	}
+
+	}
+
+	preempt_enable();
+}
+
+static void xen_load_esp0(struct tss_struct *tss,
+			  struct thread_struct *thread)
+{
+	struct multicall_space mcs = xen_mc_entry(0);
+	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->esp0);
+	xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
+static void xen_set_iopl_mask(unsigned mask)
+{
+	struct physdev_set_iopl set_iopl;
+
+	/* Force the change at ring 0. */
+	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
+	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
+}
+
+static void xen_io_delay(void)
+{
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+static unsigned long xen_apic_read(unsigned long reg)
+{
+	return 0;
+}
+
+static void xen_apic_write(unsigned long reg, unsigned long val)
+{
+	/* Warn to see if there are any stray references */
+	WARN_ON(1);
+}
+#endif
+
+static void xen_flush_tlb(void)
+{
+	struct mmuext_op *op;
+	struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+
+	op = mcs.args;
+	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
+static void xen_flush_tlb_single(unsigned long addr)
+{
+	struct mmuext_op *op;
+	struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+
+	op = mcs.args;
+	op->cmd = MMUEXT_INVLPG_LOCAL;
+	op->arg1.linear_addr = addr & PAGE_MASK;
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
+static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
+				 unsigned long va)
+{
+	struct {
+		struct mmuext_op op;
+		cpumask_t mask;
+	} *args;
+	cpumask_t cpumask = *cpus;
+	struct multicall_space mcs;
+
+	/*
+	 * A couple of (to be removed) sanity checks:
+	 *
+	 * - current CPU must not be in mask
+	 * - mask must exist :)
+	 */
+	BUG_ON(cpus_empty(cpumask));
+	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+	BUG_ON(!mm);
+
+	/* If a CPU which we ran on has gone down, OK. */
+	cpus_and(cpumask, cpumask, cpu_online_map);
+	if (cpus_empty(cpumask))
+		return;
+
+	mcs = xen_mc_entry(sizeof(*args));
+	args = mcs.args;
+	args->mask = cpumask;
+	args->op.arg2.vcpumask = &args->mask;
+
+	if (va == TLB_FLUSH_ALL) {
+		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+	} else {
+		args->op.cmd = MMUEXT_INVLPG_MULTI;
+		args->op.arg1.linear_addr = va;
+	}
+
+	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
+static void xen_write_cr2(unsigned long cr2)
+{
+	x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
+}
+
+static unsigned long xen_read_cr2(void)
+{
+	return x86_read_percpu(xen_vcpu)->arch.cr2;
+}
+
+static unsigned long xen_read_cr2_direct(void)
+{
+	return x86_read_percpu(xen_vcpu_info.arch.cr2);
+}
+
+static void xen_write_cr4(unsigned long cr4)
+{
+	/* never allow TSC to be disabled */
+	native_write_cr4(cr4 & ~X86_CR4_TSD);
+}
+
+static unsigned long xen_read_cr3(void)
+{
+	return x86_read_percpu(xen_cr3);
+}
+
+static void xen_write_cr3(unsigned long cr3)
+{
+	BUG_ON(preemptible());
+
+	if (cr3 == x86_read_percpu(xen_cr3)) {
+		/* just a simple tlb flush */
+		xen_flush_tlb();
+		return;
+	}
+
+	x86_write_percpu(xen_cr3, cr3);
+
+
+	{
+		struct mmuext_op *op;
+		struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+		unsigned long mfn = pfn_to_mfn(PFN_DOWN(cr3));
+
+		op = mcs.args;
+		op->cmd = MMUEXT_NEW_BASEPTR;
+		op->arg1.mfn = mfn;
+
+		MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+		xen_mc_issue(PARAVIRT_LAZY_CPU);
+	}
+}
+
+/* Early in boot, while setting up the initial pagetable, assume
+   everything is pinned. */
+static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
+{
+	BUG_ON(mem_map);	/* should only be used early */
+	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
+}
+
+/* This needs to make sure the new pte page is pinned iff it's being
+   attached to a pinned pagetable. */
+static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
+{
+	struct page *page = pfn_to_page(pfn);
+
+	if (PagePinned(virt_to_page(mm->pgd))) {
+		SetPagePinned(page);
+
+		if (!PageHighMem(page))
+			make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
+		else
+			/* make sure there are no stray mappings of
+			   this page */
+			kmap_flush_unused();
+	}
+}
+
+/* This should never happen until we're OK to use struct page */
+static void xen_release_pt(u32 pfn)
+{
+	struct page *page = pfn_to_page(pfn);
+
+	if (PagePinned(page)) {
+		if (!PageHighMem(page))
+			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+	}
+}
+
+#ifdef CONFIG_HIGHPTE
+static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
+{
+	pgprot_t prot = PAGE_KERNEL;
+
+	if (PagePinned(page))
+		prot = PAGE_KERNEL_RO;
+
+	if (0 && PageHighMem(page))
+		printk("mapping highpte %lx type %d prot %s\n",
+		       page_to_pfn(page), type,
+		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
+
+	return kmap_atomic_prot(page, type, prot);
+}
+#endif
+
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
+	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
+		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+			       pte_val_ma(pte));
+
+	return pte;
+}
+
+/* Init-time set_pte while constructing initial pagetables, which
+   doesn't allow RO pagetable pages to be remapped RW */
+static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+	pte = mask_rw_pte(ptep, pte);
+
+	xen_set_pte(ptep, pte);
+}
+
+static __init void xen_pagetable_setup_start(pgd_t *base)
+{
+	pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
+
+	/* special set_pte for pagetable initialization */
+	paravirt_ops.set_pte = xen_set_pte_init;
+
+	init_mm.pgd = base;
+	/*
+	 * copy the top level of the Xen-supplied pagetable into place.  For
+	 * !PAE we can use this as-is, but for PAE it is a stand-in
+	 * while we copy the pmd pages.
+	 */
+	memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t));
+
+	if (PTRS_PER_PMD > 1) {
+		int i;
+		/*
+		 * For PAE, need to allocate new pmds, rather than
+		 * share Xen's, since Xen doesn't like pmd's being
+		 * shared between address spaces.
+		 */
+		for (i = 0; i < PTRS_PER_PGD; i++) {
+			if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
+				pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+
+				memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
+				       PAGE_SIZE);
+
+				make_lowmem_page_readonly(pmd);
+
+				set_pgd(&base[i], __pgd(1 + __pa(pmd)));
+			} else
+				pgd_clear(&base[i]);
+		}
+	}
+
+	/* make sure zero_page is mapped RO so we can use it in pagetables */
+	make_lowmem_page_readonly(empty_zero_page);
+	make_lowmem_page_readonly(base);
+	/*
+	 * Switch to new pagetable.  This is done before
+	 * pagetable_init has done anything so that the new pages
+	 * added to the table can be prepared properly for Xen.
+	 */
+	xen_write_cr3(__pa(base));
+}
+
+static __init void xen_pagetable_setup_done(pgd_t *base)
+{
+	/* This will work as long as patching hasn't happened yet
+	   (which it hasn't) */
+	paravirt_ops.alloc_pt = xen_alloc_pt;
+	paravirt_ops.set_pte = xen_set_pte;
+
+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+		/*
+		 * Create a mapping for the shared info page.
+		 * Should be set_fixmap(), but shared_info is a machine
+		 * address with no corresponding pseudo-phys address.
+		 */
+		set_pte_mfn(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
+			    PFN_DOWN(xen_start_info->shared_info),
+			    PAGE_KERNEL);
+
+		HYPERVISOR_shared_info =
+			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
+
+	} else
+		HYPERVISOR_shared_info =
+			(struct shared_info *)__va(xen_start_info->shared_info);
+
+	/* Actually pin the pagetable down, but we can't set PG_pinned
+	   yet because the page structures don't exist yet. */
+	{
+		struct mmuext_op op;
+#ifdef CONFIG_X86_PAE
+		op.cmd = MMUEXT_PIN_L3_TABLE;
+#else
+		op.cmd = MMUEXT_PIN_L2_TABLE;
+#endif
+		op.arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(base)));
+		if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
+			BUG();
+	}
+}
+
+/* This is called once we have the cpu_possible_map */
+void __init xen_setup_vcpu_info_placement(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		xen_vcpu_setup(cpu);
+
+	/* xen_vcpu_setup managed to place the vcpu_info within the
+	   percpu area for all cpus, so make use of it */
+	if (have_vcpu_info_placement) {
+		printk(KERN_INFO "Xen: using vcpu_info placement\n");
+
+		paravirt_ops.save_fl = xen_save_fl_direct;
+		paravirt_ops.restore_fl = xen_restore_fl_direct;
+		paravirt_ops.irq_disable = xen_irq_disable_direct;
+		paravirt_ops.irq_enable = xen_irq_enable_direct;
+		paravirt_ops.read_cr2 = xen_read_cr2_direct;
+		paravirt_ops.iret = xen_iret_direct;
+	}
+}
+
+static unsigned xen_patch(u8 type, u16 clobbers, void *insns, unsigned len)
+{
+	char *start, *end, *reloc;
+	unsigned ret;
+
+	start = end = reloc = NULL;
+
+#define SITE(x)								\
+	case PARAVIRT_PATCH(x):						\
+	if (have_vcpu_info_placement) {					\
+		start = (char *)xen_##x##_direct;			\
+		end = xen_##x##_direct_end;				\
+		reloc = xen_##x##_direct_reloc;				\
+	}								\
+	goto patch_site
+
+	switch (type) {
+		SITE(irq_enable);
+		SITE(irq_disable);
+		SITE(save_fl);
+		SITE(restore_fl);
+#undef SITE
+
+	patch_site:
+		if (start == NULL || (end-start) > len)
+			goto default_patch;
+
+		ret = paravirt_patch_insns(insns, len, start, end);
+
+		/* Note: because reloc is assigned from something that
+		   appears to be an array, gcc assumes it's non-null,
+		   but doesn't know its relationship with start and
+		   end. */
+		if (reloc > start && reloc < end) {
+			int reloc_off = reloc - start;
+			long *relocp = (long *)(insns + reloc_off);
+			long delta = start - (char *)insns;
+
+			*relocp += delta;
+		}
+		break;
+
+	default_patch:
+	default:
+		ret = paravirt_patch_default(type, clobbers, insns, len);
+		break;
+	}
+
+	return ret;
+}
+
+static const struct paravirt_ops xen_paravirt_ops __initdata = {
+	.paravirt_enabled = 1,
+	.shared_kernel_pmd = 0,
+
+	.name = "Xen",
+	.banner = xen_banner,
+
+	.patch = xen_patch,
+
+	.memory_setup = xen_memory_setup,
+	.arch_setup = xen_arch_setup,
+	.init_IRQ = xen_init_IRQ,
+	.post_allocator_init = xen_mark_init_mm_pinned,
+
+	.time_init = xen_time_init,
+	.set_wallclock = xen_set_wallclock,
+	.get_wallclock = xen_get_wallclock,
+	.get_cpu_khz = xen_cpu_khz,
+	.sched_clock = xen_sched_clock,
+
+	.cpuid = xen_cpuid,
+
+	.set_debugreg = xen_set_debugreg,
+	.get_debugreg = xen_get_debugreg,
+
+	.clts = native_clts,
+
+	.read_cr0 = native_read_cr0,
+	.write_cr0 = native_write_cr0,
+
+	.read_cr2 = xen_read_cr2,
+	.write_cr2 = xen_write_cr2,
+
+	.read_cr3 = xen_read_cr3,
+	.write_cr3 = xen_write_cr3,
+
+	.read_cr4 = native_read_cr4,
+	.read_cr4_safe = native_read_cr4_safe,
+	.write_cr4 = xen_write_cr4,
+
+	.save_fl = xen_save_fl,
+	.restore_fl = xen_restore_fl,
+	.irq_disable = xen_irq_disable,
+	.irq_enable = xen_irq_enable,
+	.safe_halt = xen_safe_halt,
+	.halt = xen_halt,
+	.wbinvd = native_wbinvd,
+
+	.read_msr = native_read_msr_safe,
+	.write_msr = native_write_msr_safe,
+	.read_tsc = native_read_tsc,
+	.read_pmc = native_read_pmc,
+
+	.iret = (void *)&hypercall_page[__HYPERVISOR_iret],
+	.irq_enable_sysexit = NULL,  /* never called */
+
+	.load_tr_desc = paravirt_nop,
+	.set_ldt = xen_set_ldt,
+	.load_gdt = xen_load_gdt,
+	.load_idt = xen_load_idt,
+	.load_tls = xen_load_tls,
+
+	.store_gdt = native_store_gdt,
+	.store_idt = native_store_idt,
+	.store_tr = xen_store_tr,
+
+	.write_ldt_entry = xen_write_ldt_entry,
+	.write_gdt_entry = xen_write_gdt_entry,
+	.write_idt_entry = xen_write_idt_entry,
+	.load_esp0 = xen_load_esp0,
+
+	.set_iopl_mask = xen_set_iopl_mask,
+	.io_delay = xen_io_delay,
+
+#ifdef CONFIG_X86_LOCAL_APIC
+	.apic_write = xen_apic_write,
+	.apic_write_atomic = xen_apic_write,
+	.apic_read = xen_apic_read,
+	.setup_boot_clock = paravirt_nop,
+	.setup_secondary_clock = paravirt_nop,
+	.startup_ipi_hook = paravirt_nop,
+#endif
+
+	.flush_tlb_user = xen_flush_tlb,
+	.flush_tlb_kernel = xen_flush_tlb,
+	.flush_tlb_single = xen_flush_tlb_single,
+	.flush_tlb_others = xen_flush_tlb_others,
+
+	.pte_update = paravirt_nop,
+	.pte_update_defer = paravirt_nop,
+
+	.pagetable_setup_start = xen_pagetable_setup_start,
+	.pagetable_setup_done = xen_pagetable_setup_done,
+
+	.alloc_pt = xen_alloc_pt_init,
+	.release_pt = xen_release_pt,
+	.alloc_pd = paravirt_nop,
+	.alloc_pd_clone = paravirt_nop,
+	.release_pd = paravirt_nop,
+
+#ifdef CONFIG_HIGHPTE
+	.kmap_atomic_pte = xen_kmap_atomic_pte,
+#endif
+
+	.set_pte = NULL,	/* see xen_pagetable_setup_* */
+	.set_pte_at = xen_set_pte_at,
+	.set_pmd = xen_set_pmd,
+
+	.pte_val = xen_pte_val,
+	.pgd_val = xen_pgd_val,
+
+	.make_pte = xen_make_pte,
+	.make_pgd = xen_make_pgd,
+
+#ifdef CONFIG_X86_PAE
+	.set_pte_atomic = xen_set_pte_atomic,
+	.set_pte_present = xen_set_pte_at,
+	.set_pud = xen_set_pud,
+	.pte_clear = xen_pte_clear,
+	.pmd_clear = xen_pmd_clear,
+
+	.make_pmd = xen_make_pmd,
+	.pmd_val = xen_pmd_val,
+#endif	/* PAE */
+
+	.activate_mm = xen_activate_mm,
+	.dup_mmap = xen_dup_mmap,
+	.exit_mmap = xen_exit_mmap,
+
+	.set_lazy_mode = xen_set_lazy_mode,
+};
+
+#ifdef CONFIG_SMP
+static const struct smp_ops xen_smp_ops __initdata = {
+	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
+	.smp_prepare_cpus = xen_smp_prepare_cpus,
+	.cpu_up = xen_cpu_up,
+	.smp_cpus_done = xen_smp_cpus_done,
+
+	.smp_send_stop = xen_smp_send_stop,
+	.smp_send_reschedule = xen_smp_send_reschedule,
+	.smp_call_function_mask = xen_smp_call_function_mask,
+};
+#endif	/* CONFIG_SMP */
+
+static void xen_reboot(int reason)
+{
+#ifdef CONFIG_SMP
+	smp_send_stop();
+#endif
+
+	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, reason))
+		BUG();
+}
+
+static void xen_restart(char *msg)
+{
+	xen_reboot(SHUTDOWN_reboot);
+}
+
+static void xen_emergency_restart(void)
+{
+	xen_reboot(SHUTDOWN_reboot);
+}
+
+static void xen_machine_halt(void)
+{
+	xen_reboot(SHUTDOWN_poweroff);
+}
+
+static void xen_crash_shutdown(struct pt_regs *regs)
+{
+	xen_reboot(SHUTDOWN_crash);
+}
+
+static const struct machine_ops __initdata xen_machine_ops = {
+	.restart = xen_restart,
+	.halt = xen_machine_halt,
+	.power_off = xen_machine_halt,
+	.shutdown = xen_machine_halt,
+	.crash_shutdown = xen_crash_shutdown,
+	.emergency_restart = xen_emergency_restart,
+};
+
+
+/* First C function to be called on Xen boot */
+asmlinkage void __init xen_start_kernel(void)
+{
+	pgd_t *pgd;
+
+	if (!xen_start_info)
+		return;
+
+	BUG_ON(memcmp(xen_start_info->magic, "xen-3.0", 7) != 0);
+
+	/* Install Xen paravirt ops */
+	paravirt_ops = xen_paravirt_ops;
+	machine_ops = xen_machine_ops;
+
+#ifdef CONFIG_SMP
+	smp_ops = xen_smp_ops;
+#endif
+
+	xen_setup_features();
+
+	/* Get mfn list */
+	if (!xen_feature(XENFEAT_auto_translated_physmap))
+		phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list;
+
+	pgd = (pgd_t *)xen_start_info->pt_base;
+
+	init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
+
+	init_mm.pgd = pgd; /* use the Xen pagetables to start */
+
+	/* keep using Xen gdt for now; no urgent need to change it */
+
+	x86_write_percpu(xen_cr3, __pa(pgd));
+
+#ifdef CONFIG_SMP
+	/* Don't do the full vcpu_info placement stuff until we have a
+	   possible map. */
+	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
+#else
+	/* May as well do it now, since there's no good time to call
+	   it later on UP. */
+	xen_setup_vcpu_info_placement();
+#endif
+
+	paravirt_ops.kernel_rpl = 1;
+	if (xen_feature(XENFEAT_supervisor_mode_kernel))
+		paravirt_ops.kernel_rpl = 0;
+
+	/* set the limit of our address space */
+	reserve_top_address(-HYPERVISOR_VIRT_START + 2 * PAGE_SIZE);
+
+	/* set up basic CPUID stuff */
+	cpu_detect(&new_cpu_data);
+	new_cpu_data.hard_math = 1;
+	new_cpu_data.x86_capability[0] = cpuid_edx(1);
+
+	/* Poke various useful things into boot_params */
+	LOADER_TYPE = (9 << 4) | 0;
+	INITRD_START = xen_start_info->mod_start ? __pa(xen_start_info->mod_start) : 0;
+	INITRD_SIZE = xen_start_info->mod_len;
+
+	/* Start the world */
+	start_kernel();
+}
diff --git a/arch/i386/xen/events.c b/arch/i386/xen/events.c
new file mode 100644
index 0000000..8904acc
--- /dev/null
+++ b/arch/i386/xen/events.c
@@ -0,0 +1,590 @@
+/*
+ * Xen event channels
+ *
+ * Xen models interrupts with abstract event channels.  Because each
+ * domain gets 1024 event channels, but NR_IRQS is not that large, so we
+ * must dynamically map irqs<->event channels.  The event channels
+ * interface with the rest of the kernel by defining a xen interrupt
+ * chip.  When an event is received, it is mapped to an irq and sent
+ * through the normal interrupt processing path.
+ *
+ * There are four kinds of events which can be mapped to an event
+ * channel:
+ *
+ * 1. Inter-domain notifications.  This includes all the virtual
+ *    device events, since they're driven by front-ends in another domain
+ *    (typically dom0).
+ * 2. VIRQs, typically used for timers.  These are per-cpu events.
+ * 3. IPIs.
+ * 4. Hardware interrupts. Not supported at present.
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
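+
+/*
+ * Illustrative sketch only (not part of the interface added here): a
+ * typical consumer binds a per-cpu VIRQ to an ordinary irq handler and
+ * never touches raw event-channel ports.  VIRQ_TIMER and IRQF_DISABLED
+ * are just plausible placeholders for a real caller's choices.
+ *
+ *	static irqreturn_t my_handler(int irq, void *dev_id)
+ *	{
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ *	int irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, my_handler,
+ *					  IRQF_DISABLED, "my-timer", NULL);
+ *	...
+ *	unbind_from_irqhandler(irq, NULL);
+ */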
+
+#include <linux/linkage.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+#include <asm/sync_bitops.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/events.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/event_channel.h>
+
+#include "xen-ops.h"
+
+/*
+ * This lock protects updates to the following mapping and reference-count
+ * arrays. The lock does not need to be acquired to read the mapping tables.
+ */
+static DEFINE_SPINLOCK(irq_mapping_update_lock);
+
+/* IRQ <-> VIRQ mapping. */
+static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
+
+/* IRQ <-> IPI mapping */
+static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
+
+/* Packed IRQ information: binding type, sub-type index, and event channel. */
+struct packed_irq
+{
+	unsigned short evtchn;
+	unsigned char index;
+	unsigned char type;
+};
+
+static struct packed_irq irq_info[NR_IRQS];
+
+/* Binding types. */
+enum {
+	IRQT_UNBOUND,
+	IRQT_PIRQ,
+	IRQT_VIRQ,
+	IRQT_IPI,
+	IRQT_EVTCHN
+};
+
+/* Convenient shorthand for packed representation of an unbound IRQ. */
+#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)
+
+static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
+	[0 ... NR_EVENT_CHANNELS-1] = -1
+};
+static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
+static u8 cpu_evtchn[NR_EVENT_CHANNELS];
+
+/* Reference counts for bindings to IRQs. */
+static int irq_bindcount[NR_IRQS];
+
+/* Xen will never allocate port zero for any purpose. */
+#define VALID_EVTCHN(chn)	((chn) != 0)
+
+/*
+ * Force a proper event-channel callback from Xen after clearing the
+ * callback mask. We do this in a very simple manner, by making a call
+ * down into Xen. The pending flag will be checked by Xen on return.
+ */
+void force_evtchn_callback(void)
+{
+	(void)HYPERVISOR_xen_version(0, NULL);
+}
+EXPORT_SYMBOL_GPL(force_evtchn_callback);
+
+static struct irq_chip xen_dynamic_chip;
+
+/* Constructor for packed IRQ information. */
+static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
+{
+	return (struct packed_irq) { evtchn, index, type };
+}
+
+/*
+ * Accessors for packed IRQ information.
+ */
+static inline unsigned int evtchn_from_irq(int irq)
+{
+	return irq_info[irq].evtchn;
+}
+
+static inline unsigned int index_from_irq(int irq)
+{
+	return irq_info[irq].index;
+}
+
+static inline unsigned int type_from_irq(int irq)
+{
+	return irq_info[irq].type;
+}
+
+static inline unsigned long active_evtchns(unsigned int cpu,
+					   struct shared_info *sh,
+					   unsigned int idx)
+{
+	return (sh->evtchn_pending[idx] &
+		cpu_evtchn_mask[cpu][idx] &
+		~sh->evtchn_mask[idx]);
+}
+
+static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
+{
+	int irq = evtchn_to_irq[chn];
+
+	BUG_ON(irq == -1);
+#ifdef CONFIG_SMP
+	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+#endif
+		/* ldt may be vmalloced, use arbitrary_virt_to_machine */
+	__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
+	__set_bit(chn, cpu_evtchn_mask[cpu]);
+
+	cpu_evtchn[chn] = cpu;
+}
+
+static void init_evtchn_cpu_bindings(void)
+{
+#ifdef CONFIG_SMP
+	int i;
+	/* By default all event channels notify CPU#0. */
+	for (i = 0; i < NR_IRQS; i++)
+		irq_desc[i].affinity = cpumask_of_cpu(0);
+#endif
+
+	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
+	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
+}
+
+static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
+{
+	return cpu_evtchn[evtchn];
+}
+
+static inline void clear_evtchn(int port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+	sync_clear_bit(port, &s->evtchn_pending[0]);
+}
+
+static inline void set_evtchn(int port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+	sync_set_bit(port, &s->evtchn_pending[0]);
+}
+
+
+/**
+ * notify_remote_via_irq - send event to remote end of event channel via irq
+ * @irq: irq of event channel to send event to
+ *
+ * Unlike notify_remote_via_evtchn(), this is safe to use across
+ * save/restore. Notifications on a broken connection are silently
+ * dropped.
+ */
+void notify_remote_via_irq(int irq)
+{
+	int evtchn = evtchn_from_irq(irq);
+
+	if (VALID_EVTCHN(evtchn))
+		notify_remote_via_evtchn(evtchn);
+}
+EXPORT_SYMBOL_GPL(notify_remote_via_irq);
+
+static void mask_evtchn(int port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+	sync_set_bit(port, &s->evtchn_mask[0]);
+}
+
+static void unmask_evtchn(int port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+	unsigned int cpu = get_cpu();
+
+	BUG_ON(!irqs_disabled());
+
+	/* Slow path (hypercall) if this is a non-local port. */
+	if (unlikely(cpu != cpu_from_evtchn(port))) {
+		struct evtchn_unmask unmask = { .port = port };
+		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
+	} else {
+		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
+
+		sync_clear_bit(port, &s->evtchn_mask[0]);
+
+		/*
+		 * The following is basically the equivalent of
+		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
+		 * the interrupt edge' if the channel is masked.
+		 */
+		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
+		    !sync_test_and_set_bit(port / BITS_PER_LONG,
+					   &vcpu_info->evtchn_pending_sel))
+			vcpu_info->evtchn_upcall_pending = 1;
+	}
+
+	put_cpu();
+}
+
+static int find_unbound_irq(void)
+{
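+	/* The high word of an IDT gate holds offset[31:16] in bits
+	 * 16-31, P in bit 15, DPL in bits 13-14 and a 5-bit S/type
+	 * field in bits 8-12; only interrupt gates (0xe) and trap
+	 * gates (0xf) have a Xen trap_info equivalent. */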
+	int irq;
+
+	/* Only allocate from dynirq range */
+	for (irq = 0; irq < NR_IRQS; irq++)
+		if (irq_bindcount[irq] == 0)
+			break;
+
+	if (irq == NR_IRQS)
+		panic("No available IRQ to bind to: increase NR_IRQS!\n");
+
+	return irq;
+}
+
+int bind_evtchn_to_irq(unsigned int evtchn)
+{
+	int irq;
+
+	spin_lock(&irq_mapping_update_lock);
+
+	irq = evtchn_to_irq[evtchn];
+
+	if (irq == -1) {
+		irq = find_unbound_irq();
+
+		dynamic_irq_init(irq);
+		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
+					      handle_level_irq, "event");
+
+		evtchn_to_irq[evtchn] = irq;
+		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+	}
+
+	irq_bindcount[irq]++;
+
+	spin_unlock(&irq_mapping_update_lock);
+
+	return irq;
+}
+EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
+
+static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
+{
+	struct evtchn_bind_ipi bind_ipi;
+	int evtchn, irq;
+
+	spin_lock(&irq_mapping_update_lock);
+
+	irq = per_cpu(ipi_to_irq, cpu)[ipi];
+	if (irq == -1) {
+		irq = find_unbound_irq();
+		if (irq < 0)
+			goto out;
+
+		dynamic_irq_init(irq);
+		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
+					      handle_level_irq, "ipi");
+
+		bind_ipi.vcpu = cpu;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+						&bind_ipi) != 0)
+			BUG();
+		evtchn = bind_ipi.port;
+
+		evtchn_to_irq[evtchn] = irq;
+		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+
+		per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+
+		bind_evtchn_to_cpu(evtchn, cpu);
+	}
+
+	irq_bindcount[irq]++;
+
+ out:
+	spin_unlock(&irq_mapping_update_lock);
+	return irq;
+}
+
+
+static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
+{
+	struct evtchn_bind_virq bind_virq;
+	int evtchn, irq;
+
+	spin_lock(&irq_mapping_update_lock);
+
+	irq = per_cpu(virq_to_irq, cpu)[virq];
+
+	if (irq == -1) {
+		bind_virq.virq = virq;
+		bind_virq.vcpu = cpu;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+						&bind_virq) != 0)
+			BUG();
+		evtchn = bind_virq.port;
+
+		irq = find_unbound_irq();
+
+		dynamic_irq_init(irq);
+		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
+					      handle_level_irq, "virq");
+
+		evtchn_to_irq[evtchn] = irq;
+		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+
+		per_cpu(virq_to_irq, cpu)[virq] = irq;
+
+		bind_evtchn_to_cpu(evtchn, cpu);
+	}
+
+	irq_bindcount[irq]++;
+
+	spin_unlock(&irq_mapping_update_lock);
+
+	return irq;
+}
+
+static void unbind_from_irq(unsigned int irq)
+{
+	struct evtchn_close close;
+	int evtchn = evtchn_from_irq(irq);
+
+	spin_lock(&irq_mapping_update_lock);
+
+	if (VALID_EVTCHN(evtchn) && (--irq_bindcount[irq] == 0)) {
+		close.port = evtchn;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+			BUG();
+
+		switch (type_from_irq(irq)) {
+		case IRQT_VIRQ:
+			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
+				[index_from_irq(irq)] = -1;
+			break;
+		default:
+			break;
+		}
+
+		/* Closed ports are implicitly re-bound to VCPU0. */
+		bind_evtchn_to_cpu(evtchn, 0);
+
+		evtchn_to_irq[evtchn] = -1;
+		irq_info[irq] = IRQ_UNBOUND;
+
+		dynamic_irq_init(irq);
+	}
+
+	spin_unlock(&irq_mapping_update_lock);
+}
+
+int bind_evtchn_to_irqhandler(unsigned int evtchn,
+			      irqreturn_t (*handler)(int, void *),
+			      unsigned long irqflags,
+			      const char *devname, void *dev_id)
+{
+	unsigned int irq;
+	int retval;
+
+	irq = bind_evtchn_to_irq(evtchn);
+	retval = request_irq(irq, handler, irqflags, devname, dev_id);
+	if (retval != 0) {
+		unbind_from_irq(irq);
+		return retval;
+	}
+
+	return irq;
+}
+EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
+
+int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
+			    irqreturn_t (*handler)(int, void *),
+			    unsigned long irqflags, const char *devname, void *dev_id)
+{
+	unsigned int irq;
+	int retval;
+
+	irq = bind_virq_to_irq(virq, cpu);
+	retval = request_irq(irq, handler, irqflags, devname, dev_id);
+	if (retval != 0) {
+		unbind_from_irq(irq);
+		return retval;
+	}
+
+	return irq;
+}
+EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
+
+int bind_ipi_to_irqhandler(enum ipi_vector ipi,
+			   unsigned int cpu,
+			   irq_handler_t handler,
+			   unsigned long irqflags,
+			   const char *devname,
+			   void *dev_id)
+{
+	int irq, retval;
+
+	irq = bind_ipi_to_irq(ipi, cpu);
+	if (irq < 0)
+		return irq;
+
+	retval = request_irq(irq, handler, irqflags, devname, dev_id);
+	if (retval != 0) {
+		unbind_from_irq(irq);
+		return retval;
+	}
+
+	return irq;
+}
+
+void unbind_from_irqhandler(unsigned int irq, void *dev_id)
+{
+	free_irq(irq, dev_id);
+	unbind_from_irq(irq);
+}
+EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
+
+void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
+{
+	int irq = per_cpu(ipi_to_irq, cpu)[vector];
+	BUG_ON(irq < 0);
+	notify_remote_via_irq(irq);
+}
+
+
+/*
+ * Search the CPU's pending event bitmasks.  For each one found, map
+ * the event number to an irq, and feed it into do_IRQ() for
+ * handling.
+ *
+ * Xen uses a two-level bitmap to speed searching.  The first level is
+ * a bitset of words which contain pending event bits.  The second
+ * level is a bitset of pending events themselves.
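+ *
+ * For example: if bit 2 of evtchn_pending_sel is set, word 2 of
+ * evtchn_pending is scanned; a set bit 5 there means port
+ * 2*BITS_PER_LONG + 5 is pending, which is then mapped through
+ * evtchn_to_irq[] and handed to do_IRQ().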
+ */
+fastcall void xen_evtchn_do_upcall(struct pt_regs *regs)
+{
+	int cpu = get_cpu();
+	struct shared_info *s = HYPERVISOR_shared_info;
+	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
+	unsigned long pending_words;
+
+	vcpu_info->evtchn_upcall_pending = 0;
+
+	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
+	pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
+	while (pending_words != 0) {
+		unsigned long pending_bits;
+		int word_idx = __ffs(pending_words);
+		pending_words &= ~(1UL << word_idx);
+
+		while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
+			int bit_idx = __ffs(pending_bits);
+			int port = (word_idx * BITS_PER_LONG) + bit_idx;
+			int irq = evtchn_to_irq[port];
+
+			if (irq != -1) {
+				regs->orig_eax = ~irq;
+				do_IRQ(regs);
+			}
+		}
+	}
+
+	put_cpu();
+}
+
+/* Rebind an evtchn so that it gets delivered to a specific cpu */
+static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+{
+	struct evtchn_bind_vcpu bind_vcpu;
+	int evtchn = evtchn_from_irq(irq);
+
+	if (!VALID_EVTCHN(evtchn))
+		return;
+
+	/* Send future instances of this interrupt to other vcpu. */
+	bind_vcpu.port = evtchn;
+	bind_vcpu.vcpu = tcpu;
+
+	/*
+	 * If this fails, it usually just indicates that we're dealing with a
+	 * virq or IPI channel, which don't actually need to be rebound. Ignore
+	 * it, but don't do the xenlinux-level rebind in that case.
+	 */
+	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
+		bind_evtchn_to_cpu(evtchn, tcpu);
+}
+
+
+static void set_affinity_irq(unsigned irq, cpumask_t dest)
+{
+	unsigned tcpu = first_cpu(dest);
+	rebind_irq_to_cpu(irq, tcpu);
+}
+
+static void enable_dynirq(unsigned int irq)
+{
+	int evtchn = evtchn_from_irq(irq);
+
+	if (VALID_EVTCHN(evtchn))
+		unmask_evtchn(evtchn);
+}
+
+static void disable_dynirq(unsigned int irq)
+{
+	int evtchn = evtchn_from_irq(irq);
+
+	if (VALID_EVTCHN(evtchn))
+		mask_evtchn(evtchn);
+}
+
+static void ack_dynirq(unsigned int irq)
+{
+	int evtchn = evtchn_from_irq(irq);
+
+	move_native_irq(irq);
+
+	if (VALID_EVTCHN(evtchn))
+		clear_evtchn(evtchn);
+}
+
+static int retrigger_dynirq(unsigned int irq)
+{
+	int evtchn = evtchn_from_irq(irq);
+	int ret = 0;
+
+	if (VALID_EVTCHN(evtchn)) {
+		set_evtchn(evtchn);
+		ret = 1;
+	}
+
+	return ret;
+}
+
+static struct irq_chip xen_dynamic_chip __read_mostly = {
+	.name		= "xen-dyn",
+	.mask		= disable_dynirq,
+	.unmask		= enable_dynirq,
+	.ack		= ack_dynirq,
+	.set_affinity	= set_affinity_irq,
+	.retrigger	= retrigger_dynirq,
+};
+
+void __init xen_init_IRQ(void)
+{
+	int i;
+
+	init_evtchn_cpu_bindings();
+
+	/* No event channels are 'live' right now. */
+	for (i = 0; i < NR_EVENT_CHANNELS; i++)
+		mask_evtchn(i);
+
+	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
+	for (i = 0; i < NR_IRQS; i++)
+		irq_bindcount[i] = 0;
+
+	irq_ctx_init(smp_processor_id());
+}
diff --git a/arch/i386/xen/features.c b/arch/i386/xen/features.c
new file mode 100644
index 0000000..0707714
--- /dev/null
+++ b/arch/i386/xen/features.c
@@ -0,0 +1,29 @@
+/******************************************************************************
+ * features.c
+ *
+ * Xen feature flags.
+ *
+ * Copyright (c) 2006, Ian Campbell, XenSource Inc.
+ */
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/module.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/features.h>
+
+u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
+EXPORT_SYMBOL_GPL(xen_features);
+
+void xen_setup_features(void)
+{
+	struct xen_feature_info fi;
+	int i, j;
+
+	for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) {
+		fi.submap_idx = i;
+		if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
+			break;
+		for (j = 0; j < 32; j++)
+			xen_features[i * 32 + j] = !!(fi.submap & 1<<j);
+	}
+}
diff --git a/arch/i386/xen/manage.c b/arch/i386/xen/manage.c
new file mode 100644
index 0000000..aa7af9e
--- /dev/null
+++ b/arch/i386/xen/manage.c
@@ -0,0 +1,143 @@
+/*
+ * Handle external requests for shutdown, reboot and sysrq
+ */
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/reboot.h>
+#include <linux/sysrq.h>
+
+#include <xen/xenbus.h>
+
+#define SHUTDOWN_INVALID  -1
+#define SHUTDOWN_POWEROFF  0
+#define SHUTDOWN_SUSPEND   2
+/* Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only
+ * report a crash, not be instructed to crash!
+ * HALT is the same as POWEROFF, as far as we're concerned.  The tools use
+ * the distinction when we return the reason code to them.
+ */
+#define SHUTDOWN_HALT      4
+
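+/*
+ * For orientation (not something this patch introduces): these requests
+ * arrive via xenstore, typically because the dom0 toolstack writes the
+ * domain's control node, e.g.
+ *
+ *	xenstore-write /local/domain/<domid>/control/shutdown "poweroff"
+ *
+ * which fires the shutdown_watch registered below.
+ */
+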
+/* Ignore multiple shutdown requests. */
+static int shutting_down = SHUTDOWN_INVALID;
+
+static void shutdown_handler(struct xenbus_watch *watch,
+			     const char **vec, unsigned int len)
+{
+	char *str;
+	struct xenbus_transaction xbt;
+	int err;
+
+	if (shutting_down != SHUTDOWN_INVALID)
+		return;
+
+ again:
+	err = xenbus_transaction_start(&xbt);
+	if (err)
+		return;
+
+	str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
+	/* Ignore read errors and empty reads. */
+	if (XENBUS_IS_ERR_READ(str)) {
+		xenbus_transaction_end(xbt, 1);
+		return;
+	}
+
+	xenbus_write(xbt, "control", "shutdown", "");
+
+	err = xenbus_transaction_end(xbt, 0);
+	if (err == -EAGAIN) {
+		kfree(str);
+		goto again;
+	}
+
+	if (strcmp(str, "poweroff") == 0 ||
+	    strcmp(str, "halt") == 0)
+		orderly_poweroff(false);
+	else if (strcmp(str, "reboot") == 0)
+		ctrl_alt_del();
+	else {
+		printk(KERN_INFO "Ignoring shutdown request: %s\n", str);
+		shutting_down = SHUTDOWN_INVALID;
+	}
+
+	kfree(str);
+}
+
+static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
+			  unsigned int len)
+{
+	char sysrq_key = '\0';
+	struct xenbus_transaction xbt;
+	int err;
+
+ again:
+	err = xenbus_transaction_start(&xbt);
+	if (err)
+		return;
+	if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
+		printk(KERN_ERR "Unable to read sysrq code in "
+		       "control/sysrq\n");
+		xenbus_transaction_end(xbt, 1);
+		return;
+	}
+
+	if (sysrq_key != '\0')
+		xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+
+	err = xenbus_transaction_end(xbt, 0);
+	if (err == -EAGAIN)
+		goto again;
+
+	if (sysrq_key != '\0')
+		handle_sysrq(sysrq_key, NULL);
+}
+
+static struct xenbus_watch shutdown_watch = {
+	.node = "control/shutdown",
+	.callback = shutdown_handler
+};
+
+static struct xenbus_watch sysrq_watch = {
+	.node = "control/sysrq",
+	.callback = sysrq_handler
+};
+
+static int setup_shutdown_watcher(void)
+{
+	int err;
+
+	err = register_xenbus_watch(&shutdown_watch);
+	if (err) {
+		printk(KERN_ERR "Failed to set shutdown watcher\n");
+		return err;
+	}
+
+	err = register_xenbus_watch(&sysrq_watch);
+	if (err) {
+		printk(KERN_ERR "Failed to set sysrq watcher\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int shutdown_event(struct notifier_block *notifier,
+			  unsigned long event,
+			  void *data)
+{
+	setup_shutdown_watcher();
+	return NOTIFY_DONE;
+}
+
+static int __init setup_shutdown_event(void)
+{
+	static struct notifier_block xenstore_notifier = {
+		.notifier_call = shutdown_event
+	};
+	register_xenstore_notifier(&xenstore_notifier);
+
+	return 0;
+}
+
+subsys_initcall(setup_shutdown_event);
diff --git a/arch/i386/xen/mmu.c b/arch/i386/xen/mmu.c
new file mode 100644
index 0000000..4ae038a
--- /dev/null
+++ b/arch/i386/xen/mmu.c
@@ -0,0 +1,564 @@
+/*
+ * Xen mmu operations
+ *
+ * This file contains the various mmu fetch and update operations.
+ * The most important job they must perform is the mapping between the
+ * domain's pfn and the overall machine mfns.
+ *
+ * Xen allows guests to directly update the pagetable, in a controlled
+ * fashion.  In other words, the guest modifies the same pagetable
+ * that the CPU actually uses, which eliminates the overhead of having
+ * a separate shadow pagetable.
+ *
+ * In order to allow this, it falls on the guest domain to map its
+ * notion of a "physical" pfn - which is just a domain-local linear
+ * address - into a real "machine address" which the CPU's MMU can
+ * use.
+ *
+ * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
+ * inserted directly into the pagetable.  When creating a new
+ * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
+ * when reading the content back with __(pgd|pmd|pte)_val, it converts
+ * the mfn back into a pfn.
+ *
+ * The other constraint is that all pages which make up a pagetable
+ * must be mapped read-only in the guest.  This prevents uncontrolled
+ * guest updates to the pagetable.  Xen strictly enforces this, and
+ * will disallow any pagetable update which will end up mapping a
+ * pagetable page RW, and will disallow using any writable page as a
+ * pagetable.
+ *
+ * Naively, when loading %cr3 with the base of a new pagetable, Xen
+ * would need to validate the whole pagetable before going on.
+ * Naturally, this is quite slow.  The solution is to "pin" a
+ * pagetable, which enforces all the constraints on the pagetable even
+ * when it is not actively in use.  This means that Xen can be assured
+ * that it is still valid when you do load it into %cr3, and doesn't
+ * need to revalidate it.
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
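+
+/*
+ * Rough sketch of the pfn<->mfn round trip described above, using the
+ * !PAE helpers defined later in this file (illustrative only):
+ *
+ *	pte_t pte = xen_make_pte(PFN_PHYS(pfn) | pgprot_val(PAGE_KERNEL));
+ *		- the pfn-based value is run through phys_to_machine(),
+ *		  so the stored entry contains an mfn
+ *	unsigned long val = xen_pte_val(pte);
+ *		- machine_to_phys() converts the mfn back, so val is
+ *		  pfn-based again
+ */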
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/bug.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/paravirt.h>
+
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+#include <xen/page.h>
+#include <xen/interface/xen.h>
+
+#include "multicalls.h"
+#include "mmu.h"
+
+xmaddr_t arbitrary_virt_to_machine(unsigned long address)
+{
+	pte_t *pte = lookup_address(address);
+	unsigned offset = address & ~PAGE_MASK;
+
+	BUG_ON(pte == NULL);
+
+	return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
+}
+
+void make_lowmem_page_readonly(void *vaddr)
+{
+	pte_t *pte, ptev;
+	unsigned long address = (unsigned long)vaddr;
+
+	pte = lookup_address(address);
+	BUG_ON(pte == NULL);
+
+	ptev = pte_wrprotect(*pte);
+
+	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
+		BUG();
+}
+
+void make_lowmem_page_readwrite(void *vaddr)
+{
+	pte_t *pte, ptev;
+	unsigned long address = (unsigned long)vaddr;
+
+	pte = lookup_address(address);
+	BUG_ON(pte == NULL);
+
+	ptev = pte_mkwrite(*pte);
+
+	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
+		BUG();
+}
+
+
+void xen_set_pmd(pmd_t *ptr, pmd_t val)
+{
+	struct multicall_space mcs;
+	struct mmu_update *u;
+
+	preempt_disable();
+
+	mcs = xen_mc_entry(sizeof(*u));
+	u = mcs.args;
+	u->ptr = virt_to_machine(ptr).maddr;
+	u->val = pmd_val_ma(val);
+	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+	preempt_enable();
+}
+
+/*
+ * Associate a virtual page frame with a given physical page frame
+ * and protection flags for that frame.
+ */
+void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	pgd = swapper_pg_dir + pgd_index(vaddr);
+	if (pgd_none(*pgd)) {
+		BUG();
+		return;
+	}
+	pud = pud_offset(pgd, vaddr);
+	if (pud_none(*pud)) {
+		BUG();
+		return;
+	}
+	pmd = pmd_offset(pud, vaddr);
+	if (pmd_none(*pmd)) {
+		BUG();
+		return;
+	}
+	pte = pte_offset_kernel(pmd, vaddr);
+	/* <mfn,flags> stored as-is, to permit clearing entries */
+	xen_set_pte(pte, mfn_pte(mfn, flags));
+
+	/*
+	 * It's enough to flush this one mapping.
+	 * (PGE mappings get flushed as well)
+	 */
+	__flush_tlb_one(vaddr);
+}
+
+void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
+		    pte_t *ptep, pte_t pteval)
+{
+	if (mm == current->mm || mm == &init_mm) {
+		if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+			struct multicall_space mcs;
+			mcs = xen_mc_entry(0);
+
+			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
+			xen_mc_issue(PARAVIRT_LAZY_MMU);
+			return;
+		} else
+			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
+				return;
+	}
+	xen_set_pte(ptep, pteval);
+}
+
+#ifdef CONFIG_X86_PAE
+void xen_set_pud(pud_t *ptr, pud_t val)
+{
+	struct multicall_space mcs;
+	struct mmu_update *u;
+
+	preempt_disable();
+
+	mcs = xen_mc_entry(sizeof(*u));
+	u = mcs.args;
+	u->ptr = virt_to_machine(ptr).maddr;
+	u->val = pud_val_ma(val);
+	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+	preempt_enable();
+}
+
+void xen_set_pte(pte_t *ptep, pte_t pte)
+{
+	ptep->pte_high = pte.pte_high;
+	smp_wmb();
+	ptep->pte_low = pte.pte_low;
+}
+
+void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+	set_64bit((u64 *)ptep, pte_val_ma(pte));
+}
+
+void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	ptep->pte_low = 0;
+	smp_wmb();		/* make sure low gets written first */
+	ptep->pte_high = 0;
+}
+
+void xen_pmd_clear(pmd_t *pmdp)
+{
+	xen_set_pmd(pmdp, __pmd(0));
+}
+
+unsigned long long xen_pte_val(pte_t pte)
+{
+	unsigned long long ret = 0;
+
+	if (pte.pte_low) {
+		ret = ((unsigned long long)pte.pte_high << 32) | pte.pte_low;
+		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
+	}
+
+	return ret;
+}
+
+unsigned long long xen_pmd_val(pmd_t pmd)
+{
+	unsigned long long ret = pmd.pmd;
+	if (ret)
+		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
+	return ret;
+}
+
+unsigned long long xen_pgd_val(pgd_t pgd)
+{
+	unsigned long long ret = pgd.pgd;
+	if (ret)
+		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
+	return ret;
+}
+
+pte_t xen_make_pte(unsigned long long pte)
+{
+	if (pte & 1)
+		pte = phys_to_machine(XPADDR(pte)).maddr;
+
+	return (pte_t){ pte, pte >> 32 };
+}
+
+pmd_t xen_make_pmd(unsigned long long pmd)
+{
+	if (pmd & 1)
+		pmd = phys_to_machine(XPADDR(pmd)).maddr;
+
+	return (pmd_t){ pmd };
+}
+
+pgd_t xen_make_pgd(unsigned long long pgd)
+{
+	if (pgd & _PAGE_PRESENT)
+		pgd = phys_to_machine(XPADDR(pgd)).maddr;
+
+	return (pgd_t){ pgd };
+}
+#else  /* !PAE */
+void xen_set_pte(pte_t *ptep, pte_t pte)
+{
+	*ptep = pte;
+}
+
+unsigned long xen_pte_val(pte_t pte)
+{
+	unsigned long ret = pte.pte_low;
+
+	if (ret & _PAGE_PRESENT)
+		ret = machine_to_phys(XMADDR(ret)).paddr;
+
+	return ret;
+}
+
+unsigned long xen_pgd_val(pgd_t pgd)
+{
+	unsigned long ret = pgd.pgd;
+	if (ret)
+		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
+	return ret;
+}
+
+pte_t xen_make_pte(unsigned long pte)
+{
+	if (pte & _PAGE_PRESENT)
+		pte = phys_to_machine(XPADDR(pte)).maddr;
+
+	return (pte_t){ pte };
+}
+
+pgd_t xen_make_pgd(unsigned long pgd)
+{
+	if (pgd & _PAGE_PRESENT)
+		pgd = phys_to_machine(XPADDR(pgd)).maddr;
+
+	return (pgd_t){ pgd };
+}
+#endif	/* CONFIG_X86_PAE */
+
+
+
+/*
+  (Yet another) pagetable walker.  This one is intended for pinning a
+  pagetable.  This means that it walks a pagetable and calls the
+  callback function on each page it finds making up the page table,
+  at every level.  It walks the entire pagetable, but it only bothers
+  pinning pte pages which are below pte_limit.  In the normal case
+  this will be TASK_SIZE, but at boot we need to pin up to
+  FIXADDR_TOP.  But the important bit is that we don't pin beyond
+  there, because then we start getting into Xen's ptes.
+*/
+static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, unsigned),
+		    unsigned long limit)
+{
+	pgd_t *pgd = pgd_base;
+	int flush = 0;
+	unsigned long addr = 0;
+	unsigned long pgd_next;
+
+	BUG_ON(limit > FIXADDR_TOP);
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return 0;
+
+	for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
+		pud_t *pud;
+		unsigned long pud_limit, pud_next;
+
+		pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);
+
+		if (!pgd_val(*pgd))
+			continue;
+
+		pud = pud_offset(pgd, 0);
+
+		if (PTRS_PER_PUD > 1) /* not folded */
+			flush |= (*func)(virt_to_page(pud), 0);
+
+		for (; addr != pud_limit; pud++, addr = pud_next) {
+			pmd_t *pmd;
+			unsigned long pmd_limit;
+
+			pud_next = pud_addr_end(addr, pud_limit);
+
+			if (pud_next < limit)
+				pmd_limit = pud_next;
+			else
+				pmd_limit = limit;
+
+			if (pud_none(*pud))
+				continue;
+
+			pmd = pmd_offset(pud, 0);
+
+			if (PTRS_PER_PMD > 1) /* not folded */
+				flush |= (*func)(virt_to_page(pmd), 0);
+
+			for (; addr != pmd_limit; pmd++) {
+				addr += (PAGE_SIZE * PTRS_PER_PTE);
+				if ((pmd_limit-1) < (addr-1)) {
+					addr = pmd_limit;
+					break;
+				}
+
+				if (pmd_none(*pmd))
+					continue;
+
+				flush |= (*func)(pmd_page(*pmd), 0);
+			}
+		}
+	}
+
+	flush |= (*func)(virt_to_page(pgd_base), UVMF_TLB_FLUSH);
+
+	return flush;
+}
+
+static int pin_page(struct page *page, unsigned flags)
+{
+	unsigned pgfl = test_and_set_bit(PG_pinned, &page->flags);
+	int flush;
+
+	if (pgfl)
+		flush = 0;		/* already pinned */
+	else if (PageHighMem(page))
+		/* kmaps need flushing if we found an unpinned
+		   highpage */
+		flush = 1;
+	else {
+		void *pt = lowmem_page_address(page);
+		unsigned long pfn = page_to_pfn(page);
+		struct multicall_space mcs = __xen_mc_entry(0);
+
+		flush = 0;
+
+		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
+					pfn_pte(pfn, PAGE_KERNEL_RO),
+					flags);
+	}
+
+	return flush;
+}
+
+/* This is called just after a mm has been created, but it has not
+   been used yet.  We need to make sure that its pagetable is all
+   read-only, and can be pinned. */
+void xen_pgd_pin(pgd_t *pgd)
+{
+	struct multicall_space mcs;
+	struct mmuext_op *op;
+
+	xen_mc_batch();
+
+	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
+		/* re-enable interrupts for kmap_flush_unused */
+		xen_mc_issue(0);
+		kmap_flush_unused();
+		xen_mc_batch();
+	}
+
+	mcs = __xen_mc_entry(sizeof(*op));
+	op = mcs.args;
+
+#ifdef CONFIG_X86_PAE
+	op->cmd = MMUEXT_PIN_L3_TABLE;
+#else
+	op->cmd = MMUEXT_PIN_L2_TABLE;
+#endif
+	op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(0);
+}
+
+/* The init_mm pagetable is really pinned as soon as it's created, but
+   that's before we have page structures to store the bits.  So do all
+   the book-keeping now. */
+static __init int mark_pinned(struct page *page, unsigned flags)
+{
+	SetPagePinned(page);
+	return 0;
+}
+
+void __init xen_mark_init_mm_pinned(void)
+{
+	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
+}
+
+static int unpin_page(struct page *page, unsigned flags)
+{
+	unsigned pgfl = test_and_clear_bit(PG_pinned, &page->flags);
+
+	if (pgfl && !PageHighMem(page)) {
+		void *pt = lowmem_page_address(page);
+		unsigned long pfn = page_to_pfn(page);
+		struct multicall_space mcs = __xen_mc_entry(0);
+
+		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
+					pfn_pte(pfn, PAGE_KERNEL),
+					flags);
+	}
+
+	return 0;		/* never need to flush on unpin */
+}
+
+/* Release a pagetable's pages back as normal RW */
+static void xen_pgd_unpin(pgd_t *pgd)
+{
+	struct mmuext_op *op;
+	struct multicall_space mcs;
+
+	xen_mc_batch();
+
+	mcs = __xen_mc_entry(sizeof(*op));
+
+	op = mcs.args;
+	op->cmd = MMUEXT_UNPIN_TABLE;
+	op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));
+
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	pgd_walk(pgd, unpin_page, TASK_SIZE);
+
+	xen_mc_issue(0);
+}
+
+void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+	spin_lock(&next->page_table_lock);
+	xen_pgd_pin(next->pgd);
+	spin_unlock(&next->page_table_lock);
+}
+
+void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+	spin_lock(&mm->page_table_lock);
+	xen_pgd_pin(mm->pgd);
+	spin_unlock(&mm->page_table_lock);
+}
+
+
+#ifdef CONFIG_SMP
+/* Another cpu may still have its %cr3 pointing at the pagetable, so
+   we need to repoint it somewhere else before we can unpin it. */
+static void drop_other_mm_ref(void *info)
+{
+	struct mm_struct *mm = info;
+
+	if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
+		leave_mm(smp_processor_id());
+}
+
+static void drop_mm_ref(struct mm_struct *mm)
+{
+	if (current->active_mm == mm) {
+		if (current->mm == mm)
+			load_cr3(swapper_pg_dir);
+		else
+			leave_mm(smp_processor_id());
+	}
+
+	if (!cpus_empty(mm->cpu_vm_mask))
+		xen_smp_call_function_mask(mm->cpu_vm_mask, drop_other_mm_ref,
+					   mm, 1);
+}
+#else
+static void drop_mm_ref(struct mm_struct *mm)
+{
+	if (current->active_mm == mm)
+		load_cr3(swapper_pg_dir);
+}
+#endif
+
+/*
+ * While a process runs, Xen pins its pagetable, which means that the
+ * hypervisor forces it to be read-only, and controls all updates
+ * to it.  This means that all pagetable updates have to go via the
+ * hypervisor, which is moderately expensive.
+ *
+ * Since we're pulling the pagetable down, we switch to init_mm,
+ * unpin the old process pagetable and mark it all read-write, which
+ * allows further operations on it to be simple memory accesses.
+ *
+ * The only subtle point is that another CPU may still be using the
+ * pagetable because of lazy tlb flushing.  This means we need to
+ * switch all CPUs off this pagetable before we can unpin it.
+ */
+void xen_exit_mmap(struct mm_struct *mm)
+{
+	get_cpu();		/* make sure we don't move around */
+	drop_mm_ref(mm);
+	put_cpu();
+
+	spin_lock(&mm->page_table_lock);
+	xen_pgd_unpin(mm->pgd);
+	spin_unlock(&mm->page_table_lock);
+}
diff --git a/arch/i386/xen/mmu.h b/arch/i386/xen/mmu.h
new file mode 100644
index 0000000..c9ff27f
--- /dev/null
+++ b/arch/i386/xen/mmu.h
@@ -0,0 +1,60 @@
+#ifndef _XEN_MMU_H
+
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+/*
+ * Page-directory addresses above 4GB do not fit into architectural %cr3.
+ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
+ * must use the following accessor macros to pack/unpack valid MFNs.
+ *
+ * Note that Xen is using the fact that the pagetable base is always
+ * page-aligned, and putting the 12 MSB of the address into the 12 LSB
+ * of cr3.
+ */
+#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
+#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
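+
+/*
+ * Worked example (for illustration): pfn 0x123456 names a frame above
+ * 4GB; xen_pfn_to_cr3(0x123456) == 0x23456001 and
+ * xen_cr3_to_pfn(0x23456001) == 0x123456 recovers it, so the full MFN
+ * still fits in the 32-bit cr3 value.
+ */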
+
+
+void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
+
+void xen_set_pte(pte_t *ptep, pte_t pteval);
+void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
+		    pte_t *ptep, pte_t pteval);
+void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
+
+void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next);
+void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
+void xen_exit_mmap(struct mm_struct *mm);
+
+void xen_pgd_pin(pgd_t *pgd);
+//void xen_pgd_unpin(pgd_t *pgd);
+
+#ifdef CONFIG_X86_PAE
+unsigned long long xen_pte_val(pte_t);
+unsigned long long xen_pmd_val(pmd_t);
+unsigned long long xen_pgd_val(pgd_t);
+
+pte_t xen_make_pte(unsigned long long);
+pmd_t xen_make_pmd(unsigned long long);
+pgd_t xen_make_pgd(unsigned long long);
+
+void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
+		    pte_t *ptep, pte_t pteval);
+void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
+void xen_set_pud(pud_t *ptr, pud_t val);
+void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+void xen_pmd_clear(pmd_t *pmdp);
+
+
+#else
+unsigned long xen_pte_val(pte_t);
+unsigned long xen_pmd_val(pmd_t);
+unsigned long xen_pgd_val(pgd_t);
+
+pte_t xen_make_pte(unsigned long);
+pmd_t xen_make_pmd(unsigned long);
+pgd_t xen_make_pgd(unsigned long);
+#endif
+
+#endif	/* _XEN_MMU_H */
diff --git a/arch/i386/xen/multicalls.c b/arch/i386/xen/multicalls.c
new file mode 100644
index 0000000..c837e8e
--- /dev/null
+++ b/arch/i386/xen/multicalls.c
@@ -0,0 +1,90 @@
+/*
+ * Xen hypercall batching.
+ *
+ * Xen allows multiple hypercalls to be issued at once, using the
+ * multicall interface.  This allows the cost of trapping into the
+ * hypervisor to be amortized over several calls.
+ *
+ * This file implements a simple interface for multicalls.  There's a
+ * per-cpu buffer of outstanding multicalls.  When you want to queue a
+ * multicall for issuing, you can allocate a multicall slot for the
+ * call and its arguments, along with storage for data which is
+ * pointed to by the arguments (for passing pointers to structures,
+ * etc).  When the multicall is actually issued, all the space for the
+ * commands and allocated memory is freed for reuse.
+ *
+ * Multicalls are flushed whenever any of the buffers get full, or
+ * when explicitly requested.  There's no way to get per-multicall
+ * return results back.  It will BUG if any of the multicalls fail.
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
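+
+/*
+ * Intended calling pattern, sketched here for orientation (this simply
+ * mirrors the existing callers in enlighten.c, e.g. xen_flush_tlb(),
+ * and adds no new functionality):
+ *
+ *	struct mmuext_op *op;
+ *	struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+ *
+ *	op = mcs.args;
+ *	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
+ *	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+ *
+ *	xen_mc_issue(PARAVIRT_LAZY_MMU);
+ *
+ * xen_mc_entry() implicitly starts a batch; xen_mc_issue() flushes it
+ * immediately unless the matching lazy mode is currently active.
+ */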
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+
+#include <asm/xen/hypercall.h>
+
+#include "multicalls.h"
+
+#define MC_BATCH	32
+#define MC_ARGS		(MC_BATCH * 16 / sizeof(u64))
+
+struct mc_buffer {
+	struct multicall_entry entries[MC_BATCH];
+	u64 args[MC_ARGS];
+	unsigned mcidx, argidx;
+};
+
+static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
+DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
+
+void xen_mc_flush(void)
+{
+	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+	int ret = 0;
+	unsigned long flags;
+
+	BUG_ON(preemptible());
+
+	/* Disable interrupts in case someone comes in and queues
+	   something in the middle */
+	local_irq_save(flags);
+
+	if (b->mcidx) {
+		int i;
+
+		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
+			BUG();
+		for (i = 0; i < b->mcidx; i++)
+			if (b->entries[i].result < 0)
+				ret++;
+		b->mcidx = 0;
+		b->argidx = 0;
+	} else
+		BUG_ON(b->argidx != 0);
+
+	local_irq_restore(flags);
+
+	BUG_ON(ret);
+}
+
+struct multicall_space __xen_mc_entry(size_t args)
+{
+	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+	struct multicall_space ret;
+	unsigned argspace = (args + sizeof(u64) - 1) / sizeof(u64);
+
+	BUG_ON(preemptible());
+	BUG_ON(argspace > MC_ARGS);
+
+	if (b->mcidx == MC_BATCH ||
+	    (b->argidx + argspace) > MC_ARGS)
+		xen_mc_flush();
+
+	ret.mc = &b->entries[b->mcidx];
+	b->mcidx++;
+	ret.args = &b->args[b->argidx];
+	b->argidx += argspace;
+
+	return ret;
+}
diff --git a/arch/i386/xen/multicalls.h b/arch/i386/xen/multicalls.h
new file mode 100644
index 0000000..e6f7530
--- /dev/null
+++ b/arch/i386/xen/multicalls.h
@@ -0,0 +1,45 @@
+#ifndef _XEN_MULTICALLS_H
+#define _XEN_MULTICALLS_H
+
+#include "xen-ops.h"
+
+/* Multicalls */
+struct multicall_space
+{
+	struct multicall_entry *mc;
+	void *args;
+};
+
+/* Allocate room for a multicall and its args */
+struct multicall_space __xen_mc_entry(size_t args);
+
+DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);
+
+/* Call to start a batch of multiple __xen_mc_entry()s.  Must be
+   paired with xen_mc_issue() */
+static inline void xen_mc_batch(void)
+{
+	/* need to disable interrupts until this entry is complete */
+	local_irq_save(__get_cpu_var(xen_mc_irq_flags));
+}
+
+static inline struct multicall_space xen_mc_entry(size_t args)
+{
+	xen_mc_batch();
+	return __xen_mc_entry(args);
+}
+
+/* Flush all pending multicalls */
+void xen_mc_flush(void);
+
+/* Issue a multicall if we're not in a lazy mode */
+static inline void xen_mc_issue(unsigned mode)
+{
+	if ((xen_get_lazy_mode() & mode) == 0)
+		xen_mc_flush();
+
+	/* restore flags saved in xen_mc_batch */
+	local_irq_restore(x86_read_percpu(xen_mc_irq_flags));
+}
+
+#endif /* _XEN_MULTICALLS_H */
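As a rough illustration of the intended call pattern (a sketch only: the mmu_update layout, DOMID_SELF and the __HYPERVISOR_mmu_update argument order are assumed from the Xen interface headers, which this hunk does not show):

/* Hypothetical caller: reserve a slot plus argument storage, fill it
 * in, then issue.  The flush is deferred only if we are already in
 * the matching lazy mode. */
static void example_queue_mmu_update(const struct mmu_update *req)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_entry(sizeof(*u));	/* irq-save + reserve slot and arg space */
	u = mcs.args;
	*u = *req;			/* copy the {ptr, val} pair */

	mcs.mc->op = __HYPERVISOR_mmu_update;
	mcs.mc->args[0] = (unsigned long)u;
	mcs.mc->args[1] = 1;		/* one update */
	mcs.mc->args[2] = 0;		/* success count not collected */
	mcs.mc->args[3] = DOMID_SELF;

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}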
diff --git a/arch/i386/xen/setup.c b/arch/i386/xen/setup.c
new file mode 100644
index 0000000..2fe6eac
--- /dev/null
+++ b/arch/i386/xen/setup.c
@@ -0,0 +1,96 @@
+/*
+ * Machine specific setup for xen
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/pm.h>
+
+#include <asm/elf.h>
+#include <asm/e820.h>
+#include <asm/setup.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/interface/physdev.h>
+#include <xen/features.h>
+
+#include "xen-ops.h"
+
+/* These are code, but not functions.  Defined in entry.S */
+extern const char xen_hypervisor_callback[];
+extern const char xen_failsafe_callback[];
+
+unsigned long *phys_to_machine_mapping;
+EXPORT_SYMBOL(phys_to_machine_mapping);
+
+/**
+ * machine_specific_memory_setup - Hook for machine specific memory setup.
+ **/
+
+char * __init xen_memory_setup(void)
+{
+	unsigned long max_pfn = xen_start_info->nr_pages;
+
+	e820.nr_map = 0;
+	add_memory_region(0, PFN_PHYS(max_pfn), E820_RAM);
+
+	return "Xen";
+}
+
+static void xen_idle(void)
+{
+	local_irq_disable();
+
+	if (need_resched())
+		local_irq_enable();
+	else {
+		current_thread_info()->status &= ~TS_POLLING;
+		smp_mb__after_clear_bit();
+		safe_halt();
+		current_thread_info()->status |= TS_POLLING;
+	}
+}
+
+void __init xen_arch_setup(void)
+{
+	struct physdev_set_iopl set_iopl;
+	int rc;
+
+	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
+	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
+
+	if (!xen_feature(XENFEAT_auto_translated_physmap))
+		HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3);
+
+	HYPERVISOR_set_callbacks(__KERNEL_CS, (unsigned long)xen_hypervisor_callback,
+				 __KERNEL_CS, (unsigned long)xen_failsafe_callback);
+
+	set_iopl.iopl = 1;
+	rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
+	if (rc != 0)
+		printk(KERN_INFO "physdev_op failed %d\n", rc);
+
+#ifdef CONFIG_ACPI
+	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
+		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
+		disable_acpi();
+	}
+#endif
+
+	memcpy(boot_command_line, xen_start_info->cmd_line,
+	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
+	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
+
+	pm_idle = xen_idle;
+
+#ifdef CONFIG_SMP
+	/* fill cpus_possible with all available cpus */
+	xen_fill_possible_map();
+#endif
+
+	paravirt_disable_iospace();
+}
diff --git a/arch/i386/xen/smp.c b/arch/i386/xen/smp.c
new file mode 100644
index 0000000..557b8e2
--- /dev/null
+++ b/arch/i386/xen/smp.c
@@ -0,0 +1,404 @@
+/*
+ * Xen SMP support
+ *
+ * This file implements the Xen versions of smp_ops.  SMP under Xen is
+ * very straightforward.  Bringing a CPU up is simply a matter of
+ * loading its initial context and setting it running.
+ *
+ * IPIs are handled through the Xen event mechanism.
+ *
+ * Because virtual CPUs can be scheduled onto any real CPU, there's no
+ * useful topology information for the kernel to make use of.  As a
+ * result, all CPUs are treated as if they're single-core and
+ * single-threaded.
+ *
+ * This does not handle HOTPLUG_CPU yet.
+ */
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/smp.h>
+
+#include <asm/paravirt.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/cpu.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/vcpu.h>
+
+#include <asm/xen/interface.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/page.h>
+#include <xen/events.h>
+
+#include "xen-ops.h"
+#include "mmu.h"
+
+static cpumask_t cpu_initialized_map;
+static DEFINE_PER_CPU(int, resched_irq);
+static DEFINE_PER_CPU(int, callfunc_irq);
+
+/*
+ * Structure and data for smp_call_function(). This is designed to minimise
+ * static memory requirements. It also looks cleaner.
+ */
+static DEFINE_SPINLOCK(call_lock);
+
+struct call_data_struct {
+	void (*func) (void *info);
+	void *info;
+	atomic_t started;
+	atomic_t finished;
+	int wait;
+};
+
+static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
+
+static struct call_data_struct *call_data;
+
+/*
+ * Reschedule call back. Nothing to do,
+ * all the work is done automatically when
+ * we return from the interrupt.
+ */
+static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
+{
+	return IRQ_HANDLED;
+}
+
+static __cpuinit void cpu_bringup_and_idle(void)
+{
+	int cpu = smp_processor_id();
+
+	cpu_init();
+
+	preempt_disable();
+	per_cpu(cpu_state, cpu) = CPU_ONLINE;
+
+	xen_setup_cpu_clockevents();
+
+	/* We can take interrupts now: we're officially "up". */
+	local_irq_enable();
+
+	wmb();			/* make sure everything is out */
+	cpu_idle();
+}
+
+static int xen_smp_intr_init(unsigned int cpu)
+{
+	int rc;
+	const char *resched_name, *callfunc_name;
+
+	per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;
+
+	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
+	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
+				    cpu,
+				    xen_reschedule_interrupt,
+				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    resched_name,
+				    NULL);
+	if (rc < 0)
+		goto fail;
+	per_cpu(resched_irq, cpu) = rc;
+
+	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
+	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
+				    cpu,
+				    xen_call_function_interrupt,
+				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    callfunc_name,
+				    NULL);
+	if (rc < 0)
+		goto fail;
+	per_cpu(callfunc_irq, cpu) = rc;
+
+	return 0;
+
+ fail:
+	if (per_cpu(resched_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
+	if (per_cpu(callfunc_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+	return rc;
+}
+
+void __init xen_fill_possible_map(void)
+{
+	int i, rc;
+
+	for (i = 0; i < NR_CPUS; i++) {
+		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
+		if (rc >= 0)
+			cpu_set(i, cpu_possible_map);
+	}
+}
+
+void __init xen_smp_prepare_boot_cpu(void)
+{
+	int cpu;
+
+	BUG_ON(smp_processor_id() != 0);
+	native_smp_prepare_boot_cpu();
+
+	/* We've switched to the "real" per-cpu gdt, so make sure the
+	   old memory can be recycled */
+	make_lowmem_page_readwrite(&per_cpu__gdt_page);
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		cpus_clear(cpu_sibling_map[cpu]);
+		cpus_clear(cpu_core_map[cpu]);
+	}
+
+	xen_setup_vcpu_info_placement();
+}
+
+void __init xen_smp_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned cpu;
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		cpus_clear(cpu_sibling_map[cpu]);
+		cpus_clear(cpu_core_map[cpu]);
+	}
+
+	smp_store_cpu_info(0);
+	set_cpu_sibling_map(0);
+
+	if (xen_smp_intr_init(0))
+		BUG();
+
+	cpu_initialized_map = cpumask_of_cpu(0);
+
+	/* Restrict the possible_map according to max_cpus. */
+	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
+		for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
+			continue;
+		cpu_clear(cpu, cpu_possible_map);
+	}
+
+	for_each_possible_cpu (cpu) {
+		struct task_struct *idle;
+
+		if (cpu == 0)
+			continue;
+
+		idle = fork_idle(cpu);
+		if (IS_ERR(idle))
+			panic("failed fork for CPU %d", cpu);
+
+		cpu_set(cpu, cpu_present_map);
+	}
+
+	//init_xenbus_allowed_cpumask();
+}
+
+static __cpuinit int
+cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
+{
+	struct vcpu_guest_context *ctxt;
+	struct gdt_page *gdt = &per_cpu(gdt_page, cpu);
+
+	if (cpu_test_and_set(cpu, cpu_initialized_map))
+		return 0;
+
+	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+	if (ctxt == NULL)
+		return -ENOMEM;
+
+	ctxt->flags = VGCF_IN_KERNEL;
+	ctxt->user_regs.ds = __USER_DS;
+	ctxt->user_regs.es = __USER_DS;
+	ctxt->user_regs.fs = __KERNEL_PERCPU;
+	ctxt->user_regs.gs = 0;
+	ctxt->user_regs.ss = __KERNEL_DS;
+	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
+	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
+
+	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
+
+	xen_copy_trap_info(ctxt->trap_ctxt);
+
+	ctxt->ldt_ents = 0;
+
+	BUG_ON((unsigned long)gdt->gdt & ~PAGE_MASK);
+	make_lowmem_page_readonly(gdt->gdt);
+
+	ctxt->gdt_frames[0] = virt_to_mfn(gdt->gdt);
+	ctxt->gdt_ents      = ARRAY_SIZE(gdt->gdt);
+
+	ctxt->user_regs.cs = __KERNEL_CS;
+	ctxt->user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
+
+	ctxt->kernel_ss = __KERNEL_DS;
+	ctxt->kernel_sp = idle->thread.esp0;
+
+	ctxt->event_callback_cs     = __KERNEL_CS;
+	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
+	ctxt->failsafe_callback_cs  = __KERNEL_CS;
+	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;
+
+	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
+	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
+
+	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
+		BUG();
+
+	kfree(ctxt);
+	return 0;
+}
+
+int __cpuinit xen_cpu_up(unsigned int cpu)
+{
+	struct task_struct *idle = idle_task(cpu);
+	int rc;
+
+#if 0
+	rc = cpu_up_check(cpu);
+	if (rc)
+		return rc;
+#endif
+
+	init_gdt(cpu);
+	per_cpu(current_task, cpu) = idle;
+	irq_ctx_init(cpu);
+	xen_setup_timer(cpu);
+
+	/* make sure interrupts start blocked */
+	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
+
+	rc = cpu_initialize_context(cpu, idle);
+	if (rc)
+		return rc;
+
+	if (num_online_cpus() == 1)
+		alternatives_smp_switch(1);
+
+	rc = xen_smp_intr_init(cpu);
+	if (rc)
+		return rc;
+
+	smp_store_cpu_info(cpu);
+	set_cpu_sibling_map(cpu);
+	/* This must be done before setting cpu_online_map */
+	wmb();
+
+	cpu_set(cpu, cpu_online_map);
+
+	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
+	BUG_ON(rc);
+
+	return 0;
+}
+
+void xen_smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static void stop_self(void *v)
+{
+	int cpu = smp_processor_id();
+
+	/* make sure we're not pinning something down */
+	load_cr3(swapper_pg_dir);
+	/* should set up a minimal gdt */
+
+	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
+	BUG();
+}
+
+void xen_smp_send_stop(void)
+{
+	smp_call_function(stop_self, NULL, 0, 0);
+}
+
+void xen_smp_send_reschedule(int cpu)
+{
+	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
+}
+
+
+static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
+{
+	unsigned cpu;
+
+	cpus_and(mask, mask, cpu_online_map);
+
+	for_each_cpu_mask(cpu, mask)
+		xen_send_IPI_one(cpu, vector);
+}
+
+static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
+{
+	void (*func) (void *info) = call_data->func;
+	void *info = call_data->info;
+	int wait = call_data->wait;
+
+	/*
+	 * Notify initiating CPU that I've grabbed the data and am
+	 * about to execute the function
+	 */
+	mb();
+	atomic_inc(&call_data->started);
+	/*
+	 * At this point the info structure may be out of scope unless wait==1
+	 */
+	irq_enter();
+	(*func)(info);
+	irq_exit();
+
+	if (wait) {
+		mb();		/* commit everything before setting finished */
+		atomic_inc(&call_data->finished);
+	}
+
+	return IRQ_HANDLED;
+}
+
+int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+			       void *info, int wait)
+{
+	struct call_data_struct data;
+	int cpus;
+
+	/* Holding any lock stops cpus from going down. */
+	spin_lock(&call_lock);
+
+	cpu_clear(smp_processor_id(), mask);
+
+	cpus = cpus_weight(mask);
+	if (!cpus) {
+		spin_unlock(&call_lock);
+		return 0;
+	}
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	call_data = &data;
+	mb();			/* write everything before IPI */
+
+	/* Send a message to other CPUs and wait for them to respond */
+	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+
+	/* Make sure other vcpus get a chance to run.
+	   XXX too severe?  Maybe we should check the other CPUs' states? */
+	HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != cpus ||
+	       (wait && atomic_read(&data.finished) != cpus))
+		cpu_relax();
+
+	spin_unlock(&call_lock);
+
+	return 0;
+}
diff --git a/arch/i386/xen/time.c b/arch/i386/xen/time.c
new file mode 100644
index 0000000..51fdabf
--- /dev/null
+++ b/arch/i386/xen/time.c
@@ -0,0 +1,590 @@
+/*
+ * Xen time implementation.
+ *
+ * This is implemented in terms of a clocksource driver which uses
+ * the hypervisor clock as a nanosecond timebase, and a clockevent
+ * driver which uses the hypervisor's timer mechanism.
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/events.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/vcpu.h>
+
+#include "xen-ops.h"
+
+#define XEN_SHIFT 22
+
+/* Xen may fire a timer up to this many ns early */
+#define TIMER_SLOP	100000
+#define NS_PER_TICK	(1000000000LL / HZ)
+
+static cycle_t xen_clocksource_read(void);
+
+/* These are periodically updated in shared_info, and then copied here. */
+struct shadow_time_info {
+	u64 tsc_timestamp;     /* TSC at last update of time vals.  */
+	u64 system_timestamp;  /* Time, in nanosecs, since boot.    */
+	u32 tsc_to_nsec_mul;
+	int tsc_shift;
+	u32 version;
+};
+
+static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
+
+/* runstate info updated by Xen */
+static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
+
+/* snapshots of runstate info */
+static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot);
+
+/* unused ns of stolen and blocked time */
+static DEFINE_PER_CPU(u64, residual_stolen);
+static DEFINE_PER_CPU(u64, residual_blocked);
+
+/* return a consistent snapshot of a 64-bit time/counter value */
+static u64 get64(const u64 *p)
+{
+	u64 ret;
+
+	if (BITS_PER_LONG < 64) {
+		u32 *p32 = (u32 *)p;
+		u32 h, l;
+
+		/*
+		 * Read high then low, and then make sure high is
+		 * still the same; this will only loop if low wraps
+		 * and carries into high.
+		 * XXX some clean way to make this endian-proof?
+		 */
+		do {
+			h = p32[1];
+			barrier();
+			l = p32[0];
+			barrier();
+		} while (p32[1] != h);
+
+		ret = (((u64)h) << 32) | l;
+	} else
+		ret = *p;
+
+	return ret;
+}
+
+/*
+ * Runstate accounting
+ */
+static void get_runstate_snapshot(struct vcpu_runstate_info *res)
+{
+	u64 state_time;
+	struct vcpu_runstate_info *state;
+
+	BUG_ON(preemptible());
+
+	state = &__get_cpu_var(runstate);
+
+	/*
+	 * The runstate info is always updated by the hypervisor on
+	 * the current CPU, so there's no need to use anything
+	 * stronger than a compiler barrier when fetching it.
+	 */
+	do {
+		state_time = get64(&state->state_entry_time);
+		barrier();
+		*res = *state;
+		barrier();
+	} while (get64(&state->state_entry_time) != state_time);
+}
+
+static void setup_runstate_info(int cpu)
+{
+	struct vcpu_register_runstate_memory_area area;
+
+	area.addr.v = &per_cpu(runstate, cpu);
+
+	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
+			       cpu, &area))
+		BUG();
+}
+
+static void do_stolen_accounting(void)
+{
+	struct vcpu_runstate_info state;
+	struct vcpu_runstate_info *snap;
+	s64 blocked, runnable, offline, stolen;
+	cputime_t ticks;
+
+	get_runstate_snapshot(&state);
+
+	WARN_ON(state.state != RUNSTATE_running);
+
+	snap = &__get_cpu_var(runstate_snapshot);
+
+	/* work out how much time the VCPU has not been runn*ing*  */
+	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
+	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
+	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];
+
+	*snap = state;
+
+	/* Add the appropriate number of ticks of stolen time,
+	   including any left-overs from last time.  Passing NULL to
+	   account_steal_time accounts the time as stolen. */
+	stolen = runnable + offline + __get_cpu_var(residual_stolen);
+
+	if (stolen < 0)
+		stolen = 0;
+
+	ticks = 0;
+	while (stolen >= NS_PER_TICK) {
+		ticks++;
+		stolen -= NS_PER_TICK;
+	}
+	__get_cpu_var(residual_stolen) = stolen;
+	account_steal_time(NULL, ticks);
+
+	/* Add the appropriate number of ticks of blocked time,
+	   including any left-overs from last time.  Passing idle to
+	   account_steal_time accounts the time as idle/wait. */
+	blocked += __get_cpu_var(residual_blocked);
+
+	if (blocked < 0)
+		blocked = 0;
+
+	ticks = 0;
+	while (blocked >= NS_PER_TICK) {
+		ticks++;
+		blocked -= NS_PER_TICK;
+	}
+	__get_cpu_var(residual_blocked) = blocked;
+	account_steal_time(idle_task(smp_processor_id()), ticks);
+}
+
+/*
+ * Xen sched_clock implementation.  Returns the number of unstolen
+ * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
+ * states.
+ */
+unsigned long long xen_sched_clock(void)
+{
+	struct vcpu_runstate_info state;
+	cycle_t now;
+	u64 ret;
+	s64 offset;
+
+	/*
+	 * Ideally sched_clock should be called on a per-cpu basis
+	 * anyway, so preempt should already be disabled, but that's
+	 * not current practice at the moment.
+	 */
+	preempt_disable();
+
+	now = xen_clocksource_read();
+
+	get_runstate_snapshot(&state);
+
+	WARN_ON(state.state != RUNSTATE_running);
+
+	offset = now - state.state_entry_time;
+	if (offset < 0)
+		offset = 0;
+
+	ret = state.time[RUNSTATE_blocked] +
+		state.time[RUNSTATE_running] +
+		offset;
+
+	preempt_enable();
+
+	return ret;
+}
+
+
+/* Get the CPU speed from Xen */
+unsigned long xen_cpu_khz(void)
+{
+	u64 cpu_khz = 1000000ULL << 32;
+	const struct vcpu_time_info *info =
+		&HYPERVISOR_shared_info->vcpu_info[0].time;
+
+	do_div(cpu_khz, info->tsc_to_system_mul);
+	if (info->tsc_shift < 0)
+		cpu_khz <<= -info->tsc_shift;
+	else
+		cpu_khz >>= info->tsc_shift;
+
+	return cpu_khz;
+}
+
+/*
+ * Reads a consistent set of time-base values from Xen, into a shadow data
+ * area.
+ */
+static unsigned get_time_values_from_xen(void)
+{
+	struct vcpu_time_info   *src;
+	struct shadow_time_info *dst;
+
+	/* src is shared memory with the hypervisor, so we need to
+	   make sure we get a consistent snapshot, even in the face of
+	   being preempted. */
+	src = &__get_cpu_var(xen_vcpu)->time;
+	dst = &__get_cpu_var(shadow_time);
+
+	do {
+		dst->version = src->version;
+		rmb();		/* fetch version before data */
+		dst->tsc_timestamp     = src->tsc_timestamp;
+		dst->system_timestamp  = src->system_time;
+		dst->tsc_to_nsec_mul   = src->tsc_to_system_mul;
+		dst->tsc_shift         = src->tsc_shift;
+		rmb();		/* test version after fetching data */
+	} while ((src->version & 1) | (dst->version ^ src->version));
+
+	return dst->version;
+}
+
+/*
+ * Scale a 64-bit delta by shifting and multiplying by a 32-bit fraction,
+ * yielding a 64-bit result.
+ */
+static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
+{
+	u64 product;
+#ifdef __i386__
+	u32 tmp1, tmp2;
+#endif
+
+	if (shift < 0)
+		delta >>= -shift;
+	else
+		delta <<= shift;
+
+#ifdef __i386__
+	__asm__ (
+		"mul  %5       ; "
+		"mov  %4,%%eax ; "
+		"mov  %%edx,%4 ; "
+		"mul  %5       ; "
+		"xor  %5,%5    ; "
+		"add  %4,%%eax ; "
+		"adc  %5,%%edx ; "
+		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
+		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
+#elif __x86_64__
+	__asm__ (
+		"mul %%rdx ; shrd $32,%%rdx,%%rax"
+		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
+#else
+#error implement me!
+#endif
+
+	return product;
+}
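The assembly above computes ((delta << shift) * mul_frac) >> 32 on a 96-bit intermediate product; the following portable-C sketch (illustrative only, not part of the patch) does the same arithmetic with 64-bit operations:

static inline u64 scale_delta_c_sketch(u64 delta, u32 mul_frac, int shift)
{
	u32 lo, hi;

	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	lo = (u32)delta;
	hi = (u32)(delta >> 32);

	/* keep bits 32..95 of the 96-bit product delta * mul_frac */
	return ((u64)lo * mul_frac >> 32) + (u64)hi * mul_frac;
}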
+
+static u64 get_nsec_offset(struct shadow_time_info *shadow)
+{
+	u64 now, delta;
+	now = native_read_tsc();
+	delta = now - shadow->tsc_timestamp;
+	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
+}
+
+static cycle_t xen_clocksource_read(void)
+{
+	struct shadow_time_info *shadow = &get_cpu_var(shadow_time);
+	cycle_t ret;
+	unsigned version;
+
+	do {
+		version = get_time_values_from_xen();
+		barrier();
+		ret = shadow->system_timestamp + get_nsec_offset(shadow);
+		barrier();
+	} while (version != __get_cpu_var(xen_vcpu)->time.version);
+
+	put_cpu_var(shadow_time);
+
+	return ret;
+}
+
+static void xen_read_wallclock(struct timespec *ts)
+{
+	const struct shared_info *s = HYPERVISOR_shared_info;
+	u32 version;
+	u64 delta;
+	struct timespec now;
+
+	/* get wallclock at system boot */
+	do {
+		version = s->wc_version;
+		rmb();		/* fetch version before time */
+		now.tv_sec  = s->wc_sec;
+		now.tv_nsec = s->wc_nsec;
+		rmb();		/* fetch time before checking version */
+	} while ((s->wc_version & 1) | (version ^ s->wc_version));
+
+	delta = xen_clocksource_read();	/* time since system boot */
+	delta += now.tv_sec * (u64)NSEC_PER_SEC + now.tv_nsec;
+
+	now.tv_nsec = do_div(delta, NSEC_PER_SEC);
+	now.tv_sec = delta;
+
+	set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
+}
+
+unsigned long xen_get_wallclock(void)
+{
+	struct timespec ts;
+
+	xen_read_wallclock(&ts);
+
+	return ts.tv_sec;
+}
+
+int xen_set_wallclock(unsigned long now)
+{
+	/* do nothing for domU */
+	return -1;
+}
+
+static struct clocksource xen_clocksource __read_mostly = {
+	.name = "xen",
+	.rating = 400,
+	.read = xen_clocksource_read,
+	.mask = ~0,
+	.mult = 1<<XEN_SHIFT,		/* time directly in nanoseconds */
+	.shift = XEN_SHIFT,
+	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+/*
+   Xen clockevent implementation
+
+   Xen has two clockevent implementations:
+
+   The old timer_op one works with all released versions of Xen prior
+   to version 3.0.4.  This version of the hypervisor provides a
+   single-shot timer with nanosecond resolution.  However, a 100Hz tick,
+   delivered while the vcpu is running, shares the same event channel.
+   We don't care about or use this tick, but it will
+   cause the core time code to think the timer fired too soon, and
+   will end up resetting it each time.  It could be filtered, but
+   doing so has complications when the ktime clocksource is not yet
+   the xen clocksource (ie, at boot time).
+
+   The new vcpu_op-based timer interface allows the tick timer period
+   to be changed or turned off.  The tick timer is not useful as a
+   periodic timer because events are only delivered to running vcpus.
+   The one-shot timer can report when a timeout is in the past, so
+   set_next_event is capable of returning -ETIME when appropriate.
+   This interface is used when available.
+*/
+
+
+/*
+  Get a hypervisor absolute time.  In theory we could maintain an
+  offset between the kernel's time and the hypervisor's time, and
+  apply that to the kernel's absolute timeout.  Unfortunately the
+  hypervisor and kernel times can drift even if the kernel is using
+  the Xen clocksource, because ntp can warp the kernel's clocksource.
+*/
+static s64 get_abs_timeout(unsigned long delta)
+{
+	return xen_clocksource_read() + delta;
+}
+
+static void xen_timerop_set_mode(enum clock_event_mode mode,
+				 struct clock_event_device *evt)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		/* unsupported */
+		WARN_ON(1);
+		break;
+
+	case CLOCK_EVT_MODE_ONESHOT:
+		break;
+
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		HYPERVISOR_set_timer_op(0);  /* cancel timeout */
+		break;
+	}
+}
+
+static int xen_timerop_set_next_event(unsigned long delta,
+				      struct clock_event_device *evt)
+{
+	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+
+	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
+		BUG();
+
+	/* We may have missed the deadline, but there's no real way of
+	   knowing for sure.  If the event was in the past, then we'll
+	   get an immediate interrupt. */
+
+	return 0;
+}
+
+static const struct clock_event_device xen_timerop_clockevent = {
+	.name = "xen",
+	.features = CLOCK_EVT_FEAT_ONESHOT,
+
+	.max_delta_ns = 0xffffffff,
+	.min_delta_ns = TIMER_SLOP,
+
+	.mult = 1,
+	.shift = 0,
+	.rating = 500,
+
+	.set_mode = xen_timerop_set_mode,
+	.set_next_event = xen_timerop_set_next_event,
+};
+
+
+
+static void xen_vcpuop_set_mode(enum clock_event_mode mode,
+				struct clock_event_device *evt)
+{
+	int cpu = smp_processor_id();
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		WARN_ON(1);	/* unsupported */
+		break;
+
+	case CLOCK_EVT_MODE_ONESHOT:
+		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+			BUG();
+		break;
+
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
+		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+			BUG();
+		break;
+	}
+}
+
+static int xen_vcpuop_set_next_event(unsigned long delta,
+				     struct clock_event_device *evt)
+{
+	int cpu = smp_processor_id();
+	struct vcpu_set_singleshot_timer single;
+	int ret;
+
+	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+
+	single.timeout_abs_ns = get_abs_timeout(delta);
+	single.flags = VCPU_SSHOTTMR_future;
+
+	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
+
+	BUG_ON(ret != 0 && ret != -ETIME);
+
+	return ret;
+}
+
+static const struct clock_event_device xen_vcpuop_clockevent = {
+	.name = "xen",
+	.features = CLOCK_EVT_FEAT_ONESHOT,
+
+	.max_delta_ns = 0xffffffff,
+	.min_delta_ns = TIMER_SLOP,
+
+	.mult = 1,
+	.shift = 0,
+	.rating = 500,
+
+	.set_mode = xen_vcpuop_set_mode,
+	.set_next_event = xen_vcpuop_set_next_event,
+};
+
+static const struct clock_event_device *xen_clockevent =
+	&xen_timerop_clockevent;
+static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events);
+
+static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events);
+	irqreturn_t ret;
+
+	ret = IRQ_NONE;
+	if (evt->event_handler) {
+		evt->event_handler(evt);
+		ret = IRQ_HANDLED;
+	}
+
+	do_stolen_accounting();
+
+	return ret;
+}
+
+void xen_setup_timer(int cpu)
+{
+	const char *name;
+	struct clock_event_device *evt;
+	int irq;
+
+	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
+
+	name = kasprintf(GFP_KERNEL, "timer%d", cpu);
+	if (!name)
+		name = "<timer kasprintf failed>";
+
+	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
+				      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				      name, NULL);
+
+	evt = &per_cpu(xen_clock_events, cpu);
+	memcpy(evt, xen_clockevent, sizeof(*evt));
+
+	evt->cpumask = cpumask_of_cpu(cpu);
+	evt->irq = irq;
+
+	setup_runstate_info(cpu);
+}
+
+void xen_setup_cpu_clockevents(void)
+{
+	BUG_ON(preemptible());
+
+	clockevents_register_device(&__get_cpu_var(xen_clock_events));
+}
+
+__init void xen_time_init(void)
+{
+	int cpu = smp_processor_id();
+
+	get_time_values_from_xen();
+
+	clocksource_register(&xen_clocksource);
+
+	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
+		/* Successfully turned off 100Hz tick, so we have the
+		   vcpuop-based timer interface */
+		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
+		xen_clockevent = &xen_vcpuop_clockevent;
+	}
+
+	/* Set initial system time with full resolution */
+	xen_read_wallclock(&xtime);
+	set_normalized_timespec(&wall_to_monotonic,
+				-xtime.tv_sec, -xtime.tv_nsec);
+
+	tsc_disable = 0;
+
+	xen_setup_timer(cpu);
+	xen_setup_cpu_clockevents();
+}
diff --git a/arch/i386/xen/xen-asm.S b/arch/i386/xen/xen-asm.S
new file mode 100644
index 0000000..1a43b60
--- /dev/null
+++ b/arch/i386/xen/xen-asm.S
@@ -0,0 +1,291 @@
+/*
+	Asm versions of Xen pv-ops, suitable for either direct use or inlining.
+	The inline versions are the same as the direct-use versions, with the
+	pre- and post-amble chopped off.
+
+	This code is encoded for size rather than absolute efficiency,
+	with a view to being able to inline as much as possible.
+
+	We only bother with direct forms (ie, vcpu in pda) of the operations
+	here; the indirect forms are better handled in C, since they're
+	generally too large to inline anyway.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/percpu.h>
+#include <asm/processor-flags.h>
+#include <asm/segment.h>
+
+#include <xen/interface/xen.h>
+
+#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
+#define ENDPATCH(x)	.globl x##_end; x##_end=.
+
+/* Pseudo-flag used for virtual NMI, which we don't implement yet */
+#define XEN_EFLAGS_NMI	0x80000000
+
+/*
+	Enable events.  This clears the event mask and tests the pending
+	event status with a single 'and' operation.  If there are pending
+	events, then enter the hypervisor to get them handled.
+ */
+ENTRY(xen_irq_enable_direct)
+	/* Clear mask and test pending */
+	andw $0x00ff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
+	/* Preempt here doesn't matter because that will deal with
+	   any pending interrupts.  The pending check may end up being
+	   run on the wrong CPU, but that doesn't hurt. */
+	jz 1f
+2:	call check_events
+1:
+ENDPATCH(xen_irq_enable_direct)
+	ret
+	ENDPROC(xen_irq_enable_direct)
+	RELOC(xen_irq_enable_direct, 2b+1)
+
+
+/*
+	Disabling events is simply a matter of making the event mask
+	non-zero.
+ */
+ENTRY(xen_irq_disable_direct)
+	movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
+ENDPATCH(xen_irq_disable_direct)
+	ret
+	ENDPROC(xen_irq_disable_direct)
+	RELOC(xen_irq_disable_direct, 0)
+
+/*
+	(xen_)save_fl is used to get the current interrupt enable status.
+	Callers expect the status to be in X86_EFLAGS_IF, and other bits
+	may be set in the return value.  We take advantage of this by
+	making sure that X86_EFLAGS_IF has the right value (and other bits
+	in that byte are 0), but other bits in the return value are
+	undefined.  We need to toggle the state of the bit, because
+	Xen and x86 use opposite senses (mask vs enable).
+ */
+ENTRY(xen_save_fl_direct)
+	testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
+	setz %ah
+	addb %ah,%ah
+ENDPATCH(xen_save_fl_direct)
+	ret
+	ENDPROC(xen_save_fl_direct)
+	RELOC(xen_save_fl_direct, 0)
+
+
+/*
+	In principle the caller should be passing us a value returned
+	from xen_save_fl_direct, but for robustness' sake we test only
+	the X86_EFLAGS_IF flag rather than the whole byte.  After
+	setting the interrupt mask state, we check for unmasked
+	pending events and enter the hypervisor to get them delivered
+	if so.
+ */
+ENTRY(xen_restore_fl_direct)
+	testb $X86_EFLAGS_IF>>8, %ah
+	setz PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
+	/* Preempt here doesn't matter because that will deal with
+	   any pending interrupts.  The pending check may end up being
+	   run on the wrong CPU, but that doesn't hurt. */
+
+	/* check for unmasked and pending */
+	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
+	jz 1f
+2:	call check_events
+1:
+ENDPATCH(xen_restore_fl_direct)
+	ret
+	ENDPROC(xen_restore_fl_direct)
+	RELOC(xen_restore_fl_direct, 2b+1)
+
+/*
+	This is run where a normal iret would be run, with the same stack setup:
+	      8: eflags
+	      4: cs
+	esp-> 0: eip
+
+	This attempts to make sure that any pending events are dealt
+	with on return to usermode, but there is a small window in
+	which an event can happen just before entering usermode.  If
+	the nested interrupt ends up setting one of the TIF_WORK_MASK
+	pending work flags, they will not be tested again before
+	returning to usermode. This means that a process can end up
+	with pending work, which will be unprocessed until the process
+	enters and leaves the kernel again, which could be an
+	unbounded amount of time.  This means that a pending signal or
+	reschedule event could be indefinitely delayed.
+
+	The fix is to notice a nested interrupt in the critical
+	window, and if one occurs, then fold the nested interrupt into
+	the current interrupt stack frame, and re-process it
+	iteratively rather than recursively.  This means that it will
+	exit via the normal path, and all pending work will be dealt
+	with appropriately.
+
+	Because the nested interrupt handler needs to deal with the
+	current stack state in whatever form it's in, we keep things
+	simple by only using a single register which is pushed/popped
+	on the stack.
+
+	Non-direct iret could be done in the same way, but it would
+	require an annoying amount of code duplication.  We'll assume
+	that direct mode will be the common case once the hypervisor
+	support becomes commonplace.
+ */
+ENTRY(xen_iret_direct)
+	/* test eflags for special cases */
+	testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
+	jnz hyper_iret
+
+	push %eax
+	ESP_OFFSET=4	# bytes pushed onto stack
+
+	/* Store vcpu_info pointer for easy access.  Do it this
+	   way to avoid having to reload %fs */
+#ifdef CONFIG_SMP
+	GET_THREAD_INFO(%eax)
+	movl TI_cpu(%eax),%eax
+	movl __per_cpu_offset(,%eax,4),%eax
+	lea per_cpu__xen_vcpu_info(%eax),%eax
+#else
+	movl $per_cpu__xen_vcpu_info, %eax
+#endif
+
+	/* check IF state we're restoring */
+	testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)
+
+	/* Maybe enable events.  Once this happens we could get a
+	   recursive event, so the critical region starts immediately
+	   afterwards.  However, if that happens we don't end up
+	   resuming the code, so we don't have to worry about
+	   being preempted to another CPU. */
+	setz XEN_vcpu_info_mask(%eax)
+xen_iret_start_crit:
+
+	/* check for unmasked and pending */
+	cmpw $0x0001, XEN_vcpu_info_pending(%eax)
+
+	/* If there's something pending, mask events again so we
+	   can jump back into xen_hypervisor_callback */
+	sete XEN_vcpu_info_mask(%eax)
+
+	popl %eax
+
+	/* From this point on the registers are restored and the stack
+	   updated, so we don't need to worry about it if we're preempted */
+iret_restore_end:
+
+	/* Jump to hypervisor_callback after fixing up the stack.
+	   Events are masked, so jumping out of the critical
+	   region is OK. */
+	je xen_hypervisor_callback
+
+	iret
+xen_iret_end_crit:
+
+hyper_iret:
+	/* put this out of line since it's very rarely used */
+	jmp hypercall_page + __HYPERVISOR_iret * 32
+
+	.globl xen_iret_start_crit, xen_iret_end_crit
+
+/*
+   This is called by xen_hypervisor_callback in entry.S when it sees
+   that the EIP at the time of interrupt was between xen_iret_start_crit
+   and xen_iret_end_crit.  We're passed the EIP in %eax so we can do
+   a more refined determination of what to do.
+
+   The stack format at this point is:
+	----------------
+	 ss		: (ss/esp may be present if we came from usermode)
+	 esp		:
+	 eflags		}  outer exception info
+	 cs		}
+	 eip		}
+	---------------- <- edi (copy dest)
+	 eax		:  outer eax if it hasn't been restored
+	----------------
+	 eflags		}  nested exception info
+	 cs		}   (no ss/esp because we're nested
+	 eip		}    from the same ring)
+	 orig_eax	}<- esi (copy src)
+	 - - - - - - - -
+	 fs		}
+	 es		}
+	 ds		}  SAVE_ALL state
+	 eax		}
+	  :		:
+	 ebx		}
+	----------------
+	 return addr	 <- esp
+	----------------
+
+   In order to deliver the nested exception properly, we need to shift
+   everything from the return addr up to the error code so it
+   sits just under the outer exception info.  This means that when we
+   handle the exception, we do it in the context of the outer exception
+   rather than starting a new one.
+
+   The only caveat is that if the outer eax hasn't been
+   restored yet (ie, it's still on stack), we need to insert
+   its value into the SAVE_ALL state before going on, since
+   it's usermode state which we eventually need to restore.
+ */
+ENTRY(xen_iret_crit_fixup)
+	/* offsets +4 for return address */
+
+	/*
+	   Paranoia: Make sure we're really coming from userspace.
+	   One could imagine a case where userspace jumps into the
+	   critical range address, but just before the CPU delivers a GP,
+	   it decides to deliver an interrupt instead.  Unlikely?
+	   Definitely.  Easy to avoid?  Yes.  The Intel documents
+	   explicitly say that the reported EIP for a bad jump is the
+	   jump instruction itself, not the destination, but some virtual
+	   environments get this wrong.
+	 */
+	movl PT_CS+4(%esp), %ecx
+	andl $SEGMENT_RPL_MASK, %ecx
+	cmpl $USER_RPL, %ecx
+	je 2f
+
+	lea PT_ORIG_EAX+4(%esp), %esi
+	lea PT_EFLAGS+4(%esp), %edi
+
+	/* If eip is before iret_restore_end then stack
+	   hasn't been restored yet. */
+	cmp $iret_restore_end, %eax
+	jae 1f
+
+	movl 0+4(%edi),%eax		/* copy EAX */
+	movl %eax, PT_EAX+4(%esp)
+
+	lea ESP_OFFSET(%edi),%edi	/* move dest up over saved regs */
+
+	/* set up the copy */
+1:	std
+	mov $(PT_EIP+4) / 4, %ecx	/* copy ret+saved regs up to orig_eax */
+	rep movsl
+	cld
+
+	lea 4(%edi),%esp		/* point esp to new frame */
+2:	ret
+
+
+/*
+	Force an event check by making a hypercall,
+	but preserve regs before making the call.
+ */
+check_events:
+	push %eax
+	push %ecx
+	push %edx
+	call force_evtchn_callback
+	pop %edx
+	pop %ecx
+	pop %eax
+	ret
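A C-level sketch of the save_fl/restore_fl convention implemented above may help; the vcpu_info field names are assumed from the Xen interface headers, and force_evtchn_callback() is the routine that check_events wraps:

/* Xen's event mask and x86's IF flag have opposite senses. */
static unsigned long sketch_save_fl(const struct vcpu_info *vcpu)
{
	return vcpu->evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
}

static void sketch_restore_fl(struct vcpu_info *vcpu, unsigned long flags)
{
	vcpu->evtchn_upcall_mask = (flags & X86_EFLAGS_IF) ? 0 : 1;
	barrier();	/* unmask before looking for pending events */
	if (vcpu->evtchn_upcall_mask == 0 && vcpu->evtchn_upcall_pending)
		force_evtchn_callback();
}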
diff --git a/arch/i386/xen/xen-head.S b/arch/i386/xen/xen-head.S
new file mode 100644
index 0000000..2998d55
--- /dev/null
+++ b/arch/i386/xen/xen-head.S
@@ -0,0 +1,36 @@
+/* Xen-specific pieces of head.S, intended to be included in the right
+	place in head.S */
+
+#ifdef CONFIG_XEN
+
+#include <linux/elfnote.h>
+#include <asm/boot.h>
+#include <xen/interface/elfnote.h>
+
+ENTRY(startup_xen)
+	movl %esi,xen_start_info
+	cld
+	movl $(init_thread_union+THREAD_SIZE),%esp
+	jmp xen_start_kernel
+
+.pushsection ".bss.page_aligned"
+	.align PAGE_SIZE_asm
+ENTRY(hypercall_page)
+	.skip 0x1000
+.popsection
+
+	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz "linux")
+	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION,  .asciz "2.6")
+	ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION,    .asciz "xen-3.0")
+	ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE,      .long  __PAGE_OFFSET)
+	ELFNOTE(Xen, XEN_ELFNOTE_ENTRY,          .long  startup_xen)
+	ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long  hypercall_page)
+	ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,       .asciz "!writable_page_tables|pae_pgdir_above_4gb")
+#ifdef CONFIG_X86_PAE
+	ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE,       .asciz "yes")
+#else
+	ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE,       .asciz "no")
+#endif
+	ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz "generic")
+
+#endif /*CONFIG_XEN */
diff --git a/arch/i386/xen/xen-ops.h b/arch/i386/xen/xen-ops.h
new file mode 100644
index 0000000..b9aaea4
--- /dev/null
+++ b/arch/i386/xen/xen-ops.h
@@ -0,0 +1,71 @@
+#ifndef XEN_OPS_H
+#define XEN_OPS_H
+
+#include <linux/init.h>
+
+/* These are code, but not functions.  Defined in entry.S */
+extern const char xen_hypervisor_callback[];
+extern const char xen_failsafe_callback[];
+
+void xen_copy_trap_info(struct trap_info *traps);
+
+DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
+DECLARE_PER_CPU(unsigned long, xen_cr3);
+
+extern struct start_info *xen_start_info;
+extern struct shared_info *HYPERVISOR_shared_info;
+
+char * __init xen_memory_setup(void);
+void __init xen_arch_setup(void);
+void __init xen_init_IRQ(void);
+
+void xen_setup_timer(int cpu);
+void xen_setup_cpu_clockevents(void);
+unsigned long xen_cpu_khz(void);
+void __init xen_time_init(void);
+unsigned long xen_get_wallclock(void);
+int xen_set_wallclock(unsigned long time);
+unsigned long long xen_sched_clock(void);
+
+void xen_mark_init_mm_pinned(void);
+
+DECLARE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
+
+static inline unsigned xen_get_lazy_mode(void)
+{
+	return x86_read_percpu(xen_lazy_mode);
+}
+
+void __init xen_fill_possible_map(void);
+
+void __init xen_setup_vcpu_info_placement(void);
+void xen_smp_prepare_boot_cpu(void);
+void xen_smp_prepare_cpus(unsigned int max_cpus);
+int xen_cpu_up(unsigned int cpu);
+void xen_smp_cpus_done(unsigned int max_cpus);
+
+void xen_smp_send_stop(void);
+void xen_smp_send_reschedule(int cpu);
+int xen_smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+			   int wait);
+int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+				 int nonatomic, int wait);
+
+int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+			       void *info, int wait);
+
+
+/* Declare an asm function, along with symbols needed to make it
+   inlineable */
+#define DECL_ASM(ret, name, ...)		\
+	ret name(__VA_ARGS__);			\
+	extern char name##_end[];		\
+	extern char name##_reloc[]		\
+
+DECL_ASM(void, xen_irq_enable_direct, void);
+DECL_ASM(void, xen_irq_disable_direct, void);
+DECL_ASM(unsigned long, xen_save_fl_direct, void);
+DECL_ASM(void, xen_restore_fl_direct, unsigned long);
+
+void xen_iret_direct(void);
+#endif /* XEN_OPS_H */
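For reference, DECL_ASM(void, xen_irq_disable_direct, void) expands to:

void xen_irq_disable_direct(void);
extern char xen_irq_disable_direct_end[];
extern char xen_irq_disable_direct_reloc[];

The _end and _reloc symbols line up with those emitted by ENDPATCH() and RELOC() in xen-asm.S, which is presumably what lets the pv-ops patching code find and fix up the inlinable sequences.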
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index db9ddff..616c96e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -582,8 +582,8 @@
 source "arch/ia64/oprofile/Kconfig"
 
 config KPROBES
-	bool "Kprobes (EXPERIMENTAL)"
-	depends on KALLSYMS && EXPERIMENTAL && MODULES
+	bool "Kprobes"
+	depends on KALLSYMS && MODULES
 	help
 	  Kprobes allows you to trap at almost any kernel address and
 	  execute a callback function.  register_kprobe() establishes
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index c1dca22..cd4adf5 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -34,6 +34,7 @@
 #include <linux/efi.h>
 #include <linux/nodemask.h>
 #include <linux/bitops.h>         /* hweight64() */
+#include <linux/crash_dump.h>
 
 #include <asm/delay.h>		/* ia64_get_itc() */
 #include <asm/io.h>
@@ -43,6 +44,8 @@
 
 #include <asm/acpi-ext.h>
 
+extern int swiotlb_late_init_with_default_size (size_t size);
+
 #define PFX "IOC: "
 
 /*
@@ -2026,11 +2029,24 @@
 	if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
 		return 0;
 
+#if defined(CONFIG_IA64_GENERIC) && defined(CONFIG_CRASH_DUMP)
+	/* If we are booting a kdump kernel, the sba_iommu will
+	 * cause devices that were not shutdown properly to MCA
+	 * cause devices that were not shut down properly to MCA
+	 * a successful kdump kernel boot is to use the swiotlb.
+	 */
+	if (elfcorehdr_addr < ELFCORE_ADDR_MAX) {
+		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
+			panic("Unable to initialize software I/O TLB:"
+				  " Try machvec=dig boot option");
+		machvec_init("dig");
+		return 0;
+	}
+#endif
+
 	acpi_bus_register_driver(&acpi_sba_ioc_driver);
 	if (!ioc_list) {
 #ifdef CONFIG_IA64_GENERIC
-		extern int swiotlb_late_init_with_default_size (size_t size);
-
 		/*
 		 * If we didn't find something sba_iommu can claim, we
 		 * need to setup the swiotlb and switch to the dig machvec.
diff --git a/arch/ia64/hp/sim/boot/fw-emu.c b/arch/ia64/hp/sim/boot/fw-emu.c
index 300acd9..1189d03 100644
--- a/arch/ia64/hp/sim/boot/fw-emu.c
+++ b/arch/ia64/hp/sim/boot/fw-emu.c
@@ -329,11 +329,6 @@
 	strcpy(sal_systab->product_id, "HP-simulator");
 #endif
 
-#ifdef CONFIG_IA64_SDV
-	strcpy(sal_systab->oem_id, "Intel");
-	strcpy(sal_systab->product_id, "SDV");
-#endif
-
 	/* fill in an entry point: */
 	sal_ed->type = SAL_DESC_ENTRY_POINT;
 	sal_ed->pal_proc = __pa(pal_desc[0]);
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 324ea75..ef252df 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -36,10 +36,6 @@
 #include <asm/hw_irq.h>
 #include <asm/uaccess.h>
 
-#ifdef CONFIG_KDB
-# include <linux/kdb.h>
-#endif
-
 #undef SIMSERIAL_DEBUG	/* define this to get some debug information */
 
 #define KEYBOARD_INTR	3	/* must match with simulator! */
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index 6f4d3d0..e1189ba 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -195,62 +195,27 @@
 	ia32_load_state(current);
 }
 
+/*
+ * Undo the override of setup_arg_pages(); without this, ia32_setup_arg_pages()
+ * will suffer infinite self-recursion.
+ */
+#undef setup_arg_pages
+
 int
 ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
 {
-	unsigned long stack_base;
-	struct vm_area_struct *mpnt;
-	struct mm_struct *mm = current->mm;
-	int i, ret;
+	int ret;
 
-	stack_base = IA32_STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
-	mm->arg_start = bprm->p + stack_base;
-
-	bprm->p += stack_base;
-	if (bprm->loader)
-		bprm->loader += stack_base;
-	bprm->exec += stack_base;
-
-	mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-	if (!mpnt)
-		return -ENOMEM;
-
-	down_write(&current->mm->mmap_sem);
-	{
-		mpnt->vm_mm = current->mm;
-		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
-		mpnt->vm_end = IA32_STACK_TOP;
-		if (executable_stack == EXSTACK_ENABLE_X)
-			mpnt->vm_flags = VM_STACK_FLAGS |  VM_EXEC;
-		else if (executable_stack == EXSTACK_DISABLE_X)
-			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
-		else
-			mpnt->vm_flags = VM_STACK_FLAGS;
-		mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC)?
-					PAGE_COPY_EXEC: PAGE_COPY;
-		if ((ret = insert_vm_struct(current->mm, mpnt))) {
-			up_write(&current->mm->mmap_sem);
-			kmem_cache_free(vm_area_cachep, mpnt);
-			return ret;
-		}
-		current->mm->stack_vm = current->mm->total_vm = vma_pages(mpnt);
+	ret = setup_arg_pages(bprm, IA32_STACK_TOP, executable_stack);
+	if (!ret) {
+		/*
+		 * Can't do it in ia64_elf32_init(). Needs to be done before
+		 * calls to elf32_map()
+		 */
+		current->thread.ppl = ia32_init_pp_list();
 	}
 
-	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
-		struct page *page = bprm->page[i];
-		if (page) {
-			bprm->page[i] = NULL;
-			install_arg_page(mpnt, page, stack_base);
-		}
-		stack_base += PAGE_SIZE;
-	}
-	up_write(&current->mm->mmap_sem);
-
-	/* Can't do it in ia64_elf32_init(). Needs to be done before calls to
-	   elf32_map() */
-	current->thread.ppl = ia32_init_pp_list();
-
-	return 0;
+	return ret;
 }
 
 static void
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 75ec347..73ca86d 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -28,6 +28,7 @@
 #include <linux/time.h>
 #include <linux/efi.h>
 #include <linux/kexec.h>
+#include <linux/mm.h>
 
 #include <asm/io.h>
 #include <asm/kregs.h>
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 8589e84..3f926c2 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -247,6 +247,9 @@
 .time_redo:
 	.pred.rel.mutex p8,p9,p10
 	ld4.acq r28 = [r29]	// xtime_lock.sequence. Must come first for locking purposes
+	;;
+	and r28 = ~1,r28	// Make sequence even to force retry if odd
+	;;
 (p8)	mov r2 = ar.itc		// CPU_TIMER. 36 clocks latency!!!
 	add r22 = IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET,r20
 (p9)	ld8 r2 = [r30]		// readq(ti->address). Could also have latency issues..
@@ -284,7 +287,6 @@
 (p15)	ld8 r17 = [r19],-IA64_TIMESPEC_TV_NSEC_OFFSET
 (p7)	cmp.ne p7,p0 = r25,r3	// if cmpxchg not successful redo
 	// simulate tbit.nz.or p7,p0 = r28,0
-	and r28 = ~1,r28	// Make sequence even to force retry if odd
 	getf.sig r2 = f8
 	mf
 	add r8 = r8,r18		// Add time interpolator offset
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 5bc46f1..5dc98b5 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -936,10 +936,15 @@
 	return;
 }
 
+unsigned long arch_deref_entry_point(void *entry)
+{
+	return ((struct fnptr *)entry)->ip;
+}
+
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
-	unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
+	unsigned long addr = arch_deref_entry_point(jp->entry);
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	struct param_bsp_cfm pa;
 	int bytes;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 4d9864c..cf06fe79 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -980,15 +980,6 @@
 	pm_idle = default_idle;
 }
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- */
-void sched_cacheflush(void)
-{
-	ia64_sal_cache_flush(3);
-}
-
 void __init
 check_bugs (void)
 {
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index b3a47f9..9f72838 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -82,7 +82,7 @@
 #define IPI_KDUMP_CPU_STOP	3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
-static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
 
 extern void cpu_halt (void);
 
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 15ad85d..3aeaf15 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -69,6 +69,7 @@
 
 	bust_spinlocks(0);
 	die.lock_owner = -1;
+	add_taint(TAINT_DIE);
 	spin_unlock_irq(&die.lock);
 
 	if (panic_on_oops)
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5a65965..860f251 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -206,6 +206,7 @@
 	{
 		__per_cpu_start = .;
 		*(.data.percpu)
+		*(.data.percpu.shared_aligned)
 		__per_cpu_end = .;
 	}
   . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits
diff --git a/arch/ia64/lib/checksum.c b/arch/ia64/lib/checksum.c
index 4411d9b..9fc9550 100644
--- a/arch/ia64/lib/checksum.c
+++ b/arch/ia64/lib/checksum.c
@@ -60,6 +60,7 @@
 	result = (result & 0xffffffff) + (result >> 32);
 	return (__force __wsum)result;
 }
+EXPORT_SYMBOL(csum_tcpudp_nofold);
 
 extern unsigned long do_csum (const unsigned char *, long);
 
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index b87f785..73ccb60 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -80,6 +80,7 @@
 	struct mm_struct *mm = current->mm;
 	struct siginfo si;
 	unsigned long mask;
+	int fault;
 
 	/* mmap_sem is performance critical.... */
 	prefetchw(&mm->mmap_sem);
@@ -147,26 +148,25 @@
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
-	      case VM_FAULT_MINOR:
-		++current->min_flt;
-		break;
-	      case VM_FAULT_MAJOR:
-		++current->maj_flt;
-		break;
-	      case VM_FAULT_SIGBUS:
+	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We ran out of memory, or some other thing happened
 		 * to us that made us unable to handle the page fault
 		 * gracefully.
 		 */
-		signal = SIGBUS;
-		goto bad_area;
-	      case VM_FAULT_OOM:
-		goto out_of_memory;
-	      default:
+		if (fault & VM_FAULT_OOM) {
+			goto out_of_memory;
+		} else if (fault & VM_FAULT_SIGBUS) {
+			signal = SIGBUS;
+			goto bad_area;
+		}
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	up_read(&mm->mmap_sem);
 	return;
 
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 6da9854..df8d5be 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -750,9 +750,10 @@
 			goto error;
 		} else
 		if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
+			int cpuobj_index = 0;
+
 			memset(p, 0, a.sz);
 			for (i = 0; i < nobj; i++) {
-				int cpuobj_index = 0;
 				if (!SN_HWPERF_IS_NODE(objs + i))
 					continue;
 				node = sn_hwperf_obj_to_cnode(objs + i);
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 5f02b31..57a92ef 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -595,7 +595,6 @@
 static int
 do_ptrace(long request, struct task_struct *child, long addr, long data)
 {
-	unsigned long tmp;
 	int ret;
 
 	switch (request) {
@@ -604,11 +603,7 @@
 	 */
 	case PTRACE_PEEKTEXT:
 	case PTRACE_PEEKDATA:
-		ret = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		if (ret == sizeof(tmp))
-			ret = put_user(tmp,(unsigned long __user *) data);
-		else
-			ret = -EIO;
+		ret = generic_ptrace_peekdata(child, addr, data);
 		break;
 
 	/*
@@ -624,15 +619,9 @@
 	 */
 	case PTRACE_POKETEXT:
 	case PTRACE_POKEDATA:
-		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
-		if (ret == sizeof(data)) {
-			ret = 0;
-			if (request == PTRACE_POKETEXT) {
-				invalidate_cache();
-			}
-		} else {
-			ret = -EIO;
-		}
+		ret = generic_ptrace_pokedata(child, addr, data);
+		if (ret == 0 && request == PTRACE_POKETEXT)
+			invalidate_cache();
 		break;
 
 	/*
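For context, the generic helpers this hunk switches to presumably mirror the open-coded logic being removed; a sketch (the real definitions live outside this diff, in kernel/ptrace.c):

int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
{
	int copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);

	return (copied == sizeof(data)) ? 0 : -EIO;
}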
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index 4e2d5b9..942a8c7 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -110,10 +110,7 @@
   __initramfs_end = .;
 #endif
 
-  . = ALIGN(4096);
-  __per_cpu_start = .;
-  .data.percpu  : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(4096)
   . = ALIGN(4096);
   __init_end = .;
   /* freed after init ends here */
diff --git a/arch/m32r/m32104ut/defconfig.m32104ut b/arch/m32r/m32104ut/defconfig.m32104ut
index 7b68fe8..1f88f49 100644
--- a/arch/m32r/m32104ut/defconfig.m32104ut
+++ b/arch/m32r/m32104ut/defconfig.m32104ut
@@ -699,7 +699,6 @@
 # I2C Hardware Bus support
 #
 CONFIG_I2C_ELEKTOR=m
-CONFIG_I2C_ISA=m
 # CONFIG_I2C_OCORES is not set
 # CONFIG_I2C_PARPORT is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index f3935ba..676a1c4 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -80,6 +80,7 @@
 	struct vm_area_struct * vma;
 	unsigned long page, addr;
 	int write;
+	int fault;
 	siginfo_t info;
 
 	/*
@@ -195,20 +196,18 @@
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	switch (handle_mm_fault(mm, vma, addr, write)) {
-		case VM_FAULT_MINOR:
-			tsk->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			tsk->maj_flt++;
-			break;
-		case VM_FAULT_SIGBUS:
-			goto do_sigbus;
-		case VM_FAULT_OOM:
+	fault = handle_mm_fault(mm, vma, addr, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
-		default:
-			BUG();
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 	set_thread_fault_code(0);
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index cdba9fd..2cf0690 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -128,10 +128,7 @@
 	/* when I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT:	/* read word at location addr. */
 	case PTRACE_PEEKDATA:
-		i = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		if (i != sizeof(tmp))
-			goto out_eio;
-		ret = put_user(tmp, (unsigned long *)data);
+		ret = generic_ptrace_peekdata(child, addr, data);
 		break;
 
 	/* read the word at location addr in the USER area. */
@@ -160,8 +157,7 @@
 	/* when I and D space are separate, this will have to be fixed. */
 	case PTRACE_POKETEXT:	/* write the word at location addr. */
 	case PTRACE_POKEDATA:
-		if (access_process_vm(child, addr, &data, sizeof(data), 1) != sizeof(data))
-			goto out_eio;
+		ret = generic_ptrace_pokedata(child, addr, data);
 		break;
 
 	case PTRACE_POKEUSR:	/* write the word at location addr in the USER area */
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index a27a4fa..4e2752a 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -1170,6 +1170,7 @@
 	console_verbose();
 	printk("%s: %08x\n",str,nr);
 	show_registers(fp);
+	add_taint(TAINT_DIE);
 	do_exit(SIGSEGV);
 }
 
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
index cf6bb51..6216f12 100644
--- a/arch/m68k/lib/checksum.c
+++ b/arch/m68k/lib/checksum.c
@@ -422,3 +422,4 @@
 	    );
     return(sum);
 }
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 2adbeb1..578b48f 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -159,18 +159,17 @@
 #ifdef DEBUG
 	printk("handle_mm_fault returns %d\n",fault);
 #endif
-	switch (fault) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto bus_err;
-	default:
-		goto out_of_memory;
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto bus_err;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return 0;
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index adc64a2..1175cef 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -45,6 +45,10 @@
 	bool
 	default y
 
+config GENERIC_HARDIRQS
+	bool
+	default y
+
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
diff --git a/arch/m68knommu/kernel/Makefile b/arch/m68knommu/kernel/Makefile
index 1c6cd1a..1524b39 100644
--- a/arch/m68knommu/kernel/Makefile
+++ b/arch/m68knommu/kernel/Makefile
@@ -4,8 +4,8 @@
 
 extra-y := vmlinux.lds
 
-obj-y += dma.o entry.o init_task.o m68k_ksyms.o process.o ptrace.o semaphore.o \
-	 setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o
+obj-y += dma.o entry.o init_task.o irq.o m68k_ksyms.o process.o ptrace.o \
+	 semaphore.o setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o
 
 obj-$(CONFIG_MODULES)	+= module.o
 obj-$(CONFIG_COMEMPCI)	+= comempci.o
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c
index 7cd183d..d97b89b 100644
--- a/arch/m68knommu/kernel/asm-offsets.c
+++ b/arch/m68knommu/kernel/asm-offsets.c
@@ -15,7 +15,6 @@
 #include <linux/hardirq.h>
 #include <asm/bootinfo.h>
 #include <asm/irq.h>
-#include <asm/irqnode.h>
 #include <asm/thread_info.h>
 
 #define DEFINE(sym, val) \
@@ -72,10 +71,6 @@
 #else
 	/* bitfields are a bit difficult */
 	DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4);
-	/* offsets into the irq_handler struct */
-	DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler));
-	DEFINE(IRQ_DEVID, offsetof(struct irq_node, dev_id));
-	DEFINE(IRQ_NEXT, offsetof(struct irq_node, next));
 #endif
 
 	/* offsets into the kernel_stat struct */
diff --git a/arch/m68knommu/kernel/irq.c b/arch/m68knommu/kernel/irq.c
new file mode 100644
index 0000000..bba1bb4
--- /dev/null
+++ b/arch/m68knommu/kernel/irq.c
@@ -0,0 +1,82 @@
+/*
+ * irq.c
+ *
+ * (C) Copyright 2007, Greg Ungerer <gerg@snapgear.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+
+asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
+{
+	struct pt_regs *oldregs = set_irq_regs(regs);
+
+	irq_enter();
+	__do_IRQ(irq);
+	irq_exit();
+
+	set_irq_regs(oldregs);
+}
+
+void ack_bad_irq(unsigned int irq)
+{
+	printk(KERN_ERR "IRQ: unexpected irq=%d\n", irq);
+}
+
+static struct irq_chip m_irq_chip = {
+	.name		= "M68K-INTC",
+	.enable		= enable_vector,
+	.disable	= disable_vector,
+	.ack		= ack_vector,
+};
+
+void __init init_IRQ(void)
+{
+	int irq;
+
+	init_vectors();
+
+	for (irq = 0; (irq < NR_IRQS); irq++) {
+		irq_desc[irq].status = IRQ_DISABLED;
+		irq_desc[irq].action = NULL;
+		irq_desc[irq].depth = 1;
+		irq_desc[irq].chip = &m_irq_chip;
+	}
+}
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+	struct irqaction *ap;
+	int irq = *((loff_t *) v);
+
+	if (irq == 0)
+		seq_puts(p, "           CPU0\n");
+
+	if (irq < NR_IRQS) {
+		ap = irq_desc[irq].action;
+		if (ap) {
+			seq_printf(p, "%3d: ", irq);
+			seq_printf(p, "%10u ", kstat_irqs(irq));
+			seq_printf(p, "%14s  ", irq_desc[irq].chip->name);
+
+			seq_printf(p, "%s", ap->name);
+			for (ap = ap->next; ap; ap = ap->next)
+				seq_printf(p, ", %s", ap->name);
+			seq_putc(p, '\n');
+		}
+	}
+
+	return 0;
+}
+
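With GENERIC_HARDIRQS now selected (see the Kconfig hunk above) and this new irq.c registering an irq_chip for the vectors, m68knommu drivers go through the common kernel/irq layer instead of the arch-private request_irq()/free_irq() removed later in this series. A hypothetical driver-side sketch (MY_DEV_IRQ, my_isr and the "mydev" name are illustrative only, not from this merge):

	#include <linux/interrupt.h>

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		/* acknowledge the hardware and do the real work here */
		return IRQ_HANDLED;
	}

	static int __init my_driver_init(void)
	{
		if (request_irq(MY_DEV_IRQ, my_isr, IRQF_DISABLED, "mydev", NULL))
			return -EBUSY;
		return 0;
	}
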
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c
index 25327c9..f795062 100644
--- a/arch/m68knommu/kernel/m68k_ksyms.c
+++ b/arch/m68knommu/kernel/m68k_ksyms.c
@@ -81,8 +81,6 @@
 EXPORT_SYMBOL(__udivsi3);
 EXPORT_SYMBOL(__umodsi3);
 
-EXPORT_SYMBOL(is_in_rom);
-
 #ifdef CONFIG_COLDFIRE
 extern unsigned int *dma_device_address;
 extern unsigned long dma_base_addr, _ramend;
diff --git a/arch/m68knommu/kernel/ptrace.c b/arch/m68knommu/kernel/ptrace.c
index f54b6a3..ef70ca0 100644
--- a/arch/m68knommu/kernel/ptrace.c
+++ b/arch/m68knommu/kernel/ptrace.c
@@ -106,17 +106,9 @@
 	switch (request) {
 		/* when I and D space are separate, these will need to be fixed. */
 		case PTRACE_PEEKTEXT: /* read word at location addr. */ 
-		case PTRACE_PEEKDATA: {
-			unsigned long tmp;
-			int copied;
-
-			copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-			ret = -EIO;
-			if (copied != sizeof(tmp))
-				break;
-			ret = put_user(tmp,(unsigned long *) data);
+		case PTRACE_PEEKDATA:
+			ret = generic_ptrace_peekdata(child, addr, data);
 			break;
-		}
 
 		/* read the word at location addr in the USER area. */
 		case PTRACE_PEEKUSR: {
@@ -159,10 +151,7 @@
 		/* when I and D space are separate, this will have to be fixed. */
 		case PTRACE_POKETEXT: /* write the word at location addr. */
 		case PTRACE_POKEDATA:
-			ret = 0;
-			if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
-				break;
-			ret = -EIO;
+			ret = generic_ptrace_pokedata(child, addr, data);
 			break;
 
 		case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/m68knommu/kernel/traps.c b/arch/m68knommu/kernel/traps.c
index bed5f47..437a061 100644
--- a/arch/m68knommu/kernel/traps.c
+++ b/arch/m68knommu/kernel/traps.c
@@ -62,8 +62,6 @@
 
 void __init trap_init(void)
 {
-	if (mach_trap_init)
-		mach_trap_init();
 }
 
 void die_if_kernel(char *str, struct pt_regs *fp, int nr)
@@ -82,7 +80,8 @@
 
 	printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n",
 		current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
-	show_stack(NULL, (unsigned long *)fp);
+	show_stack(NULL, (unsigned long *)(fp + 1));
+	add_taint(TAINT_DIE);
 	do_exit(SIGSEGV);
 }
 
diff --git a/arch/m68knommu/mm/memory.c b/arch/m68knommu/mm/memory.c
index 1a66b71..f93b88b 100644
--- a/arch/m68knommu/mm/memory.c
+++ b/arch/m68knommu/mm/memory.c
@@ -33,23 +33,3 @@
 	return paddr;
 }
 
-
-int is_in_rom(unsigned long addr)
-{
-	extern unsigned long _ramstart, _ramend;
-
-	/*
-	 *	What we are really trying to do is determine if addr is
-	 *	in an allocated kernel memory region. If not then assume
-	 *	we cannot free it or otherwise de-allocate it. Ideally
-	 *	we could restrict this to really being in a ROM or flash,
-	 *	but that would need to be done on a board by board basis,
-	 *	not globally.
-	 */
-	if ((addr < _ramstart) || (addr >= _ramend))
-		return(1);
-
-	/* Default case, not in ROM */
-	return(0);
-}
-
diff --git a/arch/m68knommu/platform/5307/Makefile b/arch/m68knommu/platform/5307/Makefile
index 2fd37dcc..719a313 100644
--- a/arch/m68knommu/platform/5307/Makefile
+++ b/arch/m68knommu/platform/5307/Makefile
@@ -16,7 +16,7 @@
 AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
 endif
 
-obj-$(CONFIG_COLDFIRE)	+= entry.o vectors.o ints.o
+obj-$(CONFIG_COLDFIRE)	+= entry.o vectors.o
 obj-$(CONFIG_M5206)	+= timers.o
 obj-$(CONFIG_M5206e)	+= timers.o
 obj-$(CONFIG_M520x)	+= pit.o
diff --git a/arch/m68knommu/platform/5307/entry.S b/arch/m68knommu/platform/5307/entry.S
index f0dba84..c358aeb 100644
--- a/arch/m68knommu/platform/5307/entry.S
+++ b/arch/m68knommu/platform/5307/entry.S
@@ -1,7 +1,7 @@
 /*
  *  linux/arch/m68knommu/platform/5307/entry.S
  *
- *  Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
+ *  Copyright (C) 1999-2007, Greg Ungerer (gerg@snapgear.com)
  *  Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
  *                      Kenneth Albanowski <kjahds@kjahds.com>,
  *  Copyright (C) 2000  Lineo Inc. (www.lineo.com)
@@ -155,34 +155,21 @@
 
 /*
  * This is the generic interrupt handler (for all hardware interrupt
- * sources). It figures out the vector number and calls the appropriate
- * interrupt service routine directly.
+ * sources). Calls up to the high-level code to do all the work.
  */
 ENTRY(inthandler)
 	SAVE_ALL
 	moveq	#-1,%d0
 	movel	%d0,%sp@(PT_ORIG_D0)
-	addql	#1,local_irq_count
 
 	movew	%sp@(PT_FORMATVEC),%d0	/* put exception # in d0 */
 	andl	#0x03fc,%d0		/* mask out vector only */
 
-	leal	per_cpu__kstat+STAT_IRQ,%a0
-	addql	#1,%a0@(%d0)
-
+	movel	%sp,%sp@-		/* push regs arg */
 	lsrl	#2,%d0			/* calculate real vector # */
-	movel	%d0,%d1			/* calculate array offset */
-	lsll	#4,%d1
-	lea	irq_list,%a0
-	addl	%d1,%a0			/* pointer to array struct */
-
-	movel	%sp,%sp@-		/* push regs arg onto stack */
-	movel	%a0@(8),%sp@-		/* push devid arg */
-	movel	%d0,%sp@-		/* push vector # on stack */
-
-	movel	%a0@,%a0		/* get function to call */
-	jbsr	%a0@			/* call vector handler */
-	lea	%sp@(12),%sp		/* pop parameters off stack */
+	movel	%d0,%sp@-		/* push vector number */
+	jbsr	do_IRQ			/* call high level irq handler */
+	lea	%sp@(8),%sp		/* pop args off stack */
 
 	bra	ret_from_interrupt	/* this was fallthrough */
 
@@ -198,24 +185,15 @@
 	movew	%sp@(PT_FORMATVEC),%d0
 	andl	#0x03fc,%d0		/* mask out vector only */
 
-	leal	per_cpu__kstat+STAT_IRQ,%a0
-	addql	#1,%a0@(%d0)
-
-	movel	%sp,%sp@-		/* push regs arg onto stack */
-	clrl	%sp@-			/* push devid arg */
+	movel	%sp,%sp@-		/* push regs arg */
 	lsrl	#2,%d0			/* calculate real vector # */
-	movel	%d0,%sp@-		/* push vector # on stack */
-
-	lsll	#4,%d0			/* adjust for array offset */
-	lea	irq_list,%a0
-	movel	%a0@(%d0),%a0		/* get function to call */
-	jbsr	%a0@			/* call vector handler */
-	lea	%sp@(12),%sp		/* pop parameters off stack */
+	movel	%d0,%sp@-		/* push vector number */
+	jbsr	do_IRQ			/* call high level irq handler */
+	lea	%sp@(8),%sp		/* pop args off stack */
 
 	RESTORE_LOCAL
 
 ENTRY(ret_from_interrupt)
-	subql	#1,local_irq_count
 	jeq	2f
 1:
 	RESTORE_ALL
diff --git a/arch/m68knommu/platform/5307/ints.c b/arch/m68knommu/platform/5307/ints.c
deleted file mode 100644
index 7516330..0000000
--- a/arch/m68knommu/platform/5307/ints.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * linux/arch/m68knommu/kernel/ints.c -- General interrupt handling code
- *
- * Copyright (C) 1999-2002  Greg Ungerer (gerg@snapgear.com)
- * Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
- *                     Kenneth Albanowski <kjahds@kjahds.com>,
- * Copyright (C) 2000  Lineo Inc. (www.lineo.com) 
- *
- * Based on:
- *
- * linux/arch/m68k/kernel/ints.c -- Linux/m68k general interrupt handling code
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/errno.h>
-#include <linux/seq_file.h>
-
-#include <asm/system.h>
-#include <asm/irq.h>
-#include <asm/irqnode.h>
-#include <asm/traps.h>
-#include <asm/page.h>
-#include <asm/machdep.h>
-
-/*
- *	This table stores the address info for each vector handler.
- */
-struct irq_entry irq_list[SYS_IRQS];
-
-#define NUM_IRQ_NODES 16
-static irq_node_t nodes[NUM_IRQ_NODES];
-
-/* The number of spurious interrupts */
-volatile unsigned int num_spurious;
-
-unsigned int local_irq_count[NR_CPUS];
-
-static irqreturn_t default_irq_handler(int irq, void *ptr)
-{
-#if 1
-	printk(KERN_INFO "%s(%d): default irq handler vec=%d [0x%x]\n",
-		__FILE__, __LINE__, irq, irq);
-#endif
-	return(IRQ_HANDLED);
-}
-
-/*
- * void init_IRQ(void)
- *
- * Parameters:	None
- *
- * Returns:	Nothing
- *
- * This function should be called during kernel startup to initialize
- * the IRQ handling routines.
- */
-
-void __init init_IRQ(void)
-{
-	int i;
-
-	for (i = 0; i < SYS_IRQS; i++) {
-		if (mach_default_handler)
-			irq_list[i].handler = mach_default_handler;
-		else
-			irq_list[i].handler = default_irq_handler;
-		irq_list[i].flags   = IRQ_FLG_STD;
-		irq_list[i].dev_id  = NULL;
-		irq_list[i].devname = NULL;
-	}
-
-	for (i = 0; i < NUM_IRQ_NODES; i++)
-		nodes[i].handler = NULL;
-
-	if (mach_init_IRQ)
-		mach_init_IRQ();
-}
-
-irq_node_t *new_irq_node(void)
-{
-	irq_node_t *node;
-	short i;
-
-	for (node = nodes, i = NUM_IRQ_NODES-1; i >= 0; node++, i--)
-		if (!node->handler)
-			return node;
-
-	printk(KERN_INFO "new_irq_node: out of nodes\n");
-	return NULL;
-}
-
-int request_irq(
-	unsigned int irq,
-	irq_handler_t handler,
-	unsigned long flags,
-	const char *devname,
-	void *dev_id)
-{
-	if (irq < 0 || irq >= NR_IRQS) {
-		printk(KERN_WARNING "%s: Incorrect IRQ %d from %s\n", __FUNCTION__,
-			irq, devname);
-		return -ENXIO;
-	}
-
-	if (!(irq_list[irq].flags & IRQ_FLG_STD)) {
-		if (irq_list[irq].flags & IRQ_FLG_LOCK) {
-			printk(KERN_WARNING "%s: IRQ %d from %s is not replaceable\n",
-			       __FUNCTION__, irq, irq_list[irq].devname);
-			return -EBUSY;
-		}
-		if (flags & IRQ_FLG_REPLACE) {
-			printk(KERN_WARNING "%s: %s can't replace IRQ %d from %s\n",
-			       __FUNCTION__, devname, irq, irq_list[irq].devname);
-			return -EBUSY;
-		}
-	}
-
-	if (flags & IRQ_FLG_FAST) {
-		extern asmlinkage void fasthandler(void);
-		extern void set_evector(int vecnum, void (*handler)(void));
-		set_evector(irq, fasthandler);
-	}
-
-	irq_list[irq].handler = handler;
-	irq_list[irq].flags   = flags;
-	irq_list[irq].dev_id  = dev_id;
-	irq_list[irq].devname = devname;
-	return 0;
-}
-
-EXPORT_SYMBOL(request_irq);
-
-void free_irq(unsigned int irq, void *dev_id)
-{
-	if (irq >= NR_IRQS) {
-		printk(KERN_WARNING "%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
-		return;
-	}
-
-	if (irq_list[irq].dev_id != dev_id)
-		printk(KERN_WARNING "%s: Removing probably wrong IRQ %d from %s\n",
-		       __FUNCTION__, irq, irq_list[irq].devname);
-
-	if (irq_list[irq].flags & IRQ_FLG_FAST) {
-		extern asmlinkage void inthandler(void);
-		extern void set_evector(int vecnum, void (*handler)(void));
-		set_evector(irq, inthandler);
-	}
-
-	if (mach_default_handler)
-		irq_list[irq].handler = mach_default_handler;
-	else
-		irq_list[irq].handler = default_irq_handler;
-	irq_list[irq].flags   = IRQ_FLG_STD;
-	irq_list[irq].dev_id  = NULL;
-	irq_list[irq].devname = NULL;
-}
-
-EXPORT_SYMBOL(free_irq);
-
-
-int sys_request_irq(unsigned int irq, irq_handler_t handler, 
-                    unsigned long flags, const char *devname, void *dev_id)
-{
-	if (irq > IRQ7) {
-		printk(KERN_WARNING "%s: Incorrect IRQ %d from %s\n",
-		       __FUNCTION__, irq, devname);
-		return -ENXIO;
-	}
-
-#if 0
-	if (!(irq_list[irq].flags & IRQ_FLG_STD)) {
-		if (irq_list[irq].flags & IRQ_FLG_LOCK) {
-			printk(KERN_WARNING "%s: IRQ %d from %s is not replaceable\n",
-			       __FUNCTION__, irq, irq_list[irq].devname);
-			return -EBUSY;
-		}
-		if (!(flags & IRQ_FLG_REPLACE)) {
-			printk(KERN_WARNING "%s: %s can't replace IRQ %d from %s\n",
-			       __FUNCTION__, devname, irq, irq_list[irq].devname);
-			return -EBUSY;
-		}
-	}
-#endif
-
-	irq_list[irq].handler = handler;
-	irq_list[irq].flags   = flags;
-	irq_list[irq].dev_id  = dev_id;
-	irq_list[irq].devname = devname;
-	return 0;
-}
-
-void sys_free_irq(unsigned int irq, void *dev_id)
-{
-	if (irq > IRQ7) {
-		printk(KERN_WARNING "%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
-		return;
-	}
-
-	if (irq_list[irq].dev_id != dev_id)
-		printk(KERN_WARNING "%s: Removing probably wrong IRQ %d from %s\n",
-		       __FUNCTION__, irq, irq_list[irq].devname);
-
-	irq_list[irq].handler = mach_default_handler;
-	irq_list[irq].flags   = 0;
-	irq_list[irq].dev_id  = NULL;
-	irq_list[irq].devname = NULL;
-}
-
-/*
- * Do we need these probe functions on the m68k?
- *
- *  ... may be useful with ISA devices
- */
-unsigned long probe_irq_on (void)
-{
-	return 0;
-}
-
-EXPORT_SYMBOL(probe_irq_on);
-
-int probe_irq_off (unsigned long irqs)
-{
-	return 0;
-}
-
-EXPORT_SYMBOL(probe_irq_off);
-
-asmlinkage void process_int(unsigned long vec, struct pt_regs *fp)
-{
-	if (vec >= VEC_INT1 && vec <= VEC_INT7) {
-		vec -= VEC_SPUR;
-		kstat_cpu(0).irqs[vec]++;
-		irq_list[vec].handler(vec, irq_list[vec].dev_id);
-	} else {
-		if (mach_process_int)
-			mach_process_int(vec, fp);
-		else
-			panic("Can't process interrupt vector %ld\n", vec);
-		return;
-	}
-}
-
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	int i = *(loff_t *) v;
-
-	if (i < NR_IRQS) {
-		if (! (irq_list[i].flags & IRQ_FLG_STD)) {
-			seq_printf(p, "%3d: %10u ", i,
-				(i ? kstat_cpu(0).irqs[i] : num_spurious));
-			if (irq_list[i].flags & IRQ_FLG_LOCK)
-				seq_printf(p, "L ");
-			else
-				seq_printf(p, "  ");
-			seq_printf(p, "%s\n", irq_list[i].devname);
-		}
-	}
-
-	if (i == NR_IRQS && mach_get_irq_list)
-		mach_get_irq_list(p, v);
-	return 0;
-}
-
-void init_irq_proc(void)
-{
-	/* Insert /proc/irq driver here */
-}
-
diff --git a/arch/m68knommu/platform/5307/vectors.c b/arch/m68knommu/platform/5307/vectors.c
index 2a8b0d0..6cf8946 100644
--- a/arch/m68knommu/platform/5307/vectors.c
+++ b/arch/m68knommu/platform/5307/vectors.c
@@ -3,23 +3,17 @@
 /*
  *	linux/arch/m68knommu/platform/5307/vectors.c
  *
- *	Copyright (C) 1999-2003, Greg Ungerer <gerg@snapgear.com>
+ *	Copyright (C) 1999-2007, Greg Ungerer <gerg@snapgear.com>
  */
 
 /***************************************************************************/
 
 #include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/param.h>
 #include <linux/init.h>
-#include <linux/unistd.h>
-#include <linux/delay.h>
-#include <asm/irq.h>
-#include <asm/dma.h>
+#include <linux/irq.h>
 #include <asm/traps.h>
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
-#include <asm/mcftimer.h>
 #include <asm/mcfsim.h>
 #include <asm/mcfdma.h>
 #include <asm/mcfwdebug.h>
@@ -56,7 +50,7 @@
 asmlinkage void system_call(void);
 asmlinkage void inthandler(void);
 
-void __init coldfire_trap_init(void)
+void __init init_vectors(void)
 {
 	int i;
 
@@ -86,6 +80,23 @@
 
 /***************************************************************************/
 
+void enable_vector(unsigned int irq)
+{
+	/* Currently no action on ColdFire */
+}
+
+void disable_vector(unsigned int irq)
+{
+	/* Currently no action on ColdFire */
+}
+
+void ack_vector(unsigned int irq)
+{
+	/* Currently no action on ColdFire */
+}
+
+/***************************************************************************/
+
 void coldfire_reset(void)
 {
 	HARD_RESET_NOW();
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S
index f978627..b1aef72 100644
--- a/arch/m68knommu/platform/68328/entry.S
+++ b/arch/m68knommu/platform/68328/entry.S
@@ -133,7 +133,6 @@
  */
 inthandler1:
 	SAVE_ALL
-	addql	#1,local_irq_count	/*  put exception # in d0*/
 	movew	%sp@(PT_VECTOR), %d0
 	and	#0x3ff, %d0
 
@@ -145,7 +144,6 @@
 
 inthandler2:
 	SAVE_ALL
-	addql	#1,local_irq_count	/*  put exception # in d0*/
 	movew	%sp@(PT_VECTOR), %d0
 	and	#0x3ff, %d0
 
@@ -157,7 +155,6 @@
 
 inthandler3:
 	SAVE_ALL
-	addql	#1,local_irq_count	/*  put exception # in d0*/
 	movew	%sp@(PT_VECTOR), %d0
 	and	#0x3ff, %d0
 
@@ -169,7 +166,6 @@
 
 inthandler4:
 	SAVE_ALL
-	addql	#1,local_irq_count	/*  put exception # in d0*/
 	movew	%sp@(PT_VECTOR), %d0
 	and	#0x3ff, %d0
 
@@ -181,7 +177,6 @@
 
 inthandler5:
 	SAVE_ALL
-	addql	#1,local_irq_count	/*  put exception # in d0*/
 	movew	%sp@(PT_VECTOR), %d0
 	and	#0x3ff, %d0
 
@@ -193,7 +188,6 @@
 
 inthandler6:
 	SAVE_ALL
-	addql	#1,local_irq_count	/*  put exception # in d0*/
 	movew	%sp@(PT_VECTOR), %d0
 	and	#0x3ff, %d0
 
@@ -205,7 +199,6 @@
 
 inthandler7:
 	SAVE_ALL
-	addql	#1,local_irq_count	/*  put exception # in d0*/
 	movew	%sp@(PT_VECTOR), %d0
 	and	#0x3ff, %d0
 
@@ -217,7 +210,6 @@
 
 inthandler:
 	SAVE_ALL
-	addql	#1,local_irq_count	/*  put exception # in d0*/
 	movew	%sp@(PT_VECTOR), %d0
 	and	#0x3ff, %d0
 
@@ -228,7 +220,6 @@
 	bra	ret_from_interrupt
 
 ret_from_interrupt:
-	subql	#1,local_irq_count
 	jeq	1f
 2:
 	RESTORE_ALL
@@ -238,7 +229,6 @@
 	jhi	2b
 
 	/* check if we need to do software interrupts */
-	movel	local_irq_count,%d0
 	jeq	ret_from_exception
 
 	pea	ret_from_exception
diff --git a/arch/m68knommu/platform/68328/ints.c b/arch/m68knommu/platform/68328/ints.c
index 3de6e33..72e56d5 100644
--- a/arch/m68knommu/platform/68328/ints.c
+++ b/arch/m68knommu/platform/68328/ints.c
@@ -9,21 +9,14 @@
  * Copyright 1999 D. Jeff Dionne <jeff@rt-control.com>
  */
 
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/errno.h>
+#include <linux/init.h>
 #include <linux/interrupt.h>
-
-#include <asm/system.h>
-#include <asm/irq.h>
-#include <asm/irqnode.h>
+#include <linux/irq.h>
 #include <asm/traps.h>
 #include <asm/io.h>
 #include <asm/machdep.h>
-#include <asm/setup.h>
 
 #if defined(CONFIG_M68328)
 #include <asm/MC68328.h>
@@ -79,16 +72,12 @@
 
 /* The number of spurious interrupts */
 volatile unsigned int num_spurious;
-unsigned int local_irq_count[NR_CPUS];
-
-/* irq node variables for the 32 (potential) on chip sources */
-static irq_node_t int_irq_list[NR_IRQS];
 
 /*
  * This function should be called during kernel startup to initialize
- * the IRQ handling routines.
+ * the machine vector table.
  */
-void init_IRQ(void)
+void __init init_vectors(void)
 {
 	int i;
 
@@ -108,96 +97,10 @@
  
 	IVR = 0x40; /* Set DragonBall IVR (interrupt base) to 64 */
 
-	/* initialize handlers */
-	for (i = 0; i < NR_IRQS; i++) {
-		int_irq_list[i].handler = bad_interrupt;
-		int_irq_list[i].flags   = IRQ_FLG_STD;
-		int_irq_list[i].dev_id  = NULL;
-		int_irq_list[i].devname = NULL;
-	}
-
 	/* turn off all interrupts */
 	IMR = ~0;
 }
 
-int request_irq(
-	unsigned int irq,
-	irq_handler_t handler,
-	unsigned long flags,
-	const char *devname,
-	void *dev_id)
-{
-	if (irq >= NR_IRQS) {
-		printk (KERN_ERR "%s: Unknown IRQ %d from %s\n", __FUNCTION__, irq, devname);
-		return -ENXIO;
-	}
-
-	if (!(int_irq_list[irq].flags & IRQ_FLG_STD)) {
-		if (int_irq_list[irq].flags & IRQ_FLG_LOCK) {
-			printk(KERN_ERR "%s: IRQ %d from %s is not replaceable\n",
-			       __FUNCTION__, irq, int_irq_list[irq].devname);
-			return -EBUSY;
-		}
-		if (flags & IRQ_FLG_REPLACE) {
-			printk(KERN_ERR "%s: %s can't replace IRQ %d from %s\n",
-			       __FUNCTION__, devname, irq, int_irq_list[irq].devname);
-			return -EBUSY;
-		}
-	}
-
-	int_irq_list[irq].handler = handler;
-	int_irq_list[irq].flags   = flags;
-	int_irq_list[irq].dev_id  = dev_id;
-	int_irq_list[irq].devname = devname;
-
-	IMR &= ~(1<<irq);
-
-	return 0;
-}
-
-EXPORT_SYMBOL(request_irq);
-
-void free_irq(unsigned int irq, void *dev_id)
-{
-	if (irq >= NR_IRQS) {
-		printk (KERN_ERR "%s: Unknown IRQ %d\n", __FUNCTION__, irq);
-		return;
-	}
-
-	if (int_irq_list[irq].dev_id != dev_id)
-		printk(KERN_INFO "%s: removing probably wrong IRQ %d from %s\n",
-		       __FUNCTION__, irq, int_irq_list[irq].devname);
-
-	int_irq_list[irq].handler = bad_interrupt;
-	int_irq_list[irq].flags   = IRQ_FLG_STD;
-	int_irq_list[irq].dev_id  = NULL;
-	int_irq_list[irq].devname = NULL;
-
-	IMR |= 1<<irq;
-}
-
-EXPORT_SYMBOL(free_irq);
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	int i = *(loff_t *) v;
-
-	if (i < NR_IRQS) {
-		if (int_irq_list[i].devname) {
-			seq_printf(p, "%3d: %10u ", i, kstat_cpu(0).irqs[i]);
-			if (int_irq_list[i].flags & IRQ_FLG_LOCK)
-				seq_printf(p, "L ");
-			else
-				seq_printf(p, "  ");
-			seq_printf(p, "%s\n", int_irq_list[i].devname);
-		}
-	}
-	if (i == NR_IRQS)
-		seq_printf(p, "   : %10u   spurious\n", num_spurious);
-
-	return 0;
-}
-
 /* The 68k family did not have a good way to determine the source
  * of interrupts until later in the family.  The EC000 core does
  * not provide the vector number on the stack, we vector everything
@@ -255,14 +158,23 @@
 			irq++;
 		}
 
-		kstat_cpu(0).irqs[irq]++;
-
-		if (int_irq_list[irq].handler) {
-			int_irq_list[irq].handler(irq, int_irq_list[irq].dev_id, fp);
-		} else {
-			printk(KERN_ERR "unregistered interrupt %d!\nTurning it off in the IMR...\n", irq);
-			IMR |= mask;
-		}
+		do_IRQ(irq, fp);
 		pend &= ~mask;
 	}
 }
+
+void enable_vector(unsigned int irq)
+{
+	IMR &= ~(1<<irq);
+}
+
+void disable_vector(unsigned int irq)
+{
+	IMR |= (1<<irq);
+}
+
+void ack_vector(unsigned int irq)
+{
+	/* Nothing needed */
+}
+
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S
index f1af897..55dfefe 100644
--- a/arch/m68knommu/platform/68360/entry.S
+++ b/arch/m68knommu/platform/68360/entry.S
@@ -120,23 +120,21 @@
 	RESTORE_ALL
 
 /*
- * This is the main interrupt handler, responsible for calling process_int()
+ * This is the main interrupt handler, responsible for calling do_IRQ()
  */
 inthandler:
 	SAVE_ALL
-	addql	#1,local_irq_count	/*  put exception # in d0*/
 	movew	%sp@(PT_VECTOR), %d0
 	and.l	#0x3ff, %d0
 	lsr.l   #0x02,  %d0
 
 	movel	%sp,%sp@-
 	movel	%d0,%sp@- 		/*  put vector # on stack*/
-	jbsr	process_int		/*  process the IRQ*/
+	jbsr	do_IRQ			/*  process the IRQ*/
 3:     	addql	#8,%sp			/*  pop parameters off stack*/
 	bra	ret_from_interrupt
 
 ret_from_interrupt:
-	subql	#1,local_irq_count
 	jeq	1f
 2:
 	RESTORE_ALL
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index 4df3c14..c367811 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -10,20 +10,13 @@
  * Copyright (c) 1999 D. Jeff Dionne <jeff@uclinux.org>
  */
 
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/errno.h>
-
-#include <asm/system.h>
-#include <asm/irq.h>
-#include <asm/irqnode.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <asm/traps.h>
-#include <asm/io.h>
 #include <asm/machdep.h>
-#include <asm/setup.h>
 #include <asm/m68360.h>
 
 /* from quicc/commproc.c: */
@@ -36,26 +29,19 @@
 asmlinkage void system_call(void);
 asmlinkage void buserr(void);
 asmlinkage void trap(void);
-asmlinkage irqreturn_t bad_interrupt(void);
-asmlinkage irqreturn_t inthandler(void);
+asmlinkage void bad_interrupt(void);
+asmlinkage void inthandler(void);
 
 extern void *_ramvec[];
 
 /* The number of spurious interrupts */
 volatile unsigned int num_spurious;
-unsigned int local_irq_count[NR_CPUS];
-
-/* irq node variables for the 32 (potential) on chip sources */
-static irq_node_t int_irq_list[INTERNAL_IRQS];
-
-static short int_irq_ablecount[INTERNAL_IRQS];
 
 /*
  * This function should be called during kernel startup to initialize
- * IRQ handling routines.
+ * the vector table.
  */
-
-void init_IRQ(void)
+void init_vectors(void)
 {
 	int i;
 	int vba = (CPM_VECTOR_BASE<<4);
@@ -79,7 +65,6 @@
 	_ramvec[32] = system_call;
 	_ramvec[33] = trap;
 
-
 	cpm_interrupt_init();
 
 	/* set up CICR for vector base address and irq level */
@@ -124,212 +109,20 @@
 
 	/* turn off all CPM interrupts */
 	pquicc->intr_cimr = 0x00000000;
-
-	/* initialize handlers */
-	for (i = 0; i < INTERNAL_IRQS; i++) {
-		int_irq_list[i].handler = NULL;
-		int_irq_list[i].flags   = IRQ_FLG_STD;
-		int_irq_list[i].dev_id  = NULL;
-		int_irq_list[i].devname = NULL;
-	}
 }
 
-#if 0
-void M68360_insert_irq(irq_node_t **list, irq_node_t *node)
+void enable_vector(unsigned int irq)
 {
-	unsigned long flags;
-	irq_node_t *cur;
-
-	if (!node->dev_id)
-		printk(KERN_INFO "%s: Warning: dev_id of %s is zero\n",
-		       __FUNCTION__, node->devname);
-
-	local_irq_save(flags);
-
-	cur = *list;
-
-	while (cur) {
-		list = &cur->next;
-		cur = cur->next;
-	}
-
-	node->next = cur;
-	*list = node;
-
-	local_irq_restore(flags);
+	pquicc->intr_cimr |= (1 << irq);
 }
 
-void M68360_delete_irq(irq_node_t **list, void *dev_id)
+void disable_vector(unsigned int irq)
 {
-	unsigned long flags;
-	irq_node_t *node;
-
-	local_irq_save(flags);
-
-	for (node = *list; node; list = &node->next, node = *list) {
-		if (node->dev_id == dev_id) {
-			*list = node->next;
-			/* Mark it as free. */
-			node->handler = NULL;
-			local_irq_restore(flags);
-			return;
-		}
-	}
-	local_irq_restore(flags);
-	printk (KERN_INFO "%s: tried to remove invalid irq\n", __FUNCTION__);
+	pquicc->intr_cimr &= ~(1 << irq);
 }
-#endif
 
-int request_irq(
-	unsigned int irq,
-	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-	unsigned long flags,
-	const char *devname,
-	void *dev_id)
+void ack_vector(unsigned int irq)
 {
-	int mask = (1<<irq);
-
-	irq += (CPM_VECTOR_BASE<<4);
-
-	if (irq >= INTERNAL_IRQS) {
-		printk (KERN_ERR "%s: Unknown IRQ %d from %s\n", __FUNCTION__, irq, devname);
-		return -ENXIO;
-	}
-
-	if (!(int_irq_list[irq].flags & IRQ_FLG_STD)) {
-		if (int_irq_list[irq].flags & IRQ_FLG_LOCK) {
-			printk(KERN_ERR "%s: IRQ %d from %s is not replaceable\n",
-			       __FUNCTION__, irq, int_irq_list[irq].devname);
-			return -EBUSY;
-		}
-		if (flags & IRQ_FLG_REPLACE) {
-			printk(KERN_ERR "%s: %s can't replace IRQ %d from %s\n",
-			       __FUNCTION__, devname, irq, int_irq_list[irq].devname);
-			return -EBUSY;
-		}
-	}
-	int_irq_list[irq].handler = handler;
-	int_irq_list[irq].flags   = flags;
-	int_irq_list[irq].dev_id  = dev_id;
-	int_irq_list[irq].devname = devname;
-
-	/* enable in the CIMR */
-	if (!int_irq_ablecount[irq])
-		pquicc->intr_cimr |= mask;
-	/*      *(volatile unsigned long *)0xfffff304 &= ~(1<<irq); */
-
-	return 0;
+	pquicc->intr_cisr = (1 << irq);
 }
 
-EXPORT_SYMBOL(request_irq);
-
-void free_irq(unsigned int irq, void *dev_id)
-{
-	if (irq >= INTERNAL_IRQS) {
-		printk (KERN_ERR "%s: Unknown IRQ %d\n", __FUNCTION__, irq);
-		return;
-	}
-
-	if (int_irq_list[irq].dev_id != dev_id)
-		printk(KERN_INFO "%s: removing probably wrong IRQ %d from %s\n",
-		       __FUNCTION__, irq, int_irq_list[irq].devname);
-	int_irq_list[irq].handler = NULL;
-	int_irq_list[irq].flags   = IRQ_FLG_STD;
-	int_irq_list[irq].dev_id  = NULL;
-	int_irq_list[irq].devname = NULL;
-
-	*(volatile unsigned long *)0xfffff304 |= 1<<irq;
-}
-
-EXPORT_SYMBOL(free_irq);
-
-#if 0
-/*
- * Enable/disable a particular machine specific interrupt source.
- * Note that this may affect other interrupts in case of a shared interrupt.
- * This function should only be called for a _very_ short time to change some
- * internal data, that may not be changed by the interrupt at the same time.
- * int_(enable|disable)_irq calls may also be nested.
- */
-void M68360_enable_irq(unsigned int irq)
-{
-	if (irq >= INTERNAL_IRQS) {
-		printk(KERN_ERR "%s: Unknown IRQ %d\n", __FUNCTION__, irq);
-		return;
-	}
-
-	if (--int_irq_ablecount[irq])
-		return;
-
-	/* enable the interrupt */
-	*(volatile unsigned long *)0xfffff304 &= ~(1<<irq);
-}
-
-void M68360_disable_irq(unsigned int irq)
-{
-	if (irq >= INTERNAL_IRQS) {
-		printk(KERN_ERR "%s: Unknown IRQ %d\n", __FUNCTION__, irq);
-		return;
-	}
-
-	if (int_irq_ablecount[irq]++)
-		return;
-
-	/* disable the interrupt */
-	*(volatile unsigned long *)0xfffff304 |= 1<<irq;
-}
-#endif
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	int i = *(loff_t *) v;
-
-	if (i < NR_IRQS) {
-		if (int_irq_list[i].devname) {
-			seq_printf(p, "%3d: %10u ", i, kstat_cpu(0).irqs[i]);
-			if (int_irq_list[i].flags & IRQ_FLG_LOCK)
-				seq_printf(p, "L ");
-			else
-				seq_printf(p, "  ");
-			seq_printf(p, "%s\n", int_irq_list[i].devname);
-		}
-	}
-	if (i == NR_IRQS)
-		seq_printf(p, "   : %10u   spurious\n", num_spurious);
-
-	return 0;
-}
-
-/* The 68k family did not have a good way to determine the source
- * of interrupts until later in the family.  The EC000 core does
- * not provide the vector number on the stack, we vector everything
- * into one vector and look in the blasted mask register...
- * This code is designed to be fast, almost constant time, not clean!
- */
-void process_int(int vec, struct pt_regs *fp)
-{
-	int irq;
-	int mask;
-
-	/* unsigned long pend = *(volatile unsigned long *)0xfffff30c; */
-
-	/* irq = vec + (CPM_VECTOR_BASE<<4); */
-	irq = vec;
-
-	/* unsigned long pend = *(volatile unsigned long *)pquicc->intr_cipr; */
-
-	/* Bugger all that weirdness. For the moment, I seem to know where I came from;
-	 * vec is passed from a specific ISR, so I'll use it. */
-
-	if (int_irq_list[irq].handler) {
-		int_irq_list[irq].handler(irq , int_irq_list[irq].dev_id, fp);
-		kstat_cpu(0).irqs[irq]++;
-		pquicc->intr_cisr = (1 << vec); /* indicate that irq has been serviced */
-	} else {
-		printk(KERN_ERR "unregistered interrupt %d!\nTurning it off in the CIMR...\n", irq);
-		/* *(volatile unsigned long *)0xfffff304 |= mask; */
-		pquicc->intr_cimr &= ~(1 << vec);
-		num_spurious += 1;
-	}
-	return(IRQ_HANDLED);
-}
diff --git a/arch/mips/basler/excite/excite_setup.c b/arch/mips/basler/excite/excite_setup.c
index 2f0e4c0..5600318 100644
--- a/arch/mips/basler/excite/excite_setup.c
+++ b/arch/mips/basler/excite/excite_setup.c
@@ -26,6 +26,7 @@
 #include <linux/tty.h>
 #include <linux/serial_core.h>
 #include <linux/serial.h>
+#include <linux/serial_8250.h>
 #include <linux/ioport.h>
 #include <linux/spinlock.h>
 #include <asm/bootinfo.h>
diff --git a/arch/mips/gt64120/wrppmc/setup.c b/arch/mips/gt64120/wrppmc/setup.c
index ea96552..ed58c13 100644
--- a/arch/mips/gt64120/wrppmc/setup.c
+++ b/arch/mips/gt64120/wrppmc/setup.c
@@ -14,6 +14,7 @@
 #include <linux/tty.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 #include <linux/pm.h>
 
 #include <asm/io.h>
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index b5a7b46..893e7bc 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -174,17 +174,9 @@
 	switch (request) {
 	/* when I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
-	case PTRACE_PEEKDATA: {
-		unsigned long tmp;
-		int copied;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		ret = -EIO;
-		if (copied != sizeof(tmp))
-			break;
-		ret = put_user(tmp,(unsigned long __user *) data);
+	case PTRACE_PEEKDATA:
+		ret = generic_ptrace_peekdata(child, addr, data);
 		break;
-	}
 
 	/* Read the word at location addr in the USER area. */
 	case PTRACE_PEEKUSR: {
@@ -313,11 +305,7 @@
 	/* when I and D space are separate, this will have to be fixed. */
 	case PTRACE_POKETEXT: /* write the word at location addr. */
 	case PTRACE_POKEDATA:
-		ret = 0;
-		if (access_process_vm(child, addr, &data, sizeof(data), 1)
-		    == sizeof(data))
-			break;
-		ret = -EIO;
+		ret = generic_ptrace_pokedata(child, addr, data);
 		break;
 
 	case PTRACE_POKEUSR: {
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 37c562c..ce277cb 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -326,6 +326,7 @@
 #endif /* CONFIG_MIPS_MT_SMTC */
 	printk("%s[#%d]:\n", str, ++die_counter);
 	show_registers(regs);
+	add_taint(TAINT_DIE);
 	spin_unlock_irq(&die_lock);
 
 	if (in_interrupt())
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 9b9992c..bc9bae2 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -119,10 +119,7 @@
   .init.ramfs : { *(.init.ramfs) }
   __initramfs_end = .;
 #endif
-  . = ALIGN(_PAGE_SIZE);
-  __per_cpu_start = .;
-  .data.percpu  : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(_PAGE_SIZE)
   . = ALIGN(_PAGE_SIZE);
   __init_end = .;
   /* freed after init ends here */
diff --git a/arch/mips/mips-boards/atlas/atlas_setup.c b/arch/mips/mips-boards/atlas/atlas_setup.c
index 1cc6ebb..c68358a 100644
--- a/arch/mips/mips-boards/atlas/atlas_setup.c
+++ b/arch/mips/mips-boards/atlas/atlas_setup.c
@@ -22,6 +22,7 @@
 #include <linux/tty.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
diff --git a/arch/mips/mips-boards/sead/sead_setup.c b/arch/mips/mips-boards/sead/sead_setup.c
index bb80140..5f70eaf 100644
--- a/arch/mips/mips-boards/sead/sead_setup.c
+++ b/arch/mips/mips-boards/sead/sead_setup.c
@@ -23,6 +23,7 @@
 #include <linux/tty.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
diff --git a/arch/mips/mipssim/sim_setup.c b/arch/mips/mipssim/sim_setup.c
index 60e6690..17819b5 100644
--- a/arch/mips/mipssim/sim_setup.c
+++ b/arch/mips/mipssim/sim_setup.c
@@ -26,6 +26,7 @@
 #include <linux/tty.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 7ebea33..521771b 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -39,6 +39,7 @@
 	struct mm_struct *mm = tsk->mm;
 	const int field = sizeof(unsigned long) * 2;
 	siginfo_t info;
+	int fault;
 
 #if 0
 	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
@@ -102,20 +103,18 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_serial.c b/arch/mips/pmc-sierra/msp71xx/msp_serial.c
index c41b53f..e25bac5 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_serial.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_serial.c
@@ -32,6 +32,7 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/serial.h>
+#include <linux/serial_8250.h>
 
 #include <msp_prom.h>
 #include <msp_int.h>
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
index 6a6e15e..f7f93ae 100644
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ b/arch/mips/pmc-sierra/yosemite/setup.c
@@ -39,6 +39,7 @@
 #include <linux/tty.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 
 #include <asm/time.h>
 #include <asm/bootinfo.h>
diff --git a/arch/mips/sibyte/bcm1480/setup.c b/arch/mips/sibyte/bcm1480/setup.c
index bdaac34..89f2923 100644
--- a/arch/mips/sibyte/bcm1480/setup.c
+++ b/arch/mips/sibyte/bcm1480/setup.c
@@ -31,6 +31,7 @@
 unsigned int sb1_pass;
 unsigned int soc_pass;
 unsigned int soc_type;
+EXPORT_SYMBOL(soc_type);
 unsigned int periph_rev;
 unsigned int zbbus_mhz;
 
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index f4a6169..2d5c6d8b 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -31,6 +31,7 @@
 unsigned int sb1_pass;
 unsigned int soc_pass;
 unsigned int soc_type;
+EXPORT_SYMBOL(soc_type);
 unsigned int periph_rev;
 unsigned int zbbus_mhz;
 EXPORT_SYMBOL(zbbus_mhz);
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 8a0db37..26ec774 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -87,10 +87,9 @@
 	switch (request) {
 	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
 	case PTRACE_PEEKDATA: {
-		int copied;
-
 #ifdef CONFIG_64BIT
 		if (__is_compat_task(child)) {
+			int copied;
 			unsigned int tmp;
 
 			addr &= 0xffffffffL;
@@ -105,15 +104,7 @@
 		}
 		else
 #endif
-		{
-			unsigned long tmp;
-
-			copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-			ret = -EIO;
-			if (copied != sizeof(tmp))
-				goto out_tsk;
-			ret = put_user(tmp,(unsigned long *) data);
-		}
+			ret = generic_ptrace_peekdata(child, addr, data);
 		goto out_tsk;
 	}
 
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index f9bca2d..bbf029a 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -264,6 +264,7 @@
 
 	show_regs(regs);
 	dump_stack();
+	add_taint(TAINT_DIE);
 
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 3221677..cf780cb 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -242,7 +242,7 @@
 #ifdef CONFIG_KALLSYMS
 		/* Handle some frequent special cases.... */
 		{
-			char symname[KSYM_NAME_LEN+1];
+			char symname[KSYM_NAME_LEN];
 			char *modname;
 
 			kallsyms_lookup(info->ip, NULL, NULL, &modname,
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 4d96ba4..d4e6a93 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -181,10 +181,9 @@
   .init.ramfs : { *(.init.ramfs) }
   __initramfs_end = .;
 #endif
-  . = ALIGN(ASM_PAGE_SIZE);
-  __per_cpu_start = .;
-  .data.percpu  : { *(.data.percpu) }
-  __per_cpu_end = .;
+
+  PERCPU(ASM_PAGE_SIZE)
+
   . = ALIGN(ASM_PAGE_SIZE);
   __init_end = .;
   /* freed after init ends here */
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index f6f6755..7899ab8 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -147,6 +147,7 @@
 	struct mm_struct *mm = tsk->mm;
 	const struct exception_table_entry *fix;
 	unsigned long acc_type;
+	int fault;
 
 	if (in_atomic() || !mm)
 		goto no_context;
@@ -173,23 +174,23 @@
 	 * fault.
 	 */
 
-	switch (handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0)) {
-	      case VM_FAULT_MINOR:
-		++current->min_flt;
-		break;
-	      case VM_FAULT_MAJOR:
-		++current->maj_flt;
-		break;
-	      case VM_FAULT_SIGBUS:
+	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We hit a shared mapping outside of the file, or some
 		 * other thing happened to us that made us unable to
 		 * handle the page fault gracefully.
 		 */
-		goto bad_area;
-	      default:
-		goto out_of_memory;
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto bad_area;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	up_read(&mm->mmap_sem);
 	return;
 
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6b8b83e..d860b64 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -4,17 +4,7 @@
 
 mainmenu "Linux/PowerPC Kernel Configuration"
 
-config PPC64
-	bool "64-bit kernel"
-	default n
-	help
-	  This option selects whether a 32-bit or a 64-bit kernel
-	  will be built.
-
-config PPC_PM_NEEDS_RTC_LIB
-	bool
-	select RTC_LIB
-	default y if PM
+source "arch/powerpc/platforms/Kconfig.cputype"
 
 config PPC32
 	bool
@@ -135,123 +125,6 @@
 	depends on PPC64 && (BROKEN || (PPC_PMAC64 && EXPERIMENTAL))
 	default y
 
-menu "Processor support"
-choice
-	prompt "Processor Type"
-	depends on PPC32
-	default 6xx
-
-config CLASSIC32
-	bool "52xx/6xx/7xx/74xx"
-	select PPC_FPU
-	select 6xx
-	help
-	  There are four families of PowerPC chips supported.  The more common
-	  types (601, 603, 604, 740, 750, 7400), the Motorola embedded
-	  versions (821, 823, 850, 855, 860, 52xx, 82xx, 83xx), the AMCC
-	  embedded versions (403 and 405) and the high end 64 bit Power
-	  processors (POWER 3, POWER4, and IBM PPC970 also known as G5).
-
-	  This option is the catch-all for 6xx types, including some of the
-	  embedded versions.  Unless there is see an option for the specific
-	  chip family you are using, you want this option.
-	  
-	  You do not want this if you are building a kernel for a 64 bit
-	  IBM RS/6000 or an Apple G5, choose 6xx.
-	  
-	  If unsure, select this option
-	  
-	  Note that the kernel runs in 32-bit mode even on 64-bit chips.
-
-config PPC_82xx
-	bool "Freescale 82xx"
-	select 6xx
-	select PPC_FPU
-
-config PPC_83xx
-	bool "Freescale 83xx"
-	select 6xx
-	select FSL_SOC
-	select 83xx
-	select PPC_FPU
-	select WANT_DEVICE_TREE
-
-config PPC_85xx
-	bool "Freescale 85xx"
-	select E500
-	select FSL_SOC
-	select 85xx
-	select WANT_DEVICE_TREE
-
-config PPC_86xx
-	bool "Freescale 86xx"
-	select 6xx
-	select FSL_SOC
-	select FSL_PCIE
-	select PPC_FPU
-	select ALTIVEC
-	help
-	  The Freescale E600 SoCs have 74xx cores.
-
-config PPC_8xx
-	bool "Freescale 8xx"
-	select FSL_SOC
-	select 8xx
-
-config 40x
-	bool "AMCC 40x"
-	select PPC_DCR_NATIVE
-
-config 44x
-	bool "AMCC 44x"
-	select PPC_DCR_NATIVE
-	select WANT_DEVICE_TREE
-
-config E200
-	bool "Freescale e200"
-
-endchoice
-
-config POWER4_ONLY
-	bool "Optimize for POWER4"
-	depends on PPC64
-	default n
-	---help---
-	  Cause the compiler to optimize for POWER4/POWER5/PPC970 processors.
-	  The resulting binary will not work on POWER3 or RS64 processors
-	  when compiled with binutils 2.15 or later.
-
-config POWER3
-	bool
-	depends on PPC64
-	default y if !POWER4_ONLY
-
-config POWER4
-	depends on PPC64
-	def_bool y
-
-config 6xx
-	bool
-
-# this is temp to handle compat with arch=ppc
-config 8xx
-	bool
-
-# this is temp to handle compat with arch=ppc
-config 83xx
-	bool
-
-# this is temp to handle compat with arch=ppc
-config 85xx
-	bool
-
-config E500
-	bool
-
-config PPC_FPU
-	bool
-	default y if PPC64
-
 config PPC_DCR_NATIVE
 	bool
 	default n
@@ -270,134 +143,6 @@
 	depends on PPC64 # not supported on 32 bits yet
 	default n
 
-config 4xx
-	bool
-	depends on 40x || 44x
-	default y
-
-config BOOKE
-	bool
-	depends on E200 || E500 || 44x
-	default y
-
-config FSL_BOOKE
-	bool
-	depends on E200 || E500
-	default y
-
-config PTE_64BIT
-	bool
-	depends on 44x || E500
-	default y if 44x
-	default y if E500 && PHYS_64BIT
-
-config PHYS_64BIT
-	bool 'Large physical address support' if E500
-	depends on 44x || E500
-	select RESOURCES_64BIT
-	default y if 44x
-	---help---
-	  This option enables kernel support for larger than 32-bit physical
-	  addresses.  This features is not be available on all e500 cores.
-
-	  If in doubt, say N here.
-
-config ALTIVEC
-	bool "AltiVec Support"
-	depends on CLASSIC32 || POWER4
-	---help---
-	  This option enables kernel support for the Altivec extensions to the
-	  PowerPC processor. The kernel currently supports saving and restoring
-	  altivec registers, and turning on the 'altivec enable' bit so user
-	  processes can execute altivec instructions.
-
-	  This option is only usefully if you have a processor that supports
-	  altivec (G4, otherwise known as 74xx series), but does not have
-	  any affect on a non-altivec cpu (it does, however add code to the
-	  kernel).
-
-	  If in doubt, say Y here.
-
-config SPE
-	bool "SPE Support"
-	depends on E200 || E500
-	default y
-	---help---
-	  This option enables kernel support for the Signal Processing
-	  Extensions (SPE) to the PowerPC processor. The kernel currently
-	  supports saving and restoring SPE registers, and turning on the
-	  'spe enable' bit so user processes can execute SPE instructions.
-
-	  This option is only useful if you have a processor that supports
-	  SPE (e500, otherwise known as 85xx series), but does not have any
-	  effect on a non-spe cpu (it does, however add code to the kernel).
-
-	  If in doubt, say Y here.
-
-config PPC_STD_MMU
-	bool
-	depends on 6xx || POWER3 || POWER4 || PPC64
-	default y
-
-config PPC_STD_MMU_32
-	def_bool y
-	depends on PPC_STD_MMU && PPC32
-
-config PPC_MM_SLICES
-	bool
-	default y if HUGETLB_PAGE
-	default n
-
-config VIRT_CPU_ACCOUNTING
-	bool "Deterministic task and CPU time accounting"
-	depends on PPC64
-	default y
-	help
-	  Select this option to enable more accurate task and CPU time
-	  accounting.  This is done by reading a CPU counter on each
-	  kernel entry and exit and on transitions within the kernel
-	  between system, softirq and hardirq state, so there is a
-	  small performance impact.  This also enables accounting of
-	  stolen time on logically-partitioned systems running on
-	  IBM POWER5-based machines.
-
-	  If in doubt, say Y here.
-
-config SMP
-	depends on PPC_STD_MMU
-	bool "Symmetric multi-processing support"
-	---help---
-	  This enables support for systems with more than one CPU. If you have
-	  a system with only one CPU, say N. If you have a system with more
-	  than one CPU, say Y.  Note that the kernel does not currently
-	  support SMP machines with 603/603e/603ev or PPC750 ("G3") processors
-	  since they have inadequate hardware support for multiprocessor
-	  operation.
-
-	  If you say N here, the kernel will run on single and multiprocessor
-	  machines, but will use only one CPU of a multiprocessor machine. If
-	  you say Y here, the kernel will run on single-processor machines.
-	  On a single-processor machine, the kernel will run faster if you say
-	  N here.
-
-	  If you don't know what to do here, say N.
-
-config NR_CPUS
-	int "Maximum number of CPUs (2-128)"
-	range 2 128
-	depends on SMP
-	default "32" if PPC64
-	default "4"
-
-config NOT_COHERENT_CACHE
-	bool
-	depends on 4xx || 8xx || E200
-	default y
-
-config CONFIG_CHECK_CACHE_COHERENCY
-	bool
-endmenu
-
 source "init/Kconfig"
 
 source "arch/powerpc/platforms/Kconfig"
@@ -677,10 +422,6 @@
 config FSL_SOC
 	bool
 
-config FSL_PCIE
-	bool
-	depends on PPC_86xx
-
 # Yes MCA RS/6000s exist but Linux-PPC does not currently support any
 config MCA
 	bool
@@ -688,10 +429,10 @@
 config PCI
 	bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_86xx \
 		|| PPC_MPC52xx || (EMBEDDED && (PPC_PSERIES || PPC_ISERIES)) \
-		|| MPC7448HPC2 || PPC_PS3 || PPC_HOLLY
-	default y if !40x && !CPM2 && !8xx && !APUS && !PPC_83xx \
+		|| PPC_PS3
+	default y if !40x && !CPM2 && !8xx && !PPC_83xx \
 		&& !PPC_85xx && !PPC_86xx
-	default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
+	default PCI_PERMEDIA if !4xx && !CPM2 && !8xx
 	default PCI_QSPAN if !4xx && !CPM2 && 8xx
 	select ARCH_SUPPORTS_MSI
 	help
@@ -899,8 +640,8 @@
 source "arch/powerpc/oprofile/Kconfig"
 
 config KPROBES
-	bool "Kprobes (EXPERIMENTAL)"
-	depends on !BOOKE && !4xx && KALLSYMS && EXPERIMENTAL && MODULES
+	bool "Kprobes"
+	depends on !BOOKE && !4xx && KALLSYMS && MODULES
 	help
 	  Kprobes allows you to trap at almost any kernel address and
 	  execute a callback function.  register_kprobe() establishes
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 85be605..6c1e36c 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -148,7 +148,7 @@
 
 CPPFLAGS_vmlinux.lds	:= -Upowerpc
 
-BOOT_TARGETS = zImage zImage.initrd zImage.dts zImage.dts_initrd uImage
+BOOT_TARGETS = zImage zImage.initrd uImage
 
 PHONY += $(BOOT_TARGETS)
 
diff --git a/arch/powerpc/boot/44x.c b/arch/powerpc/boot/44x.c
index d51377d..9f64e84 100644
--- a/arch/powerpc/boot/44x.c
+++ b/arch/powerpc/boot/44x.c
@@ -38,3 +38,48 @@
 
 	dt_fixup_memory(0, memsize);
 }
+
+#define SPRN_DBCR0		0x134
+#define   DBCR0_RST_SYSTEM	0x30000000
+
+void ibm44x_dbcr_reset(void)
+{
+	unsigned long tmp;
+
+	asm volatile (
+		"mfspr	%0,%1\n"
+		"oris	%0,%0,%2@h\n"
+		"mtspr	%1,%0"
+		: "=&r"(tmp) : "i"(SPRN_DBCR0), "i"(DBCR0_RST_SYSTEM)
+		);
+
+}
+
+/* Read 4xx EBC bus bridge registers to get mappings of the peripheral
+ * banks into the OPB address space */
+void ibm4xx_fixup_ebc_ranges(const char *ebc)
+{
+	void *devp;
+	u32 bxcr;
+	u32 ranges[EBC_NUM_BANKS*4];
+	u32 *p = ranges;
+	int i;
+
+	for (i = 0; i < EBC_NUM_BANKS; i++) {
+		mtdcr(DCRN_EBC0_CFGADDR, EBC_BXCR(i));
+		bxcr = mfdcr(DCRN_EBC0_CFGDATA);
+
+		if ((bxcr & EBC_BXCR_BU) != EBC_BXCR_BU_OFF) {
+			*p++ = i;
+			*p++ = 0;
+			*p++ = bxcr & EBC_BXCR_BAS;
+			*p++ = EBC_BXCR_BANK_SIZE(bxcr);
+		}
+	}
+
+	devp = finddevice(ebc);
+	if (! devp)
+		fatal("Couldn't locate EBC node %s\n\r", ebc);
+
+	setprop(devp, "ranges", ranges, (p - ranges) * sizeof(u32));
+}
diff --git a/arch/powerpc/boot/44x.h b/arch/powerpc/boot/44x.h
index 7b129ad..577982c 100644
--- a/arch/powerpc/boot/44x.h
+++ b/arch/powerpc/boot/44x.h
@@ -11,6 +11,9 @@
 #define _PPC_BOOT_44X_H_
 
 void ibm44x_fixup_memsize(void);
+void ibm4xx_fixup_ebc_ranges(const char *ebc);
+
+void ibm44x_dbcr_reset(void);
 void ebony_init(void *mac0, void *mac1);
 
 #endif /* _PPC_BOOT_44X_H_ */
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index ff27019..61a6f34 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -43,10 +43,11 @@
 
 src-wlib := string.S crt0.S stdio.c main.c flatdevtree.c flatdevtree_misc.c \
 		ns16550.c serial.c simple_alloc.c div64.S util.S \
-		gunzip_util.c elf_util.c $(zlib) devtree.c \
-		44x.c ebony.c mv64x60.c mpsc.c mv64x60_i2c.c
+		gunzip_util.c elf_util.c $(zlib) devtree.c oflib.c ofconsole.c \
+		44x.c ebony.c mv64x60.c mpsc.c mv64x60_i2c.c cuboot.c
 src-plat := of.c cuboot-83xx.c cuboot-85xx.c holly.c \
-		cuboot-ebony.c treeboot-ebony.c prpmc2800.c
+		cuboot-ebony.c treeboot-ebony.c prpmc2800.c \
+		ps3-head.S ps3-hvcall.S ps3.c
 src-boot := $(src-wlib) $(src-plat) empty.c
 
 src-boot := $(addprefix $(obj)/, $(src-boot))
@@ -75,11 +76,11 @@
 $(obj)/empty.c:
 	@touch $@
 
-$(obj)/zImage.lds $(obj)/zImage.coff.lds: $(obj)/%: $(srctree)/$(src)/%.S
+$(obj)/zImage.lds $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds: $(obj)/%: $(srctree)/$(src)/%.S
 	@cp $< $@
 
 clean-files := $(zlib) $(zlibheader) $(zliblinuxheader) \
-		empty.c zImage.coff.lds zImage.lds
+		empty.c zImage zImage.coff.lds zImage.ps3.lds zImage.lds
 
 quiet_cmd_bootcc = BOOTCC  $@
       cmd_bootcc = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $<
@@ -102,7 +103,7 @@
 
 targets		+= $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a)
 extra-y		:= $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
-		   $(obj)/zImage.lds $(obj)/zImage.coff.lds
+		   $(obj)/zImage.lds $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds
 
 wrapper		:=$(srctree)/$(src)/wrapper
 wrapperbits	:= $(extra-y) $(addprefix $(obj)/,addnote hack-coff mktree) \
@@ -132,7 +133,7 @@
 image-$(CONFIG_PPC_CHRP)		+= zImage.chrp
 image-$(CONFIG_PPC_EFIKA)		+= zImage.chrp
 image-$(CONFIG_PPC_PMAC)		+= zImage.pmac
-image-$(CONFIG_PPC_HOLLY)		+= zImage.holly-elf
+image-$(CONFIG_PPC_HOLLY)		+= zImage.holly
 image-$(CONFIG_PPC_PRPMC2800)		+= zImage.prpmc2800
 image-$(CONFIG_PPC_ISERIES)		+= zImage.iseries
 image-$(CONFIG_DEFAULT_UIMAGE)		+= uImage
@@ -157,55 +158,43 @@
 
 $(addprefix $(obj)/, $(initrd-y)): $(obj)/ramdisk.image.gz
 
-dts-  := $(patsubst zImage%, zImage.dts%, $(image-n) $(image-))
-dts-y := $(patsubst zImage%, zImage.dts%, $(image-y))
-dts-y := $(filter-out $(image-y), $(dts-y))
-targets	+= $(image-y) $(dts-y)
-
-dts_initrd-  := $(patsubst zImage%, zImage.dts_initrd%, $(image-n) $(image-))
-dts_initrd-y := $(patsubst zImage%, zImage.dts_initrd%, $(image-y))
-dts_initrd-y := $(filter-out $(image-y), $(dts_initrd-y))
-targets	+= $(image-y) $(dts_initrd-y)
-
-$(addprefix $(obj)/, $(dts_initrd-y)): $(obj)/ramdisk.image.gz
+# If CONFIG_WANT_DEVICE_TREE is set and CONFIG_DEVICE_TREE isn't an
+# empty string, define 'dts' to be the path to the dts file.
+# CONFIG_DEVICE_TREE will have "" around it, so make sure to strip them
+ifeq ($(CONFIG_WANT_DEVICE_TREE),y)
+ifneq ($(CONFIG_DEVICE_TREE),"")
+dts = $(if $(shell echo $(CONFIG_DEVICE_TREE) | grep '^/'),\
+	,$(srctree)/$(src)/dts/)$(CONFIG_DEVICE_TREE:"%"=%)
+endif
+endif
 
 # Don't put the ramdisk on the pattern rule; when its missing make will try
 # the pattern rule with less dependencies that also matches (even with the
 # hard dependency listed).
-$(obj)/zImage.dts_initrd.%: vmlinux $(wrapperbits) $(dts) $(obj)/ramdisk.image.gz
+$(obj)/zImage.initrd.%: vmlinux $(wrapperbits) $(dts)
 	$(call if_changed,wrap,$*,$(dts),,$(obj)/ramdisk.image.gz)
 
-$(obj)/zImage.dts.%: vmlinux $(wrapperbits) $(dts)
+$(obj)/zImage.%: vmlinux $(wrapperbits) $(dts)
 	$(call if_changed,wrap,$*,$(dts))
 
-$(obj)/zImage.initrd.%: vmlinux $(wrapperbits)
-	$(call if_changed,wrap,$*,,,$(obj)/ramdisk.image.gz)
-
-$(obj)/zImage.%: vmlinux $(wrapperbits)
-	$(call if_changed,wrap,$*)
+# This cannot be in the root of $(src) as the zImage rule always adds a $(obj)
+# prefix
+$(obj)/vmlinux.strip: vmlinux
+	$(STRIP) -s -R .comment $< -o $@
 
 $(obj)/zImage.iseries: vmlinux
 	$(STRIP) -s -R .comment $< -o $@
 
-$(obj)/zImage.ps3: vmlinux
-	$(STRIP) -s -R .comment $< -o $@
+$(obj)/zImage.ps3: vmlinux  $(wrapper) $(wrapperbits) $(srctree)/$(src)/dts/ps3.dts
+	$(STRIP) -s -R .comment $< -o vmlinux.strip
+	$(call cmd,wrap,ps3,$(srctree)/$(src)/dts/ps3.dts,,)
 
-$(obj)/zImage.initrd.ps3: vmlinux
-	@echo "  WARNING zImage.initrd.ps3 not supported (yet)"
-
-$(obj)/zImage.holly-elf: vmlinux $(wrapperbits)
-	$(call if_changed,wrap,holly,$(obj)/dts/holly.dts,,)
-
-$(obj)/zImage.initrd.holly-elf: vmlinux $(wrapperbits) $(obj)/ramdisk.image.gz
-	$(call if_changed,wrap,holly,$(obj)/dts/holly.dts,,$(obj)/ramdisk.image.gz)
+$(obj)/zImage.initrd.ps3: vmlinux  $(wrapper) $(wrapperbits) $(srctree)/$(src)/dts/ps3.dts $(obj)/ramdisk.image.gz
+	$(call cmd,wrap,ps3,$(srctree)/$(src)/dts/ps3.dts,,$(obj)/ramdisk.image.gz)
 
 $(obj)/uImage: vmlinux $(wrapperbits)
 	$(call if_changed,wrap,uboot)
 
-# CONFIG_DEVICE_TREE will have "" around it, make sure to strip them
-dts = $(if $(shell echo $(CONFIG_DEVICE_TREE) | grep '^/'),\
-	,$(srctree)/$(src)/dts/)$(CONFIG_DEVICE_TREE:"%"=%)
-
 $(obj)/cuImage.%: vmlinux $(dts) $(wrapperbits)
 	$(call if_changed,wrap,cuboot-$*,$(dts))
 
@@ -215,22 +204,22 @@
 $(obj)/treeImage.%: vmlinux $(dts) $(wrapperbits)
 	$(call if_changed,wrap,treeboot-$*,$(dts))
 
+# If there isn't a platform selected then just strip the vmlinux.
+ifeq (,$(image-y))
+image-y := vmlinux.strip
+endif
+
 $(obj)/zImage:		$(addprefix $(obj)/, $(image-y))
 	@rm -f $@; ln $< $@
 $(obj)/zImage.initrd:	$(addprefix $(obj)/, $(initrd-y))
 	@rm -f $@; ln $< $@
-$(obj)/zImage.dts:	$(addprefix $(obj)/, $(dts-y))
-	@rm -f $@; ln $< $@
-$(obj)/zImage.dts_initrd:	$(addprefix $(obj)/, $(dts_initrd-y))
-	@rm -f $@; ln $< $@
-
 
 install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
 	sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $<
 
 # anything not in $(targets)
-clean-files += $(image-) $(initrd-) zImage zImage.initrd cuImage.* \
-	treeImage.* zImage.dts zImage.dts_initrd
+clean-files += $(image-) $(initrd-) zImage zImage.initrd cuImage.* treeImage.* \
+	otheros.bld
 
 # clean up files cached by wrapper
 clean-kernel := vmlinux.strip vmlinux.bin
diff --git a/arch/powerpc/boot/cuboot-83xx.c b/arch/powerpc/boot/cuboot-83xx.c
index 9af554e..296025d 100644
--- a/arch/powerpc/boot/cuboot-83xx.c
+++ b/arch/powerpc/boot/cuboot-83xx.c
@@ -12,12 +12,12 @@
 
 #include "ops.h"
 #include "stdio.h"
+#include "cuboot.h"
 
 #define TARGET_83xx
 #include "ppcboot.h"
 
 static bd_t bd;
-extern char _end[];
 extern char _dtb_start[], _dtb_end[];
 
 static void platform_fixups(void)
@@ -52,16 +52,7 @@
 void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
                    unsigned long r6, unsigned long r7)
 {
-	unsigned long end_of_ram = bd.bi_memstart + bd.bi_memsize;
-	unsigned long avail_ram = end_of_ram - (unsigned long)_end;
-
-	memcpy(&bd, (bd_t *)r3, sizeof(bd));
-	loader_info.initrd_addr = r4;
-	loader_info.initrd_size = r4 ? r5 - r4 : 0;
-	loader_info.cmdline = (char *)r6;
-	loader_info.cmdline_len = r7 - r6;
-
-	simple_alloc_init(_end, avail_ram - 1024*1024, 32, 64);
+	CUBOOT_INIT();
 	ft_init(_dtb_start, _dtb_end - _dtb_start, 32);
 	serial_console_init();
 	platform_ops.fixups = platform_fixups;
diff --git a/arch/powerpc/boot/cuboot-85xx.c b/arch/powerpc/boot/cuboot-85xx.c
index e256031..10f0f69 100644
--- a/arch/powerpc/boot/cuboot-85xx.c
+++ b/arch/powerpc/boot/cuboot-85xx.c
@@ -12,12 +12,12 @@
 
 #include "ops.h"
 #include "stdio.h"
+#include "cuboot.h"
 
 #define TARGET_85xx
 #include "ppcboot.h"
 
 static bd_t bd;
-extern char _end[];
 extern char _dtb_start[], _dtb_end[];
 
 static void platform_fixups(void)
@@ -53,16 +53,7 @@
 void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
                    unsigned long r6, unsigned long r7)
 {
-	unsigned long end_of_ram = bd.bi_memstart + bd.bi_memsize;
-	unsigned long avail_ram = end_of_ram - (unsigned long)_end;
-
-	memcpy(&bd, (bd_t *)r3, sizeof(bd));
-	loader_info.initrd_addr = r4;
-	loader_info.initrd_size = r4 ? r5 - r4 : 0;
-	loader_info.cmdline = (char *)r6;
-	loader_info.cmdline_len = r7 - r6;
-
-	simple_alloc_init(_end, avail_ram - 1024*1024, 32, 64);
+	CUBOOT_INIT();
 	ft_init(_dtb_start, _dtb_end - _dtb_start, 32);
 	serial_console_init();
 	platform_ops.fixups = platform_fixups;
diff --git a/arch/powerpc/boot/cuboot-ebony.c b/arch/powerpc/boot/cuboot-ebony.c
index 4464c5f..c5f37ce 100644
--- a/arch/powerpc/boot/cuboot-ebony.c
+++ b/arch/powerpc/boot/cuboot-ebony.c
@@ -15,28 +15,16 @@
 #include "ops.h"
 #include "stdio.h"
 #include "44x.h"
+#include "cuboot.h"
 
 #define TARGET_44x
 #include "ppcboot.h"
 
 static bd_t bd;
-extern char _end[];
-
-BSS_STACK(4096);
 
 void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
                    unsigned long r6, unsigned long r7)
 {
-	unsigned long end_of_ram = bd.bi_memstart + bd.bi_memsize;
-	unsigned long avail_ram = end_of_ram - (unsigned long)_end;
-
-	memcpy(&bd, (bd_t *)r3, sizeof(bd));
-	loader_info.initrd_addr = r4;
-	loader_info.initrd_size = r4 ? r5 : 0;
-	loader_info.cmdline = (char *)r6;
-	loader_info.cmdline_len = r7 - r6;
-
-	simple_alloc_init(_end, avail_ram, 32, 64);
-
+	CUBOOT_INIT();
 	ebony_init(&bd.bi_enetaddr, &bd.bi_enet1addr);
 }
diff --git a/arch/powerpc/boot/cuboot.c b/arch/powerpc/boot/cuboot.c
new file mode 100644
index 0000000..6579546
--- /dev/null
+++ b/arch/powerpc/boot/cuboot.c
@@ -0,0 +1,35 @@
+/*
+ * Compatibility for old (not device tree aware) U-Boot versions
+ *
+ * Author: Scott Wood <scottwood@freescale.com>
+ * Consolidated using macros by David Gibson <david@gibson.dropbear.id.au>
+ *
+ * Copyright 2007 David Gibson, IBM Corporation.
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include "ops.h"
+#include "stdio.h"
+
+#include "ppcboot.h"
+
+extern char _end[];
+extern char _dtb_start[], _dtb_end[];
+
+void cuboot_init(unsigned long r4, unsigned long r5,
+		 unsigned long r6, unsigned long r7,
+		 unsigned long end_of_ram)
+{
+	unsigned long avail_ram = end_of_ram - (unsigned long)_end;
+
+	loader_info.initrd_addr = r4;
+	loader_info.initrd_size = r4 ? r5 - r4 : 0;
+	loader_info.cmdline = (char *)r6;
+	loader_info.cmdline_len = r7 - r6;
+
+	simple_alloc_init(_end, avail_ram - 1024*1024, 32, 64);
+}
diff --git a/arch/powerpc/boot/cuboot.h b/arch/powerpc/boot/cuboot.h
new file mode 100644
index 0000000..cd2aa7f
--- /dev/null
+++ b/arch/powerpc/boot/cuboot.h
@@ -0,0 +1,14 @@
+#ifndef _PPC_BOOT_CUBOOT_H_
+#define _PPC_BOOT_CUBOOT_H_
+
+void cuboot_init(unsigned long r4, unsigned long r5,
+		 unsigned long r6, unsigned long r7,
+		 unsigned long end_of_ram);
+
+#define CUBOOT_INIT() \
+	do { \
+		memcpy(&bd, (bd_t *)r3, sizeof(bd)); \
+		cuboot_init(r4, r5, r6, r7, bd.bi_memstart + bd.bi_memsize); \
+	} while (0)
+
+#endif /* _PPC_BOOT_CUBOOT_H_ */
diff --git a/arch/powerpc/boot/dcr.h b/arch/powerpc/boot/dcr.h
index 877bc97..14b44aa 100644
--- a/arch/powerpc/boot/dcr.h
+++ b/arch/powerpc/boot/dcr.h
@@ -26,6 +26,43 @@
 #define			SDRAM_CONFIG_BANK_SIZE(reg)	\
 	(0x00400000 << ((reg & SDRAM_CONFIG_SIZE_MASK) >> 17))
 
+/* 440GP External Bus Controller (EBC) */
+#define DCRN_EBC0_CFGADDR				0x012
+#define DCRN_EBC0_CFGDATA				0x013
+#define   EBC_NUM_BANKS					  8
+#define   EBC_B0CR					  0x00
+#define   EBC_B1CR					  0x01
+#define   EBC_B2CR					  0x02
+#define   EBC_B3CR					  0x03
+#define   EBC_B4CR					  0x04
+#define   EBC_B5CR					  0x05
+#define   EBC_B6CR					  0x06
+#define   EBC_B7CR					  0x07
+#define   EBC_BXCR(n)					  (n)
+#define	    EBC_BXCR_BAS				    0xfff00000
+#define	    EBC_BXCR_BS				  	    0x000e0000
+#define	    EBC_BXCR_BANK_SIZE(reg) \
+	(0x100000 << (((reg) & EBC_BXCR_BS) >> 17))
+#define	    EBC_BXCR_BU				  	    0x00018000
+#define	      EBC_BXCR_BU_OFF			  	      0x00000000
+#define	      EBC_BXCR_BU_RO			  	      0x00008000
+#define	      EBC_BXCR_BU_WO			  	      0x00010000
+#define	      EBC_BXCR_BU_RW			  	      0x00018000
+#define	    EBC_BXCR_BW				  	    0x00006000
+#define   EBC_B0AP					  0x10
+#define   EBC_B1AP					  0x11
+#define   EBC_B2AP					  0x12
+#define   EBC_B3AP					  0x13
+#define   EBC_B4AP					  0x14
+#define   EBC_B5AP					  0x15
+#define   EBC_B6AP					  0x16
+#define   EBC_B7AP					  0x17
+#define   EBC_BXAP(n)					  (0x10+(n))
+#define   EBC_BEAR					  0x20
+#define   EBC_BESR					  0x21
+#define   EBC_CFG					  0x23
+#define   EBC_CID					  0x24
+
 /* 440GP Clock, PM, chip control */
 #define DCRN_CPC0_SR					0x0b0
 #define DCRN_CPC0_ER					0x0b1
diff --git a/arch/powerpc/boot/dts/ebony.dts b/arch/powerpc/boot/dts/ebony.dts
index 0ec02f4..c5f9961 100644
--- a/arch/powerpc/boot/dts/ebony.dts
+++ b/arch/powerpc/boot/dts/ebony.dts
@@ -31,8 +31,8 @@
 			reg = <0>;
 			clock-frequency = <0>; // Filled in by zImage
 			timebase-frequency = <0>; // Filled in by zImage
-			i-cache-line-size = <32>;
-			d-cache-line-size = <32>;
+			i-cache-line-size = <20>;
+			d-cache-line-size = <20>;
 			i-cache-size = <8000>; /* 32 kB */
 			d-cache-size = <8000>; /* 32 kB */
 			dcr-controller;
@@ -135,11 +135,9 @@
 				#address-cells = <2>;
 				#size-cells = <1>;
 				clock-frequency = <0>; // Filled in by zImage
-				ranges = <0 00000000 fff00000 100000
-					  1 00000000 48000000 100000
-					  2 00000000 ff800000 400000
-					  3 00000000 48200000 100000
-					  7 00000000 48300000 100000>;
+				// ranges property is supplied by zImage
+				// based on firmware's configuration of the
+				// EBC bridge
 				interrupts = <5 4>;
 				interrupt-parent = <&UIC1>;
 
diff --git a/arch/powerpc/boot/dts/holly.dts b/arch/powerpc/boot/dts/holly.dts
index 254499b..80a4fab 100644
--- a/arch/powerpc/boot/dts/holly.dts
+++ b/arch/powerpc/boot/dts/holly.dts
@@ -46,7 +46,7 @@
 
   	tsi109@c0000000 {
 		device_type = "tsi-bridge";
-		compatible = "tsi-bridge";
+		compatible = "tsi109-bridge", "tsi108-bridge";
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges = <00000000 c0000000 00010000>;
@@ -54,52 +54,55 @@
 
 		i2c@7000 {
 			device_type = "i2c";
-			compatible  = "tsi-i2c";
-			interrupt-parent = < &/tsi109@c0000000/pic@7400 >;
+			compatible  = "tsi109-i2c", "tsi108-i2c";
+			interrupt-parent = <&MPIC>;
 			interrupts = <e 2>;
 			reg = <7000 400>;
 		};
 
-		mdio@6000 {
+		MDIO: mdio@6000 {
 			device_type = "mdio";
-			compatible = "tsi-ethernet";
+			compatible = "tsi109-mdio", "tsi108-mdio";
+			reg = <6000 50>;
+			#address-cells = <1>;
+			#size-cells = <0>;
 
-			PHY1: ethernet-phy@6000 {
-				device_type = "ethernet-phy";
-				compatible = "bcm54xx";
-				reg = <6000 50>;
-				phy-id = <1>;
+			PHY1: ethernet-phy@1 {
+				compatible = "bcm5461a";
+				reg = <1>;
+				txc-rxc-delay-disable;
 			};
 
-			PHY2: ethernet-phy@6400 {
-				device_type = "ethernet-phy";
-				compatible = "bcm54xx";
-				reg = <6000 50>;
-				phy-id = <2>;
+			PHY2: ethernet-phy@2 {
+				compatible = "bcm5461a";
+				reg = <2>;
+				txc-rxc-delay-disable;
 			};
 		};
 
 		ethernet@6200 {
 			device_type = "network";
-			compatible = "tsi-ethernet";
+			compatible = "tsi109-ethernet", "tsi108-ethernet";
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <6000 200>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupt-parent = < &/tsi109@c0000000/pic@7400 >;
+			interrupt-parent = <&MPIC>;
 			interrupts = <10 2>;
+			mdio-handle = <&MDIO>;
 			phy-handle = <&PHY1>;
 		};
 
 		ethernet@6600 {
 			device_type = "network";
-			compatible = "tsi-ethernet";
+			compatible = "tsi109-ethernet", "tsi108-ethernet";
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <6400 200>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupt-parent = < &/tsi109@c0000000/pic@7400 >;
+			interrupt-parent = <&MPIC>;
 			interrupts = <11 2>;
+			mdio-handle = <&MDIO>;
 			phy-handle = <&PHY2>;
 		};
 
@@ -110,7 +113,7 @@
 			virtual-reg = <c0007808>;
 			clock-frequency = <3F9C6000>;
 			current-speed = <1c200>;
-			interrupt-parent = < &/tsi109@c0000000/pic@7400 >;
+			interrupt-parent = <&MPIC>;
 			interrupts = <c 2>;
 		};
 
@@ -121,7 +124,7 @@
 			virtual-reg = <c0007c08>;
 			clock-frequency = <3F9C6000>;
 			current-speed = <1c200>;
-			interrupt-parent = < &/tsi109@c0000000/pic@7400 >;
+			interrupt-parent = <&MPIC>;
 			interrupts = <d 2>;
 		};
 
@@ -136,7 +139,7 @@
 
 		pci@1000 {
 			device_type = "pci";
-			compatible = "tsi109";
+			compatible = "tsi109-pci", "tsi108-pci";
 			#interrupt-cells = <1>;
 			#size-cells = <2>;
 			#address-cells = <3>;
@@ -150,7 +153,7 @@
 			ranges = <02000000 0 40000000 40000000 0 10000000
 				  01000000 0 00000000 7e000000 0 00010000>;
 			clock-frequency = <7f28154>;
-			interrupt-parent = < &/tsi109@c0000000/pic@7400 >;
+			interrupt-parent = <&MPIC>;
 			interrupts = <17 2>;
 			interrupt-map-mask = <f800 0 0 7>;
 			/*----------------------------------------------------+
@@ -186,13 +189,12 @@
  				#address-cells = <0>;
  				#interrupt-cells = <2>;
  				interrupts = <17 2>;
-				interrupt-parent = < &/tsi109@c0000000/pic@7400 >;
+				interrupt-parent = <&MPIC>;
 			};
 		};
 	};
 
 	chosen {
 		linux,stdout-path = "/tsi109@c0000000/serial@7808";
-		bootargs = "console=ttyS0,115200";
 	};
 };
diff --git a/arch/powerpc/boot/dts/mpc7448hpc2.dts b/arch/powerpc/boot/dts/mpc7448hpc2.dts
index 765c306..0e3d314 100644
--- a/arch/powerpc/boot/dts/mpc7448hpc2.dts
+++ b/arch/powerpc/boot/dts/mpc7448hpc2.dts
@@ -45,7 +45,7 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 		#interrupt-cells = <2>;
-		device_type = "tsi-bridge";
+		device_type = "tsi108-bridge";
 		ranges = <00000000 c0000000 00010000>;
 		reg = <c0000000 00010000>;
 		bus-frequency = <0>;
@@ -55,27 +55,26 @@
 			interrupts = <E 0>;
 			reg = <7000 400>;
 			device_type = "i2c";
-			compatible  = "tsi-i2c";
+			compatible  = "tsi108-i2c";
 		};
 
-		mdio@6000 {
+		MDIO: mdio@6000 {
 			device_type = "mdio";
-			compatible = "tsi-ethernet";
+			compatible = "tsi108-mdio";
+			reg = <6000 50>;
+			#address-cells = <1>;
+			#size-cells = <0>;
 
-			phy8: ethernet-phy@6000 {
+			phy8: ethernet-phy@8 {
 				interrupt-parent = <&mpic>;
 				interrupts = <2 1>;
-				reg = <6000 50>;
-				phy-id = <8>;
-				device_type = "ethernet-phy";
+				reg = <8>;
 			};
 
-			phy9: ethernet-phy@6400 {
+			phy9: ethernet-phy@9 {
 				interrupt-parent = <&mpic>;
 				interrupts = <2 1>;
-				reg = <6000 50>;
-				phy-id = <9>;
-				device_type = "ethernet-phy";
+				reg = <9>;
 			};
 
 		};
@@ -83,12 +82,12 @@
 		ethernet@6200 {
 			#size-cells = <0>;
 			device_type = "network";
-			model = "TSI-ETH";
-			compatible = "tsi-ethernet";
+			compatible = "tsi108-ethernet";
 			reg = <6000 200>;
 			address = [ 00 06 D2 00 00 01 ];
 			interrupts = <10 2>;
 			interrupt-parent = <&mpic>;
+			mdio-handle = <&MDIO>;
 			phy-handle = <&phy8>;
 		};
 
@@ -96,12 +95,12 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			device_type = "network";
-			model = "TSI-ETH";
-			compatible = "tsi-ethernet";
+			compatible = "tsi108-ethernet";
 			reg = <6400 200>;
 			address = [ 00 06 D2 00 00 02 ];
 			interrupts = <11 2>;
 			interrupt-parent = <&mpic>;
+			mdio-handle = <&MDIO>;
 			phy-handle = <&phy9>;
 		};
 
@@ -135,7 +134,7 @@
                        	big-endian;
 		};
 		pci@1000 {
-			compatible = "tsi10x";
+			compatible = "tsi108-pci";
 			device_type = "pci";
 			#interrupt-cells = <1>;
 			#size-cells = <2>;
diff --git a/arch/powerpc/boot/dts/mpc8272ads.dts b/arch/powerpc/boot/dts/mpc8272ads.dts
index 423eedc..1934b80 100644
--- a/arch/powerpc/boot/dts/mpc8272ads.dts
+++ b/arch/powerpc/boot/dts/mpc8272ads.dts
@@ -14,12 +14,10 @@
        compatible = "MPC8260ADS";
        #address-cells = <1>;
        #size-cells = <1>;
-       linux,phandle = <100>;
 
        cpus {
                #address-cells = <1>;
                #size-cells = <0>;
-               linux,phandle = <200>;
 
                PowerPC,8272@0 {
                        device_type = "cpu";
@@ -32,12 +30,10 @@
                        bus-frequency = <0>;
                        clock-frequency = <0>;
                        32-bit;
-                       linux,phandle = <201>;
                };
        };
 
-       interrupt-controller@f8200000 {
-               linux,phandle = <f8200000>;
+		pci_pic: interrupt-controller@f8200000 {
                #address-cells = <0>;
                #interrupt-cells = <2>;
                interrupt-controller;
@@ -47,15 +43,13 @@
        };
        memory {
                device_type = "memory";
-               linux,phandle = <300>;
                reg = <00000000 4000000 f4500000 00000020>;
        };
 
        chosen {
                name = "chosen";
                linux,platform = <0>;
-               interrupt-controller = <10c00>;
-               linux,phandle = <400>;
+		interrupt-controller = <&Cpm_pic>;
        };
 
        soc8272@f0000000 {
@@ -70,20 +64,17 @@
                        device_type = "mdio";
                        compatible = "fs_enet";
                        reg = <0 0>;
-                       linux,phandle = <24520>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       ethernet-phy@0 {
-                               linux,phandle = <2452000>;
-                               interrupt-parent = <10c00>;
+			phy0:ethernet-phy@0 {
+				interrupt-parent = <&Cpm_pic>;
                                interrupts = <17 4>;
                                reg = <0>;
                                bitbang = [ 12 12 13 02 02 01 ];
                                device_type = "ethernet-phy";
                        };
-                       ethernet-phy@1 {
-                               linux,phandle = <2452001>;
-                               interrupt-parent = <10c00>;
+			phy1:ethernet-phy@1 {
+				interrupt-parent = <&Cpm_pic>;
                                interrupts = <17 4>;
                                bitbang = [ 12 12 13 02 02 01 ];
                                reg = <3>;
@@ -101,8 +92,8 @@
                        reg = <11300 20 8400 100 11380 30>;
                        mac-address = [ 00 11 2F 99 43 54 ];
                        interrupts = <20 2>;
-                       interrupt-parent = <10c00>;
-                       phy-handle = <2452000>;
+			interrupt-parent = <&Cpm_pic>;
+			phy-handle = <&Phy0>;
                        rx-clock = <13>;
                        tx-clock = <12>;
                };
@@ -115,14 +106,13 @@
                        reg = <11320 20 8500 100 113b0 30>;
                        mac-address = [ 00 11 2F 99 44 54 ];
                        interrupts = <21 2>;
-                       interrupt-parent = <10c00>;
-                       phy-handle = <2452001>;
+			interrupt-parent = <&Cpm_pic>;
+			phy-handle = <&Phy1>;
                        rx-clock = <17>;
                        tx-clock = <18>;
                };
 
                cpm@f0000000 {
-                       linux,phandle = <f0000000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        #interrupt-cells = <2>;
@@ -142,7 +132,7 @@
                                reg = <11a00 20 8000 100>;
                                current-speed = <1c200>;
                                interrupts = <28 2>;
-                               interrupt-parent = <10c00>;
+				interrupt-parent = <&Cpm_pic>;
                                clock-setup = <0 00ffffff>;
                                rx-clock = <1>;
                                tx-clock = <1>;
@@ -156,15 +146,14 @@
                                reg = <11a60 20 8300 100>;
                                current-speed = <1c200>;
                                interrupts = <2b 2>;
-                               interrupt-parent = <10c00>;
+				interrupt-parent = <&Cpm_pic>;
                                clock-setup = <1b ffffff00>;
                                rx-clock = <4>;
                                tx-clock = <4>;
                        };
 
                };
-               interrupt-controller@10c00 {
-                       linux,phandle = <10c00>;
+			cpm_pic:interrupt-controller@10c00 {
                        #address-cells = <0>;
                        #interrupt-cells = <2>;
                        interrupt-controller;
@@ -174,7 +163,6 @@
 		       compatible = "CPM2";
                };
                pci@0500 {
-                       linux,phandle = <0500>;
                        #interrupt-cells = <1>;
                        #size-cells = <2>;
                        #address-cells = <3>;
@@ -202,7 +190,7 @@
                                         c000 0 0 2 f8200000 43 8
                                         c000 0 0 3 f8200000 40 8
                                         c000 0 0 4 f8200000 41 8>;
-                       interrupt-parent = <10c00>;
+			interrupt-parent = <&Cpm_pic>;
                        interrupts = <14 8>;
                        bus-range = <0 0>;
                        ranges = <02000000 0 80000000 80000000 0 40000000
@@ -216,7 +204,7 @@
                        compatible = "talitos";
                        reg = <30000 10000>;
                        interrupts = <b 2>;
-                       interrupt-parent = <10c00>;
+			interrupt-parent = <&Cpm_pic>;
                        num-channels = <4>;
                        channel-fifo-len = <18>;
                        exec-units-mask = <0000007e>;
diff --git a/arch/powerpc/boot/dts/mpc832x_mds.dts b/arch/powerpc/boot/dts/mpc832x_mds.dts
index 112dd51..4fc0c4d 100644
--- a/arch/powerpc/boot/dts/mpc832x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc832x_mds.dts
@@ -272,7 +272,13 @@
 			reg = <2200 200>;
 			interrupts = <22>;
 			interrupt-parent = < &qeic >;
-			mac-address = [ 00 04 9f 00 23 23 ];
+			/*
+			 * mac-address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			mac-address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock = <19>;
 			tx-clock = <1a>;
 			phy-handle = < &phy3 >;
@@ -287,7 +293,13 @@
 			reg = <3000 200>;
 			interrupts = <23>;
 			interrupt-parent = < &qeic >;
-			mac-address = [ 00 11 22 33 44 55 ];
+			/*
+			 * mac-address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			mac-address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock = <17>;
 			tx-clock = <18>;
 			phy-handle = < &phy4 >;
diff --git a/arch/powerpc/boot/dts/mpc832x_rdb.dts b/arch/powerpc/boot/dts/mpc832x_rdb.dts
index be4c357..447c03f 100644
--- a/arch/powerpc/boot/dts/mpc832x_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc832x_rdb.dts
@@ -231,7 +231,13 @@
 			reg = <3000 200>;
 			interrupts = <21>;
 			interrupt-parent = <&qeic>;
-			mac-address = [ 00 04 9f ef 03 02 ];
+			/*
+			 * mac-address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			mac-address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock = <20>;
 			tx-clock = <13>;
 			phy-handle = <&phy00>;
@@ -246,7 +252,13 @@
 			reg = <2200 200>;
 			interrupts = <22>;
 			interrupt-parent = <&qeic>;
-			mac-address = [ 00 04 9f ef 03 01 ];
+			/*
+			 * mac-address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			mac-address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock = <19>;
 			tx-clock = <1a>;
 			phy-handle = <&phy04>;
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index db0d003..ae9bca5 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -131,6 +131,11 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <24000 1000>;
+			/*
+			 * address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
 			address = [ 00 00 00 00 00 00 ];
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			interrupts = <20 8 21 8 22 8>;
@@ -145,6 +150,11 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <25000 1000>;
+			/*
+			 * address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
 			address = [ 00 00 00 00 00 00 ];
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			interrupts = <23 8 24 8 25 8>;
diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts
index df773fa..310e877 100644
--- a/arch/powerpc/boot/dts/mpc834x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc834x_mds.dts
@@ -136,6 +136,11 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <24000 1000>;
+			/*
+			 * address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
 			address = [ 00 00 00 00 00 00 ];
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			interrupts = <20 8 21 8 22 8>;
@@ -150,6 +155,11 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <25000 1000>;
+			/*
+			 * address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
 			address = [ 00 00 00 00 00 00 ];
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			interrupts = <23 8 24 8 25 8>;
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index 38c8594..1e914f31 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -301,7 +301,13 @@
 			reg = <2000 200>;
 			interrupts = <20>;
 			interrupt-parent = < &qeic >;
-			mac-address = [ 00 04 9f 00 23 23 ];
+			/*
+			 * mac-address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			mac-address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock = <0>;
 			tx-clock = <19>;
 			phy-handle = < &phy0 >;
@@ -317,7 +323,13 @@
 			reg = <3000 200>;
 			interrupts = <21>;
 			interrupt-parent = < &qeic >;
-			mac-address = [ 00 11 22 33 44 55 ];
+			/*
+			 * mac-address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			mac-address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock = <0>;
 			tx-clock = <14>;
 			phy-handle = < &phy1 >;
diff --git a/arch/powerpc/boot/dts/mpc8540ads.dts b/arch/powerpc/boot/dts/mpc8540ads.dts
index d91e81c..364a969 100644
--- a/arch/powerpc/boot/dts/mpc8540ads.dts
+++ b/arch/powerpc/boot/dts/mpc8540ads.dts
@@ -52,7 +52,7 @@
 			compatible = "fsl,8540-memory-controller";
 			reg = <2000 1000>;
 			interrupt-parent = <&mpic>;
-			interrupts = <2 2>;
+			interrupts = <12 2>;
 		};
 
 		l2-cache-controller@20000 {
@@ -61,14 +61,14 @@
 			cache-line-size = <20>;	// 32 bytes
 			cache-size = <40000>;	// L2, 256K
 			interrupt-parent = <&mpic>;
-			interrupts = <0 2>;
+			interrupts = <10 2>;
 		};
 
 		i2c@3000 {
 			device_type = "i2c";
 			compatible = "fsl-i2c";
 			reg = <3000 100>;
-			interrupts = <1b 2>;
+			interrupts = <2b 2>;
 			interrupt-parent = <&mpic>;
 			dfsrr;
 		};
@@ -81,19 +81,19 @@
 			reg = <24520 20>;
 			phy0: ethernet-phy@0 {
 				interrupt-parent = <&mpic>;
-				interrupts = <35 1>;
+				interrupts = <5 1>;
 				reg = <0>;
 				device_type = "ethernet-phy";
 			};
 			phy1: ethernet-phy@1 {
 				interrupt-parent = <&mpic>;
-				interrupts = <35 1>;
+				interrupts = <5 1>;
 				reg = <1>;
 				device_type = "ethernet-phy";
 			};
 			phy3: ethernet-phy@3 {
 				interrupt-parent = <&mpic>;
-				interrupts = <37 1>;
+				interrupts = <7 1>;
 				reg = <3>;
 				device_type = "ethernet-phy";
 			};
@@ -106,9 +106,14 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <24000 1000>;
-			address = [ 00 E0 0C 00 73 00 ];
-			local-mac-address = [ 00 E0 0C 00 73 00 ];
-			interrupts = <d 2 e 2 12 2>;
+			/*
+			 * address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <1d 2 1e 2 22 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy0>;
 		};
@@ -120,9 +125,14 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <25000 1000>;
-			address = [ 00 E0 0C 00 73 01 ];
-			local-mac-address = [ 00 E0 0C 00 73 01 ];
-			interrupts = <13 2 14 2 18 2>;
+			/*
+			 * address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <23 2 24 2 28 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy1>;
 		};
@@ -134,9 +144,14 @@
 			model = "FEC";
 			compatible = "gianfar";
 			reg = <26000 1000>;
-			address = [ 00 E0 0C 00 73 02 ];
-			local-mac-address = [ 00 E0 0C 00 73 02 ];
-			interrupts = <19 2>;
+			/*
+			 * address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <29 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy3>;
 		};
@@ -146,7 +161,7 @@
 			compatible = "ns16550";
 			reg = <4500 100>; 	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
-			interrupts = <1a 2>;
+			interrupts = <2a 2>;
 			interrupt-parent = <&mpic>;
 		};
 
@@ -155,7 +170,7 @@
 			compatible = "ns16550";
 			reg = <4600 100>;	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
-			interrupts = <1a 2>;
+			interrupts = <2a 2>;
 			interrupt-parent = <&mpic>;
 		};
 		pci@8000 {
@@ -163,78 +178,78 @@
 			interrupt-map = <
 
 				/* IDSEL 0x02 */
-				1000 0 0 1 &mpic 31 1
-				1000 0 0 2 &mpic 32 1
-				1000 0 0 3 &mpic 33 1
-				1000 0 0 4 &mpic 34 1
+				1000 0 0 1 &mpic 1 1
+				1000 0 0 2 &mpic 2 1
+				1000 0 0 3 &mpic 3 1
+				1000 0 0 4 &mpic 4 1
 
 				/* IDSEL 0x03 */
-				1800 0 0 1 &mpic 34 1
-				1800 0 0 2 &mpic 31 1
-				1800 0 0 3 &mpic 32 1
-				1800 0 0 4 &mpic 33 1
+				1800 0 0 1 &mpic 4 1
+				1800 0 0 2 &mpic 1 1
+				1800 0 0 3 &mpic 2 1
+				1800 0 0 4 &mpic 3 1
 
 				/* IDSEL 0x04 */
-				2000 0 0 1 &mpic 33 1
-				2000 0 0 2 &mpic 34 1
-				2000 0 0 3 &mpic 31 1
-				2000 0 0 4 &mpic 32 1
+				2000 0 0 1 &mpic 3 1
+				2000 0 0 2 &mpic 4 1
+				2000 0 0 3 &mpic 1 1
+				2000 0 0 4 &mpic 2 1
 
 				/* IDSEL 0x05 */
-				2800 0 0 1 &mpic 32 1
-				2800 0 0 2 &mpic 33 1
-				2800 0 0 3 &mpic 34 1
-				2800 0 0 4 &mpic 31 1
+				2800 0 0 1 &mpic 2 1
+				2800 0 0 2 &mpic 3 1
+				2800 0 0 3 &mpic 4 1
+				2800 0 0 4 &mpic 1 1
 
 				/* IDSEL 0x0c */
-				6000 0 0 1 &mpic 31 1
-				6000 0 0 2 &mpic 32 1
-				6000 0 0 3 &mpic 33 1
-				6000 0 0 4 &mpic 34 1
+				6000 0 0 1 &mpic 1 1
+				6000 0 0 2 &mpic 2 1
+				6000 0 0 3 &mpic 3 1
+				6000 0 0 4 &mpic 4 1
 
 				/* IDSEL 0x0d */
-				6800 0 0 1 &mpic 34 1
-				6800 0 0 2 &mpic 31 1
-				6800 0 0 3 &mpic 32 1
-				6800 0 0 4 &mpic 33 1
+				6800 0 0 1 &mpic 4 1
+				6800 0 0 2 &mpic 1 1
+				6800 0 0 3 &mpic 2 1
+				6800 0 0 4 &mpic 3 1
 
 				/* IDSEL 0x0e */
-				7000 0 0 1 &mpic 33 1
-				7000 0 0 2 &mpic 34 1
-				7000 0 0 3 &mpic 31 1
-				7000 0 0 4 &mpic 32 1
+				7000 0 0 1 &mpic 3 1
+				7000 0 0 2 &mpic 4 1
+				7000 0 0 3 &mpic 1 1
+				7000 0 0 4 &mpic 2 1
 
 				/* IDSEL 0x0f */
-				7800 0 0 1 &mpic 32 1
-				7800 0 0 2 &mpic 33 1
-				7800 0 0 3 &mpic 34 1
-				7800 0 0 4 &mpic 31 1
+				7800 0 0 1 &mpic 2 1
+				7800 0 0 2 &mpic 3 1
+				7800 0 0 3 &mpic 4 1
+				7800 0 0 4 &mpic 1 1
 
 				/* IDSEL 0x12 */
-				9000 0 0 1 &mpic 31 1
-				9000 0 0 2 &mpic 32 1
-				9000 0 0 3 &mpic 33 1
-				9000 0 0 4 &mpic 34 1
+				9000 0 0 1 &mpic 1 1
+				9000 0 0 2 &mpic 2 1
+				9000 0 0 3 &mpic 3 1
+				9000 0 0 4 &mpic 4 1
 
 				/* IDSEL 0x13 */
-				9800 0 0 1 &mpic 34 1
-				9800 0 0 2 &mpic 31 1
-				9800 0 0 3 &mpic 32 1
-				9800 0 0 4 &mpic 33 1
+				9800 0 0 1 &mpic 4 1
+				9800 0 0 2 &mpic 1 1
+				9800 0 0 3 &mpic 2 1
+				9800 0 0 4 &mpic 3 1
 
 				/* IDSEL 0x14 */
-				a000 0 0 1 &mpic 33 1
-				a000 0 0 2 &mpic 34 1
-				a000 0 0 3 &mpic 31 1
-				a000 0 0 4 &mpic 32 1
+				a000 0 0 1 &mpic 3 1
+				a000 0 0 2 &mpic 4 1
+				a000 0 0 3 &mpic 1 1
+				a000 0 0 4 &mpic 2 1
 
 				/* IDSEL 0x15 */
-				a800 0 0 1 &mpic 32 1
-				a800 0 0 2 &mpic 33 1
-				a800 0 0 3 &mpic 34 1
-				a800 0 0 4 &mpic 31 1>;
+				a800 0 0 1 &mpic 2 1
+				a800 0 0 2 &mpic 3 1
+				a800 0 0 3 &mpic 4 1
+				a800 0 0 4 &mpic 1 1>;
 			interrupt-parent = <&mpic>;
-			interrupts = <08 2>;
+			interrupts = <18 2>;
 			bus-range = <0 0>;
 			ranges = <02000000 0 80000000 80000000 0 20000000
 				  01000000 0 00000000 e2000000 0 00100000>;
diff --git a/arch/powerpc/boot/dts/mpc8541cds.dts b/arch/powerpc/boot/dts/mpc8541cds.dts
index 4f2c3af..070206f 100644
--- a/arch/powerpc/boot/dts/mpc8541cds.dts
+++ b/arch/powerpc/boot/dts/mpc8541cds.dts
@@ -52,7 +52,7 @@
 			compatible = "fsl,8541-memory-controller";
 			reg = <2000 1000>;
 			interrupt-parent = <&mpic>;
-			interrupts = <2 2>;
+			interrupts = <12 2>;
 		};
 
 		l2-cache-controller@20000 {
@@ -61,14 +61,14 @@
 			cache-line-size = <20>;	// 32 bytes
 			cache-size = <40000>;	// L2, 256K
 			interrupt-parent = <&mpic>;
-			interrupts = <0 2>;
+			interrupts = <10 2>;
 		};
 
 		i2c@3000 {
 			device_type = "i2c";
 			compatible = "fsl-i2c";
 			reg = <3000 100>;
-			interrupts = <1b 2>;
+			interrupts = <2b 2>;
 			interrupt-parent = <&mpic>;
 			dfsrr;
 		};
@@ -81,13 +81,13 @@
 			reg = <24520 20>;
 			phy0: ethernet-phy@0 {
 				interrupt-parent = <&mpic>;
-				interrupts = <35 0>;
+				interrupts = <5 1>;
 				reg = <0>;
 				device_type = "ethernet-phy";
 			};
 			phy1: ethernet-phy@1 {
 				interrupt-parent = <&mpic>;
-				interrupts = <35 0>;
+				interrupts = <5 1>;
 				reg = <1>;
 				device_type = "ethernet-phy";
 			};
@@ -100,8 +100,8 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <24000 1000>;
-			local-mac-address = [ 00 E0 0C 00 73 00 ];
-			interrupts = <d 2 e 2 12 2>;
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <1d 2 1e 2 22 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy0>;
 		};
@@ -113,8 +113,8 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <25000 1000>;
-			local-mac-address = [ 00 E0 0C 00 73 01 ];
-			interrupts = <13 2 14 2 18 2>;
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <23 2 24 2 28 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy1>;
 		};
@@ -124,7 +124,7 @@
 			compatible = "ns16550";
 			reg = <4500 100>; 	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
-			interrupts = <1a 2>;
+			interrupts = <2a 2>;
 			interrupt-parent = <&mpic>;
 		};
 
@@ -133,7 +133,7 @@
 			compatible = "ns16550";
 			reg = <4600 100>;	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
-			interrupts = <1a 2>;
+			interrupts = <2a 2>;
 			interrupt-parent = <&mpic>;
 		};
 
@@ -142,49 +142,49 @@
 			interrupt-map = <
 
 				/* IDSEL 0x10 */
-				08000 0 0 1 &mpic 30 1
-				08000 0 0 2 &mpic 31 1
-				08000 0 0 3 &mpic 32 1
-				08000 0 0 4 &mpic 33 1
+				08000 0 0 1 &mpic 0 1
+				08000 0 0 2 &mpic 1 1
+				08000 0 0 3 &mpic 2 1
+				08000 0 0 4 &mpic 3 1
 
 				/* IDSEL 0x11 */
-				08800 0 0 1 &mpic 30 1
-				08800 0 0 2 &mpic 31 1
-				08800 0 0 3 &mpic 32 1
-				08800 0 0 4 &mpic 33 1
+				08800 0 0 1 &mpic 0 1
+				08800 0 0 2 &mpic 1 1
+				08800 0 0 3 &mpic 2 1
+				08800 0 0 4 &mpic 3 1
 
 				/* IDSEL 0x12 (Slot 1) */
-				09000 0 0 1 &mpic 30 1
-				09000 0 0 2 &mpic 31 1
-				09000 0 0 3 &mpic 32 1
-				09000 0 0 4 &mpic 33 1
+				09000 0 0 1 &mpic 0 1
+				09000 0 0 2 &mpic 1 1
+				09000 0 0 3 &mpic 2 1
+				09000 0 0 4 &mpic 3 1
 
 				/* IDSEL 0x13 (Slot 2) */
-				09800 0 0 1 &mpic 31 1
-				09800 0 0 2 &mpic 32 1
-				09800 0 0 3 &mpic 33 1
-				09800 0 0 4 &mpic 30 1
+				09800 0 0 1 &mpic 1 1
+				09800 0 0 2 &mpic 2 1
+				09800 0 0 3 &mpic 3 1
+				09800 0 0 4 &mpic 0 1
 
 				/* IDSEL 0x14 (Slot 3) */
-				0a000 0 0 1 &mpic 32 1
-				0a000 0 0 2 &mpic 33 1
-				0a000 0 0 3 &mpic 30 1
-				0a000 0 0 4 &mpic 31 1
+				0a000 0 0 1 &mpic 2 1
+				0a000 0 0 2 &mpic 3 1
+				0a000 0 0 3 &mpic 0 1
+				0a000 0 0 4 &mpic 1 1
 
 				/* IDSEL 0x15 (Slot 4) */
-				0a800 0 0 1 &mpic 33 1
-				0a800 0 0 2 &mpic 30 1
-				0a800 0 0 3 &mpic 31 1
-				0a800 0 0 4 &mpic 32 1
+				0a800 0 0 1 &mpic 3 1
+				0a800 0 0 2 &mpic 0 1
+				0a800 0 0 3 &mpic 1 1
+				0a800 0 0 4 &mpic 2 1
 
 				/* Bus 1 (Tundra Bridge) */
 				/* IDSEL 0x12 (ISA bridge) */
-				19000 0 0 1 &mpic 30 1
-				19000 0 0 2 &mpic 31 1
-				19000 0 0 3 &mpic 32 1
-				19000 0 0 4 &mpic 33 1>;
+				19000 0 0 1 &mpic 0 1
+				19000 0 0 2 &mpic 1 1
+				19000 0 0 3 &mpic 2 1
+				19000 0 0 4 &mpic 3 1>;
 			interrupt-parent = <&mpic>;
-			interrupts = <08 2>;
+			interrupts = <18 2>;
 			bus-range = <0 0>;
 			ranges = <02000000 0 80000000 80000000 0 20000000
 				  01000000 0 00000000 e2000000 0 00100000>;
@@ -216,12 +216,12 @@
 			interrupt-map = <
 
 				/* IDSEL 0x15 */
-				a800 0 0 1 &mpic 3b 1
-				a800 0 0 2 &mpic 3b 1
-				a800 0 0 3 &mpic 3b 1
-				a800 0 0 4 &mpic 3b 1>;
+				a800 0 0 1 &mpic b 1
+				a800 0 0 2 &mpic b 1
+				a800 0 0 3 &mpic b 1
+				a800 0 0 4 &mpic b 1>;
 			interrupt-parent = <&mpic>;
-			interrupts = <09 2>;
+			interrupts = <19 2>;
 			bus-range = <0 0>;
 			ranges = <02000000 0 a0000000 a0000000 0 20000000
 				  01000000 0 00000000 e3000000 0 00100000>;
diff --git a/arch/powerpc/boot/dts/mpc8544ds.dts b/arch/powerpc/boot/dts/mpc8544ds.dts
index 3033599..8285925 100644
--- a/arch/powerpc/boot/dts/mpc8544ds.dts
+++ b/arch/powerpc/boot/dts/mpc8544ds.dts
@@ -52,7 +52,7 @@
 			compatible = "fsl,8544-memory-controller";
 			reg = <2000 1000>;
 			interrupt-parent = <&mpic>;
-			interrupts = <2 2>;
+			interrupts = <12 2>;
 		};
 
 		l2-cache-controller@20000 {
@@ -61,14 +61,14 @@
 			cache-line-size = <20>;	// 32 bytes
 			cache-size = <40000>;	// L2, 256K
 			interrupt-parent = <&mpic>;
-			interrupts = <0 2>;
+			interrupts = <10 2>;
 		};
 
 		i2c@3000 {
 			device_type = "i2c";
 			compatible = "fsl-i2c";
 			reg = <3000 100>;
-			interrupts = <1b 2>;
+			interrupts = <2b 2>;
 			interrupt-parent = <&mpic>;
 			dfsrr;
 		};
@@ -81,13 +81,13 @@
 			reg = <24520 20>;
 			phy0: ethernet-phy@0 {
 				interrupt-parent = <&mpic>;
-				interrupts = <3a 1>;
+				interrupts = <a 1>;
 				reg = <0>;
 				device_type = "ethernet-phy";
 			};
 			phy1: ethernet-phy@1 {
 				interrupt-parent = <&mpic>;
-				interrupts = <3a 1>;
+				interrupts = <a 1>;
 				reg = <1>;
 				device_type = "ethernet-phy";
 			};
@@ -101,7 +101,7 @@
 			compatible = "gianfar";
 			reg = <24000 1000>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <d 2 e 2 12 2>;
+			interrupts = <1d 2 1e 2 22 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy0>;
 		};
@@ -114,7 +114,7 @@
 			compatible = "gianfar";
 			reg = <26000 1000>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <f 2 10 2 11 2>;
+			interrupts = <1f 2 20 2 21 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy1>;
 		};
@@ -124,7 +124,7 @@
 			compatible = "ns16550";
 			reg = <4500 100>;
 			clock-frequency = <0>;
-			interrupts = <1a 2>;
+			interrupts = <2a 2>;
 			interrupt-parent = <&mpic>;
 		};
 
@@ -133,7 +133,7 @@
 			compatible = "ns16550";
 			reg = <4600 100>;
 			clock-frequency = <0>;
-			interrupts = <1a 2>;
+			interrupts = <2a 2>;
 			interrupt-parent = <&mpic>;
 		};
 
diff --git a/arch/powerpc/boot/dts/mpc8548cds.dts b/arch/powerpc/boot/dts/mpc8548cds.dts
index ad96381..9d0b84b 100644
--- a/arch/powerpc/boot/dts/mpc8548cds.dts
+++ b/arch/powerpc/boot/dts/mpc8548cds.dts
@@ -52,7 +52,7 @@
 			compatible = "fsl,8548-memory-controller";
 			reg = <2000 1000>;
 			interrupt-parent = <&mpic>;
-			interrupts = <2 2>;
+			interrupts = <12 2>;
 		};
 
 		l2-cache-controller@20000 {
@@ -61,14 +61,14 @@
 			cache-line-size = <20>;	// 32 bytes
 			cache-size = <80000>;	// L2, 512K
 			interrupt-parent = <&mpic>;
-			interrupts = <0 2>;
+			interrupts = <10 2>;
 		};
 
 		i2c@3000 {
 			device_type = "i2c";
 			compatible = "fsl-i2c";
 			reg = <3000 100>;
-			interrupts = <1b 2>;
+			interrupts = <2b 2>;
 			interrupt-parent = <&mpic>;
 			dfsrr;
 		};
@@ -81,25 +81,25 @@
 			reg = <24520 20>;
 			phy0: ethernet-phy@0 {
 				interrupt-parent = <&mpic>;
-				interrupts = <35 0>;
+				interrupts = <5 1>;
 				reg = <0>;
 				device_type = "ethernet-phy";
 			};
 			phy1: ethernet-phy@1 {
 				interrupt-parent = <&mpic>;
-				interrupts = <35 0>;
+				interrupts = <5 1>;
 				reg = <1>;
 				device_type = "ethernet-phy";
 			};
 			phy2: ethernet-phy@2 {
 				interrupt-parent = <&mpic>;
-				interrupts = <35 0>;
+				interrupts = <5 1>;
 				reg = <2>;
 				device_type = "ethernet-phy";
 			};
 			phy3: ethernet-phy@3 {
 				interrupt-parent = <&mpic>;
-				interrupts = <35 0>;
+				interrupts = <5 1>;
 				reg = <3>;
 				device_type = "ethernet-phy";
 			};
@@ -112,8 +112,8 @@
 			model = "eTSEC";
 			compatible = "gianfar";
 			reg = <24000 1000>;
-			local-mac-address = [ 00 E0 0C 00 73 00 ];
-			interrupts = <d 2 e 2 12 2>;
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <1d 2 1e 2 22 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy0>;
 		};
@@ -125,8 +125,8 @@
 			model = "eTSEC";
 			compatible = "gianfar";
 			reg = <25000 1000>;
-			local-mac-address = [ 00 E0 0C 00 73 01 ];
-			interrupts = <13 2 14 2 18 2>;
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <23 2 24 2 28 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy1>;
 		};
@@ -139,8 +139,8 @@
 			model = "eTSEC";
 			compatible = "gianfar";
 			reg = <26000 1000>;
-			local-mac-address = [ 00 E0 0C 00 73 02 ];
-			interrupts = <f 2 10 2 11 2>;
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <1f 2 20 2 21 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy2>;
 		};
@@ -152,8 +152,8 @@
 			model = "eTSEC";
 			compatible = "gianfar";
 			reg = <27000 1000>;
-			local-mac-address = [ 00 E0 0C 00 73 03 ];
-			interrupts = <15 2 16 2 17 2>;
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <25 2 26 2 27 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy3>;
 		};
@@ -164,7 +164,7 @@
 			compatible = "ns16550";
 			reg = <4500 100>; 	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
-			interrupts = <1a 2>;
+			interrupts = <2a 2>;
 			interrupt-parent = <&mpic>;
 		};
 
@@ -173,58 +173,64 @@
 			compatible = "ns16550";
 			reg = <4600 100>;	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
-			interrupts = <1a 2>;
+			interrupts = <2a 2>;
 			interrupt-parent = <&mpic>;
 		};
 
+		global-utilities@e0000 {	//global utilities reg
+			compatible = "fsl,mpc8548-guts";
+			reg = <e0000 1000>;
+			fsl,has-rstcr;
+		};
+
 		pci1: pci@8000 {
 			interrupt-map-mask = <1f800 0 0 7>;
 			interrupt-map = <
 
 				/* IDSEL 0x10 */
-				08000 0 0 1 &mpic 30 1
-				08000 0 0 2 &mpic 31 1
-				08000 0 0 3 &mpic 32 1
-				08000 0 0 4 &mpic 33 1
+				08000 0 0 1 &mpic 0 1
+				08000 0 0 2 &mpic 1 1
+				08000 0 0 3 &mpic 2 1
+				08000 0 0 4 &mpic 3 1
 
 				/* IDSEL 0x11 */
-				08800 0 0 1 &mpic 30 1
-				08800 0 0 2 &mpic 31 1
-				08800 0 0 3 &mpic 32 1
-				08800 0 0 4 &mpic 33 1
+				08800 0 0 1 &mpic 0 1
+				08800 0 0 2 &mpic 1 1
+				08800 0 0 3 &mpic 2 1
+				08800 0 0 4 &mpic 3 1
 
 				/* IDSEL 0x12 (Slot 1) */
-				09000 0 0 1 &mpic 30 1
-				09000 0 0 2 &mpic 31 1
-				09000 0 0 3 &mpic 32 1
-				09000 0 0 4 &mpic 33 1
+				09000 0 0 1 &mpic 0 1
+				09000 0 0 2 &mpic 1 1
+				09000 0 0 3 &mpic 2 1
+				09000 0 0 4 &mpic 3 1
 
 				/* IDSEL 0x13 (Slot 2) */
-				09800 0 0 1 &mpic 31 1
-				09800 0 0 2 &mpic 32 1
-				09800 0 0 3 &mpic 33 1
-				09800 0 0 4 &mpic 30 1
+				09800 0 0 1 &mpic 1 1
+				09800 0 0 2 &mpic 2 1
+				09800 0 0 3 &mpic 3 1
+				09800 0 0 4 &mpic 0 1
 
 				/* IDSEL 0x14 (Slot 3) */
-				0a000 0 0 1 &mpic 32 1
-				0a000 0 0 2 &mpic 33 1
-				0a000 0 0 3 &mpic 30 1
-				0a000 0 0 4 &mpic 31 1
+				0a000 0 0 1 &mpic 2 1
+				0a000 0 0 2 &mpic 3 1
+				0a000 0 0 3 &mpic 0 1
+				0a000 0 0 4 &mpic 1 1
 
 				/* IDSEL 0x15 (Slot 4) */
-				0a800 0 0 1 &mpic 33 1
-				0a800 0 0 2 &mpic 30 1
-				0a800 0 0 3 &mpic 31 1
-				0a800 0 0 4 &mpic 32 1
+				0a800 0 0 1 &mpic 3 1
+				0a800 0 0 2 &mpic 0 1
+				0a800 0 0 3 &mpic 1 1
+				0a800 0 0 4 &mpic 2 1
 
 				/* Bus 1 (Tundra Bridge) */
 				/* IDSEL 0x12 (ISA bridge) */
-				19000 0 0 1 &mpic 30 1
-				19000 0 0 2 &mpic 31 1
-				19000 0 0 3 &mpic 32 1
-				19000 0 0 4 &mpic 33 1>;
+				19000 0 0 1 &mpic 0 1
+				19000 0 0 2 &mpic 1 1
+				19000 0 0 3 &mpic 2 1
+				19000 0 0 4 &mpic 3 1>;
 			interrupt-parent = <&mpic>;
-			interrupts = <08 2>;
+			interrupts = <18 2>;
 			bus-range = <0 0>;
 			ranges = <02000000 0 80000000 80000000 0 20000000
 				  01000000 0 00000000 e2000000 0 00100000>;
@@ -256,12 +262,12 @@
 			interrupt-map = <
 
 				/* IDSEL 0x15 */
-				a800 0 0 1 &mpic 3b 1
-				a800 0 0 2 &mpic 3b 1
-				a800 0 0 3 &mpic 3b 1
-				a800 0 0 4 &mpic 3b 1>;
+				a800 0 0 1 &mpic b 1
+				a800 0 0 2 &mpic b 1
+				a800 0 0 3 &mpic b 1
+				a800 0 0 4 &mpic b 1>;
 			interrupt-parent = <&mpic>;
-			interrupts = <09 2>;
+			interrupts = <19 2>;
 			bus-range = <0 0>;
 			ranges = <02000000 0 a0000000 a0000000 0 20000000
 				  01000000 0 00000000 e3000000 0 00100000>;
diff --git a/arch/powerpc/boot/dts/mpc8555cds.dts b/arch/powerpc/boot/dts/mpc8555cds.dts
index 951ed92..17e45d9 100644
--- a/arch/powerpc/boot/dts/mpc8555cds.dts
+++ b/arch/powerpc/boot/dts/mpc8555cds.dts
@@ -52,7 +52,7 @@
 			compatible = "fsl,8555-memory-controller";
 			reg = <2000 1000>;
 			interrupt-parent = <&mpic>;
-			interrupts = <2 2>;
+			interrupts = <12 2>;
 		};
 
 		l2-cache-controller@20000 {
@@ -61,14 +61,14 @@
 			cache-line-size = <20>;	// 32 bytes
 			cache-size = <40000>;	// L2, 256K
 			interrupt-parent = <&mpic>;
-			interrupts = <0 2>;
+			interrupts = <10 2>;
 		};
 
 		i2c@3000 {
 			device_type = "i2c";
 			compatible = "fsl-i2c";
 			reg = <3000 100>;
-			interrupts = <1b 2>;
+			interrupts = <2b 2>;
 			interrupt-parent = <&mpic>;
 			dfsrr;
 		};
@@ -81,13 +81,13 @@
 			reg = <24520 20>;
 			phy0: ethernet-phy@0 {
 				interrupt-parent = <&mpic>;
-				interrupts = <35 0>;
+				interrupts = <5 1>;
 				reg = <0>;
 				device_type = "ethernet-phy";
 			};
 			phy1: ethernet-phy@1 {
 				interrupt-parent = <&mpic>;
-				interrupts = <35 0>;
+				interrupts = <5 1>;
 				reg = <1>;
 				device_type = "ethernet-phy";
 			};
@@ -100,8 +100,8 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <24000 1000>;
-			local-mac-address = [ 00 E0 0C 00 73 00 ];
-			interrupts = <0d 2 0e 2 12 2>;
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <1d 2 1e 2 22 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy0>;
 		};
@@ -113,8 +113,8 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <25000 1000>;
-			local-mac-address = [ 00 E0 0C 00 73 01 ];
-			interrupts = <13 2 14 2 18 2>;
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <23 2 24 2 28 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy1>;
 		};
@@ -124,7 +124,7 @@
 			compatible = "ns16550";
 			reg = <4500 100>; 	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
-			interrupts = <1a 2>;
+			interrupts = <2a 2>;
 			interrupt-parent = <&mpic>;
 		};
 
@@ -133,7 +133,7 @@
 			compatible = "ns16550";
 			reg = <4600 100>;	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
-			interrupts = <1a 2>;
+			interrupts = <2a 2>;
 			interrupt-parent = <&mpic>;
 		};
 
@@ -142,49 +142,49 @@
 			interrupt-map = <
 
 				/* IDSEL 0x10 */
-				08000 0 0 1 &mpic 30 1
-				08000 0 0 2 &mpic 31 1
-				08000 0 0 3 &mpic 32 1
-				08000 0 0 4 &mpic 33 1
+				08000 0 0 1 &mpic 0 1
+				08000 0 0 2 &mpic 1 1
+				08000 0 0 3 &mpic 2 1
+				08000 0 0 4 &mpic 3 1
 
 				/* IDSEL 0x11 */
-				08800 0 0 1 &mpic 30 1
-				08800 0 0 2 &mpic 31 1
-				08800 0 0 3 &mpic 32 1
-				08800 0 0 4 &mpic 33 1
+				08800 0 0 1 &mpic 0 1
+				08800 0 0 2 &mpic 1 1
+				08800 0 0 3 &mpic 2 1
+				08800 0 0 4 &mpic 3 1
 
 				/* IDSEL 0x12 (Slot 1) */
-				09000 0 0 1 &mpic 30 1
-				09000 0 0 2 &mpic 31 1
-				09000 0 0 3 &mpic 32 1
-				09000 0 0 4 &mpic 33 1
+				09000 0 0 1 &mpic 0 1
+				09000 0 0 2 &mpic 1 1
+				09000 0 0 3 &mpic 2 1
+				09000 0 0 4 &mpic 3 1
 
 				/* IDSEL 0x13 (Slot 2) */
-				09800 0 0 1 &mpic 31 1
-				09800 0 0 2 &mpic 32 1
-				09800 0 0 3 &mpic 33 1
-				09800 0 0 4 &mpic 30 1
+				09800 0 0 1 &mpic 1 1
+				09800 0 0 2 &mpic 2 1
+				09800 0 0 3 &mpic 3 1
+				09800 0 0 4 &mpic 0 1
 
 				/* IDSEL 0x14 (Slot 3) */
-				0a000 0 0 1 &mpic 32 1
-				0a000 0 0 2 &mpic 33 1
-				0a000 0 0 3 &mpic 30 1
-				0a000 0 0 4 &mpic 31 1
+				0a000 0 0 1 &mpic 2 1
+				0a000 0 0 2 &mpic 3 1
+				0a000 0 0 3 &mpic 0 1
+				0a000 0 0 4 &mpic 1 1
 
 				/* IDSEL 0x15 (Slot 4) */
-				0a800 0 0 1 &mpic 33 1
-				0a800 0 0 2 &mpic 30 1
-				0a800 0 0 3 &mpic 31 1
-				0a800 0 0 4 &mpic 32 1
+				0a800 0 0 1 &mpic 3 1
+				0a800 0 0 2 &mpic 0 1
+				0a800 0 0 3 &mpic 1 1
+				0a800 0 0 4 &mpic 2 1
 
 				/* Bus 1 (Tundra Bridge) */
 				/* IDSEL 0x12 (ISA bridge) */
-				19000 0 0 1 &mpic 30 1
-				19000 0 0 2 &mpic 31 1
-				19000 0 0 3 &mpic 32 1
-				19000 0 0 4 &mpic 33 1>;
+				19000 0 0 1 &mpic 0 1
+				19000 0 0 2 &mpic 1 1
+				19000 0 0 3 &mpic 2 1
+				19000 0 0 4 &mpic 3 1>;
 			interrupt-parent = <&mpic>;
-			interrupts = <08 2>;
+			interrupts = <18 2>;
 			bus-range = <0 0>;
 			ranges = <02000000 0 80000000 80000000 0 20000000
 				  01000000 0 00000000 e2000000 0 00100000>;
@@ -216,12 +216,12 @@
 			interrupt-map = <
 
 				/* IDSEL 0x15 */
-				a800 0 0 1 &mpic 3b 1
-				a800 0 0 2 &mpic 3b 1
-				a800 0 0 3 &mpic 3b 1
-				a800 0 0 4 &mpic 3b 1>;
+				a800 0 0 1 &mpic b 1
+				a800 0 0 2 &mpic b 1
+				a800 0 0 3 &mpic b 1
+				a800 0 0 4 &mpic b 1>;
 			interrupt-parent = <&mpic>;
-			interrupts = <09 2>;
+			interrupts = <19 2>;
 			bus-range = <0 0>;
 			ranges = <02000000 0 a0000000 a0000000 0 20000000
 				  01000000 0 00000000 e3000000 0 00100000>;
diff --git a/arch/powerpc/boot/dts/mpc8560ads.dts b/arch/powerpc/boot/dts/mpc8560ads.dts
index 8068215..21ccaaa 100644
--- a/arch/powerpc/boot/dts/mpc8560ads.dts
+++ b/arch/powerpc/boot/dts/mpc8560ads.dts
@@ -52,7 +52,7 @@
 			compatible = "fsl,8540-memory-controller";
 			reg = <2000 1000>;
 			interrupt-parent = <&mpic>;
-			interrupts = <2 2>;
+			interrupts = <12 2>;
 		};
 
 		l2-cache-controller@20000 {
@@ -61,7 +61,7 @@
 			cache-line-size = <20>;	// 32 bytes
 			cache-size = <40000>;	// L2, 256K
 			interrupt-parent = <&mpic>;
-			interrupts = <0 2>;
+			interrupts = <10 2>;
 		};
 
 		mdio@24520 {
@@ -72,25 +72,25 @@
 			#size-cells = <0>;
 			phy0: ethernet-phy@0 {
 				interrupt-parent = <&mpic>;
-				interrupts = <35 1>;
+				interrupts = <5 1>;
 				reg = <0>;
 				device_type = "ethernet-phy";
 			};
 			phy1: ethernet-phy@1 {
 				interrupt-parent = <&mpic>;
-				interrupts = <35 1>;
+				interrupts = <5 1>;
 				reg = <1>;
 				device_type = "ethernet-phy";
 			};
 			phy2: ethernet-phy@2 {
 				interrupt-parent = <&mpic>;
-				interrupts = <37 1>;
+				interrupts = <7 1>;
 				reg = <2>;
 				device_type = "ethernet-phy";
 			};
 			phy3: ethernet-phy@3 {
 				interrupt-parent = <&mpic>;
-				interrupts = <37 1>;
+				interrupts = <7 1>;
 				reg = <3>;
 				device_type = "ethernet-phy";
 			};
@@ -101,8 +101,14 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <24000 1000>;
-			address = [ 00 00 0C 00 00 FD ];
-			interrupts = <d 2 e 2 12 2>;
+			/*
+			 * address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <1d 2 1e 2 22 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy0>;
 		};
@@ -114,8 +120,14 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <25000 1000>;
-			address = [ 00 00 0C 00 01 FD ];
-			interrupts = <13 2 14 2 18 2>;
+			/*
+			 * address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <23 2 24 2 28 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy1>;
 		};
@@ -132,79 +144,79 @@
 			interrupt-map = <
 
 					/* IDSEL 0x2 */
-					 1000 0 0 1 &mpic 31 1
-					 1000 0 0 2 &mpic 32 1
-					 1000 0 0 3 &mpic 33 1
-					 1000 0 0 4 &mpic 34 1
+					 1000 0 0 1 &mpic 1 1
+					 1000 0 0 2 &mpic 2 1
+					 1000 0 0 3 &mpic 3 1
+					 1000 0 0 4 &mpic 4 1
 
 					/* IDSEL 0x3 */
-					 1800 0 0 1 &mpic 34 1
-					 1800 0 0 2 &mpic 31 1
-					 1800 0 0 3 &mpic 32 1
-					 1800 0 0 4 &mpic 33 1
+					 1800 0 0 1 &mpic 4 1
+					 1800 0 0 2 &mpic 1 1
+					 1800 0 0 3 &mpic 2 1
+					 1800 0 0 4 &mpic 3 1
 
 					/* IDSEL 0x4 */
-					 2000 0 0 1 &mpic 33 1
-					 2000 0 0 2 &mpic 34 1
-					 2000 0 0 3 &mpic 31 1
-					 2000 0 0 4 &mpic 32 1
+					 2000 0 0 1 &mpic 3 1
+					 2000 0 0 2 &mpic 4 1
+					 2000 0 0 3 &mpic 1 1
+					 2000 0 0 4 &mpic 2 1
 
 					/* IDSEL 0x5  */
-					 2800 0 0 1 &mpic 32 1
-					 2800 0 0 2 &mpic 33 1
-					 2800 0 0 3 &mpic 34 1
-					 2800 0 0 4 &mpic 31 1
+					 2800 0 0 1 &mpic 2 1
+					 2800 0 0 2 &mpic 3 1
+					 2800 0 0 3 &mpic 4 1
+					 2800 0 0 4 &mpic 1 1
 
 					/* IDSEL 12 */
-					 6000 0 0 1 &mpic 31 1
-					 6000 0 0 2 &mpic 32 1
-					 6000 0 0 3 &mpic 33 1
-					 6000 0 0 4 &mpic 34 1
+					 6000 0 0 1 &mpic 1 1
+					 6000 0 0 2 &mpic 2 1
+					 6000 0 0 3 &mpic 3 1
+					 6000 0 0 4 &mpic 4 1
 
 					/* IDSEL 13 */
-					 6800 0 0 1 &mpic 34 1
-					 6800 0 0 2 &mpic 31 1
-					 6800 0 0 3 &mpic 32 1
-					 6800 0 0 4 &mpic 33 1
+					 6800 0 0 1 &mpic 4 1
+					 6800 0 0 2 &mpic 1 1
+					 6800 0 0 3 &mpic 2 1
+					 6800 0 0 4 &mpic 3 1
 
 					/* IDSEL 14*/
-					 7000 0 0 1 &mpic 33 1
-					 7000 0 0 2 &mpic 34 1
-					 7000 0 0 3 &mpic 31 1
-					 7000 0 0 4 &mpic 32 1
+					 7000 0 0 1 &mpic 3 1
+					 7000 0 0 2 &mpic 4 1
+					 7000 0 0 3 &mpic 1 1
+					 7000 0 0 4 &mpic 2 1
 
 					/* IDSEL 15 */
-					 7800 0 0 1 &mpic 32 1
-					 7800 0 0 2 &mpic 33 1
-					 7800 0 0 3 &mpic 34 1
-					 7800 0 0 4 &mpic 31 1
+					 7800 0 0 1 &mpic 2 1
+					 7800 0 0 2 &mpic 3 1
+					 7800 0 0 3 &mpic 4 1
+					 7800 0 0 4 &mpic 1 1
 
 					/* IDSEL 18 */
-					 9000 0 0 1 &mpic 31 1
-					 9000 0 0 2 &mpic 32 1
-					 9000 0 0 3 &mpic 33 1
-					 9000 0 0 4 &mpic 34 1
+					 9000 0 0 1 &mpic 1 1
+					 9000 0 0 2 &mpic 2 1
+					 9000 0 0 3 &mpic 3 1
+					 9000 0 0 4 &mpic 4 1
 
 					/* IDSEL 19 */
-					 9800 0 0 1 &mpic 34 1
-					 9800 0 0 2 &mpic 31 1
-					 9800 0 0 3 &mpic 32 1
-					 9800 0 0 4 &mpic 33 1
+					 9800 0 0 1 &mpic 4 1
+					 9800 0 0 2 &mpic 1 1
+					 9800 0 0 3 &mpic 2 1
+					 9800 0 0 4 &mpic 3 1
 
 					/* IDSEL 20 */
-					 a000 0 0 1 &mpic 33 1
-					 a000 0 0 2 &mpic 34 1
-					 a000 0 0 3 &mpic 31 1
-					 a000 0 0 4 &mpic 32 1
+					 a000 0 0 1 &mpic 3 1
+					 a000 0 0 2 &mpic 4 1
+					 a000 0 0 3 &mpic 1 1
+					 a000 0 0 4 &mpic 2 1
 
 					/* IDSEL 21 */
-					 a800 0 0 1 &mpic 32 1
-					 a800 0 0 2 &mpic 33 1
-					 a800 0 0 3 &mpic 34 1
-					 a800 0 0 4 &mpic 31 1>;
+					 a800 0 0 1 &mpic 2 1
+					 a800 0 0 2 &mpic 3 1
+					 a800 0 0 3 &mpic 4 1
+					 a800 0 0 4 &mpic 1 1>;
 
 			interrupt-parent = <&mpic>;
-			interrupts = <8 0>;
+			interrupts = <18 2>;
 			bus-range = <0 0>;
 			ranges = <02000000 0 80000000 80000000 0 20000000
 				  01000000 0 00000000 e2000000 0 01000000>;
@@ -234,7 +246,7 @@
 				interrupt-controller;
 				#address-cells = <0>;
 				#interrupt-cells = <2>;
-				interrupts = <1e 0>;
+				interrupts = <2e 2>;
 				interrupt-parent = <&mpic>;
 				reg = <90c00 80>;
 				built-in;
@@ -275,7 +287,13 @@
 				model = "FCC";
 				device-id = <2>;
 				reg = <91320 20 88500 100 913a0 30>;
-				mac-address = [ 00 00 0C 00 02 FD ];
+				/*
+				 * mac-address is deprecated and will be removed
+				 * in 2.6.25.  Only recent versions of
+				 * U-Boot support local-mac-address, however.
+				 */
+				mac-address = [ 00 00 00 00 00 00 ];
+				local-mac-address = [ 00 00 00 00 00 00 ];
 				clock-setup = <ff00ffff 250000>;
 				rx-clock = <15>;
 				tx-clock = <16>;
@@ -290,7 +308,13 @@
 				model = "FCC";
 				device-id = <3>;
 				reg = <91340 20 88600 100 913d0 30>;
-				mac-address = [ 00 00 0C 00 03 FD ];
+				/*
+				 * mac-address is deprecated and will be removed
+				 * in 2.6.25.  Only recent versions of
+				 * U-Boot support local-mac-address, however.
+				 */
+				mac-address = [ 00 00 00 00 00 00 ];
+				local-mac-address = [ 00 00 00 00 00 00 ];
 				clock-setup = <ffff00ff 3700>;
 				rx-clock = <17>;
 				tx-clock = <18>;
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
index a123ec94..6bb18f2 100644
--- a/arch/powerpc/boot/dts/mpc8568mds.dts
+++ b/arch/powerpc/boot/dts/mpc8568mds.dts
@@ -61,7 +61,7 @@
 			compatible = "fsl,8568-memory-controller";
 			reg = <2000 1000>;
 			interrupt-parent = <&mpic>;
-			interrupts = <2 2>;
+			interrupts = <12 2>;
 		};
 
 		l2-cache-controller@20000 {
@@ -70,14 +70,14 @@
 			cache-line-size = <20>;	// 32 bytes
 			cache-size = <80000>;	// L2, 512K
 			interrupt-parent = <&mpic>;
-			interrupts = <0 2>;
+			interrupts = <10 2>;
 		};
 
 		i2c@3000 {
 			device_type = "i2c";
 			compatible = "fsl-i2c";
 			reg = <3000 100>;
-			interrupts = <1b 2>;
+			interrupts = <2b 2>;
 			interrupt-parent = <&mpic>;
 			dfsrr;
 		};
@@ -86,7 +86,7 @@
 			device_type = "i2c";
 			compatible = "fsl-i2c";
 			reg = <3100 100>;
-			interrupts = <1b 2>;
+			interrupts = <2b 2>;
 			interrupt-parent = <&mpic>;
 			dfsrr;
 		};
@@ -99,25 +99,25 @@
 			reg = <24520 20>;
 			phy0: ethernet-phy@0 {
 				interrupt-parent = <&mpic>;
-				interrupts = <31 1>;
+				interrupts = <1 1>;
 				reg = <0>;
 				device_type = "ethernet-phy";
 			};
 			phy1: ethernet-phy@1 {
 				interrupt-parent = <&mpic>;
-				interrupts = <32 1>;
+				interrupts = <2 1>;
 				reg = <1>;
 				device_type = "ethernet-phy";
 			};
 			phy2: ethernet-phy@2 {
 				interrupt-parent = <&mpic>;
-				interrupts = <31 1>;
+				interrupts = <1 1>;
 				reg = <2>;
 				device_type = "ethernet-phy";
 			};
 			phy3: ethernet-phy@3 {
 				interrupt-parent = <&mpic>;
-				interrupts = <32 1>;
+				interrupts = <2 1>;
 				reg = <3>;
 				device_type = "ethernet-phy";
 			};
@@ -130,8 +130,14 @@
 			model = "eTSEC";
 			compatible = "gianfar";
 			reg = <24000 1000>;
+			/*
+			 * mac-address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
 			mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <d 2 e 2 12 2>;
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <1d 2 1e 2 22 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy2>;
 		};
@@ -143,8 +149,14 @@
 			model = "eTSEC";
 			compatible = "gianfar";
 			reg = <25000 1000>;
-			mac-address = [ 00 00 00 00 00 00];
-			interrupts = <13 2 14 2 18 2>;
+			/*
+			 * mac-address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			mac-address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <23 2 24 2 28 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy3>;
 		};
@@ -154,7 +166,7 @@
 			compatible = "ns16550";
 			reg = <4500 100>;
 			clock-frequency = <0>;
-			interrupts = <1a 2>;
+			interrupts = <2a 2>;
 			interrupt-parent = <&mpic>;
 		};
 
@@ -163,7 +175,7 @@
 			compatible = "ns16550";
 			reg = <4600 100>;
 			clock-frequency = <0>;
-			interrupts = <1a 2>;
+			interrupts = <2a 2>;
 			interrupt-parent = <&mpic>;
 		};
 
@@ -172,7 +184,7 @@
 			model = "SEC2";
 			compatible = "talitos";
 			reg = <30000 f000>;
-			interrupts = <1d 2>;
+			interrupts = <2d 2>;
 			interrupt-parent = <&mpic>;
 			num-channels = <4>;
 			channel-fifo-len = <18>;
@@ -300,7 +312,13 @@
 			reg = <2000 200>;
 			interrupts = <20>;
 			interrupt-parent = <&qeic>;
-			mac-address = [ 00 04 9f 00 23 23 ];
+			/*
+			 * mac-address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			mac-address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock = <0>;
 			tx-clock = <19>;
 			phy-handle = <&qe_phy0>;
@@ -316,7 +334,13 @@
 			reg = <3000 200>;
 			interrupts = <21>;
 			interrupt-parent = <&qeic>;
-			mac-address = [ 00 11 22 33 44 55 ];
+			/*
+			 * mac-address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			mac-address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock = <0>;
 			tx-clock = <14>;
 			phy-handle = <&qe_phy1>;
@@ -335,25 +359,25 @@
 			 * gianfar's MDIO bus */
 			qe_phy0: ethernet-phy@00 {
 				interrupt-parent = <&mpic>;
-				interrupts = <31 1>;
+				interrupts = <1 1>;
 				reg = <0>;
 				device_type = "ethernet-phy";
 			};
 			qe_phy1: ethernet-phy@01 {
 				interrupt-parent = <&mpic>;
-				interrupts = <32 1>;
+				interrupts = <2 1>;
 				reg = <1>;
 				device_type = "ethernet-phy";
 			};
 			qe_phy2: ethernet-phy@02 {
 				interrupt-parent = <&mpic>;
-				interrupts = <31 1>;
+				interrupts = <1 1>;
 				reg = <2>;
 				device_type = "ethernet-phy";
 			};
 			qe_phy3: ethernet-phy@03 {
 				interrupt-parent = <&mpic>;
-				interrupts = <32 1>;
+				interrupts = <2 1>;
 				reg = <3>;
 				device_type = "ethernet-phy";
 			};
@@ -367,7 +391,7 @@
 			reg = <80 80>;
 			built-in;
 			big-endian;
-			interrupts = <1e 2 1e 2>; //high:30 low:30
+			interrupts = <2e 2 2e 2>; //high:30 low:30
 			interrupt-parent = <&mpic>;
 		};
 
diff --git a/arch/powerpc/boot/dts/mpc8641_hpcn.dts b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
index 260b264..6a78a2b 100644
--- a/arch/powerpc/boot/dts/mpc8641_hpcn.dts
+++ b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
@@ -56,8 +56,12 @@
 		#size-cells = <1>;
 		#interrupt-cells = <2>;
 		device_type = "soc";
-		ranges = <0 f8000000 00100000>;
-		reg = <f8000000 00100000>;	// CCSRBAR 1M
+		ranges = <00001000 f8001000 000ff000
+			  80000000 80000000 20000000
+			  e2000000 e2000000 00100000
+			  a0000000 a0000000 20000000
+			  e3000000 e3000000 00100000>;
+		reg = <f8000000 00001000>;	// CCSRBAR
 		bus-frequency = <0>;
 
 		i2c@3000 {
@@ -86,25 +90,25 @@
 			reg = <24520 20>;
 			phy0: ethernet-phy@0 {
 				interrupt-parent = <&mpic>;
-				interrupts = <4a 1>;
+				interrupts = <a 1>;
 				reg = <0>;
 				device_type = "ethernet-phy";
 			};
 			phy1: ethernet-phy@1 {
 				interrupt-parent = <&mpic>;
-				interrupts = <4a 1>;
+				interrupts = <a 1>;
 				reg = <1>;
 				device_type = "ethernet-phy";
 			};
 			phy2: ethernet-phy@2 {
 				interrupt-parent = <&mpic>;
-				interrupts = <4a 1>;
+				interrupts = <a 1>;
 				reg = <2>;
 				device_type = "ethernet-phy";
 			};
 			phy3: ethernet-phy@3 {
 				interrupt-parent = <&mpic>;
-				interrupts = <4a 1>;
+				interrupts = <a 1>;
 				reg = <3>;
 				device_type = "ethernet-phy";
 			};
@@ -117,10 +121,17 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <24000 1000>;
-			mac-address = [ 00 E0 0C 00 73 00 ];
+			/*
+			 * mac-address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			mac-address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
 			interrupts = <1d 2 1e 2 22 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy0>;
+			phy-connection-type = "rgmii-id";
 		};
 
 		ethernet@25000 {
@@ -130,10 +141,17 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <25000 1000>;
-			mac-address = [ 00 E0 0C 00 73 01 ];
+			/*
+			 * mac-address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			mac-address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
 			interrupts = <23 2 24 2 28 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy1>;
+			phy-connection-type = "rgmii-id";
 		};
 		
 		ethernet@26000 {
@@ -143,10 +161,17 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <26000 1000>;
-			mac-address = [ 00 E0 0C 00 02 FD ];
+			/*
+			 * mac-address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			mac-address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
 			interrupts = <1F 2 20 2 21 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy2>;
+			phy-connection-type = "rgmii-id";
 		};
 
 		ethernet@27000 {
@@ -156,10 +181,17 @@
 			model = "TSEC";
 			compatible = "gianfar";
 			reg = <27000 1000>;
-			mac-address = [ 00 E0 0C 00 03 FD ];
+			/*
+			 * mac-address is deprecated and will be removed
+			 * in 2.6.25.  Only recent versions of
+			 * U-Boot support local-mac-address, however.
+			 */
+			mac-address = [ 00 00 00 00 00 00 ];
+			local-mac-address = [ 00 00 00 00 00 00 ];
 			interrupts = <25 2 26 2 27 2>;
 			interrupt-parent = <&mpic>;
 			phy-handle = <&phy3>;
+			phy-connection-type = "rgmii-id";
 		};
 		serial@4500 {
 			device_type = "serial";
@@ -186,7 +218,7 @@
 			#size-cells = <2>;
 			#address-cells = <3>;
 			reg = <8000 1000>;
-			bus-range = <0 fe>;
+			bus-range = <0 ff>;
 			ranges = <02000000 0 80000000 80000000 0 20000000
 				  01000000 0 00000000 e2000000 0 00100000>;
 			clock-frequency = <1fca055>;
@@ -285,17 +317,84 @@
 				f800 0 0 3 &i8259 0 0
 				f800 0 0 4 &i8259 0 0
 				>;
-			i8259: i8259@4d0 {
-				clock-frequency = <0>;
-				interrupt-controller;
-				device_type = "interrupt-controller";
-				#address-cells = <0>;
-				#interrupt-cells = <2>;
-				built-in;
-				compatible = "chrp,iic";
-                	        big-endian;
-				interrupts = <49 2>;
-				interrupt-parent = <&mpic>;
+			uli1575@0 {
+				reg = <0 0 0 0 0>;
+				#size-cells = <2>;
+				#address-cells = <3>;
+				ranges = <02000000 0 80000000
+					  02000000 0 80000000
+					  0 20000000
+					  01000000 0 00000000
+					  01000000 0 00000000
+					  0 00100000>;
+
+				pci_bridge@0 {
+					reg = <0 0 0 0 0>;
+					#size-cells = <2>;
+					#address-cells = <3>;
+					ranges = <02000000 0 80000000
+						  02000000 0 80000000
+						  0 20000000
+						  01000000 0 00000000
+						  01000000 0 00000000
+						  0 00100000>;
+
+					isa@1e {
+						device_type = "isa";
+						#interrupt-cells = <2>;
+						#size-cells = <1>;
+						#address-cells = <2>;
+						reg = <f000 0 0 0 0>;
+						ranges = <1 0 01000000 0 0
+							  00001000>;
+						interrupt-parent = <&i8259>;
+
+						i8259: interrupt-controller@20 {
+							reg = <1 20 2
+							       1 a0 2
+							       1 4d0 2>;
+							clock-frequency = <0>;
+							interrupt-controller;
+							device_type = "interrupt-controller";
+							#address-cells = <0>;
+							#interrupt-cells = <2>;
+							built-in;
+							compatible = "chrp,iic";
+							interrupts = <9 2>;
+							interrupt-parent =
+								<&mpic>;
+						};
+
+						i8042@60 {
+							#size-cells = <0>;
+							#address-cells = <1>;
+							reg = <1 60 1 1 64 1>;
+							interrupts = <1 3 c 3>;
+							interrupt-parent =
+								<&i8259>;
+
+							keyboard@0 {
+								reg = <0>;
+								compatible = "pnpPNP,303";
+							};
+
+							mouse@1 {
+								reg = <1>;
+								compatible = "pnpPNP,f03";
+							};
+						};
+
+						rtc@70 {
+							compatible =
+								"pnpPNP,b00";
+							reg = <1 70 2>;
+						};
+
+						gpio@400 {
+							reg = <1 400 80>;
+						};
+					};
+				};
 			};
 
 		};
@@ -316,10 +415,10 @@
 			interrupt-map-mask = <f800 0 0 7>;
 			interrupt-map = <
 				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 44 1
-				0000 0 0 2 &mpic 45 1
-				0000 0 0 3 &mpic 46 1
-				0000 0 0 4 &mpic 47 1
+				0000 0 0 1 &mpic 4 1
+				0000 0 0 2 &mpic 5 1
+				0000 0 0 3 &mpic 6 1
+				0000 0 0 4 &mpic 7 1
 				>;
 		};
 
diff --git a/arch/powerpc/boot/dts/mpc866ads.dts b/arch/powerpc/boot/dts/mpc866ads.dts
index c0d06fd..e5e7726 100644
--- a/arch/powerpc/boot/dts/mpc866ads.dts
+++ b/arch/powerpc/boot/dts/mpc866ads.dts
@@ -15,12 +15,10 @@
 	compatible = "mpc8xx";
 	#address-cells = <1>;
 	#size-cells = <1>;
-	linux,phandle = <100>;
 
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
-		linux,phandle = <200>;
 
 		PowerPC,866@0 {
 			device_type = "cpu";
@@ -34,14 +32,12 @@
 			clock-frequency = <0>;
 			32-bit;
 			interrupts = <f 2>;	// decrementer interrupt
-			interrupt-parent = <ff000000>;
-			linux,phandle = <201>;
+			interrupt-parent = <&Mpc8xx_pic>;
 		};
 	};
 
 	memory {
 		device_type = "memory";
-		linux,phandle = <300>;
 		reg = <00000000 800000>;
 	};
 
@@ -57,11 +53,9 @@
 			device_type = "mdio";
 			compatible = "fs_enet";
 			reg = <e80 8>;
-			linux,phandle = <e80>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			ethernet-phy@f {
-				linux,phandle = <e800f>;
+			phy: ethernet-phy@f {
 				reg = <f>;
 				device_type = "ethernet-phy";
 			};
@@ -75,12 +69,11 @@
 			reg = <e00 188>;
 			mac-address = [ 00 00 0C 00 01 FD ];
 			interrupts = <3 1>;
-			interrupt-parent = <ff000000>;
-			phy-handle = <e800f>;
+			interrupt-parent = <&Mpc8xx_pic>;
+			phy-handle = <&Phy>;
 		};
 
-		pic@ff000000 {
-			linux,phandle = <ff000000>;
+		mpc8xx_pic: pic@ff000000 {
 			interrupt-controller;
 			#address-cells = <0>;
 			#interrupt-cells = <2>;
@@ -91,7 +84,6 @@
 		};
 
 		cpm@ff000000 {
-			linux,phandle = <ff000000>;
 			#address-cells = <1>;
 			#size-cells = <1>;
 			#interrupt-cells = <2>;
@@ -102,15 +94,14 @@
 			command-proc = <9c0>;
 			brg-frequency = <0>;
 			interrupts = <0 2>;	// cpm error interrupt
-			interrupt-parent = <930>;
+			interrupt-parent = <&Cpm_pic>;
 
-			pic@930 {
-				linux,phandle = <930>;
+			cpm_pic: pic@930 {
 				interrupt-controller;
 				#address-cells = <0>;
 				#interrupt-cells = <2>;
 				interrupts = <5 2 0 2>;
-				interrupt-parent = <ff000000>;
+				interrupt-parent = <&Mpc8xx_pic>;
 				reg = <930 20>;
 				built-in;
 				device_type = "cpm-pic";
@@ -128,7 +119,7 @@
 				tx-clock = <1>;
 				current-speed = <0>;
 				interrupts = <4 3>;
-				interrupt-parent = <930>;
+				interrupt-parent = <&Cpm_pic>;
 			};
 
 			smc@a90 {
@@ -142,7 +133,7 @@
 				tx-clock = <2>;
 				current-speed = <0>;
 				interrupts = <3 3>;
-				interrupt-parent = <930>;
+				interrupt-parent = <&Cpm_pic>;
 			};
 
 			scc@a00 {
@@ -153,7 +144,7 @@
 				reg = <a00 18 3c00 80>;
 				mac-address = [ 00 00 0C 00 03 FD ];
 				interrupts = <1e 3>;
-				interrupt-parent = <930>;
+				interrupt-parent = <&Cpm_pic>;
 			};
 		};
 	};
diff --git a/arch/powerpc/boot/dts/mpc885ads.dts b/arch/powerpc/boot/dts/mpc885ads.dts
index 110bf61..dc7ab9c 100644
--- a/arch/powerpc/boot/dts/mpc885ads.dts
+++ b/arch/powerpc/boot/dts/mpc885ads.dts
@@ -15,12 +15,10 @@
 	compatible = "mpc8xx";
 	#address-cells = <1>;
 	#size-cells = <1>;
-	linux,phandle = <100>;
 
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
-		linux,phandle = <200>;
 
 		PowerPC,885@0 {
 			device_type = "cpu";
@@ -34,14 +32,12 @@
 			clock-frequency = <0>;
 			32-bit;
 			interrupts = <f 2>;	// decrementer interrupt
-			interrupt-parent = <ff000000>;
-			linux,phandle = <201>;
+			interrupt-parent = <&Mpc8xx_pic>;
 		};
 	};
 
 	memory {
 		device_type = "memory";
-		linux,phandle = <300>;
 		reg = <00000000 800000>;
 	};
 
@@ -57,21 +53,17 @@
 			device_type = "mdio";
 			compatible = "fs_enet";
 			reg = <e80 8>;
-			linux,phandle = <e80>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			ethernet-phy@0 {
-				linux,phandle = <e8000>;
+			Phy0: ethernet-phy@0 {
 				reg = <0>;
 				device_type = "ethernet-phy";
 			};
-			ethernet-phy@1 {
-				linux,phandle = <e8001>;
+			Phy1: ethernet-phy@1 {
 				reg = <1>;
 				device_type = "ethernet-phy";
 			};
-			ethernet-phy@2 {
-				linux,phandle = <e8002>;
+			Phy2: ethernet-phy@2 {
 				reg = <2>;
 				device_type = "ethernet-phy";
 			};
@@ -85,8 +77,8 @@
 			reg = <e00 188>;
 			mac-address = [ 00 00 0C 00 01 FD ];
 			interrupts = <3 1>;
-			interrupt-parent = <ff000000>;
-			phy-handle = <e8000>;
+			interrupt-parent = <&Mpc8xx_pic>;
+			phy-handle = <&Phy1>;
 		};
 
 		fec@1e00 {
@@ -97,12 +89,11 @@
 			reg = <1e00 188>;
 			mac-address = [ 00 00 0C 00 02 FD ];
 			interrupts = <7 1>;
-			interrupt-parent = <ff000000>;
-			phy-handle = <e8001>;
+			interrupt-parent = <&Mpc8xx_pic>;
+			phy-handle = <&Phy2>;
 		};
 
-		pic@ff000000 {
-			linux,phandle = <ff000000>;
+		Mpc8xx_pic: pic@ff000000 {
 			interrupt-controller;
 			#address-cells = <0>;
 			#interrupt-cells = <2>;
@@ -112,8 +103,18 @@
 			compatible = "CPM";
 		};
 
+		pcmcia@0080 {
+			#address-cells = <3>;
+			#interrupt-cells = <1>;
+			#size-cells = <2>;
+			compatible = "fsl,pq-pcmcia";
+			device_type = "pcmcia";
+			reg = <80 80>;
+			interrupt-parent = <&Mpc8xx_pic>;
+			interrupts = <d 1>;
+		};
+
 		cpm@ff000000 {
-			linux,phandle = <ff000000>;
 			#address-cells = <1>;
 			#size-cells = <1>;
 			#interrupt-cells = <2>;
@@ -124,15 +125,14 @@
 			command-proc = <9c0>;
 			brg-frequency = <0>;
 			interrupts = <0 2>;	// cpm error interrupt
-			interrupt-parent = <930>;
+			interrupt-parent = <&Cpm_pic>;
 
-			pic@930 {
-				linux,phandle = <930>;
+			Cpm_pic: pic@930 {
 				interrupt-controller;
 				#address-cells = <0>;
 				#interrupt-cells = <2>;
 				interrupts = <5 2 0 2>;
-				interrupt-parent = <ff000000>;
+				interrupt-parent = <&Mpc8xx_pic>;
 				reg = <930 20>;
 				built-in;
 				device_type = "cpm-pic";
@@ -150,7 +150,7 @@
 				tx-clock = <1>;
 				current-speed = <0>;
 				interrupts = <4 3>;
-				interrupt-parent = <930>;
+				interrupt-parent = <&Cpm_pic>;
 			};
 
 			smc@a90 {
@@ -164,7 +164,7 @@
 				tx-clock = <2>;
 				current-speed = <0>;
 				interrupts = <3 3>;
-				interrupt-parent = <930>;
+				interrupt-parent = <&Cpm_pic>;
 			};
 
 			scc@a40 {
@@ -175,8 +175,8 @@
 				reg = <a40 18 3e00 80>;
 				mac-address = [ 00 00 0C 00 03 FD ];
 				interrupts = <1c 3>;
-				interrupt-parent = <930>;
-				phy-handle = <e8002>;
+				interrupt-parent = <&Cpm_pic>;
+				phy-handle = <&Phy2>;
 			};
 		};
 	};
diff --git a/arch/powerpc/boot/dts/prpmc2800.dts b/arch/powerpc/boot/dts/prpmc2800.dts
index 568965a..699d0df 100644
--- a/arch/powerpc/boot/dts/prpmc2800.dts
+++ b/arch/powerpc/boot/dts/prpmc2800.dts
@@ -309,7 +309,7 @@
 	};
 
 	chosen {
-		bootargs = "ip=on console=ttyMM0";
+		bootargs = "ip=on";
 		linux,stdout-path = "/mv64x60@f1000000/mpsc@8000";
 	};
 };
diff --git a/arch/powerpc/boot/dts/ps3.dts b/arch/powerpc/boot/dts/ps3.dts
new file mode 100644
index 0000000..379ded2
--- /dev/null
+++ b/arch/powerpc/boot/dts/ps3.dts
@@ -0,0 +1,68 @@
+/*
+ *  PS3 Game Console device tree.
+ *
+ *  Copyright (C) 2007 Sony Computer Entertainment Inc.
+ *  Copyright 2007 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/ {
+	model = "SonyPS3";
+	compatible = "sony,ps3";
+	#size-cells = <2>;
+	#address-cells = <2>;
+
+	chosen {
+	};
+
+	/*
+	 * We'll get the size of the bootmem block from lv1 after startup,
+	 * so we'll put a null entry here.
+	 */
+
+	memory {
+		device_type = "memory";
+		reg = <0 0 0 0>;
+	};
+
+	/*
+	 * The boot cpu is always zero for PS3.
+	 *
+	 * dtc expects clock-frequency and timebase-frequency entries, so
+	 * we'll put null entries here.  These will be initialized after
+	 * startup with data from lv1.
+	 *
+	 * Seems the only way currently to indicate a processor has multiple
+	 * threads is with an ibm,ppc-interrupt-server#s entry.  We'll put one
+	 * here so we can bring up both of ours.  See smp_setup_cpu_maps().
+	 */
+
+	cpus {
+		#size-cells = <0>;
+		#address-cells = <1>;
+
+		cpu@0 {
+			device_type = "cpu";
+			reg = <0>;
+			ibm,ppc-interrupt-server#s = <0 1>;
+			clock-frequency = <0>;
+			timebase-frequency = <0>;
+			i-cache-size = <8000>;
+			d-cache-size = <8000>;
+			i-cache-line-size = <80>;
+			d-cache-line-size = <80>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/ebony.c b/arch/powerpc/boot/ebony.c
index b1251ee..75daeda 100644
--- a/arch/powerpc/boot/ebony.c
+++ b/arch/powerpc/boot/ebony.c
@@ -100,28 +100,13 @@
 	ibm440gp_fixup_clocks(sysclk, 6 * 1843200);
 	ibm44x_fixup_memsize();
 	dt_fixup_mac_addresses(ebony_mac0, ebony_mac1);
-}
-
-#define SPRN_DBCR0		0x134
-#define   DBCR0_RST_SYSTEM	0x30000000
-
-static void ebony_exit(void)
-{
-	unsigned long tmp;
-
-	asm volatile (
-		"mfspr	%0,%1\n"
-		"oris	%0,%0,%2@h\n"
-		"mtspr	%1,%0"
-		: "=&r"(tmp) : "i"(SPRN_DBCR0), "i"(DBCR0_RST_SYSTEM)
-		);
-
+	ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
 }
 
 void ebony_init(void *mac0, void *mac1)
 {
 	platform_ops.fixups = ebony_fixups;
-	platform_ops.exit = ebony_exit;
+	platform_ops.exit = ibm44x_dbcr_reset;
 	ebony_mac0 = mac0;
 	ebony_mac1 = mac1;
 	ft_init(_dtb_start, _dtb_end - _dtb_start, 32);
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index 56b56a8..416dc38 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -36,8 +36,6 @@
 	unsigned long size;
 };
 
-typedef void (*kernel_entry_t)(unsigned long, unsigned long, void *);
-
 #undef DEBUG
 
 static struct addr_range prep_kernel(void)
diff --git a/arch/powerpc/boot/of.c b/arch/powerpc/boot/of.c
index d16ee3e..385e08b 100644
--- a/arch/powerpc/boot/of.c
+++ b/arch/powerpc/boot/of.c
@@ -15,8 +15,7 @@
 #include "page.h"
 #include "ops.h"
 
-typedef void *ihandle;
-typedef void *phandle;
+#include "of.h"
 
 extern char _end[];
 
@@ -25,154 +24,10 @@
 #define RAM_END		(512<<20)	/* Fixme: use OF */
 #define	ONE_MB		0x100000
 
-int (*prom) (void *);
 
 
 static unsigned long claim_base;
 
-static int call_prom(const char *service, int nargs, int nret, ...)
-{
-	int i;
-	struct prom_args {
-		const char *service;
-		int nargs;
-		int nret;
-		unsigned int args[12];
-	} args;
-	va_list list;
-
-	args.service = service;
-	args.nargs = nargs;
-	args.nret = nret;
-
-	va_start(list, nret);
-	for (i = 0; i < nargs; i++)
-		args.args[i] = va_arg(list, unsigned int);
-	va_end(list);
-
-	for (i = 0; i < nret; i++)
-		args.args[nargs+i] = 0;
-
-	if (prom(&args) < 0)
-		return -1;
-
-	return (nret > 0)? args.args[nargs]: 0;
-}
-
-static int call_prom_ret(const char *service, int nargs, int nret,
-		  unsigned int *rets, ...)
-{
-	int i;
-	struct prom_args {
-		const char *service;
-		int nargs;
-		int nret;
-		unsigned int args[12];
-	} args;
-	va_list list;
-
-	args.service = service;
-	args.nargs = nargs;
-	args.nret = nret;
-
-	va_start(list, rets);
-	for (i = 0; i < nargs; i++)
-		args.args[i] = va_arg(list, unsigned int);
-	va_end(list);
-
-	for (i = 0; i < nret; i++)
-		args.args[nargs+i] = 0;
-
-	if (prom(&args) < 0)
-		return -1;
-
-	if (rets != (void *) 0)
-		for (i = 1; i < nret; ++i)
-			rets[i-1] = args.args[nargs+i];
-
-	return (nret > 0)? args.args[nargs]: 0;
-}
-
-/*
- * Older OF's require that when claiming a specific range of addresses,
- * we claim the physical space in the /memory node and the virtual
- * space in the chosen mmu node, and then do a map operation to
- * map virtual to physical.
- */
-static int need_map = -1;
-static ihandle chosen_mmu;
-static phandle memory;
-
-/* returns true if s2 is a prefix of s1 */
-static int string_match(const char *s1, const char *s2)
-{
-	for (; *s2; ++s2)
-		if (*s1++ != *s2)
-			return 0;
-	return 1;
-}
-
-static int check_of_version(void)
-{
-	phandle oprom, chosen;
-	char version[64];
-
-	oprom = finddevice("/openprom");
-	if (oprom == (phandle) -1)
-		return 0;
-	if (getprop(oprom, "model", version, sizeof(version)) <= 0)
-		return 0;
-	version[sizeof(version)-1] = 0;
-	printf("OF version = '%s'\r\n", version);
-	if (!string_match(version, "Open Firmware, 1.")
-	    && !string_match(version, "FirmWorks,3."))
-		return 0;
-	chosen = finddevice("/chosen");
-	if (chosen == (phandle) -1) {
-		chosen = finddevice("/chosen@0");
-		if (chosen == (phandle) -1) {
-			printf("no chosen\n");
-			return 0;
-		}
-	}
-	if (getprop(chosen, "mmu", &chosen_mmu, sizeof(chosen_mmu)) <= 0) {
-		printf("no mmu\n");
-		return 0;
-	}
-	memory = (ihandle) call_prom("open", 1, 1, "/memory");
-	if (memory == (ihandle) -1) {
-		memory = (ihandle) call_prom("open", 1, 1, "/memory@0");
-		if (memory == (ihandle) -1) {
-			printf("no memory node\n");
-			return 0;
-		}
-	}
-	printf("old OF detected\r\n");
-	return 1;
-}
-
-static void *claim(unsigned long virt, unsigned long size, unsigned long align)
-{
-	int ret;
-	unsigned int result;
-
-	if (need_map < 0)
-		need_map = check_of_version();
-	if (align || !need_map)
-		return (void *) call_prom("claim", 3, 1, virt, size, align);
-
-	ret = call_prom_ret("call-method", 5, 2, &result, "claim", memory,
-			    align, size, virt);
-	if (ret != 0 || result == -1)
-		return (void *) -1;
-	ret = call_prom_ret("call-method", 5, 2, &result, "claim", chosen_mmu,
-			    align, size, virt);
-	/* 0x12 == coherent + read/write */
-	ret = call_prom("call-method", 6, 1, "map", chosen_mmu,
-			0x12, size, virt, virt);
-	return (void *) virt;
-}
-
 static void *of_try_claim(unsigned long size)
 {
 	unsigned long addr = 0;
@@ -184,7 +39,7 @@
 #ifdef DEBUG
 		printf("    trying: 0x%08lx\n\r", claim_base);
 #endif
-		addr = (unsigned long)claim(claim_base, size, 0);
+		addr = (unsigned long)of_claim(claim_base, size, 0);
 		if ((void *)addr != (void *)-1)
 			break;
 	}
@@ -208,64 +63,6 @@
 	}
 }
 
-static void *of_vmlinux_alloc(unsigned long size)
-{
-	void *p = malloc(size);
-
-	if (!p)
-		fatal("Can't allocate memory for kernel image!\n\r");
-
-	return p;
-}
-
-static void of_exit(void)
-{
-	call_prom("exit", 0, 0);
-}
-
-/*
- * OF device tree routines
- */
-static void *of_finddevice(const char *name)
-{
-	return (phandle) call_prom("finddevice", 1, 1, name);
-}
-
-static int of_getprop(const void *phandle, const char *name, void *buf,
-		const int buflen)
-{
-	return call_prom("getprop", 4, 1, phandle, name, buf, buflen);
-}
-
-static int of_setprop(const void *phandle, const char *name, const void *buf,
-		const int buflen)
-{
-	return call_prom("setprop", 4, 1, phandle, name, buf, buflen);
-}
-
-/*
- * OF console routines
- */
-static void *of_stdout_handle;
-
-static int of_console_open(void)
-{
-	void *devp;
-
-	if (((devp = finddevice("/chosen")) != NULL)
-			&& (getprop(devp, "stdout", &of_stdout_handle,
-				sizeof(of_stdout_handle))
-				== sizeof(of_stdout_handle)))
-		return 0;
-
-	return -1;
-}
-
-static void of_console_write(char *buf, int len)
-{
-	call_prom("write", 3, 1, of_stdout_handle, buf, len);
-}
-
 void platform_init(unsigned long a1, unsigned long a2, void *promptr)
 {
 	platform_ops.image_hdr = of_image_hdr;
@@ -277,10 +74,9 @@
 	dt_ops.getprop = of_getprop;
 	dt_ops.setprop = of_setprop;
 
-	console_ops.open = of_console_open;
-	console_ops.write = of_console_write;
+	of_console_init();
 
-	prom = (int (*)(void *))promptr;
+	of_init(promptr);
 	loader_info.promptr = promptr;
 	if (a1 && a2 && a2 != 0xdeadbeef) {
 		loader_info.initrd_addr = a1;
diff --git a/arch/powerpc/boot/of.h b/arch/powerpc/boot/of.h
new file mode 100644
index 0000000..e4c68f7
--- /dev/null
+++ b/arch/powerpc/boot/of.h
@@ -0,0 +1,21 @@
+#ifndef _PPC_BOOT_OF_H_
+#define _PPC_BOOT_OF_H_
+
+typedef void *phandle;
+typedef void *ihandle;
+
+void of_init(void *promptr);
+int of_call_prom(const char *service, int nargs, int nret, ...);
+void *of_claim(unsigned long virt, unsigned long size, unsigned long align);
+void *of_vmlinux_alloc(unsigned long size);
+void of_exit(void);
+void *of_finddevice(const char *name);
+int of_getprop(const void *phandle, const char *name, void *buf,
+	       const int buflen);
+int of_setprop(const void *phandle, const char *name, const void *buf,
+	       const int buflen);
+
+/* Console functions */
+void of_console_init(void);
+
+#endif /* _PPC_BOOT_OF_H_ */
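
For reference, a minimal sketch (not part of the patch) of how a bootwrapper
platform file is expected to use the helpers declared above once the OF code is
split into oflib.c and ofconsole.c.  It only wires up the hooks that are visible
in the of.c and ps3.c changes in this series; the function name is invented for
illustration, and the remaining hooks (e.g. vmlinux_alloc) are assumed to be
wired in the unchanged parts of of.c.

#include "ops.h"
#include "of.h"

/* Hypothetical platform setup built on the refactored OF helpers. */
static void example_of_platform_init(void *promptr)
{
	of_init(promptr);		/* record the PROM entry point */
	of_console_init();		/* hooks console_ops.open/.write */

	/* Device tree access goes through the exported of_* wrappers. */
	dt_ops.finddevice = of_finddevice;
	dt_ops.getprop = of_getprop;
	dt_ops.setprop = of_setprop;

	platform_ops.exit = of_exit;	/* as ps3.c does with ps3_exit */
}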
diff --git a/arch/powerpc/boot/ofconsole.c b/arch/powerpc/boot/ofconsole.c
new file mode 100644
index 0000000..ce0e024
--- /dev/null
+++ b/arch/powerpc/boot/ofconsole.c
@@ -0,0 +1,45 @@
+/*
+ * OF console routines
+ *
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stddef.h>
+#include "types.h"
+#include "elf.h"
+#include "string.h"
+#include "stdio.h"
+#include "page.h"
+#include "ops.h"
+
+#include "of.h"
+
+static void *of_stdout_handle;
+
+static int of_console_open(void)
+{
+	void *devp;
+
+	if (((devp = of_finddevice("/chosen")) != NULL)
+	    && (of_getprop(devp, "stdout", &of_stdout_handle,
+			   sizeof(of_stdout_handle))
+		== sizeof(of_stdout_handle)))
+		return 0;
+
+	return -1;
+}
+
+static void of_console_write(const char *buf, int len)
+{
+	of_call_prom("write", 3, 1, of_stdout_handle, buf, len);
+}
+
+void of_console_init(void)
+{
+	console_ops.open = of_console_open;
+	console_ops.write = of_console_write;
+}
diff --git a/arch/powerpc/boot/oflib.c b/arch/powerpc/boot/oflib.c
new file mode 100644
index 0000000..95b8fd6
--- /dev/null
+++ b/arch/powerpc/boot/oflib.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stddef.h>
+#include "types.h"
+#include "elf.h"
+#include "string.h"
+#include "stdio.h"
+#include "page.h"
+#include "ops.h"
+
+#include "of.h"
+
+static int (*prom) (void *);
+
+void of_init(void *promptr)
+{
+	prom = (int (*)(void *))promptr;
+}
+
+int of_call_prom(const char *service, int nargs, int nret, ...)
+{
+	int i;
+	struct prom_args {
+		const char *service;
+		int nargs;
+		int nret;
+		unsigned int args[12];
+	} args;
+	va_list list;
+
+	args.service = service;
+	args.nargs = nargs;
+	args.nret = nret;
+
+	va_start(list, nret);
+	for (i = 0; i < nargs; i++)
+		args.args[i] = va_arg(list, unsigned int);
+	va_end(list);
+
+	for (i = 0; i < nret; i++)
+		args.args[nargs+i] = 0;
+
+	if (prom(&args) < 0)
+		return -1;
+
+	return (nret > 0)? args.args[nargs]: 0;
+}
+
+static int of_call_prom_ret(const char *service, int nargs, int nret,
+			    unsigned int *rets, ...)
+{
+	int i;
+	struct prom_args {
+		const char *service;
+		int nargs;
+		int nret;
+		unsigned int args[12];
+	} args;
+	va_list list;
+
+	args.service = service;
+	args.nargs = nargs;
+	args.nret = nret;
+
+	va_start(list, rets);
+	for (i = 0; i < nargs; i++)
+		args.args[i] = va_arg(list, unsigned int);
+	va_end(list);
+
+	for (i = 0; i < nret; i++)
+		args.args[nargs+i] = 0;
+
+	if (prom(&args) < 0)
+		return -1;
+
+	if (rets != (void *) 0)
+		for (i = 1; i < nret; ++i)
+			rets[i-1] = args.args[nargs+i];
+
+	return (nret > 0)? args.args[nargs]: 0;
+}
+
+/* returns true if s2 is a prefix of s1 */
+static int string_match(const char *s1, const char *s2)
+{
+	for (; *s2; ++s2)
+		if (*s1++ != *s2)
+			return 0;
+	return 1;
+}
+
+/*
+ * Older OF's require that when claiming a specific range of addresses,
+ * we claim the physical space in the /memory node and the virtual
+ * space in the chosen mmu node, and then do a map operation to
+ * map virtual to physical.
+ */
+static int need_map = -1;
+static ihandle chosen_mmu;
+static phandle memory;
+
+static int check_of_version(void)
+{
+	phandle oprom, chosen;
+	char version[64];
+
+	oprom = of_finddevice("/openprom");
+	if (oprom == (phandle) -1)
+		return 0;
+	if (of_getprop(oprom, "model", version, sizeof(version)) <= 0)
+		return 0;
+	version[sizeof(version)-1] = 0;
+	printf("OF version = '%s'\r\n", version);
+	if (!string_match(version, "Open Firmware, 1.")
+	    && !string_match(version, "FirmWorks,3."))
+		return 0;
+	chosen = of_finddevice("/chosen");
+	if (chosen == (phandle) -1) {
+		chosen = of_finddevice("/chosen@0");
+		if (chosen == (phandle) -1) {
+			printf("no chosen\n");
+			return 0;
+		}
+	}
+	if (of_getprop(chosen, "mmu", &chosen_mmu, sizeof(chosen_mmu)) <= 0) {
+		printf("no mmu\n");
+		return 0;
+	}
+	memory = (ihandle) of_call_prom("open", 1, 1, "/memory");
+	if (memory == (ihandle) -1) {
+		memory = (ihandle) of_call_prom("open", 1, 1, "/memory@0");
+		if (memory == (ihandle) -1) {
+			printf("no memory node\n");
+			return 0;
+		}
+	}
+	printf("old OF detected\r\n");
+	return 1;
+}
+
+void *of_claim(unsigned long virt, unsigned long size, unsigned long align)
+{
+	int ret;
+	unsigned int result;
+
+	if (need_map < 0)
+		need_map = check_of_version();
+	if (align || !need_map)
+		return (void *) of_call_prom("claim", 3, 1, virt, size, align);
+
+	ret = of_call_prom_ret("call-method", 5, 2, &result, "claim", memory,
+			       align, size, virt);
+	if (ret != 0 || result == -1)
+		return (void *) -1;
+	ret = of_call_prom_ret("call-method", 5, 2, &result, "claim", chosen_mmu,
+			       align, size, virt);
+	/* 0x12 == coherent + read/write */
+	ret = of_call_prom("call-method", 6, 1, "map", chosen_mmu,
+			   0x12, size, virt, virt);
+	return (void *) virt;
+}
+
+void *of_vmlinux_alloc(unsigned long size)
+{
+	void *p = malloc(size);
+
+	if (!p)
+		fatal("Can't allocate memory for kernel image!\n\r");
+
+	return p;
+}
+
+void of_exit(void)
+{
+	of_call_prom("exit", 0, 0);
+}
+
+/*
+ * OF device tree routines
+ */
+void *of_finddevice(const char *name)
+{
+	return (phandle) of_call_prom("finddevice", 1, 1, name);
+}
+
+int of_getprop(const void *phandle, const char *name, void *buf,
+	       const int buflen)
+{
+	return of_call_prom("getprop", 4, 1, phandle, name, buf, buflen);
+}
+
+int of_setprop(const void *phandle, const char *name, const void *buf,
+	       const int buflen)
+{
+	return of_call_prom("setprop", 4, 1, phandle, name, buf, buflen);
+}
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index 959124f..8607706 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -19,6 +19,8 @@
 #define	MAX_PATH_LEN		256
 #define	MAX_PROP_LEN		256 /* What should this be? */
 
+typedef void (*kernel_entry_t)(unsigned long r3, unsigned long r4, void *r5);
+
 /* Platform specific operations */
 struct platform_ops {
 	void	(*fixups)(void);
@@ -51,7 +53,7 @@
 /* Console operations */
 struct console_ops {
 	int	(*open)(void);
-	void	(*write)(char *buf, int len);
+	void	(*write)(const char *buf, int len);
 	void	(*edit_cmdline)(char *buf, int len);
 	void	(*close)(void);
 	void	*data;
diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S
new file mode 100644
index 0000000..1a6d64a
--- /dev/null
+++ b/arch/powerpc/boot/ps3-head.S
@@ -0,0 +1,80 @@
+/*
+ *  PS3 bootwrapper entry.
+ *
+ *  Copyright (C) 2007 Sony Computer Entertainment Inc.
+ *  Copyright 2007 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "ppc_asm.h"
+
+	.text
+
+/*
+ * __system_reset_overlay - The PS3 first stage entry.
+ *
+ * The bootwrapper build script copies the 0x100 bytes at symbol
+ * __system_reset_overlay to offset 0x100 of the rom image.
+ *
+ * The PS3 has a single processor with two threads.
+ */
+
+	.globl __system_reset_overlay
+__system_reset_overlay:
+
+	/* Switch to 32-bit mode. */
+
+	mfmsr	r9
+	clrldi	r9,r9,1
+	mtmsrd	r9
+	nop
+
+	/* Get thread number in r3 and branch. */
+
+	mfspr	r3, 0x88
+	cntlzw.	r3, r3
+	li	r4, 0
+	li	r5, 0
+	beq	1f
+
+	/* Secondary goes to __secondary_hold in kernel. */
+
+	li	r4, 0x60
+	mtctr	r4
+	bctr
+
+	/* Primary delays then goes to _zimage_start in wrapper. */
+1:
+	or	31, 31, 31 /* db16cyc */
+	or	31, 31, 31 /* db16cyc */
+
+	lis	r4, _zimage_start@ha
+	addi	r4, r4, _zimage_start@l
+	mtctr	r4
+	bctr
+
+/*
+ * __system_reset_kernel - Place holder for the kernel reset vector.
+ *
+ * The bootwrapper build script copies 0x100 bytes from offset 0x100
+ * of the rom image to the symbol __system_reset_kernel.  At runtime
+ * the bootwrapper program copies the 0x100 bytes at __system_reset_kernel
+ * to ram address 0x100.  This symbol must occupy 0x100 bytes.
+ */
+
+	.globl __system_reset_kernel
+__system_reset_kernel:
+
+	. = __system_reset_kernel + 0x100
diff --git a/arch/powerpc/boot/ps3-hvcall.S b/arch/powerpc/boot/ps3-hvcall.S
new file mode 100644
index 0000000..c8b7df3
--- /dev/null
+++ b/arch/powerpc/boot/ps3-hvcall.S
@@ -0,0 +1,184 @@
+/*
+ *  PS3 bootwrapper hvcalls.
+ *
+ *  Copyright (C) 2007 Sony Computer Entertainment Inc.
+ *  Copyright 2007 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "ppc_asm.h"
+
+/*
+ * The PS3 hypervisor uses a 64 bit "C" language calling convention.
+ * The routines here marshal arguments between the 32 bit wrapper
+ * program and the 64 bit hvcalls.
+ *
+ *  wrapper           lv1
+ *  32-bit (h,l)      64-bit
+ *
+ *  1: r3,r4          <-> r3
+ *  2: r5,r6          <-> r4
+ *  3: r7,r8          <-> r5
+ *  4: r9,r10         <-> r6
+ *  5: 8(r1),12(r1)   <-> r7
+ *  6: 16(r1),20(r1)  <-> r8
+ *  7: 24(r1),28(r1)  <-> r9
+ *  8: 32(r1),36(r1)  <-> r10
+ *
+ */
+
+.macro GLOBAL name
+	.section ".text"
+	.balign 4
+	.globl \name
+\name:
+.endm
+
+.macro NO_SUPPORT name
+	GLOBAL \name
+	b ps3_no_support
+.endm
+
+.macro HVCALL num
+	li r11, \num
+	.long 0x44000022
+	extsw r3, r3
+.endm
+
+.macro SAVE_LR offset=4
+	mflr r0
+	stw r0, \offset(r1)
+.endm
+
+.macro LOAD_LR offset=4
+	lwz r0, \offset(r1)
+	mtlr r0
+.endm
+
+.macro LOAD_64_REG target,high,low
+	sldi r11, \high, 32
+	or \target, r11, \low
+.endm
+
+.macro LOAD_64_STACK target,offset
+	ld \target, \offset(r1)
+.endm
+
+.macro LOAD_R3
+	LOAD_64_REG r3,r3,r4
+.endm
+
+.macro LOAD_R4
+	LOAD_64_REG r4,r5,r6
+.endm
+
+.macro LOAD_R5
+	LOAD_64_REG r5,r7,r8
+.endm
+
+.macro LOAD_R6
+	LOAD_64_REG r6,r9,r10
+.endm
+
+.macro LOAD_R7
+	LOAD_64_STACK r7,8
+.endm
+
+.macro LOAD_R8
+	LOAD_64_STACK r8,16
+.endm
+
+.macro LOAD_R9
+	LOAD_64_STACK r9,24
+.endm
+
+.macro LOAD_R10
+	LOAD_64_STACK r10,32
+.endm
+
+.macro LOAD_REGS_0
+	stwu 1,-16(1)
+	stw 3, 8(1)
+.endm
+
+.macro LOAD_REGS_5
+	LOAD_R3
+	LOAD_R4
+	LOAD_R5
+	LOAD_R6
+	LOAD_R7
+.endm
+
+.macro LOAD_REGS_6
+	LOAD_REGS_5
+	LOAD_R8
+.endm
+
+.macro LOAD_REGS_8
+	LOAD_REGS_6
+	LOAD_R9
+	LOAD_R10
+.endm
+
+.macro STORE_REGS_0_1
+	lwz r11, 8(r1)
+	std r4, 0(r11)
+	mr r4, r3
+	li r3, 0
+	addi r1,r1,16
+.endm
+
+.macro STORE_REGS_5_2
+	lwz r11, 16(r1)
+	std r4, 0(r11)
+	lwz r11, 24(r1)
+	std r5, 0(r11)
+.endm
+
+.macro STORE_REGS_6_1
+	lwz r11, 24(r1)
+	std r4, 0(r11)
+.endm
+
+GLOBAL lv1_get_logical_ppe_id
+	SAVE_LR
+	LOAD_REGS_0
+	HVCALL 69
+	STORE_REGS_0_1
+	LOAD_LR
+	blr
+
+GLOBAL lv1_get_logical_partition_id
+	SAVE_LR
+	LOAD_REGS_0
+	HVCALL 74
+	STORE_REGS_0_1
+	LOAD_LR
+	blr
+
+GLOBAL lv1_get_repository_node_value
+	SAVE_LR
+	LOAD_REGS_5
+	HVCALL 91
+	STORE_REGS_5_2
+	LOAD_LR
+	blr
+
+GLOBAL lv1_panic
+	SAVE_LR
+	LOAD_REGS_8
+	HVCALL 255
+	LOAD_LR
+	blr
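
A note on the stubs above, with a small sketch that is not part of the patch:
the ".long 0x44000022" emitted by HVCALL is the "sc 1" (hypervisor call)
instruction, and the LOAD_*/STORE_* macros perform the 32/64-bit marshaling
described in the header comment.  The 32-bit wrapper's C code therefore simply
declares the lv1 calls with 64-bit types, as ps3.c does below; the helper name
here is invented for illustration.

#include "types.h"
#include "stdio.h"

/* lv1 wrapper provided by ps3-hvcall.S; the result comes back through
 * the pointer (STORE_REGS_0_1 writes the 64-bit value out). */
extern s64 lv1_get_logical_ppe_id(u64 *out_1);

/* Hypothetical caller: the asm stub joins/splits the 64-bit value. */
static int example_print_ppe_id(void)
{
	u64 ppe_id;

	if (lv1_get_logical_ppe_id(&ppe_id))
		return -1;
	printf("ppe_id %lu\n\r", (unsigned long)ppe_id);
	return 0;
}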
diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
new file mode 100644
index 0000000..893d593
--- /dev/null
+++ b/arch/powerpc/boot/ps3.c
@@ -0,0 +1,161 @@
+/*
+ *  PS3 bootwrapper support.
+ *
+ *  Copyright (C) 2007 Sony Computer Entertainment Inc.
+ *  Copyright 2007 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "elf.h"
+#include "string.h"
+#include "stdio.h"
+#include "page.h"
+#include "ops.h"
+
+extern s64 lv1_panic(u64 in_1);
+extern s64 lv1_get_logical_partition_id(u64 *out_1);
+extern s64 lv1_get_logical_ppe_id(u64 *out_1);
+extern s64 lv1_get_repository_node_value(u64 in_1, u64 in_2, u64 in_3,
+	u64 in_4, u64 in_5, u64 *out_1, u64 *out_2);
+
+#ifdef DEBUG
+#define DBG(fmt...) printf(fmt)
+#else
+static inline int __attribute__ ((format (printf, 1, 2))) DBG(
+	const char *fmt, ...) {return 0;}
+#endif
+
+BSS_STACK(4096);
+
+/* A buffer that may be edited by tools operating on a zImage binary so as to
+ * edit the command line passed to vmlinux (by setting /chosen/bootargs).
+ * The buffer is put in its own section so that tools may locate it more easily.
+ */
+static char cmdline[COMMAND_LINE_SIZE]
+	__attribute__((__section__("__builtin_cmdline")));
+
+static void prep_cmdline(void *chosen)
+{
+	if (cmdline[0] == '\0')
+		getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
+	else
+		setprop_str(chosen, "bootargs", cmdline);
+
+	printf("cmdline: '%s'\n", cmdline);
+}
+
+static void ps3_console_write(const char *buf, int len)
+{
+}
+
+static void ps3_exit(void)
+{
+	printf("ps3_exit\n");
+
+	/* lv1_panic will shutdown the lpar. */
+
+	lv1_panic(0); /* zero = do not reboot */
+	while (1);
+}
+
+static int ps3_repository_read_rm_size(u64 *rm_size)
+{
+	s64 result;
+	u64 lpar_id;
+	u64 ppe_id;
+	u64 v2;
+
+	result = lv1_get_logical_partition_id(&lpar_id);
+
+	if (result)
+		return -1;
+
+	result = lv1_get_logical_ppe_id(&ppe_id);
+
+	if (result)
+		return -1;
+
+	/*
+	 * n1: 0000000062690000 : ....bi..
+	 * n2: 7075000000000000 : pu......
+	 * n3: 0000000000000001 : ........
+	 * n4: 726d5f73697a6500 : rm_size.
+	 */
+
+	result = lv1_get_repository_node_value(lpar_id, 0x0000000062690000ULL,
+		0x7075000000000000ULL, ppe_id, 0x726d5f73697a6500ULL, rm_size,
+		&v2);
+
+	printf("%s:%d: ppe_id  %lu \n", __func__, __LINE__,
+		(unsigned long)ppe_id);
+	printf("%s:%d: lpar_id %lu \n", __func__, __LINE__,
+		(unsigned long)lpar_id);
+	printf("%s:%d: rm_size %llxh \n", __func__, __LINE__, *rm_size);
+
+	return result ? -1 : 0;
+}
+
+void ps3_copy_vectors(void)
+{
+	extern char __system_reset_kernel[];
+
+	memcpy((void *)0x100, __system_reset_kernel, 0x100);
+	flush_cache((void *)0x100, 0x100);
+}
+
+void platform_init(void)
+{
+	extern char _end[];
+	extern char _dtb_start[];
+	extern char _initrd_start[];
+	extern char _initrd_end[];
+	const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */
+	void *chosen;
+	unsigned long ft_addr;
+	u64 rm_size;
+
+	console_ops.write = ps3_console_write;
+	platform_ops.exit = ps3_exit;
+
+	printf("\n-- PS3 bootwrapper --\n");
+
+	simple_alloc_init(_end, heapsize, 32, 64);
+	ft_init(_dtb_start, 0, 4);
+
+	chosen = finddevice("/chosen");
+
+	ps3_repository_read_rm_size(&rm_size);
+	dt_fixup_memory(0, rm_size);
+
+	if (_initrd_end > _initrd_start) {
+		setprop_val(chosen, "linux,initrd-start", (u32)(_initrd_start));
+		setprop_val(chosen, "linux,initrd-end", (u32)(_initrd_end));
+	}
+
+	prep_cmdline(chosen);
+
+	ft_addr = dt_ops.finalize();
+
+	ps3_copy_vectors();
+
+	printf(" flat tree at 0x%lx\n\r", ft_addr);
+
+	((kernel_entry_t)0)(ft_addr, 0, NULL);
+
+	ps3_exit();
+}
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
index 7fd3233..eaa0d3a 100644
--- a/arch/powerpc/boot/serial.c
+++ b/arch/powerpc/boot/serial.c
@@ -27,7 +27,7 @@
 	return scdp->open();
 }
 
-static void serial_write(char *buf, int len)
+static void serial_write(const char *buf, int len)
 {
 	struct serial_console_data *scdp = console_ops.data;
 
diff --git a/arch/powerpc/boot/stdio.c b/arch/powerpc/boot/stdio.c
index 0a9feeb..5b57800 100644
--- a/arch/powerpc/boot/stdio.c
+++ b/arch/powerpc/boot/stdio.c
@@ -190,7 +190,11 @@
 
 		/* get the conversion qualifier */
 		qualifier = -1;
-		if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
+		if (*fmt == 'l' && *(fmt + 1) == 'l') {
+			qualifier = 'q';
+			fmt += 2;
+		} else if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L'
+			|| *fmt == 'Z') {
 			qualifier = *fmt;
 			++fmt;
 		}
@@ -281,6 +285,10 @@
 			num = va_arg(args, unsigned long);
 			if (flags & SIGN)
 				num = (signed long) num;
+		} else if (qualifier == 'q') {
+			num = va_arg(args, unsigned long long);
+			if (flags & SIGN)
+				num = (signed long long) num;
 		} else if (qualifier == 'Z') {
 			num = va_arg(args, size_t);
 		} else if (qualifier == 'h') {
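
With the new "ll" qualifier (tracked internally as 'q'), the bootwrapper's
printf can format 64-bit values directly; ps3.c relies on this when printing
rm_size.  A minimal usage sketch follows; it is not part of the patch and the
helper name is invented.

#include "types.h"
#include "stdio.h"

/* Hypothetical helper: print a 64-bit repository value with %llx. */
static void example_print_rm_size(u64 rm_size)
{
	printf("rm_size 0x%llx\n\r", rm_size);
}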
diff --git a/arch/powerpc/boot/types.h b/arch/powerpc/boot/types.h
index 79d26e7..31393d1 100644
--- a/arch/powerpc/boot/types.h
+++ b/arch/powerpc/boot/types.h
@@ -7,6 +7,10 @@
 typedef unsigned short		u16;
 typedef unsigned int		u32;
 typedef unsigned long long	u64;
+typedef signed char		s8;
+typedef short			s16;
+typedef int			s32;
+typedef long long		s64;
 
 #define min(x,y) ({ \
 	typeof(x) _x = (x);	\
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index da77adc..65f6854 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -144,6 +144,15 @@
 cuboot*)
     gzip=
     ;;
+ps3)
+    platformo="$object/ps3-head.o $object/ps3-hvcall.o $object/ps3.o"
+    lds=$object/zImage.ps3.lds
+    gzip=
+    ext=bin
+    objflags="-O binary --set-section-flags=.bss=contents,alloc,load,data"
+    ksection=.kernel:vmlinux.bin
+    isection=.kernel:initrd
+    ;;
 esac
 
 vmz="$tmpdir/`basename \"$kernel\"`.$ext"
@@ -239,4 +248,50 @@
     fi
     exit 0
     ;;
+ps3)
+    # The ps3's loader supports loading gzipped binary images from flash
+    # rom to addr zero. The loader enters the image at addr 0x100.  A
+    # bootwrapper overlay is used to arrange for the kernel to be loaded
+    # to addr zero and to have a suitable bootwrapper entry at 0x100.
+    # To construct the rom image, 0x100 bytes from offset 0x100 in the
+    # kernel are copied to the bootwrapper symbol __system_reset_kernel.
+    # The 0x100 bytes at the bootwrapper symbol __system_reset_overlay are
+    # then copied to offset 0x100.  At runtime the bootwrapper program
+    # copies the 0x100 bytes at __system_reset_kernel to addr 0x100.
+
+    system_reset_overlay=0x`${CROSS}nm "$ofile" \
+        | grep ' __system_reset_overlay$'       \
+        | cut -d' ' -f1`
+    system_reset_overlay=`printf "%d" $system_reset_overlay`
+    system_reset_kernel=0x`${CROSS}nm "$ofile" \
+        | grep ' __system_reset_kernel$'       \
+        | cut -d' ' -f1`
+    system_reset_kernel=`printf "%d" $system_reset_kernel`
+    overlay_dest="256"
+    overlay_size="256"
+
+    rm -f "$object/otheros.bld"
+
+    ${CROSS}objcopy -O binary "$ofile" "$ofile.bin"
+
+    msg=$(dd if="$ofile.bin" of="$ofile.bin" conv=notrunc \
+        skip=$overlay_dest seek=$system_reset_kernel      \
+        count=$overlay_size bs=1 2>&1)
+
+    if [ $? -ne "0" ]; then
+       echo $msg
+       exit 1
+    fi
+
+    msg=$(dd if="$ofile.bin" of="$ofile.bin" conv=notrunc \
+        skip=$system_reset_overlay seek=$overlay_dest     \
+        count=$overlay_size bs=1 2>&1)
+
+    if [ $? -ne "0" ]; then
+       echo $msg
+       exit 2
+    fi
+
+    gzip --force -9 --stdout "$ofile.bin" > "$object/otheros.bld"
+    ;;
 esac
diff --git a/arch/powerpc/boot/zImage.ps3.lds.S b/arch/powerpc/boot/zImage.ps3.lds.S
new file mode 100644
index 0000000..aaa469c
--- /dev/null
+++ b/arch/powerpc/boot/zImage.ps3.lds.S
@@ -0,0 +1,50 @@
+OUTPUT_ARCH(powerpc:common)
+ENTRY(_zimage_start)
+EXTERN(_zimage_start)
+SECTIONS
+{
+  _vmlinux_start =  .;
+  .kernel:vmlinux.bin : { *(.kernel:vmlinux.bin) }
+  _vmlinux_end =  .;
+
+  . = ALIGN(4096);
+  _dtb_start = .;
+  .kernel:dtb : { *(.kernel:dtb) }
+  _dtb_end = .;
+
+  . = ALIGN(4096);
+  _initrd_start =  .;
+  .kernel:initrd : { *(.kernel:initrd) }
+  _initrd_end =  .;
+
+  _start = .;
+  .text      :
+  {
+    *(.text)
+    *(.fixup)
+  }
+  _etext = .;
+  . = ALIGN(4096);
+  .data    :
+  {
+    *(.rodata*)
+    *(.data*)
+    *(.sdata*)
+    __got2_start = .;
+    *(.got2)
+    __got2_end = .;
+  }
+
+  . = ALIGN(4096);
+  _edata  =  .;
+
+  . = ALIGN(4096);
+  __bss_start = .;
+  .bss       :
+  {
+   *(.sbss)
+   *(.bss)
+  }
+  . = ALIGN(4096);
+  _end = . ;
+}
diff --git a/arch/powerpc/configs/holly_defconfig b/arch/powerpc/configs/holly_defconfig
index 3278184..04b94f8 100644
--- a/arch/powerpc/configs/holly_defconfig
+++ b/arch/powerpc/configs/holly_defconfig
@@ -190,10 +190,12 @@
 # CONFIG_RESOURCES_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_PROC_DEVICETREE=y
-# CONFIG_CMDLINE_BOOL is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,115200"
 # CONFIG_PM is not set
 # CONFIG_SECCOMP is not set
-# CONFIG_WANT_DEVICE_TREE is not set
+CONFIG_WANT_DEVICE_TREE=y
+CONFIG_DEVICE_TREE="holly.dts"
 CONFIG_ISA_DMA_API=y
 
 #
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 956d1df..d0b43df 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -156,7 +156,11 @@
 CONFIG_PS3_USE_LPAR_ADDR=y
 CONFIG_PS3_VUART=y
 CONFIG_PS3_PS3AV=y
-CONFIG_PS3_SYS_MANAGER=y
+CONFIG_PS3_SYS_MANAGER=m
+CONFIG_PS3_STORAGE=y
+CONFIG_PS3_DISK=y
+CONFIG_PS3_ROM=y
+CONFIG_PS3_FLASH=y
 CONFIG_PPC_CELL=y
 # CONFIG_PPC_CELL_NATIVE is not set
 # CONFIG_PPC_IBM_CELL_BLADE is not set
@@ -335,7 +339,7 @@
 CONFIG_BT_L2CAP=m
 CONFIG_BT_SCO=m
 CONFIG_BT_RFCOMM=m
-# CONFIG_BT_RFCOMM_TTY is not set
+CONFIG_BT_RFCOMM_TTY=y
 # CONFIG_BT_BNEP is not set
 CONFIG_BT_HIDP=m
 
@@ -344,7 +348,9 @@
 #
 CONFIG_BT_HCIUSB=m
 CONFIG_BT_HCIUSB_SCO=y
-# CONFIG_BT_HCIUART is not set
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
 # CONFIG_BT_HCIBCM203X is not set
 # CONFIG_BT_HCIBPA10X is not set
 # CONFIG_BT_HCIBFUSB is not set
@@ -435,7 +441,7 @@
 #
 # Some SCSI devices (e.g. CD jukebox) support multiple LUNs
 #
-# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_MULTI_LUN=y
 # CONFIG_SCSI_CONSTANTS is not set
 # CONFIG_SCSI_LOGGING is not set
 # CONFIG_SCSI_SCAN_ASYNC is not set
@@ -479,6 +485,7 @@
 CONFIG_MII=m
 CONFIG_NETDEV_1000=y
 CONFIG_NETDEV_10000=y
+CONFIG_GELIC_NET=y
 
 #
 # Wireless LAN
@@ -546,7 +553,27 @@
 #
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+# CONFIG_JOYSTICK_XPAD is not set
 # CONFIG_INPUT_TABLET is not set
 # CONFIG_INPUT_TOUCHSCREEN is not set
 # CONFIG_INPUT_MISC is not set
@@ -563,7 +590,7 @@
 CONFIG_VT=y
 CONFIG_VT_CONSOLE=y
 CONFIG_HW_CONSOLE=y
-# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
@@ -1086,7 +1113,7 @@
 #
 # CONFIG_PRINTK_TIME is not set
 CONFIG_ENABLE_MUST_CHECK=y
-# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 # CONFIG_DEBUG_FS is not set
 # CONFIG_HEADERS_CHECK is not set
@@ -1116,16 +1143,7 @@
 # CONFIG_DEBUGGER is not set
 CONFIG_IRQSTACKS=y
 # CONFIG_BOOTX_TEXT is not set
-CONFIG_PPC_EARLY_DEBUG=y
-# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
-# CONFIG_PPC_EARLY_DEBUG_G5 is not set
-# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set
-# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set
-# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
-# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
-# CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set
-# CONFIG_PPC_EARLY_DEBUG_BEAT is not set
-# CONFIG_PPC_EARLY_DEBUG_44x is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
 
 #
 # Security options
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 3e779f0..42c42ec 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -12,7 +12,8 @@
 
 obj-y				:= semaphore.o cputable.o ptrace.o syscalls.o \
 				   irq.o align.o signal_32.o pmc.o vdso.o \
-				   init_task.o process.o systbl.o idle.o
+				   init_task.o process.o systbl.o idle.o \
+				   signal.o
 obj-y				+= vdso32/
 obj-$(CONFIG_PPC64)		+= setup_64.o binfmt_elf32.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
@@ -65,9 +66,9 @@
 module-$(CONFIG_PPC64)		+= module_64.o
 obj-$(CONFIG_MODULES)		+= $(module-y)
 
-pci64-$(CONFIG_PPC64)		+= pci_64.o pci_dn.o
+pci64-$(CONFIG_PPC64)		+= pci_64.o pci_dn.o isa-bridge.o
 pci32-$(CONFIG_PPC32)		:= pci_32.o
-obj-$(CONFIG_PCI)		+= $(pci64-y) $(pci32-y)
+obj-$(CONFIG_PCI)		+= $(pci64-y) $(pci32-y) pci-common.o
 obj-$(CONFIG_PCI_MSI)		+= msi.o
 kexec-$(CONFIG_PPC64)		:= machine_kexec_64.o
 kexec-$(CONFIG_PPC32)		:= machine_kexec_32.o
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index b2b5d66..b1f8000 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -294,6 +294,21 @@
 		.oprofile_mmcra_sipr	= MMCRA_SIPR,
 		.platform		= "power5",
 	},
+	{	/* Power5++ */
+		.pvr_mask		= 0xffffff00,
+		.pvr_value		= 0x003b0300,
+		.cpu_name		= "POWER5+ (gs)",
+		.cpu_features		= CPU_FTRS_POWER5,
+		.cpu_user_features	= COMMON_USER_POWER5_PLUS,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 6,
+		.oprofile_cpu_type	= "ppc64/power5++",
+		.oprofile_type		= PPC_OPROFILE_POWER4,
+		.oprofile_mmcra_sihv	= MMCRA_SIHV,
+		.oprofile_mmcra_sipr	= MMCRA_SIPR,
+		.platform		= "power5+",
+	},
 	{	/* Power5 GS */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x003b0000,
@@ -1178,8 +1193,8 @@
 		.platform		= "ppc440",
 	},
 	{ /* 440SP Rev. A */
-		.pvr_mask		= 0xff000fff,
-		.pvr_value		= 0x53000891,
+		.pvr_mask		= 0xfff00fff,
+		.pvr_value		= 0x53200891,
 		.cpu_name		= "440SP Rev. A",
 		.cpu_features		= CPU_FTRS_44X,
 		.cpu_user_features	= COMMON_USER_BOOKE,
@@ -1188,9 +1203,19 @@
 		.platform		= "ppc440",
 	},
 	{ /* 440SPe Rev. A */
-		.pvr_mask		= 0xff000fff,
-		.pvr_value		= 0x53000890,
-		.cpu_name		= "440SPe Rev. A",
+		.pvr_mask               = 0xfff00fff,
+		.pvr_value              = 0x53400890,
+		.cpu_name               = "440SPe Rev. A",
+		.cpu_features		= CPU_FTRS_44X,
+		.cpu_user_features      = COMMON_USER_BOOKE,
+		.icache_bsize           = 32,
+		.dcache_bsize           = 32,
+		.platform               = "ppc440",
+	},
+	{ /* 440SPe Rev. B */
+		.pvr_mask		= 0xfff00fff,
+		.pvr_value		= 0x53400891,
+		.cpu_name		= "440SPe Rev. B",
 		.cpu_features		= CPU_FTRS_44X,
 		.cpu_user_features	= COMMON_USER_BOOKE,
 		.icache_bsize		= 32,
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index c897203..7d73a13 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -9,7 +9,6 @@
  *  rewritten by Paul Mackerras.
  *    Copyright (C) 1996 Paul Mackerras.
  *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  *  This file contains the low-level support and setup for the
  *  PowerPC platform, including trap and interrupt dispatch.
@@ -32,10 +31,6 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 
-#ifdef CONFIG_APUS
-#include <asm/amigappc.h>
-#endif
-
 /* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
 #define LOAD_BAT(n, reg, RA, RB)	\
 	/* see the comment for clear_bats() -- Cort */ \
@@ -92,11 +87,6 @@
  *  r4: virtual address of boot_infos_t
  *  r5: 0
  *
- * APUS
- *   r3: 'APUS'
- *   r4: physical address of memory base
- *   Linux/m68k style BootInfo structure at &_end.
- *
  * PREP
  * This is jumped to on prep systems right after the kernel is relocated
  * to its proper place in memory by the boot loader.  The expected layout
@@ -150,14 +140,6 @@
  */
 	bl	early_init
 
-#ifdef CONFIG_APUS
-/* On APUS the __va/__pa constants need to be set to the correct
- * values before continuing.
- */
-	mr	r4,r30
-	bl	fix_mem_constants
-#endif /* CONFIG_APUS */
-
 /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
  * the physical address we are running at, returned by early_init()
  */
@@ -167,7 +149,7 @@
 	bl	flush_tlbs
 
 	bl	initial_bats
-#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
+#if defined(CONFIG_BOOTX_TEXT)
 	bl	setup_disp_bat
 #endif
 
@@ -183,7 +165,6 @@
 #endif /* CONFIG_6xx */
 
 
-#ifndef CONFIG_APUS
 /*
  * We need to run with _start at physical address 0.
  * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
@@ -196,7 +177,6 @@
 	addis	r4,r3,KERNELBASE@h	/* current address of _start */
 	cmpwi	0,r4,0			/* are we already running at 0? */
 	bne	relocate_kernel
-#endif /* CONFIG_APUS */
 /*
  * we now have the 1st 16M of ram mapped with the bats.
  * prep needs the mmu to be turned on here, but pmac already has it on.
@@ -881,85 +861,6 @@
 	addi	r6,r6,4
 	blr
 
-#ifdef CONFIG_APUS
-/*
- * On APUS the physical base address of the kernel is not known at compile
- * time, which means the __pa/__va constants used are incorrect. In the
- * __init section is recorded the virtual addresses of instructions using
- * these constants, so all that has to be done is fix these before
- * continuing the kernel boot.
- *
- * r4 = The physical address of the kernel base.
- */
-fix_mem_constants:
-	mr	r10,r4
-	addis	r10,r10,-KERNELBASE@h    /* virt_to_phys constant */
-	neg	r11,r10	                 /* phys_to_virt constant */
-
-	lis	r12,__vtop_table_begin@h
-	ori	r12,r12,__vtop_table_begin@l
-	add	r12,r12,r10	         /* table begin phys address */
-	lis	r13,__vtop_table_end@h
-	ori	r13,r13,__vtop_table_end@l
-	add	r13,r13,r10	         /* table end phys address */
-	subi	r12,r12,4
-	subi	r13,r13,4
-1:	lwzu	r14,4(r12)               /* virt address of instruction */
-	add     r14,r14,r10              /* phys address of instruction */
-	lwz     r15,0(r14)               /* instruction, now insert top */
-	rlwimi  r15,r10,16,16,31         /* half of vp const in low half */
-	stw	r15,0(r14)               /* of instruction and restore. */
-	dcbst	r0,r14			 /* write it to memory */
-	sync
-	icbi	r0,r14			 /* flush the icache line */
-	cmpw	r12,r13
-	bne     1b
-	sync				/* additional sync needed on g4 */
-	isync
-
-/*
- * Map the memory where the exception handlers will
- * be copied to when hash constants have been patched.
- */
-#ifdef CONFIG_APUS_FAST_EXCEPT
-	lis	r8,0xfff0
-#else
-	lis	r8,0
-#endif
-	ori	r8,r8,0x2		/* 128KB, supervisor */
-	mtspr	SPRN_DBAT3U,r8
-	mtspr	SPRN_DBAT3L,r8
-
-	lis	r12,__ptov_table_begin@h
-	ori	r12,r12,__ptov_table_begin@l
-	add	r12,r12,r10	         /* table begin phys address */
-	lis	r13,__ptov_table_end@h
-	ori	r13,r13,__ptov_table_end@l
-	add	r13,r13,r10	         /* table end phys address */
-	subi	r12,r12,4
-	subi	r13,r13,4
-1:	lwzu	r14,4(r12)               /* virt address of instruction */
-	add     r14,r14,r10              /* phys address of instruction */
-	lwz     r15,0(r14)               /* instruction, now insert top */
-	rlwimi  r15,r11,16,16,31         /* half of pv const in low half*/
-	stw	r15,0(r14)               /* of instruction and restore. */
-	dcbst	r0,r14			 /* write it to memory */
-	sync
-	icbi	r0,r14			 /* flush the icache line */
-	cmpw	r12,r13
-	bne     1b
-
-	sync				/* additional sync needed on g4 */
-	isync				/* No speculative loading until now */
-	blr
-
-/***********************************************************************
- *  Please note that on APUS the exception handlers are located at the
- *  physical address 0xfff0000. For this reason, the exception handlers
- *  cannot use relative branches to access the code below.
- ***********************************************************************/
-#endif /* CONFIG_APUS */
-
 #ifdef CONFIG_SMP
 #ifdef CONFIG_GEMINI
 	.globl	__secondary_start_gemini
@@ -1135,19 +1036,6 @@
 	bl	__save_cpu_setup
 	bl	MMU_init
 
-#ifdef CONFIG_APUS
-	/* Copy exception code to exception vector base on APUS. */
-	lis	r4,KERNELBASE@h
-#ifdef CONFIG_APUS_FAST_EXCEPT
-	lis	r3,0xfff0		/* Copy to 0xfff00000 */
-#else
-	lis	r3,0			/* Copy to 0x00000000 */
-#endif
-	li	r5,0x4000		/* # bytes of memory to copy */
-	li	r6,0
-	bl	copy_and_flush		/* copy the first 0x4000 bytes */
-#endif  /* CONFIG_APUS */
-
 /*
  * Go back to running unmapped so we can load up new values
  * for SDR1 (hash table pointer) and the segment registers
@@ -1324,11 +1212,7 @@
 #else
 	ori	r8,r8,2			/* R/W access */
 #endif /* CONFIG_SMP */
-#ifdef CONFIG_APUS
-	ori	r11,r11,BL_8M<<2|0x2	/* set up 8MB BAT registers for 604 */
-#else
 	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */
-#endif /* CONFIG_APUS */
 
 	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
 	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
@@ -1338,7 +1222,7 @@
 	blr
 
 
-#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
+#ifdef CONFIG_BOOTX_TEXT
 setup_disp_bat:
 	/*
 	 * setup the display bat prepared for us in prom.c
@@ -1362,7 +1246,7 @@
 1:	mtspr	SPRN_IBAT3L,r8
 	mtspr	SPRN_IBAT3U,r11
 	blr
-#endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */
+#endif /* CONFIG_BOOTX_TEXT */
 
 #ifdef CONFIG_8260
 /* Jump into the system reset for the rom.
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 1111fce..8cdd48e 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -103,8 +103,8 @@
 
 	. = 0x60
 /*
- * The following code is used on pSeries to hold secondary processors
- * in a spin loop after they have been freed from OpenFirmware, but
+ * The following code is used to hold secondary processors
+ * in a spin loop after they have entered the kernel, but
  * before the bulk of the kernel has been relocated.  This code
  * is relocated to physical address 0x60 before prom_init is run.
  * All of it must fit below the first exception vector at 0x100.
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index 34ae114..e31aca92 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -35,7 +35,7 @@
 	asm volatile("sync");
 	do {
 		tmp = *port;
-		asm volatile("eieio");
+		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
 	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
@@ -66,7 +66,7 @@
 	asm volatile("sync");
 	do {
 		tmp = *port;
-		asm volatile("eieio");
+		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
 	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
@@ -97,7 +97,7 @@
 	asm volatile("sync");
 	do {
 		tmp = *port;
-		asm volatile("eieio");
+		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
 	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
@@ -155,21 +155,21 @@
 	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
 		*((u8 *)dest) = *((volatile u8 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
+		eieio();
 		vsrc++;
 		dest++;
 		n--;
 	}
 	while(n > 4) {
 		*((u32 *)dest) = *((volatile u32 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
+		eieio();
 		vsrc += 4;
 		dest += 4;
 		n -= 4;
 	}
 	while(n) {
 		*((u8 *)dest) = *((volatile u8 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
+		eieio();
 		vsrc++;
 		dest++;
 		n--;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c2b84c6..2fc8786 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -7,7 +7,6 @@
  *    Copyright (C) 1996-2001 Cort Dougan
  *  Adapted for Power Macintosh by Paul Mackerras
  *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -337,7 +336,8 @@
 
 void __init init_IRQ(void)
 {
-	ppc_md.init_IRQ();
+	if (ppc_md.init_IRQ)
+		ppc_md.init_IRQ();
 #ifdef CONFIG_PPC64
 	irq_ctx_init();
 #endif
@@ -597,6 +597,49 @@
 	local_irq_restore(flags);
 }
 
+static int irq_setup_virq(struct irq_host *host, unsigned int virq,
+			    irq_hw_number_t hwirq)
+{
+	/* Clear IRQ_NOREQUEST flag */
+	get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;
+
+	/* map it */
+	smp_wmb();
+	irq_map[virq].hwirq = hwirq;
+	smp_mb();
+
+	if (host->ops->map(host, virq, hwirq)) {
+		pr_debug("irq: -> mapping failed, freeing\n");
+		irq_free_virt(virq, 1);
+		return -1;
+	}
+
+	return 0;
+}
+
+unsigned int irq_create_direct_mapping(struct irq_host *host)
+{
+	unsigned int virq;
+
+	if (host == NULL)
+		host = irq_default_host;
+
+	BUG_ON(host == NULL);
+	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
+
+	virq = irq_alloc_virt(host, 1, 0);
+	if (virq == NO_IRQ) {
+		pr_debug("irq: create_direct virq allocation failed\n");
+		return NO_IRQ;
+	}
+
+	pr_debug("irq: create_direct obtained virq %d\n", virq);
+
+	if (irq_setup_virq(host, virq, virq))
+		return NO_IRQ;
+
+	return virq;
+}
 
 unsigned int irq_create_mapping(struct irq_host *host,
 				irq_hw_number_t hwirq)
@@ -645,18 +688,9 @@
 	}
 	pr_debug("irq: -> obtained virq %d\n", virq);
 
-	/* Clear IRQ_NOREQUEST flag */
-	get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;
-
-	/* map it */
-	smp_wmb();
-	irq_map[virq].hwirq = hwirq;
-	smp_mb();
-	if (host->ops->map(host, virq, hwirq)) {
-		pr_debug("irq: -> mapping failed, freeing\n");
-		irq_free_virt(virq, 1);
+	if (irq_setup_virq(host, virq, hwirq))
 		return NO_IRQ;
-	}
+
 	return virq;
 }
 EXPORT_SYMBOL_GPL(irq_create_mapping);
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
new file mode 100644
index 0000000..f0f49d1
--- /dev/null
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -0,0 +1,271 @@
+/*
+ * Routines for tracking a legacy ISA bridge
+ *
+ * Copyright 2007 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ *
+ * Some bits and pieces moved over from pci_64.c
+ *
+ * Copyright 2003 Anton Blanchard <anton@au.ibm.com>, IBM Corp.
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#define DEBUG
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+#include <asm/ppc-pci.h>
+#include <asm/firmware.h>
+
+unsigned long isa_io_base;	/* NULL if no ISA bus */
+EXPORT_SYMBOL(isa_io_base);
+
+/* Cached ISA bridge dev. */
+static struct device_node *isa_bridge_devnode;
+struct pci_dev *isa_bridge_pcidev;
+EXPORT_SYMBOL_GPL(isa_bridge_pcidev);
+
+#define ISA_SPACE_MASK 0x1
+#define ISA_SPACE_IO 0x1
+
+static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
+						unsigned long phb_io_base_phys)
+{
+	/* We should get some saner parsing here and remove these structs */
+	struct pci_address {
+		u32 a_hi;
+		u32 a_mid;
+		u32 a_lo;
+	};
+
+	struct isa_address {
+		u32 a_hi;
+		u32 a_lo;
+	};
+
+	struct isa_range {
+		struct isa_address isa_addr;
+		struct pci_address pci_addr;
+		unsigned int size;
+	};
+
+	const struct isa_range *range;
+	unsigned long pci_addr;
+	unsigned int isa_addr;
+	unsigned int size;
+	int rlen = 0;
+
+	range = of_get_property(isa_node, "ranges", &rlen);
+	if (range == NULL || (rlen < sizeof(struct isa_range)))
+		goto inval_range;
+
+	/* From "ISA Binding to 1275"
+	 * The ranges property is laid out as an array of elements,
+	 * each of which comprises:
+	 *   cells 0 - 1:	an ISA address
+	 *   cells 2 - 4:	a PCI address
+	 *			(size depending on dev->n_addr_cells)
+	 *   cell 5:		the size of the range
+	 */
+	if ((range->isa_addr.a_hi & ISA_SPACE_MASK) != ISA_SPACE_IO) {
+		range++;
+		rlen -= sizeof(struct isa_range);
+		if (rlen < sizeof(struct isa_range))
+			goto inval_range;
+	}
+	if ((range->isa_addr.a_hi & ISA_SPACE_MASK) != ISA_SPACE_IO)
+		goto inval_range;
+
+	isa_addr = range->isa_addr.a_lo;
+	pci_addr = (unsigned long) range->pci_addr.a_mid << 32 |
+		range->pci_addr.a_lo;
+
+	/* Assume these are both zero. Note: We could fix that and
+	 * do a proper parsing instead ... oh well, that will do for
+	 * now as nobody uses fancy mappings for ISA bridges
+	 */
+	if ((pci_addr != 0) || (isa_addr != 0)) {
+		printk(KERN_ERR "unexpected isa to pci mapping: %s\n",
+		       __FUNCTION__);
+		return;
+	}
+
+	/* Align size and make sure it's cropped to 64K */
+	size = PAGE_ALIGN(range->size);
+	if (size > 0x10000)
+		size = 0x10000;
+
+	printk(KERN_ERR "no ISA IO ranges or unexpected isa range,"
+	       "mapping 64k\n");
+
+	__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
+		     size, _PAGE_NO_CACHE|_PAGE_GUARDED);
+	return;
+
+inval_range:
+	printk(KERN_ERR "no ISA IO ranges or unexpected isa range,"
+	       "mapping 64k\n");
+	__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
+		     0x10000, _PAGE_NO_CACHE|_PAGE_GUARDED);
+}
+
+
+/**
+ * isa_bridge_find_early - Find and map the ISA IO space early before
+ *                         main PCI discovery. This is optionally called by
+ *                         the arch code when adding PCI PHBs to get early
+ *                         access to ISA IO ports
+ */
+void __init isa_bridge_find_early(struct pci_controller *hose)
+{
+	struct device_node *np, *parent = NULL, *tmp;
+
+	/* If we already have an ISA bridge, bail off */
+	if (isa_bridge_devnode != NULL)
+		return;
+
+	/* For each "isa" node in the system. Note : we do a search by
+	 * type and not by name. It might be better to do by name but that's
+	 * what the code used to do and I don't want to break too much at
+	 * once. We can look into changing that separately
+	 */
+	for_each_node_by_type(np, "isa") {
+		/* Look for our hose being a parent */
+		for (parent = of_get_parent(np); parent;) {
+			if (parent == hose->arch_data) {
+				of_node_put(parent);
+				break;
+			}
+			tmp = parent;
+			parent = of_get_parent(parent);
+			of_node_put(tmp);
+		}
+		if (parent != NULL)
+			break;
+	}
+	if (np == NULL)
+		return;
+	isa_bridge_devnode = np;
+
+	/* Now parse the "ranges" property and setup the ISA mapping */
+	pci_process_ISA_OF_ranges(np, hose->io_base_phys);
+
+	/* Set the global ISA io base to indicate we have an ISA bridge */
+	isa_io_base = ISA_IO_BASE;
+
+	pr_debug("ISA bridge (early) is %s\n", np->full_name);
+}
+
+/**
+ * isa_bridge_find_late - Find and map the ISA IO space upon discovery of
+ *                        a new ISA bridge
+ */
+static void __devinit isa_bridge_find_late(struct pci_dev *pdev,
+					   struct device_node *devnode)
+{
+	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+
+	/* Store ISA device node and PCI device */
+	isa_bridge_devnode = of_node_get(devnode);
+	isa_bridge_pcidev = pdev;
+
+	/* Now parse the "ranges" property and setup the ISA mapping */
+	pci_process_ISA_OF_ranges(devnode, hose->io_base_phys);
+
+	/* Set the global ISA io base to indicate we have an ISA bridge */
+	isa_io_base = ISA_IO_BASE;
+
+	pr_debug("ISA bridge (late) is %s on %s\n",
+		 devnode->full_name, pci_name(pdev));
+}
+
+/**
+ * isa_bridge_remove - Remove/unmap an ISA bridge
+ */
+static void isa_bridge_remove(void)
+{
+	pr_debug("ISA bridge removed !\n");
+
+	/* Clear the global ISA io base to indicate that we have no more
+	 * ISA bridge. Note that drivers don't quite handle that, though
+	 * we should probably do something about it. But do we ever really
+	 * have ISA bridges being removed on machines using legacy devices ?
+	 */
+	isa_io_base = ISA_IO_BASE;
+
+	/* Clear references to the bridge */
+	of_node_put(isa_bridge_devnode);
+	isa_bridge_devnode = NULL;
+	isa_bridge_pcidev = NULL;
+
+	/* Unmap the ISA area */
+	__iounmap_at((void *)ISA_IO_BASE, 0x10000);
+}
+
+/**
+ * isa_bridge_notify - Get notified of PCI devices addition/removal
+ */
+static int __devinit isa_bridge_notify(struct notifier_block *nb,
+				       unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct device_node *devnode = pci_device_to_OF_node(pdev);
+
+	switch(action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		/* Check if we have an early ISA device, without PCI dev */
+		if (isa_bridge_devnode && isa_bridge_devnode == devnode &&
+		    !isa_bridge_pcidev) {
+			pr_debug("ISA bridge PCI attached: %s\n",
+				 pci_name(pdev));
+			isa_bridge_pcidev = pdev;
+		}
+
+		/* Check if we have no ISA device, and this happens to be one,
+		 * register it as such if it has an OF device
+		 */
+		if (!isa_bridge_devnode && devnode && devnode->type &&
+		    !strcmp(devnode->type, "isa"))
+			isa_bridge_find_late(pdev, devnode);
+
+		return 0;
+	case BUS_NOTIFY_DEL_DEVICE:
+		/* Check if this is our existing ISA device */
+		if (pdev == isa_bridge_pcidev ||
+		    (devnode && devnode == isa_bridge_devnode))
+			isa_bridge_remove();
+		return 0;
+	}
+	return 0;
+}
+
+static struct notifier_block isa_bridge_notifier = {
+	.notifier_call = isa_bridge_notify
+};
+
+/**
+ * isa_bridge_init - register to be notified of ISA bridge addition/removal
+ *
+ */
+static int __init isa_bridge_init(void)
+{
+	if (firmware_has_feature(FW_FEATURE_ISERIES))
+		return 0;
+	bus_register_notifier(&pci_bus_type, &isa_bridge_notifier);
+	return 0;
+}
+arch_initcall(isa_bridge_init);
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 0c96611..440f5a8 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -492,6 +492,13 @@
 	return ret;
 }
 
+#ifdef CONFIG_PPC64
+unsigned long arch_deref_entry_point(void *entry)
+{
+	return (unsigned long)(((func_descr_t *)entry)->entry);
+}
+#endif
+
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
@@ -500,11 +507,9 @@
 	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
 
 	/* setup return addr to the jprobe handler routine */
+	regs->nip = arch_deref_entry_point(jp->entry);
 #ifdef CONFIG_PPC64
-	regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
 	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
-#else
-	regs->nip = (unsigned long)jp->entry;
 #endif
 
 	return 1;
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index c492cee..6444eaa 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -248,7 +248,7 @@
 	} else {
 		int splpar_strlen;
 		int idx, w_idx;
-		char *workbuffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
+		char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
 		if (!workbuffer) {
 			printk(KERN_ERR "%s %s kmalloc failure at line %d \n",
 			       __FILE__, __FUNCTION__, __LINE__);
@@ -261,7 +261,6 @@
 		splpar_strlen = local_buffer[0] * 256 + local_buffer[1];
 		local_buffer += 2;	/* step over strlen value */
 
-		memset(workbuffer, 0, SPLPAR_MAXLENGTH);
 		w_idx = 0;
 		idx = 0;
 		while ((*local_buffer) && (idx < splpar_strlen)) {
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 98decf8..e708ab7 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -392,7 +392,7 @@
 	mtspr   SPRN_L1CSR0,r3
 	isync
 	blr
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
 	mfspr	r3,SPRN_L1CSR1
 	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
 	mtspr	SPRN_L1CSR1,r3
@@ -419,7 +419,7 @@
 _GLOBAL(__flush_icache_range)
 BEGIN_FTR_SECTION
 	blr				/* for 601, do nothing */
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	li	r5,L1_CACHE_BYTES-1
 	andc	r3,r3,r5
 	subf	r4,r3,r4
@@ -514,8 +514,8 @@
  */
 _GLOBAL(__flush_dcache_icache)
 BEGIN_FTR_SECTION
-	blr					/* for 601, do nothing */
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+	blr
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	rlwinm	r3,r3,0,0,19			/* Get page base address */
 	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
 	mtctr	r4
@@ -543,7 +543,7 @@
 _GLOBAL(__flush_dcache_icache_phys)
 BEGIN_FTR_SECTION
 	blr					/* for 601, do nothing */
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	mfmsr	r10
 	rlwinm	r0,r10,0,28,26			/* clear DR */
 	mtmsr	r0
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 519861d..bbb3ba5 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -646,6 +646,19 @@
 	/* turn off mmu */
 	bl	real_mode
 
+	/* copy  0x100 bytes starting at start to 0 */
+	li	r3,0
+	mr	r4,r30		/* start, aka phys mem offset */
+	li	r5,0x100
+	li	r6,0
+	bl	.copy_and_flush	/* (dest, src, copy limit, start offset) */
+1:	/* assume normal blr return */
+
+	/* release other cpus to the new kernel secondary start at 0x60 */
+	mflr	r5
+	li	r6,1
+	stw	r6,kexec_flag-1b(5)
+
 	/* clear out hardware hash page table and tlb */
 	ld	r5,0(r27)		/* deref function descriptor */
 	mtctr	r5
@@ -676,19 +689,6 @@
  *    are the boot cpu ?????
  *    other device tree differences (prop sizes, va vs pa, etc)...
  */
-
-	/* copy  0x100 bytes starting at start to 0 */
-	li	r3,0
-	mr	r4,r30
-	li	r5,0x100
-	li	r6,0
-	bl	.copy_and_flush	/* (dest, src, copy limit, start offset) */
-1:	/* assume normal blr return */
-
-	/* release other cpus to the new kernel secondary start at 0x60 */
-	mflr	r5
-	li	r6,1
-	stw	r6,kexec_flag-1b(5)
 	mr	r3,r25	# my phys cpu
 	mr	r4,r30	# start, aka phys mem offset
 	mtlr	4
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index d454f61..8ded4e7 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -222,10 +222,9 @@
 {
 	struct of_device *dev;
 
-	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev)
 		return NULL;
-	memset(dev, 0, sizeof(*dev));
 
 	dev->node = of_node_get(np);
 	dev->dma_mask = 0xffffffffUL;
@@ -427,14 +426,6 @@
 	/* Process "ranges" property */
 	pci_process_bridge_OF_ranges(phb, dev->node, 0);
 
-	/* Setup IO space. We use the non-dynamic version of that code here,
-	 * which doesn't quite support unplugging. Next kernel release will
-	 * have a better fix for this.
-	 * Note also that we don't do ISA, this will also be fixed with a
-	 * more massive rework.
-	 */
-	pci_setup_phb_io(phb, pci_io_base == 0);
-
 	/* Init pci_dn data structures */
 	pci_devs_phb_init_dynamic(phb);
 
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
new file mode 100644
index 0000000..94b4a02
--- /dev/null
+++ b/arch/powerpc/kernel/pci-common.c
@@ -0,0 +1,457 @@
+/*
+ * Contains common pci routines for ALL ppc platforms
+ * (based on pci_32.c and pci_64.c)
+ *
+ * Port for PPC64 David Engebretsen, IBM Corp.
+ * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
+ *
+ * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
+ *   Rework, based on alpha PCI code.
+ *
+ * Common pmac/prep/chrp pci routines. -- Cort
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/syscalls.h>
+#include <linux/irq.h>
+#include <linux/vmalloc.h>
+
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/byteorder.h>
+#include <asm/machdep.h>
+#include <asm/ppc-pci.h>
+#include <asm/firmware.h>
+
+#ifdef DEBUG
+#include <asm/udbg.h>
+#define DBG(fmt...) printk(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+static DEFINE_SPINLOCK(hose_spinlock);
+
+/* XXX kill that some day ... */
+int global_phb_number;		/* Global phb counter */
+
+extern struct list_head hose_list;
+
+/*
+ * Initialize common variables of a pci_controller (phb).
+ */
+static void __devinit pci_setup_pci_controller(struct pci_controller *hose)
+{
+	memset(hose, 0, sizeof(struct pci_controller));
+
+	spin_lock(&hose_spinlock);
+	hose->global_number = global_phb_number++;
+	list_add_tail(&hose->list_node, &hose_list);
+	spin_unlock(&hose_spinlock);
+}
+
+struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
+{
+	struct pci_controller *phb;
+
+	if (mem_init_done)
+		phb = kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
+	else
+		phb = alloc_bootmem(sizeof (struct pci_controller));
+	if (phb == NULL)
+		return NULL;
+	pci_setup_pci_controller(phb);
+	phb->arch_data = dev;
+	phb->is_dynamic = mem_init_done;
+#ifdef CONFIG_PPC64
+	if (dev) {
+		int nid = of_node_to_nid(dev);
+
+		if (nid < 0 || !node_online(nid))
+			nid = -1;
+
+		PHB_SET_NODE(phb, nid);
+	}
+#endif
+	return phb;
+}
+
+void pcibios_free_controller(struct pci_controller *phb)
+{
+	spin_lock(&hose_spinlock);
+	list_del(&phb->list_node);
+	spin_unlock(&hose_spinlock);
+
+	if (phb->is_dynamic)
+		kfree(phb);
+}
+
+/*
+ * Return the domain number for this bus.
+ */
+int pci_domain_nr(struct pci_bus *bus)
+{
+	if (firmware_has_feature(FW_FEATURE_ISERIES))
+		return 0;
+	else {
+		struct pci_controller *hose = pci_bus_to_host(bus);
+
+		return hose->global_number;
+	}
+}
+
+EXPORT_SYMBOL(pci_domain_nr);
+
+#ifdef CONFIG_PPC_OF
+
+/* This routine is meant to be used early during boot, when the
+ * PCI bus numbers have not yet been assigned, and you need to
+ * issue PCI config cycles to an OF device.
+ * It could also be used to "fix" RTAS config cycles if you want
+ * to set pci_assign_all_buses to 1 and still use RTAS for PCI
+ * config cycles.
+ */
+struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
+{
+	if (!have_of)
+		return NULL;
+	while(node) {
+		struct pci_controller *hose, *tmp;
+		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
+			if (hose->arch_data == node)
+				return hose;
+		node = node->parent;
+	}
+	return NULL;
+}
+
+static ssize_t pci_show_devspec(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct pci_dev *pdev;
+	struct device_node *np;
+
+	pdev = to_pci_dev (dev);
+	np = pci_device_to_OF_node(pdev);
+	if (np == NULL || np->full_name == NULL)
+		return 0;
+	return sprintf(buf, "%s", np->full_name);
+}
+static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
+#endif /* CONFIG_PPC_OF */
+
+/* Add sysfs properties */
+int pcibios_add_platform_entries(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PPC_OF
+	return device_create_file(&pdev->dev, &dev_attr_devspec);
+#else
+	return 0;
+#endif /* CONFIG_PPC_OF */
+
+}
+
+char __init *pcibios_setup(char *str)
+{
+	return str;
+}
+
+/*
+ * Reads the interrupt pin to determine if the interrupt is used by the card.
+ * If the interrupt is used, then gets the interrupt line from the
+ * openfirmware and sets it in the pci_dev and pci_config line.
+ */
+int pci_read_irq_line(struct pci_dev *pci_dev)
+{
+	struct of_irq oirq;
+	unsigned int virq;
+
+	DBG("Try to map irq for %s...\n", pci_name(pci_dev));
+
+#ifdef DEBUG
+	memset(&oirq, 0xff, sizeof(oirq));
+#endif
+	/* Try to get a mapping from the device-tree */
+	if (of_irq_map_pci(pci_dev, &oirq)) {
+		u8 line, pin;
+
+		/* If that fails, let's fall back to what is in the config
+		 * space and map that through the default controller. We
+		 * also set the type to level low since that's what PCI
+		 * interrupts are. If your platform does differently, then
+		 * either provide a proper interrupt tree or don't use this
+		 * function.
+		 */
+		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
+			return -1;
+		if (pin == 0)
+			return -1;
+		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
+		    line == 0xff) {
+			return -1;
+		}
+		DBG(" -> no map ! Using irq line %d from PCI config\n", line);
+
+		virq = irq_create_mapping(NULL, line);
+		if (virq != NO_IRQ)
+			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
+	} else {
+		DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
+		    oirq.size, oirq.specifier[0], oirq.specifier[1],
+		    oirq.controller->full_name);
+
+		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
+					     oirq.size);
+	}
+	if(virq == NO_IRQ) {
+		DBG(" -> failed to map !\n");
+		return -1;
+	}
+
+	DBG(" -> mapped to linux irq %d\n", virq);
+
+	pci_dev->irq = virq;
+
+	return 0;
+}
+EXPORT_SYMBOL(pci_read_irq_line);
+
+/*
+ * Platform support for /proc/bus/pci/X/Y mmap()s,
+ * modelled on the sparc64 implementation by Dave Miller.
+ *  -- paulus.
+ */
+
+/*
+ * Adjust vm_pgoff of VMA such that it is the physical page offset
+ * corresponding to the 32-bit pci bus offset for DEV requested by the user.
+ *
+ * Basically, the user finds the base address for his device which he wishes
+ * to mmap.  They read the 32-bit value from the config space base register,
+ * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
+ * offset parameter of mmap on /proc/bus/pci/XXX for that device.
+ *
+ * Returns negative error code on failure, zero on success.
+ */
+static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
+					       resource_size_t *offset,
+					       enum pci_mmap_state mmap_state)
+{
+	struct pci_controller *hose = pci_bus_to_host(dev->bus);
+	unsigned long io_offset = 0;
+	int i, res_bit;
+
+	if (hose == 0)
+		return NULL;		/* should never happen */
+
+	/* If memory, add on the PCI bridge address offset */
+	if (mmap_state == pci_mmap_mem) {
+#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
+		*offset += hose->pci_mem_offset;
+#endif
+		res_bit = IORESOURCE_MEM;
+	} else {
+		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+		*offset += io_offset;
+		res_bit = IORESOURCE_IO;
+	}
+
+	/*
+	 * Check that the offset requested corresponds to one of the
+	 * resources of the device.
+	 */
+	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+		struct resource *rp = &dev->resource[i];
+		int flags = rp->flags;
+
+		/* treat ROM as memory (should be already) */
+		if (i == PCI_ROM_RESOURCE)
+			flags |= IORESOURCE_MEM;
+
+		/* Active and same type? */
+		if ((flags & res_bit) == 0)
+			continue;
+
+		/* In the range of this resource? */
+		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
+			continue;
+
+		/* found it! construct the final physical address */
+		if (mmap_state == pci_mmap_io)
+			*offset += hose->io_base_phys - io_offset;
+		return rp;
+	}
+
+	return NULL;
+}
+
+/*
+ * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
+ * device mapping.
+ */
+static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
+				      pgprot_t protection,
+				      enum pci_mmap_state mmap_state,
+				      int write_combine)
+{
+	unsigned long prot = pgprot_val(protection);
+
+	/* Write combine is always 0 on non-memory space mappings. On
+	 * memory space, if the user didn't pass 1, we check for a
+	 * "prefetchable" resource. This is a bit hackish, but we use
+	 * this to work around the inability of /sysfs to provide a write
+	 * combine bit
+	 */
+	if (mmap_state != pci_mmap_mem)
+		write_combine = 0;
+	else if (write_combine == 0) {
+		if (rp->flags & IORESOURCE_PREFETCH)
+			write_combine = 1;
+	}
+
+	/* XXX would be nice to have a way to ask for write-through */
+	prot |= _PAGE_NO_CACHE;
+	if (write_combine)
+		prot &= ~_PAGE_GUARDED;
+	else
+		prot |= _PAGE_GUARDED;
+
+	return __pgprot(prot);
+}
+
+/*
+ * This one is used by /dev/mem and fbdev who have no clue about the
+ * PCI device; it tries to find the PCI device first and calls the
+ * above routine
+ */
+pgprot_t pci_phys_mem_access_prot(struct file *file,
+				  unsigned long pfn,
+				  unsigned long size,
+				  pgprot_t protection)
+{
+	struct pci_dev *pdev = NULL;
+	struct resource *found = NULL;
+	unsigned long prot = pgprot_val(protection);
+	unsigned long offset = pfn << PAGE_SHIFT;
+	int i;
+
+	if (page_is_ram(pfn))
+		return __pgprot(prot);
+
+	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+
+	for_each_pci_dev(pdev) {
+		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+			struct resource *rp = &pdev->resource[i];
+			int flags = rp->flags;
+
+			/* Active and same type? */
+			if ((flags & IORESOURCE_MEM) == 0)
+				continue;
+			/* In the range of this resource? */
+			if (offset < (rp->start & PAGE_MASK) ||
+			    offset > rp->end)
+				continue;
+			found = rp;
+			break;
+		}
+		if (found)
+			break;
+	}
+	if (found) {
+		if (found->flags & IORESOURCE_PREFETCH)
+			prot &= ~_PAGE_GUARDED;
+		pci_dev_put(pdev);
+	}
+
+	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
+
+	return __pgprot(prot);
+}
+
+
+/*
+ * Perform the actual remap of the pages for a PCI device mapping, as
+ * appropriate for this architecture.  The region in the process to map
+ * is described by vm_start and vm_end members of VMA, the base physical
+ * address is found in vm_pgoff.
+ * The pci device structure is provided so that architectures may make mapping
+ * decisions on a per-device or per-bus basis.
+ *
+ * Returns a negative error code on failure, zero on success.
+ */
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+			enum pci_mmap_state mmap_state, int write_combine)
+{
+	resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
+	struct resource *rp;
+	int ret;
+
+	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
+	if (rp == NULL)
+		return -EINVAL;
+
+	vma->vm_pgoff = offset >> PAGE_SHIFT;
+	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
+						  vma->vm_page_prot,
+						  mmap_state, write_combine);
+
+	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
+
+	return ret;
+}
+
+void pci_resource_to_user(const struct pci_dev *dev, int bar,
+			  const struct resource *rsrc,
+			  resource_size_t *start, resource_size_t *end)
+{
+	struct pci_controller *hose = pci_bus_to_host(dev->bus);
+	resource_size_t offset = 0;
+
+	if (hose == NULL)
+		return;
+
+	if (rsrc->flags & IORESOURCE_IO)
+		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+
+	/* We pass a fully fixed up address to userland for MMIO instead of
+	 * a BAR value because X is lame and expects to be able to use that
+	 * to pass to /dev/mem !
+	 *
+	 * That means that we'll have potentially 64 bits values where some
+	 * userland apps only expect 32 (like X itself since it thinks only
+	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
+	 * 32 bits CHRPs :-(
+	 *
+	 * Hopefully, the sysfs interface is immune to that gunk. Once X
+	 * has been fixed (and the fix spread enough), we can re-enable the
+	 * 2 lines below and pass down a BAR value to userland. In that case
+	 * we'll also have to re-enable the matching code in
+	 * __pci_mmap_make_offset().
+	 *
+	 * BenH.
+	 */
+#if 0
+	else if (rsrc->flags & IORESOURCE_MEM)
+		offset = hose->pci_mem_offset;
+#endif
+
+	*start = rsrc->start - offset;
+	*end = rsrc->end - offset;
+}
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 8698211..0adf077 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -55,8 +55,7 @@
  */
 int pci_assign_all_buses;
 
-struct pci_controller* hose_head;
-struct pci_controller** hose_tail = &hose_head;
+LIST_HEAD(hose_list);
 
 static int pci_bus_count;
 
@@ -573,58 +572,6 @@
 	}
 }
 
-
-int
-pcibios_enable_resources(struct pci_dev *dev, int mask)
-{
-	u16 cmd, old_cmd;
-	int idx;
-	struct resource *r;
-
-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
-	old_cmd = cmd;
-	for (idx=0; idx<6; idx++) {
-		/* Only set up the requested stuff */
-		if (!(mask & (1<<idx)))
-			continue;
-	
-		r = &dev->resource[idx];
-		if (r->flags & IORESOURCE_UNSET) {
-			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
-			return -EINVAL;
-		}
-		if (r->flags & IORESOURCE_IO)
-			cmd |= PCI_COMMAND_IO;
-		if (r->flags & IORESOURCE_MEM)
-			cmd |= PCI_COMMAND_MEMORY;
-	}
-	if (dev->resource[PCI_ROM_RESOURCE].start)
-		cmd |= PCI_COMMAND_MEMORY;
-	if (cmd != old_cmd) {
-		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
-		pci_write_config_word(dev, PCI_COMMAND, cmd);
-	}
-	return 0;
-}
-
-static int next_controller_index;
-
-struct pci_controller * __init
-pcibios_alloc_controller(void)
-{
-	struct pci_controller *hose;
-
-	hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose));
-	memset(hose, 0, sizeof(struct pci_controller));
-
-	*hose_tail = hose;
-	hose_tail = &hose->next;
-
-	hose->index = next_controller_index++;
-
-	return hose;
-}
-
 #ifdef CONFIG_PPC_OF
 /*
  * Functions below are used on OpenFirmware machines.
@@ -670,7 +617,7 @@
 pcibios_make_OF_bus_map(void)
 {
 	int i;
-	struct pci_controller* hose;
+	struct pci_controller *hose, *tmp;
 	struct property *map_prop;
 	struct device_node *dn;
 
@@ -687,7 +634,7 @@
 		pci_to_OF_bus_map[i] = 0xff;
 
 	/* For each hose, we begin searching bridges */
-	for(hose=hose_head; hose; hose=hose->next) {
+	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
 		struct device_node* node;	
 		node = (struct device_node *)hose->arch_data;
 		if (!node)
@@ -765,7 +712,7 @@
 
 	/* Are we a root bus ? */
 	if (bus->self == NULL || bus->parent == NULL) {
-		struct pci_controller *hose = pci_bus_to_hose(bus->number);
+		struct pci_controller *hose = pci_bus_to_host(bus);
 		if (hose == NULL)
 			return NULL;
 		return of_node_get(hose->arch_data);
@@ -818,27 +765,6 @@
 }
 EXPORT_SYMBOL(pci_device_to_OF_node);
 
-/* This routine is meant to be used early during boot, when the
- * PCI bus numbers have not yet been assigned, and you need to
- * issue PCI config cycles to an OF device.
- * It could also be used to "fix" RTAS config cycles if you want
- * to set pci_assign_all_buses to 1 and still use RTAS for PCI
- * config cycles.
- */
-struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
-{
-	if (!have_of)
-		return NULL;
-	while(node) {
-		struct pci_controller* hose;
-		for (hose=hose_head;hose;hose=hose->next)
-			if (hose->arch_data == node)
-				return hose;
-		node=node->parent;
-	}
-	return NULL;
-}
-
 static int
 find_OF_pci_device_filter(struct device_node* node, void* data)
 {
@@ -1027,34 +953,12 @@
 	}
 }
 
-static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct pci_dev *pdev;
-	struct device_node *np;
-
-	pdev = to_pci_dev (dev);
-	np = pci_device_to_OF_node(pdev);
-	if (np == NULL || np->full_name == NULL)
-		return 0;
-	return sprintf(buf, "%s", np->full_name);
-}
-static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
-
 #else /* CONFIG_PPC_OF */
 void pcibios_make_OF_bus_map(void)
 {
 }
 #endif /* CONFIG_PPC_OF */
 
-/* Add sysfs properties */
-int pcibios_add_platform_entries(struct pci_dev *pdev)
-{
-#ifdef CONFIG_PPC_OF
-	return device_create_file(&pdev->dev, &dev_attr_devspec);
-#endif /* CONFIG_PPC_OF */
-}
-
-
 #ifdef CONFIG_PPC_PMAC
 /*
  * This set of routines checks for PCI<->PCI bridges that have closed
@@ -1269,14 +1173,14 @@
 static int __init
 pcibios_init(void)
 {
-	struct pci_controller *hose;
+	struct pci_controller *hose, *tmp;
 	struct pci_bus *bus;
-	int next_busno;
+	int next_busno = 0;
 
 	printk(KERN_INFO "PCI: Probing PCI hardware\n");
 
 	/* Scan all of the recorded PCI controllers.  */
-	for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
+	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
 		if (pci_assign_all_buses)
 			hose->first_busno = next_busno;
 		hose->last_busno = 0xff;
@@ -1319,12 +1223,6 @@
 
 subsys_initcall(pcibios_init);
 
-unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
-			     unsigned long start, unsigned long size)
-{
-	return start;
-}
-
 void __init pcibios_fixup_bus(struct pci_bus *bus)
 {
 	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
@@ -1342,7 +1240,7 @@
 		if (!res->flags) {
 			if (io_offset)
 				printk(KERN_ERR "I/O resource not set for host"
-				       " bridge %d\n", hose->index);
+				       " bridge %d\n", hose->global_number);
 			res->start = 0;
 			res->end = IO_SPACE_LIMIT;
 			res->flags = IORESOURCE_IO;
@@ -1356,7 +1254,7 @@
 				if (i > 0)
 					continue;
 				printk(KERN_ERR "Memory resource not set for "
-				       "host bridge %d\n", hose->index);
+				       "host bridge %d\n", hose->global_number);
 				res->start = hose->pci_mem_offset;
 				res->end = ~0U;
 				res->flags = IORESOURCE_MEM;
@@ -1370,7 +1268,7 @@
 		for (i = 0; i < 4; ++i) {
 			if ((res = bus->resource[i]) == NULL)
 				continue;
-			if (!res->flags)
+			if (!res->flags || bus->self->transparent)
 				continue;
 			if (io_offset && (res->flags & IORESOURCE_IO)) {
 				res->start += io_offset;
@@ -1395,11 +1293,6 @@
 	}
 }
 
-char __init *pcibios_setup(char *str)
-{
-	return str;
-}
-
 /* the next one is stolen from the alpha port... */
 void __init
 pcibios_update_irq(struct pci_dev *dev, int irq)
@@ -1408,64 +1301,6 @@
 	/* XXX FIXME - update OF device tree node interrupt property */
 }
 
-#ifdef CONFIG_PPC_MERGE
-/* XXX This is a copy of the ppc64 version. This is temporary until we start
- * merging the 2 PCI layers
- */
-/*
- * Reads the interrupt pin to determine if interrupt is use by card.
- * If the interrupt is used, then gets the interrupt line from the
- * openfirmware and sets it in the pci_dev and pci_config line.
- */
-int pci_read_irq_line(struct pci_dev *pci_dev)
-{
-	struct of_irq oirq;
-	unsigned int virq;
-
-	DBG("Try to map irq for %s...\n", pci_name(pci_dev));
-
-	/* Try to get a mapping from the device-tree */
-	if (of_irq_map_pci(pci_dev, &oirq)) {
-		u8 line, pin;
-
-		/* If that fails, lets fallback to what is in the config
-		 * space and map that through the default controller. We
-		 * also set the type to level low since that's what PCI
-		 * interrupts are. If your platform does differently, then
-		 * either provide a proper interrupt tree or don't use this
-		 * function.
-		 */
-		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
-			return -1;
-		if (pin == 0)
-			return -1;
-		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
-		    line == 0xff) {
-			return -1;
-		}
-		DBG(" -> no map ! Using irq line %d from PCI config\n", line);
-
-		virq = irq_create_mapping(NULL, line);
-		if (virq != NO_IRQ)
-			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
-	} else {
-		DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
-		    oirq.size, oirq.specifier[0], oirq.controller->full_name);
-
-		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
-					     oirq.size);
-	}
-	if(virq == NO_IRQ) {
-		DBG(" -> failed to map !\n");
-		return -1;
-	}
-	pci_dev->irq = virq;
-
-	return 0;
-}
-EXPORT_SYMBOL(pci_read_irq_line);
-#endif /* CONFIG_PPC_MERGE */
-
 int pcibios_enable_device(struct pci_dev *dev, int mask)
 {
 	u16 cmd, old_cmd;
@@ -1497,281 +1332,17 @@
 	return 0;
 }
 
-struct pci_controller*
+static struct pci_controller*
 pci_bus_to_hose(int bus)
 {
-	struct pci_controller* hose = hose_head;
+	struct pci_controller *hose, *tmp;
 
-	for (; hose; hose = hose->next)
+	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
 		if (bus >= hose->first_busno && bus <= hose->last_busno)
 			return hose;
 	return NULL;
 }
 
-void __iomem *
-pci_bus_io_base(unsigned int bus)
-{
-	struct pci_controller *hose;
-
-	hose = pci_bus_to_hose(bus);
-	if (!hose)
-		return NULL;
-	return hose->io_base_virt;
-}
-
-unsigned long
-pci_bus_io_base_phys(unsigned int bus)
-{
-	struct pci_controller *hose;
-
-	hose = pci_bus_to_hose(bus);
-	if (!hose)
-		return 0;
-	return hose->io_base_phys;
-}
-
-unsigned long
-pci_bus_mem_base_phys(unsigned int bus)
-{
-	struct pci_controller *hose;
-
-	hose = pci_bus_to_hose(bus);
-	if (!hose)
-		return 0;
-	return hose->pci_mem_offset;
-}
-
-unsigned long
-pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
-{
-	/* Hack alert again ! See comments in chrp_pci.c
-	 */
-	struct pci_controller* hose =
-		(struct pci_controller *)pdev->sysdata;
-	if (hose && res->flags & IORESOURCE_MEM)
-		return res->start - hose->pci_mem_offset;
-	/* We may want to do something with IOs here... */
-	return res->start;
-}
-
-
-static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
-					       resource_size_t *offset,
-					       enum pci_mmap_state mmap_state)
-{
-	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
-	unsigned long io_offset = 0;
-	int i, res_bit;
-
-	if (hose == 0)
-		return NULL;		/* should never happen */
-
-	/* If memory, add on the PCI bridge address offset */
-	if (mmap_state == pci_mmap_mem) {
-#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
-		*offset += hose->pci_mem_offset;
-#endif
-		res_bit = IORESOURCE_MEM;
-	} else {
-		io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE;
-		*offset += io_offset;
-		res_bit = IORESOURCE_IO;
-	}
-
-	/*
-	 * Check that the offset requested corresponds to one of the
-	 * resources of the device.
-	 */
-	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
-		struct resource *rp = &dev->resource[i];
-		int flags = rp->flags;
-
-		/* treat ROM as memory (should be already) */
-		if (i == PCI_ROM_RESOURCE)
-			flags |= IORESOURCE_MEM;
-
-		/* Active and same type? */
-		if ((flags & res_bit) == 0)
-			continue;
-
-		/* In the range of this resource? */
-		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
-			continue;
-
-		/* found it! construct the final physical address */
-		if (mmap_state == pci_mmap_io)
-			*offset += hose->io_base_phys - io_offset;
-		return rp;
-	}
-
-	return NULL;
-}
-
-/*
- * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
-				      pgprot_t protection,
-				      enum pci_mmap_state mmap_state,
-				      int write_combine)
-{
-	unsigned long prot = pgprot_val(protection);
-
-	/* Write combine is always 0 on non-memory space mappings. On
-	 * memory space, if the user didn't pass 1, we check for a
-	 * "prefetchable" resource. This is a bit hackish, but we use
-	 * this to workaround the inability of /sysfs to provide a write
-	 * combine bit
-	 */
-	if (mmap_state != pci_mmap_mem)
-		write_combine = 0;
-	else if (write_combine == 0) {
-		if (rp->flags & IORESOURCE_PREFETCH)
-			write_combine = 1;
-	}
-
-	/* XXX would be nice to have a way to ask for write-through */
-	prot |= _PAGE_NO_CACHE;
-	if (write_combine)
-		prot &= ~_PAGE_GUARDED;
-	else
-		prot |= _PAGE_GUARDED;
-
-	return __pgprot(prot);
-}
-
-/*
- * This one is used by /dev/mem and fbdev who have no clue about the
- * PCI device, it tries to find the PCI device first and calls the
- * above routine
- */
-pgprot_t pci_phys_mem_access_prot(struct file *file,
-				  unsigned long pfn,
-				  unsigned long size,
-				  pgprot_t protection)
-{
-	struct pci_dev *pdev = NULL;
-	struct resource *found = NULL;
-	unsigned long prot = pgprot_val(protection);
-	unsigned long offset = pfn << PAGE_SHIFT;
-	int i;
-
-	if (page_is_ram(pfn))
-		return __pgprot(prot);
-
-	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
-
-	for_each_pci_dev(pdev) {
-		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
-			struct resource *rp = &pdev->resource[i];
-			int flags = rp->flags;
-
-			/* Active and same type? */
-			if ((flags & IORESOURCE_MEM) == 0)
-				continue;
-			/* In the range of this resource? */
-			if (offset < (rp->start & PAGE_MASK) ||
-			    offset > rp->end)
-				continue;
-			found = rp;
-			break;
-		}
-		if (found)
-			break;
-	}
-	if (found) {
-		if (found->flags & IORESOURCE_PREFETCH)
-			prot &= ~_PAGE_GUARDED;
-		pci_dev_put(pdev);
-	}
-
-	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
-
-	return __pgprot(prot);
-}
-
-
-/*
- * Perform the actual remap of the pages for a PCI device mapping, as
- * appropriate for this architecture.  The region in the process to map
- * is described by vm_start and vm_end members of VMA, the base physical
- * address is found in vm_pgoff.
- * The pci device structure is provided so that architectures may make mapping
- * decisions on a per-device or per-bus basis.
- *
- * Returns a negative error code on failure, zero on success.
- */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state,
-			int write_combine)
-{
-	resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
-	struct resource *rp;
-	int ret;
-
-	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
-	if (rp == NULL)
-		return -EINVAL;
-
-	vma->vm_pgoff = offset >> PAGE_SHIFT;
-	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
-						  vma->vm_page_prot,
-						  mmap_state, write_combine);
-
-	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
-
-	return ret;
-}
-
-/* Obsolete functions. Should be removed once the symbios driver
- * is fixed
- */
-unsigned long
-phys_to_bus(unsigned long pa)
-{
-	struct pci_controller *hose;
-	int i;
-
-	for (hose = hose_head; hose; hose = hose->next) {
-		for (i = 0; i < 3; ++i) {
-			if (pa >= hose->mem_resources[i].start
-			    && pa <= hose->mem_resources[i].end) {
-				/*
-				 * XXX the hose->pci_mem_offset really
-				 * only applies to mem_resources[0].
-				 * We need a way to store an offset for
-				 * the others.  -- paulus
-				 */
-				if (i == 0)
-					pa -= hose->pci_mem_offset;
-				return pa;
-			}
-		}
-	}
-	/* hmmm, didn't find it */
-	return 0;
-}
-
-unsigned long
-pci_phys_to_bus(unsigned long pa, int busnr)
-{
-	struct pci_controller* hose = pci_bus_to_hose(busnr);
-	if (!hose)
-		return pa;
-	return pa - hose->pci_mem_offset;
-}
-
-unsigned long
-pci_bus_to_phys(unsigned int ba, int busnr)
-{
-	struct pci_controller* hose = pci_bus_to_hose(busnr);
-	if (!hose)
-		return ba;
-	return ba + hose->pci_mem_offset;
-}
-
 /* Provide information on locations of various I/O regions in physical
  * memory.  Do this on a per-card basis so that we choose the right
  * root bridge.
@@ -1814,62 +1385,11 @@
 	return result;
 }
 
-void pci_resource_to_user(const struct pci_dev *dev, int bar,
-			  const struct resource *rsrc,
-			  resource_size_t *start, resource_size_t *end)
-{
-	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
-	resource_size_t offset = 0;
-
-	if (hose == NULL)
-		return;
-
-	if (rsrc->flags & IORESOURCE_IO)
-		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
-
-	/* We pass a fully fixed up address to userland for MMIO instead of
-	 * a BAR value because X is lame and expects to be able to use that
-	 * to pass to /dev/mem !
-	 *
-	 * That means that we'll have potentially 64 bits values where some
-	 * userland apps only expect 32 (like X itself since it thinks only
-	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
-	 * 32 bits CHRPs :-(
-	 *
-	 * Hopefully, the sysfs insterface is immune to that gunk. Once X
-	 * has been fixed (and the fix spread enough), we can re-enable the
-	 * 2 lines below and pass down a BAR value to userland. In that case
-	 * we'll also have to re-enable the matching code in
-	 * __pci_mmap_make_offset().
-	 *
-	 * BenH.
-	 */
-#if 0
-	else if (rsrc->flags & IORESOURCE_MEM)
-		offset = hose->pci_mem_offset;
-#endif
-
-	*start = rsrc->start - offset;
-	*end = rsrc->end - offset;
-}
-
-void __init pci_init_resource(struct resource *res, resource_size_t start,
-			      resource_size_t end, int flags, char *name)
-{
-	res->start = start;
-	res->end = end;
-	res->flags = flags;
-	res->name = name;
-	res->parent = NULL;
-	res->sibling = NULL;
-	res->child = NULL;
-}
-
 unsigned long pci_address_to_pio(phys_addr_t address)
 {
-	struct pci_controller* hose = hose_head;
+	struct pci_controller *hose, *tmp;
 
-	for (; hose; hose = hose->next) {
+	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
 		unsigned int size = hose->io_resource.end -
 			hose->io_resource.start + 1;
 		if (address >= hose->io_base_phys &&
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index e3009a4..a97e23a 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -22,6 +22,7 @@
 #include <linux/list.h>
 #include <linux/syscalls.h>
 #include <linux/irq.h>
+#include <linux/vmalloc.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
@@ -41,35 +42,23 @@
 
 unsigned long pci_probe_only = 1;
 int pci_assign_all_buses = 0;
-static int pci_initial_scan_done;
 
 static void fixup_resource(struct resource *res, struct pci_dev *dev);
 static void do_bus_setup(struct pci_bus *bus);
-static void phbs_remap_io(void);
 
 /* pci_io_base -- the base address from which io bars are offsets.
  * This is the lowest I/O base address (so bar values are always positive),
  * and it *must* be the start of ISA space if an ISA bus exists because
- * ISA drivers use hard coded offsets.  If no ISA bus exists a dummy
- * page is mapped and isa_io_limit prevents access to it.
+ * ISA drivers use hard coded offsets.  If no ISA bus exists nothing
+ * is mapped on the first 64K of IO space
  */
-unsigned long isa_io_base;	/* NULL if no ISA bus */
-EXPORT_SYMBOL(isa_io_base);
-unsigned long pci_io_base;
+unsigned long pci_io_base = ISA_IO_BASE;
 EXPORT_SYMBOL(pci_io_base);
 
-void iSeries_pcibios_init(void);
-
 LIST_HEAD(hose_list);
 
 static struct dma_mapping_ops *pci_dma_ops;
 
-int global_phb_number;		/* Global phb counter */
-
-/* Cached ISA bridge dev. */
-struct pci_dev *ppc64_isabridge_dev = NULL;
-EXPORT_SYMBOL_GPL(ppc64_isabridge_dev);
-
 void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
 {
 	pci_dma_ops = dma_ops;
@@ -100,7 +89,7 @@
 		return;
 
 	if (res->flags & IORESOURCE_IO)
-	        offset = (unsigned long)hose->io_base_virt - pci_io_base;
+	        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
 
 	if (res->flags & IORESOURCE_MEM)
 		offset = hose->pci_mem_offset;
@@ -119,7 +108,7 @@
 		return;
 
 	if (res->flags & IORESOURCE_IO)
-	        offset = (unsigned long)hose->io_base_virt - pci_io_base;
+	        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
 
 	if (res->flags & IORESOURCE_MEM)
 		offset = hose->pci_mem_offset;
@@ -156,7 +145,7 @@
 
 	if (res->flags & IORESOURCE_IO) {
 	        unsigned long offset = (unsigned long)hose->io_base_virt -
-					pci_io_base;
+					_IO_BASE;
 		/* Make sure we start at our min on all hoses */
 		if (start - offset < PCIBIOS_MIN_IO)
 			start = PCIBIOS_MIN_IO + offset;
@@ -180,55 +169,6 @@
 	res->start = start;
 }
 
-static DEFINE_SPINLOCK(hose_spinlock);
-
-/*
- * pci_controller(phb) initialized common variables.
- */
-static void __devinit pci_setup_pci_controller(struct pci_controller *hose)
-{
-	memset(hose, 0, sizeof(struct pci_controller));
-
-	spin_lock(&hose_spinlock);
-	hose->global_number = global_phb_number++;
-	list_add_tail(&hose->list_node, &hose_list);
-	spin_unlock(&hose_spinlock);
-}
-
-struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
-{
-	struct pci_controller *phb;
-
-	if (mem_init_done)
-		phb = kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
-	else
-		phb = alloc_bootmem(sizeof (struct pci_controller));
-	if (phb == NULL)
-		return NULL;
-	pci_setup_pci_controller(phb);
-	phb->arch_data = dev;
-	phb->is_dynamic = mem_init_done;
-	if (dev) {
-		int nid = of_node_to_nid(dev);
-
-		if (nid < 0 || !node_online(nid))
-			nid = -1;
-
-		PHB_SET_NODE(phb, nid);
-	}
-	return phb;
-}
-
-void pcibios_free_controller(struct pci_controller *phb)
-{
-	spin_lock(&hose_spinlock);
-	list_del(&phb->list_node);
-	spin_unlock(&hose_spinlock);
-
-	if (phb->is_dynamic)
-		kfree(phb);
-}
-
 void __devinit pcibios_claim_one_bus(struct pci_bus *b)
 {
 	struct pci_dev *dev;
@@ -291,7 +231,6 @@
 	return flags;
 }
 
-#define GET_64BIT(prop, i)	((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
 
 static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
 {
@@ -310,8 +249,8 @@
 		flags = pci_parse_of_flags(addrs[0]);
 		if (!flags)
 			continue;
-		base = GET_64BIT(addrs, 1);
-		size = GET_64BIT(addrs, 3);
+		base = of_read_number(&addrs[1], 2);
+		size = of_read_number(&addrs[3], 2);
 		if (!size)
 			continue;
 		i = addrs[0] & 0xff;
@@ -479,7 +418,7 @@
 	i = 1;
 	for (; len >= 32; len -= 32, ranges += 8) {
 		flags = pci_parse_of_flags(ranges[0]);
-		size = GET_64BIT(ranges, 6);
+		size = of_read_number(&ranges[6], 2);
 		if (flags == 0 || size == 0)
 			continue;
 		if (flags & IORESOURCE_IO) {
@@ -498,7 +437,7 @@
 			res = bus->resource[i];
 			++i;
 		}
-		res->start = GET_64BIT(ranges, 1);
+		res->start = of_read_number(&ranges[1], 2);
 		res->end = res->start + size - 1;
 		res->flags = flags;
 		fixup_resource(res, dev);
@@ -537,10 +476,16 @@
 	bus->secondary = hose->first_busno;
 	hose->bus = bus;
 
+	if (!firmware_has_feature(FW_FEATURE_ISERIES))
+		pcibios_map_io_space(bus);
+
 	bus->resource[0] = res = &hose->io_resource;
-	if (res->flags && request_resource(&ioport_resource, res))
+	if (res->flags && request_resource(&ioport_resource, res)) {
 		printk(KERN_ERR "Failed to request PCI IO region "
 		       "on PCI domain %04x\n", hose->global_number);
+		DBG("res->start = 0x%016lx, res->end = 0x%016lx\n",
+		    res->start, res->end);
+	}
 
 	for (i = 0; i < 3; ++i) {
 		res = &hose->mem_resources[i];
@@ -598,17 +543,6 @@
 	if (ppc_md.pcibios_fixup)
 		ppc_md.pcibios_fixup();
 
-	/* Cache the location of the ISA bridge (if we have one) */
-	ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
-	if (ppc64_isabridge_dev != NULL)
-		printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
-
-	if (!firmware_has_feature(FW_FEATURE_ISERIES))
-		/* map in PCI I/O space */
-		phbs_remap_io();
-
-	pci_initial_scan_done = 1;
-
 	printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
 
 	return 0;
@@ -616,11 +550,6 @@
 
 subsys_initcall(pcibios_init);
 
-char __init *pcibios_setup(char *str)
-{
-	return str;
-}
-
 int pcibios_enable_device(struct pci_dev *dev, int mask)
 {
 	u16 cmd, oldcmd;
@@ -651,22 +580,6 @@
 	return 0;
 }
 
-/*
- * Return the domain number for this bus.
- */
-int pci_domain_nr(struct pci_bus *bus)
-{
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return 0;
-	else {
-		struct pci_controller *hose = pci_bus_to_host(bus);
-
-		return hose->global_number;
-	}
-}
-
-EXPORT_SYMBOL(pci_domain_nr);
-
 /* Decide whether to display the domain number in /proc */
 int pci_proc_domain(struct pci_bus *bus)
 {
@@ -678,281 +591,6 @@
 	}
 }
 
-/*
- * Platform support for /proc/bus/pci/X/Y mmap()s,
- * modelled on the sparc64 implementation by Dave Miller.
- *  -- paulus.
- */
-
-/*
- * Adjust vm_pgoff of VMA such that it is the physical page offset
- * corresponding to the 32-bit pci bus offset for DEV requested by the user.
- *
- * Basically, the user finds the base address for his device which he wishes
- * to mmap.  They read the 32-bit value from the config space base register,
- * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
- * offset parameter of mmap on /proc/bus/pci/XXX for that device.
- *
- * Returns negative error code on failure, zero on success.
- */
-static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
-					       resource_size_t *offset,
-					       enum pci_mmap_state mmap_state)
-{
-	struct pci_controller *hose = pci_bus_to_host(dev->bus);
-	unsigned long io_offset = 0;
-	int i, res_bit;
-
-	if (hose == 0)
-		return NULL;		/* should never happen */
-
-	/* If memory, add on the PCI bridge address offset */
-	if (mmap_state == pci_mmap_mem) {
-#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
-		*offset += hose->pci_mem_offset;
-#endif
-		res_bit = IORESOURCE_MEM;
-	} else {
-		io_offset = (unsigned long)hose->io_base_virt - pci_io_base;
-		*offset += io_offset;
-		res_bit = IORESOURCE_IO;
-	}
-
-	/*
-	 * Check that the offset requested corresponds to one of the
-	 * resources of the device.
-	 */
-	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
-		struct resource *rp = &dev->resource[i];
-		int flags = rp->flags;
-
-		/* treat ROM as memory (should be already) */
-		if (i == PCI_ROM_RESOURCE)
-			flags |= IORESOURCE_MEM;
-
-		/* Active and same type? */
-		if ((flags & res_bit) == 0)
-			continue;
-
-		/* In the range of this resource? */
-		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
-			continue;
-
-		/* found it! construct the final physical address */
-		if (mmap_state == pci_mmap_io)
-		       	*offset += hose->io_base_phys - io_offset;
-		return rp;
-	}
-
-	return NULL;
-}
-
-/*
- * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
-				      pgprot_t protection,
-				      enum pci_mmap_state mmap_state,
-				      int write_combine)
-{
-	unsigned long prot = pgprot_val(protection);
-
-	/* Write combine is always 0 on non-memory space mappings. On
-	 * memory space, if the user didn't pass 1, we check for a
-	 * "prefetchable" resource. This is a bit hackish, but we use
-	 * this to workaround the inability of /sysfs to provide a write
-	 * combine bit
-	 */
-	if (mmap_state != pci_mmap_mem)
-		write_combine = 0;
-	else if (write_combine == 0) {
-		if (rp->flags & IORESOURCE_PREFETCH)
-			write_combine = 1;
-	}
-
-	/* XXX would be nice to have a way to ask for write-through */
-	prot |= _PAGE_NO_CACHE;
-	if (write_combine)
-		prot &= ~_PAGE_GUARDED;
-	else
-		prot |= _PAGE_GUARDED;
-
-	return __pgprot(prot);
-}
-
-/*
- * This one is used by /dev/mem and fbdev who have no clue about the
- * PCI device, it tries to find the PCI device first and calls the
- * above routine
- */
-pgprot_t pci_phys_mem_access_prot(struct file *file,
-				  unsigned long pfn,
-				  unsigned long size,
-				  pgprot_t protection)
-{
-	struct pci_dev *pdev = NULL;
-	struct resource *found = NULL;
-	unsigned long prot = pgprot_val(protection);
-	unsigned long offset = pfn << PAGE_SHIFT;
-	int i;
-
-	if (page_is_ram(pfn))
-		return __pgprot(prot);
-
-	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
-
-	for_each_pci_dev(pdev) {
-		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
-			struct resource *rp = &pdev->resource[i];
-			int flags = rp->flags;
-
-			/* Active and same type? */
-			if ((flags & IORESOURCE_MEM) == 0)
-				continue;
-			/* In the range of this resource? */
-			if (offset < (rp->start & PAGE_MASK) ||
-			    offset > rp->end)
-				continue;
-			found = rp;
-			break;
-		}
-		if (found)
-			break;
-	}
-	if (found) {
-		if (found->flags & IORESOURCE_PREFETCH)
-			prot &= ~_PAGE_GUARDED;
-		pci_dev_put(pdev);
-	}
-
-	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
-
-	return __pgprot(prot);
-}
-
-
-/*
- * Perform the actual remap of the pages for a PCI device mapping, as
- * appropriate for this architecture.  The region in the process to map
- * is described by vm_start and vm_end members of VMA, the base physical
- * address is found in vm_pgoff.
- * The pci device structure is provided so that architectures may make mapping
- * decisions on a per-device or per-bus basis.
- *
- * Returns a negative error code on failure, zero on success.
- */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state, int write_combine)
-{
-	resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
-	struct resource *rp;
-	int ret;
-
-	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
-	if (rp == NULL)
-		return -EINVAL;
-
-	vma->vm_pgoff = offset >> PAGE_SHIFT;
-	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
-						  vma->vm_page_prot,
-						  mmap_state, write_combine);
-
-	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
-
-	return ret;
-}
-
-static ssize_t pci_show_devspec(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct pci_dev *pdev;
-	struct device_node *np;
-
-	pdev = to_pci_dev (dev);
-	np = pci_device_to_OF_node(pdev);
-	if (np == NULL || np->full_name == NULL)
-		return 0;
-	return sprintf(buf, "%s", np->full_name);
-}
-static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
-
-int pcibios_add_platform_entries(struct pci_dev *pdev)
-{
-	return device_create_file(&pdev->dev, &dev_attr_devspec);
-}
-
-#define ISA_SPACE_MASK 0x1
-#define ISA_SPACE_IO 0x1
-
-static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
-				      unsigned long phb_io_base_phys,
-				      void __iomem * phb_io_base_virt)
-{
-	/* Remove these asap */
-
-	struct pci_address {
-		u32 a_hi;
-		u32 a_mid;
-		u32 a_lo;
-	};
-
-	struct isa_address {
-		u32 a_hi;
-		u32 a_lo;
-	};
-
-	struct isa_range {
-		struct isa_address isa_addr;
-		struct pci_address pci_addr;
-		unsigned int size;
-	};
-
-	const struct isa_range *range;
-	unsigned long pci_addr;
-	unsigned int isa_addr;
-	unsigned int size;
-	int rlen = 0;
-
-	range = of_get_property(isa_node, "ranges", &rlen);
-	if (range == NULL || (rlen < sizeof(struct isa_range))) {
-		printk(KERN_ERR "no ISA ranges or unexpected isa range size,"
-		       "mapping 64k\n");
-		__ioremap_explicit(phb_io_base_phys,
-				   (unsigned long)phb_io_base_virt,
-				   0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED);
-		return;	
-	}
-	
-	/* From "ISA Binding to 1275"
-	 * The ranges property is laid out as an array of elements,
-	 * each of which comprises:
-	 *   cells 0 - 1:	an ISA address
-	 *   cells 2 - 4:	a PCI address 
-	 *			(size depending on dev->n_addr_cells)
-	 *   cell 5:		the size of the range
-	 */
-	if ((range->isa_addr.a_hi && ISA_SPACE_MASK) == ISA_SPACE_IO) {
-		isa_addr = range->isa_addr.a_lo;
-		pci_addr = (unsigned long) range->pci_addr.a_mid << 32 | 
-			range->pci_addr.a_lo;
-
-		/* Assume these are both zero */
-		if ((pci_addr != 0) || (isa_addr != 0)) {
-			printk(KERN_ERR "unexpected isa to pci mapping: %s\n",
-					__FUNCTION__);
-			return;
-		}
-		
-		size = PAGE_ALIGN(range->size);
-
-		__ioremap_explicit(phb_io_base_phys, 
-				   (unsigned long) phb_io_base_virt, 
-				   size, _PAGE_NO_CACHE | _PAGE_GUARDED);
-	}
-}
-
 void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
 					    struct device_node *dev, int prim)
 {
@@ -1047,155 +685,122 @@
 	}
 }
 
-void __devinit pci_setup_phb_io(struct pci_controller *hose, int primary)
+#ifdef CONFIG_HOTPLUG
+
+int pcibios_unmap_io_space(struct pci_bus *bus)
 {
-	unsigned long size = hose->pci_io_size;
-	unsigned long io_virt_offset;
-	struct resource *res;
-	struct device_node *isa_dn;
+	struct pci_controller *hose;
 
-	if (size == 0)
-		return;
+	WARN_ON(bus == NULL);
 
-	hose->io_base_virt = reserve_phb_iospace(size);
-	DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
-		hose->global_number, hose->io_base_phys,
-		(unsigned long) hose->io_base_virt);
-
-	if (primary) {
-		pci_io_base = (unsigned long)hose->io_base_virt;
-		isa_dn = of_find_node_by_type(NULL, "isa");
-		if (isa_dn) {
-			isa_io_base = pci_io_base;
-			pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
-						hose->io_base_virt);
-			of_node_put(isa_dn);
-		}
-	}
-
-	io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
-	res = &hose->io_resource;
-	res->start += io_virt_offset;
-	res->end += io_virt_offset;
-
-	/* If this is called after the initial PCI scan, then we need to
-	 * proceed to IO mappings now
+	/* If this is not a PHB, we only flush the hash table over
+	 * the area mapped by this bridge. We don't play with the PTE
+	 * mappings since we might have to deal with sub-page alignments
+	 * so flushing the hash table is the only sane way to make sure
+	 * that no hash entries are covering that removed bridge area
+	 * while still allowing other busses overlapping those pages
 	 */
-	if (pci_initial_scan_done)
-		__ioremap_explicit(hose->io_base_phys,
-				   (unsigned long)hose->io_base_virt,
-				   hose->pci_io_size,
-				   _PAGE_NO_CACHE | _PAGE_GUARDED);
-}
+	if (bus->self) {
+		struct resource *res = bus->resource[0];
 
-void __devinit pci_setup_phb_io_dynamic(struct pci_controller *hose,
-					int primary)
+		DBG("IO unmapping for PCI-PCI bridge %s\n",
+		    pci_name(bus->self));
+
+		__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
+					 res->end - res->start + 1);
+		return 0;
+	}
+
+	/* Get the host bridge */
+	hose = pci_bus_to_host(bus);
+
+	/* Check if we have IOs allocated */
+	if (hose->io_base_alloc == 0)
+		return 0;
+
+	DBG("IO unmapping for PHB %s\n",
+	    ((struct device_node *)hose->arch_data)->full_name);
+	DBG("  alloc=0x%p\n", hose->io_base_alloc);
+
+	/* This is a PHB, we fully unmap the IO area */
+	vunmap(hose->io_base_alloc);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);
+
+#endif /* CONFIG_HOTPLUG */
+
+int __devinit pcibios_map_io_space(struct pci_bus *bus)
 {
-	unsigned long size = hose->pci_io_size;
+	struct vm_struct *area;
+	unsigned long phys_page;
+	unsigned long size_page;
 	unsigned long io_virt_offset;
-	struct resource *res;
+	struct pci_controller *hose;
 
-	if (size == 0)
-		return;
+	WARN_ON(bus == NULL);
 
-	hose->io_base_virt = __ioremap(hose->io_base_phys, size,
-					_PAGE_NO_CACHE | _PAGE_GUARDED);
-	DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
-		hose->global_number, hose->io_base_phys,
-		(unsigned long) hose->io_base_virt);
-
-	if (primary)
-		pci_io_base = (unsigned long)hose->io_base_virt;
-
-	io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
-	res = &hose->io_resource;
-	res->start += io_virt_offset;
-	res->end += io_virt_offset;
-}
-
-
-static int get_bus_io_range(struct pci_bus *bus, unsigned long *start_phys,
-				unsigned long *start_virt, unsigned long *size)
-{
-	struct pci_controller *hose = pci_bus_to_host(bus);
-	struct resource *res;
-
-	if (bus->self)
-		res = bus->resource[0];
-	else
-		/* Root Bus */
-		res = &hose->io_resource;
-
-	if (res->end == 0 && res->start == 0)
-		return 1;
-
-	*start_virt = pci_io_base + res->start;
-	*start_phys = *start_virt + hose->io_base_phys
-		- (unsigned long) hose->io_base_virt;
-
-	if (res->end > res->start)
-		*size = res->end - res->start + 1;
-	else {
-		printk("%s(): unexpected region 0x%lx->0x%lx\n",
-		       __FUNCTION__, res->start, res->end);
-		return 1;
+	/* If this is not a PHB, nothing to do, page tables still exist and
+	 * thus HPTEs will be faulted in when needed
+	 */
+	if (bus->self) {
+		DBG("IO mapping for PCI-PCI bridge %s\n",
+		    pci_name(bus->self));
+		DBG("  virt=0x%016lx...0x%016lx\n",
+		    bus->resource[0]->start + _IO_BASE,
+		    bus->resource[0]->end + _IO_BASE);
+		return 0;
 	}
 
+	/* Get the host bridge */
+	hose = pci_bus_to_host(bus);
+	phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
+	size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);
+
+	/* Make sure IO area address is clear */
+	hose->io_base_alloc = NULL;
+
+	/* If there's no IO to map on that bus, get away too */
+	if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
+		return 0;
+
+	/* Let's allocate some IO space for that guy. We don't pass
+	 * VM_IOREMAP because we don't care about alignment tricks that
+	 * the core does in that case. Maybe we should, because of stupid
+	 * cards with incomplete address decoding, but I'd rather not deal
+	 * with those outside of the reserved 64K legacy region.
+	 */
+	area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
+	if (area == NULL)
+		return -ENOMEM;
+	hose->io_base_alloc = area->addr;
+	hose->io_base_virt = (void __iomem *)(area->addr +
+					      hose->io_base_phys - phys_page);
+
+	DBG("IO mapping for PHB %s\n",
+	    ((struct device_node *)hose->arch_data)->full_name);
+	DBG("  phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
+	    hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
+	DBG("  size=0x%016lx (alloc=0x%016lx)\n",
+	    hose->pci_io_size, size_page);
+
+	/* Establish the mapping */
+	if (__ioremap_at(phys_page, area->addr, size_page,
+			 _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
+		return -ENOMEM;
+
+	/* Fixup hose IO resource */
+	io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+	hose->io_resource.start += io_virt_offset;
+	hose->io_resource.end += io_virt_offset;
+
+	DBG("  hose->io_resource=0x%016lx...0x%016lx\n",
+	    hose->io_resource.start, hose->io_resource.end);
+
 	return 0;
 }
-
-int unmap_bus_range(struct pci_bus *bus)
-{
-	unsigned long start_phys;
-	unsigned long start_virt;
-	unsigned long size;
-
-	if (!bus) {
-		printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
-		return 1;
-	}
-	
-	if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
-		return 1;
-	if (__iounmap_explicit((void __iomem *) start_virt, size))
-		return 1;
-
-	return 0;
-}
-EXPORT_SYMBOL(unmap_bus_range);
-
-int remap_bus_range(struct pci_bus *bus)
-{
-	unsigned long start_phys;
-	unsigned long start_virt;
-	unsigned long size;
-
-	if (!bus) {
-		printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
-		return 1;
-	}
-	
-	
-	if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
-		return 1;
-	if (start_phys == 0)
-		return 1;
-	printk(KERN_DEBUG "mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
-	if (__ioremap_explicit(start_phys, start_virt, size,
-			       _PAGE_NO_CACHE | _PAGE_GUARDED))
-		return 1;
-
-	return 0;
-}
-EXPORT_SYMBOL(remap_bus_range);
-
-static void phbs_remap_io(void)
-{
-	struct pci_controller *hose, *tmp;
-
-	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
-		remap_bus_range(hose->bus);
-}
+EXPORT_SYMBOL_GPL(pcibios_map_io_space);
 
 static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
 {
@@ -1203,8 +808,7 @@
 	unsigned long offset;
 
 	if (res->flags & IORESOURCE_IO) {
-		offset = (unsigned long)hose->io_base_virt - pci_io_base;
-
+		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
 		res->start += offset;
 		res->end += offset;
 	} else if (res->flags & IORESOURCE_MEM) {
@@ -1219,9 +823,20 @@
 	/* Update device resources.  */
 	int i;
 
-	for (i = 0; i < PCI_NUM_RESOURCES; i++)
-		if (dev->resource[i].flags)
-			fixup_resource(&dev->resource[i], dev);
+	DBG("%s: Fixup resources:\n", pci_name(dev));
+	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+		struct resource *res = &dev->resource[i];
+		if (!res->flags)
+			continue;
+
+		DBG("  0x%02x < %08lx:0x%016lx...0x%016lx\n",
+		    i, res->flags, res->start, res->end);
+
+		fixup_resource(res, dev);
+
+		DBG("       > %08lx:0x%016lx...0x%016lx\n",
+		    res->flags, res->start, res->end);
+	}
 }
 EXPORT_SYMBOL(pcibios_fixup_device_resources);
 
@@ -1291,119 +906,6 @@
 }
 EXPORT_SYMBOL(pcibios_fixup_bus);
 
-/*
- * Reads the interrupt pin to determine if interrupt is use by card.
- * If the interrupt is used, then gets the interrupt line from the 
- * openfirmware and sets it in the pci_dev and pci_config line.
- */
-int pci_read_irq_line(struct pci_dev *pci_dev)
-{
-	struct of_irq oirq;
-	unsigned int virq;
-
-	DBG("Try to map irq for %s...\n", pci_name(pci_dev));
-
-#ifdef DEBUG
-	memset(&oirq, 0xff, sizeof(oirq));
-#endif
-	/* Try to get a mapping from the device-tree */
-	if (of_irq_map_pci(pci_dev, &oirq)) {
-		u8 line, pin;
-
-		/* If that fails, lets fallback to what is in the config
-		 * space and map that through the default controller. We
-		 * also set the type to level low since that's what PCI
-		 * interrupts are. If your platform does differently, then
-		 * either provide a proper interrupt tree or don't use this
-		 * function.
-		 */
-		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
-			return -1;
-		if (pin == 0)
-			return -1;
-		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
-		    line == 0xff) {
-			return -1;
-		}
-		DBG(" -> no map ! Using irq line %d from PCI config\n", line);
-
-		virq = irq_create_mapping(NULL, line);
-		if (virq != NO_IRQ)
-			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
-	} else {
-		DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
-		    oirq.size, oirq.specifier[0], oirq.specifier[1],
-		    oirq.controller->full_name);
-
-		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
-					     oirq.size);
-	}
-	if(virq == NO_IRQ) {
-		DBG(" -> failed to map !\n");
-		return -1;
-	}
-
-	DBG(" -> mapped to linux irq %d\n", virq);
-
-	pci_dev->irq = virq;
-
-	return 0;
-}
-EXPORT_SYMBOL(pci_read_irq_line);
-
-void pci_resource_to_user(const struct pci_dev *dev, int bar,
-			  const struct resource *rsrc,
-			  resource_size_t *start, resource_size_t *end)
-{
-	struct pci_controller *hose = pci_bus_to_host(dev->bus);
-	resource_size_t offset = 0;
-
-	if (hose == NULL)
-		return;
-
-	if (rsrc->flags & IORESOURCE_IO)
-		offset = (unsigned long)hose->io_base_virt - pci_io_base;
-
-	/* We pass a fully fixed up address to userland for MMIO instead of
-	 * a BAR value because X is lame and expects to be able to use that
-	 * to pass to /dev/mem !
-	 *
-	 * That means that we'll have potentially 64 bits values where some
-	 * userland apps only expect 32 (like X itself since it thinks only
-	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
-	 * 32 bits CHRPs :-(
-	 *
-	 * Hopefully, the sysfs insterface is immune to that gunk. Once X
-	 * has been fixed (and the fix spread enough), we can re-enable the
-	 * 2 lines below and pass down a BAR value to userland. In that case
-	 * we'll also have to re-enable the matching code in
-	 * __pci_mmap_make_offset().
-	 *
-	 * BenH.
-	 */
-#if 0
-	else if (rsrc->flags & IORESOURCE_MEM)
-		offset = hose->pci_mem_offset;
-#endif
-
-	*start = rsrc->start - offset;
-	*end = rsrc->end - offset;
-}
-
-struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
-{
-	if (!have_of)
-		return NULL;
-	while(node) {
-		struct pci_controller *hose, *tmp;
-		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
-			if (hose->arch_data == node)
-				return hose;
-		node = node->parent;
-	}
-	return NULL;
-}
-
 unsigned long pci_address_to_pio(phys_addr_t address)
 {
 	struct pci_controller *hose, *tmp;
@@ -1412,7 +914,7 @@
 		if (address >= hose->io_base_phys &&
 		    address < (hose->io_base_phys + hose->pci_io_size)) {
 			unsigned long base =
-				(unsigned long)hose->io_base_virt - pci_io_base;
+				(unsigned long)hose->io_base_virt - _IO_BASE;
 			return base + (address - hose->io_base_phys);
 		}
 	}
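The hunks above replace pci_io_base with the constant _IO_BASE as the reference point for I/O resources, so a hose's resources end up expressed as offsets into the kernel's I/O window. As a rough illustration of that fix-up (a user-space sketch with made-up addresses, not code from the patch):

    #include <stdio.h>

    #define EXAMPLE_IO_BASE 0xd0000000UL    /* stand-in for the kernel's _IO_BASE */

    struct example_resource { unsigned long start, end; };

    int main(void)
    {
        unsigned long io_base_virt = EXAMPLE_IO_BASE + 0x10000; /* this hose's window */
        struct example_resource res = { 0x1000, 0x1fff };       /* bus-relative I/O range */

        /* same arithmetic as fixup_resource()/pcibios_map_io_space() above */
        unsigned long offset = io_base_virt - EXAMPLE_IO_BASE;
        res.start += offset;
        res.end += offset;

        printf("fixed-up I/O resource: 0x%lx..0x%lx\n", res.start, res.end);
        return 0;
    }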
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index c96fa9b..a20f195 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -67,7 +67,6 @@
 EXPORT_SYMBOL(DMA_MODE_READ);
 EXPORT_SYMBOL(DMA_MODE_WRITE);
 
-EXPORT_SYMBOL(do_signal);
 EXPORT_SYMBOL(transfer_to_handler);
 EXPORT_SYMBOL(do_IRQ);
 EXPORT_SYMBOL(machine_check_exception);
@@ -106,10 +105,6 @@
 EXPORT_SYMBOL(pci_dram_offset);
 EXPORT_SYMBOL(pci_alloc_consistent);
 EXPORT_SYMBOL(pci_free_consistent);
-EXPORT_SYMBOL(pci_bus_io_base);
-EXPORT_SYMBOL(pci_bus_io_base_phys);
-EXPORT_SYMBOL(pci_bus_mem_base_phys);
-EXPORT_SYMBOL(pci_bus_to_hose);
 #endif /* CONFIG_PCI */
 
 EXPORT_SYMBOL(start_thread);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6e2f035..84f000a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -219,22 +219,26 @@
 }
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_PPC_MERGE		/* XXX for now */
 int set_dabr(unsigned long dabr)
 {
+#ifdef CONFIG_PPC_MERGE		/* XXX for now */
 	if (ppc_md.set_dabr)
 		return ppc_md.set_dabr(dabr);
+#endif
 
+	/* XXX should we have a CPU_FTR_HAS_DABR ? */
+#if defined(CONFIG_PPC64) || defined(CONFIG_6xx)
 	mtspr(SPRN_DABR, dabr);
+#endif
 	return 0;
 }
-#endif
 
 #ifdef CONFIG_PPC64
 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
-static DEFINE_PER_CPU(unsigned long, current_dabr);
 #endif
 
+static DEFINE_PER_CPU(unsigned long, current_dabr);
+
 struct task_struct *__switch_to(struct task_struct *prev,
 	struct task_struct *new)
 {
@@ -299,12 +303,10 @@
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_PPC64	/* for now */
 	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
 		set_dabr(new->thread.dabr);
 		__get_cpu_var(current_dabr) = new->thread.dabr;
 	}
-#endif /* CONFIG_PPC64 */
 
 	new_thread = &new->thread;
 	old_thread = &current->thread;
@@ -473,12 +475,10 @@
 
 	discard_lazy_cpu_state();
 
-#ifdef CONFIG_PPC64	/* for now */
 	if (current->thread.dabr) {
 		current->thread.dabr = 0;
 		set_dabr(0);
 	}
-#endif
 }
 
 void
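The process.c change makes set_dabr() and the per-CPU current_dabr cache common to 32-bit and 64-bit, so __switch_to() only writes the register when the incoming thread's value differs from the cached one. A small user-space sketch of that write-only-on-change pattern; hw_write_dabr() is an invented stand-in for the mtspr(SPRN_DABR, ...) write:

    #include <stdio.h>

    static unsigned long current_dabr;      /* per-CPU variable in the kernel */

    static void hw_write_dabr(unsigned long val)
    {
        printf("DABR <- 0x%lx\n", val);     /* the expensive SPR write in real code */
    }

    static void switch_to_thread(unsigned long new_thread_dabr)
    {
        if (current_dabr != new_thread_dabr) {
            hw_write_dabr(new_thread_dabr);
            current_dabr = new_thread_dabr;
        }
    }

    int main(void)
    {
        switch_to_thread(0x1000);   /* value changed: register written */
        switch_to_thread(0x1000);   /* unchanged: write skipped */
        switch_to_thread(0);        /* cleared: register written */
        return 0;
    }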
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index af42dda..37ff99b 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -52,6 +52,7 @@
 #include <asm/pSeries_reconfig.h>
 #include <asm/pci-bridge.h>
 #include <asm/kexec.h>
+#include <asm/system.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) printk(KERN_ERR fmt)
@@ -1005,7 +1006,7 @@
 
 void __init early_init_devtree(void *params)
 {
-	DBG(" -> early_init_devtree()\n");
+	DBG(" -> early_init_devtree(%p)\n", params);
 
 	/* Setup flat device-tree pointer */
 	initial_boot_params = params;
@@ -1055,8 +1056,6 @@
 	DBG(" <- early_init_devtree()\n");
 }
 
-#undef printk
-
 int of_n_addr_cells(struct device_node* np)
 {
 	const int *ip;
@@ -1375,8 +1374,17 @@
 	struct device_node *node = kref_to_device_node(kref);
 	struct property *prop = node->properties;
 
-	if (!OF_IS_DYNAMIC(node))
+	/* We should never be releasing nodes that haven't been detached. */
+	if (!of_node_check_flag(node, OF_DETACHED)) {
+		printk("WARNING: Bad of_node_put() on %s\n", node->full_name);
+		dump_stack();
+		kref_init(&node->kref);
 		return;
+	}
+
+	if (!of_node_check_flag(node, OF_DYNAMIC))
+		return;
+
 	while (prop) {
 		struct property *next = prop->next;
 		kfree(prop->name);
@@ -1432,6 +1440,8 @@
 	write_lock(&devtree_lock);
 
 	parent = np->parent;
+	if (!parent)
+		goto out_unlock;
 
 	if (allnodes == np)
 		allnodes = np->allnext;
@@ -1455,6 +1465,9 @@
 		prevsib->sibling = np->sibling;
 	}
 
+	of_node_set_flag(np, OF_DETACHED);
+
+out_unlock:
 	write_unlock(&devtree_lock);
 }
 
@@ -1716,22 +1729,18 @@
 }
 EXPORT_SYMBOL(of_get_cpu_node);
 
-#ifdef DEBUG
+#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
 static struct debugfs_blob_wrapper flat_dt_blob;
 
 static int __init export_flat_device_tree(void)
 {
 	struct dentry *d;
 
-	d = debugfs_create_dir("powerpc", NULL);
-	if (!d)
-		return 1;
-
 	flat_dt_blob.data = initial_boot_params;
 	flat_dt_blob.size = initial_boot_params->totalsize;
 
 	d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
-				d, &flat_dt_blob);
+				powerpc_debugfs_root, &flat_dt_blob);
 	if (!d)
 		return 1;
 
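The prom.c hunk teaches of_node_release() to refuse nodes that were never passed through of_detach_node(), which now marks them OF_DETACHED before they may be freed. A rough user-space sketch of that two-flag check; the flag bits, names and node paths below are illustrative, not the kernel's definitions:

    #include <stdio.h>

    #define EX_DYNAMIC   (1u << 0)  /* node was allocated at runtime */
    #define EX_DETACHED  (1u << 1)  /* node was removed from the tree */

    struct example_node { const char *full_name; unsigned int flags; };

    static void example_release(struct example_node *np)
    {
        if (!(np->flags & EX_DETACHED)) {
            printf("WARNING: Bad of_node_put() on %s\n", np->full_name);
            return;                 /* keep the node alive */
        }
        if (!(np->flags & EX_DYNAMIC))
            return;                 /* static nodes are never freed */
        printf("freeing %s\n", np->full_name);
    }

    int main(void)
    {
        struct example_node attached = { "/example@0", EX_DYNAMIC };
        struct example_node detached = { "/example@1", EX_DYNAMIC | EX_DETACHED };

        example_release(&attached); /* warns and bails out */
        example_release(&detached); /* actually freed */
        return 0;
    }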
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index d6047c4..a1d582e 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -635,6 +635,7 @@
 /* ibm,dynamic-reconfiguration-memory property supported */
 #define OV5_DRCONF_MEMORY	0x20
 #define OV5_LARGE_PAGES		0x10	/* large pages supported */
+#define OV5_DONATE_DEDICATE_CPU 0x02	/* donate dedicated CPU support */
 /* PCIe/MSI support.  Without MSI full PCIe is not supported */
 #ifdef CONFIG_PCI_MSI
 #define OV5_MSI			0x01	/* PCIe/MSI support */
@@ -685,7 +686,8 @@
 	/* option vector 5: PAPR/OF options */
 	3 - 2,				/* length */
 	0,				/* don't ignore, don't halt */
-	OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | OV5_MSI,
+	OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
+	OV5_DONATE_DEDICATE_CPU | OV5_MSI,
 };
 
 /* Old method - ELF header with PT_NOTE sections */
diff --git a/arch/powerpc/kernel/ptrace-common.h b/arch/powerpc/kernel/ptrace-common.h
deleted file mode 100644
index 8797ae7..0000000
--- a/arch/powerpc/kernel/ptrace-common.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- *    Copyright (c) 2002 Stephen Rothwell, IBM Coproration
- *    Extracted from ptrace.c and ptrace32.c
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file README.legal in the main directory of
- * this archive for more details.
- */
-
-#ifndef _PPC64_PTRACE_COMMON_H
-#define _PPC64_PTRACE_COMMON_H
-
-#include <asm/system.h>
-
-/*
- * Set of msr bits that gdb can change on behalf of a process.
- */
-#define MSR_DEBUGCHANGE	(MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1)
-
-/*
- * Get contents of register REGNO in task TASK.
- */
-static inline unsigned long get_reg(struct task_struct *task, int regno)
-{
-	unsigned long tmp = 0;
-
-	/*
-	 * Put the correct FP bits in, they might be wrong as a result
-	 * of our lazy FP restore.
-	 */
-	if (regno == PT_MSR) {
-		tmp = ((unsigned long *)task->thread.regs)[PT_MSR];
-		tmp |= task->thread.fpexc_mode;
-	} else if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
-		tmp = ((unsigned long *)task->thread.regs)[regno];
-	}
-
-	return tmp;
-}
-
-/*
- * Write contents of register REGNO in task TASK.
- */
-static inline int put_reg(struct task_struct *task, int regno,
-			  unsigned long data)
-{
-	if (regno < PT_SOFTE) {
-		if (regno == PT_MSR)
-			data = (data & MSR_DEBUGCHANGE)
-				| (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
-		((unsigned long *)task->thread.regs)[regno] = data;
-		return 0;
-	}
-	return -EIO;
-}
-
-static inline void set_single_step(struct task_struct *task)
-{
-	struct pt_regs *regs = task->thread.regs;
-	if (regs != NULL)
-		regs->msr |= MSR_SE;
-	set_tsk_thread_flag(task, TIF_SINGLESTEP);
-}
-
-static inline void clear_single_step(struct task_struct *task)
-{
-	struct pt_regs *regs = task->thread.regs;
-	if (regs != NULL)
-		regs->msr &= ~MSR_SE;
-	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
-}
-
-#ifdef CONFIG_ALTIVEC
-/*
- * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
- * The transfer totals 34 quadword.  Quadwords 0-31 contain the
- * corresponding vector registers.  Quadword 32 contains the vscr as the
- * last word (offset 12) within that quadword.  Quadword 33 contains the
- * vrsave as the first word (offset 0) within the quadword.
- *
- * This definition of the VMX state is compatible with the current PPC32
- * ptrace interface.  This allows signal handling and ptrace to use the
- * same structures.  This also simplifies the implementation of a bi-arch
- * (combined (32- and 64-bit) gdb.
- */
-
-/*
- * Get contents of AltiVec register state in task TASK
- */
-static inline int get_vrregs(unsigned long __user *data,
-			     struct task_struct *task)
-{
-	unsigned long regsize;
-
-	/* copy AltiVec registers VR[0] .. VR[31] */
-	regsize = 32 * sizeof(vector128);
-	if (copy_to_user(data, task->thread.vr, regsize))
-		return -EFAULT;
-	data += (regsize / sizeof(unsigned long));
-
-	/* copy VSCR */
-	regsize = 1 * sizeof(vector128);
-	if (copy_to_user(data, &task->thread.vscr, regsize))
-		return -EFAULT;
-	data += (regsize / sizeof(unsigned long));
-
-	/* copy VRSAVE */
-	if (put_user(task->thread.vrsave, (u32 __user *)data))
-		return -EFAULT;
-
-	return 0;
-}
-
-/*
- * Write contents of AltiVec register state into task TASK.
- */
-static inline int set_vrregs(struct task_struct *task,
-			     unsigned long __user *data)
-{
-	unsigned long regsize;
-
-	/* copy AltiVec registers VR[0] .. VR[31] */
-	regsize = 32 * sizeof(vector128);
-	if (copy_from_user(task->thread.vr, data, regsize))
-		return -EFAULT;
-	data += (regsize / sizeof(unsigned long));
-
-	/* copy VSCR */
-	regsize = 1 * sizeof(vector128);
-	if (copy_from_user(&task->thread.vscr, data, regsize))
-		return -EFAULT;
-	data += (regsize / sizeof(unsigned long));
-
-	/* copy VRSAVE */
-	if (get_user(task->thread.vrsave, (u32 __user *)data))
-		return -EFAULT;
-
-	return 0;
-}
-#endif
-
-static inline int ptrace_set_debugreg(struct task_struct *task,
-				      unsigned long addr, unsigned long data)
-{
-	/* We only support one DABR and no IABRS at the moment */
-	if (addr > 0)
-		return -EINVAL;
-
-	/* The bottom 3 bits are flags */
-	if ((data & ~0x7UL) >= TASK_SIZE)
-		return -EIO;
-
-	/* Ensure translation is on */
-	if (data && !(data & DABR_TRANSLATION))
-		return -EIO;
-
-	task->thread.dabr = data;
-	return 0;
-}
-
-#endif /* _PPC64_PTRACE_COMMON_H */
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index bf76562..8a177bd 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -35,11 +35,11 @@
 #include <asm/pgtable.h>
 #include <asm/system.h>
 
-#ifdef CONFIG_PPC64
-#include "ptrace-common.h"
-#endif
+/*
+ * does not yet catch signals sent when the child dies.
+ * in exit.c or in signal.c.
+ */
 
-#ifdef CONFIG_PPC32
 /*
  * Set of msr bits that gdb can change on behalf of a process.
  */
@@ -48,65 +48,117 @@
 #else
 #define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
 #endif
-#endif /* CONFIG_PPC32 */
 
 /*
- * does not yet catch signals sent when the child dies.
- * in exit.c or in signal.c.
+ * Max register writeable via put_reg
  */
-
 #ifdef CONFIG_PPC32
+#define PT_MAX_PUT_REG	PT_MQ
+#else
+#define PT_MAX_PUT_REG	PT_CCR
+#endif
+
 /*
  * Get contents of register REGNO in task TASK.
  */
-static inline unsigned long get_reg(struct task_struct *task, int regno)
+unsigned long ptrace_get_reg(struct task_struct *task, int regno)
 {
-	if (regno < sizeof(struct pt_regs) / sizeof(unsigned long)
-	    && task->thread.regs != NULL)
+	unsigned long tmp = 0;
+
+	if (task->thread.regs == NULL)
+		return -EIO;
+
+	if (regno == PT_MSR) {
+		tmp = ((unsigned long *)task->thread.regs)[PT_MSR];
+		return tmp | task->thread.fpexc_mode;
+	}
+
+	if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
 		return ((unsigned long *)task->thread.regs)[regno];
-	return (0);
+
+	return -EIO;
 }
 
 /*
  * Write contents of register REGNO in task TASK.
  */
-static inline int put_reg(struct task_struct *task, int regno,
-			  unsigned long data)
+int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
 {
-	if (regno <= PT_MQ && task->thread.regs != NULL) {
+	if (task->thread.regs == NULL)
+		return -EIO;
+
+	if (regno <= PT_MAX_PUT_REG || regno == PT_TRAP) {
 		if (regno == PT_MSR)
 			data = (data & MSR_DEBUGCHANGE)
 				| (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
+		/* We prevent mucking around with the reserved area of trap,
+		 * which is used internally by the kernel
+		 */
+		if (regno == PT_TRAP)
+			data &= 0xfff0;
 		((unsigned long *)task->thread.regs)[regno] = data;
 		return 0;
 	}
 	return -EIO;
 }
 
+
+static int get_fpregs(void __user *data, struct task_struct *task,
+		      int has_fpscr)
+{
+	unsigned int count = has_fpscr ? 33 : 32;
+
+	if (copy_to_user(data, task->thread.fpr, count * sizeof(double)))
+		return -EFAULT;
+	return 0;
+}
+
+static int set_fpregs(void __user *data, struct task_struct *task,
+		      int has_fpscr)
+{
+	unsigned int count = has_fpscr ? 33 : 32;
+
+	if (copy_from_user(task->thread.fpr, data, count * sizeof(double)))
+		return -EFAULT;
+	return 0;
+}
+
+
 #ifdef CONFIG_ALTIVEC
 /*
+ * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
+ * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
+ * corresponding vector registers.  Quadword 32 contains the vscr as the
+ * last word (offset 12) within that quadword.  Quadword 33 contains the
+ * vrsave as the first word (offset 0) within the quadword.
+ *
+ * This definition of the VMX state is compatible with the current PPC32
+ * ptrace interface.  This allows signal handling and ptrace to use the
+ * same structures.  This also simplifies the implementation of a bi-arch
+ * (combined 32- and 64-bit) gdb.
+ */
+
+/*
  * Get contents of AltiVec register state in task TASK
  */
-static inline int get_vrregs(unsigned long __user *data, struct task_struct *task)
+static int get_vrregs(unsigned long __user *data, struct task_struct *task)
 {
-	int i, j;
-
-	if (!access_ok(VERIFY_WRITE, data, 133 * sizeof(unsigned long)))
-		return -EFAULT;
+	unsigned long regsize;
 
 	/* copy AltiVec registers VR[0] .. VR[31] */
-	for (i = 0; i < 32; i++)
-		for (j = 0; j < 4; j++, data++)
-			if (__put_user(task->thread.vr[i].u[j], data))
-				return -EFAULT;
+	regsize = 32 * sizeof(vector128);
+	if (copy_to_user(data, task->thread.vr, regsize))
+		return -EFAULT;
+	data += (regsize / sizeof(unsigned long));
 
 	/* copy VSCR */
-	for (i = 0; i < 4; i++, data++)
-		if (__put_user(task->thread.vscr.u[i], data))
-			return -EFAULT;
+	regsize = 1 * sizeof(vector128);
+	if (copy_to_user(data, &task->thread.vscr, regsize))
+		return -EFAULT;
+	data += (regsize / sizeof(unsigned long));
 
-        /* copy VRSAVE */
-	if (__put_user(task->thread.vrsave, data))
+	/* copy VRSAVE */
+	if (put_user(task->thread.vrsave, (u32 __user *)data))
 		return -EFAULT;
 
 	return 0;
@@ -115,31 +167,29 @@
 /*
  * Write contents of AltiVec register state into task TASK.
  */
-static inline int set_vrregs(struct task_struct *task, unsigned long __user *data)
+static int set_vrregs(struct task_struct *task, unsigned long __user *data)
 {
-	int i, j;
-
-	if (!access_ok(VERIFY_READ, data, 133 * sizeof(unsigned long)))
-		return -EFAULT;
+	unsigned long regsize;
 
 	/* copy AltiVec registers VR[0] .. VR[31] */
-	for (i = 0; i < 32; i++)
-		for (j = 0; j < 4; j++, data++)
-			if (__get_user(task->thread.vr[i].u[j], data))
-				return -EFAULT;
+	regsize = 32 * sizeof(vector128);
+	if (copy_from_user(task->thread.vr, data, regsize))
+		return -EFAULT;
+	data += (regsize / sizeof(unsigned long));
 
 	/* copy VSCR */
-	for (i = 0; i < 4; i++, data++)
-		if (__get_user(task->thread.vscr.u[i], data))
-			return -EFAULT;
+	regsize = 1 * sizeof(vector128);
+	if (copy_from_user(&task->thread.vscr, data, regsize))
+		return -EFAULT;
+	data += (regsize / sizeof(unsigned long));
 
 	/* copy VRSAVE */
-	if (__get_user(task->thread.vrsave, data))
+	if (get_user(task->thread.vrsave, (u32 __user *)data))
 		return -EFAULT;
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_SPE
 
@@ -156,7 +206,7 @@
 /*
  * Get contents of SPE register state in task TASK.
  */
-static inline int get_evrregs(unsigned long *data, struct task_struct *task)
+static int get_evrregs(unsigned long *data, struct task_struct *task)
 {
 	int i;
 
@@ -182,7 +232,7 @@
 /*
  * Write contents of SPE register state into task TASK.
  */
-static inline int set_evrregs(struct task_struct *task, unsigned long *data)
+static int set_evrregs(struct task_struct *task, unsigned long *data)
 {
 	int i;
 
@@ -205,8 +255,8 @@
 }
 #endif /* CONFIG_SPE */
 
-static inline void
-set_single_step(struct task_struct *task)
+
+static void set_single_step(struct task_struct *task)
 {
 	struct pt_regs *regs = task->thread.regs;
 
@@ -221,8 +271,7 @@
 	set_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
 
-static inline void
-clear_single_step(struct task_struct *task)
+static void clear_single_step(struct task_struct *task)
 {
 	struct pt_regs *regs = task->thread.regs;
 
@@ -236,7 +285,25 @@
 	}
 	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
-#endif /* CONFIG_PPC32 */
+
+static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
+			       unsigned long data)
+{
+	/* We only support one DABR and no IABRS at the moment */
+	if (addr > 0)
+		return -EINVAL;
+
+	/* The bottom 3 bits are flags */
+	if ((data & ~0x7UL) >= TASK_SIZE)
+		return -EIO;
+
+	/* Ensure translation is on */
+	if (data && !(data & DABR_TRANSLATION))
+		return -EIO;
+
+	task->thread.dabr = data;
+	return 0;
+}
 
 /*
  * Called by kernel/ptrace.c when detaching..
@@ -249,6 +316,62 @@
 	clear_single_step(child);
 }
 
+/*
+ * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls;
+ * we mark them as obsolete now and they will be removed in a future version.
+ */
+static long arch_ptrace_old(struct task_struct *child, long request, long addr,
+			    long data)
+{
+	int ret = -EPERM;
+
+	switch(request) {
+	case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
+		int i;
+		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
+		unsigned long __user *tmp = (unsigned long __user *)addr;
+
+		for (i = 0; i < 32; i++) {
+			ret = put_user(*reg, tmp);
+			if (ret)
+				break;
+			reg++;
+			tmp++;
+		}
+		break;
+	}
+
+	case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
+		int i;
+		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
+		unsigned long __user *tmp = (unsigned long __user *)addr;
+
+		for (i = 0; i < 32; i++) {
+			ret = get_user(*reg, tmp);
+			if (ret)
+				break;
+			reg++;
+			tmp++;
+		}
+		break;
+	}
+
+	case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
+		flush_fp_to_thread(child);
+		ret = get_fpregs((void __user *)addr, child, 0);
+		break;
+	}
+
+	case PPC_PTRACE_SETFPREGS: { /* Set FPRs 0 - 31. */
+		flush_fp_to_thread(child);
+		ret = set_fpregs((void __user *)addr, child, 0);
+		break;
+	}
+
+	}
+	return ret;
+}
+
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	int ret = -EPERM;
@@ -256,17 +379,9 @@
 	switch (request) {
 	/* when I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
-	case PTRACE_PEEKDATA: {
-		unsigned long tmp;
-		int copied;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		ret = -EIO;
-		if (copied != sizeof(tmp))
-			break;
-		ret = put_user(tmp,(unsigned long __user *) data);
+	case PTRACE_PEEKDATA:
+		ret = generic_ptrace_peekdata(child, addr, data);
 		break;
-	}
 
 	/* read the word at location addr in the USER area. */
 	case PTRACE_PEEKUSR: {
@@ -284,11 +399,9 @@
 #endif
 			break;
 
-#ifdef CONFIG_PPC32
 		CHECK_FULL_REGS(child->thread.regs);
-#endif
 		if (index < PT_FPR0) {
-			tmp = get_reg(child, (int) index);
+			tmp = ptrace_get_reg(child, (int) index);
 		} else {
 			flush_fp_to_thread(child);
 			tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
@@ -300,11 +413,7 @@
 	/* If I and D space are separate, this will have to be fixed. */
 	case PTRACE_POKETEXT: /* write the word at location addr. */
 	case PTRACE_POKEDATA:
-		ret = 0;
-		if (access_process_vm(child, addr, &data, sizeof(data), 1)
-				== sizeof(data))
-			break;
-		ret = -EIO;
+		ret = generic_ptrace_pokedata(child, addr, data);
 		break;
 
 	/* write the word at location addr in the USER area */
@@ -323,13 +432,9 @@
 #endif
 			break;
 
-#ifdef CONFIG_PPC32
 		CHECK_FULL_REGS(child->thread.regs);
-#endif
-		if (index == PT_ORIG_R3)
-			break;
 		if (index < PT_FPR0) {
-			ret = put_reg(child, index, data);
+			ret = ptrace_put_reg(child, index, data);
 		} else {
 			flush_fp_to_thread(child);
 			((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
@@ -384,7 +489,6 @@
 		break;
 	}
 
-#ifdef CONFIG_PPC64
 	case PTRACE_GET_DEBUGREG: {
 		ret = -EINVAL;
 		/* We only support one DABR and no IABRS at the moment */
@@ -398,73 +502,61 @@
 	case PTRACE_SET_DEBUGREG:
 		ret = ptrace_set_debugreg(child, addr, data);
 		break;
-#endif
 
 	case PTRACE_DETACH:
 		ret = ptrace_detach(child, data);
 		break;
 
-	case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
-		unsigned long __user *tmp = (unsigned long __user *)addr;
-
-		for (i = 0; i < 32; i++) {
-			ret = put_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
+#ifdef CONFIG_PPC64
+	case PTRACE_GETREGS64:
+#endif
+	case PTRACE_GETREGS: { /* Get all pt_regs from the child. */
+		int ui;
+	  	if (!access_ok(VERIFY_WRITE, (void __user *)data,
+			       sizeof(struct pt_regs))) {
+			ret = -EIO;
+			break;
+		}
+		ret = 0;
+		for (ui = 0; ui < PT_REGS_COUNT; ui ++) {
+			ret |= __put_user(ptrace_get_reg(child, ui),
+					  (unsigned long __user *) data);
+			data += sizeof(long);
 		}
 		break;
 	}
 
-	case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
-		unsigned long __user *tmp = (unsigned long __user *)addr;
-
-		for (i = 0; i < 32; i++) {
-			ret = get_user(*reg, tmp);
+#ifdef CONFIG_PPC64
+	case PTRACE_SETREGS64:
+#endif
+	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
+		unsigned long tmp;
+		int ui;
+	  	if (!access_ok(VERIFY_READ, (void __user *)data,
+			       sizeof(struct pt_regs))) {
+			ret = -EIO;
+			break;
+		}
+		ret = 0;
+		for (ui = 0; ui < PT_REGS_COUNT; ui ++) {
+			ret = __get_user(tmp, (unsigned long __user *) data);
 			if (ret)
 				break;
-			reg++;
-			tmp++;
+			ptrace_put_reg(child, ui, tmp);
+			data += sizeof(long);
 		}
 		break;
 	}
 
-	case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
-		unsigned long __user *tmp = (unsigned long __user *)addr;
-
+	case PTRACE_GETFPREGS: { /* Get the child FPU state (FPR0...31 + FPSCR) */
 		flush_fp_to_thread(child);
-
-		for (i = 0; i < 32; i++) {
-			ret = put_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
-		}
+		ret = get_fpregs((void __user *)data, child, 1);
 		break;
 	}
 
-	case PPC_PTRACE_SETFPREGS: { /* Get FPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
-		unsigned long __user *tmp = (unsigned long __user *)addr;
-
+	case PTRACE_SETFPREGS: { /* Set the child FPU state (FPR0...31 + FPSCR) */
 		flush_fp_to_thread(child);
-
-		for (i = 0; i < 32; i++) {
-			ret = get_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
-		}
+		ret = set_fpregs((void __user *)data, child, 1);
 		break;
 	}
 
@@ -499,11 +591,18 @@
 		break;
 #endif
 
+	/* Old reverse args ptrace calls */
+	case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
+	case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
+	case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
+	case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
+		ret = arch_ptrace_old(child, request, addr, data);
+		break;
+
 	default:
 		ret = ptrace_request(child, request, addr, data);
 		break;
 	}
-
 	return ret;
 }
 
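With PTRACE_GETREGS/PTRACE_SETREGS wired up above, a tracer can move the whole pt_regs block in one request instead of looping over PTRACE_PEEKUSR. A rough tracer-side sketch, assuming a powerpc toolchain where <sys/ptrace.h> exposes PTRACE_GETREGS and <asm/ptrace.h> gives user space struct pt_regs; error handling is kept minimal:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <sys/wait.h>
    #include <asm/ptrace.h>         /* user-visible struct pt_regs layout */

    int main(void)
    {
        pid_t child = fork();

        if (child == 0) {
            /* child: ask to be traced, then exec something harmless */
            ptrace(PTRACE_TRACEME, 0, NULL, NULL);
            execlp("true", "true", (char *)NULL);
            _exit(1);
        }

        waitpid(child, NULL, 0);    /* child stops at exec */

        struct pt_regs regs;
        if (ptrace(PTRACE_GETREGS, child, NULL, &regs) == 0)
            printf("fetched %zu bytes of registers in one call\n", sizeof(regs));

        ptrace(PTRACE_CONT, child, NULL, NULL);
        waitpid(child, NULL, 0);
        return 0;
    }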
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 9b9a230..9e6baea 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -33,13 +33,55 @@
 #include <asm/pgtable.h>
 #include <asm/system.h>
 
-#include "ptrace-common.h"
-
 /*
  * does not yet catch signals sent when the child dies.
  * in exit.c or in signal.c.
  */
 
+/*
+ * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls;
+ * we mark them as obsolete now and they will be removed in a future version.
+ */
+static long compat_ptrace_old(struct task_struct *child, long request,
+			      long addr, long data)
+{
+	int ret = -EPERM;
+
+	switch(request) {
+	case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
+		int i;
+		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
+		unsigned int __user *tmp = (unsigned int __user *)addr;
+
+		for (i = 0; i < 32; i++) {
+			ret = put_user(*reg, tmp);
+			if (ret)
+				break;
+			reg++;
+			tmp++;
+		}
+		break;
+	}
+
+	case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
+		int i;
+		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
+		unsigned int __user *tmp = (unsigned int __user *)addr;
+
+		for (i = 0; i < 32; i++) {
+			ret = get_user(*reg, tmp);
+			if (ret)
+				break;
+			reg++;
+			tmp++;
+		}
+		break;
+	}
+
+	}
+	return ret;
+}
+
 long compat_sys_ptrace(int request, int pid, unsigned long addr,
 		       unsigned long data)
 {
@@ -123,7 +165,7 @@
 			break;
 
 		if (index < PT_FPR0) {
-			tmp = get_reg(child, index);
+			tmp = ptrace_get_reg(child, index);
 		} else {
 			flush_fp_to_thread(child);
 			/*
@@ -162,7 +204,9 @@
 		else
 			part = 0;  /* want the 1st half of the register (left-most). */
 
-		/* Validate the input - check to see if address is on the wrong boundary or beyond the end of the user area */
+		/* Validate the input - check to see if address is on the wrong boundary
+		 * or beyond the end of the user area
+		 */
 		if ((addr & 3) || numReg > PT_FPSCR)
 			break;
 
@@ -170,7 +214,7 @@
 			flush_fp_to_thread(child);
 			tmp = ((unsigned long int *)child->thread.fpr)[numReg - PT_FPR0];
 		} else { /* register within PT_REGS struct */
-			tmp = get_reg(child, numReg);
+			tmp = ptrace_get_reg(child, numReg);
 		} 
 		reg32bits = ((u32*)&tmp)[part];
 		ret = put_user(reg32bits, (u32 __user *)data);
@@ -226,10 +270,8 @@
 		if ((addr & 3) || (index > PT_FPSCR32))
 			break;
 
-		if (index == PT_ORIG_R3)
-			break;
 		if (index < PT_FPR0) {
-			ret = put_reg(child, index, data);
+			ret = ptrace_put_reg(child, index, data);
 		} else {
 			flush_fp_to_thread(child);
 			/*
@@ -258,70 +300,25 @@
 		/* Determine which register the user wants */
 		index = (u64)addr >> 2;
 		numReg = index / 2;
+
 		/*
 		 * Validate the input - check to see if address is on the
 		 * wrong boundary or beyond the end of the user area
 		 */
 		if ((addr & 3) || (numReg > PT_FPSCR))
 			break;
-		/* Insure it is a register we let them change */
-		if ((numReg == PT_ORIG_R3)
-				|| ((numReg > PT_CCR) && (numReg < PT_FPR0)))
-			break;
-		if (numReg >= PT_FPR0) {
+		if (numReg < PT_FPR0) {
+			unsigned long freg = ptrace_get_reg(child, numReg);
+			if (index % 2)
+				freg = (freg & ~0xfffffffful) | (data & 0xfffffffful);
+			else
+				freg = (freg & 0xfffffffful) | (data << 32);
+			ret = ptrace_put_reg(child, numReg, freg);
+		} else {
 			flush_fp_to_thread(child);
+			((unsigned int *)child->thread.regs)[index] = data;
+			ret = 0;
 		}
-		if (numReg == PT_MSR)
-			data = (data & MSR_DEBUGCHANGE)
-				| (child->thread.regs->msr & ~MSR_DEBUGCHANGE);
-		((u32*)child->thread.regs)[index] = data;
-		ret = 0;
-		break;
-	}
-
-	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
-	case PTRACE_CONT: { /* restart after signal. */
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-		if (request == PTRACE_SYSCALL)
-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		else
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		child->exit_code = data;
-		/* make sure the single step bit is not set. */
-		clear_single_step(child);
-		wake_up_process(child);
-		ret = 0;
-		break;
-	}
-
-	/*
-	 * make the child exit.  Best I can do is send it a sigkill.
-	 * perhaps it should be put in the status that it wants to
-	 * exit.
-	 */
-	case PTRACE_KILL: {
-		ret = 0;
-		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
-			break;
-		child->exit_code = SIGKILL;
-		/* make sure the single step bit is not set. */
-		clear_single_step(child);
-		wake_up_process(child);
-		break;
-	}
-
-	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		set_single_step(child);
-		child->exit_code = data;
-		/* give it a chance to run. */
-		wake_up_process(child);
-		ret = 0;
 		break;
 	}
 
@@ -334,95 +331,67 @@
 		break;
 	}
 
-	case PTRACE_SET_DEBUGREG:
-		ret = ptrace_set_debugreg(child, addr, data);
-		break;
-
-	case PTRACE_DETACH:
-		ret = ptrace_detach(child, data);
-		break;
-
-	case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
-		unsigned int __user *tmp = (unsigned int __user *)addr;
-
-		for (i = 0; i < 32; i++) {
-			ret = put_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
-		}
-		break;
-	}
-
-	case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
-		unsigned int __user *tmp = (unsigned int __user *)addr;
-
-		for (i = 0; i < 32; i++) {
-			ret = get_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
-		}
-		break;
-	}
-
-	case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
-		unsigned int __user *tmp = (unsigned int __user *)addr;
-
-		flush_fp_to_thread(child);
-
-		for (i = 0; i < 32; i++) {
-			ret = put_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
-		}
-		break;
-	}
-
-	case PPC_PTRACE_SETFPREGS: { /* Get FPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
-		unsigned int __user *tmp = (unsigned int __user *)addr;
-
-		flush_fp_to_thread(child);
-
-		for (i = 0; i < 32; i++) {
-			ret = get_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
-		}
-		break;
-	}
-
 	case PTRACE_GETEVENTMSG:
 		ret = put_user(child->ptrace_message, (unsigned int __user *) data);
 		break;
 
-#ifdef CONFIG_ALTIVEC
+	case PTRACE_GETREGS: { /* Get all pt_regs from the child. */
+		int ui;
+	  	if (!access_ok(VERIFY_WRITE, (void __user *)data,
+			       PT_REGS_COUNT * sizeof(int))) {
+			ret = -EIO;
+			break;
+		}
+		ret = 0;
+		for (ui = 0; ui < PT_REGS_COUNT; ui ++) {
+			ret |= __put_user(ptrace_get_reg(child, ui),
+					  (unsigned int __user *) data);
+			data += sizeof(int);
+		}
+		break;
+	}
+
+	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
+		unsigned long tmp;
+		int ui;
+	  	if (!access_ok(VERIFY_READ, (void __user *)data,
+			       PT_REGS_COUNT * sizeof(int))) {
+			ret = -EIO;
+			break;
+		}
+		ret = 0;
+		for (ui = 0; ui < PT_REGS_COUNT; ui ++) {
+			ret = __get_user(tmp, (unsigned int __user *) data);
+			if (ret)
+				break;
+			ptrace_put_reg(child, ui, tmp);
+			data += sizeof(int);
+		}
+		break;
+	}
+
+	case PTRACE_GETFPREGS:
+	case PTRACE_SETFPREGS:
 	case PTRACE_GETVRREGS:
-		/* Get the child altivec register state. */
-		flush_altivec_to_thread(child);
-		ret = get_vrregs((unsigned long __user *)data, child);
+	case PTRACE_SETVRREGS:
+	case PTRACE_GETREGS64:
+	case PTRACE_SETREGS64:
+	case PPC_PTRACE_GETFPREGS:
+	case PPC_PTRACE_SETFPREGS:
+	case PTRACE_KILL:
+	case PTRACE_SINGLESTEP:
+	case PTRACE_DETACH:
+	case PTRACE_SET_DEBUGREG:
+	case PTRACE_SYSCALL:
+	case PTRACE_CONT:
+		ret = arch_ptrace(child, request, addr, data);
 		break;
 
-	case PTRACE_SETVRREGS:
-		/* Set the child altivec register state. */
-		flush_altivec_to_thread(child);
-		ret = set_vrregs(child, (unsigned long __user *)data);
+	/* Old reverse args ptrace calls */
+	case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
+	case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
+		ret = compat_ptrace_old(child, request, addr, data);
 		break;
-#endif
 
 	default:
 		ret = ptrace_request(child, request, addr, data);
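The PPC_PTRACE_POKEUSR_3264 rework above reads the full 64-bit register with ptrace_get_reg(), splices in the 32-bit half supplied by the compat tracer, and writes it back with ptrace_put_reg(). A user-space sketch of that splice, assuming a 64-bit unsigned long:

    #include <stdio.h>

    /* low_half != 0 replaces the low 32 bits, otherwise the high 32 bits */
    static unsigned long poke_half(unsigned long reg, unsigned int data, int low_half)
    {
        if (low_half)
            return (reg & ~0xffffffffUL) | data;
        return (reg & 0xffffffffUL) | ((unsigned long)data << 32);
    }

    int main(void)
    {
        unsigned long reg = 0x1111222233334444UL;

        printf("low  half poked: 0x%lx\n", poke_half(reg, 0xdeadbeef, 1));
        printf("high half poked: 0x%lx\n", poke_half(reg, 0xdeadbeef, 0));
        return 0;
    }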
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index f228682..a5de621 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -278,10 +278,8 @@
 {
 	struct device_node *node;
 	struct pci_controller *phb;
-	unsigned int index;
 	struct device_node *root = of_find_node_by_path("/");
 
-	index = 0;
 	for (node = of_get_next_child(root, NULL);
 	     node != NULL;
 	     node = of_get_next_child(root, node)) {
@@ -295,8 +293,7 @@
 			continue;
 		rtas_setup_phb(phb);
 		pci_process_bridge_OF_ranges(phb, node, 0);
-		pci_setup_phb_io(phb, index == 0);
-		index++;
+		isa_bridge_find_early(phb);
 	}
 
 	of_node_put(root);
@@ -335,7 +332,7 @@
 		return 1;
 	}
 
-	rc = unmap_bus_range(b);
+	rc = pcibios_unmap_io_space(b);
 	if (rc) {
 		printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
 			__FUNCTION__, b->name);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index ed07a19..4924c48 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -32,6 +32,7 @@
 #include <linux/unistd.h>
 #include <linux/serial.h>
 #include <linux/serial_8250.h>
+#include <linux/debugfs.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/processor.h>
@@ -486,6 +487,14 @@
 
 	switch(base_port) {
 	case I8042_DATA_REG:
+		if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
+			np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
+		if (np) {
+			parent = of_get_parent(np);
+			of_node_put(np);
+			np = parent;
+			break;
+		}
 		np = of_find_node_by_type(NULL, "8042");
 		break;
 	case FDC_BASE: /* FDC1 */
@@ -571,3 +580,15 @@
 
 late_initcall(check_cache_coherency);
 #endif /* CONFIG_CHECK_CACHE_COHERENCY */
+
+#ifdef CONFIG_DEBUG_FS
+struct dentry *powerpc_debugfs_root;
+
+static int powerpc_debugfs_init(void)
+{
+	powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL);
+
+	return powerpc_debugfs_root == NULL;
+}
+arch_initcall(powerpc_debugfs_init);
+#endif
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 35f8f44..7ec6ba5 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -262,13 +262,11 @@
 	 * Systems with OF can look in the properties on the cpu node(s)
 	 * for a possibly more accurate value.
 	 */
-	if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
-		dcache_bsize = cur_cpu_spec->dcache_bsize;
-		icache_bsize = cur_cpu_spec->icache_bsize;
-		ucache_bsize = 0;
-	} else
-		ucache_bsize = dcache_bsize = icache_bsize
-			= cur_cpu_spec->dcache_bsize;
+	dcache_bsize = cur_cpu_spec->dcache_bsize;
+	icache_bsize = cur_cpu_spec->icache_bsize;
+	ucache_bsize = 0;
+	if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE))
+		ucache_bsize = icache_bsize = dcache_bsize;
 
 	/* reboot on panic */
 	panic_timeout = 180;
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
new file mode 100644
index 0000000..c434d6c
--- /dev/null
+++ b/arch/powerpc/kernel/signal.c
@@ -0,0 +1,180 @@
+/*
+ * Common signal handling code for both 32 and 64 bits
+ *
+ *    Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
+ *    Extracted from signal_32.c and signal_64.c
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file README.legal in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/signal.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+
+#include "signal.h"
+
+/*
+ * Allocate space for the signal frame
+ */
+void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+			   size_t frame_size)
+{
+	unsigned long oldsp, newsp;
+
+	/* Default to using normal stack */
+	oldsp = regs->gpr[1];
+
+	/* Check for alt stack */
+	if ((ka->sa.sa_flags & SA_ONSTACK) &&
+	    current->sas_ss_size && !on_sig_stack(oldsp))
+		oldsp = (current->sas_ss_sp + current->sas_ss_size);
+
+	/* Get aligned frame */
+	newsp = (oldsp - frame_size) & ~0xFUL;
+
+	/* Check access */
+	if (!access_ok(VERIFY_WRITE, (void __user *)newsp, oldsp - newsp))
+		return NULL;
+
+	return (void __user *)newsp;
+}
+
+
+/*
+ * Restore the user process's signal mask
+ */
+void restore_sigmask(sigset_t *set)
+{
+	sigdelsetmask(set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = *set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+}
+
+static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
+				  int has_handler)
+{
+	unsigned long ret = regs->gpr[3];
+	int restart = 1;
+
+	/* syscall ? */
+	if (TRAP(regs) != 0x0C00)
+		return;
+
+	/* error signalled ? */
+	if (!(regs->ccr & 0x10000000))
+		return;
+
+	switch (ret) {
+	case ERESTART_RESTARTBLOCK:
+	case ERESTARTNOHAND:
+		/* ERESTARTNOHAND means that the syscall should only be
+		 * restarted if there was no handler for the signal, and since
+		 * we only get here if there is a handler, we don't restart.
+		 */
+		restart = !has_handler;
+		break;
+	case ERESTARTSYS:
+		/* ERESTARTSYS means to restart the syscall if there is no
+		 * handler or the handler was registered with SA_RESTART
+		 */
+		restart = !has_handler || (ka->sa.sa_flags & SA_RESTART) != 0;
+		break;
+	case ERESTARTNOINTR:
+		/* ERESTARTNOINTR means that the syscall should be
+		 * called again after the signal handler returns.
+		 */
+		break;
+	default:
+		return;
+	}
+	if (restart) {
+		if (ret == ERESTART_RESTARTBLOCK)
+			regs->gpr[0] = __NR_restart_syscall;
+		else
+			regs->gpr[3] = regs->orig_gpr3;
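+		/* Back up the PC so the system call instruction is executed again */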
+		regs->nip -= 4;
+		regs->result = 0;
+	} else {
+		regs->result = -EINTR;
+		regs->gpr[3] = EINTR;
+		regs->ccr |= 0x10000000;
+	}
+}
+
+int do_signal(sigset_t *oldset, struct pt_regs *regs)
+{
+	siginfo_t info;
+	int signr;
+	struct k_sigaction ka;
+	int ret;
+	int is32 = is_32bit_task();
+
+	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+		oldset = &current->saved_sigmask;
+	else if (!oldset)
+		oldset = &current->blocked;
+
+	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+	/* Is there any syscall restart business here ? */
+	check_syscall_restart(regs, &ka, signr > 0);
+
+	if (signr <= 0) {
+		/* No signal to deliver -- put the saved sigmask back */
+		if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+			clear_thread_flag(TIF_RESTORE_SIGMASK);
+			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+		}
+		return 0;               /* no signals delivered */
+	}
+
+	/*
+	 * Reenable the DABR before delivering the signal to
+	 * user space. The DABR will have been cleared if it
+	 * triggered inside the kernel.
+	 */
+	if (current->thread.dabr)
+		set_dabr(current->thread.dabr);
+
+	if (is32) {
+		if (ka.sa.sa_flags & SA_SIGINFO)
+			ret = handle_rt_signal32(signr, &ka, &info, oldset,
+					regs);
+		else
+			ret = handle_signal32(signr, &ka, &info, oldset,
+					regs);
+	} else {
+		ret = handle_rt_signal64(signr, &ka, &info, oldset, regs);
+	}
+
+	if (ret) {
+		spin_lock_irq(&current->sighand->siglock);
+		sigorsets(&current->blocked, &current->blocked,
+			  &ka.sa.sa_mask);
+		if (!(ka.sa.sa_flags & SA_NODEFER))
+			sigaddset(&current->blocked, signr);
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+
+		/*
+		 * A signal was successfully delivered; the saved sigmask is in
+		 * its frame, and we can clear the TIF_RESTORE_SIGMASK flag.
+		 */
+		if (test_thread_flag(TIF_RESTORE_SIGMASK))
+			clear_thread_flag(TIF_RESTORE_SIGMASK);
+	}
+
+	return ret;
+}
+
+long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+		unsigned long r5, unsigned long r6, unsigned long r7,
+		unsigned long r8, struct pt_regs *regs)
+{
+	return do_sigaltstack(uss, uoss, regs->gpr[1]);
+}
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
new file mode 100644
index 0000000..77efb3d
--- /dev/null
+++ b/arch/powerpc/kernel/signal.h
@@ -0,0 +1,55 @@
+/*
+ *    Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
+ *    Extracted from signal_32.c and signal_64.c
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file README.legal in the main directory of
+ * this archive for more details.
+ */
+
+#ifndef _POWERPC_ARCH_SIGNAL_H
+#define _POWERPC_ARCH_SIGNAL_H
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+				  size_t frame_size);
+extern void restore_sigmask(sigset_t *set);
+
+extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+			   siginfo_t *info, sigset_t *oldset,
+			   struct pt_regs *regs);
+
+extern int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
+			      siginfo_t *info, sigset_t *oldset,
+			      struct pt_regs *regs);
+
+
+#ifdef CONFIG_PPC64
+
+static inline int is_32bit_task(void)
+{
+	return test_thread_flag(TIF_32BIT);
+}
+
+extern int handle_rt_signal64(int signr, struct k_sigaction *ka,
+			      siginfo_t *info, sigset_t *set,
+			      struct pt_regs *regs);
+
+#else /* CONFIG_PPC64 */
+
+static inline int is_32bit_task(void)
+{
+	return 1;
+}
+
+static inline int handle_rt_signal64(int signr, struct k_sigaction *ka,
+				     siginfo_t *info, sigset_t *set,
+				     struct pt_regs *regs)
+{
+	return -EFAULT;
+}
+
+#endif /* !defined(CONFIG_PPC64) */
+
+#endif  /* _POWERPC_ARCH_SIGNAL_H */
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index dd1dca5..590057e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -51,12 +51,11 @@
 #include <asm/pgtable.h>
 #endif
 
+#include "signal.h"
+
 #undef DEBUG_SIG
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 #ifdef CONFIG_PPC64
-#define do_signal	do_signal32
 #define sys_sigsuspend	compat_sys_sigsuspend
 #define sys_rt_sigsuspend	compat_sys_rt_sigsuspend
 #define sys_rt_sigreturn	compat_sys_rt_sigreturn
@@ -231,8 +230,6 @@
 
 #endif /* CONFIG_PPC64 */
 
-int do_signal(sigset_t *oldset, struct pt_regs *regs);
-
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
@@ -251,14 +248,6 @@
  	return -ERESTARTNOHAND;
 }
 
-#ifdef CONFIG_PPC32
-long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, int r5,
-		int r6, int r7, int r8, struct pt_regs *regs)
-{
-	return do_sigaltstack(uss, uoss, regs->gpr[1]);
-}
-#endif
-
 long sys_sigaction(int sig, struct old_sigaction __user *act,
 		struct old_sigaction __user *oact)
 {
@@ -293,14 +282,17 @@
 /*
  * When we have signals to deliver, we set up on the
  * user stack, going down from the original stack pointer:
- *	a sigregs struct
+ *	an ABI gap of 56 words
+ *	an mcontext struct
  *	a sigcontext struct
  *	a gap of __SIGNAL_FRAMESIZE bytes
  *
- * Each of these things must be a multiple of 16 bytes in size.
+ * Each of these things must be a multiple of 16 bytes in size. The following
+ * structure represents all of this except the __SIGNAL_FRAMESIZE gap
  *
  */
-struct sigregs {
+struct sigframe {
+	struct sigcontext sctx;		/* the sigcontext */
 	struct mcontext	mctx;		/* all the register values */
 	/*
 	 * Programs using the rs6000/xcoff abi can save up to 19 gp
@@ -703,44 +695,22 @@
 }
 #endif /* CONFIG_PPC64 */
 
-
-/*
- * Restore the user process's signal mask
- */
-#ifdef CONFIG_PPC64
-extern void restore_sigmask(sigset_t *set);
-#else /* CONFIG_PPC64 */
-static void restore_sigmask(sigset_t *set)
-{
-	sigdelsetmask(set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = *set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-}
-#endif
-
 /*
  * Set up a signal frame for a "real-time" signal handler
  * (one which gets siginfo).
  */
-static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
+int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
 		siginfo_t *info, sigset_t *oldset,
-		struct pt_regs *regs, unsigned long newsp)
+		struct pt_regs *regs)
 {
 	struct rt_sigframe __user *rt_sf;
 	struct mcontext __user *frame;
-	unsigned long origsp = newsp;
+	unsigned long newsp = 0;
 
 	/* Set up Signal Frame */
 	/* Put a Real Time Context onto stack */
-	newsp -= sizeof(*rt_sf);
-	rt_sf = (struct rt_sigframe __user *)newsp;
-
-	/* create a stack frame for the caller of the handler */
-	newsp -= __SIGNAL_FRAMESIZE + 16;
-
-	if (!access_ok(VERIFY_WRITE, (void __user *)newsp, origsp - newsp))
+	rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf));
+	if (unlikely(rt_sf == NULL))
 		goto badframe;
 
 	/* Put the siginfo & fill in most of the ucontext */
@@ -770,8 +740,12 @@
 
 	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
 
+	/* create a stack frame for the caller of the handler */
+	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
 	if (put_user(regs->gpr[1], (u32 __user *)newsp))
 		goto badframe;
+
+	/* Fill registers for signal handler */
 	regs->gpr[1] = newsp;
 	regs->gpr[3] = sig;
 	regs->gpr[4] = (unsigned long) &rt_sf->info;
@@ -1015,27 +989,18 @@
 /*
  * OK, we're invoking a handler
  */
-static int handle_signal(unsigned long sig, struct k_sigaction *ka,
-		siginfo_t *info, sigset_t *oldset, struct pt_regs *regs,
-		unsigned long newsp)
+int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
 {
 	struct sigcontext __user *sc;
-	struct sigregs __user *frame;
-	unsigned long origsp = newsp;
+	struct sigframe __user *frame;
+	unsigned long newsp = 0;
 
 	/* Set up Signal Frame */
-	newsp -= sizeof(struct sigregs);
-	frame = (struct sigregs __user *) newsp;
-
-	/* Put a sigcontext on the stack */
-	newsp -= sizeof(*sc);
-	sc = (struct sigcontext __user *) newsp;
-
-	/* create a stack frame for the caller of the handler */
-	newsp -= __SIGNAL_FRAMESIZE;
-
-	if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
+	frame = get_sigframe(ka, regs, sizeof(*frame));
+	if (unlikely(frame == NULL))
 		goto badframe;
+	sc = (struct sigcontext __user *) &frame->sctx;
 
 #if _NSIG != 64
 #error "Please adjust handle_signal()"
@@ -1047,7 +1012,7 @@
 #else
 	    || __put_user(oldset->sig[1], &sc->_unused[3])
 #endif
-	    || __put_user(to_user_ptr(frame), &sc->regs)
+	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
 	    || __put_user(sig, &sc->signal))
 		goto badframe;
 
@@ -1063,8 +1028,11 @@
 
 	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
 
+	/* create a stack frame for the caller of the handler */
+	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
 	if (put_user(regs->gpr[1], (u32 __user *)newsp))
 		goto badframe;
+
 	regs->gpr[1] = newsp;
 	regs->gpr[3] = sig;
 	regs->gpr[4] = (unsigned long) sc;
@@ -1126,106 +1094,3 @@
 	force_sig(SIGSEGV, current);
 	return 0;
 }
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-int do_signal(sigset_t *oldset, struct pt_regs *regs)
-{
-	siginfo_t info;
-	struct k_sigaction ka;
-	unsigned int newsp;
-	int signr, ret;
-
-#ifdef CONFIG_PPC32
-	if (try_to_freeze()) {
-		signr = 0;
-		if (!signal_pending(current))
-			goto no_signal;
-	}
-#endif
-
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else if (!oldset)
-		oldset = &current->blocked;
-
-	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-#ifdef CONFIG_PPC32
-no_signal:
-#endif
-	if (TRAP(regs) == 0x0C00		/* System Call! */
-	    && regs->ccr & 0x10000000		/* error signalled */
-	    && ((ret = regs->gpr[3]) == ERESTARTSYS
-		|| ret == ERESTARTNOHAND || ret == ERESTARTNOINTR
-		|| ret == ERESTART_RESTARTBLOCK)) {
-
-		if (signr > 0
-		    && (ret == ERESTARTNOHAND || ret == ERESTART_RESTARTBLOCK
-			|| (ret == ERESTARTSYS
-			    && !(ka.sa.sa_flags & SA_RESTART)))) {
-			/* make the system call return an EINTR error */
-			regs->result = -EINTR;
-			regs->gpr[3] = EINTR;
-			/* note that the cr0.SO bit is already set */
-		} else {
-			regs->nip -= 4;	/* Back up & retry system call */
-			regs->result = 0;
-			regs->trap = 0;
-			if (ret == ERESTART_RESTARTBLOCK)
-				regs->gpr[0] = __NR_restart_syscall;
-			else
-				regs->gpr[3] = regs->orig_gpr3;
-		}
-	}
-
-	if (signr == 0) {
-		/* No signal to deliver -- put the saved sigmask back */
-		if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-			clear_thread_flag(TIF_RESTORE_SIGMASK);
-			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-		}
-		return 0;		/* no signals delivered */
-	}
-
-	if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
-	    && !on_sig_stack(regs->gpr[1]))
-		newsp = current->sas_ss_sp + current->sas_ss_size;
-	else
-		newsp = regs->gpr[1];
-	newsp &= ~0xfUL;
-
-#ifdef CONFIG_PPC64
-	/*
-	 * Reenable the DABR before delivering the signal to
-	 * user space. The DABR will have been cleared if it
-	 * triggered inside the kernel.
-	 */
-	if (current->thread.dabr)
-		set_dabr(current->thread.dabr);
-#endif
-
-	/* Whee!  Actually deliver the signal.  */
-	if (ka.sa.sa_flags & SA_SIGINFO)
-		ret = handle_rt_signal(signr, &ka, &info, oldset, regs, newsp);
-	else
-		ret = handle_signal(signr, &ka, &info, oldset, regs, newsp);
-
-	if (ret) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked, &current->blocked,
-			  &ka.sa.sa_mask);
-		if (!(ka.sa.sa_flags & SA_NODEFER))
-			sigaddset(&current->blocked, signr);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-		/* A signal was successfully delivered; the saved sigmask is in
-		   its frame, and we can clear the TIF_RESTORE_SIGMASK flag */
-		if (test_thread_flag(TIF_RESTORE_SIGMASK))
-			clear_thread_flag(TIF_RESTORE_SIGMASK);
-	}
-
-	return ret;
-}
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index b27e268..de895e6 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -34,9 +34,9 @@
 #include <asm/syscalls.h>
 #include <asm/vdso.h>
 
-#define DEBUG_SIG 0
+#include "signal.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+#define DEBUG_SIG 0
 
 #define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
 #define FP_REGS_SIZE	sizeof(elf_fpregset_t)
@@ -64,14 +64,6 @@
 	char abigap[288];
 } __attribute__ ((aligned (16)));
 
-long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long r5,
-		     unsigned long r6, unsigned long r7, unsigned long r8,
-		     struct pt_regs *regs)
-{
-	return do_sigaltstack(uss, uoss, regs->gpr[1]);
-}
-
-
 /*
  * Set up the sigcontext for the signal frame.
  */
@@ -208,25 +200,6 @@
 }
 
 /*
- * Allocate space for the signal frame
- */
-static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
-				  size_t frame_size)
-{
-        unsigned long newsp;
-
-        /* Default to using normal stack */
-        newsp = regs->gpr[1];
-
-	if ((ka->sa.sa_flags & SA_ONSTACK) && current->sas_ss_size) {
-		if (! on_sig_stack(regs->gpr[1]))
-			newsp = (current->sas_ss_sp + current->sas_ss_size);
-	}
-
-        return (void __user *)((newsp - frame_size) & -16ul);
-}
-
-/*
  * Setup the trampoline code on the stack
  */
 static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
@@ -253,19 +226,6 @@
 }
 
 /*
- * Restore the user process's signal mask (also used by signal32.c)
- */
-void restore_sigmask(sigset_t *set)
-{
-	sigdelsetmask(set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = *set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-}
-
-
-/*
  * Handle {get,set,swap}_context operations
  */
 int sys_swapcontext(struct ucontext __user *old_ctx,
@@ -359,7 +319,7 @@
 	return 0;
 }
 
-static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
+int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
 		sigset_t *set, struct pt_regs *regs)
 {
 	/* Handler is *really* a pointer to the function descriptor for
@@ -373,8 +333,7 @@
 	long err = 0;
 
 	frame = get_sigframe(ka, regs, sizeof(*frame));
-
-	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+	if (unlikely(frame == NULL))
 		goto badframe;
 
 	err |= __put_user(&frame->info, &frame->pinfo);
@@ -411,7 +370,7 @@
 	funct_desc_ptr = (func_descr_t __user *) ka->sa.sa_handler;
 
 	/* Allocate a dummy caller frame for the signal handler. */
-	newsp = (unsigned long)frame - __SIGNAL_FRAMESIZE;
+	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
 	err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);
 
 	/* Set up "regs" so we "return" to the signal handler. */
@@ -442,134 +401,3 @@
 	force_sigsegv(signr, current);
 	return 0;
 }
-
-
-/*
- * OK, we're invoking a handler
- */
-static int handle_signal(unsigned long sig, struct k_sigaction *ka,
-			 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
-{
-	int ret;
-
-	/* Set up Signal Frame */
-	ret = setup_rt_frame(sig, ka, info, oldset, regs);
-
-	if (ret) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&current->blocked,sig);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-	}
-
-	return ret;
-}
-
-static inline void syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
-{
-	switch ((int)regs->result) {
-	case -ERESTART_RESTARTBLOCK:
-	case -ERESTARTNOHAND:
-		/* ERESTARTNOHAND means that the syscall should only be
-		 * restarted if there was no handler for the signal, and since
-		 * we only get here if there is a handler, we dont restart.
-		 */
-		regs->result = -EINTR;
-		regs->gpr[3] = EINTR;
-		regs->ccr |= 0x10000000;
-		break;
-	case -ERESTARTSYS:
-		/* ERESTARTSYS means to restart the syscall if there is no
-		 * handler or the handler was registered with SA_RESTART
-		 */
-		if (!(ka->sa.sa_flags & SA_RESTART)) {
-			regs->result = -EINTR;
-			regs->gpr[3] = EINTR;
-			regs->ccr |= 0x10000000;
-			break;
-		}
-		/* fallthrough */
-	case -ERESTARTNOINTR:
-		/* ERESTARTNOINTR means that the syscall should be
-		 * called again after the signal handler returns.
-		 */
-		regs->gpr[3] = regs->orig_gpr3;
-		regs->nip -= 4;
-		regs->result = 0;
-		break;
-	}
-}
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-int do_signal(sigset_t *oldset, struct pt_regs *regs)
-{
-	siginfo_t info;
-	int signr;
-	struct k_sigaction ka;
-
-	/*
-	 * If the current thread is 32 bit - invoke the
-	 * 32 bit signal handling code
-	 */
-	if (test_thread_flag(TIF_32BIT))
-		return do_signal32(oldset, regs);
-
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else if (!oldset)
-		oldset = &current->blocked;
-
-	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-	if (signr > 0) {
-		int ret;
-
-		/* Whee!  Actually deliver the signal.  */
-		if (TRAP(regs) == 0x0C00)
-			syscall_restart(regs, &ka);
-
-		/*
-		 * Reenable the DABR before delivering the signal to
-		 * user space. The DABR will have been cleared if it
-		 * triggered inside the kernel.
-		 */
-		if (current->thread.dabr)
-			set_dabr(current->thread.dabr);
-
-		ret = handle_signal(signr, &ka, &info, oldset, regs);
-
-		/* If a signal was successfully delivered, the saved sigmask is in
-		   its frame, and we can clear the TIF_RESTORE_SIGMASK flag */
-		if (ret && test_thread_flag(TIF_RESTORE_SIGMASK))
-			clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-		return ret;
-	}
-
-	if (TRAP(regs) == 0x0C00) {	/* System Call! */
-		if ((int)regs->result == -ERESTARTNOHAND ||
-		    (int)regs->result == -ERESTARTSYS ||
-		    (int)regs->result == -ERESTARTNOINTR) {
-			regs->gpr[3] = regs->orig_gpr3;
-			regs->nip -= 4; /* Back up & retry system call */
-			regs->result = 0;
-		} else if ((int)regs->result == -ERESTART_RESTARTBLOCK) {
-			regs->gpr[0] = __NR_restart_syscall;
-			regs->nip -= 4;
-			regs->result = 0;
-		}
-	}
-	/* No signal to deliver -- put the saved sigmask back */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(do_signal);
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index b42cbf1..bd85b5f 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -773,6 +773,13 @@
 	return sys_truncate(path, (high << 32) | low);
 }
 
+asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
+				     u32 lenhi, u32 lenlo)
+{
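+	/* Reassemble the 64-bit offset and length from their 32-bit halves */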
+	return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
+			     ((loff_t)lenhi << 32) | lenlo);
+}
+
 asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long high,
 				 unsigned long low)
 {
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 68991c2..55d29ed 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -442,12 +442,14 @@
 	return sysfs_create_link(&node->sysdev.kobj, &dev->kobj,
 			kobject_name(&dev->kobj));
 }
+EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
 
 void sysfs_remove_device_from_node(struct sys_device *dev, int nid)
 {
 	struct node *node = &node_devices[nid];
 	sysfs_remove_link(&node->sysdev.kobj, kobject_name(&dev->kobj));
 }
+EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
 
 #else
 static void register_nodes(void)
@@ -457,9 +459,6 @@
 
 #endif
 
-EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
-EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
-
 /* Only valid if CPU is present. */
 static ssize_t show_physical_id(struct sys_device *dev, char *buf)
 {
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 2c8564d..e5df167 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -77,9 +77,8 @@
 /* keep track of when we need to update the rtc */
 time_t last_rtc_update;
 #ifdef CONFIG_PPC_ISERIES
-unsigned long iSeries_recal_titan = 0;
-unsigned long iSeries_recal_tb = 0; 
-static unsigned long first_settimeofday = 1;
+static unsigned long __initdata iSeries_recal_titan;
+static signed long __initdata iSeries_recal_tb;
 #endif
 
 /* The decrementer counts down by 128 every 128ns on a 601. */
@@ -113,8 +112,9 @@
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL_GPL(rtc_lock);
 
-u64 tb_to_ns_scale;
-unsigned tb_to_ns_shift;
+static u64 tb_to_ns_scale __read_mostly;
+static unsigned tb_to_ns_shift __read_mostly;
+static unsigned long boot_tb __read_mostly;
 
 struct gettimeofday_struct do_gtod;
 
@@ -214,7 +214,6 @@
  	run_posix_cpu_timers(current);
 }
 
-#ifdef CONFIG_PPC_SPLPAR
 /*
  * Stuff for accounting stolen time.
  */
@@ -222,19 +221,28 @@
 	int	initialized;			/* thread is running */
 	u64	tb;			/* last TB value read */
 	u64	purr;			/* last PURR value read */
-	spinlock_t lock;
 };
 
+/*
+ * Each entry in the cpu_purr_data array is manipulated only by its
+ * "owner" cpu -- usually in the timer interrupt but also occasionally
+ * in process context for cpu online.  As long as cpus do not touch
+ * each others' cpu_purr_data, disabling local interrupts is
+ * sufficient to serialize accesses.
+ */
 static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
 
 static void snapshot_tb_and_purr(void *data)
 {
+	unsigned long flags;
 	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
 
+	local_irq_save(flags);
 	p->tb = mftb();
 	p->purr = mfspr(SPRN_PURR);
 	wmb();
 	p->initialized = 1;
+	local_irq_restore(flags);
 }
 
 /*
@@ -242,15 +250,14 @@
  */
 void snapshot_timebases(void)
 {
-	int cpu;
-
 	if (!cpu_has_feature(CPU_FTR_PURR))
 		return;
-	for_each_possible_cpu(cpu)
-		spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
 	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
 }
 
+/*
+ * Must be called with interrupts disabled.
+ */
 void calculate_steal_time(void)
 {
 	u64 tb, purr;
@@ -262,7 +269,6 @@
 	pme = &per_cpu(cpu_purr_data, smp_processor_id());
 	if (!pme->initialized)
 		return;		/* this can happen in early boot */
-	spin_lock(&pme->lock);
 	tb = mftb();
 	purr = mfspr(SPRN_PURR);
 	stolen = (tb - pme->tb) - (purr - pme->purr);
@@ -270,9 +276,9 @@
 		account_steal_time(current, stolen);
 	pme->tb = tb;
 	pme->purr = purr;
-	spin_unlock(&pme->lock);
 }
 
+#ifdef CONFIG_PPC_SPLPAR
 /*
  * Must be called before the cpu is added to the online map when
  * a cpu is being brought up at runtime.
@@ -284,12 +290,12 @@
 
 	if (!cpu_has_feature(CPU_FTR_PURR))
 		return;
+	local_irq_save(flags);
 	pme = &per_cpu(cpu_purr_data, smp_processor_id());
-	spin_lock_irqsave(&pme->lock, flags);
 	pme->tb = mftb();
 	pme->purr = mfspr(SPRN_PURR);
 	pme->initialized = 1;
-	spin_unlock_irqrestore(&pme->lock, flags);
+	local_irq_restore(flags);
 }
 
 #endif /* CONFIG_PPC_SPLPAR */
@@ -550,10 +556,15 @@
  * returned by the service processor for the timebase frequency.  
  */
 
-static void iSeries_tb_recal(void)
+static int __init iSeries_tb_recal(void)
 {
 	struct div_result divres;
 	unsigned long titan, tb;
+
+	/* Make sure we only run on iSeries */
+	if (!firmware_has_feature(FW_FEATURE_ISERIES))
+		return -ENODEV;
+
 	tb = get_tb();
 	titan = HvCallXm_loadTod();
 	if ( iSeries_recal_titan ) {
@@ -594,8 +605,18 @@
 	}
 	iSeries_recal_titan = titan;
 	iSeries_recal_tb = tb;
+
+	return 0;
 }
-#endif
+late_initcall(iSeries_tb_recal);
+
+/* Called from platform early init */
+void __init iSeries_time_init_early(void)
+{
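+	/* Report failure if the "powerpc" debugfs directory could not be created */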
+	iSeries_recal_tb = get_tb();
+	iSeries_recal_titan = HvCallXm_loadTod();
+}
+#endif /* CONFIG_PPC_ISERIES */
 
 /*
  * For iSeries shared processors, we have to let the hypervisor
@@ -735,7 +756,7 @@
 {
 	if (__USE_RTC())
 		return get_rtc();
-	return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
+	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
 }
 
 int do_settimeofday(struct timespec *tv)
@@ -759,12 +780,6 @@
 	 * to the RTC again, or write to the RTC but then they don't call
 	 * settimeofday to perform this operation.
 	 */
-#ifdef CONFIG_PPC_ISERIES
-	if (firmware_has_feature(FW_FEATURE_ISERIES) && first_settimeofday) {
-		iSeries_tb_recal();
-		first_settimeofday = 0;
-	}
-#endif
 
 	/* Make userspace gettimeofday spin until we're done. */
 	++vdso_data->tb_update_count;
@@ -960,6 +975,8 @@
 	}
 	tb_to_ns_scale = scale;
 	tb_to_ns_shift = shift;
+	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
+	boot_tb = get_tb();
 
 	tm = get_boot_time();
 
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 3b8427e..2bb1cb9 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -149,6 +149,7 @@
 
 	bust_spinlocks(0);
 	die.lock_owner = -1;
+	add_taint(TAINT_DIE);
 	spin_unlock_irqrestore(&die.lock, flags);
 
 	if (kexec_should_crash(current) ||
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 4245579..cef01e4 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -670,7 +670,7 @@
 	/*
 	 * Fill up the "systemcfg" stuff for backward compatiblity
 	 */
-	strcpy(vdso_data->eye_catcher, "SYSTEMCFG:PPC64");
+	strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64");
 	vdso_data->version.major = SYSTEMCFG_MAJOR;
 	vdso_data->version.minor = SYSTEMCFG_MINOR;
 	vdso_data->processor = mfspr(SPRN_PVR);
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 24ff7d9..0c45855 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -7,6 +7,7 @@
 #define PROVIDE32(x)	PROVIDE(x)
 #endif
 #include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
 
 ENTRY(_stext)
 
@@ -62,6 +63,8 @@
 		__stop___ex_table = .;
 	}
 
+	NOTES
+
 	BUG_TABLE
 
 /*
@@ -143,6 +146,7 @@
 	.data.percpu : {
 		__per_cpu_start = .;
 		*(.data.percpu)
+		*(.data.percpu.shared_aligned)
 		__per_cpu_end = .;
 	}
 
@@ -213,6 +217,11 @@
 		*(.data.cacheline_aligned)
 	}
 
+	. = ALIGN(L1_CACHE_BYTES);
+	.data.read_mostly : {
+		*(.data.read_mostly)
+	}
+
 	. = ALIGN(PAGE_SIZE);
 	__data_nosave : {
 		__nosave_begin = .;
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index ca4dcb0..c3df504 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -12,7 +12,6 @@
  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  *  Derived from "arch/i386/mm/init.c"
  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/4xx_mmu.c
index 838e09d..7ff2609 100644
--- a/arch/powerpc/mm/4xx_mmu.c
+++ b/arch/powerpc/mm/4xx_mmu.c
@@ -9,7 +9,6 @@
  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  *  Derived from "arch/i386/mm/init.c"
  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 4f839c6..7e4d27a 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -11,8 +11,7 @@
 hash-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
 obj-$(CONFIG_PPC64)		+= init_64.o pgtable_64.o mmu_context_64.o \
 				   hash_utils_64.o hash_low_64.o tlb_64.o \
-				   slb_low.o slb.o stab.o mmap.o imalloc.o \
-				   $(hash-y)
+				   slb_low.o slb.o stab.o mmap.o $(hash-y)
 obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o hash_low_32.o tlb_32.o
 obj-$(CONFIG_40x)		+= 4xx_mmu.o
 obj-$(CONFIG_44x)		+= 44x_mmu.o
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 115b25f..3767211 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -145,7 +145,7 @@
 	struct mm_struct *mm = current->mm;
 	siginfo_t info;
 	int code = SEGV_MAPERR;
-	int is_write = 0;
+	int is_write = 0, ret;
 	int trap = TRAP(regs);
  	int is_exec = trap == 0x400;
 
@@ -330,22 +330,18 @@
 	 * the fault.
 	 */
  survive:
-	switch (handle_mm_fault(mm, vma, address, is_write)) {
-
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	ret = handle_mm_fault(mm, vma, address, is_write);
+	if (unlikely(ret & VM_FAULT_ERROR)) {
+		if (ret & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (ret & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
-
+	if (ret & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	up_read(&mm->mmap_sem);
 	return 0;
 
@@ -380,7 +376,7 @@
 	}
 	printk("VM: killing process %s\n", current->comm);
 	if (user_mode(regs))
-		do_exit(SIGKILL);
+		do_group_exit(SIGKILL);
 	return SIGKILL;
 
 do_sigbus:
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 123da03..afab247 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -14,7 +14,6 @@
  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  *  Derived from "arch/i386/mm/init.c"
  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 4a20d89..6ba9b47 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -104,7 +104,7 @@
 		spin_unlock(&native_tlbie_lock);
 }
 
-static inline void native_lock_hpte(hpte_t *hptep)
+static inline void native_lock_hpte(struct hash_pte *hptep)
 {
 	unsigned long *word = &hptep->v;
 
@@ -116,7 +116,7 @@
 	}
 }
 
-static inline void native_unlock_hpte(hpte_t *hptep)
+static inline void native_unlock_hpte(struct hash_pte *hptep)
 {
 	unsigned long *word = &hptep->v;
 
@@ -128,7 +128,7 @@
 			unsigned long pa, unsigned long rflags,
 			unsigned long vflags, int psize)
 {
-	hpte_t *hptep = htab_address + hpte_group;
+	struct hash_pte *hptep = htab_address + hpte_group;
 	unsigned long hpte_v, hpte_r;
 	int i;
 
@@ -163,7 +163,7 @@
 
 	hptep->r = hpte_r;
 	/* Guarantee the second dword is visible before the valid bit */
-	__asm__ __volatile__ ("eieio" : : : "memory");
+	eieio();
 	/*
 	 * Now set the first dword including the valid bit
 	 * NOTE: this also unlocks the hpte
@@ -177,7 +177,7 @@
 
 static long native_hpte_remove(unsigned long hpte_group)
 {
-	hpte_t *hptep;
+	struct hash_pte *hptep;
 	int i;
 	int slot_offset;
 	unsigned long hpte_v;
@@ -217,7 +217,7 @@
 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 				 unsigned long va, int psize, int local)
 {
-	hpte_t *hptep = htab_address + slot;
+	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v, want_v;
 	int ret = 0;
 
@@ -233,15 +233,14 @@
 	/* Even if we miss, we need to invalidate the TLB */
 	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
 		DBG_LOW(" -> miss\n");
-		native_unlock_hpte(hptep);
 		ret = -1;
 	} else {
 		DBG_LOW(" -> hit\n");
 		/* Update the HPTE */
 		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
 			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
-		native_unlock_hpte(hptep);
 	}
+	native_unlock_hpte(hptep);
 
 	/* Ensure it is out of the tlb too. */
 	tlbie(va, psize, local);
@@ -251,7 +250,7 @@
 
 static long native_hpte_find(unsigned long va, int psize)
 {
-	hpte_t *hptep;
+	struct hash_pte *hptep;
 	unsigned long hash;
 	unsigned long i, j;
 	long slot;
@@ -294,7 +293,7 @@
 {
 	unsigned long vsid, va;
 	long slot;
-	hpte_t *hptep;
+	struct hash_pte *hptep;
 
 	vsid = get_kernel_vsid(ea);
 	va = (vsid << 28) | (ea & 0x0fffffff);
@@ -315,7 +314,7 @@
 static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 				   int psize, int local)
 {
-	hpte_t *hptep = htab_address + slot;
+	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v;
 	unsigned long want_v;
 	unsigned long flags;
@@ -345,7 +344,7 @@
 #define LP_BITS		8
 #define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
 
-static void hpte_decode(hpte_t *hpte, unsigned long slot,
+static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 			int *psize, unsigned long *va)
 {
 	unsigned long hpte_r = hpte->r;
@@ -415,7 +414,7 @@
 static void native_hpte_clear(void)
 {
 	unsigned long slot, slots, flags;
-	hpte_t *hptep = htab_address;
+	struct hash_pte *hptep = htab_address;
 	unsigned long hpte_v, va;
 	unsigned long pteg_count;
 	int psize;
@@ -462,7 +461,7 @@
 static void native_flush_hash_range(unsigned long number, int local)
 {
 	unsigned long va, hash, index, hidx, shift, slot;
-	hpte_t *hptep;
+	struct hash_pte *hptep;
 	unsigned long hpte_v;
 	unsigned long want_v;
 	unsigned long flags;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 4f2f453..2ce9491 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -87,7 +87,7 @@
 static unsigned long _SDR1;
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 
-hpte_t *htab_address;
+struct hash_pte *htab_address;
 unsigned long htab_size_bytes;
 unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
deleted file mode 100644
index c831815..0000000
--- a/arch/powerpc/mm/imalloc.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
- * c 2001 PPC 64 Team, IBM Corp
- * 
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- */
-
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <linux/mutex.h>
-#include <asm/cacheflush.h>
-
-#include "mmu_decl.h"
-
-static DEFINE_MUTEX(imlist_mutex);
-struct vm_struct * imlist = NULL;
-
-static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
-{
-	unsigned long addr;
-	struct vm_struct **p, *tmp;
-
-	addr = ioremap_bot;
-	for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
-		if (size + addr < (unsigned long) tmp->addr)
-			break;
-		if ((unsigned long)tmp->addr >= ioremap_bot)
-			addr = tmp->size + (unsigned long) tmp->addr;
-		if (addr >= IMALLOC_END-size)
-			return 1;
-	}
-	*im_addr = addr;
-
-	return 0;
-}
-
-/* Return whether the region described by v_addr and size is a subset
- * of the region described by parent
- */
-static inline int im_region_is_subset(unsigned long v_addr, unsigned long size,
-			struct vm_struct *parent)
-{
-	return (int) (v_addr >= (unsigned long) parent->addr &&
-	              v_addr < (unsigned long) parent->addr + parent->size &&
-	    	      size < parent->size);
-}
-
-/* Return whether the region described by v_addr and size is a superset
- * of the region described by child
- */
-static int im_region_is_superset(unsigned long v_addr, unsigned long size,
-		struct vm_struct *child)
-{
-	struct vm_struct parent;
-
-	parent.addr = (void *) v_addr;
-	parent.size = size;
-
-	return im_region_is_subset((unsigned long) child->addr, child->size,
-			&parent);
-}
-
-/* Return whether the region described by v_addr and size overlaps
- * the region described by vm.  Overlapping regions meet the
- * following conditions:
- * 1) The regions share some part of the address space
- * 2) The regions aren't identical
- * 3) Neither region is a subset of the other
- */
-static int im_region_overlaps(unsigned long v_addr, unsigned long size,
-		     struct vm_struct *vm)
-{
-	if (im_region_is_superset(v_addr, size, vm))
-		return 0;
-
-	return (v_addr + size > (unsigned long) vm->addr + vm->size &&
-		v_addr < (unsigned long) vm->addr + vm->size) ||
-	       (v_addr < (unsigned long) vm->addr &&
-		v_addr + size > (unsigned long) vm->addr);
-}
-
-/* Determine imalloc status of region described by v_addr and size.
- * Can return one of the following:
- * IM_REGION_UNUSED   -  Entire region is unallocated in imalloc space.
- * IM_REGION_SUBSET -    Region is a subset of a region that is already
- * 			 allocated in imalloc space.
- * 		         vm will be assigned to a ptr to the parent region.
- * IM_REGION_EXISTS -    Exact region already allocated in imalloc space.
- *                       vm will be assigned to a ptr to the existing imlist
- *                       member.
- * IM_REGION_OVERLAPS -  Region overlaps an allocated region in imalloc space.
- * IM_REGION_SUPERSET -  Region is a superset of a region that is already
- *                       allocated in imalloc space.
- */
-static int im_region_status(unsigned long v_addr, unsigned long size,
-		    struct vm_struct **vm)
-{
-	struct vm_struct *tmp;
-
-	for (tmp = imlist; tmp; tmp = tmp->next)
-		if (v_addr < (unsigned long) tmp->addr + tmp->size)
-			break;
-
-	*vm = NULL;
-	if (tmp) {
-		if (im_region_overlaps(v_addr, size, tmp))
-			return IM_REGION_OVERLAP;
-
-		*vm = tmp;
-		if (im_region_is_subset(v_addr, size, tmp)) {
-			/* Return with tmp pointing to superset */
-			return IM_REGION_SUBSET;
-		}
-		if (im_region_is_superset(v_addr, size, tmp)) {
-			/* Return with tmp pointing to first subset */
-			return IM_REGION_SUPERSET;
-		}
-		else if (v_addr == (unsigned long) tmp->addr &&
-		 	 size == tmp->size) {
-			/* Return with tmp pointing to exact region */
-			return IM_REGION_EXISTS;
-		}
-	}
-
-	return IM_REGION_UNUSED;
-}
-
-static struct vm_struct * split_im_region(unsigned long v_addr, 
-		unsigned long size, struct vm_struct *parent)
-{
-	struct vm_struct *vm1 = NULL;
-	struct vm_struct *vm2 = NULL;
-	struct vm_struct *new_vm = NULL;
-	
-	vm1 = kmalloc(sizeof(*vm1), GFP_KERNEL);
-	if (vm1	== NULL) {
-		printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
-		return NULL;
-	}
-
-	if (v_addr == (unsigned long) parent->addr) {
-	        /* Use existing parent vm_struct to represent child, allocate
-		 * new one for the remainder of parent range
-		 */
-		vm1->size = parent->size - size;
-		vm1->addr = (void *) (v_addr + size);
-		vm1->next = parent->next;
-
-		parent->size = size;
-		parent->next = vm1;
-		new_vm = parent;
-	} else if (v_addr + size == (unsigned long) parent->addr + 
-			parent->size) {
-		/* Allocate new vm_struct to represent child, use existing
-		 * parent one for remainder of parent range
-		 */
-		vm1->size = size;
-		vm1->addr = (void *) v_addr;
-		vm1->next = parent->next;
-		new_vm = vm1;
-
-		parent->size -= size;
-		parent->next = vm1;
-	} else {
-	        /* Allocate two new vm_structs for the new child and 
-		 * uppermost remainder, and use existing parent one for the
-		 * lower remainder of parent range
-		 */
-		vm2 = kmalloc(sizeof(*vm2), GFP_KERNEL);
-		if (vm2 == NULL) {
-			printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
-			kfree(vm1);
-			return NULL;
-		}
-
-		vm1->size = size;
-		vm1->addr = (void *) v_addr;
-		vm1->next = vm2;
-		new_vm = vm1;
-
-		vm2->size = ((unsigned long) parent->addr + parent->size) - 
-				(v_addr + size);
-		vm2->addr = (void *) v_addr + size;
-		vm2->next = parent->next;
-
-		parent->size = v_addr - (unsigned long) parent->addr;
-		parent->next = vm1;
-	}
-
-	return new_vm;
-}
-
-static struct vm_struct * __add_new_im_area(unsigned long req_addr, 
-					    unsigned long size)
-{
-	struct vm_struct **p, *tmp, *area;
-		
-	for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
-		if (req_addr + size <= (unsigned long)tmp->addr)
-			break;
-	}
-	
-	area = kmalloc(sizeof(*area), GFP_KERNEL);
-	if (!area)
-		return NULL;
-	area->flags = 0;
-	area->addr = (void *)req_addr;
-	area->size = size;
-	area->next = *p;
-	*p = area;
-
-	return area;
-}
-
-static struct vm_struct * __im_get_area(unsigned long req_addr, 
-					unsigned long size,
-					int criteria)
-{
-	struct vm_struct *tmp;
-	int status;
-
-	status = im_region_status(req_addr, size, &tmp);
-	if ((criteria & status) == 0) {
-		return NULL;
-	}
-	
-	switch (status) {
-	case IM_REGION_UNUSED:
-		tmp = __add_new_im_area(req_addr, size);
-		break;
-	case IM_REGION_SUBSET:
-		tmp = split_im_region(req_addr, size, tmp);
-		break;
-	case IM_REGION_EXISTS:
-		/* Return requested region */
-		break;
-	case IM_REGION_SUPERSET:
-		/* Return first existing subset of requested region */
-		break;
-	default:
-		printk(KERN_ERR "%s() unexpected imalloc region status\n",
-				__FUNCTION__);
-		tmp = NULL;
-	}
-
-	return tmp;
-}
-
-struct vm_struct * im_get_free_area(unsigned long size)
-{
-	struct vm_struct *area;
-	unsigned long addr;
-	
-	mutex_lock(&imlist_mutex);
-	if (get_free_im_addr(size, &addr)) {
-		printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n",
-				__FUNCTION__, size);
-		area = NULL;
-		goto next_im_done;
-	}
-
-	area = __im_get_area(addr, size, IM_REGION_UNUSED);
-	if (area == NULL) {
-		printk(KERN_ERR 
-		       "%s() cannot obtain area for addr 0x%lx size 0x%lx\n",
-			__FUNCTION__, addr, size);
-	}
-next_im_done:
-	mutex_unlock(&imlist_mutex);
-	return area;
-}
-
-struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
-		int criteria)
-{
-	struct vm_struct *area;
-
-	mutex_lock(&imlist_mutex);
-	area = __im_get_area(v_addr, size, criteria);
-	mutex_unlock(&imlist_mutex);
-	return area;
-}
-
-void im_free(void * addr)
-{
-	struct vm_struct **p, *tmp;
-  
-	if (!addr)
-		return;
-	if ((unsigned long) addr & ~PAGE_MASK) {
-		printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__,			addr);
-		return;
-	}
-	mutex_lock(&imlist_mutex);
-	for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
-		if (tmp->addr == addr) {
-			*p = tmp->next;
-			unmap_vm_area(tmp);
-			kfree(tmp);
-			mutex_unlock(&imlist_mutex);
-			return;
-		}
-	}
-	mutex_unlock(&imlist_mutex);
-	printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
-			addr);
-}
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 5fce6cc..e1f5ded 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -5,7 +5,6 @@
  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
  *
  *  Derived from "arch/i386/mm/init.c"
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 7312a26..1d6edf7 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -5,7 +5,6 @@
  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  *  Derived from "arch/i386/mm/init.c"
  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0266a94..f0e7eed 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -5,7 +5,6 @@
  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
  *
  *  Derived from "arch/i386/mm/init.c"
@@ -129,8 +128,6 @@
 	zone = pgdata->node_zones;
 
 	return __add_pages(zone, start_pfn, nr_pages);
-
-	return 0;
 }
 
 /*
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
index 792086b..cc32ba4 100644
--- a/arch/powerpc/mm/mmu_context_32.c
+++ b/arch/powerpc/mm/mmu_context_32.c
@@ -11,7 +11,6 @@
  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  *  Derived from "arch/i386/mm/init.c"
  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 2558c34..c94a64f 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -8,7 +8,6 @@
  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  *  Derived from "arch/i386/mm/init.c"
  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
@@ -40,8 +39,8 @@
 extern unsigned long ioremap_base;
 extern unsigned int rtas_data, rtas_size;
 
-struct _PTE;
-extern struct _PTE *Hash, *Hash_end;
+struct hash_pte;
+extern struct hash_pte *Hash, *Hash_end;
 extern unsigned long Hash_size, Hash_mask;
 
 extern unsigned int num_tlbcam_entries;
@@ -90,16 +89,4 @@
 	else
 		_tlbie(va);
 }
-#else /* CONFIG_PPC64 */
-/* imalloc region types */
-#define IM_REGION_UNUSED	0x1
-#define IM_REGION_SUBSET	0x2
-#define IM_REGION_EXISTS	0x4
-#define IM_REGION_OVERLAP	0x8
-#define IM_REGION_SUPERSET	0x10
-
-extern struct vm_struct * im_get_free_area(unsigned long size);
-extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
-				      int region_type);
-extern void im_free(void *addr);
 #endif
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index f6ae1a5..6448872 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -8,7 +8,6 @@
  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  *  Derived from "arch/i386/mm/init.c"
  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
@@ -37,7 +36,6 @@
 unsigned long ioremap_base;
 unsigned long ioremap_bot;
 EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */
-int io_bat_index;
 
 #if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
 #define HAVE_BATS	1
@@ -300,51 +298,6 @@
 	}
 }
 
-/* is x a power of 4? */
-#define is_power_of_4(x)	is_power_of_2(x) && (ffs(x) & 1)
-
-/*
- * Set up a mapping for a block of I/O.
- * virt, phys, size must all be page-aligned.
- * This should only be called before ioremap is called.
- */
-void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
-			     unsigned int size, int flags)
-{
-	int i;
-
-	if (virt > KERNELBASE && virt < ioremap_bot)
-		ioremap_bot = ioremap_base = virt;
-
-#ifdef HAVE_BATS
-	/*
-	 * Use a BAT for this if possible...
-	 */
-	if (io_bat_index < 2 && is_power_of_2(size)
-	    && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
-		setbat(io_bat_index, virt, phys, size, flags);
-		++io_bat_index;
-		return;
-	}
-#endif /* HAVE_BATS */
-
-#ifdef HAVE_TLBCAM
-	/*
-	 * Use a CAM for this if possible...
-	 */
-	if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
-	    && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
-		settlbcam(tlbcam_index, virt, phys, size, flags, 0);
-		++tlbcam_index;
-		return;
-	}
-#endif /* HAVE_TLBCAM */
-
-	/* No BATs available, put it in the page tables. */
-	for (i = 0; i < size; i += PAGE_SIZE)
-		map_page(virt + i, phys + i, flags);
-}
-
 /* Scan the real Linux page tables and return a PTE pointer for
  * a virtual address in a context.
  * Returns true (1) if PTE was found, zero otherwise.  The pointer to
@@ -379,82 +332,6 @@
         return(retval);
 }
 
-/* Find physical address for this virtual address.  Normally used by
- * I/O functions, but anyone can call it.
- */
-unsigned long iopa(unsigned long addr)
-{
-	unsigned long pa;
-
-	/* I don't know why this won't work on PMacs or CHRP.  It
-	 * appears there is some bug, or there is some implicit
-	 * mapping done not properly represented by BATs or in page
-	 * tables.......I am actively working on resolving this, but
-	 * can't hold up other stuff.  -- Dan
-	 */
-	pte_t *pte;
-	struct mm_struct *mm;
-
-	/* Check the BATs */
-	pa = v_mapped_by_bats(addr);
-	if (pa)
-		return pa;
-
-	/* Allow mapping of user addresses (within the thread)
-	 * for DMA if necessary.
-	 */
-	if (addr < TASK_SIZE)
-		mm = current->mm;
-	else
-		mm = &init_mm;
-
-	pa = 0;
-	if (get_pteptr(mm, addr, &pte, NULL)) {
-		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
-		pte_unmap(pte);
-	}
-
-	return(pa);
-}
-
-/* This is will find the virtual address for a physical one....
- * Swiped from APUS, could be dangerous :-).
- * This is only a placeholder until I really find a way to make this
- * work.  -- Dan
- */
-unsigned long
-mm_ptov (unsigned long paddr)
-{
-	unsigned long ret;
-#if 0
-	if (paddr < 16*1024*1024)
-		ret = ZTWO_VADDR(paddr);
-	else {
-		int i;
-
-		for (i = 0; i < kmap_chunk_count;){
-			unsigned long phys = kmap_chunks[i++];
-			unsigned long size = kmap_chunks[i++];
-			unsigned long virt = kmap_chunks[i++];
-			if (paddr >= phys
-			    && paddr < (phys + size)){
-				ret = virt + paddr - phys;
-				goto exit;
-			}
-		}
-	
-		ret = (unsigned long) __va(paddr);
-	}
-exit:
-#ifdef DEBUGPV
-	printk ("PTOV(%lx)=%lx\n", paddr, ret);
-#endif
-#else
-	ret = (unsigned long)paddr + KERNELBASE;
-#endif
-	return ret;
-}
-
 #ifdef CONFIG_DEBUG_PAGEALLOC
 
 static int __change_page_attr(struct page *page, pgprot_t prot)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ad6e135..3dfd10d 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -7,7 +7,6 @@
  *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  *  Derived from "arch/i386/mm/init.c"
  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
@@ -34,41 +33,27 @@
 #include <linux/stddef.h>
 #include <linux/vmalloc.h>
 #include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/highmem.h>
-#include <linux/idr.h>
-#include <linux/nodemask.h>
-#include <linux/module.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
 #include <asm/prom.h>
-#include <asm/lmb.h>
-#include <asm/rtas.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
-#include <asm/uaccess.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
 #include <asm/tlb.h>
-#include <asm/eeh.h>
 #include <asm/processor.h>
-#include <asm/mmzone.h>
 #include <asm/cputable.h>
 #include <asm/sections.h>
 #include <asm/system.h>
-#include <asm/iommu.h>
 #include <asm/abs_addr.h>
-#include <asm/vdso.h>
 #include <asm/firmware.h>
 
 #include "mmu_decl.h"
 
-unsigned long ioremap_bot = IMALLOC_BASE;
-static unsigned long phbs_io_bot = PHBS_IO_BASE;
+unsigned long ioremap_bot = IOREMAP_BASE;
 
 /*
  * map_io_page currently only called by __ioremap
@@ -102,8 +87,8 @@
 		 * entry in the hardware page table.
 		 *
 		 */
-		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
-				      mmu_io_psize)) {
+		if (htab_bolt_mapping(ea, (unsigned long)ea + PAGE_SIZE,
+				      pa, flags, mmu_io_psize)) {
 			printk(KERN_ERR "Failed to do bolted mapping IO "
 			       "memory at %016lx !\n", pa);
 			return -ENOMEM;
@@ -113,8 +98,11 @@
 }
 
 
-static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
-			    unsigned long ea, unsigned long size,
+/**
+ * __ioremap_at - Low level function to establish the page tables
+ *                for an IO mapping
+ */
+void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
 			    unsigned long flags)
 {
 	unsigned long i;
@@ -122,17 +110,35 @@
 	if ((flags & _PAGE_PRESENT) == 0)
 		flags |= pgprot_val(PAGE_KERNEL);
 
+	WARN_ON(pa & ~PAGE_MASK);
+	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+	WARN_ON(size & ~PAGE_MASK);
+
 	for (i = 0; i < size; i += PAGE_SIZE)
-		if (map_io_page(ea+i, pa+i, flags))
+		if (map_io_page((unsigned long)ea+i, pa+i, flags))
 			return NULL;
 
-	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
+	return (void __iomem *)ea;
+}
+
+/**
+ * __iounmap_at - Low level function to tear down the page tables
+ *                  for an IO mapping. This is used for mappings that
+ *                  are manipulated manually, like partial unmapping of
+ *                  PCI IOs or ISA space.
+ */
+void __iounmap_at(void *ea, unsigned long size)
+{
+	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+	WARN_ON(size & ~PAGE_MASK);
+
+	unmap_kernel_range((unsigned long)ea, size);
 }
 
 void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
 			 unsigned long flags)
 {
-	unsigned long pa, ea;
+	phys_addr_t paligned;
 	void __iomem *ret;
 
 	/*
@@ -144,27 +150,30 @@
 	 * IMALLOC_END
 	 * 
 	 */
-	pa = addr & PAGE_MASK;
-	size = PAGE_ALIGN(addr + size) - pa;
+	paligned = addr & PAGE_MASK;
+	size = PAGE_ALIGN(addr + size) - paligned;
 
-	if ((size == 0) || (pa == 0))
+	if ((size == 0) || (paligned == 0))
 		return NULL;
 
 	if (mem_init_done) {
 		struct vm_struct *area;
-		area = im_get_free_area(size);
+
+		area = __get_vm_area(size, VM_IOREMAP,
+				     ioremap_bot, IOREMAP_END);
 		if (area == NULL)
 			return NULL;
-		ea = (unsigned long)(area->addr);
-		ret = __ioremap_com(addr, pa, ea, size, flags);
+		ret = __ioremap_at(paligned, area->addr, size, flags);
 		if (!ret)
-			im_free(area->addr);
+			vunmap(area->addr);
 	} else {
-		ea = ioremap_bot;
-		ret = __ioremap_com(addr, pa, ea, size, flags);
+		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
 		if (ret)
 			ioremap_bot += size;
 	}
+
+	if (ret)
+		ret += addr & ~PAGE_MASK;
 	return ret;
 }
 
@@ -187,62 +196,9 @@
 }
 
 
-#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
-
-int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
-		       unsigned long size, unsigned long flags)
-{
-	struct vm_struct *area;
-	void __iomem *ret;
-	
-	/* For now, require page-aligned values for pa, ea, and size */
-	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
-	    !IS_PAGE_ALIGNED(size)) {
-		printk(KERN_ERR	"unaligned value in %s\n", __FUNCTION__);
-		return 1;
-	}
-	
-	if (!mem_init_done) {
-		/* Two things to consider in this case:
-		 * 1) No records will be kept (imalloc, etc) that the region
-		 *    has been remapped
-		 * 2) It won't be easy to iounmap() the region later (because
-		 *    of 1)
-		 */
-		;
-	} else {
-		area = im_get_area(ea, size,
-			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
-		if (area == NULL) {
-			/* Expected when PHB-dlpar is in play */
-			return 1;
-		}
-		if (ea != (unsigned long) area->addr) {
-			printk(KERN_ERR "unexpected addr return from "
-			       "im_get_area\n");
-			return 1;
-		}
-	}
-	
-	ret = __ioremap_com(pa, pa, ea, size, flags);
-	if (ret == NULL) {
-		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
-		return 1;
-	}
-	if (ret != (void *) ea) {
-		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
-		return 1;
-	}
-
-	return 0;
-}
-
 /*  
  * Unmap an IO region and remove it from imalloc'd list.
  * Access to IO memory should be serialized by driver.
- * This code is modeled after vmalloc code - unmap_vm_area()
- *
- * XXX	what about calls before mem_init_done (ie python_countermeasures())
  */
 void __iounmap(volatile void __iomem *token)
 {
@@ -251,9 +207,14 @@
 	if (!mem_init_done)
 		return;
 	
-	addr = (void *) ((unsigned long __force) token & PAGE_MASK);
-
-	im_free(addr);
+	addr = (void *) ((unsigned long __force)
+			 PCI_FIX_ADDR(token) & PAGE_MASK);
+	if ((unsigned long)addr < ioremap_bot) {
+		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
+		       " at 0x%p\n", addr);
+		return;
+	}
+	vunmap(addr);
 }
 
 void iounmap(volatile void __iomem *token)
@@ -264,77 +225,8 @@
 		__iounmap(token);
 }
 
-static int iounmap_subset_regions(unsigned long addr, unsigned long size)
-{
-	struct vm_struct *area;
-
-	/* Check whether subsets of this region exist */
-	area = im_get_area(addr, size, IM_REGION_SUPERSET);
-	if (area == NULL)
-		return 1;
-
-	while (area) {
-		iounmap((void __iomem *) area->addr);
-		area = im_get_area(addr, size,
-				IM_REGION_SUPERSET);
-	}
-
-	return 0;
-}
-
-int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
-{
-	struct vm_struct *area;
-	unsigned long addr;
-	int rc;
-	
-	addr = (unsigned long __force) start & PAGE_MASK;
-
-	/* Verify that the region either exists or is a subset of an existing
-	 * region.  In the latter case, split the parent region to create 
-	 * the exact region 
-	 */
-	area = im_get_area(addr, size, 
-			    IM_REGION_EXISTS | IM_REGION_SUBSET);
-	if (area == NULL) {
-		/* Determine whether subset regions exist.  If so, unmap */
-		rc = iounmap_subset_regions(addr, size);
-		if (rc) {
-			printk(KERN_ERR
-			       "%s() cannot unmap nonexistent range 0x%lx\n",
- 				__FUNCTION__, addr);
-			return 1;
-		}
-	} else {
-		iounmap((void __iomem *) area->addr);
-	}
-	/*
-	 * FIXME! This can't be right:
-	iounmap(area->addr);
-	 * Maybe it should be "iounmap(area);"
-	 */
-	return 0;
-}
-
 EXPORT_SYMBOL(ioremap);
 EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(__iounmap);
-
-static DEFINE_SPINLOCK(phb_io_lock);
-
-void __iomem * reserve_phb_iospace(unsigned long size)
-{
-	void __iomem *virt_addr;
-		
-	if (phbs_io_bot >= IMALLOC_BASE) 
-		panic("reserve_phb_iospace(): phb io space overflow\n");
-			
-	spin_lock(&phb_io_lock);
-	virt_addr = (void __iomem *) phbs_io_bot;
-	phbs_io_bot += size;
-	spin_unlock(&phb_io_lock);
-
-	return virt_addr;
-}
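
The new __ioremap_at()/__iounmap_at() pair above exposes the page-table setup and teardown for an IO mapping at a caller-chosen virtual address, rather than having __ioremap() pick one from the ioremap region. A minimal sketch of a hypothetical caller follows; the example_* names, phb_io_phys/phb_io_virt and PHB_IO_SIZE are illustrative only, and _PAGE_NO_CACHE | _PAGE_GUARDED is assumed as the usual IO page flags:

	/* Map a bridge's IO window at a fixed kernel virtual address and
	 * tear it down again later.  All names here are illustrative. */
	static int example_map_phb_io(phys_addr_t phb_io_phys, void *phb_io_virt)
	{
		/* pa, ea and size must be page aligned; __ioremap_at()
		 * returns ea on success, NULL on failure. */
		if (__ioremap_at(phb_io_phys, phb_io_virt, PHB_IO_SIZE,
				 _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
			return -ENOMEM;
		return 0;
	}

	static void example_unmap_phb_io(void *phb_io_virt)
	{
		/* Partial/manual teardown, e.g. on PCI hotplug remove */
		__iounmap_at(phb_io_virt, PHB_IO_SIZE);
	}

Unlike __ioremap(), these helpers leave the virtual-address bookkeeping to the caller, which is why the generic __ioremap() path above now pairs them with __get_vm_area()/vunmap().
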
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index ec1421a..5c45d47 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -11,7 +11,6 @@
  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  *  Derived from "arch/i386/mm/init.c"
  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
@@ -35,12 +34,12 @@
 
 #include "mmu_decl.h"
 
-PTE *Hash, *Hash_end;
+struct hash_pte *Hash, *Hash_end;
 unsigned long Hash_size, Hash_mask;
 unsigned long _SDR1;
 
 union ubat {			/* BAT register values to be loaded */
-	BAT	bat;
+	struct ppc_bat bat;
 	u32	word[2];
 } BATS[8][2];			/* 8 pairs of IBAT, DBAT */
 
@@ -245,7 +244,7 @@
 	cacheable_memzero(Hash, Hash_size);
 	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
 
-	Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
+	Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
 
 	printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
 	       total_memory >> 20, Hash_size >> 10, Hash);
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 132c6bc..28492bb 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -55,7 +55,7 @@
 		for (entry = 0; entry < 8; entry++, ste++) {
 			if (!(ste->esid_data & STE_ESID_V)) {
 				ste->vsid_data = vsid_data;
-				asm volatile("eieio":::"memory");
+				eieio();
 				ste->esid_data = esid_data;
 				return (global_entry | entry);
 			}
@@ -101,7 +101,7 @@
 	asm volatile("sync" : : : "memory");    /* Order update */
 
 	castout_ste->vsid_data = vsid_data;
-	asm volatile("eieio" : : : "memory");   /* Order update */
+	eieio();				/* Order update */
 	castout_ste->esid_data = esid_data;
 
 	asm volatile("slbie  %0" : : "r" (old_esid << SID_SHIFT));
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index 6a69417..06c7e77 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -11,7 +11,6 @@
  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  *  Derived from "arch/i386/mm/init.c"
  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 2bfc4d7..cbd34fc 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -8,7 +8,6 @@
  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  *  Derived from "arch/i386/mm/init.c"
  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
@@ -239,3 +238,59 @@
 	pte_free_submit(*batchp);
 	*batchp = NULL;
 }
+
+/**
+ * __flush_hash_table_range - Flush all HPTEs for a given address range
+ *                            from the hash table (and the TLB), but keep
+ *                            the Linux PTEs intact.
+ *
+ * @mm		: mm_struct of the target address space (generally init_mm)
+ * @start	: starting address
+ * @end         : ending address (not included in the flush)
+ *
+ * This function is mostly to be used by some IO hotplug code in order
+ * to remove all hash entries from a given address range used to map IO
+ * space on a removed PCI-PCI bridge without tearing down the full mapping,
+ * since a 64K page may overlap with other bridges when using 64K pages
+ * with 4K HW pages on IO space.
+ *
+ * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
+ * and is implemented for small size rather than speed.
+ */
+#ifdef CONFIG_HOTPLUG
+
+void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
+			      unsigned long end)
+{
+	unsigned long flags;
+
+	start = _ALIGN_DOWN(start, PAGE_SIZE);
+	end = _ALIGN_UP(end, PAGE_SIZE);
+
+	BUG_ON(!mm->pgd);
+
+	/* Note: Normally, we should only ever use a batch within a
+	 * PTE locked section. This violates the rule, but will work
+	 * since we don't actually modify the PTEs, we just flush the
+	 * hash while leaving the PTEs intact (including their reference
+	 * to being hashed). This is not the most performance oriented
+	 * way to do things but is fine for our needs here.
+	 */
+	local_irq_save(flags);
+	arch_enter_lazy_mmu_mode();
+	for (; start < end; start += PAGE_SIZE) {
+		pte_t *ptep = find_linux_pte(mm->pgd, start);
+		unsigned long pte;
+
+		if (ptep == NULL)
+			continue;
+		pte = pte_val(*ptep);
+		if (!(pte & _PAGE_HASHPTE))
+			continue;
+		hpte_need_flush(mm, start, ptep, pte, 0);
+	}
+	arch_leave_lazy_mmu_mode();
+	local_irq_restore(flags);
+}
+
+#endif /* CONFIG_HOTPLUG */
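
As the comment above says, __flush_hash_table_range() is intended for IO hotplug teardown paths that must scrub hash entries for an address range while leaving the Linux PTEs alone. A minimal sketch of a hypothetical caller, assuming the bolted IO mappings live in init_mm (the helper page-aligns the range itself); the example name and window bounds are illustrative:

	/* Drop the HPTEs (and TLB entries) covering a removed bridge's
	 * IO window; the Linux PTEs stay in place. */
	static void example_scrub_phb_io_hash(unsigned long io_start,
					      unsigned long io_end)
	{
		__flush_hash_table_range(&init_mm, io_start, io_end);
	}
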
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index fe597a1..a7c206b 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -1,5 +1,7 @@
 /*
  * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Added mmcra[slot] support:
+ * Copyright (C) 2006-2007 Will Schmidt <willschm@us.ibm.com>, IBM
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -181,11 +183,17 @@
  * On GQ and newer the MMCRA stores the HV and PR bits at the time
  * the SIAR was sampled. We use that to work out if the SIAR was sampled in
  * the hypervisor, our exception vectors or RTAS.
+ * If the MMCRA_SAMPLE_ENABLE bit is set, we can use the MMCRA[slot] bits
+ * to more accurately identify the address of the sampled instruction. The
+ * mmcra[slot] bits represent the slot number of a sampled instruction
+ * within an instruction group.  The slot will contain a value between 1
+ * and 5 if MMCRA_SAMPLE_ENABLE is set, otherwise 0.
  */
 static unsigned long get_pc(struct pt_regs *regs)
 {
 	unsigned long pc = mfspr(SPRN_SIAR);
 	unsigned long mmcra;
+	unsigned long slot;
 
 	/* Cant do much about it */
 	if (!cur_cpu_spec->oprofile_mmcra_sihv)
@@ -193,6 +201,12 @@
 
 	mmcra = mfspr(SPRN_MMCRA);
 
+	if (mmcra & MMCRA_SAMPLE_ENABLE) {
+		slot = ((mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT);
+		if (slot > 1)
+			pc += 4 * (slot - 1);
+	}
+
 	/* Were we in the hypervisor? */
 	if (firmware_has_feature(FW_FEATURE_LPAR) &&
 	    (mmcra & cur_cpu_spec->oprofile_mmcra_sihv))
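
With the slot adjustment above, a sample whose SIAR points at the first instruction of a group and whose mmcra[slot] field reads 3 is, for example, reported at SIAR + 8, i.e. 4 * (3 - 1) bytes further on, the third 4-byte instruction of that group; when MMCRA_SAMPLE_ENABLE is clear the slot field is 0 and the pc is left untouched.
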
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index f591a9f..4be6e7a 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -54,7 +54,7 @@
 	struct pci_controller *hose = bus->sysdata;
 	unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
 	    | (((bus->number - hose->first_busno) & 0xff) << 16)
-	    | (hose->index << 24);
+	    | (hose->global_number << 24);
 	int ret = -1;
 	int rval;
 
@@ -69,7 +69,7 @@
 	struct pci_controller *hose = bus->sysdata;
 	unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
 	    | (((bus->number - hose->first_busno) & 0xff) << 16)
-	    | (hose->index << 24);
+	    | (hose->global_number << 24);
 	int rval;
 
 	rval = rtas_call(rtas_token("write-pci-config"), 3, 1, NULL,
@@ -83,7 +83,7 @@
 };
 
 
-void __init efika_pcisetup(void)
+static void __init efika_pcisetup(void)
 {
 	const int *bus_range;
 	int len;
@@ -128,7 +128,7 @@
 	printk(" controlled by %s\n", pcictrl->full_name);
 	printk("\n");
 
-	hose = pcibios_alloc_controller();
+	hose = pcibios_alloc_controller(of_node_get(pcictrl));
 	if (!hose) {
 		printk(KERN_WARNING EFIKA_PLATFORM_NAME
 		       ": Can't allocate PCI controller structure for %s\n",
@@ -136,7 +136,6 @@
 		return;
 	}
 
-	hose->arch_data = of_node_get(pcictrl);
 	hose->first_busno = bus_range[0];
 	hose->last_busno = bus_range[1];
 	hose->ops = &rtas_pci_ops;
@@ -145,7 +144,7 @@
 }
 
 #else
-void __init efika_pcisetup(void)
+static void __init efika_pcisetup(void)
 {}
 #endif
 
@@ -252,6 +251,8 @@
 	.progress		= rtas_progress,
 	.get_boot_time		= rtas_get_boot_time,
 	.calibrate_decr		= generic_calibrate_decr,
+#ifdef CONFIG_PCI
 	.phys_mem_access_prot	= pci_phys_mem_access_prot,
+#endif
 };
 
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index 1cfc00d..5c46e89 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -156,7 +156,7 @@
 
 }
 
-void lite5200_show_cpuinfo(struct seq_file *m)
+static void lite5200_show_cpuinfo(struct seq_file *m)
 {
 	struct device_node* np = of_find_all_nodes(NULL);
 	const char *model = NULL;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
index 34d34a2..4c6c82a 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
@@ -112,18 +112,18 @@
 	u32 value;
 
 	if (ppc_md.pci_exclude_device)
-		if (ppc_md.pci_exclude_device(bus->number, devfn))
+		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
 			return PCIBIOS_DEVICE_NOT_FOUND;
 
 	out_be32(hose->cfg_addr,
 		(1 << 31) |
-		((bus->number - hose->bus_offset) << 16) |
+		(bus->number << 16) |
 		(devfn << 8) |
 		(offset & 0xfc));
 	mb();
 
 #if defined(CONFIG_PPC_MPC5200_BUGFIX)
-	if (bus->number != hose->bus_offset) {
+	if (bus->number) {
 		/* workaround for the bug 435 of the MPC5200 (L25R);
 		 * Don't do 32 bits config access during type-1 cycles */
 		switch (len) {
@@ -169,18 +169,18 @@
 	u32 value, mask;
 
 	if (ppc_md.pci_exclude_device)
-		if (ppc_md.pci_exclude_device(bus->number, devfn))
+		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
 			return PCIBIOS_DEVICE_NOT_FOUND;
 
 	out_be32(hose->cfg_addr,
 		(1 << 31) |
-		((bus->number - hose->bus_offset) << 16) |
+		(bus->number << 16) |
 		(devfn << 8) |
 		(offset & 0xfc));
 	mb();
 
 #if defined(CONFIG_PPC_MPC5200_BUGFIX)
-	if (bus->number != hose->bus_offset) {
+	if (bus->number) {
 		/* workaround for the bug 435 of the MPC5200 (L25R);
 		 * Don't do 32 bits config access during type-1 cycles */
 		switch (len) {
@@ -385,17 +385,13 @@
 	 * tree are needed to configure the 52xx PCI controller.  Rather
 	 * than parse the tree here, let pci_process_bridge_OF_ranges()
 	 * do it for us and extract the values after the fact */
-	hose = pcibios_alloc_controller();
+	hose = pcibios_alloc_controller(node);
 	if (!hose)
 		return -ENOMEM;
 
-	hose->arch_data = node;
-	hose->set_cfg_type = 1;
-
 	hose->first_busno = bus_range ? bus_range[0] : 0;
 	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 
-	hose->bus_offset = 0;
 	hose->ops = &mpc52xx_pci_ops;
 
 	pci_regs = ioremap(rsrc.start, rsrc.end - rsrc.start + 1);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
index fd40044..ee2e763 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -9,8 +9,8 @@
 
 
 /* these are defined in mpc52xx_sleep.S, and only used here */
-extern void mpc52xx_deep_sleep(void *sram, void *sdram_regs,
-		struct mpc52xx_cdm *, struct mpc52xx_intr *);
+extern void mpc52xx_deep_sleep(void __iomem *sram, void __iomem *sdram_regs,
+		struct mpc52xx_cdm __iomem *, struct mpc52xx_intr __iomem*);
 extern void mpc52xx_ds_sram(void);
 extern const long mpc52xx_ds_sram_size;
 extern void mpc52xx_ds_cached(void);
@@ -21,7 +21,7 @@
 static struct mpc52xx_cdm __iomem *cdm;
 static struct mpc52xx_intr __iomem *intr;
 static struct mpc52xx_gpio_wkup __iomem *gpiow;
-static void *sram;
+static void __iomem *sram;
 static int sram_size;
 
 struct mpc52xx_suspend mpc52xx_suspend;
@@ -100,7 +100,7 @@
 	u32 clk_enables;
 	u32 msr, hid0;
 	u32 intr_main_mask;
-	void __iomem * irq_0x500 = (void *)CONFIG_KERNEL_START + 0x500;
+	void __iomem * irq_0x500 = (void __iomem *)CONFIG_KERNEL_START + 0x500;
 	unsigned long irq_0x500_stop = (unsigned long)irq_0x500 + mpc52xx_ds_cached_size;
 	char saved_0x500[mpc52xx_ds_cached_size];
 
diff --git a/arch/powerpc/platforms/82xx/Kconfig b/arch/powerpc/platforms/82xx/Kconfig
index de7fce9..89fde43 100644
--- a/arch/powerpc/platforms/82xx/Kconfig
+++ b/arch/powerpc/platforms/82xx/Kconfig
@@ -1,5 +1,5 @@
 choice
-	prompt "Machine Type"
+	prompt "82xx Board Type"
 	depends on PPC_82xx
 	default MPC82xx_ADS
 
diff --git a/arch/powerpc/platforms/82xx/mpc82xx_ads.c b/arch/powerpc/platforms/82xx/mpc82xx_ads.c
index 47cb09f..da20832 100644
--- a/arch/powerpc/platforms/82xx/mpc82xx_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc82xx_ads.c
@@ -49,7 +49,7 @@
 #include <linux/fs_enet_pd.h>
 
 #include <sysdev/fsl_soc.h>
-#include <../sysdev/cpm2_pic.h>
+#include <sysdev/cpm2_pic.h>
 
 #include "pq2ads.h"
 
@@ -507,7 +507,8 @@
 	return;
 }
 
-static int m82xx_pci_exclude_device(u_char bus, u_char devfn)
+static int m82xx_pci_exclude_device(struct pci_controller *hose,
+				    u_char bus, u_char devfn)
 {
 	if (bus == 0 && PCI_SLOT(devfn) == 0)
 		return PCIBIOS_DEVICE_NOT_FOUND;
@@ -515,7 +516,7 @@
 		return PCIBIOS_SUCCESSFUL;
 }
 
-void __init add_bridge(struct device_node *np)
+static void __init mpc82xx_add_bridge(struct device_node *np)
 {
 	int len;
 	struct pci_controller *hose;
@@ -542,19 +543,13 @@
 
 	pci_assign_all_buses = 1;
 
-	hose = pcibios_alloc_controller();
+	hose = pcibios_alloc_controller(np);
 
 	if (!hose)
 		return;
 
-	hose->arch_data = np;
-	hose->set_cfg_type = 1;
-
 	hose->first_busno = bus_range ? bus_range[0] : 0;
 	hose->last_busno = bus_range ? bus_range[1] : 0xff;
-	hose->bus_offset = 0;
-
-	hose->set_cfg_type = 1;
 
 	setup_indirect_pci(hose,
 			   r.start + offsetof(pci_cpm2_t, pci_cfg_addr),
@@ -584,7 +579,7 @@
 #ifdef CONFIG_PCI
 	ppc_md.pci_exclude_device = m82xx_pci_exclude_device;
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
-		add_bridge(np);
+		mpc82xx_add_bridge(np);
 
 	of_node_put(np);
 #endif
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 19cafdf..ec305f1 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -1,5 +1,5 @@
 choice
-	prompt "Machine Type"
+	prompt "83xx Board Type"
 	depends on PPC_83xx
 	default MPC834x_MDS
 
diff --git a/arch/powerpc/platforms/83xx/Makefile b/arch/powerpc/platforms/83xx/Makefile
index 31a91b5..5a98f88 100644
--- a/arch/powerpc/platforms/83xx/Makefile
+++ b/arch/powerpc/platforms/83xx/Makefile
@@ -1,7 +1,7 @@
 #
 # Makefile for the PowerPC 83xx linux kernel.
 #
-obj-y				:= misc.o
+obj-y				:= misc.o usb.o
 obj-$(CONFIG_PCI)		+= pci.o
 obj-$(CONFIG_MPC8313_RDB)	+= mpc8313_rdb.o
 obj-$(CONFIG_MPC832x_RDB)	+= mpc832x_rdb.o
diff --git a/arch/powerpc/platforms/83xx/mpc8313_rdb.c b/arch/powerpc/platforms/83xx/mpc8313_rdb.c
index 96970ac..3edfe17 100644
--- a/arch/powerpc/platforms/83xx/mpc8313_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc8313_rdb.c
@@ -28,11 +28,6 @@
 #define DBG(fmt...)
 #endif
 
-#ifndef CONFIG_PCI
-unsigned long isa_io_base = 0;
-unsigned long isa_mem_base = 0;
-#endif
-
 /* ************************************************************************
  *
  * Setup the architecture
@@ -49,10 +44,11 @@
 
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
-		add_bridge(np);
+		mpc83xx_add_bridge(np);
 
 	ppc_md.pci_exclude_device = mpc83xx_exclude_device;
 #endif
+	mpc831x_usb_cfg();
 }
 
 void __init mpc8313_rdb_init_IRQ(void)
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index 94843ed..b39cb52 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -49,11 +49,6 @@
 #define DBG(fmt...)
 #endif
 
-#ifndef CONFIG_PCI
-unsigned long isa_io_base = 0;
-unsigned long isa_mem_base = 0;
-#endif
-
 static u8 *bcsr_regs = NULL;
 
 /* ************************************************************************
@@ -80,7 +75,7 @@
 
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
-		add_bridge(np);
+		mpc83xx_add_bridge(np);
 	ppc_md.pci_exclude_device = mpc83xx_exclude_device;
 #endif
 
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 3db68b7..b2b28a4 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -32,11 +32,6 @@
 #define DBG(fmt...)
 #endif
 
-#ifndef CONFIG_PCI
-unsigned long isa_io_base = 0;
-unsigned long isa_mem_base = 0;
-#endif
-
 /* ************************************************************************
  *
  * Setup the architecture
@@ -53,7 +48,7 @@
 
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
-		add_bridge(np);
+		mpc83xx_add_bridge(np);
 
 	ppc_md.pci_exclude_device = mpc83xx_exclude_device;
 #endif
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index 40a0194..47ba544 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -38,11 +38,6 @@
 
 #include "mpc83xx.h"
 
-#ifndef CONFIG_PCI
-unsigned long isa_io_base = 0;
-unsigned long isa_mem_base = 0;
-#endif
-
 /* ************************************************************************
  *
  * Setup the architecture
@@ -59,10 +54,12 @@
 
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
-		add_bridge(np);
+		mpc83xx_add_bridge(np);
 
 	ppc_md.pci_exclude_device = mpc83xx_exclude_device;
 #endif
+
+	mpc834x_usb_cfg();
 }
 
 static void __init mpc834x_itx_init_IRQ(void)
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index 10394b2..4c9ff9c 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -38,61 +38,17 @@
 
 #include "mpc83xx.h"
 
-#ifndef CONFIG_PCI
-unsigned long isa_io_base = 0;
-unsigned long isa_mem_base = 0;
-#endif
-
 #define BCSR5_INT_USB		0x02
-/* Note: This is only for PB, not for PB+PIB
- * On PB only port0 is connected using ULPI */
-static int mpc834x_usb_cfg(void)
+static int mpc834xemds_usb_cfg(void)
 {
-	unsigned long sccr, sicrl;
-	void __iomem *immap;
+	struct device_node *np;
 	void __iomem *bcsr_regs = NULL;
 	u8 bcsr5;
-	struct device_node *np = NULL;
-	int port0_is_dr = 0;
 
-	if ((np = of_find_compatible_node(NULL, "usb", "fsl-usb2-dr")) != NULL)
-		port0_is_dr = 1;
-	if ((np = of_find_compatible_node(NULL, "usb", "fsl-usb2-mph")) != NULL){
-		if (port0_is_dr) {
-			printk(KERN_WARNING
-				"There is only one USB port on PB board! \n");
-			return -1;
-		} else if (!port0_is_dr)
-			/* No usb port enabled */
-			return -1;
-	}
-
-	immap = ioremap(get_immrbase(), 0x1000);
-	if (!immap)
-		return -1;
-
-	/* Configure clock */
-	sccr = in_be32(immap + MPC83XX_SCCR_OFFS);
-	if (port0_is_dr)
-		sccr |= MPC83XX_SCCR_USB_DRCM_11;  /* 1:3 */
-	else
-		sccr |= MPC83XX_SCCR_USB_MPHCM_11; /* 1:3 */
-	out_be32(immap + MPC83XX_SCCR_OFFS, sccr);
-
-	/* Configure Pin */
-	sicrl = in_be32(immap + MPC83XX_SICRL_OFFS);
-	/* set port0 only */
-	if (port0_is_dr)
-		sicrl |= MPC83XX_SICRL_USB0;
-	else
-		sicrl &= ~(MPC83XX_SICRL_USB0);
-	out_be32(immap + MPC83XX_SICRL_OFFS, sicrl);
-
-	iounmap(immap);
-
+	mpc834x_usb_cfg();
 	/* Map BCSR area */
 	np = of_find_node_by_name(NULL, "bcsr");
-	if (np != 0) {
+	if (np) {
 		struct resource res;
 
 		of_address_to_resource(np, 0, &res);
@@ -129,12 +85,12 @@
 
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
-		add_bridge(np);
+		mpc83xx_add_bridge(np);
 
 	ppc_md.pci_exclude_device = mpc83xx_exclude_device;
 #endif
 
-	mpc834x_usb_cfg();
+	mpc834xemds_usb_cfg();
 }
 
 static void __init mpc834x_mds_init_IRQ(void)
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index bceeff8..0e615fd 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -55,11 +55,6 @@
 #define DBG(fmt...)
 #endif
 
-#ifndef CONFIG_PCI
-unsigned long isa_io_base = 0;
-unsigned long isa_mem_base = 0;
-#endif
-
 static u8 *bcsr_regs = NULL;
 
 /* ************************************************************************
@@ -86,7 +81,7 @@
 
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
-		add_bridge(np);
+		mpc83xx_add_bridge(np);
 	ppc_md.pci_exclude_device = mpc83xx_exclude_device;
 #endif
 
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 9cd03b5..589ee55 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -3,9 +3,11 @@
 
 #include <linux/init.h>
 #include <linux/device.h>
+#include <asm/pci-bridge.h>
 
 /* System Clock Control Register */
 #define MPC83XX_SCCR_OFFS          0xA08
+#define MPC83XX_SCCR_USB_MASK      0x00f00000
 #define MPC83XX_SCCR_USB_MPHCM_11  0x00c00000
 #define MPC83XX_SCCR_USB_MPHCM_01  0x00400000
 #define MPC83XX_SCCR_USB_MPHCM_10  0x00800000
@@ -15,21 +17,43 @@
 
 /* system i/o configuration register low */
 #define MPC83XX_SICRL_OFFS         0x114
-#define MPC83XX_SICRL_USB0         0x40000000
-#define MPC83XX_SICRL_USB1         0x20000000
+#define MPC834X_SICRL_USB_MASK     0x60000000
+#define MPC834X_SICRL_USB0         0x40000000
+#define MPC834X_SICRL_USB1         0x20000000
+#define MPC831X_SICRL_USB_MASK     0x00000c00
+#define MPC831X_SICRL_USB_ULPI     0x00000800
 
 /* system i/o configuration register high */
 #define MPC83XX_SICRH_OFFS         0x118
-#define MPC83XX_SICRH_USB_UTMI     0x00020000
+#define MPC834X_SICRH_USB_UTMI     0x00020000
+#define MPC831X_SICRH_USB_MASK     0x000000e0
+#define MPC831X_SICRH_USB_ULPI     0x000000a0
+
+/* USB Control Register */
+#define FSL_USB2_CONTROL_OFFS      0x500
+#define CONTROL_UTMI_PHY_EN        0x00000200
+#define CONTROL_REFSEL_48MHZ       0x00000080
+#define CONTROL_PHY_CLK_SEL_ULPI   0x00000400
+#define CONTROL_OTG_PORT           0x00000020
+
+/* USB PORTSC Registers */
+#define FSL_USB2_PORTSC1_OFFS      0x184
+#define FSL_USB2_PORTSC2_OFFS      0x188
+#define PORTSCX_PTW_16BIT          0x10000000
+#define PORTSCX_PTS_UTMI           0x00000000
+#define PORTSCX_PTS_ULPI           0x80000000
 
 /*
  * Declaration for the various functions exported by the
  * mpc83xx_* files. Mostly for use by mpc83xx_setup
  */
 
-extern int add_bridge(struct device_node *dev);
-extern int mpc83xx_exclude_device(u_char bus, u_char devfn);
+extern int mpc83xx_add_bridge(struct device_node *dev);
+extern int mpc83xx_exclude_device(struct pci_controller *hose,
+				  u_char bus, u_char devfn);
 extern void mpc83xx_restart(char *cmd);
 extern long mpc83xx_time_init(void);
+extern int mpc834x_usb_cfg(void);
+extern int mpc831x_usb_cfg(void);
 
 #endif				/* __MPC83XX_H__ */
diff --git a/arch/powerpc/platforms/83xx/pci.c b/arch/powerpc/platforms/83xx/pci.c
index 774457d0..c0e2b89 100644
--- a/arch/powerpc/platforms/83xx/pci.c
+++ b/arch/powerpc/platforms/83xx/pci.c
@@ -33,19 +33,14 @@
 #define DBG(x...)
 #endif
 
-int mpc83xx_pci2_busno;
-
-int mpc83xx_exclude_device(u_char bus, u_char devfn)
+int mpc83xx_exclude_device(struct pci_controller *hose, u_char bus, u_char devfn)
 {
-	if (bus == 0 && PCI_SLOT(devfn) == 0)
+	if ((bus == hose->first_busno) && PCI_SLOT(devfn) == 0)
 		return PCIBIOS_DEVICE_NOT_FOUND;
-	if (mpc83xx_pci2_busno)
-		if (bus == (mpc83xx_pci2_busno) && PCI_SLOT(devfn) == 0)
-			return PCIBIOS_DEVICE_NOT_FOUND;
 	return PCIBIOS_SUCCESSFUL;
 }
 
-int __init add_bridge(struct device_node *dev)
+int __init mpc83xx_add_bridge(struct device_node *dev)
 {
 	int len;
 	struct pci_controller *hose;
@@ -66,11 +61,10 @@
 		       " bus 0\n", dev->full_name);
 	}
 
-	hose = pcibios_alloc_controller();
+	pci_assign_all_buses = 1;
+	hose = pcibios_alloc_controller(dev);
 	if (!hose)
 		return -ENOMEM;
-	hose->arch_data = dev;
-	hose->set_cfg_type = 1;
 
 	hose->first_busno = bus_range ? bus_range[0] : 0;
 	hose->last_busno = bus_range ? bus_range[1] : 0xff;
@@ -86,8 +80,6 @@
 	if ((rsrc.start & 0xfffff) == 0x8600) {
 		setup_indirect_pci(hose, immr + 0x8380, immr + 0x8384);
 		primary = 0;
-		hose->bus_offset = hose->first_busno;
-		mpc83xx_pci2_busno = hose->first_busno;
 	}
 
 	printk(KERN_INFO "Found MPC83xx PCI host bridge at 0x%016llx. "
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
new file mode 100644
index 0000000..e7fdf01
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -0,0 +1,181 @@
+/*
+ * Freescale 83xx USB SOC setup code
+ *
+ * Copyright (C) 2007 Freescale Semiconductor, Inc.
+ * Author: Li Yang
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <sysdev/fsl_soc.h>
+
+#include "mpc83xx.h"
+
+
+#ifdef CONFIG_MPC834x
+int mpc834x_usb_cfg(void)
+{
+	unsigned long sccr, sicrl, sicrh;
+	void __iomem *immap;
+	struct device_node *np = NULL;
+	int port0_is_dr = 0, port1_is_dr = 0;
+	const void *prop, *dr_mode;
+
+	immap = ioremap(get_immrbase(), 0x1000);
+	if (!immap)
+		return -ENOMEM;
+
+	/* Read registers */
+	/* Note: DR and MPH must use the same clock setting in SCCR */
+	sccr = in_be32(immap + MPC83XX_SCCR_OFFS) & ~MPC83XX_SCCR_USB_MASK;
+	sicrl = in_be32(immap + MPC83XX_SICRL_OFFS) & ~MPC834X_SICRL_USB_MASK;
+	sicrh = in_be32(immap + MPC83XX_SICRH_OFFS) & ~MPC834X_SICRH_USB_UTMI;
+
+	np = of_find_compatible_node(NULL, "usb", "fsl-usb2-dr");
+	if (np) {
+		sccr |= MPC83XX_SCCR_USB_DRCM_11;  /* 1:3 */
+
+		prop = of_get_property(np, "phy_type", NULL);
+		if (prop && (!strcmp(prop, "utmi") ||
+					!strcmp(prop, "utmi_wide"))) {
+			sicrl |= MPC834X_SICRL_USB0 | MPC834X_SICRL_USB1;
+			sicrh |= MPC834X_SICRH_USB_UTMI;
+			port1_is_dr = 1;
+		} else if (prop && !strcmp(prop, "serial")) {
+			dr_mode = of_get_property(np, "dr_mode", NULL);
+			if (dr_mode && !strcmp(dr_mode, "otg")) {
+				sicrl |= MPC834X_SICRL_USB0 | MPC834X_SICRL_USB1;
+				port1_is_dr = 1;
+			} else {
+				sicrl |= MPC834X_SICRL_USB0;
+			}
+		} else if (prop && !strcmp(prop, "ulpi")) {
+			sicrl |= MPC834X_SICRL_USB0;
+		} else {
+			printk(KERN_WARNING "834x USB PHY type not supported\n");
+		}
+		port0_is_dr = 1;
+		of_node_put(np);
+	}
+	np = of_find_compatible_node(NULL, "usb", "fsl-usb2-mph");
+	if (np) {
+		sccr |= MPC83XX_SCCR_USB_MPHCM_11; /* 1:3 */
+
+		prop = of_get_property(np, "port0", NULL);
+		if (prop) {
+			if (port0_is_dr)
+				printk(KERN_WARNING
+					"834x USB port0 can't be used by both DR and MPH!\n");
+			sicrl |= MPC834X_SICRL_USB0;
+		}
+		prop = of_get_property(np, "port1", NULL);
+		if (prop) {
+			if (port1_is_dr)
+				printk(KERN_WARNING
+					"834x USB port1 can't be used by both DR and MPH!\n");
+			sicrl |= MPC834X_SICRL_USB1;
+		}
+		of_node_put(np);
+	}
+
+	/* Write back */
+	out_be32(immap + MPC83XX_SCCR_OFFS, sccr);
+	out_be32(immap + MPC83XX_SICRL_OFFS, sicrl);
+	out_be32(immap + MPC83XX_SICRH_OFFS, sicrh);
+
+	iounmap(immap);
+	return 0;
+}
+#endif /* CONFIG_MPC834x */
+
+#ifdef CONFIG_PPC_MPC831x
+int mpc831x_usb_cfg(void)
+{
+	u32 temp;
+	void __iomem *immap, *usb_regs;
+	struct device_node *np = NULL;
+	const void *prop;
+	struct resource res;
+	int ret = 0;
+#ifdef CONFIG_USB_OTG
+	const void *dr_mode;
+#endif
+
+	np = of_find_compatible_node(NULL, "usb", "fsl-usb2-dr");
+	if (!np)
+		return -ENODEV;
+	prop = of_get_property(np, "phy_type", NULL);
+
+	/* Map IMMR space for pin and clock settings */
+	immap = ioremap(get_immrbase(), 0x1000);
+	if (!immap) {
+		of_node_put(np);
+		return -ENOMEM;
+	}
+
+	/* Configure clock */
+	temp = in_be32(immap + MPC83XX_SCCR_OFFS);
+	temp &= ~MPC83XX_SCCR_USB_MASK;
+	temp |= MPC83XX_SCCR_USB_DRCM_11;  /* 1:3 */
+	out_be32(immap + MPC83XX_SCCR_OFFS, temp);
+
+	/* Configure pin mux for ULPI.  There is no pin mux for UTMI */
+	if (!strcmp(prop, "ulpi")) {
+		temp = in_be32(immap + MPC83XX_SICRL_OFFS);
+		temp &= ~MPC831X_SICRL_USB_MASK;
+		temp |= MPC831X_SICRL_USB_ULPI;
+		out_be32(immap + MPC83XX_SICRL_OFFS, temp);
+
+		temp = in_be32(immap + MPC83XX_SICRH_OFFS);
+		temp &= ~MPC831X_SICRH_USB_MASK;
+		temp |= MPC831X_SICRH_USB_ULPI;
+		out_be32(immap + MPC83XX_SICRH_OFFS, temp);
+	}
+
+	iounmap(immap);
+
+	/* Map USB SOC space */
+	ret = of_address_to_resource(np, 0, &res);
+	if (ret) {
+		of_node_put(np);
+		return ret;
+	}
+	usb_regs = ioremap(res.start, res.end - res.start + 1);
+
+	/* Using on-chip PHY */
+	if (!strcmp(prop, "utmi_wide") ||
+			!strcmp(prop, "utmi")) {
+		/* Set UTMI_PHY_EN, REFSEL to 48MHZ */
+		out_be32(usb_regs + FSL_USB2_CONTROL_OFFS,
+				CONTROL_UTMI_PHY_EN | CONTROL_REFSEL_48MHZ);
+	/* Using external ULPI PHY */
+	} else if (!strcmp(prop, "ulpi")) {
+		/* Set PHY_CLK_SEL to ULPI */
+		temp = CONTROL_PHY_CLK_SEL_ULPI;
+#ifdef CONFIG_USB_OTG
+		/* Set OTG_PORT */
+		dr_mode = of_get_property(np, "dr_mode", NULL);
+		if (dr_mode && !strcmp(dr_mode, "otg"))
+			temp |= CONTROL_OTG_PORT;
+#endif /* CONFIG_USB_OTG */
+		out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, temp);
+	} else {
+		printk(KERN_WARNING "831x USB PHY type not supported\n");
+		ret = -EINVAL;
+	}
+
+	iounmap(usb_regs);
+	of_node_put(np);
+	return ret;
+}
+#endif /* CONFIG_PPC_MPC831x */
diff --git a/arch/powerpc/platforms/85xx/misc.c b/arch/powerpc/platforms/85xx/misc.c
index 3e62fcb..4fe376e 100644
--- a/arch/powerpc/platforms/85xx/misc.c
+++ b/arch/powerpc/platforms/85xx/misc.c
@@ -13,11 +13,43 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <sysdev/fsl_soc.h>
+
+static __be32 __iomem *rstcr;
 
 extern void abort(void);
 
+static int __init mpc85xx_rstcr(void)
+{
+	struct device_node *np;
+	np = of_find_node_by_name(NULL, "global-utilities");
+	if ((np && of_get_property(np, "fsl,has-rstcr", NULL))) {
+		const u32 *prop = of_get_property(np, "reg", NULL);
+		if (prop) {
+			/* map reset control register
+			 * 0xE00B0 is offset of reset control register
+			 */
+			rstcr = ioremap(get_immrbase() + *prop + 0xB0, 0xff);
+			if (!rstcr)
+				printk (KERN_EMERG "Error: reset control "
+						"register not mapped!\n");
+		}
+	} else
+		printk (KERN_INFO "rstcr compatible register does not exist!\n");
+	if (np)
+		of_node_put(np);
+	return 0;
+}
+
+arch_initcall(mpc85xx_rstcr);
+
 void mpc85xx_restart(char *cmd)
 {
 	local_irq_disable();
+	if (rstcr)
+		/* set reset control register */
+		out_be32(rstcr, 0x2);	/* HRESET_REQ */
 	abort();
 }
diff --git a/arch/powerpc/platforms/85xx/mpc8544_ds.c b/arch/powerpc/platforms/85xx/mpc8544_ds.c
index bec84ff..6fb90aa 100644
--- a/arch/powerpc/platforms/85xx/mpc8544_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8544_ds.c
@@ -61,24 +61,11 @@
 		return;
 	}
 
-	/* Alloc mpic structure and per isu has 16 INT entries. */
 	mpic = mpic_alloc(np, r.start,
 			  MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
-			  16, 64, " OPENPIC     ");
+			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
 
-	/*
-	 * 48 Internal Interrupts
-	 */
-	mpic_assign_isu(mpic, 0, r.start + 0x10200);
-	mpic_assign_isu(mpic, 1, r.start + 0x10400);
-	mpic_assign_isu(mpic, 2, r.start + 0x10600);
-
-	/*
-	 * 16 External interrupts
-	 */
-	mpic_assign_isu(mpic, 3, r.start + 0x10000);
-
 	mpic_init(mpic);
 
 #ifdef CONFIG_PPC_I8259
diff --git a/arch/powerpc/platforms/85xx/mpc85xx.h b/arch/powerpc/platforms/85xx/mpc85xx.h
index 83415db..7286ffa 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx.h
+++ b/arch/powerpc/platforms/85xx/mpc85xx.h
@@ -15,4 +15,4 @@
  */
 
 extern void mpc85xx_restart(char *);
-extern int add_bridge(struct device_node *dev);
+extern int mpc85xx_add_bridge(struct device_node *dev);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 5d27621..7235f70 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -38,13 +38,9 @@
 #include <asm/fs_pd.h>
 #endif
 
-#ifndef CONFIG_PCI
-unsigned long isa_io_base = 0;
-unsigned long isa_mem_base = 0;
-#endif
-
 #ifdef CONFIG_PCI
-static int mpc85xx_exclude_device(u_char bus, u_char devfn)
+static int mpc85xx_exclude_device(struct pci_controller *hose,
+				   u_char bus, u_char devfn)
 {
 	if (bus == 0 && PCI_SLOT(devfn) == 0)
 		return PCIBIOS_DEVICE_NOT_FOUND;
@@ -91,30 +87,10 @@
 
 	mpic = mpic_alloc(np, r.start,
 			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
-			4, 0, " OpenPIC  ");
+			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
 	of_node_put(np);
 
-	mpic_assign_isu(mpic, 0, r.start + 0x10200);
-	mpic_assign_isu(mpic, 1, r.start + 0x10280);
-	mpic_assign_isu(mpic, 2, r.start + 0x10300);
-	mpic_assign_isu(mpic, 3, r.start + 0x10380);
-	mpic_assign_isu(mpic, 4, r.start + 0x10400);
-	mpic_assign_isu(mpic, 5, r.start + 0x10480);
-	mpic_assign_isu(mpic, 6, r.start + 0x10500);
-	mpic_assign_isu(mpic, 7, r.start + 0x10580);
-
-	/* Unused on this platform (leave room for 8548) */
-	mpic_assign_isu(mpic, 8, r.start + 0x10600);
-	mpic_assign_isu(mpic, 9, r.start + 0x10680);
-	mpic_assign_isu(mpic, 10, r.start + 0x10700);
-	mpic_assign_isu(mpic, 11, r.start + 0x10780);
-
-	/* External Interrupts */
-	mpic_assign_isu(mpic, 12, r.start + 0x10000);
-	mpic_assign_isu(mpic, 13, r.start + 0x10080);
-	mpic_assign_isu(mpic, 14, r.start + 0x10100);
-
 	mpic_init(mpic);
 
 #ifdef CONFIG_CPM2
@@ -241,7 +217,7 @@
 
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
-		add_bridge(np);
+		mpc85xx_add_bridge(np);
 	ppc_md.pci_exclude_device = mpc85xx_exclude_device;
 #endif
 }
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 1490eb3..50c8d64 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -47,11 +47,6 @@
 #include <sysdev/fsl_soc.h>
 #include "mpc85xx.h"
 
-#ifndef CONFIG_PCI
-unsigned long isa_io_base = 0;
-unsigned long isa_mem_base = 0;
-#endif
-
 static int cds_pci_slot = 2;
 static volatile u8 *cadmus;
 
@@ -60,15 +55,11 @@
 #define ARCADIA_HOST_BRIDGE_IDSEL	17
 #define ARCADIA_2ND_BRIDGE_IDSEL	3
 
-extern int mpc85xx_pci2_busno;
-
-static int mpc85xx_exclude_device(u_char bus, u_char devfn)
+static int mpc85xx_exclude_device(struct pci_controller *hose,
+				  u_char bus, u_char devfn)
 {
-	if (bus == 0 && PCI_SLOT(devfn) == 0)
+	if ((bus == hose->first_busno) && PCI_SLOT(devfn) == 0)
 		return PCIBIOS_DEVICE_NOT_FOUND;
-	if (mpc85xx_pci2_busno)
-		if (bus == (mpc85xx_pci2_busno) && PCI_SLOT(devfn) == 0)
-			return PCIBIOS_DEVICE_NOT_FOUND;
 	/* We explicitly do not go past the Tundra 320 Bridge */
 	if ((bus == 1) && (PCI_SLOT(devfn) == ARCADIA_2ND_BRIDGE_IDSEL))
 		return PCIBIOS_DEVICE_NOT_FOUND;
@@ -78,52 +69,44 @@
 		return PCIBIOS_SUCCESSFUL;
 }
 
-static void __init mpc85xx_cds_pcibios_fixup(void)
+static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev)
 {
-	struct pci_dev *dev;
-	u_char		c;
+	u_char c;
+	if (dev->vendor == PCI_VENDOR_ID_VIA) {
+		switch (dev->device) {
+		case PCI_DEVICE_ID_VIA_82C586_1:
+			/*
+			 * U-Boot does not set the enable bits
+			 * for the IDE device. Force them on here.
+			 */
+			pci_read_config_byte(dev, 0x40, &c);
+			c |= 0x03; /* IDE: Chip Enable Bits */
+			pci_write_config_byte(dev, 0x40, c);
 
-	if ((dev = pci_get_device(PCI_VENDOR_ID_VIA,
-					PCI_DEVICE_ID_VIA_82C586_1, NULL))) {
+			/*
+			 * Since only primary interface works, force the
+			 * IDE function to standard primary IDE interrupt
+			 * w/ 8259 offset
+			 */
+			dev->irq = 14;
+			pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+			break;
 		/*
-		 * U-Boot does not set the enable bits
-		 * for the IDE device. Force them on here.
+		 * Force legacy USB interrupt routing
 		 */
-		pci_read_config_byte(dev, 0x40, &c);
-		c |= 0x03; /* IDE: Chip Enable Bits */
-		pci_write_config_byte(dev, 0x40, c);
-
-		/*
-		 * Since only primary interface works, force the
-		 * IDE function to standard primary IDE interrupt
-		 * w/ 8259 offset
+		case PCI_DEVICE_ID_VIA_82C586_2:
+			/* There are two USB controllers.
+			 * Identify them by function number
+			 */
-		dev->irq = 14;
-		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
-		pci_dev_put(dev);
+			if (PCI_FUNC(dev->devfn))
+				dev->irq = 11;
+			else
+				dev->irq = 10;
+			pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+		default:
+			break;
+		}
 	}
-
-	/*
-	 * Force legacy USB interrupt routing
-	 */
-	if ((dev = pci_get_device(PCI_VENDOR_ID_VIA,
-					PCI_DEVICE_ID_VIA_82C586_2, NULL))) {
-		dev->irq = 10;
-		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 10);
-		pci_dev_put(dev);
-	}
-
-	if ((dev = pci_get_device(PCI_VENDOR_ID_VIA,
-					PCI_DEVICE_ID_VIA_82C586_2, dev))) {
-		dev->irq = 11;
-		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
-		pci_dev_put(dev);
-	}
-
-	/* Now map all the PCI irqs */
-	dev = NULL;
-	for_each_pci_dev(dev)
-		pci_read_irq_line(dev);
 }
 
 #ifdef CONFIG_PPC_I8259
@@ -165,33 +148,12 @@
 
 	mpic = mpic_alloc(np, r.start,
 			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
-			4, 0, " OpenPIC  ");
+			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
 
 	/* Return the mpic node */
 	of_node_put(np);
 
-	mpic_assign_isu(mpic, 0, r.start + 0x10200);
-	mpic_assign_isu(mpic, 1, r.start + 0x10280);
-	mpic_assign_isu(mpic, 2, r.start + 0x10300);
-	mpic_assign_isu(mpic, 3, r.start + 0x10380);
-	mpic_assign_isu(mpic, 4, r.start + 0x10400);
-	mpic_assign_isu(mpic, 5, r.start + 0x10480);
-	mpic_assign_isu(mpic, 6, r.start + 0x10500);
-	mpic_assign_isu(mpic, 7, r.start + 0x10580);
-
-	/* Used only for 8548 so far, but no harm in
-	 * allocating them for everyone */
-	mpic_assign_isu(mpic, 8, r.start + 0x10600);
-	mpic_assign_isu(mpic, 9, r.start + 0x10680);
-	mpic_assign_isu(mpic, 10, r.start + 0x10700);
-	mpic_assign_isu(mpic, 11, r.start + 0x10780);
-
-	/* External Interrupts */
-	mpic_assign_isu(mpic, 12, r.start + 0x10000);
-	mpic_assign_isu(mpic, 13, r.start + 0x10080);
-	mpic_assign_isu(mpic, 14, r.start + 0x10100);
-
 	mpic_init(mpic);
 
 #ifdef CONFIG_PPC_I8259
@@ -257,9 +219,9 @@
 
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
-		add_bridge(np);
+		mpc85xx_add_bridge(np);
 
-	ppc_md.pcibios_fixup = mpc85xx_cds_pcibios_fixup;
+	ppc_md.pci_irq_fixup = mpc85xx_cds_pci_irq_fixup;
 	ppc_md.pci_exclude_device = mpc85xx_exclude_device;
 #endif
 }
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index e3dddbf..004b80b 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -59,11 +59,6 @@
 #define DBG(fmt...)
 #endif
 
-#ifndef CONFIG_PCI
-unsigned long isa_io_base = 0;
-unsigned long isa_mem_base = 0;
-#endif
-
 /* ************************************************************************
  *
  * Setup the architecture
@@ -100,7 +95,7 @@
 
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;) {
-		add_bridge(np);
+		mpc85xx_add_bridge(np);
 	}
 	of_node_put(np);
 #endif
@@ -181,29 +176,10 @@
 
 	mpic = mpic_alloc(np, r.start,
 			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
-			4, 0, " OpenPIC  ");
+			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
 	of_node_put(np);
 
-	/* Internal Interrupts */
-	mpic_assign_isu(mpic, 0, r.start + 0x10200);
-	mpic_assign_isu(mpic, 1, r.start + 0x10280);
-	mpic_assign_isu(mpic, 2, r.start + 0x10300);
-	mpic_assign_isu(mpic, 3, r.start + 0x10380);
-	mpic_assign_isu(mpic, 4, r.start + 0x10400);
-	mpic_assign_isu(mpic, 5, r.start + 0x10480);
-	mpic_assign_isu(mpic, 6, r.start + 0x10500);
-	mpic_assign_isu(mpic, 7, r.start + 0x10580);
-	mpic_assign_isu(mpic, 8, r.start + 0x10600);
-	mpic_assign_isu(mpic, 9, r.start + 0x10680);
-	mpic_assign_isu(mpic, 10, r.start + 0x10700);
-	mpic_assign_isu(mpic, 11, r.start + 0x10780);
-
-	/* External Interrupts */
-	mpic_assign_isu(mpic, 12, r.start + 0x10000);
-	mpic_assign_isu(mpic, 13, r.start + 0x10080);
-	mpic_assign_isu(mpic, 14, r.start + 0x10100);
-
 	mpic_init(mpic);
 
 #ifdef CONFIG_QUICC_ENGINE
diff --git a/arch/powerpc/platforms/85xx/pci.c b/arch/powerpc/platforms/85xx/pci.c
index 48f17e2..8118417 100644
--- a/arch/powerpc/platforms/85xx/pci.c
+++ b/arch/powerpc/platforms/85xx/pci.c
@@ -33,10 +33,8 @@
 #define DBG(x...)
 #endif
 
-int mpc85xx_pci2_busno = 0;
-
 #ifdef CONFIG_PCI
-int __init add_bridge(struct device_node *dev)
+int __init mpc85xx_add_bridge(struct device_node *dev)
 {
 	int len;
 	struct pci_controller *hose;
@@ -57,11 +55,10 @@
 		       " bus 0\n", dev->full_name);
 	}
 
-	hose = pcibios_alloc_controller();
+	pci_assign_all_buses = 1;
+	hose = pcibios_alloc_controller(dev);
 	if (!hose)
 		return -ENOMEM;
-	hose->arch_data = dev;
-	hose->set_cfg_type = 1;
 
 	hose->first_busno = bus_range ? bus_range[0] : 0;
 	hose->last_busno = bus_range ? bus_range[1] : 0xff;
@@ -74,8 +71,6 @@
 	if ((rsrc.start & 0xfffff) == 0x9000) {
 		setup_indirect_pci(hose, immr + 0x9000, immr + 0x9004);
 		primary = 0;
-		hose->bus_offset = hose->first_busno;
-		mpc85xx_pci2_busno = hose->first_busno;
 	}
 
 	printk(KERN_INFO "Found MPC85xx PCI host bridge at 0x%016llx. "
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index d1bcff5..0faebfd 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -1,5 +1,5 @@
 choice
-	prompt "Machine Type"
+	prompt "86xx Board Type"
 	depends on PPC_86xx
 	default MPC8641_HPCN
 
diff --git a/arch/powerpc/platforms/86xx/mpc86xx.h b/arch/powerpc/platforms/86xx/mpc86xx.h
index 2834462..23f7ed2 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx.h
+++ b/arch/powerpc/platforms/86xx/mpc86xx.h
@@ -15,15 +15,10 @@
  * mpc86xx_* files. Mostly for use by mpc86xx_setup().
  */
 
-extern int add_bridge(struct device_node *dev);
+extern int mpc86xx_add_bridge(struct device_node *dev);
 
-extern int mpc86xx_exclude_device(u_char bus, u_char devfn);
-
-extern void setup_indirect_pcie(struct pci_controller *hose,
-				       u32 cfg_addr, u32 cfg_data);
-extern void setup_indirect_pcie_nomap(struct pci_controller *hose,
-					     void __iomem *cfg_addr,
-					     void __iomem *cfg_data);
+extern int mpc86xx_exclude_device(struct pci_controller *hose,
+				  u_char bus, u_char devfn);
 
 extern void __init mpc86xx_smp_init(void);
 
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 1051702..5b01ec7 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -44,13 +44,6 @@
 #define DBG(fmt...) do { } while(0)
 #endif
 
-#ifndef CONFIG_PCI
-unsigned long isa_io_base = 0;
-unsigned long isa_mem_base = 0;
-unsigned long pci_dram_offset = 0;
-#endif
-
-
 #ifdef CONFIG_PCI
 static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
 {
@@ -81,22 +74,9 @@
 	/* Alloc mpic structure and per isu has 16 INT entries. */
 	mpic1 = mpic_alloc(np, res.start,
 			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
-			16, NR_IRQS - 4,
-			" MPIC     ");
+			0, 256, " MPIC     ");
 	BUG_ON(mpic1 == NULL);
 
-	mpic_assign_isu(mpic1, 0, res.start + 0x10000);
-
-	/* 48 Internal Interrupts */
-	mpic_assign_isu(mpic1, 1, res.start + 0x10200);
-	mpic_assign_isu(mpic1, 2, res.start + 0x10400);
-	mpic_assign_isu(mpic1, 3, res.start + 0x10600);
-
-	/* 16 External interrupts
-	 * Moving them from [0 - 15] to [64 - 79]
-	 */
-	mpic_assign_isu(mpic1, 4, res.start + 0x10000);
-
 	mpic_init(mpic1);
 
 #ifdef CONFIG_PCI
@@ -319,6 +299,7 @@
 {
 	unsigned short temp;
 	pci_write_config_word(dev, 0x04, 0x0405);
+	dev->class &= ~0x5;
 	pci_read_config_word(dev, 0x4a, &temp);
 	temp |= 0x1000;
 	pci_write_config_word(dev, 0x4a, temp);
@@ -364,9 +345,7 @@
 
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
-		add_bridge(np);
-
-	ppc_md.pci_exclude_device = mpc86xx_exclude_device;
+		mpc86xx_add_bridge(np);
 #endif
 
 	printk("MPC86xx HPCN board from Freescale Semiconductor\n");
diff --git a/arch/powerpc/platforms/86xx/pci.c b/arch/powerpc/platforms/86xx/pci.c
index 8235c56..73cd5b05 100644
--- a/arch/powerpc/platforms/86xx/pci.c
+++ b/arch/powerpc/platforms/86xx/pci.c
@@ -122,7 +122,6 @@
 mpc86xx_setup_pcie(struct pci_controller *hose, u32 pcie_offset, u32 pcie_size)
 {
 	u16 cmd;
-	unsigned int temps;
 
 	DBG("PCIE host controller register offset 0x%08x, size 0x%08x.\n",
 			pcie_offset, pcie_size);
@@ -133,22 +132,49 @@
 	early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
 
 	early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
-
-	/* PCIE Bus, Fix the MPC8641D host bridge's location to bus 0xFF. */
-	early_read_config_dword(hose, 0, 0, PCI_PRIMARY_BUS, &temps);
-	temps = (temps & 0xff000000) | (0xff) | (0x0 << 8) | (0xfe << 16);
-	early_write_config_dword(hose, 0, 0, PCI_PRIMARY_BUS, temps);
 }
 
-int mpc86xx_exclude_device(u_char bus, u_char devfn)
+static void __devinit quirk_fsl_pcie_transparent(struct pci_dev *dev)
 {
-	if (bus == 0 && PCI_SLOT(devfn) == 0)
-		return PCIBIOS_DEVICE_NOT_FOUND;
+	struct resource *res;
+	int i, res_idx = PCI_BRIDGE_RESOURCES;
+	struct pci_controller *hose;
 
-	return PCIBIOS_SUCCESSFUL;
+	/*
+	 * Make the bridge be transparent.
+	 */
+	dev->transparent = 1;
+
+	hose = pci_bus_to_host(dev->bus);
+	if (!hose) {
+		printk(KERN_ERR "Can't find hose for bus %d\n",
+		       dev->bus->number);
+		return;
+	}
+
+	if (hose->io_resource.flags) {
+		res = &dev->resource[res_idx++];
+		res->start = hose->io_resource.start;
+		res->end = hose->io_resource.end;
+		res->flags = hose->io_resource.flags;
+	}
+
+	for (i = 0; i < 3; i++) {
+		res = &dev->resource[res_idx + i];
+		res->start = hose->mem_resources[i].start;
+		res->end = hose->mem_resources[i].end;
+		res->flags = hose->mem_resources[i].flags;
+	}
 }
 
-int __init add_bridge(struct device_node *dev)
+
+DECLARE_PCI_FIXUP_EARLY(0x1957, 0x7010, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, 0x7011, quirk_fsl_pcie_transparent);
+
+#define PCIE_LTSSM	0x404	/* PCIe Link Training and Status */
+#define PCIE_LTSSM_L0	0x16	/* L0 state */
+
+int __init mpc86xx_add_bridge(struct device_node *dev)
 {
 	int len;
 	struct pci_controller *hose;
@@ -156,6 +182,7 @@
 	const int *bus_range;
 	int has_address = 0;
 	int primary = 0;
+	u16 val;
 
 	DBG("Adding PCIE host bridge %s\n", dev->full_name);
 
@@ -168,17 +195,23 @@
 		printk(KERN_WARNING "Can't get bus-range for %s, assume"
 		       " bus 0\n", dev->full_name);
 
-	hose = pcibios_alloc_controller();
+	pci_assign_all_buses = 1;
+	hose = pcibios_alloc_controller(dev);
 	if (!hose)
 		return -ENOMEM;
-	hose->arch_data = dev;
-	hose->set_cfg_type = 1;
 
-	/* last_busno = 0xfe cause by MPC8641 PCIE bug */
+	hose->indirect_type = PPC_INDIRECT_TYPE_EXT_REG |
+				PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
+
 	hose->first_busno = bus_range ? bus_range[0] : 0x0;
-	hose->last_busno = bus_range ? bus_range[1] : 0xfe;
+	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 
-	setup_indirect_pcie(hose, rsrc.start, rsrc.start + 0x4);
+	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4);
+
+	/* Probe the hose link training status */
+	early_read_config_word(hose, 0, 0, PCIE_LTSSM, &val);
+	if (val < PCIE_LTSSM_L0)
+		return -ENXIO;
 
 	/* Setup the PCIE host controller. */
 	mpc86xx_setup_pcie(hose, rsrc.start, rsrc.end - rsrc.start + 1);
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index 0901dba..f169355 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -32,6 +32,7 @@
 #include <linux/root_dev.h>
 #include <linux/time.h>
 #include <linux/rtc.h>
+#include <linux/fsl_devices.h>
 
 #include <asm/mmu.h>
 #include <asm/reg.h>
@@ -49,6 +50,10 @@
 
 #include "sysdev/mpc8xx_pic.h"
 
+#ifdef CONFIG_PCMCIA_M8XX
+struct mpc8xx_pcmcia_ops m8xx_pcmcia_ops;
+#endif
+
 void m8xx_calibrate_decr(void);
 extern void m8xx_wdt_handler_install(bd_t *bp);
 extern int cpm_pic_init(void);
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
index c36e475..5a808d6 100644
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@ -22,6 +22,7 @@
 
 #include <linux/fs_enet_pd.h>
 #include <linux/fs_uart_pd.h>
+#include <linux/fsl_devices.h>
 #include <linux/mii.h>
 
 #include <asm/delay.h>
@@ -39,7 +40,7 @@
 #include <asm/prom.h>
 
 extern void cpm_reset(void);
-extern void mpc8xx_show_cpuinfo(struct seq_file*);
+extern void mpc8xx_show_cpuinfo(struct seq_file *);
 extern void mpc8xx_restart(char *cmd);
 extern void mpc8xx_calibrate_decr(void);
 extern int mpc8xx_set_rtc_time(struct rtc_time *tm);
@@ -47,9 +48,73 @@
 extern void m8xx_pic_init(void);
 extern unsigned int mpc8xx_get_irq(void);
 
-static void init_smc1_uart_ioports(struct fs_uart_platform_info* fpi);
-static void init_smc2_uart_ioports(struct fs_uart_platform_info* fpi);
-static void init_scc3_ioports(struct fs_platform_info* ptr);
+static void init_smc1_uart_ioports(struct fs_uart_platform_info *fpi);
+static void init_smc2_uart_ioports(struct fs_uart_platform_info *fpi);
+static void init_scc3_ioports(struct fs_platform_info *ptr);
+
+#ifdef CONFIG_PCMCIA_M8XX
+static void pcmcia_hw_setup(int slot, int enable)
+{
+	unsigned *bcsr_io;
+
+	bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
+	if (enable)
+		clrbits32(bcsr_io, BCSR1_PCCEN);
+	else
+		setbits32(bcsr_io, BCSR1_PCCEN);
+
+	iounmap(bcsr_io);
+}
+
+static int pcmcia_set_voltage(int slot, int vcc, int vpp)
+{
+	u32 reg = 0;
+	unsigned *bcsr_io;
+
+	bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
+
+	switch (vcc) {
+	case 0:
+		break;
+	case 33:
+		reg |= BCSR1_PCCVCC0;
+		break;
+	case 50:
+		reg |= BCSR1_PCCVCC1;
+		break;
+	default:
+		return 1;
+	}
+
+	switch (vpp) {
+	case 0:
+		break;
+	case 33:
+	case 50:
+		if (vcc == vpp)
+			reg |= BCSR1_PCCVPP1;
+		else
+			return 1;
+		break;
+	case 120:
+		if ((vcc == 33) || (vcc == 50))
+			reg |= BCSR1_PCCVPP0;
+		else
+			return 1;
+		break;
+	default:
+		return 1;
+	}
+
+	/* first, turn off all power */
+	clrbits32(bcsr_io, 0x00610000);
+
+	/* enable new power settings */
+	setbits32(bcsr_io, reg);
+
+	iounmap(bcsr_io);
+	return 0;
+}
+#endif
 
 void __init mpc885ads_board_setup(void)
 {
@@ -62,7 +127,7 @@
 #endif
 
 	bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
-	cp = (cpm8xx_t *)immr_map(im_cpm);
+	cp = (cpm8xx_t *) immr_map(im_cpm);
 
 	if (bcsr_io == NULL) {
 		printk(KERN_CRIT "Could not remap BCSR\n");
@@ -75,13 +140,13 @@
 	out_8(&(cp->cp_smc[0].smc_smcm), tmpval8);
 	clrbits16(&cp->cp_smc[0].smc_smcmr, SMCMR_REN | SMCMR_TEN);	/* brg1 */
 #else
-	setbits32(bcsr_io,BCSR1_RS232EN_1);
+	setbits32(bcsr_io, BCSR1_RS232EN_1);
 	out_be16(&cp->cp_smc[0].smc_smcmr, 0);
 	out_8(&cp->cp_smc[0].smc_smce, 0);
 #endif
 
 #ifdef CONFIG_SERIAL_CPM_SMC2
-	clrbits32(bcsr_io,BCSR1_RS232EN_2);
+	clrbits32(bcsr_io, BCSR1_RS232EN_2);
 	clrbits32(&cp->cp_simode, 0xe0000000 >> 1);
 	setbits32(&cp->cp_simode, 0x20000000 >> 1);	/* brg2 */
 	tmpval8 = in_8(&(cp->cp_smc[1].smc_smcm)) | (SMCM_RX | SMCM_TX);
@@ -90,7 +155,7 @@
 
 	init_smc2_uart_ioports(0);
 #else
-	setbits32(bcsr_io,BCSR1_RS232EN_2);
+	setbits32(bcsr_io, BCSR1_RS232EN_2);
 	out_be16(&cp->cp_smc[1].smc_smcmr, 0);
 	out_8(&cp->cp_smc[1].smc_smce, 0);
 #endif
@@ -99,29 +164,34 @@
 
 #ifdef CONFIG_FS_ENET
 	/* use MDC for MII (common) */
-	io_port = (iop8xx_t*)immr_map(im_ioport);
+	io_port = (iop8xx_t *) immr_map(im_ioport);
 	setbits16(&io_port->iop_pdpar, 0x0080);
 	clrbits16(&io_port->iop_pddir, 0x0080);
 
 	bcsr_io = ioremap(BCSR5, sizeof(unsigned long));
-	clrbits32(bcsr_io,BCSR5_MII1_EN);
-	clrbits32(bcsr_io,BCSR5_MII1_RST);
+	clrbits32(bcsr_io, BCSR5_MII1_EN);
+	clrbits32(bcsr_io, BCSR5_MII1_RST);
 #ifndef CONFIG_FC_ENET_HAS_SCC
-	clrbits32(bcsr_io,BCSR5_MII2_EN);
-	clrbits32(bcsr_io,BCSR5_MII2_RST);
+	clrbits32(bcsr_io, BCSR5_MII2_EN);
+	clrbits32(bcsr_io, BCSR5_MII2_RST);
 
 #endif
 	iounmap(bcsr_io);
 	immr_unmap(io_port);
 
 #endif
+
+#ifdef CONFIG_PCMCIA_M8XX
+	/* Set up board-specific hook-ups */
+	m8xx_pcmcia_ops.hw_ctrl = pcmcia_hw_setup;
+	m8xx_pcmcia_ops.voltage_set = pcmcia_set_voltage;
+#endif
 }
 
-
-static void init_fec1_ioports(struct fs_platform_info* ptr)
+static void init_fec1_ioports(struct fs_platform_info *ptr)
 {
-	cpm8xx_t *cp = (cpm8xx_t *)immr_map(im_cpm);
-	iop8xx_t *io_port = (iop8xx_t *)immr_map(im_ioport);
+	cpm8xx_t *cp = (cpm8xx_t *) immr_map(im_cpm);
+	iop8xx_t *io_port = (iop8xx_t *) immr_map(im_ioport);
 
 	/* configure FEC1 pins  */
 	setbits16(&io_port->iop_papar, 0xf830);
@@ -143,11 +213,10 @@
 	immr_unmap(cp);
 }
 
-
-static void init_fec2_ioports(struct fs_platform_info* ptr)
+static void init_fec2_ioports(struct fs_platform_info *ptr)
 {
-	cpm8xx_t *cp = (cpm8xx_t *)immr_map(im_cpm);
-	iop8xx_t *io_port = (iop8xx_t *)immr_map(im_ioport);
+	cpm8xx_t *cp = (cpm8xx_t *) immr_map(im_cpm);
+	iop8xx_t *io_port = (iop8xx_t *) immr_map(im_ioport);
 
 	/* configure FEC2 pins */
 	setbits32(&cp->cp_pepar, 0x0003fffc);
@@ -177,15 +246,15 @@
 	}
 }
 
-static void init_scc3_ioports(struct fs_platform_info* fpi)
+static void init_scc3_ioports(struct fs_platform_info *fpi)
 {
 	unsigned *bcsr_io;
 	iop8xx_t *io_port;
 	cpm8xx_t *cp;
 
 	bcsr_io = ioremap(BCSR_ADDR, BCSR_SIZE);
-	io_port = (iop8xx_t *)immr_map(im_ioport);
-	cp = (cpm8xx_t *)immr_map(im_cpm);
+	io_port = (iop8xx_t *) immr_map(im_ioport);
+	cp = (cpm8xx_t *) immr_map(im_cpm);
 
 	if (bcsr_io == NULL) {
 		printk(KERN_CRIT "Could not remap BCSR\n");
@@ -194,9 +263,9 @@
 
 	/* Enable the PHY.
 	 */
-	clrbits32(bcsr_io+4, BCSR4_ETH10_RST);
+	clrbits32(bcsr_io + 4, BCSR4_ETH10_RST);
 	udelay(1000);
-	setbits32(bcsr_io+4, BCSR4_ETH10_RST);
+	setbits32(bcsr_io + 4, BCSR4_ETH10_RST);
 	/* Configure port A pins for Txd and Rxd.
 	 */
 	setbits16(&io_port->iop_papar, PA_ENET_RXD | PA_ENET_TXD);
@@ -212,8 +281,7 @@
 	 */
 	setbits32(&cp->cp_pepar, PE_ENET_TCLK | PE_ENET_RCLK);
 	clrbits32(&cp->cp_pepar, PE_ENET_TENA);
-	clrbits32(&cp->cp_pedir,
-		  PE_ENET_TCLK | PE_ENET_RCLK | PE_ENET_TENA);
+	clrbits32(&cp->cp_pedir, PE_ENET_TCLK | PE_ENET_RCLK | PE_ENET_TENA);
 	clrbits32(&cp->cp_peso, PE_ENET_TCLK | PE_ENET_RCLK);
 	setbits32(&cp->cp_peso, PE_ENET_TENA);
 
@@ -237,7 +305,7 @@
 	clrbits32(&cp->cp_pedir, PE_ENET_TENA);
 	setbits32(&cp->cp_peso, PE_ENET_TENA);
 
-	setbits32(bcsr_io+4, BCSR1_ETHEN);
+	setbits32(bcsr_io + 4, BCSR1_ETHEN);
 	iounmap(bcsr_io);
 	immr_unmap(io_port);
 	immr_unmap(cp);
@@ -257,50 +325,48 @@
 	}
 }
 
-
-
-static void init_smc1_uart_ioports(struct fs_uart_platform_info* ptr)
+static void init_smc1_uart_ioports(struct fs_uart_platform_info *ptr)
 {
-        unsigned *bcsr_io;
+	unsigned *bcsr_io;
 	cpm8xx_t *cp;
 
-	cp = (cpm8xx_t *)immr_map(im_cpm);
+	cp = (cpm8xx_t *) immr_map(im_cpm);
 	setbits32(&cp->cp_pepar, 0x000000c0);
 	clrbits32(&cp->cp_pedir, 0x000000c0);
 	clrbits32(&cp->cp_peso, 0x00000040);
 	setbits32(&cp->cp_peso, 0x00000080);
 	immr_unmap(cp);
 
-        bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
+	bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
 
-        if (bcsr_io == NULL) {
-                printk(KERN_CRIT "Could not remap BCSR1\n");
-                return;
-        }
-        clrbits32(bcsr_io,BCSR1_RS232EN_1);
-        iounmap(bcsr_io);
+	if (bcsr_io == NULL) {
+		printk(KERN_CRIT "Could not remap BCSR1\n");
+		return;
+	}
+	clrbits32(bcsr_io, BCSR1_RS232EN_1);
+	iounmap(bcsr_io);
 }
 
-static void init_smc2_uart_ioports(struct fs_uart_platform_info* fpi)
+static void init_smc2_uart_ioports(struct fs_uart_platform_info *fpi)
 {
-        unsigned *bcsr_io;
+	unsigned *bcsr_io;
 	cpm8xx_t *cp;
 
-	cp = (cpm8xx_t *)immr_map(im_cpm);
+	cp = (cpm8xx_t *) immr_map(im_cpm);
 	setbits32(&cp->cp_pepar, 0x00000c00);
 	clrbits32(&cp->cp_pedir, 0x00000c00);
 	clrbits32(&cp->cp_peso, 0x00000400);
 	setbits32(&cp->cp_peso, 0x00000800);
 	immr_unmap(cp);
 
-        bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
+	bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
 
-        if (bcsr_io == NULL) {
-                printk(KERN_CRIT "Could not remap BCSR1\n");
-                return;
-        }
-        clrbits32(bcsr_io,BCSR1_RS232EN_2);
-        iounmap(bcsr_io);
+	if (bcsr_io == NULL) {
+		printk(KERN_CRIT "Could not remap BCSR1\n");
+		return;
+	}
+	clrbits32(bcsr_io, BCSR1_RS232EN_2);
+	iounmap(bcsr_io);
 }
 
 void init_smc_ioports(struct fs_uart_platform_info *data)
@@ -373,15 +439,11 @@
 	return 1;
 }
 
-define_machine(mpc885_ads) {
-	.name			= "MPC885 ADS",
-	.probe			= mpc885ads_probe,
-	.setup_arch		= mpc885ads_setup_arch,
-	.init_IRQ		= m8xx_pic_init,
-	.show_cpuinfo		= mpc8xx_show_cpuinfo,
-	.get_irq		= mpc8xx_get_irq,
-	.restart		= mpc8xx_restart,
-	.calibrate_decr		= mpc8xx_calibrate_decr,
-	.set_rtc_time		= mpc8xx_set_rtc_time,
-	.get_rtc_time		= mpc8xx_get_rtc_time,
-};
+define_machine(mpc885_ads) {
+	.name			= "MPC885 ADS",
+	.probe			= mpc885ads_probe,
+	.setup_arch		= mpc885ads_setup_arch,
+	.init_IRQ		= m8xx_pic_init,
+	.show_cpuinfo		= mpc8xx_show_cpuinfo,
+	.get_irq		= mpc8xx_get_irq,
+	.restart		= mpc8xx_restart,
+	.calibrate_decr		= mpc8xx_calibrate_decr,
+	.set_rtc_time		= mpc8xx_set_rtc_time,
+	.get_rtc_time		= mpc8xx_get_rtc_time,
+};
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 361acfa..33545d3 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -2,7 +2,7 @@
 
 choice
 	prompt "Machine type"
-	depends on PPC64 || CLASSIC32
+	depends on PPC64 || 6xx
 	default PPC_MULTIPLATFORM
 
 config PPC_MULTIPLATFORM
@@ -16,15 +16,30 @@
 	bool "Embedded 6xx/7xx/7xxx-based board"
 	depends on PPC32 && (BROKEN||BROKEN_ON_SMP)
 
-config APUS
-	bool "Amiga-APUS"
-	depends on PPC32 && BROKEN
+config PPC_82xx
+	bool "Freescale 82xx"
+	depends on 6xx
+
+config PPC_83xx
+	bool "Freescale 83xx"
+	depends on 6xx
+	select FSL_SOC
+	select 83xx
+	select WANT_DEVICE_TREE
+
+config PPC_86xx
+	bool "Freescale 86xx"
+	depends on 6xx
+	select FSL_SOC
+	select ALTIVEC
 	help
-	  Select APUS if configuring for a PowerUP Amiga.
-	  More information is available at:
-	  <http://linux-apus.sourceforge.net/>.
+	  The Freescale E600 SoCs have 74xx cores.
 endchoice
 
+config CLASSIC32
+	def_bool y
+	depends on 6xx && PPC_MULTIPLATFORM
+
 source "arch/powerpc/platforms/pseries/Kconfig"
 source "arch/powerpc/platforms/iseries/Kconfig"
 source "arch/powerpc/platforms/chrp/Kconfig"
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
new file mode 100644
index 0000000..b8b5fde94
--- /dev/null
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -0,0 +1,221 @@
+config PPC64
+	bool "64-bit kernel"
+	default n
+	help
+	  This option selects whether a 32-bit or a 64-bit kernel
+	  will be built.
+
+menu "Processor support"
+choice
+	prompt "Processor Type"
+	depends on PPC32
+	default 6xx
+	help
+	  There are five families of 32-bit PowerPC chips supported.
+	  The most common ones are the desktop and server CPUs (601, 603,
+	  604, 740, 750, 74xx) from Freescale and IBM, with their
+	  embedded 52xx/82xx/83xx/86xx counterparts.
+	  The other embedded parts, namely 4xx, 8xx, e200 (55xx) and e500
+	  (85xx), each form a family of their own that is not compatible
+	  with the others.
+
+	  If unsure, select 52xx/6xx/7xx/74xx/82xx/83xx/86xx.
+
+config 6xx
+	bool "52xx/6xx/7xx/74xx/82xx/83xx/86xx"
+	select PPC_FPU
+
+config PPC_85xx
+	bool "Freescale 85xx"
+	select E500
+	select FSL_SOC
+	select 85xx
+	select WANT_DEVICE_TREE
+
+config PPC_8xx
+	bool "Freescale 8xx"
+	select FSL_SOC
+	select 8xx
+
+config 40x
+	bool "AMCC 40x"
+	select PPC_DCR_NATIVE
+
+config 44x
+	bool "AMCC 44x"
+	select PPC_DCR_NATIVE
+	select WANT_DEVICE_TREE
+
+config E200
+	bool "Freescale e200"
+
+endchoice
+
+config POWER4_ONLY
+	bool "Optimize for POWER4"
+	depends on PPC64
+	default n
+	---help---
+	  Cause the compiler to optimize for POWER4/POWER5/PPC970 processors.
+	  The resulting binary will not work on POWER3 or RS64 processors
+	  when compiled with binutils 2.15 or later.
+
+config POWER3
+	bool
+	depends on PPC64
+	default y if !POWER4_ONLY
+
+config POWER4
+	depends on PPC64
+	def_bool y
+
+config 6xx
+	bool
+
+# this is temp to handle compat with arch=ppc
+config 8xx
+	bool
+
+# this is temp to handle compat with arch=ppc
+config 83xx
+	bool
+
+# this is temp to handle compat with arch=ppc
+config 85xx
+	bool
+
+config E500
+	bool
+
+config PPC_FPU
+	bool
+	default y if PPC64
+
+config 4xx
+	bool
+	depends on 40x || 44x
+	default y
+
+config BOOKE
+	bool
+	depends on E200 || E500 || 44x
+	default y
+
+config FSL_BOOKE
+	bool
+	depends on E200 || E500
+	default y
+
+config PTE_64BIT
+	bool
+	depends on 44x || E500
+	default y if 44x
+	default y if E500 && PHYS_64BIT
+
+config PHYS_64BIT
+	bool 'Large physical address support' if E500
+	depends on 44x || E500
+	select RESOURCES_64BIT
+	default y if 44x
+	---help---
+	  This option enables kernel support for larger than 32-bit physical
+	  addresses.  This feature is not available on all e500 cores.
+
+	  If in doubt, say N here.
+
+config ALTIVEC
+	bool "AltiVec Support"
+	depends on CLASSIC32 || POWER4
+	---help---
+	  This option enables kernel support for the Altivec extensions to the
+	  PowerPC processor. The kernel currently supports saving and restoring
+	  altivec registers, and turning on the 'altivec enable' bit so user
+	  processes can execute altivec instructions.
+
+	  This option is only useful if you have a processor that supports
+	  altivec (G4, otherwise known as the 74xx series), but does not have
+	  any effect on a non-altivec cpu (it does, however, add code to the
+	  kernel).
+
+	  If in doubt, say Y here.
+
+config SPE
+	bool "SPE Support"
+	depends on E200 || E500
+	default y
+	---help---
+	  This option enables kernel support for the Signal Processing
+	  Extensions (SPE) to the PowerPC processor. The kernel currently
+	  supports saving and restoring SPE registers, and turning on the
+	  'spe enable' bit so user processes can execute SPE instructions.
+
+	  This option is only useful if you have a processor that supports
+	  SPE (e500, otherwise known as the 85xx series), but does not have
+	  any effect on a non-spe cpu (it does, however, add code to the
+	  kernel).
+
+	  If in doubt, say Y here.
+
+config PPC_STD_MMU
+	bool
+	depends on 6xx || POWER3 || POWER4 || PPC64
+	default y
+
+config PPC_STD_MMU_32
+	def_bool y
+	depends on PPC_STD_MMU && PPC32
+
+config PPC_MM_SLICES
+	bool
+	default y if HUGETLB_PAGE
+	default n
+
+config VIRT_CPU_ACCOUNTING
+	bool "Deterministic task and CPU time accounting"
+	depends on PPC64
+	default y
+	help
+	  Select this option to enable more accurate task and CPU time
+	  accounting.  This is done by reading a CPU counter on each
+	  kernel entry and exit and on transitions within the kernel
+	  between system, softirq and hardirq state, so there is a
+	  small performance impact.  This also enables accounting of
+	  stolen time on logically-partitioned systems running on
+	  IBM POWER5-based machines.
+
+	  If in doubt, say Y here.
+
+config SMP
+	depends on PPC_STD_MMU
+	bool "Symmetric multi-processing support"
+	---help---
+	  This enables support for systems with more than one CPU. If you have
+	  a system with only one CPU, say N. If you have a system with more
+	  than one CPU, say Y.  Note that the kernel does not currently
+	  support SMP machines with 603/603e/603ev or PPC750 ("G3") processors
+	  since they have inadequate hardware support for multiprocessor
+	  operation.
+
+	  If you say N here, the kernel will run on single and multiprocessor
+	  machines, but will use only one CPU of a multiprocessor machine. If
+	  you say Y here, the kernel will run on single-processor machines.
+	  On a single-processor machine, the kernel will run faster if you say
+	  N here.
+
+	  If you don't know what to do here, say N.
+
+config NR_CPUS
+	int "Maximum number of CPUs (2-128)"
+	range 2 128
+	depends on SMP
+	default "32" if PPC64
+	default "4"
+
+config NOT_COHERENT_CACHE
+	bool
+	depends on 4xx || 8xx || E200
+	default y
+
+config CHECK_CACHE_COHERENCY
+	bool
+
+endmenu
diff --git a/arch/powerpc/platforms/apus/Kconfig b/arch/powerpc/platforms/apus/Kconfig
deleted file mode 100644
index 6bde3bf..0000000
--- a/arch/powerpc/platforms/apus/Kconfig
+++ /dev/null
@@ -1,130 +0,0 @@
-
-config AMIGA
-	bool
-	depends on APUS
-	default y
-	help
-	  This option enables support for the Amiga series of computers.
-
-config ZORRO
-	bool
-	depends on APUS
-	default y
-	help
-	  This enables support for the Zorro bus in the Amiga. If you have
-	  expansion cards in your Amiga that conform to the Amiga
-	  AutoConfig(tm) specification, say Y, otherwise N. Note that even
-	  expansion cards that do not fit in the Zorro slots but fit in e.g.
-	  the CPU slot may fall in this category, so you have to say Y to let
-	  Linux use these.
-
-config ABSTRACT_CONSOLE
-	bool
-	depends on APUS
-	default y
-
-config APUS_FAST_EXCEPT
-	bool
-	depends on APUS
-	default y
-
-config AMIGA_PCMCIA
-	bool "Amiga 1200/600 PCMCIA support"
-	depends on APUS && EXPERIMENTAL
-	help
-	  Include support in the kernel for pcmcia on Amiga 1200 and Amiga
-	  600. If you intend to use pcmcia cards say Y; otherwise say N.
-
-config AMIGA_BUILTIN_SERIAL
-	tristate "Amiga builtin serial support"
-	depends on APUS
-	help
-	  If you want to use your Amiga's built-in serial port in Linux,
-	  answer Y.
-
-	  To compile this driver as a module, choose M here.
-
-config GVPIOEXT
-	tristate "GVP IO-Extender support"
-	depends on APUS
-	help
-	  If you want to use a GVP IO-Extender serial card in Linux, say Y.
-	  Otherwise, say N.
-
-config GVPIOEXT_LP
-	tristate "GVP IO-Extender parallel printer support"
-	depends on GVPIOEXT
-	help
-	  Say Y to enable driving a printer from the parallel port on your
-	  GVP IO-Extender card, N otherwise.
-
-config GVPIOEXT_PLIP
-	tristate "GVP IO-Extender PLIP support"
-	depends on GVPIOEXT
-	help
-	  Say Y to enable doing IP over the parallel port on your GVP
-	  IO-Extender card, N otherwise.
-
-config MULTIFACE_III_TTY
-	tristate "Multiface Card III serial support"
-	depends on APUS
-	help
-	  If you want to use a Multiface III card's serial port in Linux,
-	  answer Y.
-
-	  To compile this driver as a module, choose M here.
-
-config A2232
-	tristate "Commodore A2232 serial support (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && APUS
-	---help---
-	  This option supports the 2232 7-port serial card shipped with the
-	  Amiga 2000 and other Zorro-bus machines, dating from 1989.  At
-	  a max of 19,200 bps, the ports are served by a 6551 ACIA UART chip
-	  each, plus a 8520 CIA, and a master 6502 CPU and buffer as well. The
-	  ports were connected with 8 pin DIN connectors on the card bracket,
-	  for which 8 pin to DB25 adapters were supplied. The card also had
-	  jumpers internally to toggle various pinning configurations.
-
-	  This driver can be built as a module; but then "generic_serial"
-	  will also be built as a module. This has to be loaded before
-	  "ser_a2232". If you want to do this, answer M here.
-
-config WHIPPET_SERIAL
-	tristate "Hisoft Whippet PCMCIA serial support"
-	depends on AMIGA_PCMCIA
-	help
-	  HiSoft has a web page at <http://www.hisoft.co.uk/>, but there
-	  is no listing for the Whippet in their Amiga section.
-
-config APNE
-	tristate "PCMCIA NE2000 support"
-	depends on AMIGA_PCMCIA
-	help
-	  If you have a PCMCIA NE2000 compatible adapter, say Y.  Otherwise,
-	  say N.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called apne.
-
-config SERIAL_CONSOLE
-	bool "Support for serial port console"
-	depends on APUS && (AMIGA_BUILTIN_SERIAL=y || GVPIOEXT=y || MULTIFACE_III_TTY=y)
-
-config HEARTBEAT
-	bool "Use power LED as a heartbeat"
-	depends on APUS
-	help
-	  Use the power-on LED on your machine as a load meter.  The exact
-	  behavior is platform-dependent, but normally the flash frequency is
-	  a hyperbolic function of the 5-minute load average.
-
-config PROC_HARDWARE
-	bool "/proc/hardware support"
-	depends on APUS
-
-source "drivers/zorro/Kconfig"
-
-config PCI_PERMEDIA
-	bool "PCI for Permedia2"
-	depends on !4xx && !8xx && APUS
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
index 7fb92f2..9d7c2ef 100644
--- a/arch/powerpc/platforms/cell/io-workarounds.c
+++ b/arch/powerpc/platforms/cell/io-workarounds.c
@@ -102,7 +102,7 @@
 		vaddr = (unsigned long)PCI_FIX_ADDR(addr);
 
 		/* Check if it's in allowed range for  PIO */
-		if (vaddr < PHBS_IO_BASE || vaddr >= IMALLOC_BASE)
+		if (vaddr < PHB_IO_BASE || vaddr > PHB_IO_END)
 			return;
 
 		/* Try to find a PTE. If not, clear the paddr, we'll do
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index a7f5a76..96a8f60 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -31,6 +31,7 @@
 #include <linux/mm.h>
 #include <linux/io.h>
 #include <linux/mutex.h>
+#include <linux/linux_logo.h>
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
 #include <asm/xmon.h>
@@ -183,7 +184,7 @@
 		spu->slb_replace = 0;
 
 	spu_restart_dma(spu);
-
+	spu->stats.slb_flt++;
 	return 0;
 }
 
@@ -332,6 +333,7 @@
 	if (stat & 0x10) /* SPU mailbox threshold */
 		spu->wbox_callback(spu);
 
+	spu->stats.class2_intr++;
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -462,8 +464,18 @@
 }
 EXPORT_SYMBOL_GPL(spu_free);
 
+static int spu_shutdown(struct sys_device *sysdev)
+{
+	struct spu *spu = container_of(sysdev, struct spu, sysdev);
+
+	spu_free_irqs(spu);
+	spu_destroy_spu(spu);
+	return 0;
+}
+
 struct sysdev_class spu_sysdev_class = {
-	set_kset_name("spu")
+	set_kset_name("spu"),
+	.shutdown = spu_shutdown,
 };
 
 int spu_add_sysdev_attr(struct sysdev_attribute *attr)
@@ -574,6 +586,9 @@
 	spin_unlock_irqrestore(&spu_list_lock, flags);
 	mutex_unlock(&spu_mutex);
 
+	spu->stats.utilization_state = SPU_UTIL_IDLE;
+	spu->stats.tstamp = jiffies;
+
 	goto out;
 
 out_free_irqs:
@@ -586,6 +601,45 @@
 	return ret;
 }
 
+static const char *spu_state_names[] = {
+	"user", "system", "iowait", "idle"
+};
+
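+/*
+ * Return the time in milliseconds an SPU has spent in the given utilization
+ * state, including the time since the last state change if it is the
+ * current state.
+ */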
+static unsigned long long spu_acct_time(struct spu *spu,
+		enum spu_utilization_state state)
+{
+	unsigned long long time = spu->stats.times[state];
+
+	if (spu->stats.utilization_state == state)
+		time += jiffies - spu->stats.tstamp;
+
+	return jiffies_to_msecs(time);
+}
+
+
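+/*
+ * sysfs "stat" file: current utilization state followed by the accumulated
+ * user/system/iowait/idle times in milliseconds and the context-switch,
+ * fault, interrupt and library-assist counters.
+ */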
+static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
+{
+	struct spu *spu = container_of(sysdev, struct spu, sysdev);
+
+	return sprintf(buf, "%s %llu %llu %llu %llu "
+		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
+		spu_state_names[spu->stats.utilization_state],
+		spu_acct_time(spu, SPU_UTIL_USER),
+		spu_acct_time(spu, SPU_UTIL_SYSTEM),
+		spu_acct_time(spu, SPU_UTIL_IOWAIT),
+		spu_acct_time(spu, SPU_UTIL_IDLE),
+		spu->stats.vol_ctx_switch,
+		spu->stats.invol_ctx_switch,
+		spu->stats.slb_flt,
+		spu->stats.hash_flt,
+		spu->stats.min_flt,
+		spu->stats.maj_flt,
+		spu->stats.class2_intr,
+		spu->stats.libassist);
+}
+
+static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
+
 static int __init init_spu_base(void)
 {
 	int i, ret = 0;
@@ -603,14 +657,28 @@
 
 	ret = spu_enumerate_spus(create_spu);
 
-	if (ret) {
+	if (ret < 0) {
 		printk(KERN_WARNING "%s: Error initializing spus\n",
 			__FUNCTION__);
 		goto out_unregister_sysdev_class;
 	}
 
+	if (ret > 0) {
+		/*
+		 * We cannot put the forward declaration in
+		 * <linux/linux_logo.h> because it causes section type
+		 * conflicts for const and __initdata with different compiler
+		 * versions.
+		 */
+		extern const struct linux_logo logo_spe_clut224;
+
+		fb_append_extra_logo(&logo_spe_clut224, ret);
+	}
+
 	xmon_register_spus(&spu_full_list);
 
+	spu_add_sysdev_attr(&attr_stat);
+
 	return 0;
 
  out_unregister_sysdev_class:
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index 1d4562a..75ed50f 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -279,6 +279,7 @@
 {
 	int ret;
 	struct device_node *node;
+	unsigned int n = 0;
 
 	ret = -ENODEV;
 	for (node = of_find_node_by_type(NULL, "spe");
@@ -289,8 +290,9 @@
 				__FUNCTION__, node->name);
 			break;
 		}
+		n++;
 	}
-	return ret;
+	return ret ? ret : n;
 }
 
 static int __init of_create_spu(struct spu *spu, void *data)
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index d32db9f..07a0e81 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -320,6 +320,12 @@
 	/* FIXME: what are the side-effects of this? */
 	prob->dma_querymask_RW = mask;
 	prob->dma_querytype_RW = mode;
+	/* In the current implementation, the SPU context is always
+	 * acquired in runnable state when new bits are added to the
+	 * mask (tagwait), so it's sufficient just to mask
+	 * dma_tagstatus_R with the 'mask' parameter here.
+	 */
+	ctx->csa.prob.dma_tagstatus_R &= mask;
 out:
 	spin_unlock(&ctx->csa.register_lock);
 
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 7c51cb5..6d7bd60 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -23,10 +23,14 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <asm/atomic.h>
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
 #include "spufs.h"
 
+
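+/* number of contexts currently alive, reported through /proc/spu_loadavg */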
+atomic_t nr_spu_contexts = ATOMIC_INIT(0);
+
 struct spu_context *alloc_spu_context(struct spu_gang *gang)
 {
 	struct spu_context *ctx;
@@ -53,10 +57,12 @@
 	INIT_LIST_HEAD(&ctx->rq);
 	if (gang)
 		spu_gang_add_ctx(gang, ctx);
-	ctx->rt_priority = current->rt_priority;
-	ctx->policy = current->policy;
-	ctx->prio = current->prio;
-	INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
+	ctx->cpus_allowed = current->cpus_allowed;
+	spu_set_timeslice(ctx);
+	ctx->stats.execution_state = SPUCTX_UTIL_USER;
+	ctx->stats.tstamp = jiffies;
+
+	atomic_inc(&nr_spu_contexts);
 	goto out;
 out_free:
 	kfree(ctx);
@@ -76,6 +82,7 @@
 	if (ctx->gang)
 		spu_gang_remove_ctx(ctx->gang, ctx);
 	BUG_ON(!list_empty(&ctx->rq));
+	atomic_dec(&nr_spu_contexts);
 	kfree(ctx);
 }
 
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index 0f75c07..07f88de 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -33,7 +33,8 @@
  * function. Currently, there are a few corner cases that we haven't had
  * to handle fortunately.
  */
-static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, unsigned long dsisr)
+static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+		unsigned long dsisr, unsigned *flt)
 {
 	struct vm_area_struct *vma;
 	unsigned long is_write;
@@ -73,22 +74,21 @@
 			goto bad_area;
 	}
 	ret = 0;
-	switch (handle_mm_fault(mm, vma, ea, is_write)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		ret = -EFAULT;
-		goto bad_area;
-	case VM_FAULT_OOM:
-		ret = -ENOMEM;
-		goto bad_area;
-	default:
+	fault = handle_mm_fault(mm, vma, ea, is_write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM) {
+			ret = -ENOMEM;
+			goto bad_area;
+		} else if (fault & VM_FAULT_SIGBUS) {
+			ret = -EFAULT;
+			goto bad_area;
+		}
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	up_read(&mm->mmap_sem);
 	return ret;
 
@@ -153,6 +153,7 @@
 {
 	u64 ea, dsisr, access;
 	unsigned long flags;
+	unsigned flt = 0;
 	int ret;
 
 	/*
@@ -178,9 +179,17 @@
 	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
 		return 0;
 
+	spuctx_switch_state(ctx, SPUCTX_UTIL_IOWAIT);
+
 	pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
 		dsisr, ctx->state);
 
+	ctx->stats.hash_flt++;
+	if (ctx->state == SPU_STATE_RUNNABLE) {
+		ctx->spu->stats.hash_flt++;
+		spu_switch_state(ctx->spu, SPU_UTIL_IOWAIT);
+	}
+
 	/* we must not hold the lock when entering spu_handle_mm_fault */
 	spu_release(ctx);
 
@@ -192,7 +201,7 @@
 
 	/* hashing failed, so try the actual fault handler */
 	if (ret)
-		ret = spu_handle_mm_fault(current->mm, ea, dsisr);
+		ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
 
 	spu_acquire(ctx);
 	/*
@@ -201,11 +210,23 @@
 	 * In case of unhandled error report the problem to user space.
 	 */
 	if (!ret) {
+		if (flt == VM_FAULT_MINOR)
+			ctx->stats.min_flt++;
+		else
+			ctx->stats.maj_flt++;
+		if (ctx->state == SPU_STATE_RUNNABLE) {
+			if (flt == VM_FAULT_MINOR)
+				ctx->spu->stats.min_flt++;
+			else
+				ctx->spu->stats.maj_flt++;
+		}
+
 		if (ctx->spu)
 			ctx->ops->restart_dma(ctx);
 	} else
 		spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
 
+	spuctx_switch_state(ctx, SPUCTX_UTIL_SYSTEM);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(spufs_handle_class1);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index b1e7e2f..c2814ea 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -28,6 +28,7 @@
 #include <linux/pagemap.h>
 #include <linux/poll.h>
 #include <linux/ptrace.h>
+#include <linux/seq_file.h>
 
 #include <asm/io.h>
 #include <asm/semaphore.h>
@@ -39,6 +40,7 @@
 
 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
 
+
 static int
 spufs_mem_open(struct inode *inode, struct file *file)
 {
@@ -216,12 +218,12 @@
 #endif /* CONFIG_SPU_FS_64K_LS */
 
 static const struct file_operations spufs_mem_fops = {
-	.open	 		= spufs_mem_open,
-	.release 		= spufs_mem_release,
-	.read   		= spufs_mem_read,
-	.write   		= spufs_mem_write,
-	.llseek  		= generic_file_llseek,
-	.mmap    		= spufs_mem_mmap,
+	.open			= spufs_mem_open,
+	.release		= spufs_mem_release,
+	.read			= spufs_mem_read,
+	.write			= spufs_mem_write,
+	.llseek			= generic_file_llseek,
+	.mmap			= spufs_mem_mmap,
 #ifdef CONFIG_SPU_FS_64K_LS
 	.get_unmapped_area	= spufs_get_unmapped_area,
 #endif
@@ -1497,14 +1499,15 @@
 		if (status)
 			ret = status;
 	}
-	spu_release(ctx);
 
 	if (ret)
-		goto out;
+		goto out_unlock;
 
 	ctx->tagwait |= 1 << cmd.tag;
 	ret = size;
 
+out_unlock:
+	spu_release(ctx);
 out:
 	return ret;
 }
@@ -1515,14 +1518,14 @@
 	u32 free_elements, tagstatus;
 	unsigned int mask;
 
+	poll_wait(file, &ctx->mfc_wq, wait);
+
 	spu_acquire(ctx);
 	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
 	free_elements = ctx->ops->get_mfc_free_elements(ctx);
 	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
 	spu_release(ctx);
 
-	poll_wait(file, &ctx->mfc_wq, wait);
-
 	mask = 0;
 	if (free_elements & 0xffff)
 		mask |= POLLOUT | POLLWRNORM;
@@ -1797,6 +1800,29 @@
 	return 0;
 }
 
+static int spufs_caps_show(struct seq_file *s, void *private)
+{
+	struct spu_context *ctx = s->private;
+
+	if (!(ctx->flags & SPU_CREATE_NOSCHED))
+		seq_puts(s, "sched\n");
+	if (!(ctx->flags & SPU_CREATE_ISOLATE))
+		seq_puts(s, "step\n");
+	return 0;
+}
+
+static int spufs_caps_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
+}
+
+static const struct file_operations spufs_caps_fops = {
+	.open		= spufs_caps_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
 			char __user *buf, size_t len, loff_t *pos)
 {
@@ -2014,7 +2040,105 @@
 	.read = spufs_proxydma_info_read,
 };
 
+static int spufs_show_tid(struct seq_file *s, void *private)
+{
+	struct spu_context *ctx = s->private;
+
+	seq_printf(s, "%d\n", ctx->tid);
+	return 0;
+}
+
+static int spufs_tid_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
+}
+
+static const struct file_operations spufs_tid_fops = {
+	.open		= spufs_tid_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static const char *ctx_state_names[] = {
+	"user", "system", "iowait", "loaded"
+};
+
+static unsigned long long spufs_acct_time(struct spu_context *ctx,
+		enum spuctx_execution_state state)
+{
+	unsigned long time = ctx->stats.times[state];
+
+	if (ctx->stats.execution_state == state)
+		time += jiffies - ctx->stats.tstamp;
+
+	return jiffies_to_msecs(time);
+}
+
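+/*
+ * SLB faults and class 2 interrupts are folded into the context statistics
+ * only at unbind time, so while the context is loaded the delta against
+ * the per-SPU counters sampled at bind time has to be added on top.
+ */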
+static unsigned long long spufs_slb_flts(struct spu_context *ctx)
+{
+	unsigned long long slb_flts = ctx->stats.slb_flt;
+
+	if (ctx->state == SPU_STATE_RUNNABLE) {
+		slb_flts += (ctx->spu->stats.slb_flt -
+			     ctx->stats.slb_flt_base);
+	}
+
+	return slb_flts;
+}
+
+static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
+{
+	unsigned long long class2_intrs = ctx->stats.class2_intr;
+
+	if (ctx->state == SPU_STATE_RUNNABLE) {
+		class2_intrs += (ctx->spu->stats.class2_intr -
+				 ctx->stats.class2_intr_base);
+	}
+
+	return class2_intrs;
+}
+
+
+static int spufs_show_stat(struct seq_file *s, void *private)
+{
+	struct spu_context *ctx = s->private;
+
+	spu_acquire(ctx);
+	seq_printf(s, "%s %llu %llu %llu %llu "
+		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
+		ctx_state_names[ctx->stats.execution_state],
+		spufs_acct_time(ctx, SPUCTX_UTIL_USER),
+		spufs_acct_time(ctx, SPUCTX_UTIL_SYSTEM),
+		spufs_acct_time(ctx, SPUCTX_UTIL_IOWAIT),
+		spufs_acct_time(ctx, SPUCTX_UTIL_LOADED),
+		ctx->stats.vol_ctx_switch,
+		ctx->stats.invol_ctx_switch,
+		spufs_slb_flts(ctx),
+		ctx->stats.hash_flt,
+		ctx->stats.min_flt,
+		ctx->stats.maj_flt,
+		spufs_class2_intrs(ctx),
+		ctx->stats.libassist);
+	spu_release(ctx);
+	return 0;
+}
+
+static int spufs_stat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
+}
+
+static const struct file_operations spufs_stat_fops = {
+	.open		= spufs_stat_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+
 struct tree_descr spufs_dir_contents[] = {
+	{ "capabilities", &spufs_caps_fops, 0444, },
 	{ "mem",  &spufs_mem_fops,  0666, },
 	{ "regs", &spufs_regs_fops,  0666, },
 	{ "mbox", &spufs_mbox_fops, 0444, },
@@ -2046,10 +2170,13 @@
 	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
 	{ "dma_info", &spufs_dma_info_fops, 0444, },
 	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
+	{ "tid", &spufs_tid_fops, 0444, },
+	{ "stat", &spufs_stat_fops, 0444, },
 	{},
 };
 
 struct tree_descr spufs_dir_nosched_contents[] = {
+	{ "capabilities", &spufs_caps_fops, 0444, },
 	{ "mem",  &spufs_mem_fops,  0666, },
 	{ "mbox", &spufs_mbox_fops, 0444, },
 	{ "ibox", &spufs_ibox_fops, 0444, },
@@ -2068,6 +2195,8 @@
 	{ "psmap", &spufs_psmap_fops, 0666, },
 	{ "phys-id", &spufs_id_ops, 0666, },
 	{ "object-id", &spufs_object_id_ops, 0666, },
+	{ "tid", &spufs_tid_fops, 0444, },
+	{ "stat", &spufs_stat_fops, 0444, },
 	{},
 };
 
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 9807206..f37460e 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -232,10 +232,6 @@
 	return dcache_dir_close(inode, file);
 }
 
-const struct inode_operations spufs_dir_inode_operations = {
-	.lookup = simple_lookup,
-};
-
 const struct file_operations spufs_context_fops = {
 	.open		= dcache_dir_open,
 	.release	= spufs_dir_close,
@@ -269,7 +265,7 @@
 		goto out_iput;
 
 	ctx->flags = flags;
-	inode->i_op = &spufs_dir_inode_operations;
+	inode->i_op = &simple_dir_inode_operations;
 	inode->i_fop = &simple_dir_operations;
 	if (flags & SPU_CREATE_NOSCHED)
 		ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
@@ -386,7 +382,7 @@
 	if (!gang)
 		goto out_iput;
 
-	inode->i_op = &spufs_dir_inode_operations;
+	inode->i_op = &simple_dir_inode_operations;
 	inode->i_fop = &simple_dir_operations;
 
 	d_instantiate(dentry, inode);
@@ -593,7 +589,7 @@
 	if (!inode)
 		goto out;
 
-	inode->i_op = &spufs_dir_inode_operations;
+	inode->i_op = &simple_dir_inode_operations;
 	inode->i_fop = &simple_dir_operations;
 	SPUFS_I(inode)->i_ctx = NULL;
 
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 5762660..58ae13b 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -29,7 +29,8 @@
 	spu = ctx->spu;
 	pte_fault = spu->dsisr &
 	    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
-	return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
+	return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
+		1 : 0;
 }
 
 static int spu_setup_isolated(struct spu_context *ctx)
@@ -142,8 +143,11 @@
 			runcntl = SPU_RUNCNTL_RUNNABLE;
 		ctx->ops->runcntl_write(ctx, runcntl);
 	} else {
-		spu_start_tick(ctx);
+		unsigned long mode = SPU_PRIVCNTL_MODE_NORMAL;
 		ctx->ops->npc_write(ctx, *npc);
+		if (test_thread_flag(TIF_SINGLESTEP))
+			mode = SPU_PRIVCNTL_MODE_SINGLE_STEP;
+		out_be64(&ctx->spu->priv2->spu_privcntl_RW, mode);
 		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 	}
 
@@ -155,7 +159,6 @@
 {
 	int ret = 0;
 
-	spu_stop_tick(ctx);
 	*status = ctx->ops->status_read(ctx);
 	*npc = ctx->ops->npc_read(ctx);
 	spu_release(ctx);
@@ -298,9 +301,22 @@
 	ctx->ops->master_start(ctx);
 	ctx->event_return = 0;
 
-	ret = spu_acquire_runnable(ctx, 0);
-	if (ret)
-		return ret;
+	spu_acquire(ctx);
+	if (ctx->state == SPU_STATE_SAVED) {
+		__spu_update_sched_info(ctx);
+
+		ret = spu_activate(ctx, 0);
+		if (ret) {
+			spu_release(ctx);
+			goto out;
+		}
+	} else {
+		/*
+		 * We have to update the scheduling priority under active_mutex
+		 * to protect against find_victim().
+		 */
+		spu_update_sched_info(ctx);
+	}
 
 	ret = spu_run_init(ctx, npc);
 	if (ret) {
@@ -325,16 +341,20 @@
 
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
 			ret = spu_reacquire_runnable(ctx, npc, &status);
-			if (ret) {
-				spu_stop_tick(ctx);
+			if (ret)
 				goto out2;
-			}
 			continue;
 		}
 		ret = spu_process_events(ctx);
 
 	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
-				      SPU_STATUS_STOPPED_BY_HALT)));
+				      SPU_STATUS_STOPPED_BY_HALT |
+				       SPU_STATUS_SINGLE_STEP)));
+
+	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
+	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100) &&
+	    (ctx->state == SPU_STATE_RUNNABLE))
+		ctx->stats.libassist++;
 
 	ctx->ops->master_stop(ctx);
 	ret = spu_run_fini(ctx, npc, &status);
@@ -344,10 +364,15 @@
 	if ((ret == 0) ||
 	    ((ret == -ERESTARTSYS) &&
 	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
+	      (status & SPU_STATUS_SINGLE_STEP) ||
 	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
 	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
 		ret = status;
 
+	/* Note: we don't need to force_sig SIGTRAP on single-step
+	 * since we have TIF_SINGLESTEP set, thus the kernel will do
+	 * it upon return from the syscall anyway.
+	 */
 	if ((status & SPU_STATUS_STOPPED_BY_STOP)
 	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
 		force_sig(SIGTRAP, current);
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 3b831e0..e5b4dd1 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -35,6 +35,10 @@
 #include <linux/numa.h>
 #include <linux/mutex.h>
 #include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/pid_namespace.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 
 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -43,54 +47,126 @@
 #include <asm/spu_priv1.h>
 #include "spufs.h"
 
-#define SPU_TIMESLICE	(HZ)
-
 struct spu_prio_array {
 	DECLARE_BITMAP(bitmap, MAX_PRIO);
 	struct list_head runq[MAX_PRIO];
 	spinlock_t runq_lock;
 	struct list_head active_list[MAX_NUMNODES];
 	struct mutex active_mutex[MAX_NUMNODES];
+	int nr_active[MAX_NUMNODES];
+	int nr_waiting;
 };
 
+static unsigned long spu_avenrun[3];
 static struct spu_prio_array *spu_prio;
-static struct workqueue_struct *spu_sched_wq;
+static struct task_struct *spusched_task;
+static struct timer_list spusched_timer;
 
-static inline int node_allowed(int node)
+/*
+ * Priority of a normal, non-rt, non-niced process (aka nice level 0).
+ */
+#define NORMAL_PRIO		120
+
+/*
+ * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
+ * tick for every 10 CPU scheduler ticks.
+ */
+#define SPUSCHED_TICK		(10)
+
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
+ * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ */
+#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
+#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))
+
+#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
+#define SCALE_PRIO(x, prio) \
+	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
+
+/*
+ * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
+ * [800ms ... 100ms ... 5ms]
+ *
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
+ */
+void spu_set_timeslice(struct spu_context *ctx)
 {
-	cpumask_t mask;
-
-	if (!nr_cpus_node(node))
-		return 0;
-	mask = node_to_cpumask(node);
-	if (!cpus_intersects(mask, current->cpus_allowed))
-		return 0;
-	return 1;
+	if (ctx->prio < NORMAL_PRIO)
+		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
+	else
+		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
 }
 
-void spu_start_tick(struct spu_context *ctx)
+/*
+ * Update scheduling information from the owning thread.
+ */
+void __spu_update_sched_info(struct spu_context *ctx)
 {
-	if (ctx->policy == SCHED_RR) {
-		/*
-		 * Make sure the exiting bit is cleared.
-		 */
-		clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
-		mb();
-		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
-	}
+	/*
+	 * 32-bit assignments are atomic on powerpc, and we don't care about
+	 * memory ordering here because retrieving the controlling thread is
+	 * by definition racy.
+	 */
+	ctx->tid = current->pid;
+
+	/*
+	 * We do our own priority calculations, so we normally want
+	 * ->static_prio to start with. Unfortunately this field
+	 * contains junk for threads with a realtime scheduling
+	 * policy, so we have to look at ->prio in this case.
+	 */
+	if (rt_prio(current->prio))
+		ctx->prio = current->prio;
+	else
+		ctx->prio = current->static_prio;
+	ctx->policy = current->policy;
+
+	/*
+	 * A lot of places that don't hold active_mutex poke into
+	 * cpus_allowed, including grab_runnable_context, which
+	 * already holds the runq_lock.  So abuse runq_lock
+	 * to protect this field as well.
+	 */
+	spin_lock(&spu_prio->runq_lock);
+	ctx->cpus_allowed = current->cpus_allowed;
+	spin_unlock(&spu_prio->runq_lock);
 }
 
-void spu_stop_tick(struct spu_context *ctx)
+void spu_update_sched_info(struct spu_context *ctx)
 {
-	if (ctx->policy == SCHED_RR) {
-		/*
-		 * While the work can be rearming normally setting this flag
-		 * makes sure it does not rearm itself anymore.
-		 */
-		set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
-		mb();
-		cancel_delayed_work(&ctx->sched_work);
+	int node = ctx->spu->node;
+
+	mutex_lock(&spu_prio->active_mutex[node]);
+	__spu_update_sched_info(ctx);
+	mutex_unlock(&spu_prio->active_mutex[node]);
+}
+
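+/*
+ * Check whether the context may run on CPUs of the given NUMA node.
+ * __node_allowed() expects the caller to hold runq_lock, which protects
+ * ctx->cpus_allowed; node_allowed() takes the lock itself.
+ */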
+static int __node_allowed(struct spu_context *ctx, int node)
+{
+	if (nr_cpus_node(node)) {
+		cpumask_t mask = node_to_cpumask(node);
+
+		if (cpus_intersects(mask, ctx->cpus_allowed))
+			return 1;
 	}
+
+	return 0;
+}
+
+static int node_allowed(struct spu_context *ctx, int node)
+{
+	int rval;
+
+	spin_lock(&spu_prio->runq_lock);
+	rval = __node_allowed(ctx, node);
+	spin_unlock(&spu_prio->runq_lock);
+
+	return rval;
 }
 
 /**
@@ -99,9 +175,18 @@
  */
 static void spu_add_to_active_list(struct spu *spu)
 {
-	mutex_lock(&spu_prio->active_mutex[spu->node]);
-	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
-	mutex_unlock(&spu_prio->active_mutex[spu->node]);
+	int node = spu->node;
+
+	mutex_lock(&spu_prio->active_mutex[node]);
+	spu_prio->nr_active[node]++;
+	list_add_tail(&spu->list, &spu_prio->active_list[node]);
+	mutex_unlock(&spu_prio->active_mutex[node]);
+}
+
+static void __spu_remove_from_active_list(struct spu *spu)
+{
+	list_del_init(&spu->list);
+	spu_prio->nr_active[spu->node]--;
 }
 
 /**
@@ -113,7 +198,7 @@
 	int node = spu->node;
 
 	mutex_lock(&spu_prio->active_mutex[node]);
-	list_del_init(&spu->list);
+	__spu_remove_from_active_list(spu);
 	mutex_unlock(&spu_prio->active_mutex[node]);
 }
 
@@ -144,6 +229,10 @@
 {
 	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
 		 spu->number, spu->node);
+
+	ctx->stats.slb_flt_base = spu->stats.slb_flt;
+	ctx->stats.class2_intr_base = spu->stats.class2_intr;
+
 	spu->ctx = ctx;
 	spu->flags = 0;
 	ctx->spu = spu;
@@ -161,8 +250,8 @@
 	spu->timestamp = jiffies;
 	spu_cpu_affinity_set(spu, raw_smp_processor_id());
 	spu_switch_notify(spu, ctx);
-	spu_add_to_active_list(spu);
 	ctx->state = SPU_STATE_RUNNABLE;
+	spu_switch_state(spu, SPU_UTIL_SYSTEM);
 }
 
 /**
@@ -175,7 +264,8 @@
 	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
 		 spu->pid, spu->number, spu->node);
 
-	spu_remove_from_active_list(spu);
+	spu_switch_state(spu, SPU_UTIL_IDLE);
+
 	spu_switch_notify(spu, NULL);
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
@@ -192,6 +282,11 @@
 	ctx->spu = NULL;
 	spu->flags = 0;
 	spu->ctx = NULL;
+
+	ctx->stats.slb_flt +=
+		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
+	ctx->stats.class2_intr +=
+		(spu->stats.class2_intr - ctx->stats.class2_intr_base);
 }
 
 /**
@@ -200,20 +295,39 @@
  */
 static void __spu_add_to_rq(struct spu_context *ctx)
 {
-	int prio = ctx->prio;
-
-	list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
-	set_bit(prio, spu_prio->bitmap);
+	/*
+	 * Unfortunately this code path can be called from multiple threads
+	 * on behalf of a single context due to the way the problem state
+	 * mmap support works.
+	 *
+	 * Fortunately we need to wake up all these threads at the same time
+	 * and can simply skip the runqueue addition for all but the first
+	 * thread entering this codepath.
+	 *
+	 * It's still quite hacky, and long-term we should proxy all other
+	 * threads through the owner thread so that spu_run is in control
+	 * of all the scheduling activity for a given context.
+	 */
+	if (list_empty(&ctx->rq)) {
+		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
+		set_bit(ctx->prio, spu_prio->bitmap);
+		if (!spu_prio->nr_waiting++)
+			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+	}
 }
 
 static void __spu_del_from_rq(struct spu_context *ctx)
 {
 	int prio = ctx->prio;
 
-	if (!list_empty(&ctx->rq))
+	if (!list_empty(&ctx->rq)) {
+		if (!--spu_prio->nr_waiting)
+			del_timer(&spusched_timer);
 		list_del_init(&ctx->rq);
-	if (list_empty(&spu_prio->runq[prio]))
-		clear_bit(prio, spu_prio->bitmap);
+
+		if (list_empty(&spu_prio->runq[prio]))
+			clear_bit(prio, spu_prio->bitmap);
+	}
 }
 
 static void spu_prio_wait(struct spu_context *ctx)
@@ -244,7 +358,7 @@
 
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 		node = (node < MAX_NUMNODES) ? node : 0;
-		if (!node_allowed(node))
+		if (!node_allowed(ctx, node))
 			continue;
 		spu = spu_alloc_node(node);
 		if (spu)
@@ -276,15 +390,15 @@
 	node = cpu_to_node(raw_smp_processor_id());
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 		node = (node < MAX_NUMNODES) ? node : 0;
-		if (!node_allowed(node))
+		if (!node_allowed(ctx, node))
 			continue;
 
 		mutex_lock(&spu_prio->active_mutex[node]);
 		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
 			struct spu_context *tmp = spu->ctx;
 
-			if (tmp->rt_priority < ctx->rt_priority &&
-			    (!victim || tmp->rt_priority < victim->rt_priority))
+			if (tmp->prio > ctx->prio &&
+			    (!victim || tmp->prio > victim->prio))
 				victim = spu->ctx;
 		}
 		mutex_unlock(&spu_prio->active_mutex[node]);
@@ -312,7 +426,10 @@
 				victim = NULL;
 				goto restart;
 			}
+			spu_remove_from_active_list(spu);
 			spu_unbind_context(spu, victim);
+			victim->stats.invol_ctx_switch++;
+			spu->stats.invol_ctx_switch++;
 			mutex_unlock(&victim->state_mutex);
 			/*
 			 * We need to break out of the wait loop in spu_run
@@ -338,22 +455,30 @@
  */
 int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
-
-	if (ctx->spu)
-		return 0;
+	spuctx_switch_state(ctx, SPUCTX_UTIL_SYSTEM);
 
 	do {
 		struct spu *spu;
 
+		/*
+		 * If there are multiple threads waiting for a single context
+		 * only one actually binds the context while the others will
+		 * only be able to acquire the state_mutex once the context
+		 * already is in runnable state.
+		 */
+		if (ctx->spu)
+			return 0;
+
 		spu = spu_get_idle(ctx);
 		/*
 		 * If this is a realtime thread we try to get it running by
 		 * preempting a lower priority thread.
 		 */
-		if (!spu && ctx->rt_priority)
+		if (!spu && rt_prio(ctx->prio))
 			spu = find_victim(ctx);
 		if (spu) {
 			spu_bind_context(spu, ctx);
+			spu_add_to_active_list(spu);
 			return 0;
 		}
 
@@ -369,23 +494,28 @@
  * Remove the highest priority context on the runqueue and return it
  * to the caller.  Returns %NULL if no runnable context was found.
  */
-static struct spu_context *grab_runnable_context(int prio)
+static struct spu_context *grab_runnable_context(int prio, int node)
 {
-	struct spu_context *ctx = NULL;
+	struct spu_context *ctx;
 	int best;
 
 	spin_lock(&spu_prio->runq_lock);
 	best = sched_find_first_bit(spu_prio->bitmap);
-	if (best < prio) {
+	while (best < prio) {
 		struct list_head *rq = &spu_prio->runq[best];
 
-		BUG_ON(list_empty(rq));
-
-		ctx = list_entry(rq->next, struct spu_context, rq);
-		__spu_del_from_rq(ctx);
+		list_for_each_entry(ctx, rq, rq) {
+			/* XXX(hch): check for affinity here as well */
+			if (__node_allowed(ctx, node)) {
+				__spu_del_from_rq(ctx);
+				goto found;
+			}
+		}
+		best++;
 	}
+	ctx = NULL;
+ found:
 	spin_unlock(&spu_prio->runq_lock);
-
 	return ctx;
 }
 
@@ -395,9 +525,12 @@
 	struct spu_context *new = NULL;
 
 	if (spu) {
-		new = grab_runnable_context(max_prio);
+		new = grab_runnable_context(max_prio, spu->node);
 		if (new || force) {
+			spu_remove_from_active_list(spu);
 			spu_unbind_context(spu, ctx);
+			ctx->stats.vol_ctx_switch++;
+			spu->stats.vol_ctx_switch++;
 			spu_free(spu);
 			if (new)
 				wake_up(&new->stop_wq);
@@ -417,7 +550,17 @@
  */
 void spu_deactivate(struct spu_context *ctx)
 {
+	/*
+	 * We must never reach this for a nosched context,
+	 * but handle the case gracefully instead of panicking.
+	 */
+	if (ctx->flags & SPU_CREATE_NOSCHED) {
+		WARN_ON(1);
+		return;
+	}
+
 	__spu_deactivate(ctx, 1, MAX_PRIO);
+	spuctx_switch_state(ctx, SPUCTX_UTIL_USER);
 }
 
 /**
@@ -432,56 +575,178 @@
 {
 	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
 		mutex_lock(&ctx->state_mutex);
-		__spu_deactivate(ctx, 0, MAX_PRIO);
+		if (__spu_deactivate(ctx, 0, MAX_PRIO))
+			spuctx_switch_state(ctx, SPUCTX_UTIL_USER);
+		else {
+			spuctx_switch_state(ctx, SPUCTX_UTIL_LOADED);
+			spu_switch_state(ctx->spu, SPU_UTIL_USER);
+		}
 		mutex_unlock(&ctx->state_mutex);
 	}
 }
 
-void spu_sched_tick(struct work_struct *work)
+static void spusched_tick(struct spu_context *ctx)
 {
-	struct spu_context *ctx =
-		container_of(work, struct spu_context, sched_work.work);
-	int preempted;
-
-	/*
-	 * If this context is being stopped avoid rescheduling from the
-	 * scheduler tick because we would block on the state_mutex.
-	 * The caller will yield the spu later on anyway.
-	 */
-	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
+	if (ctx->flags & SPU_CREATE_NOSCHED)
+		return;
+	if (ctx->policy == SCHED_FIFO)
 		return;
 
-	mutex_lock(&ctx->state_mutex);
-	preempted = __spu_deactivate(ctx, 0, ctx->prio + 1);
-	mutex_unlock(&ctx->state_mutex);
+	if (--ctx->time_slice)
+		return;
 
-	if (preempted) {
-		/*
-		 * We need to break out of the wait loop in spu_run manually
-		 * to ensure this context gets put on the runqueue again
-		 * ASAP.
-		 */
-		wake_up(&ctx->stop_wq);
+	/*
+	 * Unfortunately active_mutex ranks outside of state_mutex, so
+	 * we have to trylock here.  If we fail, give the context another
+	 * tick and try again.
+	 */
+	if (mutex_trylock(&ctx->state_mutex)) {
+		struct spu *spu = ctx->spu;
+		struct spu_context *new;
+
+		new = grab_runnable_context(ctx->prio + 1, spu->node);
+		if (new) {
+
+			__spu_remove_from_active_list(spu);
+			spu_unbind_context(spu, ctx);
+			ctx->stats.invol_ctx_switch++;
+			spu->stats.invol_ctx_switch++;
+			spu_free(spu);
+			wake_up(&new->stop_wq);
+			/*
+			 * We need to break out of the wait loop in
+			 * spu_run manually to ensure this context
+			 * gets put on the runqueue again ASAP.
+			 */
+			wake_up(&ctx->stop_wq);
+		}
+		spu_set_timeslice(ctx);
+		mutex_unlock(&ctx->state_mutex);
 	} else {
-		spu_start_tick(ctx);
+		ctx->time_slice++;
 	}
 }
 
+/**
+ * count_active_contexts - count nr of active tasks
+ *
+ * Return the number of tasks currently running or waiting to run.
+ *
+ * Note that we don't take runq_lock / active_mutex here.  Reading
+ * a single 32-bit value is atomic on powerpc, and we don't care
+ * about memory ordering issues here.
+ */
+static unsigned long count_active_contexts(void)
+{
+	int nr_active = 0, node;
+
+	for (node = 0; node < MAX_NUMNODES; node++)
+		nr_active += spu_prio->nr_active[node];
+	nr_active += spu_prio->nr_waiting;
+
+	return nr_active;
+}
+
+/**
+ * spu_calc_load - given tick count, update the avenrun load estimates.
+ * @tick:	tick count
+ *
+ * No locking against reading these values from userspace, as for
+ * the CPU loadavg code.
+ */
+static void spu_calc_load(unsigned long ticks)
+{
+	unsigned long active_tasks; /* fixed-point */
+	static int count = LOAD_FREQ;
+
+	count -= ticks;
+
+	if (unlikely(count < 0)) {
+		active_tasks = count_active_contexts() * FIXED_1;
+		do {
+			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
+			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
+			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
+			count += LOAD_FREQ;
+		} while (count < 0);
+	}
+}
+
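+/*
+ * Timer callback: rearm the scheduler tick, wake the spusched thread and
+ * fold the elapsed ticks into the SPU load average.
+ */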
+static void spusched_wake(unsigned long data)
+{
+	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+	wake_up_process(spusched_task);
+	spu_calc_load(SPUSCHED_TICK);
+}
+
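+/*
+ * Scheduler thread main loop: sleep until woken by the tick timer, then
+ * run spusched_tick() on every context currently bound to an SPU.
+ */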
+static int spusched_thread(void *unused)
+{
+	struct spu *spu, *next;
+	int node;
+
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+		for (node = 0; node < MAX_NUMNODES; node++) {
+			mutex_lock(&spu_prio->active_mutex[node]);
+			list_for_each_entry_safe(spu, next,
+						 &spu_prio->active_list[node],
+						 list)
+				spusched_tick(spu->ctx);
+			mutex_unlock(&spu_prio->active_mutex[node]);
+		}
+	}
+
+	return 0;
+}
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
+
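+/*
+ * /proc/spu_loadavg uses the same layout as /proc/loadavg, but counts
+ * runnable SPU contexts instead of CPU tasks.
+ */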
+static int show_spu_loadavg(struct seq_file *s, void *private)
+{
+	int a, b, c;
+
+	a = spu_avenrun[0] + (FIXED_1/200);
+	b = spu_avenrun[1] + (FIXED_1/200);
+	c = spu_avenrun[2] + (FIXED_1/200);
+
+	/*
+	 * Note that last_pid doesn't really make much sense for the
+	 * SPU loadavg (it even seems very odd on the CPU side..),
+	 * but we include it here to have a 100% compatible interface.
+	 */
+	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
+		LOAD_INT(a), LOAD_FRAC(a),
+		LOAD_INT(b), LOAD_FRAC(b),
+		LOAD_INT(c), LOAD_FRAC(c),
+		count_active_contexts(),
+		atomic_read(&nr_spu_contexts),
+		current->nsproxy->pid_ns->last_pid);
+	return 0;
+}
+
+static int spu_loadavg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_spu_loadavg, NULL);
+}
+
+static const struct file_operations spu_loadavg_fops = {
+	.open		= spu_loadavg_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 int __init spu_sched_init(void)
 {
-	int i;
-
-	spu_sched_wq = create_singlethread_workqueue("spusched");
-	if (!spu_sched_wq)
-		return 1;
+	struct proc_dir_entry *entry;
+	int err = -ENOMEM, i;
 
 	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
-	if (!spu_prio) {
-		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
-		       __FUNCTION__);
-		       destroy_workqueue(spu_sched_wq);
-		return 1;
-	}
+	if (!spu_prio)
+		goto out;
+
 	for (i = 0; i < MAX_PRIO; i++) {
 		INIT_LIST_HEAD(&spu_prio->runq[i]);
 		__clear_bit(i, spu_prio->bitmap);
@@ -492,7 +757,30 @@
 		INIT_LIST_HEAD(&spu_prio->active_list[i]);
 	}
 	spin_lock_init(&spu_prio->runq_lock);
+
+	setup_timer(&spusched_timer, spusched_wake, 0);
+
+	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
+	if (IS_ERR(spusched_task)) {
+		err = PTR_ERR(spusched_task);
+		goto out_free_spu_prio;
+	}
+
+	entry = create_proc_entry("spu_loadavg", 0, NULL);
+	if (!entry)
+		goto out_stop_kthread;
+	entry->proc_fops = &spu_loadavg_fops;
+
+	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
+			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
 	return 0;
+
+ out_stop_kthread:
+	kthread_stop(spusched_task);
+ out_free_spu_prio:
+	kfree(spu_prio);
+ out:
+	return err;
 }
 
 void __exit spu_sched_exit(void)
@@ -500,6 +788,11 @@
 	struct spu *spu, *tmp;
 	int node;
 
+	remove_proc_entry("spu_loadavg", NULL);
+
+	del_timer_sync(&spusched_timer);
+	kthread_stop(spusched_task);
+
 	for (node = 0; node < MAX_NUMNODES; node++) {
 		mutex_lock(&spu_prio->active_mutex[node]);
 		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
@@ -510,5 +803,4 @@
 		mutex_unlock(&spu_prio->active_mutex[node]);
 	}
 	kfree(spu_prio);
-	destroy_workqueue(spu_sched_wq);
 }
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c
index 0bf723d..4e19ed7 100644
--- a/arch/powerpc/platforms/cell/spufs/spu_restore.c
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c
@@ -296,7 +296,7 @@
  * This code deviates from the documented sequence in the
  * following aspects:
  *
- * 	1. The EA for LSCSA is passed from PPE in the
+ *	1. The EA for LSCSA is passed from PPE in the
  *	   signal notification channels.
  *	2. The register spill area is pulled by SPU
  *	   into LS, rather than pushed by PPE.
diff --git a/arch/powerpc/platforms/cell/spufs/spu_save.c b/arch/powerpc/platforms/cell/spufs/spu_save.c
index 196033b..ae95cc1 100644
--- a/arch/powerpc/platforms/cell/spufs/spu_save.c
+++ b/arch/powerpc/platforms/cell/spufs/spu_save.c
@@ -44,7 +44,7 @@
 	 *    Read the SPU_RdEventMsk channel and save to the LSCSA.
 	 */
 	offset = LSCSA_QW_OFFSET(event_mask);
-	regs_spill[offset].slot[0] = spu_readch(SPU_RdEventStatMask);
+	regs_spill[offset].slot[0] = spu_readch(SPU_RdEventMask);
 }
 
 static inline void save_tag_mask(void)
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 47617e8..08b3530 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -26,6 +26,7 @@
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/fs.h>
+#include <linux/cpumask.h>
 
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
@@ -39,9 +40,17 @@
 struct spu_context_ops;
 struct spu_gang;
 
-/* ctx->sched_flags */
-enum {
-	SPU_SCHED_EXITING = 0,
+/*
+ * This is the state for spu utilization reporting to userspace.
+ * Because this state is visible to userspace it must never change and needs
+ * to be kept strictly separate from any internal state kept by the kernel.
+ */
+enum spuctx_execution_state {
+	SPUCTX_UTIL_USER = 0,
+	SPUCTX_UTIL_SYSTEM,
+	SPUCTX_UTIL_IOWAIT,
+	SPUCTX_UTIL_LOADED,
+	SPUCTX_UTIL_MAX
 };
 
 struct spu_context {
@@ -81,13 +90,34 @@
 	struct list_head gang_list;
 	struct spu_gang *gang;
 
+	/* owner thread */
+	pid_t tid;
+
 	/* scheduler fields */
- 	struct list_head rq;
-	struct delayed_work sched_work;
+	struct list_head rq;
+	unsigned int time_slice;
 	unsigned long sched_flags;
-	unsigned long rt_priority;
+	cpumask_t cpus_allowed;
 	int policy;
 	int prio;
+
+	/* statistics */
+	struct {
+		/* updates protected by ctx->state_mutex */
+		enum spuctx_execution_state execution_state;
+		unsigned long tstamp;		/* time of last ctx switch */
+		unsigned long times[SPUCTX_UTIL_MAX];
+		unsigned long long vol_ctx_switch;
+		unsigned long long invol_ctx_switch;
+		unsigned long long min_flt;
+		unsigned long long maj_flt;
+		unsigned long long hash_flt;
+		unsigned long long slb_flt;
+		unsigned long long slb_flt_base; /* # at last ctx switch */
+		unsigned long long class2_intr;
+		unsigned long long class2_intr_base; /* # at last ctx switch */
+		unsigned long long libassist;
+	} stats;
 };
 
 struct spu_gang {
@@ -177,6 +207,7 @@
 int spufs_handle_class1(struct spu_context *ctx);
 
 /* context management */
+extern atomic_t nr_spu_contexts;
 static inline void spu_acquire(struct spu_context *ctx)
 {
 	mutex_lock(&ctx->state_mutex);
@@ -200,9 +231,9 @@
 int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
-void spu_start_tick(struct spu_context *ctx);
-void spu_stop_tick(struct spu_context *ctx);
-void spu_sched_tick(struct work_struct *work);
+void spu_set_timeslice(struct spu_context *ctx);
+void spu_update_sched_info(struct spu_context *ctx);
+void __spu_update_sched_info(struct spu_context *ctx);
 int __init spu_sched_init(void);
 void __exit spu_sched_exit(void);
 
@@ -210,7 +241,7 @@
 
 /*
  * spufs_wait
- * 	Same as wait_event_interruptible(), except that here
+ *	Same as wait_event_interruptible(), except that here
  *	we need to call spu_release(ctx) before sleeping, and
  *	then spu_acquire(ctx) when awoken.
  */
@@ -256,4 +287,37 @@
 extern struct spufs_coredump_reader spufs_coredump_read[];
 extern int spufs_coredump_num_notes;
 
+/*
+ * This function is a little bit too large for an inline, but
+ * as fault.c is built into the kernel we can't move it out of
+ * line.
+ */
+static inline void spuctx_switch_state(struct spu_context *ctx,
+		enum spuctx_execution_state new_state)
+{
+	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
+
+	if (ctx->stats.execution_state != new_state) {
+		unsigned long curtime = jiffies;
+
+		ctx->stats.times[ctx->stats.execution_state] +=
+				 curtime - ctx->stats.tstamp;
+		ctx->stats.tstamp = curtime;
+		ctx->stats.execution_state = new_state;
+	}
+}
+
+static inline void spu_switch_state(struct spu *spu,
+		enum spuctx_execution_state new_state)
+{
+	if (spu->stats.utilization_state != new_state) {
+		unsigned long curtime = jiffies;
+
+		spu->stats.times[spu->stats.utilization_state] +=
+				 curtime - spu->stats.tstamp;
+		spu->stats.tstamp = curtime;
+		spu->stats.utilization_state = new_state;
+	}
+}
+
 #endif
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 71a0b41..9c506ba 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -70,7 +70,7 @@
   }
 #endif				/* debug */
 
-#define POLL_WHILE_FALSE(_c) 	POLL_WHILE_TRUE(!(_c))
+#define POLL_WHILE_FALSE(_c)	POLL_WHILE_TRUE(!(_c))
 
 static inline void acquire_spu_lock(struct spu *spu)
 {
@@ -387,6 +387,19 @@
 	csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
 }
 
+static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
+{
+	struct spu_problem __iomem *prob = spu->problem;
+
+	/* Save the Prxy_TagStatus register in the CSA.
+	 *
+	 * It is unnecessary to restore dma_tagstatus_R, however,
+	 * dma_tagstatus_R in the CSA is accessed via backing_ops, so
+	 * we must save it.
+	 */
+	csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
+}
+
 static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
@@ -1812,6 +1825,7 @@
 	save_mfc_queues(prev, spu);	/* Step 19. */
 	save_ppu_querymask(prev, spu);	/* Step 20. */
 	save_ppu_querytype(prev, spu);	/* Step 21. */
+	save_ppu_tagstatus(prev, spu);  /* NEW.     */
 	save_mfc_csr_tsq(prev, spu);	/* Step 22. */
 	save_mfc_csr_cmd(prev, spu);	/* Step 23. */
 	save_mfc_csr_ato(prev, spu);	/* Step 24. */
@@ -1930,7 +1944,7 @@
 	reset_spu_privcntl(prev, spu);	        /* Step 16. */
 	reset_spu_lslr(prev, spu);              /* Step 17. */
 	setup_mfc_sr1(prev, spu);	        /* Step 18. */
-	spu_invalidate_slbs(spu);        	/* Step 19. */
+	spu_invalidate_slbs(spu);		/* Step 19. */
 	reset_ch_part1(prev, spu);	        /* Step 20. */
 	reset_ch_part2(prev, spu);	        /* Step 21. */
 	enable_interrupts(prev, spu);	        /* Step 22. */
diff --git a/arch/powerpc/platforms/chrp/Kconfig b/arch/powerpc/platforms/chrp/Kconfig
index d2c6905..22b4b4e 100644
--- a/arch/powerpc/platforms/chrp/Kconfig
+++ b/arch/powerpc/platforms/chrp/Kconfig
@@ -8,4 +8,5 @@
 	select PPC_MPC106
 	select PPC_UDBG_16550
 	select PPC_NATIVE
+	select PCI
 	default y
diff --git a/arch/powerpc/platforms/chrp/Makefile b/arch/powerpc/platforms/chrp/Makefile
index 902feb1..4b3bfad 100644
--- a/arch/powerpc/platforms/chrp/Makefile
+++ b/arch/powerpc/platforms/chrp/Makefile
@@ -1,4 +1,3 @@
-obj-y				+= setup.o time.o pegasos_eth.o
-obj-$(CONFIG_PCI)		+= pci.o
+obj-y				+= setup.o time.o pegasos_eth.o pci.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_NVRAM)		+= nvram.o
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index d32fedc..3690624 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -99,7 +99,7 @@
 	struct pci_controller *hose = bus->sysdata;
 	unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
 		| (((bus->number - hose->first_busno) & 0xff) << 16)
-		| (hose->index << 24);
+		| (hose->global_number << 24);
         int ret = -1;
 	int rval;
 
@@ -114,7 +114,7 @@
 	struct pci_controller *hose = bus->sysdata;
 	unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
 		| (((bus->number - hose->first_busno) & 0xff) << 16)
-		| (hose->index << 24);
+		| (hose->global_number << 24);
 	int rval;
 
 	rval = rtas_call(rtas_token("write-pci-config"), 3, 1, NULL,
@@ -254,13 +254,12 @@
 			printk(" at %llx", (unsigned long long)r.start);
 		printk("\n");
 
-		hose = pcibios_alloc_controller();
+		hose = pcibios_alloc_controller(dev);
 		if (!hose) {
 			printk("Can't allocate PCI controller structure for %s\n",
 				dev->full_name);
 			continue;
 		}
-		hose->arch_data = dev;
 		hose->first_busno = bus_range[0];
 		hose->last_busno = bus_range[1];
 
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index f2d2626..bec7726 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -28,6 +28,7 @@
 	bool "PPC750GX/CL with TSI10x bridge (Hickory/Holly)"
 	select TSI108_BRIDGE
 	select PPC_UDBG_16550
+	select WANT_DEVICE_TREE
 	help
 	  Select PPC_HOLLY if configuring for an IBM 750GX/CL Eval
 	  Board with TSI108/9 bridge (Hickory/Holly)
@@ -44,6 +45,7 @@
 config TSI108_BRIDGE
 	bool
 	depends on MPC7448HPC2 || PPC_HOLLY
+	select PCI
 	select MPIC
 	select MPIC_WEIRD
 	default y
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index 3a0b4a0..6292e36 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -45,7 +45,7 @@
 
 #define HOLLY_PCI_CFG_PHYS 0x7c000000
 
-int holly_exclude_device(u_char bus, u_char devfn)
+int holly_exclude_device(struct pci_controller *hose, u_char bus, u_char devfn)
 {
 	if (bus == 0 && PCI_SLOT(devfn) == 0)
 		return PCIBIOS_DEVICE_NOT_FOUND;
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index b412f00..f4d0a7a 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -54,8 +54,9 @@
 	},
 };
 
-static int __init add_bridge(struct device_node *dev)
+static int __init linkstation_add_bridge(struct device_node *dev)
 {
+#ifdef CONFIG_PCI
 	int len;
 	struct pci_controller *hose;
 	const int *bus_range;
@@ -67,18 +68,17 @@
 		printk(KERN_WARNING "Can't get bus-range for %s, assume"
 				" bus 0\n", dev->full_name);
 
-	hose = pcibios_alloc_controller();
+	hose = pcibios_alloc_controller(dev);
 	if (hose == NULL)
 		return -ENOMEM;
 	hose->first_busno = bus_range ? bus_range[0] : 0;
 	hose->last_busno = bus_range ? bus_range[1] : 0xff;
-	hose->arch_data = dev;
 	setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
 
 	/* Interpret the "ranges" property */
 	/* This also maps the I/O region and sets isa_io/mem_base */
 	pci_process_bridge_OF_ranges(hose, dev, 1);
-
+#endif
 	return 0;
 }
 
@@ -92,7 +92,7 @@
 
 	/* Lookup PCI host bridges */
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
-		add_bridge(np);
+		linkstation_add_bridge(np);
 
 	printk(KERN_INFO "BUFFALO Network Attached Storage Series\n");
 	printk(KERN_INFO "(C) 2002-2005 BUFFALO INC.\n");
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index 4542e0c8..1e3cc69 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -54,15 +54,10 @@
 
 #define MPC7448HPC2_PCI_CFG_PHYS 0xfb000000
 
-#ifndef CONFIG_PCI
-isa_io_base = MPC7448_HPC2_ISA_IO_BASE;
-isa_mem_base = MPC7448_HPC2_ISA_MEM_BASE;
-pci_dram_offset = MPC7448_HPC2_PCI_MEM_OFFSET;
-#endif
-
 extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
 
-int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn)
+int mpc7448_hpc2_exclude_device(struct pci_controller *hose,
+				u_char bus, u_char devfn)
 {
 	if (bus == 0 && PCI_SLOT(devfn) == 0)
 		return PCIBIOS_DEVICE_NOT_FOUND;
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.h b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.h
index a543a52..f7e0e0c 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.h
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.h
@@ -18,9 +18,4 @@
 
 #include <asm/ppcboot.h>
 
-/* Base Addresses for the PCI bus
- */
-#define MPC7448_HPC2_PCI_MEM_OFFSET	(0x00000000)
-#define MPC7448_HPC2_ISA_IO_BASE	(0x00000000)
-#define MPC7448_HPC2_ISA_MEM_BASE	(0x00000000)
 #endif				/* __PPC_PLATFORMS_MPC7448_HPC2_H */
diff --git a/arch/powerpc/platforms/iseries/call_hpt.h b/arch/powerpc/platforms/iseries/call_hpt.h
index a843b0f..8d95fe4b 100644
--- a/arch/powerpc/platforms/iseries/call_hpt.h
+++ b/arch/powerpc/platforms/iseries/call_hpt.h
@@ -76,24 +76,25 @@
 	return compressedStatus;
 }
 
-static inline u64 HvCallHpt_findValid(hpte_t *hpte, u64 vpn)
+static inline u64 HvCallHpt_findValid(struct hash_pte *hpte, u64 vpn)
 {
 	return HvCall3Ret16(HvCallHptFindValid, hpte, vpn, 0, 0);
 }
 
-static inline u64 HvCallHpt_findNextValid(hpte_t *hpte, u32 hpteIndex,
+static inline u64 HvCallHpt_findNextValid(struct hash_pte *hpte, u32 hpteIndex,
 		u8 bitson, u8 bitsoff)
 {
 	return HvCall3Ret16(HvCallHptFindNextValid, hpte, hpteIndex,
 			bitson, bitsoff);
 }
 
-static inline void HvCallHpt_get(hpte_t *hpte, u32 hpteIndex)
+static inline void HvCallHpt_get(struct hash_pte *hpte, u32 hpteIndex)
 {
 	HvCall2Ret16(HvCallHptGet, hpte, hpteIndex, 0);
 }
 
-static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit, hpte_t *hpte)
+static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit,
+					 struct hash_pte *hpte)
 {
 	HvCall4(HvCallHptAddValidate, hpteIndex, hBit, hpte->v, hpte->r);
 }
diff --git a/arch/powerpc/platforms/iseries/htab.c b/arch/powerpc/platforms/iseries/htab.c
index ed44dfc..b4e2c7a 100644
--- a/arch/powerpc/platforms/iseries/htab.c
+++ b/arch/powerpc/platforms/iseries/htab.c
@@ -44,7 +44,7 @@
 			 unsigned long vflags, int psize)
 {
 	long slot;
-	hpte_t lhpte;
+	struct hash_pte lhpte;
 	int secondary = 0;
 
 	BUG_ON(psize != MMU_PAGE_4K);
@@ -99,7 +99,7 @@
 
 static unsigned long iSeries_hpte_getword0(unsigned long slot)
 {
-	hpte_t hpte;
+	struct hash_pte hpte;
 
 	HvCallHpt_get(&hpte, slot);
 	return hpte.v;
@@ -144,7 +144,7 @@
 static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
 				  unsigned long va, int psize, int local)
 {
-	hpte_t hpte;
+	struct hash_pte hpte;
 	unsigned long want_v;
 
 	iSeries_hlock(slot);
@@ -176,7 +176,7 @@
  */
 static long iSeries_hpte_find(unsigned long vpn)
 {
-	hpte_t hpte;
+	struct hash_pte hpte;
 	long slot;
 
 	/*
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index 9c97422..da87162 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -742,6 +742,11 @@
 	/* Install IO hooks */
 	ppc_pci_io = iseries_pci_io;
 
+	/* iSeries has no IO space in the common sense, so the IO base
+	 * needs to be set to 0
+	 */
+	pci_io_base = 0;
+
 	if (root == NULL) {
 		printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
 				"of device tree\n");
@@ -763,7 +768,7 @@
 		if (phb == NULL)
 			continue;
 
-		phb->pci_mem_offset = phb->local_number = bus;
+		phb->pci_mem_offset = bus;
 		phb->first_busno = bus;
 		phb->last_busno = bus;
 		phb->ops = &iSeries_pci_ops;
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 7f5dcee..13a8b19 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -79,8 +79,6 @@
 static void iSeries_pci_final_fixup(void) { }
 #endif
 
-extern unsigned long iSeries_recal_tb;
-extern unsigned long iSeries_recal_titan;
 
 struct MemoryBlock {
 	unsigned long absStart;
@@ -292,8 +290,8 @@
 {
 	DBG(" -> iSeries_init_early()\n");
 
-	iSeries_recal_tb = get_tb();
-	iSeries_recal_titan = HvCallXm_loadTod();
+	/* Snapshot the timebase, for use in later recalibration */
+	iSeries_time_init_early();
 
 	/*
 	 * Initialize the DMA/TCE management
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 7aaa5bb..fceaae4 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -444,7 +444,7 @@
 	u3_ht = hose;
 }
 
-static int __init add_bridge(struct device_node *dev)
+static int __init maple_add_bridge(struct device_node *dev)
 {
 	int len;
 	struct pci_controller *hose;
@@ -519,23 +519,6 @@
 	DBG(" <- maple_pci_irq_fixup\n");
 }
 
-static void __init maple_fixup_phb_resources(void)
-{
-	struct pci_controller *hose, *tmp;
-	
-	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
-		unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
-
-		hose->io_resource.start += offset;
-		hose->io_resource.end += offset;
-
-		printk(KERN_INFO "PCI Host %d, io start: %llx; io end: %llx\n",
-		       hose->global_number,
-		       (unsigned long long)hose->io_resource.start,
-		       (unsigned long long)hose->io_resource.end);
-	}
-}
-
 void __init maple_pci_init(void)
 {
 	struct device_node *np, *root;
@@ -558,7 +541,7 @@
 			continue;
 		if ((of_device_is_compatible(np, "u4-pcie") ||
 		     of_device_is_compatible(np, "u3-agp")) &&
-		    add_bridge(np) == 0)
+		    maple_add_bridge(np) == 0)
 			of_node_get(np);
 
 		if (of_device_is_compatible(np, "u3-ht")) {
@@ -570,27 +553,9 @@
 
 	/* Now setup the HyperTransport host if we found any
 	 */
-	if (ht && add_bridge(ht) != 0)
+	if (ht && maple_add_bridge(ht) != 0)
 		of_node_put(ht);
 
-        /*
-         * We need to call pci_setup_phb_io for the HT bridge first
-         * so it gets the I/O port numbers starting at 0, and we
-         * need to call it for the AGP bridge after that so it gets
-         * small positive I/O port numbers.
-         */
-        if (u3_ht)
-                pci_setup_phb_io(u3_ht, 1);
-        if (u3_agp)
-                pci_setup_phb_io(u3_agp, 0);
-        if (u4_pcie)
-                pci_setup_phb_io(u4_pcie, 0);
-
-	/* Fixup the IO resources on our host bridges as the common code
-	 * does it only for childs of the host bridges
-	 */
-	maple_fixup_phb_resources();
-
 	/* Setup the linkage between OF nodes and PHBs */ 
 	pci_devs_phb_init();
 
diff --git a/arch/powerpc/platforms/pasemi/Kconfig b/arch/powerpc/platforms/pasemi/Kconfig
index 7c5076e..95cd90f 100644
--- a/arch/powerpc/platforms/pasemi/Kconfig
+++ b/arch/powerpc/platforms/pasemi/Kconfig
@@ -25,4 +25,13 @@
 	help
 	  Driver for MDIO via GPIO on PWRficient platforms
 
+config ELECTRA_IDE
+      tristate "Electra IDE driver"
+      default y
+      depends on PPC_PASEMI && ATA
+      select PATA_PLATFORM
+      help
+	This includes driver support for the Electra on-board IDE
+	interface.
+
 endmenu
diff --git a/arch/powerpc/platforms/pasemi/Makefile b/arch/powerpc/platforms/pasemi/Makefile
index 2cd2a4f..f47fcac 100644
--- a/arch/powerpc/platforms/pasemi/Makefile
+++ b/arch/powerpc/platforms/pasemi/Makefile
@@ -1,3 +1,4 @@
 obj-y	+= setup.o pci.o time.o idle.o powersave.o iommu.o
 obj-$(CONFIG_PPC_PASEMI_MDIO)	+= gpio_mdio.o
+obj-$(CONFIG_ELECTRA_IDE) += electra_ide.o
 obj-$(CONFIG_PPC_PASEMI_CPUFREQ) += cpufreq.o
diff --git a/arch/powerpc/platforms/pasemi/electra_ide.c b/arch/powerpc/platforms/pasemi/electra_ide.c
new file mode 100644
index 0000000..12fb0c9
--- /dev/null
+++ b/arch/powerpc/platforms/pasemi/electra_ide.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2007 PA Semi, Inc
+ *
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/platform_device.h>
+
+#include <asm/prom.h>
+#include <asm/system.h>
+
+/* The electra IDE interface is incredibly simple: Just a device on the localbus
+ * with interrupts hooked up to one of the GPIOs. The device tree contains the
+ * address window and interrupt mappings already, and the pata_platform driver handles
+ * the rest. We just need to hook the two up.
+ */
+
+#define MAX_IFS	4	/* really, we have only one */
+
+static struct platform_device *pdevs[MAX_IFS];
+
+static int __devinit electra_ide_init(void)
+{
+	struct device_node *np;
+	struct resource r[3];
+	int ret = 0;
+	int i;
+
+	np = of_find_compatible_node(NULL, "ide", "electra-ide");
+	i = 0;
+
+	while (np && i < MAX_IFS) {
+		memset(r, 0, sizeof(r));
+
+		/* pata_platform wants two address ranges: one for the base registers,
+		 * another for the control (altstatus). It's located at offset 0x3f6 in
+		 * the window, but the device tree only has one large register window
+		 * that covers both ranges. So we need to split it up by hand here:
+		 */
+
+		ret = of_address_to_resource(np, 0, &r[0]);
+		if (ret)
+			goto out;
+		ret = of_address_to_resource(np, 0, &r[1]);
+		if (ret)
+			goto out;
+
+		r[1].start += 0x3f6;
+		r[0].end = r[1].start-1;
+
+		r[2].start = irq_of_parse_and_map(np, 0);
+		r[2].end = irq_of_parse_and_map(np, 0);
+		r[2].flags = IORESOURCE_IRQ;
+
+		pr_debug("registering platform device at 0x%lx/0x%lx, irq is %ld\n",
+			 r[0].start, r[1].start, r[2].start);
+		pdevs[i] = platform_device_register_simple("pata_platform", i, r, 3);
+		if (IS_ERR(pdevs[i])) {
+			ret = PTR_ERR(pdevs[i]);
+			pdevs[i] = NULL;
+			goto out;
+		}
+		np = of_find_compatible_node(np, "ide", "electra-ide");
+	}
+out:
+	return ret;
+}
+module_init(electra_ide_init);
+
+static void __devexit electra_ide_exit(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_IFS; i++)
+		if (pdevs[i])
+			platform_device_unregister(pdevs[i]);
+}
+module_exit(electra_ide_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
+MODULE_DESCRIPTION("PA Semi Electra IDE driver");
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index bbc6dfc..ab1f5f6 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -132,7 +132,7 @@
 	hose->cfg_data = ioremap(0xe0000000, 0x10000000);
 }
 
-static int __init add_bridge(struct device_node *dev)
+static int __init pas_add_bridge(struct device_node *dev)
 {
 	struct pci_controller *hose;
 
@@ -150,29 +150,11 @@
 	printk(KERN_INFO "Found PA-PXP PCI host bridge.\n");
 
 	/* Interpret the "ranges" property */
-	/* This also maps the I/O region and sets isa_io/mem_base */
 	pci_process_bridge_OF_ranges(hose, dev, 1);
-	pci_setup_phb_io(hose, 1);
 
 	return 0;
 }
 
-
-static void __init pas_fixup_phb_resources(void)
-{
-	struct pci_controller *hose, *tmp;
-
-	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
-		unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
-		hose->io_resource.start += offset;
-		hose->io_resource.end += offset;
-		printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
-		       hose->global_number,
-		       hose->io_resource.start, hose->io_resource.end);
-	}
-}
-
-
 void __init pas_pci_init(void)
 {
 	struct device_node *np, *root;
@@ -185,13 +167,11 @@
 	}
 
 	for (np = NULL; (np = of_get_next_child(root, np)) != NULL;)
-		if (np->name && !strcmp(np->name, "pxp") && !add_bridge(np))
+		if (np->name && !strcmp(np->name, "pxp") && !pas_add_bridge(np))
 			of_node_get(np);
 
 	of_node_put(root);
 
-	pas_fixup_phb_resources();
-
 	/* Setup the linkage between OF nodes and PHBs */
 	pci_devs_phb_init();
 
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index c5a3f61..ffe6528 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -239,7 +239,7 @@
 	return 1;
 }
 
-define_machine(pas) {
+define_machine(pasemi) {
 	.name			= "PA Semi PA6T-1682M",
 	.probe			= pas_probe,
 	.setup_arch		= pas_setup_arch,
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig
index 5b7afe5..055990c 100644
--- a/arch/powerpc/platforms/powermac/Kconfig
+++ b/arch/powerpc/platforms/powermac/Kconfig
@@ -2,6 +2,7 @@
 	bool "Apple PowerMac based machines"
 	depends on PPC_MULTIPLATFORM
 	select MPIC
+	select PCI
 	select PPC_INDIRECT_PCI if PPC32
 	select PPC_MPC106 if PPC32
 	select PPC_NATIVE
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index 3f507ab..efdf5eb 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -42,6 +42,7 @@
 #include <linux/interrupt.h>
 #include <linux/completion.h>
 #include <linux/timer.h>
+#include <linux/mutex.h>
 #include <asm/keylargo.h>
 #include <asm/uninorth.h>
 #include <asm/io.h>
@@ -84,7 +85,7 @@
 	void			*hostdata;
 	int			channel;	/* some hosts have multiple */
 	int			mode;		/* current mode */
-	struct semaphore	sem;
+	struct mutex		mutex;
 	int			opened;
 	int			polled;		/* open mode */
 	struct platform_device	*platform_dev;
@@ -104,7 +105,7 @@
 
 struct pmac_i2c_host_kw
 {
-	struct semaphore	mutex;		/* Access mutex for use by
+	struct mutex		mutex;		/* Access mutex for use by
 						 * i2c-keywest */
 	void __iomem		*base;		/* register base address */
 	int			bsteps;		/* register stepping */
@@ -375,14 +376,14 @@
 static int kw_i2c_open(struct pmac_i2c_bus *bus)
 {
 	struct pmac_i2c_host_kw *host = bus->hostdata;
-	down(&host->mutex);
+	mutex_lock(&host->mutex);
 	return 0;
 }
 
 static void kw_i2c_close(struct pmac_i2c_bus *bus)
 {
 	struct pmac_i2c_host_kw *host = bus->hostdata;
-	up(&host->mutex);
+	mutex_unlock(&host->mutex);
 }
 
 static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
@@ -498,7 +499,7 @@
 		kfree(host);
 		return NULL;
 	}
-	init_MUTEX(&host->mutex);
+	mutex_init(&host->mutex);
 	init_completion(&host->complete);
 	spin_lock_init(&host->lock);
 	init_timer(&host->timeout_timer);
@@ -571,7 +572,7 @@
 	bus->open = kw_i2c_open;
 	bus->close = kw_i2c_close;
 	bus->xfer = kw_i2c_xfer;
-	init_MUTEX(&bus->sem);
+	mutex_init(&bus->mutex);
 	if (controller == busnode)
 		bus->flags = pmac_i2c_multibus;
 	list_add(&bus->link, &pmac_i2c_busses);
@@ -798,7 +799,7 @@
 		bus->mode = pmac_i2c_mode_std;
 		bus->hostdata = bus + 1;
 		bus->xfer = pmu_i2c_xfer;
-		init_MUTEX(&bus->sem);
+		mutex_init(&bus->mutex);
 		bus->flags = pmac_i2c_multibus;
 		list_add(&bus->link, &pmac_i2c_busses);
 
@@ -921,7 +922,7 @@
 		bus->mode = pmac_i2c_mode_std;
 		bus->hostdata = bus + 1;
 		bus->xfer = smu_i2c_xfer;
-		init_MUTEX(&bus->sem);
+		mutex_init(&bus->mutex);
 		bus->flags = 0;
 		list_add(&bus->link, &pmac_i2c_busses);
 
@@ -1093,13 +1094,13 @@
 {
 	int rc;
 
-	down(&bus->sem);
+	mutex_lock(&bus->mutex);
 	bus->polled = polled || pmac_i2c_force_poll;
 	bus->opened = 1;
 	bus->mode = pmac_i2c_mode_std;
 	if (bus->open && (rc = bus->open(bus)) != 0) {
 		bus->opened = 0;
-		up(&bus->sem);
+		mutex_unlock(&bus->mutex);
 		return rc;
 	}
 	return 0;
@@ -1112,7 +1113,7 @@
 	if (bus->close)
 		bus->close(bus);
 	bus->opened = 0;
-	up(&bus->sem);
+	mutex_unlock(&bus->mutex);
 }
 EXPORT_SYMBOL_GPL(pmac_i2c_close);
 
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index c4af9e2..92586db 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -35,8 +35,6 @@
 #define DBG(x...)
 #endif
 
-static int add_bridge(struct device_node *dev);
-
 /* XXX Could be per-controller, but I don't think we risk anything by
  * assuming we won't have both UniNorth and Bandit */
 static int has_uninorth;
@@ -897,7 +895,7 @@
  * "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise,
  * if we have one or more bandit or chaos bridges, we don't have a MPC106.
  */
-static int __init add_bridge(struct device_node *dev)
+static int __init pmac_add_bridge(struct device_node *dev)
 {
 	int len;
 	struct pci_controller *hose;
@@ -918,15 +916,9 @@
 		       " bus 0\n", dev->full_name);
 	}
 
-	/* XXX Different prototypes, to be merged */
-#ifdef CONFIG_PPC64
 	hose = pcibios_alloc_controller(dev);
-#else
-	hose = pcibios_alloc_controller();
-#endif
 	if (!hose)
 		return -ENOMEM;
-	hose->arch_data = dev;
 	hose->first_busno = bus_range ? bus_range[0] : 0;
 	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 
@@ -1006,19 +998,6 @@
 #endif /* CONFIG_PPC32 */
 }
 
-#ifdef CONFIG_PPC64
-static void __init pmac_fixup_phb_resources(void)
-{
-	struct pci_controller *hose, *tmp;
-
-	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
-		printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
-		       hose->global_number,
-		       hose->io_resource.start, hose->io_resource.end);
-	}
-}
-#endif
-
 void __init pmac_pci_init(void)
 {
 	struct device_node *np, *root;
@@ -1036,7 +1015,7 @@
 		if (strcmp(np->name, "bandit") == 0
 		    || strcmp(np->name, "chaos") == 0
 		    || strcmp(np->name, "pci") == 0) {
-			if (add_bridge(np) == 0)
+			if (pmac_add_bridge(np) == 0)
 				of_node_get(np);
 		}
 		if (strcmp(np->name, "ht") == 0) {
@@ -1050,28 +1029,9 @@
 	/* Probe HT last as it relies on the agp resources to be already
 	 * setup
 	 */
-	if (ht && add_bridge(ht) != 0)
+	if (ht && pmac_add_bridge(ht) != 0)
 		of_node_put(ht);
 
-	/*
-	 * We need to call pci_setup_phb_io for the HT bridge first
-	 * so it gets the I/O port numbers starting at 0, and we
-	 * need to call it for the AGP bridge after that so it gets
-	 * small positive I/O port numbers.
-	 */
-	if (u3_ht)
-		pci_setup_phb_io(u3_ht, 1);
-	if (u3_agp)
-		pci_setup_phb_io(u3_agp, 0);
-	if (u4_pcie)
-		pci_setup_phb_io(u4_pcie, 0);
-
-	/*
-	 * On ppc64, fixup the IO resources on our host bridges as
-	 * the common code does it only for children of the host bridges
-	 */
-	pmac_fixup_phb_resources();
-
 	/* Setup the linkage between OF nodes and PHBs */
 	pci_devs_phb_init();
 
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index 40f0008..a05079b 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -7,6 +7,7 @@
 	select USB_OHCI_BIG_ENDIAN_MMIO
 	select USB_ARCH_HAS_EHCI
 	select USB_EHCI_BIG_ENDIAN_MMIO
+	select MEMORY_HOTPLUG
 	help
 	  This option enables support for the Sony PS3 game console
 	  and other platforms using the PS3 hypervisor.
@@ -73,18 +74,12 @@
 
 config PS3_VUART
 	depends on PPC_PS3
-	bool "PS3 Virtual UART support" if PS3_ADVANCED
-	default y
-	help
-	  Include support for the PS3 Virtual UART.
-
-	  This support is required for several system services
-	  including the System Manager and AV Settings.  In
-	  general, all users will say Y.
+	tristate
 
 config PS3_PS3AV
+	depends on PPC_PS3
 	tristate "PS3 AV settings driver" if PS3_ADVANCED
-	depends on PS3_VUART
+	select PS3_VUART
 	default y
 	help
 	  Include support for the PS3 AV Settings driver.
@@ -93,13 +88,18 @@
 	  general, all users will say Y or M.
 
 config PS3_SYS_MANAGER
-	bool "PS3 System Manager driver" if PS3_ADVANCED
-	depends on PS3_VUART
-	default y
+	depends on PPC_PS3
+	tristate "PS3 System Manager driver" if PS3_ADVANCED
+	select PS3_VUART
+	default m
 	help
 	  Include support for the PS3 System Manager.
 
 	  This support is required for system control.  In
-	  general, all users will say Y.
+	  general, all users will say Y or M.
+
+config PS3_STORAGE
+	depends on PPC_PS3
+	tristate
 
 endmenu
diff --git a/arch/powerpc/platforms/ps3/Makefile b/arch/powerpc/platforms/ps3/Makefile
index a0048fc..ac1bdf8 100644
--- a/arch/powerpc/platforms/ps3/Makefile
+++ b/arch/powerpc/platforms/ps3/Makefile
@@ -4,3 +4,4 @@
 
 obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_SPU_BASE) += spu.o
+obj-y += device-init.o
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
new file mode 100644
index 0000000..825ebb2
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -0,0 +1,785 @@
+/*
+ *  PS3 device registration routines.
+ *
+ *  Copyright (C) 2007 Sony Computer Entertainment Inc.
+ *  Copyright 2007 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/init.h>
+
+#include <asm/firmware.h>
+#include <asm/lv1call.h>
+#include <asm/ps3stor.h>
+
+#include "platform.h"
+
+/**
+ * ps3_setup_gelic_device - Setup and register a gelic device instance.
+ *
+ * Allocates memory for a struct ps3_system_bus_device instance, initialises the
+ * structure members, and registers the device instance with the system bus.
+ */
+
+static int __init ps3_setup_gelic_device(
+	const struct ps3_repository_device *repo)
+{
+	int result;
+	struct layout {
+		struct ps3_system_bus_device dev;
+		struct ps3_dma_region d_region;
+	} *p;
+
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+	BUG_ON(repo->bus_type != PS3_BUS_TYPE_SB);
+	BUG_ON(repo->dev_type != PS3_DEV_TYPE_SB_GELIC);
+
+	p = kzalloc(sizeof(struct layout), GFP_KERNEL);
+
+	if (!p) {
+		result = -ENOMEM;
+		goto fail_malloc;
+	}
+
+	p->dev.match_id = PS3_MATCH_ID_GELIC;
+	p->dev.dev_type = PS3_DEVICE_TYPE_SB;
+	p->dev.bus_id = repo->bus_id;
+	p->dev.dev_id = repo->dev_id;
+	p->dev.d_region = &p->d_region;
+
+	result = ps3_repository_find_interrupt(repo,
+		PS3_INTERRUPT_TYPE_EVENT_PORT, &p->dev.interrupt_id);
+
+	if (result) {
+		pr_debug("%s:%d ps3_repository_find_interrupt failed\n",
+			__func__, __LINE__);
+		goto fail_find_interrupt;
+	}
+
+	BUG_ON(p->dev.interrupt_id != 0);
+
+	result = ps3_dma_region_init(&p->dev, p->dev.d_region, PS3_DMA_64K,
+		PS3_DMA_OTHER, NULL, 0);
+
+	if (result) {
+		pr_debug("%s:%d ps3_dma_region_init failed\n",
+			__func__, __LINE__);
+		goto fail_dma_init;
+	}
+
+	result = ps3_system_bus_device_register(&p->dev);
+
+	if (result) {
+		pr_debug("%s:%d ps3_system_bus_device_register failed\n",
+			__func__, __LINE__);
+		goto fail_device_register;
+	}
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+
+fail_device_register:
+fail_dma_init:
+fail_find_interrupt:
+	kfree(p);
+fail_malloc:
+	pr_debug(" <- %s:%d: fail.\n", __func__, __LINE__);
+	return result;
+}
+
+static int __init_refok ps3_setup_uhc_device(
+	const struct ps3_repository_device *repo, enum ps3_match_id match_id,
+	enum ps3_interrupt_type interrupt_type, enum ps3_reg_type reg_type)
+{
+	int result;
+	struct layout {
+		struct ps3_system_bus_device dev;
+		struct ps3_dma_region d_region;
+		struct ps3_mmio_region m_region;
+	} *p;
+	u64 bus_addr;
+	u64 len;
+
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+	BUG_ON(repo->bus_type != PS3_BUS_TYPE_SB);
+	BUG_ON(repo->dev_type != PS3_DEV_TYPE_SB_USB);
+
+	p = kzalloc(sizeof(struct layout), GFP_KERNEL);
+
+	if (!p) {
+		result = -ENOMEM;
+		goto fail_malloc;
+	}
+
+	p->dev.match_id = match_id;
+	p->dev.dev_type = PS3_DEVICE_TYPE_SB;
+	p->dev.bus_id = repo->bus_id;
+	p->dev.dev_id = repo->dev_id;
+	p->dev.d_region = &p->d_region;
+	p->dev.m_region = &p->m_region;
+
+	result = ps3_repository_find_interrupt(repo,
+		interrupt_type, &p->dev.interrupt_id);
+
+	if (result) {
+		pr_debug("%s:%d ps3_repository_find_interrupt failed\n",
+			__func__, __LINE__);
+		goto fail_find_interrupt;
+	}
+
+	result = ps3_repository_find_reg(repo, reg_type,
+		&bus_addr, &len);
+
+	if (result) {
+		pr_debug("%s:%d ps3_repository_find_reg failed\n",
+			__func__, __LINE__);
+		goto fail_find_reg;
+	}
+
+	result = ps3_dma_region_init(&p->dev, p->dev.d_region, PS3_DMA_64K,
+		PS3_DMA_INTERNAL, NULL, 0);
+
+	if (result) {
+		pr_debug("%s:%d ps3_dma_region_init failed\n",
+			__func__, __LINE__);
+		goto fail_dma_init;
+	}
+
+	result = ps3_mmio_region_init(&p->dev, p->dev.m_region, bus_addr, len,
+		PS3_MMIO_4K);
+
+	if (result) {
+		pr_debug("%s:%d ps3_mmio_region_init failed\n",
+			__func__, __LINE__);
+		goto fail_mmio_init;
+	}
+
+	result = ps3_system_bus_device_register(&p->dev);
+
+	if (result) {
+		pr_debug("%s:%d ps3_system_bus_device_register failed\n",
+			__func__, __LINE__);
+		goto fail_device_register;
+	}
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+
+fail_device_register:
+fail_mmio_init:
+fail_dma_init:
+fail_find_reg:
+fail_find_interrupt:
+	kfree(p);
+fail_malloc:
+	pr_debug(" <- %s:%d: fail.\n", __func__, __LINE__);
+	return result;
+}
+
+static int __init ps3_setup_ehci_device(
+	const struct ps3_repository_device *repo)
+{
+	return ps3_setup_uhc_device(repo, PS3_MATCH_ID_EHCI,
+		PS3_INTERRUPT_TYPE_SB_EHCI, PS3_REG_TYPE_SB_EHCI);
+}
+
+static int __init ps3_setup_ohci_device(
+	const struct ps3_repository_device *repo)
+{
+	return ps3_setup_uhc_device(repo, PS3_MATCH_ID_OHCI,
+		PS3_INTERRUPT_TYPE_SB_OHCI, PS3_REG_TYPE_SB_OHCI);
+}
+
+static int __init ps3_setup_vuart_device(enum ps3_match_id match_id,
+	unsigned int port_number)
+{
+	int result;
+	struct layout {
+		struct ps3_system_bus_device dev;
+	} *p;
+
+	pr_debug(" -> %s:%d: match_id %u, port %u\n", __func__, __LINE__,
+		match_id, port_number);
+
+	p = kzalloc(sizeof(struct layout), GFP_KERNEL);
+
+	if (!p)
+		return -ENOMEM;
+
+	p->dev.match_id = match_id;
+	p->dev.dev_type = PS3_DEVICE_TYPE_VUART;
+	p->dev.port_number = port_number;
+
+	result = ps3_system_bus_device_register(&p->dev);
+
+	if (result)
+		pr_debug("%s:%d ps3_system_bus_device_register failed\n",
+			__func__, __LINE__);
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+
+static int ps3stor_wait_for_completion(u64 dev_id, u64 tag,
+				       unsigned int timeout)
+{
+	int result = -1;
+	unsigned int retries = 0;
+	u64 status;
+
+	for (retries = 0; retries < timeout; retries++) {
+		result = lv1_storage_check_async_status(dev_id, tag, &status);
+		if (!result)
+			break;
+
+		msleep(1);
+	}
+
+	if (result)
+		pr_debug("%s:%u: check_async_status: %s, status %lx\n",
+			 __func__, __LINE__, ps3_result(result), status);
+
+	return result;
+}
+
+/**
+ * ps3_storage_wait_for_device - Wait for a storage device to become ready.
+ * @repo: The repository device to wait for.
+ *
+ * Uses the hypervisor's storage device notification mechanism to wait until
+ * a storage device is ready.  The device notification mechanism uses a
+ * pseudo device (id = -1) to asynchronously notify the guest when storage
+ * devices become ready.  The notification device has a block size of 512
+ * bytes.
+ */
+
+static int ps3_storage_wait_for_device(const struct ps3_repository_device *repo)
+{
+	int result;
+	const u64 notification_dev_id = (u64)-1LL;
+	const unsigned int timeout = HZ;
+	u64 lpar;
+	u64 tag;
+	struct {
+		u64 operation_code;	/* must be zero */
+		u64 event_mask;		/* 1 = device ready */
+	} *notify_cmd;
+	struct {
+		u64 event_type;		/* notify_device_ready */
+		u64 bus_id;
+		u64 dev_id;
+		u64 dev_type;
+		u64 dev_port;
+	} *notify_event;
+	enum {
+		notify_device_ready = 1
+	};
+
+	pr_debug(" -> %s:%u: bus_id %u, dev_id %u, dev_type %u\n", __func__,
+		 __LINE__, repo->bus_id, repo->dev_id, repo->dev_type);
+
+	notify_cmd = kzalloc(512, GFP_KERNEL);
+	notify_event = (void *)notify_cmd;
+	if (!notify_cmd)
+		return -ENOMEM;
+
+	lpar = ps3_mm_phys_to_lpar(__pa(notify_cmd));
+
+	result = lv1_open_device(repo->bus_id, notification_dev_id, 0);
+	if (result) {
+		printk(KERN_ERR "%s:%u: lv1_open_device %s\n", __func__,
+		       __LINE__, ps3_result(result));
+		result = -ENODEV;
+		goto fail_free;
+	}
+
+	/* Setup and write the request for device notification. */
+
+	notify_cmd->operation_code = 0;	/* must be zero */
+	notify_cmd->event_mask = 0x01;	/* device ready */
+
+	result = lv1_storage_write(notification_dev_id, 0, 0, 1, 0, lpar,
+				   &tag);
+	if (result) {
+		printk(KERN_ERR "%s:%u: write failed %s\n", __func__, __LINE__,
+		       ps3_result(result));
+		result = -ENODEV;
+		goto fail_close;
+	}
+
+	/* Wait for the write completion */
+
+	result = ps3stor_wait_for_completion(notification_dev_id, tag,
+					     timeout);
+	if (result) {
+		printk(KERN_ERR "%s:%u: write not completed %s\n", __func__,
+		       __LINE__, ps3_result(result));
+		result = -ENODEV;
+		goto fail_close;
+	}
+
+	/* Loop here processing the requested notification events. */
+
+	result = -ENODEV;
+	while (1) {
+		memset(notify_event, 0, sizeof(*notify_event));
+
+		result = lv1_storage_read(notification_dev_id, 0, 0, 1, 0,
+					  lpar, &tag);
+		if (result) {
+			printk(KERN_ERR "%s:%u: write failed %s\n", __func__,
+			       __LINE__, ps3_result(result));
+			break;
+		}
+
+		result = ps3stor_wait_for_completion(notification_dev_id, tag,
+						     timeout);
+		if (result) {
+			printk(KERN_ERR "%s:%u: read not completed %s\n",
+			       __func__, __LINE__, ps3_result(result));
+			break;
+		}
+
+		if (notify_event->event_type != notify_device_ready ||
+		    notify_event->bus_id != repo->bus_id) {
+			pr_debug("%s:%u: bad notify_event: event %lu, "
+				 "dev_id %lu, dev_type %lu\n",
+				 __func__, __LINE__, notify_event->event_type,
+				 notify_event->dev_id, notify_event->dev_type);
+			break;
+		}
+
+		if (notify_event->dev_id == repo->dev_id &&
+		    notify_event->dev_type == repo->dev_type) {
+			pr_debug("%s:%u: device ready: dev_id %u\n", __func__,
+				 __LINE__, repo->dev_id);
+			result = 0;
+			break;
+		}
+
+		if (notify_event->dev_id == repo->dev_id &&
+		    notify_event->dev_type == PS3_DEV_TYPE_NOACCESS) {
+			pr_debug("%s:%u: no access: dev_id %u\n", __func__,
+				 __LINE__, repo->dev_id);
+			break;
+		}
+	}
+
+fail_close:
+	lv1_close_device(repo->bus_id, notification_dev_id);
+fail_free:
+	kfree(notify_cmd);
+	pr_debug(" <- %s:%u\n", __func__, __LINE__);
+	return result;
+}
+
+static int ps3_setup_storage_dev(const struct ps3_repository_device *repo,
+				 enum ps3_match_id match_id)
+{
+	int result;
+	struct ps3_storage_device *p;
+	u64 port, blk_size, num_blocks;
+	unsigned int num_regions, i;
+
+	pr_debug(" -> %s:%u: match_id %u\n", __func__, __LINE__, match_id);
+
+	result = ps3_repository_read_stor_dev_info(repo->bus_index,
+						   repo->dev_index, &port,
+						   &blk_size, &num_blocks,
+						   &num_regions);
+	if (result) {
+		printk(KERN_ERR "%s:%u: _read_stor_dev_info failed %d\n",
+		       __func__, __LINE__, result);
+		return -ENODEV;
+	}
+
+	pr_debug("%s:%u: index %u:%u: port %lu blk_size %lu num_blocks %lu "
+		 "num_regions %u\n", __func__, __LINE__, repo->bus_index,
+		 repo->dev_index, port, blk_size, num_blocks, num_regions);
+
+	p = kzalloc(sizeof(struct ps3_storage_device) +
+		    num_regions * sizeof(struct ps3_storage_region),
+		    GFP_KERNEL);
+	if (!p) {
+		result = -ENOMEM;
+		goto fail_malloc;
+	}
+
+	p->sbd.match_id = match_id;
+	p->sbd.dev_type = PS3_DEVICE_TYPE_SB;
+	p->sbd.bus_id = repo->bus_id;
+	p->sbd.dev_id = repo->dev_id;
+	p->sbd.d_region = &p->dma_region;
+	p->blk_size = blk_size;
+	p->num_regions = num_regions;
+
+	result = ps3_repository_find_interrupt(repo,
+					       PS3_INTERRUPT_TYPE_EVENT_PORT,
+					       &p->sbd.interrupt_id);
+	if (result) {
+		printk(KERN_ERR "%s:%u: find_interrupt failed %d\n", __func__,
+		       __LINE__, result);
+		result = -ENODEV;
+		goto fail_find_interrupt;
+	}
+
+	/* FIXME: Arrange to only do this on a 'cold' boot */
+
+	result = ps3_storage_wait_for_device(repo);
+	if (result) {
+		printk(KERN_ERR "%s:%u: storage_notification failed %d\n",
+		       __func__, __LINE__, result);
+		result = -ENODEV;
+		goto fail_probe_notification;
+	}
+
+	for (i = 0; i < num_regions; i++) {
+		unsigned int id;
+		u64 start, size;
+
+		result = ps3_repository_read_stor_dev_region(repo->bus_index,
+							     repo->dev_index,
+							     i, &id, &start,
+							     &size);
+		if (result) {
+			printk(KERN_ERR
+			       "%s:%u: read_stor_dev_region failed %d\n",
+			       __func__, __LINE__, result);
+			result = -ENODEV;
+			goto fail_read_region;
+		}
+		pr_debug("%s:%u: region %u: id %u start %lu size %lu\n",
+			 __func__, __LINE__, i, id, start, size);
+
+		p->regions[i].id = id;
+		p->regions[i].start = start;
+		p->regions[i].size = size;
+	}
+
+	result = ps3_system_bus_device_register(&p->sbd);
+	if (result) {
+		pr_debug("%s:%u ps3_system_bus_device_register failed\n",
+			 __func__, __LINE__);
+		goto fail_device_register;
+	}
+
+	pr_debug(" <- %s:%u\n", __func__, __LINE__);
+	return 0;
+
+fail_device_register:
+fail_read_region:
+fail_probe_notification:
+fail_find_interrupt:
+	kfree(p);
+fail_malloc:
+	pr_debug(" <- %s:%u: fail.\n", __func__, __LINE__);
+	return result;
+}
+
+static int __init ps3_register_vuart_devices(void)
+{
+	int result;
+	unsigned int port_number;
+
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+	result = ps3_repository_read_vuart_av_port(&port_number);
+	if (result)
+		port_number = 0; /* av default */
+
+	result = ps3_setup_vuart_device(PS3_MATCH_ID_AV_SETTINGS, port_number);
+	WARN_ON(result);
+
+	result = ps3_repository_read_vuart_sysmgr_port(&port_number);
+	if (result)
+		port_number = 2; /* sysmgr default */
+
+	result = ps3_setup_vuart_device(PS3_MATCH_ID_SYSTEM_MANAGER,
+		port_number);
+	WARN_ON(result);
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+
+static int __init ps3_register_sound_devices(void)
+{
+	int result;
+	struct layout {
+		struct ps3_system_bus_device dev;
+		struct ps3_dma_region d_region;
+		struct ps3_mmio_region m_region;
+	} *p;
+
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	p->dev.match_id = PS3_MATCH_ID_SOUND;
+	p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
+	p->dev.d_region = &p->d_region;
+	p->dev.m_region = &p->m_region;
+
+	result = ps3_system_bus_device_register(&p->dev);
+
+	if (result)
+		pr_debug("%s:%d ps3_system_bus_device_register failed\n",
+			__func__, __LINE__);
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+
+static int __init ps3_register_graphics_devices(void)
+{
+	int result;
+	struct layout {
+		struct ps3_system_bus_device dev;
+	} *p;
+
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+	p = kzalloc(sizeof(struct layout), GFP_KERNEL);
+
+	if (!p)
+		return -ENOMEM;
+
+	p->dev.match_id = PS3_MATCH_ID_GRAPHICS;
+	p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
+
+	result = ps3_system_bus_device_register(&p->dev);
+
+	if (result)
+		pr_debug("%s:%d ps3_system_bus_device_register failed\n",
+			__func__, __LINE__);
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+
+/**
+ * ps3_register_repository_device - Register a device from the repository info.
+ *
+ */
+
+static int ps3_register_repository_device(
+	const struct ps3_repository_device *repo)
+{
+	int result;
+
+	switch (repo->dev_type) {
+	case PS3_DEV_TYPE_SB_GELIC:
+		result = ps3_setup_gelic_device(repo);
+		if (result) {
+			pr_debug("%s:%d ps3_setup_gelic_device failed\n",
+				__func__, __LINE__);
+		}
+		break;
+	case PS3_DEV_TYPE_SB_USB:
+
+		/* Each USB device has both an EHCI and an OHCI HC */
+
+		result = ps3_setup_ehci_device(repo);
+
+		if (result) {
+			pr_debug("%s:%d ps3_setup_ehci_device failed\n",
+				__func__, __LINE__);
+		}
+
+		result = ps3_setup_ohci_device(repo);
+
+		if (result) {
+			pr_debug("%s:%d ps3_setup_ohci_device failed\n",
+				__func__, __LINE__);
+		}
+		break;
+	case PS3_DEV_TYPE_STOR_DISK:
+		result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_DISK);
+
+		/* Some devices are not accessible from the Other OS lpar. */
+		if (result == -ENODEV) {
+			result = 0;
+			pr_debug("%s:%u: not accessable\n", __func__,
+				 __LINE__);
+		}
+
+		if (result)
+			pr_debug("%s:%u ps3_setup_storage_dev failed\n",
+				 __func__, __LINE__);
+		break;
+
+	case PS3_DEV_TYPE_STOR_ROM:
+		result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_ROM);
+		if (result)
+			pr_debug("%s:%u ps3_setup_storage_dev failed\n",
+				 __func__, __LINE__);
+		break;
+
+	case PS3_DEV_TYPE_STOR_FLASH:
+		result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_FLASH);
+		if (result)
+			pr_debug("%s:%u ps3_setup_storage_dev failed\n",
+				 __func__, __LINE__);
+		break;
+
+	default:
+		result = 0;
+		pr_debug("%s:%u: unsupported dev_type %u\n", __func__, __LINE__,
+			repo->dev_type);
+	}
+
+	return result;
+}
+
+/**
+ * ps3_probe_thread - Background repository probing at system startup.
+ *
+ * This implementation only supports background probing on a single bus.
+ */
+
+static int ps3_probe_thread(void *data)
+{
+	struct ps3_repository_device *repo = data;
+	int result;
+	unsigned int ms = 250;
+
+	pr_debug(" -> %s:%u: kthread started\n", __func__, __LINE__);
+
+	do {
+		try_to_freeze();
+
+		pr_debug("%s:%u: probing...\n", __func__, __LINE__);
+
+		do {
+			result = ps3_repository_find_device(repo);
+
+			if (result == -ENODEV)
+				pr_debug("%s:%u: nothing new\n", __func__,
+					__LINE__);
+			else if (result)
+				pr_debug("%s:%u: find device error.\n",
+					__func__, __LINE__);
+			else {
+				pr_debug("%s:%u: found device\n", __func__,
+					__LINE__);
+				ps3_register_repository_device(repo);
+				ps3_repository_bump_device(repo);
+				ms = 250;
+			}
+		} while (!result);
+
+		pr_debug("%s:%u: ms %u\n", __func__, __LINE__, ms);
+
+		if (ms > 60000)
+			break;
+
+		msleep_interruptible(ms);
+
+		/* An exponential backoff. */
+		ms <<= 1;
+
+	} while (!kthread_should_stop());
+
+	pr_debug(" <- %s:%u: kthread finished\n", __func__, __LINE__);
+
+	return 0;
+}
+
+/**
+ * ps3_start_probe_thread - Starts the background probe thread.
+ *
+ */
+
+static int __init ps3_start_probe_thread(enum ps3_bus_type bus_type)
+{
+	int result;
+	struct task_struct *task;
+	static struct ps3_repository_device repo; /* must be static */
+
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+	memset(&repo, 0, sizeof(repo));
+
+	repo.bus_type = bus_type;
+
+	result = ps3_repository_find_bus(repo.bus_type, 0, &repo.bus_index);
+
+	if (result) {
+		printk(KERN_ERR "%s: Cannot find bus (%d)\n", __func__, result);
+		return -ENODEV;
+	}
+
+	result = ps3_repository_read_bus_id(repo.bus_index, &repo.bus_id);
+
+	if (result) {
+		printk(KERN_ERR "%s: read_bus_id failed %d\n", __func__,
+			result);
+		return -ENODEV;
+	}
+
+	task = kthread_run(ps3_probe_thread, &repo, "ps3-probe-%u", bus_type);
+
+	if (IS_ERR(task)) {
+		result = PTR_ERR(task);
+		printk(KERN_ERR "%s: kthread_run failed %d\n", __func__,
+		       result);
+		return result;
+	}
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return 0;
+}
+
+/**
+ * ps3_register_devices - Probe the system and register devices found.
+ *
+ * A device_initcall() routine.
+ */
+
+static int __init ps3_register_devices(void)
+{
+	int result;
+
+	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+		return -ENODEV;
+
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+	/* ps3_repository_dump_bus_info(); */
+
+	result = ps3_start_probe_thread(PS3_BUS_TYPE_STORAGE);
+
+	ps3_register_vuart_devices();
+
+	ps3_register_graphics_devices();
+
+	ps3_repository_find_devices(PS3_BUS_TYPE_SB,
+		ps3_register_repository_device);
+
+	ps3_register_sound_devices();
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return 0;
+}
+
+device_initcall(ps3_register_devices);
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index a1409e4..5d2e176 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -29,12 +29,12 @@
 #include "platform.h"
 
 #if defined(DEBUG)
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG udbg_printf
 #else
-#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
+#define DBG pr_debug
 #endif
 
-static hpte_t *htab;
+static struct hash_pte *htab;
 static unsigned long htab_addr;
 static unsigned char *bolttab;
 static unsigned char *inusetab;
@@ -44,8 +44,8 @@
 #define debug_dump_hpte(_a, _b, _c, _d, _e, _f, _g) \
 	_debug_dump_hpte(_a, _b, _c, _d, _e, _f, _g, __func__, __LINE__)
 static void _debug_dump_hpte(unsigned long pa, unsigned long va,
-	unsigned long group, unsigned long bitmap, hpte_t lhpte, int psize,
-	unsigned long slot, const char* func, int line)
+	unsigned long group, unsigned long bitmap, struct hash_pte lhpte,
+	int psize, unsigned long slot, const char* func, int line)
 {
 	DBG("%s:%d: pa     = %lxh\n", func, line, pa);
 	DBG("%s:%d: lpar   = %lxh\n", func, line,
@@ -63,7 +63,7 @@
 	unsigned long pa, unsigned long rflags, unsigned long vflags, int psize)
 {
 	unsigned long slot;
-	hpte_t lhpte;
+	struct hash_pte lhpte;
 	int secondary = 0;
 	unsigned long result;
 	unsigned long bitmap;
@@ -234,10 +234,17 @@
 
 static void ps3_hpte_clear(void)
 {
-	/* Make sure to clean up the frame buffer device first */
-	ps3fb_cleanup();
+	int result;
 
-	lv1_unmap_htab(htab_addr);
+	DBG(" -> %s:%d\n", __func__, __LINE__);
+
+	result = lv1_unmap_htab(htab_addr);
+	BUG_ON(result);
+
+	ps3_mm_shutdown();
+	ps3_mm_vas_destroy();
+
+	DBG(" <- %s:%d\n", __func__, __LINE__);
 }
 
 void __init ps3_hpte_init(unsigned long htab_size)
@@ -255,7 +262,7 @@
 
 	ppc64_pft_size = __ilog2(htab_size);
 
-	bitmap_size = htab_size / sizeof(hpte_t) / 8;
+	bitmap_size = htab_size / sizeof(struct hash_pte) / 8;
 
 	bolttab = __va(lmb_alloc(bitmap_size, 1));
 	inusetab = __va(lmb_alloc(bitmap_size, 1));
@@ -273,8 +280,8 @@
 
 	result = lv1_map_htab(0, &htab_addr);
 
-	htab = (hpte_t *)__ioremap(htab_addr, htab_size,
-				   pgprot_val(PAGE_READONLY_X));
+	htab = (__force struct hash_pte *)ioremap_flags(htab_addr, htab_size,
+					    pgprot_val(PAGE_READONLY_X));
 
 	DBG("%s:%d: lpar %016lxh, virt %016lxh\n", __func__, __LINE__,
 		htab_addr, (unsigned long)htab);
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index ec9030d..67e32ec 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -30,9 +30,9 @@
 #include "platform.h"
 
 #if defined(DEBUG)
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG udbg_printf
 #else
-#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
+#define DBG pr_debug
 #endif
 
 /**
@@ -78,19 +78,85 @@
 /**
  * struct ps3_private - a per cpu data structure
  * @bmp: ps3_bmp structure
- * @node: HV logical_ppe_id
- * @cpu: HV thread_id
+ * @ppe_id: HV logical_ppe_id
+ * @thread_id: HV thread_id
  */
 
 struct ps3_private {
 	struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
-	u64 node;
-	unsigned int cpu;
+	u64 ppe_id;
+	u64 thread_id;
 };
 
 static DEFINE_PER_CPU(struct ps3_private, ps3_private);
 
 /**
+ * ps3_chip_mask - Set an interrupt mask bit in ps3_bmp.
+ * @virq: The assigned Linux virq.
+ *
+ * Sets ps3_bmp.mask and calls lv1_did_update_interrupt_mask().
+ */
+
+static void ps3_chip_mask(unsigned int virq)
+{
+	struct ps3_private *pd = get_irq_chip_data(virq);
+	unsigned long flags;
+
+	pr_debug("%s:%d: thread_id %lu, virq %d\n", __func__, __LINE__,
+		pd->thread_id, virq);
+
+	local_irq_save(flags);
+	clear_bit(63 - virq, &pd->bmp.mask);
+	lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id);
+	local_irq_restore(flags);
+}
+
+/**
+ * ps3_chip_unmask - Clear an interrupt mask bit in ps3_bmp.
+ * @virq: The assigned Linux virq.
+ *
+ * Clears ps3_bmp.mask and calls lv1_did_update_interrupt_mask().
+ */
+
+static void ps3_chip_unmask(unsigned int virq)
+{
+	struct ps3_private *pd = get_irq_chip_data(virq);
+	unsigned long flags;
+
+	pr_debug("%s:%d: thread_id %lu, virq %d\n", __func__, __LINE__,
+		pd->thread_id, virq);
+
+	local_irq_save(flags);
+	set_bit(63 - virq, &pd->bmp.mask);
+	lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id);
+	local_irq_restore(flags);
+}
+
+/**
+ * ps3_chip_eoi - HV end-of-interrupt.
+ * @virq: The assigned Linux virq.
+ *
+ * Calls lv1_end_of_interrupt_ext().
+ */
+
+static void ps3_chip_eoi(unsigned int virq)
+{
+	const struct ps3_private *pd = get_irq_chip_data(virq);
+	lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, virq);
+}
+
+/**
+ * ps3_irq_chip - Represents the ps3_bmp as a Linux struct irq_chip.
+ */
+
+static struct irq_chip ps3_irq_chip = {
+	.typename = "ps3",
+	.mask = ps3_chip_mask,
+	.unmask = ps3_chip_unmask,
+	.eoi = ps3_chip_eoi,
+};
+
+/**
  * ps3_virq_setup - virq related setup.
  * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
  * serviced on.
@@ -134,6 +200,8 @@
 		goto fail_set;
 	}
 
+	ps3_chip_mask(*virq);
+
 	return result;
 
 fail_set:
@@ -153,8 +221,8 @@
 {
 	const struct ps3_private *pd = get_irq_chip_data(virq);
 
-	pr_debug("%s:%d: node %lu, cpu %d, virq %u\n", __func__, __LINE__,
-		pd->node, pd->cpu, virq);
+	pr_debug("%s:%d: ppe_id %lu, thread_id %lu, virq %u\n", __func__,
+		__LINE__, pd->ppe_id, pd->thread_id, virq);
 
 	set_irq_chip_data(virq, NULL);
 	irq_dispose_mapping(virq);
@@ -190,7 +258,8 @@
 
 	/* Binds outlet to cpu + virq. */
 
-	result = lv1_connect_irq_plug_ext(pd->node, pd->cpu, *virq, outlet, 0);
+	result = lv1_connect_irq_plug_ext(pd->ppe_id, pd->thread_id, *virq,
+		outlet, 0);
 
 	if (result) {
 		pr_info("%s:%d: lv1_connect_irq_plug_ext failed: %s\n",
@@ -222,10 +291,12 @@
 	int result;
 	const struct ps3_private *pd = get_irq_chip_data(virq);
 
-	pr_debug("%s:%d: node %lu, cpu %d, virq %u\n", __func__, __LINE__,
-		pd->node, pd->cpu, virq);
+	pr_debug("%s:%d: ppe_id %lu, thread_id %lu, virq %u\n", __func__,
+		__LINE__, pd->ppe_id, pd->thread_id, virq);
 
-	result = lv1_disconnect_irq_plug_ext(pd->node, pd->cpu, virq);
+	ps3_chip_mask(virq);
+
+	result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq);
 
 	if (result)
 		pr_info("%s:%d: lv1_disconnect_irq_plug_ext failed: %s\n",
@@ -282,7 +353,9 @@
 {
 	int result;
 
-	pr_debug(" -> %s:%d virq: %u\n", __func__, __LINE__, virq);
+	pr_debug(" -> %s:%d virq %u\n", __func__, __LINE__, virq);
+
+	ps3_chip_mask(virq);
 
 	result = lv1_destruct_event_receive_port(virq_to_hw(virq));
 
@@ -290,17 +363,14 @@
 		pr_debug("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
 
-	/* lv1_destruct_event_receive_port() destroys the IRQ plug,
-	 * so don't call ps3_irq_plug_destroy() here.
+	/*
+	 * Don't call ps3_virq_destroy() here; ps3_smp_cleanup_cpu() calls
+	 * this routine from interrupt context (smp_call_function) on kexec.
 	 */
 
-	result = ps3_virq_destroy(virq);
-	BUG_ON(result);
-
 	pr_debug(" <- %s:%d\n", __func__, __LINE__);
 	return result;
 }
-EXPORT_SYMBOL_GPL(ps3_event_receive_port_destroy);
 
 int ps3_send_event_locally(unsigned int virq)
 {
@@ -311,17 +381,15 @@
  * ps3_sb_event_receive_port_setup - Setup a system bus event receive port.
  * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
  * serviced on.
- * @did: The HV device identifier read from the system repository.
- * @interrupt_id: The device interrupt id read from the system repository.
+ * @dev: The system bus device instance.
  * @virq: The assigned Linux virq.
  *
  * An event irq represents a virtual device interrupt.  The interrupt_id
  * coresponds to the software interrupt number.
  */
 
-int ps3_sb_event_receive_port_setup(enum ps3_cpu_binding cpu,
-	const struct ps3_device_id *did, unsigned int interrupt_id,
-	unsigned int *virq)
+int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev,
+	enum ps3_cpu_binding cpu, unsigned int *virq)
 {
 	/* this should go in system-bus.c */
 
@@ -332,8 +400,8 @@
 	if (result)
 		return result;
 
-	result = lv1_connect_interrupt_event_receive_port(did->bus_id,
-		did->dev_id, virq_to_hw(*virq), interrupt_id);
+	result = lv1_connect_interrupt_event_receive_port(dev->bus_id,
+		dev->dev_id, virq_to_hw(*virq), dev->interrupt_id);
 
 	if (result) {
 		pr_debug("%s:%d: lv1_connect_interrupt_event_receive_port"
@@ -345,24 +413,24 @@
 	}
 
 	pr_debug("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
-		interrupt_id, *virq);
+		dev->interrupt_id, *virq);
 
 	return 0;
 }
 EXPORT_SYMBOL(ps3_sb_event_receive_port_setup);
 
-int ps3_sb_event_receive_port_destroy(const struct ps3_device_id *did,
-	unsigned int interrupt_id, unsigned int virq)
+int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev,
+	unsigned int virq)
 {
 	/* this should go in system-bus.c */
 
 	int result;
 
 	pr_debug(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
-		interrupt_id, virq);
+		dev->interrupt_id, virq);
 
-	result = lv1_disconnect_interrupt_event_receive_port(did->bus_id,
-		did->dev_id, virq_to_hw(virq), interrupt_id);
+	result = lv1_disconnect_interrupt_event_receive_port(dev->bus_id,
+		dev->dev_id, virq_to_hw(virq), dev->interrupt_id);
 
 	if (result)
 		pr_debug("%s:%d: lv1_disconnect_interrupt_event_receive_port"
@@ -372,6 +440,14 @@
 	result = ps3_event_receive_port_destroy(virq);
 	BUG_ON(result);
 
+	/*
+	 * ps3_event_receive_port_destroy() destroys the IRQ plug,
+	 * so don't call ps3_irq_plug_destroy() here.
+	 */
+
+	result = ps3_virq_destroy(virq);
+	BUG_ON(result);
+
 	pr_debug(" <- %s:%d\n", __func__, __LINE__);
 	return result;
 }
@@ -412,16 +488,24 @@
 int ps3_io_irq_destroy(unsigned int virq)
 {
 	int result;
+	unsigned long outlet = virq_to_hw(virq);
 
-	result = lv1_destruct_io_irq_outlet(virq_to_hw(virq));
+	ps3_chip_mask(virq);
+
+	/*
+	 * lv1_destruct_io_irq_outlet() will destroy the IRQ plug,
+	 * so call ps3_irq_plug_destroy() first.
+	 */
+
+	result = ps3_irq_plug_destroy(virq);
+	BUG_ON(result);
+
+	result = lv1_destruct_io_irq_outlet(outlet);
 
 	if (result)
 		pr_debug("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
 
-	result = ps3_irq_plug_destroy(virq);
-	BUG_ON(result);
-
 	return result;
 }
 EXPORT_SYMBOL_GPL(ps3_io_irq_destroy);
@@ -461,11 +545,13 @@
 
 	return result;
 }
+EXPORT_SYMBOL_GPL(ps3_vuart_irq_setup);
 
 int ps3_vuart_irq_destroy(unsigned int virq)
 {
 	int result;
 
+	ps3_chip_mask(virq);
 	result = lv1_deconfigure_virtual_uart_irq();
 
 	if (result) {
@@ -479,6 +565,7 @@
 
 	return result;
 }
+EXPORT_SYMBOL_GPL(ps3_vuart_irq_destroy);
 
 /**
  * ps3_spe_irq_setup - Setup an spe virq.
@@ -514,9 +601,14 @@
 
 int ps3_spe_irq_destroy(unsigned int virq)
 {
-	int result = ps3_irq_plug_destroy(virq);
+	int result;
+
+	ps3_chip_mask(virq);
+
+	result = ps3_irq_plug_destroy(virq);
 	BUG_ON(result);
-	return 0;
+
+	return result;
 }
 
 
@@ -533,7 +625,7 @@
 		*p & 0xffff);
 }
 
-static void __attribute__ ((unused)) _dump_256_bmp(const char *header,
+static void __maybe_unused _dump_256_bmp(const char *header,
 	const u64 *p, unsigned cpu, const char* func, int line)
 {
 	pr_debug("%s:%d: %s %u {%016lx:%016lx:%016lx:%016lx}\n",
@@ -546,86 +638,25 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&pd->bmp.lock, flags);
-	_dump_64_bmp("stat", &pd->bmp.status, pd->cpu, func, line);
-	_dump_64_bmp("mask", &pd->bmp.mask, pd->cpu, func, line);
+	_dump_64_bmp("stat", &pd->bmp.status, pd->thread_id, func, line);
+	_dump_64_bmp("mask", &pd->bmp.mask, pd->thread_id, func, line);
 	spin_unlock_irqrestore(&pd->bmp.lock, flags);
 }
 
 #define dump_mask(_x) _dump_mask(_x, __func__, __LINE__)
-static void __attribute__ ((unused)) _dump_mask(struct ps3_private* pd,
+static void __maybe_unused _dump_mask(struct ps3_private *pd,
 	const char* func, int line)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&pd->bmp.lock, flags);
-	_dump_64_bmp("mask", &pd->bmp.mask, pd->cpu, func, line);
+	_dump_64_bmp("mask", &pd->bmp.mask, pd->thread_id, func, line);
 	spin_unlock_irqrestore(&pd->bmp.lock, flags);
 }
 #else
 static void dump_bmp(struct ps3_private* pd) {};
 #endif /* defined(DEBUG) */
 
-static void ps3_chip_mask(unsigned int virq)
-{
-	struct ps3_private *pd = get_irq_chip_data(virq);
-	u64 bit = 0x8000000000000000UL >> virq;
-	u64 *p = &pd->bmp.mask;
-	u64 old;
-	unsigned long flags;
-
-	pr_debug("%s:%d: cpu %u, virq %d\n", __func__, __LINE__, pd->cpu, virq);
-
-	local_irq_save(flags);
-	asm volatile(
-		     "1:	ldarx %0,0,%3\n"
-		     "andc	%0,%0,%2\n"
-		     "stdcx.	%0,0,%3\n"
-		     "bne-	1b"
-		     : "=&r" (old), "+m" (*p)
-		     : "r" (bit), "r" (p)
-		     : "cc" );
-
-	lv1_did_update_interrupt_mask(pd->node, pd->cpu);
-	local_irq_restore(flags);
-}
-
-static void ps3_chip_unmask(unsigned int virq)
-{
-	struct ps3_private *pd = get_irq_chip_data(virq);
-	u64 bit = 0x8000000000000000UL >> virq;
-	u64 *p = &pd->bmp.mask;
-	u64 old;
-	unsigned long flags;
-
-	pr_debug("%s:%d: cpu %u, virq %d\n", __func__, __LINE__, pd->cpu, virq);
-
-	local_irq_save(flags);
-	asm volatile(
-		     "1:	ldarx %0,0,%3\n"
-		     "or	%0,%0,%2\n"
-		     "stdcx.	%0,0,%3\n"
-		     "bne-	1b"
-		     : "=&r" (old), "+m" (*p)
-		     : "r" (bit), "r" (p)
-		     : "cc" );
-
-	lv1_did_update_interrupt_mask(pd->node, pd->cpu);
-	local_irq_restore(flags);
-}
-
-static void ps3_chip_eoi(unsigned int virq)
-{
-	const struct ps3_private *pd = get_irq_chip_data(virq);
-	lv1_end_of_interrupt_ext(pd->node, pd->cpu, virq);
-}
-
-static struct irq_chip irq_chip = {
-	.typename = "ps3",
-	.mask = ps3_chip_mask,
-	.unmask = ps3_chip_unmask,
-	.eoi = ps3_chip_eoi,
-};
-
 static void ps3_host_unmap(struct irq_host *h, unsigned int virq)
 {
 	set_irq_chip_data(virq, NULL);
@@ -637,7 +668,7 @@
 	pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
 		virq);
 
-	set_irq_chip_and_handler(virq, &irq_chip, handle_fasteoi_irq);
+	set_irq_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq);
 
 	return 0;
 }
@@ -657,7 +688,7 @@
 		cpu, virq, pd->bmp.ipi_debug_brk_mask);
 }
 
-unsigned int ps3_get_irq(void)
+static unsigned int ps3_get_irq(void)
 {
 	struct ps3_private *pd = &__get_cpu_var(ps3_private);
 	u64 x = (pd->bmp.status & pd->bmp.mask);
@@ -672,8 +703,8 @@
 	plug &= 0x3f;
 
 	if (unlikely(plug) == NO_IRQ) {
-		pr_debug("%s:%d: no plug found: cpu %u\n", __func__, __LINE__,
-			pd->cpu);
+		pr_debug("%s:%d: no plug found: thread_id %lu\n", __func__,
+			__LINE__, pd->thread_id);
 		dump_bmp(&per_cpu(ps3_private, 0));
 		dump_bmp(&per_cpu(ps3_private, 1));
 		return NO_IRQ;
@@ -703,16 +734,16 @@
 	for_each_possible_cpu(cpu) {
 		struct ps3_private *pd = &per_cpu(ps3_private, cpu);
 
-		lv1_get_logical_ppe_id(&pd->node);
-		pd->cpu = get_hard_smp_processor_id(cpu);
+		lv1_get_logical_ppe_id(&pd->ppe_id);
+		pd->thread_id = get_hard_smp_processor_id(cpu);
 		spin_lock_init(&pd->bmp.lock);
 
-		pr_debug("%s:%d: node %lu, cpu %d, bmp %lxh\n", __func__,
-			__LINE__, pd->node, pd->cpu,
+		pr_debug("%s:%d: ppe_id %lu, thread_id %lu, bmp %lxh\n",
+			__func__, __LINE__, pd->ppe_id, pd->thread_id,
 			ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
 
-		result = lv1_configure_irq_state_bitmap(pd->node, pd->cpu,
-			ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
+		result = lv1_configure_irq_state_bitmap(pd->ppe_id,
+			pd->thread_id, ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
 
 		if (result)
 			pr_debug("%s:%d: lv1_configure_irq_state_bitmap failed:"
@@ -722,3 +753,16 @@
 
 	ppc_md.get_irq = ps3_get_irq;
 }
+
+void ps3_shutdown_IRQ(int cpu)
+{
+	int result;
+	u64 ppe_id;
+	u64 thread_id = get_hard_smp_processor_id(cpu);
+
+	lv1_get_logical_ppe_id(&ppe_id);
+	result = lv1_configure_irq_state_bitmap(ppe_id, thread_id, 0);
+
+	DBG("%s:%d: lv1_configure_irq_state_bitmap (%lu:%lu/%d) %s\n", __func__,
+		__LINE__, ppe_id, thread_id, cpu, ps3_result(result));
+}
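The reworked mask and unmask callbacks above operate on a 64-bit interrupt plug bitmap: plug number virq corresponds to bit (63 - virq), a set bit means the plug is enabled, and clear_bit()/set_bit() under local_irq_save() replace the earlier hand-rolled ldarx/stdcx. loops. A minimal stand-alone sketch of that bit convention, for illustration only (the helper names are made up and are not part of the patch):

/*
 * Sketch of the plug-bitmap convention used by ps3_chip_mask() and
 * ps3_chip_unmask(): bit (63 - virq) of the 64-bit mask word controls
 * interrupt plug virq; a set bit means the plug is unmasked.
 */
#include <stdint.h>

static inline void bmp_mask(uint64_t *mask, unsigned int virq)
{
	*mask &= ~(UINT64_C(1) << (63 - virq));		/* disable the plug */
}

static inline void bmp_unmask(uint64_t *mask, unsigned int virq)
{
	*mask |= UINT64_C(1) << (63 - virq);		/* enable the plug */
}

In the driver itself each update is followed by lv1_did_update_interrupt_mask() so the hypervisor picks up the new mask word.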
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index f8a3e20..7bb3e16 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -30,9 +30,9 @@
 #include "platform.h"
 
 #if defined(DEBUG)
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG udbg_printf
 #else
-#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
+#define DBG pr_debug
 #endif
 
 enum {
@@ -115,7 +115,8 @@
 };
 
 #define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
-static void _debug_dump_map(const struct map* m, const char* func, int line)
+static void __maybe_unused _debug_dump_map(const struct map *m,
+	const char *func, int line)
 {
 	DBG("%s:%d: map.total     = %lxh\n", func, line, m->total);
 	DBG("%s:%d: map.rm.size   = %lxh\n", func, line, m->rm.size);
@@ -212,9 +213,15 @@
 
 void ps3_mm_vas_destroy(void)
 {
+	int result;
+
+	DBG("%s:%d: map.vas_id    = %lu\n", __func__, __LINE__, map.vas_id);
+
 	if (map.vas_id) {
-		lv1_select_virtual_address_space(0);
-		lv1_destruct_virtual_address_space(map.vas_id);
+		result = lv1_select_virtual_address_space(0);
+		BUG_ON(result);
+		result = lv1_destruct_virtual_address_space(map.vas_id);
+		BUG_ON(result);
 		map.vas_id = 0;
 	}
 }
@@ -232,7 +239,7 @@
  * @size is rounded down to a multiple of the vas large page size.
  */
 
-int ps3_mm_region_create(struct mem_region *r, unsigned long size)
+static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
 {
 	int result;
 	unsigned long muid;
@@ -273,10 +280,14 @@
  * @r: pointer to struct mem_region
  */
 
-void ps3_mm_region_destroy(struct mem_region *r)
+static void ps3_mm_region_destroy(struct mem_region *r)
 {
+	int result;
+
+	DBG("%s:%d: r->base = %lxh\n", __func__, __LINE__, r->base);
 	if (r->base) {
-		lv1_release_memory(r->base);
+		result = lv1_release_memory(r->base);
+		BUG_ON(result);
 		r->size = r->base = r->offset = 0;
 		map.total = map.rm.size;
 	}
@@ -329,31 +340,34 @@
 /*============================================================================*/
 
 /**
- * dma_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
+ * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
  * @r: pointer to dma region structure
  * @lpar_addr: HV lpar address
  */
 
-static unsigned long dma_lpar_to_bus(struct ps3_dma_region *r,
+static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
 	unsigned long lpar_addr)
 {
-	BUG_ON(lpar_addr >= map.r1.base + map.r1.size);
-	return r->bus_addr + (lpar_addr <= map.rm.size ? lpar_addr
-		: lpar_addr - map.r1.offset);
+	if (lpar_addr >= map.rm.size)
+		lpar_addr -= map.r1.offset;
+	BUG_ON(lpar_addr < r->offset);
+	BUG_ON(lpar_addr >= r->offset + r->len);
+	return r->bus_addr + lpar_addr - r->offset;
 }
 
 #define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
-static void _dma_dump_region(const struct ps3_dma_region *r, const char* func,
-	int line)
+static void  __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
+	const char *func, int line)
 {
-	DBG("%s:%d: dev        %u:%u\n", func, line, r->did.bus_id,
-		r->did.dev_id);
+	DBG("%s:%d: dev        %u:%u\n", func, line, r->dev->bus_id,
+		r->dev->dev_id);
 	DBG("%s:%d: page_size  %u\n", func, line, r->page_size);
 	DBG("%s:%d: bus_addr   %lxh\n", func, line, r->bus_addr);
 	DBG("%s:%d: len        %lxh\n", func, line, r->len);
+	DBG("%s:%d: offset     %lxh\n", func, line, r->offset);
 }
 
-/**
+  /**
  * dma_chunk - A chunk of dma pages mapped by the io controller.
  * @region - The dma region that owns this chunk.
  * @lpar_addr: Starting lpar address of the area to map.
@@ -381,10 +395,11 @@
 	int line)
 {
 	DBG("%s:%d: r.dev        %u:%u\n", func, line,
-		c->region->did.bus_id, c->region->did.dev_id);
+		c->region->dev->bus_id, c->region->dev->dev_id);
 	DBG("%s:%d: r.bus_addr   %lxh\n", func, line, c->region->bus_addr);
 	DBG("%s:%d: r.page_size  %u\n", func, line, c->region->page_size);
 	DBG("%s:%d: r.len        %lxh\n", func, line, c->region->len);
+	DBG("%s:%d: r.offset     %lxh\n", func, line, c->region->offset);
 	DBG("%s:%d: c.lpar_addr  %lxh\n", func, line, c->lpar_addr);
 	DBG("%s:%d: c.bus_addr   %lxh\n", func, line, c->bus_addr);
 	DBG("%s:%d: c.len        %lxh\n", func, line, c->len);
@@ -395,39 +410,68 @@
 {
 	struct dma_chunk *c;
 	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
-	unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
+	unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus,
+					      1 << r->page_size);
 
 	list_for_each_entry(c, &r->chunk_list.head, link) {
 		/* intersection */
-		if (aligned_bus >= c->bus_addr
-			&& aligned_bus < c->bus_addr + c->len
-			&& aligned_bus + aligned_len <= c->bus_addr + c->len) {
+		if (aligned_bus >= c->bus_addr &&
+		    aligned_bus + aligned_len <= c->bus_addr + c->len)
 			return c;
-		}
+
 		/* below */
-		if (aligned_bus + aligned_len <= c->bus_addr) {
+		if (aligned_bus + aligned_len <= c->bus_addr)
 			continue;
-		}
+
 		/* above */
-		if (aligned_bus >= c->bus_addr + c->len) {
+		if (aligned_bus >= c->bus_addr + c->len)
 			continue;
-		}
 
 		/* we don't handle the multi-chunk case for now */
-
 		dma_dump_chunk(c);
 		BUG();
 	}
 	return NULL;
 }
 
-static int dma_free_chunk(struct dma_chunk *c)
+static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
+	unsigned long lpar_addr, unsigned long len)
+{
+	struct dma_chunk *c;
+	unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
+	unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
+					      1 << r->page_size);
+
+	list_for_each_entry(c, &r->chunk_list.head, link) {
+		/* intersection */
+		if (c->lpar_addr <= aligned_lpar &&
+		    aligned_lpar < c->lpar_addr + c->len) {
+			if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
+				return c;
+			else {
+				dma_dump_chunk(c);
+				BUG();
+			}
+		}
+		/* below */
+		if (aligned_lpar + aligned_len <= c->lpar_addr) {
+			continue;
+		}
+		/* above */
+		if (c->lpar_addr + c->len <= aligned_lpar) {
+			continue;
+		}
+	}
+	return NULL;
+}
+
+static int dma_sb_free_chunk(struct dma_chunk *c)
 {
 	int result = 0;
 
 	if (c->bus_addr) {
-		result = lv1_unmap_device_dma_region(c->region->did.bus_id,
-			c->region->did.dev_id, c->bus_addr, c->len);
+		result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
+			c->region->dev->dev_id, c->bus_addr, c->len);
 		BUG_ON(result);
 	}
 
@@ -435,8 +479,39 @@
 	return result;
 }
 
+static int dma_ioc0_free_chunk(struct dma_chunk *c)
+{
+	int result = 0;
+	int iopage;
+	unsigned long offset;
+	struct ps3_dma_region *r = c->region;
+
+	DBG("%s:start\n", __func__);
+	for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
+		offset = (1 << r->page_size) * iopage;
+		/* put INVALID entry */
+		result = lv1_put_iopte(0,
+				       c->bus_addr + offset,
+				       c->lpar_addr + offset,
+				       r->ioid,
+				       0);
+		DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
+		    c->bus_addr + offset,
+		    c->lpar_addr + offset,
+		    r->ioid);
+
+		if (result) {
+			DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
+			    __LINE__, ps3_result(result));
+		}
+	}
+	kfree(c);
+	DBG("%s:end\n", __func__);
+	return result;
+}
+
 /**
- * dma_map_pages - Maps dma pages into the io controller bus address space.
+ * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
  * @r: Pointer to a struct ps3_dma_region.
  * @phys_addr: Starting physical address of the area to map.
  * @len: Length in bytes of the area to map.
@@ -446,8 +521,8 @@
  * make the HV call to add the pages into the io controller address space.
  */
 
-static int dma_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
-	unsigned long len, struct dma_chunk **c_out)
+static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
+	    unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
 {
 	int result;
 	struct dma_chunk *c;
@@ -461,13 +536,13 @@
 
 	c->region = r;
 	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
-	c->bus_addr = dma_lpar_to_bus(r, c->lpar_addr);
+	c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
 	c->len = len;
 
-	result = lv1_map_device_dma_region(c->region->did.bus_id,
-		c->region->did.dev_id, c->lpar_addr, c->bus_addr, c->len,
-		0xf800000000000000UL);
-
+	BUG_ON(iopte_flag != 0xf800000000000000UL);
+	result = lv1_map_device_dma_region(c->region->dev->bus_id,
+					   c->region->dev->dev_id, c->lpar_addr,
+					   c->bus_addr, c->len, iopte_flag);
 	if (result) {
 		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
@@ -487,26 +562,120 @@
 	return result;
 }
 
+static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
+			      unsigned long len, struct dma_chunk **c_out,
+			      u64 iopte_flag)
+{
+	int result;
+	struct dma_chunk *c, *last;
+	int iopage, pages;
+	unsigned long offset;
+
+	DBG(KERN_ERR "%s: phy=%#lx, lpar=%#lx, len=%#lx\n", __func__,
+	    phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
+	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);
+
+	if (!c) {
+		result = -ENOMEM;
+		goto fail_alloc;
+	}
+
+	c->region = r;
+	c->len = len;
+	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
+	/* allocate IO address */
+	if (list_empty(&r->chunk_list.head)) {
+		/* first one */
+		c->bus_addr = r->bus_addr;
+	} else {
+		/* derive from last bus addr */
+		last  = list_entry(r->chunk_list.head.next,
+				   struct dma_chunk, link);
+		c->bus_addr = last->bus_addr + last->len;
+		DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
+		    last->bus_addr, last->len);
+	}
+
+	/* FIXME: check whether length exceeds region size */
+
+	/* build ioptes for the area */
+	pages = len >> r->page_size;
+	DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#lx\n", __func__,
+	    r->page_size, r->len, pages, iopte_flag);
+	for (iopage = 0; iopage < pages; iopage++) {
+		offset = (1 << r->page_size) * iopage;
+		result = lv1_put_iopte(0,
+				       c->bus_addr + offset,
+				       c->lpar_addr + offset,
+				       r->ioid,
+				       iopte_flag);
+		if (result) {
+			printk(KERN_WARNING "%s:%d: lv1_map_device_dma_region "
+				"failed: %s\n", __func__, __LINE__,
+				ps3_result(result));
+			goto fail_map;
+		}
+		DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
+		    iopage, c->bus_addr + offset, c->lpar_addr + offset,
+		    r->ioid);
+	}
+
+	/* make sure the last allocated chunk is inserted at the head */
+	list_add(&c->link, &r->chunk_list.head);
+
+	*c_out = c;
+	DBG("%s: end\n", __func__);
+	return 0;
+
+fail_map:
+	for (iopage--; 0 <= iopage; iopage--) {
+		lv1_put_iopte(0,
+			      c->bus_addr + offset,
+			      c->lpar_addr + offset,
+			      r->ioid,
+			      0);
+	}
+	kfree(c);
+fail_alloc:
+	*c_out = NULL;
+	return result;
+}
+
 /**
- * dma_region_create - Create a device dma region.
+ * dma_sb_region_create - Create a device dma region.
  * @r: Pointer to a struct ps3_dma_region.
  *
  * This is the lowest level dma region create routine, and is the one that
  * will make the HV call to create the region.
  */
 
-static int dma_region_create(struct ps3_dma_region* r)
+static int dma_sb_region_create(struct ps3_dma_region *r)
 {
 	int result;
 
-	r->len = _ALIGN_UP(map.total, 1 << r->page_size);
+	pr_info(" -> %s:%d:\n", __func__, __LINE__);
+
+	BUG_ON(!r);
+
+	if (!r->dev->bus_id) {
+		pr_info("%s:%d: %u:%u no dma\n", __func__, __LINE__,
+			r->dev->bus_id, r->dev->dev_id);
+		return 0;
+	}
+
+	DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
+	    __LINE__, r->len, r->page_size, r->offset);
+
+	BUG_ON(!r->len);
+	BUG_ON(!r->page_size);
+	BUG_ON(!r->region_ops);
+
 	INIT_LIST_HEAD(&r->chunk_list.head);
 	spin_lock_init(&r->chunk_list.lock);
 
-	result = lv1_allocate_device_dma_region(r->did.bus_id, r->did.dev_id,
-		r->len, r->page_size, r->region_type, &r->bus_addr);
-
-	dma_dump_region(r);
+	result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
+		roundup_pow_of_two(r->len), r->page_size, r->region_type,
+		&r->bus_addr);
 
 	if (result) {
 		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
@@ -517,6 +686,27 @@
 	return result;
 }
 
+static int dma_ioc0_region_create(struct ps3_dma_region *r)
+{
+	int result;
+
+	INIT_LIST_HEAD(&r->chunk_list.head);
+	spin_lock_init(&r->chunk_list.lock);
+
+	result = lv1_allocate_io_segment(0,
+					 r->len,
+					 r->page_size,
+					 &r->bus_addr);
+	if (result) {
+		DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		r->len = r->bus_addr = 0;
+	}
+	DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
+	    r->len, r->page_size, r->bus_addr);
+	return result;
+}
+
 /**
  * dma_region_free - Free a device dma region.
  * @r: Pointer to a struct ps3_dma_region.
@@ -525,31 +715,62 @@
  * will make the HV call to free the region.
  */
 
-static int dma_region_free(struct ps3_dma_region* r)
+static int dma_sb_region_free(struct ps3_dma_region *r)
 {
 	int result;
 	struct dma_chunk *c;
 	struct dma_chunk *tmp;
 
-	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
-		list_del(&c->link);
-		dma_free_chunk(c);
+	BUG_ON(!r);
+
+	if (!r->dev->bus_id) {
+		pr_info("%s:%d: %u:%u no dma\n", __func__, __LINE__,
+			r->dev->bus_id, r->dev->dev_id);
+		return 0;
 	}
 
-	result = lv1_free_device_dma_region(r->did.bus_id, r->did.dev_id,
+	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
+		list_del(&c->link);
+		dma_sb_free_chunk(c);
+	}
+
+	result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
 		r->bus_addr);
 
 	if (result)
 		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
 
-	r->len = r->bus_addr = 0;
+	r->bus_addr = 0;
+
+	return result;
+}
+
+static int dma_ioc0_region_free(struct ps3_dma_region *r)
+{
+	int result;
+	struct dma_chunk *c, *n;
+
+	DBG("%s: start\n", __func__);
+	list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
+		list_del(&c->link);
+		dma_ioc0_free_chunk(c);
+	}
+
+	result = lv1_release_io_segment(0, r->bus_addr);
+
+	if (result)
+		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+
+	r->bus_addr = 0;
+	DBG("%s: end\n", __func__);
 
 	return result;
 }
 
 /**
- * dma_map_area - Map an area of memory into a device dma region.
+ * dma_sb_map_area - Map an area of memory into a device dma region.
  * @r: Pointer to a struct ps3_dma_region.
  * @virt_addr: Starting virtual address of the area to map.
  * @len: Length in bytes of the area to map.
@@ -559,16 +780,19 @@
  * This is the common dma mapping routine.
  */
 
-static int dma_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
-	unsigned long len, unsigned long *bus_addr)
+static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
+	   unsigned long len, unsigned long *bus_addr,
+	   u64 iopte_flag)
 {
 	int result;
 	unsigned long flags;
 	struct dma_chunk *c;
 	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
 		: virt_addr;
-
-	*bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
+	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
+	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
+					      1 << r->page_size);
+	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
 
 	if (!USE_DYNAMIC_DMA) {
 		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
@@ -588,17 +812,18 @@
 	c = dma_find_chunk(r, *bus_addr, len);
 
 	if (c) {
+		DBG("%s:%d: reusing mapped chunk\n", __func__, __LINE__);
+		dma_dump_chunk(c);
 		c->usage_count++;
 		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
 		return 0;
 	}
 
-	result = dma_map_pages(r, _ALIGN_DOWN(phys_addr, 1 << r->page_size),
-		_ALIGN_UP(len, 1 << r->page_size), &c);
+	result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);
 
 	if (result) {
 		*bus_addr = 0;
-		DBG("%s:%d: dma_map_pages failed (%d)\n",
+		DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
 			__func__, __LINE__, result);
 		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
 		return result;
@@ -610,8 +835,57 @@
 	return result;
 }
 
+static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
+	     unsigned long len, unsigned long *bus_addr,
+	     u64 iopte_flag)
+{
+	int result;
+	unsigned long flags;
+	struct dma_chunk *c;
+	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
+		: virt_addr;
+	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
+	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
+					      1 << r->page_size);
+
+	DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
+	    virt_addr, len);
+	DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
+	    phys_addr, aligned_phys, aligned_len);
+
+	spin_lock_irqsave(&r->chunk_list.lock, flags);
+	c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);
+
+	if (c) {
+		/* FIXME */
+		BUG();
+		*bus_addr = c->bus_addr + phys_addr - aligned_phys;
+		c->usage_count++;
+		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+		return 0;
+	}
+
+	result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
+				    iopte_flag);
+
+	if (result) {
+		*bus_addr = 0;
+		DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
+			__func__, __LINE__, result);
+		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+		return result;
+	}
+	*bus_addr = c->bus_addr + phys_addr - aligned_phys;
+	DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#lx\n", __func__,
+	    virt_addr, phys_addr, aligned_phys, *bus_addr);
+	c->usage_count = 1;
+
+	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+	return result;
+}
+
 /**
- * dma_unmap_area - Unmap an area of memory from a device dma region.
+ * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
  * @r: Pointer to a struct ps3_dma_region.
  * @bus_addr: The starting ioc bus address of the area to unmap.
  * @len: Length in bytes of the area to unmap.
@@ -619,7 +893,7 @@
  * This is the common dma unmap routine.
  */
 
-int dma_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,
+static int dma_sb_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,
 	unsigned long len)
 {
 	unsigned long flags;
@@ -631,7 +905,8 @@
 	if (!c) {
 		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
 			1 << r->page_size);
-		unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
+		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
+			- aligned_bus, 1 << r->page_size);
 		DBG("%s:%d: not found: bus_addr %lxh\n",
 			__func__, __LINE__, bus_addr);
 		DBG("%s:%d: not found: len %lxh\n",
@@ -647,94 +922,166 @@
 
 	if (!c->usage_count) {
 		list_del(&c->link);
-		dma_free_chunk(c);
+		dma_sb_free_chunk(c);
 	}
 
 	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
 	return 0;
 }
 
+static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
+			unsigned long bus_addr, unsigned long len)
+{
+	unsigned long flags;
+	struct dma_chunk *c;
+
+	DBG("%s: start a=%#lx l=%#lx\n", __func__, bus_addr, len);
+	spin_lock_irqsave(&r->chunk_list.lock, flags);
+	c = dma_find_chunk(r, bus_addr, len);
+
+	if (!c) {
+		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
+							1 << r->page_size);
+		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
+						      - aligned_bus,
+						      1 << r->page_size);
+		DBG("%s:%d: not found: bus_addr %lxh\n",
+		    __func__, __LINE__, bus_addr);
+		DBG("%s:%d: not found: len %lxh\n",
+		    __func__, __LINE__, len);
+		DBG("%s:%d: not found: aligned_bus %lxh\n",
+		    __func__, __LINE__, aligned_bus);
+		DBG("%s:%d: not found: aligned_len %lxh\n",
+		    __func__, __LINE__, aligned_len);
+		BUG();
+	}
+
+	c->usage_count--;
+
+	if (!c->usage_count) {
+		list_del(&c->link);
+		dma_ioc0_free_chunk(c);
+	}
+
+	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+	DBG("%s: end\n", __func__);
+	return 0;
+}
+
 /**
- * dma_region_create_linear - Setup a linear dma maping for a device.
+ * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
  * @r: Pointer to a struct ps3_dma_region.
  *
  * This routine creates an HV dma region for the device and maps all available
  * ram into the io controller bus address space.
  */
 
-static int dma_region_create_linear(struct ps3_dma_region *r)
+static int dma_sb_region_create_linear(struct ps3_dma_region *r)
 {
 	int result;
-	unsigned long tmp;
+	unsigned long virt_addr, len, tmp;
 
-	/* force 16M dma pages for linear mapping */
-
-	if (r->page_size != PS3_DMA_16M) {
-		pr_info("%s:%d: forcing 16M pages for linear map\n",
-			__func__, __LINE__);
-		r->page_size = PS3_DMA_16M;
+	if (r->len > 16*1024*1024) {	/* FIXME: need proper fix */
+		/* force 16M dma pages for linear mapping */
+		if (r->page_size != PS3_DMA_16M) {
+			pr_info("%s:%d: forcing 16M pages for linear map\n",
+				__func__, __LINE__);
+			r->page_size = PS3_DMA_16M;
+			r->len = _ALIGN_UP(r->len, 1 << r->page_size);
+		}
 	}
 
-	result = dma_region_create(r);
+	result = dma_sb_region_create(r);
 	BUG_ON(result);
 
-	result = dma_map_area(r, map.rm.base, map.rm.size, &tmp);
-	BUG_ON(result);
+	if (r->offset < map.rm.size) {
+		/* Map (part of) 1st RAM chunk */
+		virt_addr = map.rm.base + r->offset;
+		len = map.rm.size - r->offset;
+		if (len > r->len)
+			len = r->len;
+		result = dma_sb_map_area(r, virt_addr, len, &tmp,
+			IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
+		BUG_ON(result);
+	}
 
-	if (USE_LPAR_ADDR)
-		result = dma_map_area(r, map.r1.base, map.r1.size,
-			&tmp);
-	else
-		result = dma_map_area(r, map.rm.size, map.r1.size,
-			&tmp);
-
-	BUG_ON(result);
+	if (r->offset + r->len > map.rm.size) {
+		/* Map (part of) 2nd RAM chunk */
+		virt_addr = USE_LPAR_ADDR ? map.r1.base : map.rm.size;
+		len = r->len;
+		if (r->offset >= map.rm.size)
+			virt_addr += r->offset - map.rm.size;
+		else
+			len -= map.rm.size - r->offset;
+		result = dma_sb_map_area(r, virt_addr, len, &tmp,
+			IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
+		BUG_ON(result);
+	}
 
 	return result;
 }
 
 /**
- * dma_region_free_linear - Free a linear dma mapping for a device.
+ * dma_sb_region_free_linear - Free a linear dma mapping for a device.
  * @r: Pointer to a struct ps3_dma_region.
  *
  * This routine will unmap all mapped areas and free the HV dma region.
  */
 
-static int dma_region_free_linear(struct ps3_dma_region *r)
+static int dma_sb_region_free_linear(struct ps3_dma_region *r)
 {
 	int result;
+	unsigned long bus_addr, len, lpar_addr;
 
-	result = dma_unmap_area(r, dma_lpar_to_bus(r, 0), map.rm.size);
-	BUG_ON(result);
+	if (r->offset < map.rm.size) {
+		/* Unmap (part of) 1st RAM chunk */
+		lpar_addr = map.rm.base + r->offset;
+		len = map.rm.size - r->offset;
+		if (len > r->len)
+			len = r->len;
+		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
+		result = dma_sb_unmap_area(r, bus_addr, len);
+		BUG_ON(result);
+	}
 
-	result = dma_unmap_area(r, dma_lpar_to_bus(r, map.r1.base),
-		map.r1.size);
-	BUG_ON(result);
+	if (r->offset + r->len > map.rm.size) {
+		/* Unmap (part of) 2nd RAM chunk */
+		lpar_addr = map.r1.base;
+		len = r->len;
+		if (r->offset >= map.rm.size)
+			lpar_addr += r->offset - map.rm.size;
+		else
+			len -= map.rm.size - r->offset;
+		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
+		result = dma_sb_unmap_area(r, bus_addr, len);
+		BUG_ON(result);
+	}
 
-	result = dma_region_free(r);
+	result = dma_sb_region_free(r);
 	BUG_ON(result);
 
 	return result;
 }
 
 /**
- * dma_map_area_linear - Map an area of memory into a device dma region.
+ * dma_sb_map_area_linear - Map an area of memory into a device dma region.
  * @r: Pointer to a struct ps3_dma_region.
  * @virt_addr: Starting virtual address of the area to map.
  * @len: Length in bytes of the area to map.
  * @bus_addr: A pointer to return the starting ioc bus address of the area to
  * map.
  *
- * This routine just returns the coresponding bus address.  Actual mapping
+ * This routine just returns the corresponding bus address.  Actual mapping
  * occurs in dma_region_create_linear().
  */
 
-static int dma_map_area_linear(struct ps3_dma_region *r,
-	unsigned long virt_addr, unsigned long len, unsigned long *bus_addr)
+static int dma_sb_map_area_linear(struct ps3_dma_region *r,
+	unsigned long virt_addr, unsigned long len, unsigned long *bus_addr,
+	u64 iopte_flag)
 {
 	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
 		: virt_addr;
-	*bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
+	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
 	return 0;
 }
 
@@ -744,42 +1091,98 @@
  * @bus_addr: The starting ioc bus address of the area to unmap.
  * @len: Length in bytes of the area to unmap.
  *
- * This routine does nothing.  Unmapping occurs in dma_region_free_linear().
+ * This routine does nothing.  Unmapping occurs in dma_sb_region_free_linear().
  */
 
-static int dma_unmap_area_linear(struct ps3_dma_region *r,
+static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
 	unsigned long bus_addr, unsigned long len)
 {
 	return 0;
+};
+
+static const struct ps3_dma_region_ops ps3_dma_sb_region_ops =  {
+	.create = dma_sb_region_create,
+	.free = dma_sb_region_free,
+	.map = dma_sb_map_area,
+	.unmap = dma_sb_unmap_area
+};
+
+static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
+	.create = dma_sb_region_create_linear,
+	.free = dma_sb_region_free_linear,
+	.map = dma_sb_map_area_linear,
+	.unmap = dma_sb_unmap_area_linear
+};
+
+static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
+	.create = dma_ioc0_region_create,
+	.free = dma_ioc0_region_free,
+	.map = dma_ioc0_map_area,
+	.unmap = dma_ioc0_unmap_area
+};
+
+int ps3_dma_region_init(struct ps3_system_bus_device *dev,
+	struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
+	enum ps3_dma_region_type region_type, void *addr, unsigned long len)
+{
+	unsigned long lpar_addr;
+
+	lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;
+
+	r->dev = dev;
+	r->page_size = page_size;
+	r->region_type = region_type;
+	r->offset = lpar_addr;
+	if (r->offset >= map.rm.size)
+		r->offset -= map.r1.offset;
+	r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);
+
+	switch (dev->dev_type) {
+	case PS3_DEVICE_TYPE_SB:
+		r->region_ops =  (USE_DYNAMIC_DMA)
+			? &ps3_dma_sb_region_ops
+			: &ps3_dma_sb_region_linear_ops;
+		break;
+	case PS3_DEVICE_TYPE_IOC0:
+		r->region_ops = &ps3_dma_ioc0_region_ops;
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+	return 0;
 }
+EXPORT_SYMBOL(ps3_dma_region_init);
 
 int ps3_dma_region_create(struct ps3_dma_region *r)
 {
-	return (USE_DYNAMIC_DMA)
-		? dma_region_create(r)
-		: dma_region_create_linear(r);
+	BUG_ON(!r);
+	BUG_ON(!r->region_ops);
+	BUG_ON(!r->region_ops->create);
+	return r->region_ops->create(r);
 }
+EXPORT_SYMBOL(ps3_dma_region_create);
 
 int ps3_dma_region_free(struct ps3_dma_region *r)
 {
-	return (USE_DYNAMIC_DMA)
-		? dma_region_free(r)
-		: dma_region_free_linear(r);
+	BUG_ON(!r);
+	BUG_ON(!r->region_ops);
+	BUG_ON(!r->region_ops->free);
+	return r->region_ops->free(r);
 }
+EXPORT_SYMBOL(ps3_dma_region_free);
 
 int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
-	unsigned long len, unsigned long *bus_addr)
+	unsigned long len, unsigned long *bus_addr,
+	u64 iopte_flag)
 {
-	return (USE_DYNAMIC_DMA)
-		? dma_map_area(r, virt_addr, len, bus_addr)
-		: dma_map_area_linear(r, virt_addr, len, bus_addr);
+	return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
 }
 
 int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
 	unsigned long len)
 {
-	return (USE_DYNAMIC_DMA) ? dma_unmap_area(r, bus_addr, len)
-		: dma_unmap_area_linear(r, bus_addr, len);
+	return r->region_ops->unmap(r, bus_addr, len);
 }
 
 /*============================================================================*/
@@ -810,12 +1213,13 @@
 	BUG_ON(map.rm.base);
 	BUG_ON(!map.rm.size);
 
-	lmb_add(map.rm.base, map.rm.size);
-	lmb_analyze();
 
 	/* arrange to do this in ps3_mm_add_memory */
 	ps3_mm_region_create(&map.r1, map.total - map.rm.size);
 
+	/* correct map.total for the real total amount of memory we use */
+	map.total = map.rm.size + map.r1.size;
+
 	DBG(" <- %s:%d\n", __func__, __LINE__);
 }
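A recurring detail in the new map and unmap helpers above (dma_find_chunk(), dma_sb_map_area(), dma_ioc0_map_area(), dma_sb_unmap_area()) is that the aligned length is computed as _ALIGN_UP(len + addr - aligned_addr, page) rather than _ALIGN_UP(len, page), so the rounded window always covers the head slack introduced by rounding the start address down. A small stand-alone sketch of that arithmetic, assuming power-of-two page sizes; the macro names mirror the kernel ones but are redefined here for illustration:

/*
 * Rounding the start down and the adjusted length up yields an aligned
 * window that fully contains [addr, addr + len).
 */
#include <assert.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long addr = 0x12345, len = 0x3000, page = 0x1000;
	unsigned long start = ALIGN_DOWN(addr, page);
	unsigned long alen = ALIGN_UP(len + addr - start, page);

	assert(start <= addr && addr + len <= start + alen);
	printf("start %#lx, aligned len %#lx\n", start, alen);	/* 0x12000, 0x4000 */
	return 0;
}

Rounding only len up, as the replaced code did, can leave the tail of an unaligned buffer outside the mapped chunk.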
 
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index 5c3da08..b70e474 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -133,7 +133,7 @@
 } static saved_params;
 
 #define dump_header(_a) _dump_header(_a, __func__, __LINE__)
-static void _dump_header(const struct os_area_header __iomem *h, const char* func,
+static void _dump_header(const struct os_area_header *h, const char *func,
 	int line)
 {
 	pr_debug("%s:%d: h.magic_num:         '%s'\n", func, line,
@@ -151,7 +151,7 @@
 }
 
 #define dump_params(_a) _dump_params(_a, __func__, __LINE__)
-static void _dump_params(const struct os_area_params __iomem *p, const char* func,
+static void _dump_params(const struct os_area_params *p, const char *func,
 	int line)
 {
 	pr_debug("%s:%d: p.boot_flag:       %u\n", func, line, p->boot_flag);
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
index ca04f03..87d5206 100644
--- a/arch/powerpc/platforms/ps3/platform.h
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -41,6 +41,7 @@
 /* irq */
 
 void ps3_init_IRQ(void);
+void ps3_shutdown_IRQ(int cpu);
 void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq);
 
 /* smp */
@@ -82,6 +83,7 @@
 	PS3_DEV_TYPE_STOR_ROM = TYPE_ROM,	/* 5 */
 	PS3_DEV_TYPE_SB_GPIO = 6,
 	PS3_DEV_TYPE_STOR_FLASH = TYPE_RBC,	/* 14 */
+	PS3_DEV_TYPE_NOACCESS = 255,
 };
 
 int ps3_repository_read_bus_str(unsigned int bus_index, const char *bus_str,
@@ -129,24 +131,28 @@
 /* repository bus enumerators */
 
 struct ps3_repository_device {
+	enum ps3_bus_type bus_type;
 	unsigned int bus_index;
+	unsigned int bus_id;
+	enum ps3_dev_type dev_type;
 	unsigned int dev_index;
-	struct ps3_device_id did;
+	unsigned int dev_id;
 };
 
-int ps3_repository_find_device(enum ps3_bus_type bus_type,
-	enum ps3_dev_type dev_type,
-	const struct ps3_repository_device *start_dev,
-	struct ps3_repository_device *dev);
-static inline int ps3_repository_find_first_device(
-	enum ps3_bus_type bus_type, enum ps3_dev_type dev_type,
-	struct ps3_repository_device *dev)
+static inline struct ps3_repository_device *ps3_repository_bump_device(
+	struct ps3_repository_device *repo)
 {
-	return ps3_repository_find_device(bus_type, dev_type, NULL, dev);
+	repo->dev_index++;
+	return repo;
 }
-int ps3_repository_find_interrupt(const struct ps3_repository_device *dev,
+int ps3_repository_find_device(struct ps3_repository_device *repo);
+int ps3_repository_find_devices(enum ps3_bus_type bus_type,
+	int (*callback)(const struct ps3_repository_device *repo));
+int ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from,
+	unsigned int *bus_index);
+int ps3_repository_find_interrupt(const struct ps3_repository_device *repo,
 	enum ps3_interrupt_type intr_type, unsigned int *interrupt_id);
-int ps3_repository_find_reg(const struct ps3_repository_device *dev,
+int ps3_repository_find_reg(const struct ps3_repository_device *repo,
 	enum ps3_reg_type reg_type, u64 *bus_addr, u64 *len);
 
 /* repository block device info */
@@ -216,4 +222,19 @@
 int ps3_repository_read_spu_resource_id(unsigned int res_index,
 	enum ps3_spu_resource_type* resource_type, unsigned int *resource_id);
 
+/* repository vuart info */
+
+int ps3_repository_read_vuart_av_port(unsigned int *port);
+int ps3_repository_read_vuart_sysmgr_port(unsigned int *port);
+
+/* Page table entries */
+#define IOPTE_PP_W		0x8000000000000000ul /* protection: write */
+#define IOPTE_PP_R		0x4000000000000000ul /* protection: read */
+#define IOPTE_M			0x2000000000000000ul /* coherency required */
+#define IOPTE_SO_R		0x1000000000000000ul /* ordering: writes */
+#define IOPTE_SO_RW             0x1800000000000000ul /* ordering: r & w */
+#define IOPTE_RPN_Mask		0x07fffffffffff000ul /* RPN */
+#define IOPTE_H			0x0000000000000800ul /* cache hint */
+#define IOPTE_IOID_Mask		0x00000000000007fful /* ioid */
+
 #endif
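The mm.c code above combines these bits as IOPTE_PP_R | IOPTE_PP_W | IOPTE_SO_RW | IOPTE_M when building the linear mapping, which is exactly the 0xf800000000000000 value that dma_sb_map_pages() asserts on. A stand-alone sketch with the values copied from the definitions above, for illustration only:

/*
 * Combining the IOPTE_* permission, ordering and coherency bits for a
 * coherent read/write mapping; the result matches the constant checked
 * by the BUG_ON() in dma_sb_map_pages().
 */
#include <assert.h>
#include <stdint.h>

#define IOPTE_PP_W	0x8000000000000000ull	/* protection: write */
#define IOPTE_PP_R	0x4000000000000000ull	/* protection: read */
#define IOPTE_M		0x2000000000000000ull	/* coherency required */
#define IOPTE_SO_RW	0x1800000000000000ull	/* ordering: r & w */

int main(void)
{
	uint64_t flags = IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M;

	assert(flags == 0xf800000000000000ull);
	return 0;
}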
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
index ae586a0..8cc37cfe 100644
--- a/arch/powerpc/platforms/ps3/repository.c
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -138,7 +138,7 @@
 		pr_debug("%s:%d: lv1_get_repository_node_value failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
 		dump_node_name(lpar_id, n1, n2, n3, n4);
-		return result;
+		return -ENOENT;
 	}
 
 	dump_node(lpar_id, n1, n2, n3, n4, v1, v2);
@@ -155,7 +155,7 @@
 		pr_debug("%s:%d: warning: discarding non-zero v2: %016lx\n",
 			__func__, __LINE__, v2);
 
-	return result;
+	return 0;
 }
 
 int ps3_repository_read_bus_str(unsigned int bus_index, const char *bus_str,
@@ -314,324 +314,140 @@
 		reg_index, bus_addr, len);
 }
 
-#if defined(DEBUG)
-int ps3_repository_dump_resource_info(unsigned int bus_index,
-	unsigned int dev_index)
+
+
+int ps3_repository_find_device(struct ps3_repository_device *repo)
 {
-	int result = 0;
-	unsigned int res_index;
+	int result;
+	struct ps3_repository_device tmp = *repo;
+	unsigned int num_dev;
 
-	pr_debug(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
-		bus_index, dev_index);
+	BUG_ON(repo->bus_index > 10);
+	BUG_ON(repo->dev_index > 10);
 
-	for (res_index = 0; res_index < 10; res_index++) {
-		enum ps3_interrupt_type intr_type;
-		unsigned int interrupt_id;
+	result = ps3_repository_read_bus_num_dev(tmp.bus_index, &num_dev);
 
-		result = ps3_repository_read_dev_intr(bus_index, dev_index,
-			res_index, &intr_type, &interrupt_id);
-
-		if (result) {
-			if (result !=  LV1_NO_ENTRY)
-				pr_debug("%s:%d ps3_repository_read_dev_intr"
-					" (%u:%u) failed\n", __func__, __LINE__,
-					bus_index, dev_index);
-			break;
-		}
-
-		pr_debug("%s:%d (%u:%u) intr_type %u, interrupt_id %u\n",
-			__func__, __LINE__, bus_index, dev_index, intr_type,
-			interrupt_id);
-	}
-
-	for (res_index = 0; res_index < 10; res_index++) {
-		enum ps3_reg_type reg_type;
-		u64 bus_addr;
-		u64 len;
-
-		result = ps3_repository_read_dev_reg(bus_index, dev_index,
-			res_index, &reg_type, &bus_addr, &len);
-
-		if (result) {
-			if (result !=  LV1_NO_ENTRY)
-				pr_debug("%s:%d ps3_repository_read_dev_reg"
-					" (%u:%u) failed\n", __func__, __LINE__,
-					bus_index, dev_index);
-			break;
-		}
-
-		pr_debug("%s:%d (%u:%u) reg_type %u, bus_addr %lxh, len %lxh\n",
-			__func__, __LINE__, bus_index, dev_index, reg_type,
-			bus_addr, len);
-	}
-
-	pr_debug(" <- %s:%d\n", __func__, __LINE__);
-	return result;
-}
-
-static int dump_stor_dev_info(unsigned int bus_index, unsigned int dev_index)
-{
-	int result = 0;
-	unsigned int num_regions, region_index;
-	u64 port, blk_size, num_blocks;
-
-	pr_debug(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
-		bus_index, dev_index);
-
-	result = ps3_repository_read_stor_dev_info(bus_index, dev_index, &port,
-		&blk_size, &num_blocks, &num_regions);
 	if (result) {
-		pr_debug("%s:%d ps3_repository_read_stor_dev_info"
-			" (%u:%u) failed\n", __func__, __LINE__,
-			bus_index, dev_index);
-		goto out;
+		pr_debug("%s:%d read_bus_num_dev failed\n", __func__, __LINE__);
+		return result;
 	}
 
-	pr_debug("%s:%d  (%u:%u): port %lu, blk_size %lu, num_blocks "
-		 "%lu, num_regions %u\n",
-		 __func__, __LINE__, bus_index, dev_index, port,
-		 blk_size, num_blocks, num_regions);
+	pr_debug("%s:%d: bus_type %u, bus_index %u, bus_id %u, num_dev %u\n",
+		__func__, __LINE__, tmp.bus_type, tmp.bus_index, tmp.bus_id,
+		num_dev);
 
-	for (region_index = 0; region_index < num_regions; region_index++) {
-		unsigned int region_id;
-		u64 region_start, region_size;
-
-		result = ps3_repository_read_stor_dev_region(bus_index,
-			dev_index, region_index, &region_id, &region_start,
-			&region_size);
-		if (result) {
-			 pr_debug("%s:%d ps3_repository_read_stor_dev_region"
-				  " (%u:%u) failed\n", __func__, __LINE__,
-				  bus_index, dev_index);
-			break;
-		}
-
-		pr_debug("%s:%d (%u:%u) region_id %u, start %lxh, size %lxh\n",
-			 __func__, __LINE__, bus_index, dev_index, region_id,
-			 region_start, region_size);
+	if (tmp.dev_index >= num_dev) {
+		pr_debug("%s:%d: no device found\n", __func__, __LINE__);
+		return -ENODEV;
 	}
 
-out:
-	pr_debug(" <- %s:%d\n", __func__, __LINE__);
-	return result;
+	result = ps3_repository_read_dev_type(tmp.bus_index, tmp.dev_index,
+		&tmp.dev_type);
+
+	if (result) {
+		pr_debug("%s:%d read_dev_type failed\n", __func__, __LINE__);
+		return result;
+	}
+
+	result = ps3_repository_read_dev_id(tmp.bus_index, tmp.dev_index,
+		&tmp.dev_id);
+
+	if (result) {
+		pr_debug("%s:%d ps3_repository_read_dev_id failed\n", __func__,
+		__LINE__);
+		return result;
+	}
+
+	pr_debug("%s:%d: found: dev_type %u, dev_index %u, dev_id %u\n",
+		__func__, __LINE__, tmp.dev_type, tmp.dev_index, tmp.dev_id);
+
+	*repo = tmp;
+	return 0;
 }
 
-static int dump_device_info(unsigned int bus_index, enum ps3_bus_type bus_type,
-			    unsigned int num_dev)
+int __devinit ps3_repository_find_devices(enum ps3_bus_type bus_type,
+	int (*callback)(const struct ps3_repository_device *repo))
 {
 	int result = 0;
-	unsigned int dev_index;
+	struct ps3_repository_device repo;
 
-	pr_debug(" -> %s:%d: bus_%u\n", __func__, __LINE__, bus_index);
+	pr_debug(" -> %s:%d: find bus_type %u\n", __func__, __LINE__, bus_type);
 
-	for (dev_index = 0; dev_index < num_dev; dev_index++) {
-		enum ps3_dev_type dev_type;
-		unsigned int dev_id;
+	for (repo.bus_index = 0; repo.bus_index < 10; repo.bus_index++) {
 
-		result = ps3_repository_read_dev_type(bus_index, dev_index,
-			&dev_type);
-
-		if (result) {
-			pr_debug("%s:%d ps3_repository_read_dev_type"
-				" (%u:%u) failed\n", __func__, __LINE__,
-				bus_index, dev_index);
-			break;
-		}
-
-		result = ps3_repository_read_dev_id(bus_index, dev_index,
-			&dev_id);
-
-		if (result) {
-			pr_debug("%s:%d ps3_repository_read_dev_id"
-				" (%u:%u) failed\n", __func__, __LINE__,
-				bus_index, dev_index);
-			continue;
-		}
-
-		pr_debug("%s:%d  (%u:%u): dev_type %u, dev_id %u\n", __func__,
-			__LINE__, bus_index, dev_index, dev_type, dev_id);
-
-		ps3_repository_dump_resource_info(bus_index, dev_index);
-
-		if (bus_type == PS3_BUS_TYPE_STORAGE)
-			dump_stor_dev_info(bus_index, dev_index);
-	}
-
-	pr_debug(" <- %s:%d\n", __func__, __LINE__);
-	return result;
-}
-
-int ps3_repository_dump_bus_info(void)
-{
-	int result = 0;
-	unsigned int bus_index;
-
-	pr_debug(" -> %s:%d\n", __func__, __LINE__);
-
-	for (bus_index = 0; bus_index < 10; bus_index++) {
-		enum ps3_bus_type bus_type;
-		unsigned int bus_id;
-		unsigned int num_dev;
-
-		result = ps3_repository_read_bus_type(bus_index, &bus_type);
+		result = ps3_repository_read_bus_type(repo.bus_index,
+			&repo.bus_type);
 
 		if (result) {
 			pr_debug("%s:%d read_bus_type(%u) failed\n",
-				__func__, __LINE__, bus_index);
+				__func__, __LINE__, repo.bus_index);
 			break;
 		}
 
-		result = ps3_repository_read_bus_id(bus_index, &bus_id);
+		if (repo.bus_type != bus_type) {
+			pr_debug("%s:%d: skip, bus_type %u\n", __func__,
+				__LINE__, repo.bus_type);
+			continue;
+		}
+
+		result = ps3_repository_read_bus_id(repo.bus_index,
+			&repo.bus_id);
 
 		if (result) {
 			pr_debug("%s:%d read_bus_id(%u) failed\n",
-				__func__, __LINE__, bus_index);
+				__func__, __LINE__, repo.bus_index);
 			continue;
 		}
 
-		if (bus_index != bus_id)
-			pr_debug("%s:%d bus_index != bus_id\n",
-				__func__, __LINE__);
+		for (repo.dev_index = 0; ; repo.dev_index++) {
+			result = ps3_repository_find_device(&repo);
 
-		result = ps3_repository_read_bus_num_dev(bus_index, &num_dev);
+			if (result == -ENODEV) {
+				result = 0;
+				break;
+			} else if (result)
+				break;
 
-		if (result) {
-			pr_debug("%s:%d read_bus_num_dev(%u) failed\n",
-				__func__, __LINE__, bus_index);
-			continue;
+			result = callback(&repo);
+
+			if (result) {
+				pr_debug("%s:%d: abort at callback\n", __func__,
+					__LINE__);
+				break;
+			}
 		}
-
-		pr_debug("%s:%d bus_%u: bus_type %u, bus_id %u, num_dev %u\n",
-			__func__, __LINE__, bus_index, bus_type, bus_id,
-			num_dev);
-
-		dump_device_info(bus_index, bus_type, num_dev);
+		break;
 	}
 
 	pr_debug(" <- %s:%d\n", __func__, __LINE__);
 	return result;
 }
-#endif /* defined(DEBUG) */
 
-static int find_device(unsigned int bus_index, unsigned int num_dev,
-	unsigned int start_dev_index, enum ps3_dev_type dev_type,
-	struct ps3_repository_device *dev)
+int ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from,
+	unsigned int *bus_index)
 {
-	int result = 0;
-	unsigned int dev_index;
+	unsigned int i;
+	enum ps3_bus_type type;
+	int error;
 
-	pr_debug("%s:%d: find dev_type %u\n", __func__, __LINE__, dev_type);
-
-	dev->dev_index = UINT_MAX;
-
-	for (dev_index = start_dev_index; dev_index < num_dev; dev_index++) {
-		enum ps3_dev_type x;
-
-		result = ps3_repository_read_dev_type(bus_index, dev_index,
-			&x);
-
-		if (result) {
-			pr_debug("%s:%d read_dev_type failed\n",
-				__func__, __LINE__);
-			return result;
-		}
-
-		if (x == dev_type)
-			break;
-	}
-
-	if (dev_index == num_dev)
-		return -1;
-
-	pr_debug("%s:%d: found dev_type %u at dev_index %u\n",
-		__func__, __LINE__, dev_type, dev_index);
-
-	result = ps3_repository_read_dev_id(bus_index, dev_index,
-		&dev->did.dev_id);
-
-	if (result) {
-		pr_debug("%s:%d read_dev_id failed\n",
-			__func__, __LINE__);
-		return result;
-	}
-
-	dev->dev_index = dev_index;
-
-	pr_debug("%s:%d found: dev_id %u\n", __func__, __LINE__,
-		dev->did.dev_id);
-
-	return result;
-}
-
-int ps3_repository_find_device (enum ps3_bus_type bus_type,
-	enum ps3_dev_type dev_type,
-	const struct ps3_repository_device *start_dev,
-	struct ps3_repository_device *dev)
-{
-	int result = 0;
-	unsigned int bus_index;
-	unsigned int num_dev;
-
-	pr_debug("%s:%d: find bus_type %u, dev_type %u\n", __func__, __LINE__,
-		bus_type, dev_type);
-
-	BUG_ON(start_dev && start_dev->bus_index > 10);
-
-	for (bus_index = start_dev ? start_dev->bus_index : 0; bus_index < 10;
-		bus_index++) {
-		enum ps3_bus_type x;
-
-		result = ps3_repository_read_bus_type(bus_index, &x);
-
-		if (result) {
+	for (i = from; i < 10; i++) {
+		error = ps3_repository_read_bus_type(i, &type);
+		if (error) {
 			pr_debug("%s:%d read_bus_type failed\n",
 				__func__, __LINE__);
-			dev->bus_index = UINT_MAX;
-			return result;
+			*bus_index = UINT_MAX;
+			return error;
 		}
-		if (x == bus_type)
-			break;
+		if (type == bus_type) {
+			*bus_index = i;
+			return 0;
+		}
 	}
-
-	if (bus_index >= 10)
-		return -ENODEV;
-
-	pr_debug("%s:%d: found bus_type %u at bus_index %u\n",
-		__func__, __LINE__, bus_type, bus_index);
-
-	result = ps3_repository_read_bus_num_dev(bus_index, &num_dev);
-
-	if (result) {
-		pr_debug("%s:%d read_bus_num_dev failed\n",
-			__func__, __LINE__);
-		return result;
-	}
-
-	result = find_device(bus_index, num_dev, start_dev
-		? start_dev->dev_index + 1 : 0, dev_type, dev);
-
-	if (result) {
-		pr_debug("%s:%d get_did failed\n", __func__, __LINE__);
-		return result;
-	}
-
-	result = ps3_repository_read_bus_id(bus_index, &dev->did.bus_id);
-
-	if (result) {
-		pr_debug("%s:%d read_bus_id failed\n",
-			__func__, __LINE__);
-		return result;
-	}
-
-	dev->bus_index = bus_index;
-
-	pr_debug("%s:%d found: bus_id %u, dev_id %u\n",
-		__func__, __LINE__, dev->did.bus_id, dev->did.dev_id);
-
-	return result;
+	*bus_index = UINT_MAX;
+	return -ENODEV;
 }
 
-int ps3_repository_find_interrupt(const struct ps3_repository_device *dev,
+int ps3_repository_find_interrupt(const struct ps3_repository_device *repo,
 	enum ps3_interrupt_type intr_type, unsigned int *interrupt_id)
 {
 	int result = 0;
@@ -645,8 +461,8 @@
 		enum ps3_interrupt_type t;
 		unsigned int id;
 
-		result = ps3_repository_read_dev_intr(dev->bus_index,
-			dev->dev_index, res_index, &t, &id);
+		result = ps3_repository_read_dev_intr(repo->bus_index,
+			repo->dev_index, res_index, &t, &id);
 
 		if (result) {
 			pr_debug("%s:%d read_dev_intr failed\n",
@@ -669,7 +485,7 @@
 	return result;
 }
 
-int ps3_repository_find_reg(const struct ps3_repository_device *dev,
+int ps3_repository_find_reg(const struct ps3_repository_device *repo,
 	enum ps3_reg_type reg_type, u64 *bus_addr, u64 *len)
 {
 	int result = 0;
@@ -684,8 +500,8 @@
 		u64 a;
 		u64 l;
 
-		result = ps3_repository_read_dev_reg(dev->bus_index,
-			dev->dev_index, res_index, &t, &a, &l);
+		result = ps3_repository_read_dev_reg(repo->bus_index,
+			repo->dev_index, res_index, &t, &a, &l);
 
 		if (result) {
 			pr_debug("%s:%d read_dev_reg failed\n",
@@ -965,6 +781,36 @@
 	return result;
 }
 
+int ps3_repository_read_vuart_av_port(unsigned int *port)
+{
+	int result;
+	u64 v1;
+
+	result = read_node(PS3_LPAR_ID_CURRENT,
+		make_first_field("bi", 0),
+		make_field("vir_uart", 0),
+		make_field("port", 0),
+		make_field("avset", 0),
+		&v1, 0);
+	*port = v1;
+	return result;
+}
+
+int ps3_repository_read_vuart_sysmgr_port(unsigned int *port)
+{
+	int result;
+	u64 v1;
+
+	result = read_node(PS3_LPAR_ID_CURRENT,
+		make_first_field("bi", 0),
+		make_field("vir_uart", 0),
+		make_field("port", 0),
+		make_field("sysmgr", 0),
+		&v1, 0);
+	*port = v1;
+	return result;
+}
+
 /**
   * ps3_repository_read_boot_dat_info - Get address and size of cell_ext_os_area.
   * address: lpar address of cell_ext_os_area
@@ -1026,3 +872,205 @@
 	return result ? result
 		: ps3_repository_read_tb_freq(node_id, tb_freq);
 }
+
+#if defined(DEBUG)
+
+int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
+{
+	int result = 0;
+	unsigned int res_index;
+
+	pr_debug(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
+		repo->bus_index, repo->dev_index);
+
+	for (res_index = 0; res_index < 10; res_index++) {
+		enum ps3_interrupt_type intr_type;
+		unsigned int interrupt_id;
+
+		result = ps3_repository_read_dev_intr(repo->bus_index,
+			repo->dev_index, res_index, &intr_type, &interrupt_id);
+
+		if (result) {
+			if (result !=  LV1_NO_ENTRY)
+				pr_debug("%s:%d ps3_repository_read_dev_intr"
+					" (%u:%u) failed\n", __func__, __LINE__,
+					repo->bus_index, repo->dev_index);
+			break;
+		}
+
+		pr_debug("%s:%d (%u:%u) intr_type %u, interrupt_id %u\n",
+			__func__, __LINE__, repo->bus_index, repo->dev_index,
+			intr_type, interrupt_id);
+	}
+
+	for (res_index = 0; res_index < 10; res_index++) {
+		enum ps3_reg_type reg_type;
+		u64 bus_addr;
+		u64 len;
+
+		result = ps3_repository_read_dev_reg(repo->bus_index,
+			repo->dev_index, res_index, &reg_type, &bus_addr, &len);
+
+		if (result) {
+			if (result !=  LV1_NO_ENTRY)
+				pr_debug("%s:%d ps3_repository_read_dev_reg"
+					" (%u:%u) failed\n", __func__, __LINE__,
+					repo->bus_index, repo->dev_index);
+			break;
+		}
+
+		pr_debug("%s:%d (%u:%u) reg_type %u, bus_addr %lxh, len %lxh\n",
+			__func__, __LINE__, repo->bus_index, repo->dev_index,
+			reg_type, bus_addr, len);
+	}
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+
+static int dump_stor_dev_info(struct ps3_repository_device *repo)
+{
+	int result = 0;
+	unsigned int num_regions, region_index;
+	u64 port, blk_size, num_blocks;
+
+	pr_debug(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
+		repo->bus_index, repo->dev_index);
+
+	result = ps3_repository_read_stor_dev_info(repo->bus_index,
+		repo->dev_index, &port, &blk_size, &num_blocks, &num_regions);
+	if (result) {
+		pr_debug("%s:%d ps3_repository_read_stor_dev_info"
+			" (%u:%u) failed\n", __func__, __LINE__,
+			repo->bus_index, repo->dev_index);
+		goto out;
+	}
+
+	pr_debug("%s:%d  (%u:%u): port %lu, blk_size %lu, num_blocks "
+		 "%lu, num_regions %u\n",
+		 __func__, __LINE__, repo->bus_index, repo->dev_index, port,
+		 blk_size, num_blocks, num_regions);
+
+	for (region_index = 0; region_index < num_regions; region_index++) {
+		unsigned int region_id;
+		u64 region_start, region_size;
+
+		result = ps3_repository_read_stor_dev_region(repo->bus_index,
+			repo->dev_index, region_index, &region_id,
+			&region_start, &region_size);
+		if (result) {
+			 pr_debug("%s:%d ps3_repository_read_stor_dev_region"
+				  " (%u:%u) failed\n", __func__, __LINE__,
+				  repo->bus_index, repo->dev_index);
+			break;
+		}
+
+		pr_debug("%s:%d (%u:%u) region_id %u, start %lxh, size %lxh\n",
+			__func__, __LINE__, repo->bus_index, repo->dev_index,
+			region_id, region_start, region_size);
+	}
+
+out:
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+
+static int dump_device_info(struct ps3_repository_device *repo,
+	unsigned int num_dev)
+{
+	int result = 0;
+
+	pr_debug(" -> %s:%d: bus_%u\n", __func__, __LINE__, repo->bus_index);
+
+	for (repo->dev_index = 0; repo->dev_index < num_dev;
+		repo->dev_index++) {
+
+		result = ps3_repository_read_dev_type(repo->bus_index,
+			repo->dev_index, &repo->dev_type);
+
+		if (result) {
+			pr_debug("%s:%d ps3_repository_read_dev_type"
+				" (%u:%u) failed\n", __func__, __LINE__,
+				repo->bus_index, repo->dev_index);
+			break;
+		}
+
+		result = ps3_repository_read_dev_id(repo->bus_index,
+			repo->dev_index, &repo->dev_id);
+
+		if (result) {
+			pr_debug("%s:%d ps3_repository_read_dev_id"
+				" (%u:%u) failed\n", __func__, __LINE__,
+				repo->bus_index, repo->dev_index);
+			continue;
+		}
+
+		pr_debug("%s:%d  (%u:%u): dev_type %u, dev_id %u\n", __func__,
+			__LINE__, repo->bus_index, repo->dev_index,
+			repo->dev_type, repo->dev_id);
+
+		ps3_repository_dump_resource_info(repo);
+
+		if (repo->bus_type == PS3_BUS_TYPE_STORAGE)
+			dump_stor_dev_info(repo);
+	}
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+
+int ps3_repository_dump_bus_info(void)
+{
+	int result = 0;
+	struct ps3_repository_device repo;
+
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+	memset(&repo, 0, sizeof(repo));
+
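+	/* Probe bus indices 0..9; stop at the first bus whose type cannot be read. */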
+	for (repo.bus_index = 0; repo.bus_index < 10; repo.bus_index++) {
+		unsigned int num_dev;
+
+		result = ps3_repository_read_bus_type(repo.bus_index,
+			&repo.bus_type);
+
+		if (result) {
+			pr_debug("%s:%d read_bus_type(%u) failed\n",
+				__func__, __LINE__, repo.bus_index);
+			break;
+		}
+
+		result = ps3_repository_read_bus_id(repo.bus_index,
+			&repo.bus_id);
+
+		if (result) {
+			pr_debug("%s:%d read_bus_id(%u) failed\n",
+				__func__, __LINE__, repo.bus_index);
+			continue;
+		}
+
+		if (repo.bus_index != repo.bus_id)
+			pr_debug("%s:%d bus_index != bus_id\n",
+				__func__, __LINE__);
+
+		result = ps3_repository_read_bus_num_dev(repo.bus_index,
+			&num_dev);
+
+		if (result) {
+			pr_debug("%s:%d read_bus_num_dev(%u) failed\n",
+				__func__, __LINE__, repo.bus_index);
+			continue;
+		}
+
+		pr_debug("%s:%d bus_%u: bus_type %u, bus_id %u, num_dev %u\n",
+			__func__, __LINE__, repo.bus_index, repo.bus_type,
+			repo.bus_id, num_dev);
+
+		dump_device_info(&repo, num_dev);
+	}
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+
+#endif /* defined(DEBUG) */
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 9353967..aa05288 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -37,28 +37,36 @@
 #include "platform.h"
 
 #if defined(DEBUG)
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG udbg_printf
 #else
-#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
+#define DBG pr_debug
 #endif
 
 #if !defined(CONFIG_SMP)
 static void smp_send_stop(void) {}
 #endif
 
-int ps3_get_firmware_version(union ps3_firmware_version *v)
+static union ps3_firmware_version ps3_firmware_version;
+
+void ps3_get_firmware_version(union ps3_firmware_version *v)
 {
-	int result = lv1_get_version_info(&v->raw);
-
-	if (result) {
-		v->raw = 0;
-		return -1;
-	}
-
-	return result;
+	*v = ps3_firmware_version;
 }
 EXPORT_SYMBOL_GPL(ps3_get_firmware_version);
 
+int ps3_compare_firmware_version(u16 major, u16 minor, u16 rev)
+{
+	union ps3_firmware_version x;
+
+	x.pad = 0;
+	x.major = major;
+	x.minor = minor;
+	x.rev = rev;
+
+	return (ps3_firmware_version.raw - x.raw);
+}
+EXPORT_SYMBOL_GPL(ps3_compare_firmware_version);
+
 static void ps3_power_save(void)
 {
 	/*
@@ -99,7 +107,8 @@
 	while(1);
 }
 
-#ifdef CONFIG_FB_PS3
+#if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \
+    defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
 static void prealloc(struct ps3_prealloc *p)
 {
 	if (!p->size)
@@ -115,12 +124,15 @@
 	printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size,
 	       p->address);
 }
+#endif
 
+#if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE)
 struct ps3_prealloc ps3fb_videomemory = {
-    .name = "ps3fb videomemory",
-    .size = CONFIG_FB_PS3_DEFAULT_SIZE_M*1024*1024,
-    .align = 1024*1024			/* the GPU requires 1 MiB alignment */
+	.name = "ps3fb videomemory",
+	.size = CONFIG_FB_PS3_DEFAULT_SIZE_M*1024*1024,
+	.align = 1024*1024		/* the GPU requires 1 MiB alignment */
 };
+EXPORT_SYMBOL_GPL(ps3fb_videomemory);
 #define prealloc_ps3fb_videomemory()	prealloc(&ps3fb_videomemory)
 
 static int __init early_parse_ps3fb(char *p)
@@ -137,6 +149,30 @@
 #define prealloc_ps3fb_videomemory()	do { } while (0)
 #endif
 
+#if defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
+struct ps3_prealloc ps3flash_bounce_buffer = {
+	.name = "ps3flash bounce buffer",
+	.size = 256*1024,
+	.align = 256*1024
+};
+EXPORT_SYMBOL_GPL(ps3flash_bounce_buffer);
+#define prealloc_ps3flash_bounce_buffer()	prealloc(&ps3flash_bounce_buffer)
+
+static int __init early_parse_ps3flash(char *p)
+{
+	if (!p)
+		return 1;
+
+	if (!strcmp(p, "off"))
+		ps3flash_bounce_buffer.size = 0;
+
+	return 0;
+}
+early_param("ps3flash", early_parse_ps3flash);
+#else
+#define prealloc_ps3flash_bounce_buffer()	do { } while (0)
+#endif
+
 static int ps3_set_dabr(u64 dabr)
 {
 	enum {DABR_USER = 1, DABR_KERNEL = 2,};
@@ -146,13 +182,13 @@
 
 static void __init ps3_setup_arch(void)
 {
-	union ps3_firmware_version v;
 
 	DBG(" -> %s:%d\n", __func__, __LINE__);
 
-	ps3_get_firmware_version(&v);
-	printk(KERN_INFO "PS3 firmware version %u.%u.%u\n", v.major, v.minor,
-		v.rev);
+	lv1_get_version_info(&ps3_firmware_version.raw);
+	printk(KERN_INFO "PS3 firmware version %u.%u.%u\n",
+	       ps3_firmware_version.major, ps3_firmware_version.minor,
+	       ps3_firmware_version.rev);
 
 	ps3_spu_set_platform();
 	ps3_map_htab();
@@ -166,6 +202,8 @@
 #endif
 
 	prealloc_ps3fb_videomemory();
+	prealloc_ps3flash_bounce_buffer();
+
 	ppc_md.power_save = ps3_power_save;
 
 	DBG(" <- %s:%d\n", __func__, __LINE__);
@@ -184,7 +222,7 @@
 	DBG(" -> %s:%d\n", __func__, __LINE__);
 
 	dt_root = of_get_flat_dt_root();
-	if (!of_flat_dt_is_compatible(dt_root, "PS3"))
+	if (!of_flat_dt_is_compatible(dt_root, "sony,ps3"))
 		return 0;
 
 	powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
@@ -201,31 +239,12 @@
 #if defined(CONFIG_KEXEC)
 static void ps3_kexec_cpu_down(int crash_shutdown, int secondary)
 {
-	DBG(" -> %s:%d\n", __func__, __LINE__);
+	int cpu = smp_processor_id();
 
-	if (secondary) {
-		int cpu;
-		for_each_online_cpu(cpu)
-			if (cpu)
-				ps3_smp_cleanup_cpu(cpu);
-	} else
-		ps3_smp_cleanup_cpu(0);
+	DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
 
-	DBG(" <- %s:%d\n", __func__, __LINE__);
-}
-
-static void ps3_machine_kexec(struct kimage *image)
-{
-	unsigned long ppe_id;
-
-	DBG(" -> %s:%d\n", __func__, __LINE__);
-
-	lv1_get_logical_ppe_id(&ppe_id);
-	lv1_configure_irq_state_bitmap(ppe_id, 0, 0);
-	ps3_mm_shutdown();
-	ps3_mm_vas_destroy();
-
-	default_machine_kexec(image);
+	ps3_smp_cleanup_cpu(cpu);
+	ps3_shutdown_IRQ(cpu);
 
 	DBG(" <- %s:%d\n", __func__, __LINE__);
 }
@@ -247,7 +266,7 @@
 	.power_off			= ps3_power_off,
 #if defined(CONFIG_KEXEC)
 	.kexec_cpu_down			= ps3_kexec_cpu_down,
-	.machine_kexec			= ps3_machine_kexec,
+	.machine_kexec			= default_machine_kexec,
 	.machine_kexec_prepare		= default_machine_kexec_prepare,
 	.machine_crash_shutdown		= default_machine_crash_shutdown,
 #endif
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 53416ec..f0b12f2 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -27,9 +27,9 @@
 #include "platform.h"
 
 #if defined(DEBUG)
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG udbg_printf
 #else
-#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
+#define DBG pr_debug
 #endif
 
 static irqreturn_t ipi_function_handler(int irq, void *msg)
@@ -39,11 +39,11 @@
 }
 
 /**
-  * virqs - a per cpu array of virqs for ipi use
+  * ps3_ipi_virqs - a per cpu array of virqs for ipi use
   */
 
 #define MSG_COUNT 4
-static DEFINE_PER_CPU(unsigned int, virqs[MSG_COUNT]);
+static DEFINE_PER_CPU(unsigned int, ps3_ipi_virqs[MSG_COUNT]);
 
 static const char *names[MSG_COUNT] = {
 	"ipi call",
@@ -62,7 +62,7 @@
 		return;
 	}
 
-	virq = per_cpu(virqs, target)[msg];
+	virq = per_cpu(ps3_ipi_virqs, target)[msg];
 	result = ps3_send_event_locally(virq);
 
 	if (result)
@@ -94,13 +94,13 @@
 static void __init ps3_smp_setup_cpu(int cpu)
 {
 	int result;
-	unsigned int *virqs = per_cpu(virqs, cpu);
+	unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
 	int i;
 
 	DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
 
 	/*
-	 * Check assumptions on virqs[] indexing. If this
+	 * Check assumptions on ps3_ipi_virqs[] indexing. If this
 	 * check fails, then a different mapping of PPC_MSG_
 	 * to index needs to be setup.
 	 */
@@ -132,13 +132,13 @@
 
 void ps3_smp_cleanup_cpu(int cpu)
 {
-	unsigned int *virqs = per_cpu(virqs, cpu);
+	unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
 	int i;
 
 	DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
 
 	for (i = 0; i < MSG_COUNT; i++) {
-		free_irq(virqs[i], (void*)(long)i);
+		/* Can't call free_irq from interrupt context. */
 		ps3_event_receive_port_destroy(virqs[i]);
 		virqs[i] = NO_IRQ;
 	}
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index 651437c..502d80e 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -182,15 +182,18 @@
 {
 	struct table {char* name; unsigned long addr; unsigned long size;};
 
-	spu_pdata(spu)->shadow = __ioremap(
-		spu_pdata(spu)->shadow_addr, sizeof(struct spe_shadow),
-		pgprot_val(PAGE_READONLY) | _PAGE_NO_CACHE | _PAGE_GUARDED);
+	spu_pdata(spu)->shadow = ioremap_flags(spu_pdata(spu)->shadow_addr,
+					       sizeof(struct spe_shadow),
+					       pgprot_val(PAGE_READONLY) |
+					       _PAGE_NO_CACHE);
 	if (!spu_pdata(spu)->shadow) {
 		pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
 		goto fail_ioremap;
 	}
 
-	spu->local_store = ioremap(spu->local_store_phys, LS_SIZE);
+	spu->local_store = (__force void *)ioremap_flags(spu->local_store_phys,
+		LS_SIZE, _PAGE_NO_CACHE);
+
 	if (!spu->local_store) {
 		pr_debug("%s:%d: ioremap local_store failed\n",
 			__func__, __LINE__);
@@ -199,6 +202,7 @@
 
 	spu->problem = ioremap(spu->problem_phys,
 		sizeof(struct spu_problem));
+
 	if (!spu->problem) {
 		pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
 		goto fail_ioremap;
@@ -206,6 +210,7 @@
 
 	spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
 		sizeof(struct spu_priv2));
+
 	if (!spu->priv2) {
 		pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
 		goto fail_ioremap;
@@ -400,11 +405,13 @@
 		}
 	}
 
-	if (result)
+	if (result) {
 		printk(KERN_WARNING "%s:%d: Error initializing spus\n",
 			__func__, __LINE__);
+		return result;
+	}
 
-	return result;
+	return num_resource_id;
 }
 
 const struct spu_management_ops spu_management_ps3_ops = {
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 6bda510..4bb634a 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -30,22 +30,228 @@
 
 #include "platform.h"
 
+static struct device ps3_system_bus = {
+	.bus_id = "ps3_system",
+};
+
+/* FIXME: need device usage counters! */
+static struct {
+	struct mutex mutex;
+	int sb_11; /* usb 0 */
+	int sb_12; /* usb 0 */
+	int gpu;
+} usage_hack;
+
+static int ps3_is_device(struct ps3_system_bus_device *dev,
+			 unsigned int bus_id, unsigned int dev_id)
+{
+	return dev->bus_id == bus_id && dev->dev_id == dev_id;
+}
+
+static int ps3_open_hv_device_sb(struct ps3_system_bus_device *dev)
+{
+	int result;
+
+	BUG_ON(!dev->bus_id);
+	mutex_lock(&usage_hack.mutex);
+
+	if (ps3_is_device(dev, 1, 1)) {
+		usage_hack.sb_11++;
+		if (usage_hack.sb_11 > 1) {
+			result = 0;
+			goto done;
+		}
+	}
+
+	if (ps3_is_device(dev, 1, 2)) {
+		usage_hack.sb_12++;
+		if (usage_hack.sb_12 > 1) {
+			result = 0;
+			goto done;
+		}
+	}
+
+	result = lv1_open_device(dev->bus_id, dev->dev_id, 0);
+
+	if (result) {
+		pr_debug("%s:%d: lv1_open_device failed: %s\n", __func__,
+			__LINE__, ps3_result(result));
+		result = -EPERM;
+	}
+
+done:
+	mutex_unlock(&usage_hack.mutex);
+	return result;
+}
+
+static int ps3_close_hv_device_sb(struct ps3_system_bus_device *dev)
+{
+	int result;
+
+	BUG_ON(!dev->bus_id);
+	mutex_lock(&usage_hack.mutex);
+
+	if (ps3_is_device(dev, 1, 1)) {
+		usage_hack.sb_11--;
+		if (usage_hack.sb_11) {
+			result = 0;
+			goto done;
+		}
+	}
+
+	if (ps3_is_device(dev, 1, 2)) {
+		usage_hack.sb_12--;
+		if (usage_hack.sb_12) {
+			result = 0;
+			goto done;
+		}
+	}
+
+	result = lv1_close_device(dev->bus_id, dev->dev_id);
+	BUG_ON(result);
+
+done:
+	mutex_unlock(&usage_hack.mutex);
+	return result;
+}
+
+static int ps3_open_hv_device_gpu(struct ps3_system_bus_device *dev)
+{
+	int result;
+
+	mutex_lock(&usage_hack.mutex);
+
+	usage_hack.gpu++;
+	if (usage_hack.gpu > 1) {
+		result = 0;
+		goto done;
+	}
+
+	result = lv1_gpu_open(0);
+
+	if (result) {
+		pr_debug("%s:%d: lv1_gpu_open failed: %s\n", __func__,
+			__LINE__, ps3_result(result));
+		result = -EPERM;
+	}
+
+done:
+	mutex_unlock(&usage_hack.mutex);
+	return result;
+}
+
+static int ps3_close_hv_device_gpu(struct ps3_system_bus_device *dev)
+{
+	int result;
+
+	mutex_lock(&usage_hack.mutex);
+
+	usage_hack.gpu--;
+	if (usage_hack.gpu) {
+		result = 0;
+		goto done;
+	}
+
+	result = lv1_gpu_close();
+	BUG_ON(result);
+
+done:
+	mutex_unlock(&usage_hack.mutex);
+	return result;
+}
+
+int ps3_open_hv_device(struct ps3_system_bus_device *dev)
+{
+	BUG_ON(!dev);
+	pr_debug("%s:%d: match_id: %u\n", __func__, __LINE__, dev->match_id);
+
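+	/*
+	 * Dispatch on match_id: system-bus devices are opened with
+	 * lv1_open_device(), GPU-class devices with lv1_gpu_open().
+	 */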
+	switch (dev->match_id) {
+	case PS3_MATCH_ID_EHCI:
+	case PS3_MATCH_ID_OHCI:
+	case PS3_MATCH_ID_GELIC:
+	case PS3_MATCH_ID_STOR_DISK:
+	case PS3_MATCH_ID_STOR_ROM:
+	case PS3_MATCH_ID_STOR_FLASH:
+		return ps3_open_hv_device_sb(dev);
+
+	case PS3_MATCH_ID_SOUND:
+	case PS3_MATCH_ID_GRAPHICS:
+		return ps3_open_hv_device_gpu(dev);
+
+	case PS3_MATCH_ID_AV_SETTINGS:
+	case PS3_MATCH_ID_SYSTEM_MANAGER:
+		pr_debug("%s:%d: unsupported match_id: %u\n", __func__,
+			__LINE__, dev->match_id);
+		pr_debug("%s:%d: bus_id: %u\n", __func__,
+			__LINE__, dev->bus_id);
+		BUG();
+		return -EINVAL;
+
+	default:
+		break;
+	}
+
+	pr_debug("%s:%d: unknown match_id: %u\n", __func__, __LINE__,
+		dev->match_id);
+	BUG();
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ps3_open_hv_device);
+
+int ps3_close_hv_device(struct ps3_system_bus_device *dev)
+{
+	BUG_ON(!dev);
+	pr_debug("%s:%d: match_id: %u\n", __func__, __LINE__, dev->match_id);
+
+	switch (dev->match_id) {
+	case PS3_MATCH_ID_EHCI:
+	case PS3_MATCH_ID_OHCI:
+	case PS3_MATCH_ID_GELIC:
+	case PS3_MATCH_ID_STOR_DISK:
+	case PS3_MATCH_ID_STOR_ROM:
+	case PS3_MATCH_ID_STOR_FLASH:
+		return ps3_close_hv_device_sb(dev);
+
+	case PS3_MATCH_ID_SOUND:
+	case PS3_MATCH_ID_GRAPHICS:
+		return ps3_close_hv_device_gpu(dev);
+
+	case PS3_MATCH_ID_AV_SETTINGS:
+	case PS3_MATCH_ID_SYSTEM_MANAGER:
+		pr_debug("%s:%d: unsupported match_id: %u\n", __func__,
+			__LINE__, dev->match_id);
+		pr_debug("%s:%d: bus_id: %u\n", __func__,
+			__LINE__, dev->bus_id);
+		BUG();
+		return -EINVAL;
+
+	default:
+		break;
+	}
+
+	pr_debug("%s:%d: unknown match_id: %u\n", __func__, __LINE__,
+		dev->match_id);
+	BUG();
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ps3_close_hv_device);
+
 #define dump_mmio_region(_a) _dump_mmio_region(_a, __func__, __LINE__)
 static void _dump_mmio_region(const struct ps3_mmio_region* r,
 	const char* func, int line)
 {
-	pr_debug("%s:%d: dev       %u:%u\n", func, line, r->did.bus_id,
-		r->did.dev_id);
+	pr_debug("%s:%d: dev       %u:%u\n", func, line, r->dev->bus_id,
+		r->dev->dev_id);
 	pr_debug("%s:%d: bus_addr  %lxh\n", func, line, r->bus_addr);
 	pr_debug("%s:%d: len       %lxh\n", func, line, r->len);
 	pr_debug("%s:%d: lpar_addr %lxh\n", func, line, r->lpar_addr);
 }
 
-int ps3_mmio_region_create(struct ps3_mmio_region *r)
+static int ps3_sb_mmio_region_create(struct ps3_mmio_region *r)
 {
 	int result;
 
-	result = lv1_map_device_mmio_region(r->did.bus_id, r->did.dev_id,
+	result = lv1_map_device_mmio_region(r->dev->bus_id, r->dev->dev_id,
 		r->bus_addr, r->len, r->page_size, &r->lpar_addr);
 
 	if (result) {
@@ -57,13 +263,26 @@
 	dump_mmio_region(r);
 	return result;
 }
+
+static int ps3_ioc0_mmio_region_create(struct ps3_mmio_region *r)
+{
+	/* device specific; do nothing currently */
+	return 0;
+}
+
+int ps3_mmio_region_create(struct ps3_mmio_region *r)
+{
+	return r->mmio_ops->create(r);
+}
 EXPORT_SYMBOL_GPL(ps3_mmio_region_create);
 
-int ps3_free_mmio_region(struct ps3_mmio_region *r)
+static int ps3_sb_free_mmio_region(struct ps3_mmio_region *r)
 {
 	int result;
 
-	result = lv1_unmap_device_mmio_region(r->did.bus_id, r->did.dev_id,
+	dump_mmio_region(r);
+
+	result = lv1_unmap_device_mmio_region(r->dev->bus_id, r->dev->dev_id,
 		r->lpar_addr);
 
 	if (result)
@@ -73,14 +292,60 @@
 	r->lpar_addr = 0;
 	return result;
 }
+
+static int ps3_ioc0_free_mmio_region(struct ps3_mmio_region *r)
+{
+	/* device specific; do nothing currently */
+	return 0;
+}
+
+
+int ps3_free_mmio_region(struct ps3_mmio_region *r)
+{
+	return r->mmio_ops->free(r);
+}
+
 EXPORT_SYMBOL_GPL(ps3_free_mmio_region);
 
+static const struct ps3_mmio_region_ops ps3_mmio_sb_region_ops = {
+	.create = ps3_sb_mmio_region_create,
+	.free = ps3_sb_free_mmio_region
+};
+
+static const struct ps3_mmio_region_ops ps3_mmio_ioc0_region_ops = {
+	.create = ps3_ioc0_mmio_region_create,
+	.free = ps3_ioc0_free_mmio_region
+};
+
+int ps3_mmio_region_init(struct ps3_system_bus_device *dev,
+	struct ps3_mmio_region *r, unsigned long bus_addr, unsigned long len,
+	enum ps3_mmio_page_size page_size)
+{
+	r->dev = dev;
+	r->bus_addr = bus_addr;
+	r->len = len;
+	r->page_size = page_size;
+	switch (dev->dev_type) {
+	case PS3_DEVICE_TYPE_SB:
+		r->mmio_ops = &ps3_mmio_sb_region_ops;
+		break;
+	case PS3_DEVICE_TYPE_IOC0:
+		r->mmio_ops = &ps3_mmio_ioc0_region_ops;
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ps3_mmio_region_init);
+
 static int ps3_system_bus_match(struct device *_dev,
 	struct device_driver *_drv)
 {
 	int result;
-	struct ps3_system_bus_driver *drv = to_ps3_system_bus_driver(_drv);
-	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+	struct ps3_system_bus_driver *drv = ps3_drv_to_system_bus_drv(_drv);
+	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
 
 	result = dev->match_id == drv->match_id;
 
@@ -92,32 +357,14 @@
 
 static int ps3_system_bus_probe(struct device *_dev)
 {
-	int result;
-	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
-	struct ps3_system_bus_driver *drv =
-		to_ps3_system_bus_driver(_dev->driver);
+	int result = 0;
+	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
+	struct ps3_system_bus_driver *drv;
 
-	result = lv1_open_device(dev->did.bus_id, dev->did.dev_id, 0);
+	BUG_ON(!dev);
+	pr_info(" -> %s:%d: %s\n", __func__, __LINE__, _dev->bus_id);
 
-	if (result) {
-		pr_debug("%s:%d: lv1_open_device failed (%d)\n",
-			__func__, __LINE__, result);
-		result = -EACCES;
-		goto clean_none;
-	}
-
-	if (dev->d_region->did.bus_id) {
-		result = ps3_dma_region_create(dev->d_region);
-
-		if (result) {
-			pr_debug("%s:%d: ps3_dma_region_create failed (%d)\n",
-				__func__, __LINE__, result);
-			BUG_ON("check region type");
-			result = -EINVAL;
-			goto clean_device;
-		}
-	}
-
+	drv = ps3_system_bus_dev_to_system_bus_drv(dev);
 	BUG_ON(!drv);
 
 	if (drv->probe)
@@ -126,56 +373,127 @@
 		pr_info("%s:%d: %s no probe method\n", __func__, __LINE__,
 			dev->core.bus_id);
 
-	if (result) {
-		pr_debug("%s:%d: drv->probe failed\n", __func__, __LINE__);
-		goto clean_dma;
-	}
-
-	return result;
-
-clean_dma:
-	ps3_dma_region_free(dev->d_region);
-clean_device:
-	lv1_close_device(dev->did.bus_id, dev->did.dev_id);
-clean_none:
+	pr_info(" <- %s:%d: %s\n", __func__, __LINE__, dev->core.bus_id);
 	return result;
 }
 
 static int ps3_system_bus_remove(struct device *_dev)
 {
-	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
-	struct ps3_system_bus_driver *drv =
-		to_ps3_system_bus_driver(_dev->driver);
+	int result = 0;
+	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
+	struct ps3_system_bus_driver *drv;
+
+	BUG_ON(!dev);
+	pr_info(" -> %s:%d: %s\n", __func__, __LINE__, _dev->bus_id);
+
+	drv = ps3_system_bus_dev_to_system_bus_drv(dev);
+	BUG_ON(!drv);
 
 	if (drv->remove)
-		drv->remove(dev);
+		result = drv->remove(dev);
 	else
-		pr_info("%s:%d: %s no remove method\n", __func__, __LINE__,
-			dev->core.bus_id);
+		dev_dbg(&dev->core, "%s:%d %s: no remove method\n",
+			__func__, __LINE__, drv->core.name);
 
-	ps3_dma_region_free(dev->d_region);
-	ps3_free_mmio_region(dev->m_region);
-	lv1_close_device(dev->did.bus_id, dev->did.dev_id);
+	pr_info(" <- %s:%d: %s\n", __func__, __LINE__, dev->core.bus_id);
+	return result;
+}
 
+static void ps3_system_bus_shutdown(struct device *_dev)
+{
+	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
+	struct ps3_system_bus_driver *drv;
+
+	BUG_ON(!dev);
+
+	dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__,
+		dev->match_id);
+
+	if (!dev->core.driver) {
+		dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
+			__LINE__);
+		return;
+	}
+
+	drv = ps3_system_bus_dev_to_system_bus_drv(dev);
+
+	BUG_ON(!drv);
+
+	dev_dbg(&dev->core, "%s:%d: %s -> %s\n", __func__, __LINE__,
+		dev->core.bus_id, drv->core.name);
+
+	if (drv->shutdown)
+		drv->shutdown(dev);
+	else if (drv->remove) {
+		dev_dbg(&dev->core, "%s:%d %s: no shutdown, calling remove\n",
+			__func__, __LINE__, drv->core.name);
+		drv->remove(dev);
+	} else {
+		dev_dbg(&dev->core, "%s:%d %s: no shutdown method\n",
+			__func__, __LINE__, drv->core.name);
+		BUG();
+	}
+
+	dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
+}
+
+static int ps3_system_bus_uevent(struct device *_dev, char **envp,
+				 int num_envp, char *buffer, int buffer_size)
+{
+	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
+	int i = 0, length = 0;
+
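+	/* Export MODALIAS=ps3:<match_id> so userspace can autoload the matching driver. */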
+	if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
+			   &length, "MODALIAS=ps3:%d",
+			   dev->match_id))
+		return -ENOMEM;
+
+	envp[i] = NULL;
 	return 0;
 }
 
+static ssize_t modalias_show(struct device *_dev, struct device_attribute *a,
+	char *buf)
+{
+	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
+	int len = snprintf(buf, PAGE_SIZE, "ps3:%d\n", dev->match_id);
+
+	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+
+static struct device_attribute ps3_system_bus_dev_attrs[] = {
+	__ATTR_RO(modalias),
+	__ATTR_NULL,
+};
+
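+	/* Walk interrupt resources; LV1_NO_ENTRY marks the end of the list. */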
 struct bus_type ps3_system_bus_type = {
 	.name = "ps3_system_bus",
 	.match = ps3_system_bus_match,
+	.uevent = ps3_system_bus_uevent,
 	.probe = ps3_system_bus_probe,
 	.remove = ps3_system_bus_remove,
+	.shutdown = ps3_system_bus_shutdown,
+	.dev_attrs = ps3_system_bus_dev_attrs,
 };
 
-int __init ps3_system_bus_init(void)
+static int __init ps3_system_bus_init(void)
 {
 	int result;
 
 	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
 		return -ENODEV;
 
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+	mutex_init(&usage_hack.mutex);
+
+	result = device_register(&ps3_system_bus);
+	BUG_ON(result);
+
 	result = bus_register(&ps3_system_bus_type);
 	BUG_ON(result);
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
 	return result;
 }
 
@@ -185,16 +503,13 @@
  * Returns the virtual address of the buffer and sets dma_handle
  * to the dma address (mapping) of the first page.
  */
-
 static void * ps3_alloc_coherent(struct device *_dev, size_t size,
-	dma_addr_t *dma_handle, gfp_t flag)
+				      dma_addr_t *dma_handle, gfp_t flag)
 {
 	int result;
-	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
 	unsigned long virt_addr;
 
-	BUG_ON(!dev->d_region->bus_addr);
-
 	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
 	flag |= __GFP_ZERO;
 
@@ -205,7 +520,8 @@
 		goto clean_none;
 	}
 
-	result = ps3_dma_map(dev->d_region, virt_addr, size, dma_handle);
+	result = ps3_dma_map(dev->d_region, virt_addr, size, dma_handle,
+			     IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
 
 	if (result) {
 		pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
@@ -226,7 +542,7 @@
 static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle)
 {
-	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
 
 	ps3_dma_unmap(dev->d_region, dma_handle, size);
 	free_pages((unsigned long)vaddr, get_order(size));
@@ -239,15 +555,16 @@
  * byte within the page as vaddr.
  */
 
-static dma_addr_t ps3_map_single(struct device *_dev, void *ptr, size_t size,
+static dma_addr_t ps3_sb_map_single(struct device *_dev, void *ptr, size_t size,
 	enum dma_data_direction direction)
 {
-	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
 	int result;
 	unsigned long bus_addr;
 
 	result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
-		&bus_addr);
+			     &bus_addr,
+			     IOPTE_PP_R | IOPTE_PP_W | IOPTE_SO_RW | IOPTE_M);
 
 	if (result) {
 		pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
@@ -257,10 +574,44 @@
 	return bus_addr;
 }
 
+static dma_addr_t ps3_ioc0_map_single(struct device *_dev, void *ptr,
+				      size_t size,
+				      enum dma_data_direction direction)
+{
+	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
+	int result;
+	unsigned long bus_addr;
+	u64 iopte_flag;
+
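+	/* Choose IOPTE protection and storage-ordering bits to match the DMA direction. */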
+	iopte_flag = IOPTE_M;
+	switch (direction) {
+	case DMA_BIDIRECTIONAL:
+		iopte_flag |= IOPTE_PP_R | IOPTE_PP_W | IOPTE_SO_RW;
+		break;
+	case DMA_TO_DEVICE:
+		iopte_flag |= IOPTE_PP_R | IOPTE_SO_R;
+		break;
+	case DMA_FROM_DEVICE:
+		iopte_flag |= IOPTE_PP_W | IOPTE_SO_RW;
+		break;
+	default:
+		/* cannot happen */
+		BUG();
+	}
+	result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
+			     &bus_addr, iopte_flag);
+
+	if (result) {
+		pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
+			__func__, __LINE__, result);
+	}
+	return bus_addr;
+}
+
 static void ps3_unmap_single(struct device *_dev, dma_addr_t dma_addr,
 	size_t size, enum dma_data_direction direction)
 {
-	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
 	int result;
 
 	result = ps3_dma_unmap(dev->d_region, dma_addr, size);
@@ -271,20 +622,20 @@
 	}
 }
 
-static int ps3_map_sg(struct device *_dev, struct scatterlist *sg, int nents,
+static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sg, int nents,
 	enum dma_data_direction direction)
 {
 #if defined(CONFIG_PS3_DYNAMIC_DMA)
 	BUG_ON("do");
 	return -EPERM;
 #else
-	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
 	int i;
 
 	for (i = 0; i < nents; i++, sg++) {
 		int result = ps3_dma_map(dev->d_region,
 			page_to_phys(sg->page) + sg->offset, sg->length,
-			&sg->dma_address);
+					 &sg->dma_address, 0);
 
 		if (result) {
 			pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
@@ -299,7 +650,15 @@
 #endif
 }
 
-static void ps3_unmap_sg(struct device *_dev, struct scatterlist *sg,
+static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg,
+			   int nents,
+			   enum dma_data_direction direction)
+{
+	BUG();
+	return 0;
+}
+
+static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg,
 	int nents, enum dma_data_direction direction)
 {
 #if defined(CONFIG_PS3_DYNAMIC_DMA)
@@ -307,18 +666,34 @@
 #endif
 }
 
+static void ps3_ioc0_unmap_sg(struct device *_dev, struct scatterlist *sg,
+			    int nents, enum dma_data_direction direction)
+{
+	BUG();
+}
+
 static int ps3_dma_supported(struct device *_dev, u64 mask)
 {
 	return mask >= DMA_32BIT_MASK;
 }
 
-static struct dma_mapping_ops ps3_dma_ops = {
+static struct dma_mapping_ops ps3_sb_dma_ops = {
 	.alloc_coherent = ps3_alloc_coherent,
 	.free_coherent = ps3_free_coherent,
-	.map_single = ps3_map_single,
+	.map_single = ps3_sb_map_single,
 	.unmap_single = ps3_unmap_single,
-	.map_sg = ps3_map_sg,
-	.unmap_sg = ps3_unmap_sg,
+	.map_sg = ps3_sb_map_sg,
+	.unmap_sg = ps3_sb_unmap_sg,
+	.dma_supported = ps3_dma_supported
+};
+
+static struct dma_mapping_ops ps3_ioc0_dma_ops = {
+	.alloc_coherent = ps3_alloc_coherent,
+	.free_coherent = ps3_free_coherent,
+	.map_single = ps3_ioc0_map_single,
+	.unmap_single = ps3_unmap_single,
+	.map_sg = ps3_ioc0_map_sg,
+	.unmap_sg = ps3_ioc0_unmap_sg,
 	.dma_supported = ps3_dma_supported
 };
 
@@ -328,7 +703,7 @@
 
 static void ps3_system_bus_release_device(struct device *_dev)
 {
-	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
 	kfree(dev);
 }
 
@@ -343,18 +718,37 @@
 int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
 {
 	int result;
-	static unsigned int dev_count = 1;
+	static unsigned int dev_ioc0_count;
+	static unsigned int dev_sb_count;
+	static unsigned int dev_vuart_count;
 
-	dev->core.parent = NULL;
+	if (!dev->core.parent)
+		dev->core.parent = &ps3_system_bus;
 	dev->core.bus = &ps3_system_bus_type;
 	dev->core.release = ps3_system_bus_release_device;
 
-	dev->core.archdata.of_node = NULL;
-	dev->core.archdata.dma_ops = &ps3_dma_ops;
-	dev->core.archdata.numa_node = 0;
+	switch (dev->dev_type) {
+	case PS3_DEVICE_TYPE_IOC0:
+		dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops;
+		snprintf(dev->core.bus_id, sizeof(dev->core.bus_id),
+			"ioc0_%02x", ++dev_ioc0_count);
+		break;
+	case PS3_DEVICE_TYPE_SB:
+		dev->core.archdata.dma_ops = &ps3_sb_dma_ops;
+		snprintf(dev->core.bus_id, sizeof(dev->core.bus_id),
+			"sb_%02x", ++dev_sb_count);
 
-	snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), "sb_%02x",
-		dev_count++);
+		break;
+	case PS3_DEVICE_TYPE_VUART:
+		snprintf(dev->core.bus_id, sizeof(dev->core.bus_id),
+			"vuart_%02x", ++dev_vuart_count);
+		break;
+	default:
+		BUG();
+	}
+
+	dev->core.archdata.of_node = NULL;
+	dev->core.archdata.numa_node = 0;
 
 	pr_debug("%s:%d add %s\n", __func__, __LINE__, dev->core.bus_id);
 
@@ -368,9 +762,15 @@
 {
 	int result;
 
+	pr_debug(" -> %s:%d: %s\n", __func__, __LINE__, drv->core.name);
+
+	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+		return -ENODEV;
+
 	drv->core.bus = &ps3_system_bus_type;
 
 	result = driver_register(&drv->core);
+	pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, drv->core.name);
 	return result;
 }
 
@@ -378,7 +778,9 @@
 
 void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv)
 {
+	pr_debug(" -> %s:%d: %s\n", __func__, __LINE__, drv->core.name);
 	driver_unregister(&drv->core);
+	pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, drv->core.name);
 }
 
 EXPORT_SYMBOL_GPL(ps3_system_bus_driver_unregister);
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
index 1bae8b1..802a9cc 100644
--- a/arch/powerpc/platforms/ps3/time.c
+++ b/arch/powerpc/platforms/ps3/time.c
@@ -39,7 +39,7 @@
 }
 
 #define dump_time(_a) _dump_time(_a, __func__, __LINE__)
-static void __attribute__ ((unused)) _dump_time(int time, const char* func,
+static void __maybe_unused _dump_time(int time, const char *func,
 	int line)
 {
 	struct rtc_time tm;
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index ae1fc92..992ba67 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -8,7 +8,7 @@
 obj-$(CONFIG_SMP)	+= smp.o
 obj-$(CONFIG_XICS)	+= xics.o
 obj-$(CONFIG_SCANLOG)	+= scanlog.o
-obj-$(CONFIG_EEH)	+= eeh.o eeh_cache.o eeh_driver.o eeh_event.o
+obj-$(CONFIG_EEH)	+= eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o
 obj-$(CONFIG_KEXEC)	+= kexec.o
 obj-$(CONFIG_PCI)	+= pci.o pci_dlpar.o
 obj-$(CONFIG_PCI_MSI)	+= msi.o
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 5f3e6d8..b8770395 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -1,6 +1,8 @@
 /*
  * eeh.c
- * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation
+ * Copyright IBM Corporation 2001, 2005, 2006
+ * Copyright Dave Engebretsen & Todd Inglett 2001
+ * Copyright Linas Vepstas 2005, 2006
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -15,6 +17,8 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com>
  */
 
 #include <linux/delay.h>
@@ -117,7 +121,6 @@
 static unsigned long ignored_check;
 static unsigned long total_mmio_ffs;
 static unsigned long false_positives;
-static unsigned long ignored_failures;
 static unsigned long slot_resets;
 
 #define IS_BRIDGE(class_code) (((class_code)<<16) == PCI_BASE_CLASS_BRIDGE)
@@ -505,6 +508,7 @@
 		printk(KERN_WARNING "EEH: read_slot_reset_state() failed; rc=%d dn=%s\n",
 		       ret, dn->full_name);
 		false_positives++;
+		pdn->eeh_false_positives++;
 		rc = 0;
 		goto dn_unlock;
 	}
@@ -513,6 +517,7 @@
 	 * they are empty when they don't have children. */
 	if ((rets[0] == 5) && (dn->child == NULL)) {
 		false_positives++;
+		pdn->eeh_false_positives++;
 		rc = 0;
 		goto dn_unlock;
 	}
@@ -522,6 +527,7 @@
 		printk(KERN_WARNING "EEH: event on unsupported device, rc=%d dn=%s\n",
 		       ret, dn->full_name);
 		false_positives++;
+		pdn->eeh_false_positives++;
 		rc = 0;
 		goto dn_unlock;
 	}
@@ -529,6 +535,7 @@
 	/* If not the kind of error we know about, punt. */
 	if (rets[0] != 1 && rets[0] != 2 && rets[0] != 4 && rets[0] != 5) {
 		false_positives++;
+		pdn->eeh_false_positives++;
 		rc = 0;
 		goto dn_unlock;
 	}
@@ -921,6 +928,7 @@
 	pdn->eeh_mode = 0;
 	pdn->eeh_check_count = 0;
 	pdn->eeh_freeze_count = 0;
+	pdn->eeh_false_positives = 0;
 
 	if (status && strcmp(status, "ok") != 0)
 		return NULL;	/* ignore devices with bad status */
@@ -1139,7 +1147,8 @@
 	pdn = PCI_DN(dn);
 	pdn->pcidev = dev;
 
-	pci_addr_cache_insert_device (dev);
+	pci_addr_cache_insert_device(dev);
+	eeh_sysfs_add_device(dev);
 }
 
 void eeh_add_device_tree_late(struct pci_bus *bus)
@@ -1178,6 +1187,7 @@
 	printk(KERN_DEBUG "EEH: remove device %s\n", pci_name(dev));
 #endif
 	pci_addr_cache_remove_device(dev);
+	eeh_sysfs_remove_device(dev);
 
 	dn = pci_device_to_OF_node(dev);
 	if (PCI_DN(dn)->pcidev) {
@@ -1214,11 +1224,10 @@
 				"check not wanted=%ld\n"
 				"eeh_total_mmio_ffs=%ld\n"
 				"eeh_false_positives=%ld\n"
-				"eeh_ignored_failures=%ld\n"
 				"eeh_slot_resets=%ld\n",
 				no_device, no_dn, no_cfg_addr, 
 				ignored_check, total_mmio_ffs, 
-				false_positives, ignored_failures, 
+				false_positives,
 				slot_resets);
 	}
 
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index f2bae04..e49c815 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -2,7 +2,8 @@
  * eeh_cache.c
  * PCI address cache; allows the lookup of PCI devices based on I/O address
  *
- * Copyright (C) 2004 Linas Vepstas <linas@austin.ibm.com> IBM Corporation
+ * Copyright IBM Corporation 2004
+ * Copyright Linas Vepstas <linas@austin.ibm.com> 2004
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -295,6 +296,8 @@
 			continue;
 		pci_dev_get (dev);  /* matching put is in eeh_remove_device() */
 		PCI_DN(dn)->pcidev = dev;
+
+		eeh_sysfs_add_device(dev);
 	}
 
 #ifdef DEBUG
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 161a584..15e015e 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -1,6 +1,7 @@
 /*
  * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
- * Copyright (C) 2004, 2005 Linas Vepstas <linas@linas.org>
+ * Copyright IBM Corp. 2004 2005
+ * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
  *
  * All rights reserved.
  *
@@ -19,8 +20,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *
- * Send feedback to <linas@us.ibm.com>
- *
+ * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
  */
 #include <linux/delay.h>
 #include <linux/interrupt.h>
diff --git a/arch/powerpc/platforms/pseries/eeh_sysfs.c b/arch/powerpc/platforms/pseries/eeh_sysfs.c
new file mode 100644
index 0000000..15e13b5
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_sysfs.c
@@ -0,0 +1,87 @@
+/*
+ * Sysfs entries for PCI Error Recovery for PAPR-compliant platform.
+ * Copyright IBM Corporation 2007
+ * Copyright Linas Vepstas <linas@austin.ibm.com> 2007
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
+ */
+#include <linux/pci.h>
+#include <asm/ppc-pci.h>
+#include <asm/pci-bridge.h>
+#include <linux/kobject.h>
+
+/**
+ * EEH_SHOW_ATTR -- create sysfs entry for eeh statistic
+ * @_name: name of file in sysfs directory
+ * @_memb: name of member in struct pci_dn to access
+ * @_format: printf format for display
+ *
+ * All of the attributes look very similar, so just
+ * auto-gen a cut-n-paste routine to display them.
+ */
+#define EEH_SHOW_ATTR(_name,_memb,_format)               \
+static ssize_t eeh_show_##_name(struct device *dev,      \
+		struct device_attribute *attr, char *buf)          \
+{                                                        \
+	struct pci_dev *pdev = to_pci_dev(dev);               \
+	struct device_node *dn = pci_device_to_OF_node(pdev); \
+	struct pci_dn *pdn;                                   \
+	                                                      \
+	if (!dn || PCI_DN(dn) == NULL)                        \
+		return 0;                                          \
+	                                                      \
+	pdn = PCI_DN(dn);                                     \
+	return sprintf(buf, _format "\n", pdn->_memb);        \
+}                                                        \
+static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL);
+
+
+EEH_SHOW_ATTR(eeh_mode, eeh_mode, "0x%x");
+EEH_SHOW_ATTR(eeh_config_addr, eeh_config_addr, "0x%x");
+EEH_SHOW_ATTR(eeh_pe_config_addr, eeh_pe_config_addr, "0x%x");
+EEH_SHOW_ATTR(eeh_check_count, eeh_check_count, "%d");
+EEH_SHOW_ATTR(eeh_freeze_count, eeh_freeze_count, "%d");
+EEH_SHOW_ATTR(eeh_false_positives, eeh_false_positives, "%d");
+
+void eeh_sysfs_add_device(struct pci_dev *pdev)
+{
+	int rc = 0;
+
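+	/*
+	 * device_create_file() returns 0 on success; a non-zero sum means
+	 * at least one attribute was not created.
+	 */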
+	rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode);
+	rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr);
+	rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
+	rc += device_create_file(&pdev->dev, &dev_attr_eeh_check_count);
+	rc += device_create_file(&pdev->dev, &dev_attr_eeh_false_positives);
+	rc += device_create_file(&pdev->dev, &dev_attr_eeh_freeze_count);
+
+	if (rc)
+		printk(KERN_WARNING "EEH: Unable to create sysfs entries\n");
+}
+
+void eeh_sysfs_remove_device(struct pci_dev *pdev)
+{
+	device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
+	device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr);
+	device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
+	device_remove_file(&pdev->dev, &dev_attr_eeh_check_count);
+	device_remove_file(&pdev->dev, &dev_attr_eeh_false_positives);
+	device_remove_file(&pdev->dev, &dev_attr_eeh_freeze_count);
+}
+
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 362dfbc..8cc6eee 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -373,12 +373,23 @@
 {
 	unsigned long size_bytes = 1UL << ppc64_pft_size;
 	unsigned long hpte_count = size_bytes >> 4;
-	unsigned long dummy1, dummy2;
+	unsigned long dummy1, dummy2, dword0;
+	long lpar_rc;
 	int i;
 
 	/* TODO: Use bulk call */
-	for (i = 0; i < hpte_count; i++)
-		plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2);
+	for (i = 0; i < hpte_count; i++) {
+		/* don't remove HPTEs with VRMA mappings */
+		lpar_rc = plpar_pte_remove_raw(H_ANDCOND, i, HPTE_V_1TB_SEG,
+						&dummy1, &dummy2);
+		if (lpar_rc == H_NOT_FOUND) {
+			lpar_rc = plpar_pte_read_raw(0, i, &dword0, &dummy1);
+			if (!lpar_rc && ((dword0 & HPTE_V_VRMA_MASK)
+				!= HPTE_V_VRMA_MASK))
+				/* Can be an HPTE for a 1TB segment, so remove it */
+				plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2);
+		}
+	}
 }
 
 /*
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index ffaf6c5..47f0e08 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -110,8 +110,6 @@
 			}
 		}
 	}
-
-	eeh_add_device_tree_late(bus);
 }
 EXPORT_SYMBOL_GPL(pcibios_fixup_new_pci_devices);
 
@@ -139,6 +137,8 @@
 
 	/* Make the discovered devices available */
 	pci_bus_add_devices(child_bus);
+
+	eeh_add_device_tree_late(child_bus);
 	return 0;
 }
 
@@ -171,6 +171,7 @@
 		if (!list_empty(&bus->devices)) {
 			pcibios_fixup_new_pci_devices(bus, 0);
 			pci_bus_add_devices(bus);
+			eeh_add_device_tree_late(bus);
 		}
 	} else if (mode == PCI_PROBE_NORMAL) {
 		/* use legacy probe */
@@ -179,6 +180,7 @@
 		if (num) {
 			pcibios_fixup_new_pci_devices(bus, 1);
 			pci_bus_add_devices(bus);
+			eeh_add_device_tree_late(bus);
 		}
 
 		list_for_each_entry(dev, &bus->devices, bus_list)
@@ -200,8 +202,6 @@
 	rtas_setup_phb(phb);
 	pci_process_bridge_OF_ranges(phb, dn, 0);
 
-	pci_setup_phb_io_dynamic(phb, primary);
-
 	pci_devs_phb_init_dynamic(phb);
 
 	if (dn->child)
@@ -210,6 +210,7 @@
 	scan_phb(phb);
 	pcibios_fixup_new_pci_devices(phb->bus, 0);
 	pci_bus_add_devices(phb->bus);
+	eeh_add_device_tree_late(phb->bus);
 
 	return phb;
 }
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index 2e4d10c..d003c80 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -108,6 +108,21 @@
 	return rc;
 }
 
+/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
+static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
+		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
+{
+	long rc;
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+	rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);
+
+	*old_pteh_ret = retbuf[0];
+	*old_ptel_ret = retbuf[1];
+
+	return rc;
+}
+
 static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
 		unsigned long avpn)
 {
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 2729d55..61e19f7 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -33,6 +33,8 @@
 static inline void setup_kexec_cpu_down_mpic(void) { }
 #endif
 
+extern void pSeries_final_fixup(void);
+
 /* Poweron flag used for enabling auto ups restart */
 extern unsigned long rtas_poweron_auto;
 
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 5aa97af..c02f874 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -123,7 +123,7 @@
 	strcpy(np->full_name, path);
 
 	np->properties = proplist;
-	OF_MARK_DYNAMIC(np);
+	of_node_set_flag(np, OF_DYNAMIC);
 	kref_init(&np->kref);
 
 	np->parent = derive_parent(path);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index a031d99..59e69f0 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -399,6 +399,7 @@
 	 * a good time to find other work to dispatch.
 	 */
 	get_lppaca()->idle = 1;
+	get_lppaca()->donate_dedicated_cpu = 1;
 
 	/*
 	 * We come in with interrupts disabled, and need_resched()
@@ -431,6 +432,7 @@
 
 out:
 	HMT_medium();
+	get_lppaca()->donate_dedicated_cpu = 0;
 	get_lppaca()->idle = 0;
 }
 
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index f1df942..5bd90a7 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -156,9 +156,9 @@
 
 
 #ifdef CONFIG_SMP
-static int get_irq_server(unsigned int virq)
+static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
-	unsigned int server;
+	int server;
 	/* For the moment only implement delivery to all cpus or one cpu */
 	cpumask_t cpumask = irq_desc[virq].affinity;
 	cpumask_t tmp = CPU_MASK_NONE;
@@ -166,22 +166,25 @@
 	if (!distribute_irqs)
 		return default_server;
 
-	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
-		server = default_distrib_server;
-	} else {
+	if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
 		cpus_and(tmp, cpu_online_map, cpumask);
 
-		if (cpus_empty(tmp))
-			server = default_distrib_server;
-		else
-			server = get_hard_smp_processor_id(first_cpu(tmp));
+		server = first_cpu(tmp);
+
+		if (server < NR_CPUS)
+			return get_hard_smp_processor_id(server);
+
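+		/* With strict_check, fail rather than fall back to a default server. */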
+		if (strict_check)
+			return -1;
 	}
 
-	return server;
+	if (cpus_equal(cpu_online_map, cpu_present_map))
+		return default_distrib_server;
 
+	return default_server;
 }
 #else
-static int get_irq_server(unsigned int virq)
+static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
 	return default_server;
 }
@@ -192,7 +195,7 @@
 {
 	unsigned int irq;
 	int call_status;
-	unsigned int server;
+	int server;
 
 	pr_debug("xics: unmask virq %d\n", virq);
 
@@ -201,7 +204,7 @@
 	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
 		return;
 
-	server = get_irq_server(virq);
+	server = get_irq_server(virq, 0);
 
 	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
 				DEFAULT_PRIORITY);
@@ -398,8 +401,7 @@
 	unsigned int irq;
 	int status;
 	int xics_status[2];
-	unsigned long newmask;
-	cpumask_t tmp = CPU_MASK_NONE;
+	int irq_server;
 
 	irq = (unsigned int)irq_map[virq].hwirq;
 	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
@@ -413,18 +415,21 @@
 		return;
 	}
 
-	/* For the moment only implement delivery to all cpus or one cpu */
-	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
-		newmask = default_distrib_server;
-	} else {
-		cpus_and(tmp, cpu_online_map, cpumask);
-		if (cpus_empty(tmp))
-			return;
-		newmask = get_hard_smp_processor_id(first_cpu(tmp));
+	/*
+	 * For the moment only implement delivery to all cpus or one cpu.
+	 * Get the current irq_server for the given irq.
+	 */
+	irq_server = get_irq_server(irq, 1);
+	if (irq_server == -1) {
+		char cpulist[128];
+		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
+		printk(KERN_WARNING "xics_set_affinity: No online cpus in "
+				"the mask %s for irq %d\n", cpulist, virq);
+		return;
 	}
 
 	status = rtas_call(ibm_set_xive, 3, 1, NULL,
-				irq, newmask, xics_status[1]);
+				irq, irq_server, xics_status[1]);
 
 	if (status) {
 		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index c3ce0bd..f65078c 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -5,7 +5,6 @@
 mpic-msi-obj-$(CONFIG_PCI_MSI)	+= mpic_msi.o mpic_u3msi.o
 obj-$(CONFIG_MPIC)		+= mpic.o $(mpic-msi-obj-y)
 
-obj-$(CONFIG_PPC_INDIRECT_PCI)	+= indirect_pci.o
 obj-$(CONFIG_PPC_MPC106)	+= grackle.o
 obj-$(CONFIG_PPC_DCR)		+= dcr.o
 obj-$(CONFIG_PPC_DCR_NATIVE)	+= dcr-low.o
@@ -13,16 +12,19 @@
 obj-$(CONFIG_U3_DART)		+= dart_iommu.o
 obj-$(CONFIG_MMIO_NVRAM)	+= mmio_nvram.o
 obj-$(CONFIG_FSL_SOC)		+= fsl_soc.o
-obj-$(CONFIG_FSL_PCIE)		+= fsl_pcie.o
 obj-$(CONFIG_TSI108_BRIDGE)	+= tsi108_pci.o tsi108_dev.o
 obj-$(CONFIG_QUICC_ENGINE)	+= qe_lib/
 mv64x60-$(CONFIG_PCI)		+= mv64x60_pci.o
 obj-$(CONFIG_MV64X60)		+= $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o
+obj-$(CONFIG_RTC_DRV_CMOS)	+= rtc_cmos_setup.o
 
 # contains only the suspend handler for time
+ifeq ($(CONFIG_RTC_CLASS),)
 obj-$(CONFIG_PM)		+= timer.o
+endif
 
 ifeq ($(CONFIG_PPC_MERGE),y)
+obj-$(CONFIG_PPC_INDIRECT_PCI)	+= indirect_pci.o
 obj-$(CONFIG_PPC_I8259)		+= i8259.o
 obj-$(CONFIG_PPC_83xx)		+= ipic.o
 obj-$(CONFIG_4xx)		+= uic.o
diff --git a/arch/powerpc/sysdev/fsl_pcie.c b/arch/powerpc/sysdev/fsl_pcie.c
deleted file mode 100644
index 041c07e..0000000
--- a/arch/powerpc/sysdev/fsl_pcie.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Support for indirect PCI bridges.
- *
- * Copyright (C) 1998 Gabriel Paubert.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * "Temporary" MPC8548 Errata file -
- * The standard indirect_pci code should work with future silicon versions.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/machdep.h>
-
-#define PCI_CFG_OUT out_be32
-
-/* ERRATA PCI-Ex 14 PCIE Controller timeout */
-#define PCIE_FIX		out_be32(hose->cfg_addr+0x4, 0x0400ffff)
-
-
-static int
-indirect_read_config_pcie(struct pci_bus *bus, unsigned int devfn, int offset,
-		     int len, u32 *val)
-{
-	struct pci_controller *hose = bus->sysdata;
-	volatile void __iomem *cfg_data;
-	u32 temp;
-
-	if (ppc_md.pci_exclude_device)
-		if (ppc_md.pci_exclude_device(bus->number, devfn))
-			return PCIBIOS_DEVICE_NOT_FOUND;
-
-	/* Possible artifact of CDCpp50937 needs further investigation */
-	if (devfn != 0x0 && bus->number == 0xff)
-		return PCIBIOS_DEVICE_NOT_FOUND;
-
-	PCIE_FIX;
-	if (bus->number == 0xff) {
-		PCI_CFG_OUT(hose->cfg_addr,
-			    (0x80000000 | ((offset & 0xf00) << 16) |
-			     ((bus->number - hose->bus_offset) << 16)
-			     | (devfn << 8) | ((offset & 0xfc) )));
-	} else {
-		PCI_CFG_OUT(hose->cfg_addr,
-			    (0x80000001 | ((offset & 0xf00) << 16) |
-			     ((bus->number - hose->bus_offset) << 16)
-			     | (devfn << 8) | ((offset & 0xfc) )));
-	}
-
-	/*
-	 * Note: the caller has already checked that offset is
-	 * suitably aligned and that len is 1, 2 or 4.
-	 */
-	/* ERRATA PCI-Ex 12 - Configuration Address/Data Alignment */
-	cfg_data = hose->cfg_data;
-	PCIE_FIX;
-	temp = in_le32(cfg_data);
-	switch (len) {
-	case 1:
-		*val = (temp >> (((offset & 3))*8)) & 0xff;
-		break;
-	case 2:
-		*val = (temp >> (((offset & 3))*8)) & 0xffff;
-		break;
-	default:
-		*val = temp;
-		break;
-	}
-	return PCIBIOS_SUCCESSFUL;
-}
-
-static int
-indirect_write_config_pcie(struct pci_bus *bus, unsigned int devfn, int offset,
-		      int len, u32 val)
-{
-	struct pci_controller *hose = bus->sysdata;
-	volatile void __iomem *cfg_data;
-	u32 temp;
-
-	if (ppc_md.pci_exclude_device)
-		if (ppc_md.pci_exclude_device(bus->number, devfn))
-			return PCIBIOS_DEVICE_NOT_FOUND;
-
-	/* Possible artifact of CDCpp50937 needs further investigation */
-	if (devfn != 0x0 && bus->number == 0xff)
-		return PCIBIOS_DEVICE_NOT_FOUND;
-
-	PCIE_FIX;
-	if (bus->number == 0xff) {
-		PCI_CFG_OUT(hose->cfg_addr,
-			    (0x80000000 | ((offset & 0xf00) << 16) |
-			     ((bus->number - hose->bus_offset) << 16)
-			     | (devfn << 8) | ((offset & 0xfc) )));
-	} else {
-		PCI_CFG_OUT(hose->cfg_addr,
-			    (0x80000001 | ((offset & 0xf00) << 16) |
-			     ((bus->number - hose->bus_offset) << 16)
-			     | (devfn << 8) | ((offset & 0xfc) )));
-        }
-
-	/*
-	 * Note: the caller has already checked that offset is
-	 * suitably aligned and that len is 1, 2 or 4.
-	 */
-	/* ERRATA PCI-Ex 12 - Configuration Address/Data Alignment */
-	cfg_data = hose->cfg_data;
-	switch (len) {
-	case 1:
-		PCIE_FIX;
-		temp = in_le32(cfg_data);
-		temp = (temp & ~(0xff << ((offset & 3) * 8))) |
-			(val << ((offset & 3) * 8));
-		PCIE_FIX;
-		out_le32(cfg_data, temp);
-		break;
-	case 2:
-		PCIE_FIX;
-		temp = in_le32(cfg_data);
-		temp = (temp & ~(0xffff << ((offset & 3) * 8)));
-		temp |= (val << ((offset & 3) * 8)) ;
-		PCIE_FIX;
-		out_le32(cfg_data, temp);
-		break;
-	default:
-		PCIE_FIX;
-		out_le32(cfg_data, val);
-		break;
-	}
-	PCIE_FIX;
-	return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops indirect_pcie_ops = {
-	indirect_read_config_pcie,
-	indirect_write_config_pcie
-};
-
-void __init
-setup_indirect_pcie_nomap(struct pci_controller* hose, void __iomem * cfg_addr,
-	void __iomem * cfg_data)
-{
-	hose->cfg_addr = cfg_addr;
-	hose->cfg_data = cfg_data;
-	hose->ops = &indirect_pcie_ops;
-}
-
-void __init
-setup_indirect_pcie(struct pci_controller* hose, u32 cfg_addr, u32 cfg_data)
-{
-	unsigned long base = cfg_addr & PAGE_MASK;
-	void __iomem *mbase, *addr, *data;
-
-	mbase = ioremap(base, PAGE_SIZE);
-	addr = mbase + (cfg_addr & ~PAGE_MASK);
-	if ((cfg_data & PAGE_MASK) != base)
-		mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
-	data = mbase + (cfg_data & ~PAGE_MASK);
-	setup_indirect_pcie_nomap(hose, addr, data);
-}
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index cad1757..3289fab 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -197,6 +197,7 @@
 		struct gianfar_platform_data gfar_data;
 		const unsigned int *id;
 		const char *model;
+		const char *ctype;
 		const void *mac_addr;
 		const phandle *ph;
 		int n_res = 2;
@@ -254,6 +255,14 @@
 			    FSL_GIANFAR_DEV_HAS_VLAN |
 			    FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
 
+		ctype = of_get_property(np, "phy-connection-type", NULL);
+
+		/* We only care about rgmii-id.  The rest are autodetected */
+		if (ctype && !strcmp(ctype, "rgmii-id"))
+			gfar_data.interface = PHY_INTERFACE_MODE_RGMII_ID;
+		else
+			gfar_data.interface = PHY_INTERFACE_MODE_MII;
+
 		ph = of_get_property(np, "phy-handle", NULL);
 		phy = of_find_node_by_phandle(*ph);
 
@@ -1028,6 +1037,19 @@
 
 arch_initcall(fs_enet_of_init);
 
+static int __init fsl_pcmcia_of_init(void)
+{
+	struct device_node *np = NULL;
+	/*
+	 * Register all the devices whose type is "pcmcia"
+	 */
+	while ((np = of_find_compatible_node(np,
+			"pcmcia", "fsl,pq-pcmcia")) != NULL)
+			    of_platform_device_create(np, "m8xx-pcmcia", NULL);
+	return 0;
+}
+
+arch_initcall(fsl_pcmcia_of_init);
 
 static const char *smc_regs = "regs";
 static const char *smc_pram = "pram";
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
index e7148846..c7e6e85 100644
--- a/arch/powerpc/sysdev/indirect_pci.c
+++ b/arch/powerpc/sysdev/indirect_pci.c
@@ -33,18 +33,27 @@
 	struct pci_controller *hose = bus->sysdata;
 	volatile void __iomem *cfg_data;
 	u8 cfg_type = 0;
+	u32 bus_no, reg;
 
 	if (ppc_md.pci_exclude_device)
-		if (ppc_md.pci_exclude_device(bus->number, devfn))
+		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
 			return PCIBIOS_DEVICE_NOT_FOUND;
 	
-	if (hose->set_cfg_type)
+	if (hose->indirect_type & PPC_INDIRECT_TYPE_SET_CFG_TYPE)
 		if (bus->number != hose->first_busno)
 			cfg_type = 1;
 
-	PCI_CFG_OUT(hose->cfg_addr, 					 
-		 (0x80000000 | ((bus->number - hose->bus_offset) << 16)
-		  | (devfn << 8) | ((offset & 0xfc) | cfg_type)));
+	bus_no = (bus->number == hose->first_busno) ?
+			hose->self_busno : bus->number;
+
+	if (hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG)
+		reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
+	else
+		reg = offset & 0xfc;
+
+	PCI_CFG_OUT(hose->cfg_addr,
+		 (0x80000000 | (bus_no << 16)
+		  | (devfn << 8) | reg | cfg_type));
 
 	/*
 	 * Note: the caller has already checked that offset is
@@ -72,18 +81,33 @@
 	struct pci_controller *hose = bus->sysdata;
 	volatile void __iomem *cfg_data;
 	u8 cfg_type = 0;
+	u32 bus_no, reg;
 
 	if (ppc_md.pci_exclude_device)
-		if (ppc_md.pci_exclude_device(bus->number, devfn))
+		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
 			return PCIBIOS_DEVICE_NOT_FOUND;
 
-	if (hose->set_cfg_type)
+	if (hose->indirect_type & PPC_INDIRECT_TYPE_SET_CFG_TYPE)
 		if (bus->number != hose->first_busno)
 			cfg_type = 1;
 
-	PCI_CFG_OUT(hose->cfg_addr, 					 
-		 (0x80000000 | ((bus->number - hose->bus_offset) << 16)
-		  | (devfn << 8) | ((offset & 0xfc) | cfg_type)));
+	bus_no = (bus->number == hose->first_busno) ?
+			hose->self_busno : bus->number;
+
+	if (hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG)
+		reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
+	else
+		reg = offset & 0xfc;
+
+	PCI_CFG_OUT(hose->cfg_addr,
+		 (0x80000000 | (bus_no << 16)
+		  | (devfn << 8) | reg | cfg_type));
+
+	/* suppress setting of PCI_PRIMARY_BUS */
+	if (hose->indirect_type & PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
+		if ((offset == PCI_PRIMARY_BUS) &&
+			(bus->number == hose->first_busno))
+		val &= 0xffffff00;
 
 	/*
 	 * Note: the caller has already checked that offset is
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.h b/arch/powerpc/sysdev/mpc8xx_pic.h
index afa2ee6..9fe00ee 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.h
+++ b/arch/powerpc/sysdev/mpc8xx_pic.h
@@ -4,9 +4,16 @@
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 
-extern struct hw_interrupt_type mpc8xx_pic;
-
 int mpc8xx_pic_init(void);
 unsigned int mpc8xx_get_irq(void);
 
+/*
+ * Some internal interrupt registers use an 8-bit mask for the interrupt
+ * level instead of a number.
+ */
+static inline uint mk_int_int_mask(uint mask)
+{
+	return (1 << (7 - (mask/2)));
+}
+
 #endif /* _PPC_KERNEL_PPC8xx_H */
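The mk_int_int_mask() helper added above packs an MPC8xx interrupt level into the single-bit mask format used by those registers, with level 0 landing on the most significant bit.  A standalone sketch of the mapping, written as ordinary user-space C rather than kernel code:

	#include <stdio.h>

	/* Same arithmetic as the kernel helper: level/2 selects the bit,
	 * counted down from bit 7. */
	static unsigned int mk_int_int_mask(unsigned int level)
	{
		return 1u << (7 - (level / 2));
	}

	int main(void)
	{
		unsigned int level;

		/* level 0 -> 0x80, level 2 -> 0x40, ... level 14 -> 0x01 */
		for (level = 0; level <= 14; level += 2)
			printf("level %2u -> mask 0x%02x\n",
			       level, mk_int_int_mask(level));
		return 0;
	}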
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index 4b0a9c8..b618fa6 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -12,6 +12,7 @@
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/console.h>
 #include <linux/mv643xx.h>
 #include <linux/platform_device.h>
 
@@ -420,3 +421,30 @@
 	return err;
 }
 arch_initcall(mv64x60_device_setup);
+
+static int __init mv64x60_add_mpsc_console(void)
+{
+	struct device_node *np = NULL;
+	const char *prop;
+
+	prop = of_get_property(of_chosen, "linux,stdout-path", NULL);
+	if (prop == NULL)
+		goto not_mpsc;
+
+	np = of_find_node_by_path(prop);
+	if (!np)
+		goto not_mpsc;
+
+	if (!of_device_is_compatible(np, "marvell,mpsc"))
+		goto not_mpsc;
+
+	prop = of_get_property(np, "block-index", NULL);
+	if (!prop)
+		goto not_mpsc;
+
+	add_preferred_console("ttyMM", *(int *)prop, NULL);
+
+not_mpsc:
+	return 0;
+}
+console_initcall(mv64x60_add_mpsc_console);
diff --git a/arch/powerpc/sysdev/mv64x60_pci.c b/arch/powerpc/sysdev/mv64x60_pci.c
index b5aef4c..45db86c 100644
--- a/arch/powerpc/sysdev/mv64x60_pci.c
+++ b/arch/powerpc/sysdev/mv64x60_pci.c
@@ -137,18 +137,15 @@
 		printk(KERN_WARNING "Can't get bus-range for %s, assume"
 		       " bus 0\n", dev->full_name);
 
-	hose = pcibios_alloc_controller();
+	hose = pcibios_alloc_controller(dev);
 	if (!hose)
 		return -ENOMEM;
 
-	hose->arch_data = dev;
-	hose->set_cfg_type = 1;
-
 	hose->first_busno = bus_range ? bus_range[0] : 0;
 	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 
 	setup_indirect_pci(hose, rsrc.start, rsrc.start + 4);
-	hose->bus_offset = hose->first_busno;
+	hose->self_busno = hose->first_busno;
 
 	printk(KERN_INFO "Found MV64x60 PCI host bridge at 0x%016llx. "
 	       "Firmware bus number: %d->%d\n",
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
index ac12a44..f970e54 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc.c
@@ -18,6 +18,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/stddef.h>
+#include <linux/module.h>
 
 #include <asm/irq.h>
 #include <asm/io.h>
@@ -40,6 +41,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);
 
 int ucc_set_type(int ucc_num, struct ucc_common *regs,
 		 enum ucc_speed_type speed)
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
index 9143236..3df202e 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
@@ -19,6 +19,7 @@
 #include <linux/stddef.h>
 #include <linux/interrupt.h>
 #include <linux/err.h>
+#include <linux/module.h>
 
 #include <asm/io.h>
 #include <asm/immap_qe.h>
@@ -70,6 +71,7 @@
 	printk(KERN_INFO "guemr : addr - 0x%08x, val - 0x%02x",
 		  (u32) & uccf->uf_regs->guemr, uccf->uf_regs->guemr);
 }
+EXPORT_SYMBOL(ucc_fast_dump_regs);
 
 u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
 {
@@ -85,11 +87,13 @@
 	default: return QE_CR_SUBBLOCK_INVALID;
 	}
 }
+EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
 
 void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
 {
 	out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
 }
+EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
 
 void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
 {
@@ -110,6 +114,7 @@
 	}
 	out_be32(&uf_regs->gumr, gumr);
 }
+EXPORT_SYMBOL(ucc_fast_enable);
 
 void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
 {
@@ -130,6 +135,7 @@
 	}
 	out_be32(&uf_regs->gumr, gumr);
 }
+EXPORT_SYMBOL(ucc_fast_disable);
 
 int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret)
 {
@@ -341,6 +347,7 @@
 	*uccf_ret = uccf;
 	return 0;
 }
+EXPORT_SYMBOL(ucc_fast_init);
 
 void ucc_fast_free(struct ucc_fast_private * uccf)
 {
@@ -355,3 +362,4 @@
 
 	kfree(uccf);
 }
+EXPORT_SYMBOL(ucc_fast_free);
diff --git a/arch/powerpc/sysdev/rtc_cmos_setup.c b/arch/powerpc/sysdev/rtc_cmos_setup.c
new file mode 100644
index 0000000..e276048
--- /dev/null
+++ b/arch/powerpc/sysdev/rtc_cmos_setup.c
@@ -0,0 +1,49 @@
+/*
+ * Setup code for PC-style Real-Time Clock.
+ *
+ * Author: Wade Farnsworth <wfarnsworth@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/mc146818rtc.h>
+
+#include <asm/prom.h>
+
+static int  __init add_rtc(void)
+{
+	struct device_node *np;
+	struct platform_device *pd;
+	struct resource res;
+	int ret;
+
+	np = of_find_compatible_node(NULL, NULL, "pnpPNP,b00");
+	if (!np)
+		return -ENODEV;
+
+	ret = of_address_to_resource(np, 0, &res);
+	of_node_put(np);
+	if (ret)
+		return ret;
+
+	/*
+	 * RTC_PORT(x) is hardcoded in asm/mc146818rtc.h.  Verify that the
+	 * address provided by the device node matches.
+	 */
+	if (res.start != RTC_PORT(0))
+		return -EINVAL;
+
+	pd = platform_device_register_simple("rtc_cmos", -1,
+					     &res, 1);
+	if (IS_ERR(pd))
+		return PTR_ERR(pd);
+
+	return 0;
+}
+fs_initcall(add_rtc);
diff --git a/arch/powerpc/sysdev/timer.c b/arch/powerpc/sysdev/timer.c
index 4a01748..e81e7ec 100644
--- a/arch/powerpc/sysdev/timer.c
+++ b/arch/powerpc/sysdev/timer.c
@@ -24,7 +24,12 @@
 
 	/* get current RTC time and convert to seconds */
 	get_rtc_time(&cur_rtc_tm);
-	rtc_tm_to_time(&cur_rtc_tm, &cur_rtc_time);
+	cur_rtc_time = mktime(cur_rtc_tm.tm_year + 1900,
+			      cur_rtc_tm.tm_mon + 1,
+			      cur_rtc_tm.tm_mday,
+			      cur_rtc_tm.tm_hour,
+			      cur_rtc_tm.tm_min,
+			      cur_rtc_tm.tm_sec);
 
 	diff = cur_rtc_time - suspend_rtc_time;
 
@@ -44,7 +49,12 @@
 	WARN_ON(!ppc_md.get_rtc_time);
 
 	get_rtc_time(&suspend_rtc_tm);
-	rtc_tm_to_time(&suspend_rtc_tm, &suspend_rtc_time);
+	suspend_rtc_time = mktime(suspend_rtc_tm.tm_year + 1900,
+				  suspend_rtc_tm.tm_mon + 1,
+				  suspend_rtc_tm.tm_mday,
+				  suspend_rtc_tm.tm_hour,
+				  suspend_rtc_tm.tm_min,
+				  suspend_rtc_tm.tm_sec);
 
 	return 0;
 }
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index 7d3b09b..a113d80 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -72,12 +72,11 @@
 	int ret;
 
 	for (np = NULL, i = 0;
-	     (np = of_find_compatible_node(np, "network", "tsi-ethernet")) != NULL;
+	     (np = of_find_compatible_node(np, "network", "tsi108-ethernet")) != NULL;
 	     i++) {
 		struct resource r[2];
-		struct device_node *phy;
+		struct device_node *phy, *mdio;
 		hw_info tsi_eth_data;
-		const unsigned int *id;
 		const unsigned int *phy_id;
 		const void *mac_addr;
 		const phandle *ph;
@@ -111,6 +110,13 @@
 		if (mac_addr)
 			memcpy(tsi_eth_data.mac_addr, mac_addr, 6);
 
+		ph = of_get_property(np, "mdio-handle", NULL);
+		mdio = of_find_node_by_phandle(*ph);
+		ret = of_address_to_resource(mdio, 0, &res);
+		of_node_put(mdio);
+		if (ret)
+			goto unreg;
+
 		ph = of_get_property(np, "phy-handle", NULL);
 		phy = of_find_node_by_phandle(*ph);
 
@@ -119,20 +125,25 @@
 			goto unreg;
 		}
 
-		id = of_get_property(phy, "reg", NULL);
-		phy_id = of_get_property(phy, "phy-id", NULL);
-		ret = of_address_to_resource(phy, 0, &res);
-		if (ret) {
-			of_node_put(phy);
-			goto unreg;
-		}
+		phy_id = of_get_property(phy, "reg", NULL);
+
 		tsi_eth_data.regs = r[0].start;
 		tsi_eth_data.phyregs = res.start;
 		tsi_eth_data.phy = *phy_id;
 		tsi_eth_data.irq_num = irq_of_parse_and_map(np, 0);
-		if (of_device_is_compatible(phy, "bcm54xx"))
+
+		/* Some boards with the TSI108 bridge (e.g. Holly)
+		 * have a miswiring of the ethernet PHYs which
+		 * requires a workaround.  The special
+		 * "txc-rxc-delay-disable" property enables this
+		 * workaround.  FIXME: Need to port the tsi108_eth
+		 * driver itself to phylib and use a non-misleading
+		 * name for the workaround flag - it's not actually to
+		 * do with the model of PHY in use */
+		if (of_get_property(phy, "txc-rxc-delay-disable", NULL))
 			tsi_eth_data.phy_type = TSI108_PHY_BCM54XX;
 		of_node_put(phy);
+
 		ret =
 		    platform_device_add_data(tsi_eth_dev, &tsi_eth_data,
 					     sizeof(hw_info));
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 2153163..90db8a7 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -64,9 +64,10 @@
 			   int offset, int len, u32 val)
 {
 	volatile unsigned char *cfg_addr;
+	struct pci_controller *hose = bus->sysdata;
 
 	if (ppc_md.pci_exclude_device)
-		if (ppc_md.pci_exclude_device(bus->number, devfunc))
+		if (ppc_md.pci_exclude_device(hose, bus->number, devfunc))
 			return PCIBIOS_DEVICE_NOT_FOUND;
 
 	cfg_addr = (unsigned char *)(tsi_mk_config_addr(bus->number,
@@ -149,10 +150,11 @@
 			  int len, u32 * val)
 {
 	volatile unsigned char *cfg_addr;
+	struct pci_controller *hose = bus->sysdata;
 	u32 temp;
 
 	if (ppc_md.pci_exclude_device)
-		if (ppc_md.pci_exclude_device(bus->number, devfn))
+		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
 			return PCIBIOS_DEVICE_NOT_FOUND;
 
 	cfg_addr = (unsigned char *)(tsi_mk_config_addr(bus->number,
@@ -219,14 +221,12 @@
 		       " bus 0\n", dev->full_name);
 	}
 
-	hose = pcibios_alloc_controller();
+	hose = pcibios_alloc_controller(dev);
 
 	if (!hose) {
 		printk("PCI Host bridge init failed\n");
 		return -ENOMEM;
 	}
-	hose->arch_data = dev;
-	hose->set_cfg_type = 1;
 
 	hose->first_busno = bus_range ? bus_range[0] : 0;
 	hose->last_busno = bus_range ? bus_range[1] : 0xff;
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 28fdf4f..669e656 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2634,7 +2634,7 @@
 __initcall(setup_xmon_sysrq);
 #endif /* CONFIG_MAGIC_SYSRQ */
 
-int __initdata xmon_early, xmon_off;
+static int __initdata xmon_early, xmon_off;
 
 static int __init early_parse_xmon(char *p)
 {
diff --git a/arch/ppc/configs/ev64260_defconfig b/arch/ppc/configs/ev64260_defconfig
index 84cc142..587e9a3 100644
--- a/arch/ppc/configs/ev64260_defconfig
+++ b/arch/ppc/configs/ev64260_defconfig
@@ -531,7 +531,6 @@
 # CONFIG_I2C_AMD8111 is not set
 # CONFIG_I2C_I801 is not set
 # CONFIG_I2C_I810 is not set
-# CONFIG_I2C_ISA is not set
 # CONFIG_I2C_NFORCE2 is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_PIIX4 is not set
diff --git a/arch/ppc/configs/mpc8540_ads_defconfig b/arch/ppc/configs/mpc8540_ads_defconfig
index c5c8602..bf676eb 100644
--- a/arch/ppc/configs/mpc8540_ads_defconfig
+++ b/arch/ppc/configs/mpc8540_ads_defconfig
@@ -452,7 +452,6 @@
 # CONFIG_I2C_AMD8111 is not set
 # CONFIG_I2C_I801 is not set
 # CONFIG_I2C_I810 is not set
-# CONFIG_I2C_ISA is not set
 CONFIG_I2C_MPC=y
 # CONFIG_I2C_NFORCE2 is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
diff --git a/arch/ppc/configs/mpc8548_cds_defconfig b/arch/ppc/configs/mpc8548_cds_defconfig
index abe034f..f36fc5db 100644
--- a/arch/ppc/configs/mpc8548_cds_defconfig
+++ b/arch/ppc/configs/mpc8548_cds_defconfig
@@ -413,7 +413,6 @@
 #
 # I2C Hardware Bus support
 #
-# CONFIG_I2C_ISA is not set
 CONFIG_I2C_MPC=y
 # CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_PCA_ISA is not set
diff --git a/arch/ppc/configs/mpc8555_cds_defconfig b/arch/ppc/configs/mpc8555_cds_defconfig
index 15abebf..4f1e320 100644
--- a/arch/ppc/configs/mpc8555_cds_defconfig
+++ b/arch/ppc/configs/mpc8555_cds_defconfig
@@ -518,7 +518,6 @@
 # CONFIG_I2C_I801 is not set
 # CONFIG_I2C_I810 is not set
 # CONFIG_I2C_PIIX4 is not set
-# CONFIG_I2C_ISA is not set
 CONFIG_I2C_MPC=y
 # CONFIG_I2C_NFORCE2 is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
diff --git a/arch/ppc/configs/mpc8560_ads_defconfig b/arch/ppc/configs/mpc8560_ads_defconfig
index f834fb5..f12d48f 100644
--- a/arch/ppc/configs/mpc8560_ads_defconfig
+++ b/arch/ppc/configs/mpc8560_ads_defconfig
@@ -489,7 +489,6 @@
 # CONFIG_I2C_I801 is not set
 # CONFIG_I2C_I810 is not set
 # CONFIG_I2C_PIIX4 is not set
-# CONFIG_I2C_ISA is not set
 CONFIG_I2C_MPC=y
 # CONFIG_I2C_NFORCE2 is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
diff --git a/arch/ppc/configs/radstone_ppc7d_defconfig b/arch/ppc/configs/radstone_ppc7d_defconfig
index ca4d1fd..9f64532 100644
--- a/arch/ppc/configs/radstone_ppc7d_defconfig
+++ b/arch/ppc/configs/radstone_ppc7d_defconfig
@@ -710,7 +710,6 @@
 # CONFIG_I2C_I801 is not set
 # CONFIG_I2C_I810 is not set
 # CONFIG_I2C_PIIX4 is not set
-# CONFIG_I2C_ISA is not set
 # CONFIG_I2C_MPC is not set
 # CONFIG_I2C_NFORCE2 is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
diff --git a/arch/ppc/configs/stx_gp3_defconfig b/arch/ppc/configs/stx_gp3_defconfig
index 3fedc43..70d6f84 100644
--- a/arch/ppc/configs/stx_gp3_defconfig
+++ b/arch/ppc/configs/stx_gp3_defconfig
@@ -661,7 +661,6 @@
 # CONFIG_I2C_I801 is not set
 # CONFIG_I2C_I810 is not set
 # CONFIG_I2C_PIIX4 is not set
-# CONFIG_I2C_ISA is not set
 # CONFIG_I2C_MPC is not set
 # CONFIG_I2C_NFORCE2 is not set
 # CONFIG_I2C_PARPORT is not set
diff --git a/arch/ppc/configs/sycamore_defconfig b/arch/ppc/configs/sycamore_defconfig
index 758114c..6996cca 100644
--- a/arch/ppc/configs/sycamore_defconfig
+++ b/arch/ppc/configs/sycamore_defconfig
@@ -461,7 +461,6 @@
 # CONFIG_I2C_I801 is not set
 # CONFIG_I2C_I810 is not set
 # CONFIG_I2C_IBM_IIC is not set
-# CONFIG_I2C_ISA is not set
 # CONFIG_I2C_NFORCE2 is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_PIIX4 is not set
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index d319f9b..0da55368 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -328,7 +328,7 @@
 	mtspr   SPRN_L1CSR0,r3
 	isync
 	blr
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
 	mfspr	r3,SPRN_L1CSR1
 	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
 	mtspr	SPRN_L1CSR1,r3
@@ -355,7 +355,7 @@
 _GLOBAL(__flush_icache_range)
 BEGIN_FTR_SECTION
 	blr				/* for 601, do nothing */
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	li	r5,L1_CACHE_BYTES-1
 	andc	r3,r3,r5
 	subf	r4,r3,r4
@@ -472,7 +472,7 @@
 _GLOBAL(__flush_dcache_icache)
 BEGIN_FTR_SECTION
 	blr					/* for 601, do nothing */
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	rlwinm	r3,r3,0,0,19			/* Get page base address */
 	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
 	mtctr	r4
@@ -500,7 +500,7 @@
 _GLOBAL(__flush_dcache_icache_phys)
 BEGIN_FTR_SECTION
 	blr					/* for 601, do nothing */
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	mfmsr	r10
 	rlwinm	r0,r10,0,28,26			/* clear DR */
 	mtmsr	r0
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index a416520..63f0a98 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -64,7 +64,6 @@
 
 EXPORT_SYMBOL(clear_pages);
 EXPORT_SYMBOL(clear_user_page);
-EXPORT_SYMBOL(do_signal);
 EXPORT_SYMBOL(transfer_to_handler);
 EXPORT_SYMBOL(do_IRQ);
 EXPORT_SYMBOL(machine_check_exception);
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index c79704f..967c1ef 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -526,7 +526,7 @@
 	 * Systems with OF can look in the properties on the cpu node(s)
 	 * for a possibly more accurate value.
 	 */
-	if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
+	if (! cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE)) {
 		dcache_bsize = cur_cpu_spec->dcache_bsize;
 		icache_bsize = cur_cpu_spec->icache_bsize;
 		ucache_bsize = 0;
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 0eaef7c..3f3b292 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -92,6 +92,7 @@
 	if (nl)
 		printk("\n");
 	show_regs(fp);
+	add_taint(TAINT_DIE);
 	spin_unlock_irq(&die_lock);
 	/* do_exit() should take care of panic'ing from an interrupt
 	 * context so we don't handle it here
diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S
index 19db874..c0aac3f 100644
--- a/arch/ppc/kernel/vmlinux.lds.S
+++ b/arch/ppc/kernel/vmlinux.lds.S
@@ -130,10 +130,7 @@
   __ftr_fixup : { *(__ftr_fixup) }
   __stop___ftr_fixup = .;
 
-  . = ALIGN(4096);
-  __per_cpu_start = .;
-  .data.percpu  : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(4096)
 
 #ifdef CONFIG_BLK_DEV_INITRD
   . = ALIGN(4096);
diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c
index 465f451..b98244e 100644
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -96,6 +96,7 @@
 	struct mm_struct *mm = current->mm;
 	siginfo_t info;
 	int code = SEGV_MAPERR;
+	int fault;
 #if defined(CONFIG_4xx) || defined (CONFIG_BOOKE)
 	int is_write = error_code & ESR_DST;
 #else
@@ -249,20 +250,18 @@
 	 * the fault.
 	 */
  survive:
-        switch (handle_mm_fault(mm, vma, address, is_write)) {
-        case VM_FAULT_MINOR:
-                current->min_flt++;
-                break;
-        case VM_FAULT_MAJOR:
-                current->maj_flt++;
-                break;
-        case VM_FAULT_SIGBUS:
-                goto do_sigbus;
-        case VM_FAULT_OOM:
-                goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, is_write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	/*
diff --git a/arch/ppc/mm/tlb.c b/arch/ppc/mm/tlb.c
index fa29740..4ff260b 100644
--- a/arch/ppc/mm/tlb.c
+++ b/arch/ppc/mm/tlb.c
@@ -27,6 +27,7 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
+#include <linux/pagemap.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 
diff --git a/arch/ppc/platforms/4xx/bamboo.c b/arch/ppc/platforms/4xx/bamboo.c
index 349660b..017623c 100644
--- a/arch/ppc/platforms/4xx/bamboo.c
+++ b/arch/ppc/platforms/4xx/bamboo.c
@@ -29,6 +29,7 @@
 #include <linux/tty.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 #include <linux/ethtool.h>
 
 #include <asm/system.h>
diff --git a/arch/ppc/platforms/4xx/bubinga.c b/arch/ppc/platforms/4xx/bubinga.c
index 1a7f075..cd696be 100644
--- a/arch/ppc/platforms/4xx/bubinga.c
+++ b/arch/ppc/platforms/4xx/bubinga.c
@@ -21,6 +21,7 @@
 #include <linux/tty.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 
 #include <asm/system.h>
 #include <asm/pci-bridge.h>
diff --git a/arch/ppc/platforms/4xx/cpci405.c b/arch/ppc/platforms/4xx/cpci405.c
index 8474b05..2e7e25d 100644
--- a/arch/ppc/platforms/4xx/cpci405.c
+++ b/arch/ppc/platforms/4xx/cpci405.c
@@ -23,6 +23,7 @@
 #include <asm/todc.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 #include <asm/ocp.h>
 #include <asm/ibm_ocp_pci.h>
 #include <platforms/4xx/ibm405gp.h>
diff --git a/arch/ppc/platforms/4xx/ebony.c b/arch/ppc/platforms/4xx/ebony.c
index f0f9cc8..05d7184 100644
--- a/arch/ppc/platforms/4xx/ebony.c
+++ b/arch/ppc/platforms/4xx/ebony.c
@@ -32,6 +32,7 @@
 #include <linux/tty.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
diff --git a/arch/ppc/platforms/4xx/luan.c b/arch/ppc/platforms/4xx/luan.c
index 61706ef..4b16961 100644
--- a/arch/ppc/platforms/4xx/luan.c
+++ b/arch/ppc/platforms/4xx/luan.c
@@ -30,6 +30,7 @@
 #include <linux/tty.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
diff --git a/arch/ppc/platforms/4xx/ocotea.c b/arch/ppc/platforms/4xx/ocotea.c
index 5e994e1..fd0f971 100644
--- a/arch/ppc/platforms/4xx/ocotea.c
+++ b/arch/ppc/platforms/4xx/ocotea.c
@@ -30,6 +30,7 @@
 #include <linux/tty.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
diff --git a/arch/ppc/platforms/4xx/taishan.c b/arch/ppc/platforms/4xx/taishan.c
index 5d9af8d..888c492 100644
--- a/arch/ppc/platforms/4xx/taishan.c
+++ b/arch/ppc/platforms/4xx/taishan.c
@@ -30,6 +30,7 @@
 #include <linux/tty.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/nand.h>
diff --git a/arch/ppc/platforms/4xx/yucca.c b/arch/ppc/platforms/4xx/yucca.c
index 346787d..a83b0ba 100644
--- a/arch/ppc/platforms/4xx/yucca.c
+++ b/arch/ppc/platforms/4xx/yucca.c
@@ -31,6 +31,7 @@
 #include <linux/tty.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
diff --git a/arch/ppc/platforms/85xx/sbc8560.c b/arch/ppc/platforms/85xx/sbc8560.c
index 1d10ab9..3d7addb 100644
--- a/arch/ppc/platforms/85xx/sbc8560.c
+++ b/arch/ppc/platforms/85xx/sbc8560.c
@@ -26,6 +26,7 @@
 #include <linux/serial.h>
 #include <linux/tty.h>	/* for linux/serial_core.h */
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 #include <linux/initrd.h>
 #include <linux/module.h>
 #include <linux/fsl_devices.h>
diff --git a/arch/ppc/platforms/chestnut.c b/arch/ppc/platforms/chestnut.c
index a764ae7..248684f 100644
--- a/arch/ppc/platforms/chestnut.c
+++ b/arch/ppc/platforms/chestnut.c
@@ -25,6 +25,7 @@
 #include <linux/ide.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 #include <linux/mtd/physmap.h>
 #include <asm/system.h>
 #include <asm/pgtable.h>
diff --git a/arch/ppc/platforms/ev64260.c b/arch/ppc/platforms/ev64260.c
index 4957a7b..976270d 100644
--- a/arch/ppc/platforms/ev64260.c
+++ b/arch/ppc/platforms/ev64260.c
@@ -35,6 +35,7 @@
 #include <linux/serial.h>
 #include <linux/tty.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 #else
 #include <linux/mv643xx.h>
 #endif
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c
index 6f21110..3c56654 100644
--- a/arch/ppc/platforms/prep_setup.c
+++ b/arch/ppc/platforms/prep_setup.c
@@ -69,9 +69,6 @@
 
 TODC_ALLOC();
 
-unsigned char ucBoardRev;
-unsigned char ucBoardRevMaj, ucBoardRevMin;
-
 extern unsigned char prep_nvram_read_val(int addr);
 extern void prep_nvram_write_val(int addr,
 				 unsigned char val);
diff --git a/arch/ppc/platforms/radstone_ppc7d.c b/arch/ppc/platforms/radstone_ppc7d.c
index b558607..44d4398 100644
--- a/arch/ppc/platforms/radstone_ppc7d.c
+++ b/arch/ppc/platforms/radstone_ppc7d.c
@@ -35,6 +35,7 @@
 #include <linux/serial.h>
 #include <linux/tty.h>		/* for linux/serial_core.h */
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 #include <linux/mv643xx.h>
 #include <linux/netdevice.h>
 #include <linux/platform_device.h>
diff --git a/arch/ppc/platforms/spruce.c b/arch/ppc/platforms/spruce.c
index 3c78427..f4de50b 100644
--- a/arch/ppc/platforms/spruce.c
+++ b/arch/ppc/platforms/spruce.c
@@ -27,6 +27,7 @@
 #include <linux/serial.h>
 #include <linux/tty.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
diff --git a/arch/ppc/syslib/Makefile b/arch/ppc/syslib/Makefile
index 95694159..543795b 100644
--- a/arch/ppc/syslib/Makefile
+++ b/arch/ppc/syslib/Makefile
@@ -7,6 +7,7 @@
 
 wdt-mpc8xx-$(CONFIG_8xx_WDT)	+= m8xx_wdt.o
 
+obj-$(CONFIG_PPC_INDIRECT_PCI)	+= indirect_pci.o
 obj-$(CONFIG_PPCBUG_NVRAM)	+= prep_nvram.o
 obj-$(CONFIG_PPC_OCP)		+= ocp.o
 obj-$(CONFIG_IBM_OCP)		+= ibm_ocp.o
diff --git a/arch/ppc/syslib/indirect_pci.c b/arch/ppc/syslib/indirect_pci.c
new file mode 100644
index 0000000..83b323a
--- /dev/null
+++ b/arch/ppc/syslib/indirect_pci.c
@@ -0,0 +1,134 @@
+/*
+ * Support for indirect PCI bridges.
+ *
+ * Copyright (C) 1998 Gabriel Paubert.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+
+#ifdef CONFIG_PPC_INDIRECT_PCI_BE
+#define PCI_CFG_OUT out_be32
+#else
+#define PCI_CFG_OUT out_le32
+#endif
+
+static int
+indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
+		     int len, u32 *val)
+{
+	struct pci_controller *hose = bus->sysdata;
+	volatile void __iomem *cfg_data;
+	u8 cfg_type = 0;
+
+	if (ppc_md.pci_exclude_device)
+		if (ppc_md.pci_exclude_device(bus->number, devfn))
+			return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (hose->set_cfg_type)
+		if (bus->number != hose->first_busno)
+			cfg_type = 1;
+
+	PCI_CFG_OUT(hose->cfg_addr,
+		 (0x80000000 | ((bus->number - hose->bus_offset) << 16)
+		  | (devfn << 8) | ((offset & 0xfc) | cfg_type)));
+
+	/*
+	 * Note: the caller has already checked that offset is
+	 * suitably aligned and that len is 1, 2 or 4.
+	 */
+	cfg_data = hose->cfg_data + (offset & 3);
+	switch (len) {
+	case 1:
+		*val = in_8(cfg_data);
+		break;
+	case 2:
+		*val = in_le16(cfg_data);
+		break;
+	default:
+		*val = in_le32(cfg_data);
+		break;
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
+		      int len, u32 val)
+{
+	struct pci_controller *hose = bus->sysdata;
+	volatile void __iomem *cfg_data;
+	u8 cfg_type = 0;
+
+	if (ppc_md.pci_exclude_device)
+		if (ppc_md.pci_exclude_device(bus->number, devfn))
+			return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (hose->set_cfg_type)
+		if (bus->number != hose->first_busno)
+			cfg_type = 1;
+
+	PCI_CFG_OUT(hose->cfg_addr,
+		 (0x80000000 | ((bus->number - hose->bus_offset) << 16)
+		  | (devfn << 8) | ((offset & 0xfc) | cfg_type)));
+
+	/*
+	 * Note: the caller has already checked that offset is
+	 * suitably aligned and that len is 1, 2 or 4.
+	 */
+	cfg_data = hose->cfg_data + (offset & 3);
+	switch (len) {
+	case 1:
+		out_8(cfg_data, val);
+		break;
+	case 2:
+		out_le16(cfg_data, val);
+		break;
+	default:
+		out_le32(cfg_data, val);
+		break;
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops indirect_pci_ops =
+{
+	indirect_read_config,
+	indirect_write_config
+};
+
+void __init
+setup_indirect_pci_nomap(struct pci_controller* hose, void __iomem * cfg_addr,
+	void __iomem * cfg_data)
+{
+	hose->cfg_addr = cfg_addr;
+	hose->cfg_data = cfg_data;
+	hose->ops = &indirect_pci_ops;
+}
+
+void __init
+setup_indirect_pci(struct pci_controller* hose, u32 cfg_addr, u32 cfg_data)
+{
+	unsigned long base = cfg_addr & PAGE_MASK;
+	void __iomem *mbase, *addr, *data;
+
+	mbase = ioremap(base, PAGE_SIZE);
+	addr = mbase + (cfg_addr & ~PAGE_MASK);
+	if ((cfg_data & PAGE_MASK) != base)
+		mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
+	data = mbase + (cfg_data & ~PAGE_MASK);
+	setup_indirect_pci_nomap(hose, addr, data);
+}
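Both this arch/ppc copy and the reworked arch/powerpc accessors assemble the same indirect configuration address word before touching cfg_data: an enable bit, the bus number, devfn and the register offset, with the powerpc variant optionally folding in extended-register bits for offsets above 0xff.  A standalone sketch of that encoding, using illustrative bus/devfn values only:

	#include <stdio.h>
	#include <stdint.h>

	/* Enable bit | bus | devfn | register, as written to cfg_addr. */
	static uint32_t cfg_addr_word(uint32_t bus, uint32_t devfn,
				      uint32_t offset, int ext_reg)
	{
		uint32_t reg;

		if (ext_reg)	/* extended config space, offset > 0xff */
			reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
		else
			reg = offset & 0xfc;

		return 0x80000000 | (bus << 16) | (devfn << 8) | reg;
	}

	int main(void)
	{
		/* bus 0, devfn 0x18, standard register at offset 0x00 */
		printf("0x%08x\n", cfg_addr_word(0, 0x18, 0x00, 0));
		/* same device, extended register at offset 0x100 */
		printf("0x%08x\n", cfg_addr_word(0, 0x18, 0x100, 1));
		return 0;
	}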
diff --git a/arch/ppc/syslib/virtex_devices.c b/arch/ppc/syslib/virtex_devices.c
index 1654678..ace4ec0 100644
--- a/arch/ppc/syslib/virtex_devices.c
+++ b/arch/ppc/syslib/virtex_devices.c
@@ -71,6 +71,21 @@
 	}, \
 }
 
+/*
+ * ML300/ML403 Video Device: shortcut macro for single instance
+ */
+#define XPAR_TFT(num) { \
+	.name = "xilinxfb", \
+	.id = num, \
+	.num_resources = 1, \
+	.resource = (struct resource[]) { \
+		{ \
+			.start = XPAR_TFT_##num##_BASEADDR, \
+			.end = XPAR_TFT_##num##_BASEADDR+7, \
+			.flags = IORESOURCE_IO, \
+		}, \
+	}, \
+}
 
 /* UART 8250 driver platform data table */
 struct plat_serial8250_port virtex_serial_platform_data[] = {
@@ -146,20 +161,17 @@
 	XPAR_SYSACE(1),
 #endif
 
-	/* ML300/403 reference design framebuffer */
 #if defined(XPAR_TFT_0_BASEADDR)
-	{
-		.name		= "xilinxfb",
-		.id		= 0,
-		.num_resources	= 1,
-		.resource = (struct resource[]) {
-			{
-				.start	= XPAR_TFT_0_BASEADDR,
-				.end	= XPAR_TFT_0_BASEADDR+7,
-				.flags	= IORESOURCE_IO,
-			},
-		},
-	},
+	XPAR_TFT(0),
+#endif
+#if defined(XPAR_TFT_1_BASEADDR)
+	XPAR_TFT(1),
+#endif
+#if defined(XPAR_TFT_2_BASEADDR)
+	XPAR_TFT(2),
+#endif
+#if defined(XPAR_TFT_3_BASEADDR)
+	XPAR_TFT(3),
 #endif
 };
 
diff --git a/arch/ppc/syslib/virtex_devices.h b/arch/ppc/syslib/virtex_devices.h
index 3d4be14..9f38d92 100644
--- a/arch/ppc/syslib/virtex_devices.h
+++ b/arch/ppc/syslib/virtex_devices.h
@@ -31,4 +31,11 @@
  */
 int virtex_device_fixup(struct platform_device *dev);
 
+/* SPI Controller IP */
+struct xspi_platform_data {
+	s16 bus_num;
+	u16 num_chipselect;
+	u32 speed_hz;
+};
+
 #endif  /* __ASM_VIRTEX_DEVICES_H__ */
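The new xspi_platform_data structure is presumably what board code hands to the Xilinx SPI controller driver as platform data; a hypothetical initializer, with every value chosen purely for illustration:

	/* Hypothetical board description -- bus number, chip-select count
	 * and reference clock are examples only, not taken from any board. */
	static struct xspi_platform_data example_spi_pdata = {
		.bus_num	= 0,
		.num_chipselect	= 1,
		.speed_hz	= 50000000,
	};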
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 485b60c..2aae23d 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.21
-# Thu May 10 15:18:19 2007
+# Linux kernel version: 2.6.22
+# Tue Jul 17 12:50:23 2007
 #
 CONFIG_MMU=y
 CONFIG_ZONE_DMA=y
@@ -32,12 +32,11 @@
 CONFIG_LOCALVERSION_AUTO=y
 CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
-# CONFIG_IPC_NS is not set
 CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
-# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
 CONFIG_AUDIT=y
 # CONFIG_AUDITSYSCALL is not set
 CONFIG_IKCONFIG=y
@@ -61,20 +60,19 @@
 CONFIG_ELF_CORE=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_SLUB_DEBUG=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
 CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
-
-#
-# Loadable module support
-#
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
@@ -82,12 +80,9 @@
 # CONFIG_MODULE_SRCVERSION_ALL is not set
 CONFIG_KMOD=y
 CONFIG_STOP_MACHINE=y
-
-#
-# Block layer
-#
 CONFIG_BLOCK=y
 # CONFIG_BLK_DEV_IO_TRACE is not set
+CONFIG_BLK_DEV_BSG=y
 
 #
 # IO Schedulers
@@ -151,6 +146,7 @@
 CONFIG_SPLIT_PTLOCK_CPUS=4
 CONFIG_RESOURCES_64BIT=y
 CONFIG_ZONE_DMA_FLAG=1
+CONFIG_VIRT_TO_BUS=y
 CONFIG_HOLES_IN_ZONE=y
 
 #
@@ -248,25 +244,13 @@
 # CONFIG_IPV6_MULTIPLE_TABLES is not set
 # CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set
-
-#
-# DCCP Configuration (EXPERIMENTAL)
-#
 # CONFIG_IP_DCCP is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
 CONFIG_IP_SCTP=m
 # CONFIG_SCTP_DBG_MSG is not set
 # CONFIG_SCTP_DBG_OBJCNT is not set
 # CONFIG_SCTP_HMAC_NONE is not set
 # CONFIG_SCTP_HMAC_SHA1 is not set
 CONFIG_SCTP_HMAC_MD5=y
-
-#
-# TIPC Configuration (EXPERIMENTAL)
-#
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
@@ -293,6 +277,7 @@
 # CONFIG_NET_SCH_HTB is not set
 # CONFIG_NET_SCH_HFSC is not set
 CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RR=m
 CONFIG_NET_SCH_RED=m
 CONFIG_NET_SCH_SFQ=m
 CONFIG_NET_SCH_TEQL=m
@@ -317,10 +302,14 @@
 CONFIG_NET_CLS_RSVP=m
 CONFIG_NET_CLS_RSVP6=m
 # CONFIG_NET_EMATCH is not set
-# CONFIG_NET_CLS_ACT is not set
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+# CONFIG_NET_ACT_GACT is not set
+# CONFIG_NET_ACT_MIRRED is not set
+# CONFIG_NET_ACT_PEDIT is not set
+# CONFIG_NET_ACT_SIMP is not set
 CONFIG_NET_CLS_POLICE=y
 # CONFIG_NET_CLS_IND is not set
-CONFIG_NET_ESTIMATOR=y
 
 #
 # Network testing
@@ -329,6 +318,7 @@
 # CONFIG_NET_TCPPROBE is not set
 # CONFIG_AF_RXRPC is not set
 # CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
 # CONFIG_PCMCIA is not set
 CONFIG_CCW=y
 
@@ -345,15 +335,8 @@
 # CONFIG_DEBUG_DRIVER is not set
 # CONFIG_DEBUG_DEVRES is not set
 CONFIG_SYS_HYPERVISOR=y
-
-#
-# Connector - unified userspace <-> kernelspace linker
-#
 # CONFIG_CONNECTOR is not set
-
-#
-# Block devices
-#
+CONFIG_BLK_DEV=y
 # CONFIG_BLK_DEV_COW_COMMON is not set
 CONFIG_BLK_DEV_LOOP=m
 # CONFIG_BLK_DEV_CRYPTOLOOP is not set
@@ -376,17 +359,15 @@
 CONFIG_DASD_FBA=y
 CONFIG_DASD_DIAG=y
 CONFIG_DASD_EER=y
-
-#
-# Misc devices
-#
-# CONFIG_BLINK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_EEPROM_93CX6 is not set
 
 #
 # SCSI device support
 #
 # CONFIG_RAID_ATTRS is not set
 CONFIG_SCSI=y
+# CONFIG_SCSI_DMA is not set
 # CONFIG_SCSI_TGT is not set
 CONFIG_SCSI_NETLINK=y
 CONFIG_SCSI_PROC_FS=y
@@ -447,40 +428,21 @@
 CONFIG_DM_ZERO=y
 CONFIG_DM_MULTIPATH=y
 # CONFIG_DM_MULTIPATH_EMC is not set
+# CONFIG_DM_MULTIPATH_RDAC is not set
 # CONFIG_DM_DELAY is not set
-
-#
-# Network device support
-#
 CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_IFB is not set
 CONFIG_DUMMY=m
 CONFIG_BONDING=m
+# CONFIG_MACVLAN is not set
 CONFIG_EQUALIZER=m
 CONFIG_TUN=m
-
-#
-# Ethernet (10 or 100Mbit)
-#
 CONFIG_NET_ETHERNET=y
 # CONFIG_MII is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-
-#
-# Ethernet (10000 Mbit)
-#
-CONFIG_MLX4_DEBUG=y
-
-#
-# Token Ring devices
-#
+CONFIG_NETDEV_1000=y
+CONFIG_NETDEV_10000=y
 # CONFIG_TR is not set
-
-#
-# Wan interfaces
-#
 # CONFIG_WAN is not set
 
 #
@@ -511,10 +473,6 @@
 CONFIG_UNIX98_PTYS=y
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
-
-#
-# Watchdog Cards
-#
 # CONFIG_WATCHDOG is not set
 CONFIG_HW_RANDOM=m
 # CONFIG_R3964 is not set
@@ -554,6 +512,8 @@
 # CONFIG_VMCP is not set
 # CONFIG_MONREADER is not set
 CONFIG_MONWRITER=m
+CONFIG_S390_VMUR=m
+# CONFIG_POWER_SUPPLY is not set
 
 #
 # File systems
@@ -655,7 +615,6 @@
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
 # CONFIG_AFS_FS is not set
-# CONFIG_9P_FS is not set
 
 #
 # Partition Types
@@ -712,6 +671,7 @@
 CONFIG_DEBUG_FS=y
 CONFIG_HEADERS_CHECK=y
 CONFIG_DEBUG_KERNEL=y
+# CONFIG_SCHED_DEBUG is not set
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_TIMER_STATS is not set
 # CONFIG_DEBUG_SLAB is not set
@@ -740,10 +700,6 @@
 #
 # CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
 CONFIG_CRYPTO=y
 CONFIG_CRYPTO_ALGAPI=y
 CONFIG_CRYPTO_BLKCIPHER=y
@@ -782,10 +738,7 @@
 # CONFIG_CRYPTO_CRC32C is not set
 CONFIG_CRYPTO_CAMELLIA=m
 # CONFIG_CRYPTO_TEST is not set
-
-#
-# Hardware crypto devices
-#
+CONFIG_CRYPTO_HW=y
 # CONFIG_CRYPTO_SHA1_S390 is not set
 # CONFIG_CRYPTO_SHA256_S390 is not set
 # CONFIG_CRYPTO_DES_S390 is not set
@@ -800,6 +753,7 @@
 CONFIG_BITREVERSE=m
 # CONFIG_CRC_CCITT is not set
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
 CONFIG_CRC32=m
 # CONFIG_LIBCRC32C is not set
 CONFIG_PLIST=y
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index a057ebf..d305731 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -240,8 +240,8 @@
 	[INSTR_RXY_FRRD]  = { 0xff, F_8,D20_20,X_12,B_16,0,0 },/* e.g. ley   */
 	[INSTR_RX_FRRD]	  = { 0xff, F_8,D_20,X_12,B_16,0,0 },  /* e.g. ae    */
 	[INSTR_RX_RRRD]	  = { 0xff, R_8,D_20,X_12,B_16,0,0 },  /* e.g. l     */
-	[INSTR_RX_URRD]	  = { 0x00, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc    */
-	[INSTR_SI_URD]	  = { 0x00, D_20,B_16,U8_8,0,0,0 },    /* e.g. cli   */
+	[INSTR_RX_URRD]	  = { 0xff, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc    */
+	[INSTR_SI_URD]	  = { 0xff, D_20,B_16,U8_8,0,0,0 },    /* e.g. cli   */
 	[INSTR_SIY_URD]	  = { 0xff, D20_20,B_16,U8_8,0,0,0 },  /* e.g. tmy   */
 	[INSTR_SSE_RDRD]  = { 0xff, D_20,B_16,D_36,B_32,0,0 }, /* e.g. mvsdk */
 	[INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
@@ -1190,7 +1190,8 @@
 			else if (operand->flags & OPERAND_CR)
 				ptr += sprintf(ptr, "%%c%i", value);
 			else if (operand->flags & OPERAND_PCREL)
-				ptr += sprintf(ptr, "%lx", value + addr);
+				ptr += sprintf(ptr, "%lx", (signed int) value
+								      + addr);
 			else if (operand->flags & OPERAND_SIGNED)
 				ptr += sprintf(ptr, "%i", value);
 			else
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 2a8f087..f4503ca 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -294,7 +294,6 @@
 static int
 do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
 {
-	unsigned long tmp;
 	ptrace_area parea; 
 	int copied, ret;
 
@@ -304,10 +303,7 @@
 		/* Remove high order bit from address (only for 31 bit). */
 		addr &= PSW_ADDR_INSN;
 		/* read word at location addr. */
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		if (copied != sizeof(tmp))
-			return -EIO;
-		return put_user(tmp, (unsigned long __force __user *) data);
+		return generic_ptrace_peekdata(child, addr, data);
 
 	case PTRACE_PEEKUSR:
 		/* read the word at location addr in the USER area. */
@@ -318,10 +314,7 @@
 		/* Remove high order bit from address (only for 31 bit). */
 		addr &= PSW_ADDR_INSN;
 		/* write the word at location addr. */
-		copied = access_process_vm(child, addr, &data, sizeof(data),1);
-		if (copied != sizeof(data))
-			return -EIO;
-		return 0;
+		return generic_ptrace_pokedata(child, addr, data);
 
 	case PTRACE_POKEUSR:
 		/* write the word at location addr in the USER area */
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 515ff90..da69247 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -12,7 +12,6 @@
 #include <linux/kallsyms.h>
 
 static unsigned long save_context_stack(struct stack_trace *trace,
-					unsigned int *skip,
 					unsigned long sp,
 					unsigned long low,
 					unsigned long high)
@@ -28,10 +27,10 @@
 		sf = (struct stack_frame *)sp;
 		while(1) {
 			addr = sf->gprs[8] & PSW_ADDR_INSN;
-			if (!(*skip))
+			if (!trace->skip)
 				trace->entries[trace->nr_entries++] = addr;
 			else
-				(*skip)--;
+				trace->skip--;
 			if (trace->nr_entries >= trace->max_entries)
 				return sp;
 			low = sp;
@@ -48,10 +47,10 @@
 			return sp;
 		regs = (struct pt_regs *)sp;
 		addr = regs->psw.addr & PSW_ADDR_INSN;
-		if (!(*skip))
+		if (!trace->skip)
 			trace->entries[trace->nr_entries++] = addr;
 		else
-			(*skip)--;
+			trace->skip--;
 		if (trace->nr_entries >= trace->max_entries)
 			return sp;
 		low = sp;
@@ -65,20 +64,17 @@
 	unsigned long orig_sp, new_sp;
 
 	orig_sp = sp & PSW_ADDR_INSN;
-
-	new_sp = save_context_stack(trace, &trace->skip, orig_sp,
-				S390_lowcore.panic_stack - PAGE_SIZE,
-				S390_lowcore.panic_stack);
+	new_sp = save_context_stack(trace, orig_sp,
+				    S390_lowcore.panic_stack - PAGE_SIZE,
+				    S390_lowcore.panic_stack);
 	if (new_sp != orig_sp)
 		return;
-	new_sp = save_context_stack(trace, &trace->skip, new_sp,
-				S390_lowcore.async_stack - ASYNC_SIZE,
-				S390_lowcore.async_stack);
+	new_sp = save_context_stack(trace, new_sp,
+				    S390_lowcore.async_stack - ASYNC_SIZE,
+				    S390_lowcore.async_stack);
 	if (new_sp != orig_sp)
 		return;
-
-	save_context_stack(trace, &trace->skip, new_sp,
+	save_context_stack(trace, new_sp,
 			   S390_lowcore.thread_info,
 			   S390_lowcore.thread_info + THREAD_SIZE);
-	return;
 }
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 81e03b9..8ec9def 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -262,6 +262,7 @@
 	print_modules();
 	show_regs(regs);
 	bust_spinlocks(0);
+	add_taint(TAINT_DIE);
 	spin_unlock_irq(&die_lock);
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 7158a80..6ab7d4e 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -45,6 +45,8 @@
   __ex_table : { *(__ex_table) }
   __stop___ex_table = .;
 
+  NOTES
+
   BUG_TABLE
 
   .data : {			/* Data */
@@ -107,10 +109,7 @@
   . = ALIGN(2);
   __initramfs_end = .;
 #endif
-  . = ALIGN(4096);
-  __per_cpu_start = .;
-  .data.percpu  : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(4096)
   . = ALIGN(4096);
   __init_end = .;
   /* freed after init ends here */
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 63181671..60604b2 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -20,6 +20,7 @@
 {
 	struct vm_area_struct *vma;
 	int ret = -EFAULT;
+	int fault;
 
 	if (in_atomic())
 		return ret;
@@ -44,20 +45,18 @@
 	}
 
 survive:
-	switch (handle_mm_fault(mm, vma, address, write_access)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto out_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, write_access);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto out_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	ret = 0;
 out:
 	up_read(&mm->mmap_sem);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index d855cdb..5405519 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -307,6 +307,7 @@
 	unsigned long address;
 	int space;
 	int si_code;
+	int fault;
 
 	if (notify_page_fault(regs, error_code))
 		return;
@@ -377,23 +378,22 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		do_sigbus(regs, error_code, address);
-		return;
-	case VM_FAULT_OOM:
-		if (do_out_of_memory(regs, error_code, address))
-			goto survive;
-		return;
-	default:
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM) {
+			if (do_out_of_memory(regs, error_code, address))
+				goto survive;
+			return;
+		} else if (fault & VM_FAULT_SIGBUS) {
+			do_sigbus(regs, error_code, address);
+			return;
+		}
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 
         up_read(&mm->mmap_sem);
 	/*
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
index f2eaa48..891d1d4 100644
--- a/arch/sh/kernel/ptrace.c
+++ b/arch/sh/kernel/ptrace.c
@@ -91,17 +91,8 @@
 	switch (request) {
 	/* when I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
-	case PTRACE_PEEKDATA: {
-		unsigned long tmp;
-		int copied;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		ret = -EIO;
-		if (copied != sizeof(tmp))
-			break;
-		ret = put_user(tmp,(unsigned long __user *) data);
-		break;
-	}
+	case PTRACE_PEEKDATA:
+		ret = generic_ptrace_peekdata(child, addr, data);
 
 	/* read the word at location addr in the USER area. */
 	case PTRACE_PEEKUSR: {
@@ -135,10 +126,7 @@
 	/* when I and D space are separate, this will have to be fixed. */
 	case PTRACE_POKETEXT: /* write the word at location addr. */
 	case PTRACE_POKEDATA:
-		ret = 0;
-		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
-			break;
-		ret = -EIO;
+		ret = generic_ptrace_pokedata(child, addr, data);
 		break;
 
 	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 05a40f3..502d43e 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -103,6 +103,7 @@
 			 (unsigned long)task_stack_page(current));
 
 	bust_spinlocks(0);
+	add_taint(TAINT_DIE);
 	spin_unlock_irq(&die_lock);
 
 	if (kexec_should_crash(current))
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 0696402..5ba2161 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -60,10 +60,7 @@
   . = ALIGN(PAGE_SIZE);
   __nosave_end = .;
 
-  . = ALIGN(PAGE_SIZE);
-  __per_cpu_start = .;
-  .data.percpu : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(PAGE_SIZE)
   .data.cacheline_aligned : { *(.data.cacheline_aligned) }
 
   _edata = .;			/* End of data section */
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 0b3eaf6..964c676 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -33,6 +33,7 @@
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	int si_code;
+	int fault;
 	siginfo_t info;
 
 	trace_hardirqs_on();
@@ -124,20 +125,18 @@
 	 * the fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
-		case VM_FAULT_MINOR:
-			tsk->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			tsk->maj_flt++;
-			break;
-		case VM_FAULT_SIGBUS:
-			goto do_sigbus;
-		case VM_FAULT_OOM:
+	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
-		default:
-			BUG();
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/sh64/kernel/ptrace.c b/arch/sh64/kernel/ptrace.c
index 4e95e18..df06c64 100644
--- a/arch/sh64/kernel/ptrace.c
+++ b/arch/sh64/kernel/ptrace.c
@@ -129,17 +129,9 @@
 	switch (request) {
 	/* when I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
-	case PTRACE_PEEKDATA: {
-		unsigned long tmp;
-		int copied;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		ret = -EIO;
-		if (copied != sizeof(tmp))
-			break;
-		ret = put_user(tmp,(unsigned long *) data);
+	case PTRACE_PEEKDATA:
+		ret = generic_ptrace_peekdata(child, addr, data);
 		break;
-	}
 
 	/* read the word at location addr in the USER area. */
 	case PTRACE_PEEKUSR: {
@@ -166,10 +158,7 @@
 	/* when I and D space are separate, this will have to be fixed. */
 	case PTRACE_POKETEXT: /* write the word at location addr. */
 	case PTRACE_POKEDATA:
-		ret = 0;
-		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
-			break;
-		ret = -EIO;
+		ret = generic_ptrace_pokedata(child, addr, data);
 		break;
 
 	case PTRACE_POKEUSR:
diff --git a/arch/sh64/kernel/vmlinux.lds.S b/arch/sh64/kernel/vmlinux.lds.S
index 02aea86..8ac9c7c 100644
--- a/arch/sh64/kernel/vmlinux.lds.S
+++ b/arch/sh64/kernel/vmlinux.lds.S
@@ -87,7 +87,10 @@
 
   . = ALIGN(PAGE_SIZE);
   __per_cpu_start = .;
-  .data.percpu : C_PHYS(.data.percpu) { *(.data.percpu) }
+  .data.percpu : C_PHYS(.data.percpu) {
+	*(.data.percpu)
+	*(.data.percpu.shared_aligned)
+  }
   __per_cpu_end = . ;
   .data.cacheline_aligned : C_PHYS(.data.cacheline_aligned) { *(.data.cacheline_aligned) }
 
diff --git a/arch/sh64/lib/c-checksum.c b/arch/sh64/lib/c-checksum.c
index 4b26763..bd55017 100644
--- a/arch/sh64/lib/c-checksum.c
+++ b/arch/sh64/lib/c-checksum.c
@@ -213,3 +213,4 @@
 
 	return (__wsum)result;
 }
+EXPORT_SYMBOL(csum_tcpudp_nofold);
diff --git a/arch/sh64/mm/fault.c b/arch/sh64/mm/fault.c
index 3cd93ba..0d069d8 100644
--- a/arch/sh64/mm/fault.c
+++ b/arch/sh64/mm/fault.c
@@ -127,6 +127,7 @@
 	struct vm_area_struct * vma;
 	const struct exception_table_entry *fixup;
 	pte_t *pte;
+	int fault;
 
 #if defined(CONFIG_SH64_PROC_TLB)
         ++calls_to_do_slow_page_fault;
@@ -221,18 +222,19 @@
 	 * the fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	default:
-		goto out_of_memory;
+	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
+
 	/* If we get here, the page fault has been handled.  Do the TLB refill
 	   now from the newly-setup PTE, to avoid having to fault again right
 	   away on the same instruction. */
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 8567cc9..603d83a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -21,6 +21,9 @@
 	bool
 	default y
 
+config ARCH_NO_VIRT_TO_BUS
+	def_bool y
+
 source "init/Kconfig"
 
 menu "General machine setup"
@@ -217,6 +220,9 @@
 
 endif
 
+config NO_DMA
+	def_bool !PCI
+
 config SUN_OPENPROMFS
 	tristate "Openprom tree appears in /proc/openprom"
 	help
diff --git a/arch/sparc/kernel/traps.c b/arch/sparc/kernel/traps.c
index dc9ffea..3bc3bff 100644
--- a/arch/sparc/kernel/traps.c
+++ b/arch/sparc/kernel/traps.c
@@ -101,6 +101,7 @@
 
 	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
 	show_regs(regs);
+	add_taint(TAINT_DIE);
 
 	__SAVE; __SAVE; __SAVE; __SAVE;
 	__SAVE; __SAVE; __SAVE; __SAVE;
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index f75a1b8..47583887 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -65,10 +65,7 @@
   __initramfs_end = .;
 #endif
 
-  . = ALIGN(4096);
-  __per_cpu_start = .;
-  .data.percpu  : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(4096)
   . = ALIGN(4096);
   __init_end = .;
   . = ALIGN(32);
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
index c348336..50747fe 100644
--- a/arch/sparc/mm/fault.c
+++ b/arch/sparc/mm/fault.c
@@ -226,6 +226,7 @@
 	unsigned long g2;
 	siginfo_t info;
 	int from_user = !(regs->psr & PSR_PS);
+	int fault;
 
 	if(text_fault)
 		address = regs->pc;
@@ -289,19 +290,18 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_MINOR:
-	default:
-		current->min_flt++;
-		break;
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	up_read(&mm->mmap_sem);
 	return;
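The handle_mm_fault() conversions repeated above (ppc, s390, sh, sh64, sparc) all switch from treating the return value as one exclusive code to treating it as a bitmask, so error handling and major/minor accounting become independent tests.  A standalone sketch of that decode pattern; the flag values here are placeholders, not the kernel's actual definitions:

	#include <stdio.h>

	/* Placeholder flag values; only the bitmask structure mirrors the
	 * new handle_mm_fault() return convention. */
	#define VM_FAULT_OOM	0x0001
	#define VM_FAULT_SIGBUS	0x0002
	#define VM_FAULT_MAJOR	0x0004
	#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)

	static const char *classify(unsigned int fault)
	{
		if (fault & VM_FAULT_ERROR)
			return (fault & VM_FAULT_OOM) ? "oom" : "sigbus";
		return (fault & VM_FAULT_MAJOR) ? "major fault" : "minor fault";
	}

	int main(void)
	{
		printf("%s\n", classify(VM_FAULT_MAJOR));	/* major fault */
		printf("%s\n", classify(0));			/* minor fault */
		printf("%s\n", classify(VM_FAULT_OOM));		/* oom */
		return 0;
	}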
 
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index b84b6af..df6ee71 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -62,6 +62,9 @@
 	bool
 	default y
 
+config ARCH_NO_VIRT_TO_BUS
+	def_bool y
+
 choice
 	prompt "Kernel page size"
 	default SPARC64_PAGE_SIZE_8KB
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 65840a6..45ebf91 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.22-rc1
-# Mon May 14 04:17:48 2007
+# Linux kernel version: 2.6.22
+# Tue Jul 17 01:19:52 2007
 #
 CONFIG_SPARC=y
 CONFIG_SPARC64=y
@@ -42,12 +42,11 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
-# CONFIG_IPC_NS is not set
 CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
-# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
 # CONFIG_AUDIT is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=18
@@ -82,22 +81,15 @@
 CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
-
-#
-# Loadable module support
-#
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 CONFIG_KMOD=y
-
-#
-# Block layer
-#
 CONFIG_BLOCK=y
 CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BLK_DEV_BSG=y
 
 #
 # IO Schedulers
@@ -156,12 +148,15 @@
 CONFIG_RESOURCES_64BIT=y
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_NR_QUICK=1
+CONFIG_VIRT_TO_BUS=y
 CONFIG_SBUS=y
 CONFIG_SBUSCHAR=y
 CONFIG_SUN_AUXIO=y
 CONFIG_SUN_IO=y
+# CONFIG_SUN_LDOMS is not set
 CONFIG_PCI=y
 CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_SYSCALL=y
 CONFIG_ARCH_SUPPORTS_MSI=y
 CONFIG_PCI_MSI=y
 # CONFIG_PCI_DEBUG is not set
@@ -246,10 +241,6 @@
 # CONFIG_IPV6_MULTIPLE_TABLES is not set
 # CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set
-
-#
-# DCCP Configuration (EXPERIMENTAL)
-#
 CONFIG_IP_DCCP=m
 CONFIG_INET_DCCP_DIAG=m
 CONFIG_IP_DCCP_ACKVEC=y
@@ -269,15 +260,7 @@
 #
 # CONFIG_IP_DCCP_DEBUG is not set
 # CONFIG_NET_DCCPPROBE is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
 # CONFIG_IP_SCTP is not set
-
-#
-# TIPC Configuration (EXPERIMENTAL)
-#
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
@@ -314,6 +297,7 @@
 # CONFIG_MAC80211 is not set
 # CONFIG_IEEE80211 is not set
 # CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
 
 #
 # Device Drivers
@@ -328,26 +312,10 @@
 # CONFIG_DEBUG_DRIVER is not set
 # CONFIG_DEBUG_DEVRES is not set
 # CONFIG_SYS_HYPERVISOR is not set
-
-#
-# Connector - unified userspace <-> kernelspace linker
-#
 CONFIG_CONNECTOR=m
 # CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
 # CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-# CONFIG_PNPACPI is not set
-
-#
-# Block devices
-#
+CONFIG_BLK_DEV=y
 # CONFIG_BLK_DEV_FD is not set
 # CONFIG_BLK_CPQ_DA is not set
 # CONFIG_BLK_CPQ_CISS_DA is not set
@@ -364,18 +332,11 @@
 CONFIG_CDROM_PKTCDVD_BUFFERS=8
 CONFIG_CDROM_PKTCDVD_WCACHE=y
 CONFIG_ATA_OVER_ETH=m
-
-#
-# Misc devices
-#
+CONFIG_MISC_DEVICES=y
 # CONFIG_PHANTOM is not set
+# CONFIG_EEPROM_93CX6 is not set
 # CONFIG_SGI_IOC4 is not set
 # CONFIG_TIFM_CORE is not set
-# CONFIG_BLINK is not set
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDE=y
 
@@ -440,6 +401,7 @@
 #
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
 # CONFIG_SCSI_TGT is not set
 CONFIG_SCSI_NETLINK=y
 CONFIG_SCSI_PROC_FS=y
@@ -505,7 +467,6 @@
 # CONFIG_SCSI_DC395x is not set
 # CONFIG_SCSI_DC390T is not set
 # CONFIG_SCSI_DEBUG is not set
-# CONFIG_SCSI_ESP_CORE is not set
 # CONFIG_SCSI_SUNESP is not set
 # CONFIG_SCSI_SRP is not set
 # CONFIG_ATA is not set
@@ -545,30 +506,16 @@
 #
 # CONFIG_FIREWIRE is not set
 # CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
 # CONFIG_I2O is not set
-
-#
-# Network device support
-#
 CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
 CONFIG_DUMMY=m
 # CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
 # CONFIG_EQUALIZER is not set
 # CONFIG_TUN is not set
-
-#
-# ARCnet devices
-#
 # CONFIG_ARCNET is not set
 # CONFIG_PHYLIB is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
 CONFIG_NET_ETHERNET=y
 CONFIG_MII=m
 # CONFIG_SUNLANCE is not set
@@ -578,10 +525,6 @@
 # CONFIG_SUNGEM is not set
 CONFIG_CASSINI=m
 # CONFIG_NET_VENDOR_3COM is not set
-
-#
-# Tulip family network device support
-#
 # CONFIG_NET_TULIP is not set
 # CONFIG_HP100 is not set
 CONFIG_NET_PCI=y
@@ -617,7 +560,6 @@
 # CONFIG_SIS190 is not set
 # CONFIG_SKGE is not set
 # CONFIG_SKY2 is not set
-# CONFIG_SK98LIN is not set
 # CONFIG_VIA_VELOCITY is not set
 CONFIG_TIGON3=m
 CONFIG_BNX2=m
@@ -631,11 +573,6 @@
 # CONFIG_MYRI10GE is not set
 # CONFIG_NETXEN_NIC is not set
 # CONFIG_MLX4_CORE is not set
-CONFIG_MLX4_DEBUG=y
-
-#
-# Token Ring devices
-#
 # CONFIG_TR is not set
 
 #
@@ -665,6 +602,7 @@
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+# CONFIG_PPPOL2TP is not set
 # CONFIG_SLIP is not set
 CONFIG_SLHC=m
 # CONFIG_NET_FC is not set
@@ -677,10 +615,6 @@
 # ISDN subsystem
 #
 # CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
 # CONFIG_PHONE is not set
 
 #
@@ -688,6 +622,7 @@
 #
 CONFIG_INPUT=y
 # CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
 
 #
 # Userland interfaces
@@ -733,7 +668,6 @@
 # CONFIG_INPUT_POWERMATE is not set
 # CONFIG_INPUT_YEALINK is not set
 # CONFIG_INPUT_UINPUT is not set
-# CONFIG_INPUT_POLLDEV is not set
 
 #
 # Hardware I/O ports
@@ -773,10 +707,6 @@
 # CONFIG_SERIAL_JSM is not set
 CONFIG_UNIX98_PTYS=y
 # CONFIG_LEGACY_PTYS is not set
-
-#
-# IPMI
-#
 # CONFIG_IPMI_HANDLER is not set
 # CONFIG_WATCHDOG is not set
 # CONFIG_HW_RANDOM is not set
@@ -785,10 +715,6 @@
 # CONFIG_APPLICOM is not set
 # CONFIG_DRM is not set
 # CONFIG_RAW_DRIVER is not set
-
-#
-# TPM devices
-#
 # CONFIG_TCG_TPM is not set
 CONFIG_DEVPORT=y
 CONFIG_I2C=y
@@ -822,6 +748,7 @@
 # CONFIG_I2C_SIS5595 is not set
 # CONFIG_I2C_SIS630 is not set
 # CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_TAOS_EVM is not set
 # CONFIG_I2C_STUB is not set
 # CONFIG_I2C_TINY_USB is not set
 # CONFIG_I2C_VIA is not set
@@ -833,11 +760,13 @@
 #
 # CONFIG_SENSORS_DS1337 is not set
 # CONFIG_SENSORS_DS1374 is not set
+# CONFIG_DS1682 is not set
 # CONFIG_SENSORS_EEPROM is not set
 # CONFIG_SENSORS_PCF8574 is not set
 # CONFIG_SENSORS_PCA9539 is not set
 # CONFIG_SENSORS_PCF8591 is not set
 # CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
 # CONFIG_I2C_DEBUG_CORE is not set
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
@@ -848,11 +777,8 @@
 #
 # CONFIG_SPI is not set
 # CONFIG_SPI_MASTER is not set
-
-#
-# Dallas's 1-wire bus
-#
 # CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
 CONFIG_HWMON=y
 # CONFIG_HWMON_VID is not set
 # CONFIG_SENSORS_ABITUGURU is not set
@@ -949,6 +875,8 @@
 # CONFIG_FB_ASILIANT is not set
 # CONFIG_FB_IMSTT is not set
 # CONFIG_FB_SBUS is not set
+# CONFIG_FB_XVR500 is not set
+# CONFIG_FB_XVR2500 is not set
 # CONFIG_FB_S1D13XXX is not set
 # CONFIG_FB_NVIDIA is not set
 # CONFIG_FB_RIVA is not set
@@ -970,9 +898,6 @@
 # CONFIG_FB_TRIDENT is not set
 # CONFIG_FB_ARK is not set
 # CONFIG_FB_PM3 is not set
-# CONFIG_FB_XVR500 is not set
-# CONFIG_FB_XVR2500 is not set
-# CONFIG_FB_PCI is not set
 # CONFIG_FB_VIRTUAL is not set
 
 #
@@ -1118,10 +1043,7 @@
 #
 # CONFIG_SOUND_PRIME is not set
 CONFIG_AC97_BUS=m
-
-#
-# HID Devices
-#
+CONFIG_HID_SUPPORT=y
 CONFIG_HID=y
 # CONFIG_HID_DEBUG is not set
 
@@ -1132,10 +1054,7 @@
 # CONFIG_USB_HIDINPUT_POWERBOOK is not set
 # CONFIG_HID_FF is not set
 CONFIG_USB_HIDDEV=y
-
-#
-# USB support
-#
+CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB_ARCH_HAS_OHCI=y
 CONFIG_USB_ARCH_HAS_EHCI=y
@@ -1157,7 +1076,6 @@
 # CONFIG_USB_EHCI_SPLIT_ISO is not set
 # CONFIG_USB_EHCI_ROOT_HUB_TT is not set
 # CONFIG_USB_EHCI_TT_NEWSCHED is not set
-# CONFIG_USB_EHCI_BIG_ENDIAN_MMIO is not set
 # CONFIG_USB_ISP116X_HCD is not set
 CONFIG_USB_OHCI_HCD=y
 # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
@@ -1165,6 +1083,7 @@
 CONFIG_USB_OHCI_LITTLE_ENDIAN=y
 CONFIG_USB_UHCI_HCD=m
 # CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
 
 #
 # USB Device Class drivers
@@ -1256,17 +1175,9 @@
 #
 # LED Triggers
 #
-
-#
-# InfiniBand support
-#
 # CONFIG_INFINIBAND is not set
 
 #
-# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
-#
-
-#
 # Real Time Clock
 #
 # CONFIG_RTC_CLASS is not set
@@ -1387,7 +1298,6 @@
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
 # CONFIG_AFS_FS is not set
-# CONFIG_9P_FS is not set
 
 #
 # Partition Types
@@ -1465,8 +1375,10 @@
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_DEBUG_SHIRQ is not set
 CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHED_DEBUG is not set
 CONFIG_SCHEDSTATS=y
 # CONFIG_TIMER_STATS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
 # CONFIG_DEBUG_RT_MUTEXES is not set
 # CONFIG_RT_MUTEX_TESTER is not set
 # CONFIG_DEBUG_SPINLOCK is not set
@@ -1496,10 +1408,10 @@
 CONFIG_KEYS=y
 # CONFIG_KEYS_DEBUG_PROC_KEYS is not set
 # CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
+CONFIG_XOR_BLOCKS=m
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
 CONFIG_CRYPTO=y
 CONFIG_CRYPTO_ALGAPI=y
 CONFIG_CRYPTO_BLKCIPHER=y
@@ -1539,10 +1451,7 @@
 CONFIG_CRYPTO_CRC32C=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_TEST=m
-
-#
-# Hardware crypto devices
-#
+CONFIG_CRYPTO_HW=y
 
 #
 # Library routines
diff --git a/arch/sparc64/kernel/ds.c b/arch/sparc64/kernel/ds.c
index 1c58710..fa1f04d 100644
--- a/arch/sparc64/kernel/ds.c
+++ b/arch/sparc64/kernel/ds.c
@@ -228,7 +228,7 @@
 	return NULL;
 }
 
-static int ds_send(struct ldc_channel *lp, void *data, int len)
+static int __ds_send(struct ldc_channel *lp, void *data, int len)
 {
 	int err, limit = 1000;
 
@@ -243,6 +243,18 @@
 	return err;
 }
 
+static int ds_send(struct ldc_channel *lp, void *data, int len)
+{
+	unsigned long flags;
+	int err;
+
+	spin_lock_irqsave(&ds_lock, flags);
+	err = __ds_send(lp, data, len);
+	spin_unlock_irqrestore(&ds_lock, flags);
+
+	return err;
+}
+
 struct ds_md_update_req {
 	__u64				req_num;
 };
@@ -267,6 +279,8 @@
 
 	printk(KERN_INFO PFX "Machine description update.\n");
 
+	mdesc_update();
+
 	memset(&pkt, 0, sizeof(pkt));
 	pkt.data.tag.type = DS_DATA;
 	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
@@ -275,8 +289,6 @@
 	pkt.res.result = DS_OK;
 
 	ds_send(lp, &pkt, sizeof(pkt));
-
-	mdesc_update();
 }
 
 struct ds_shutdown_req {
@@ -391,18 +403,6 @@
 	__u32				str_off;
 };
 
-/* DR cpu requests get queued onto the work list by the
- * dr_cpu_data() callback.  The list is protected by
- * ds_lock, and processed by dr_cpu_process() in order.
- */
-static LIST_HEAD(dr_cpu_work_list);
-static DECLARE_WAIT_QUEUE_HEAD(dr_cpu_wait);
-
-struct dr_cpu_queue_entry {
-	struct list_head		list;
-	char				req[0];
-};
-
 static void __dr_cpu_send_error(struct ds_cap_state *cp, struct ds_data *data)
 {
 	struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
@@ -425,7 +425,7 @@
 
 	pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag);
 
-	ds_send(dp->lp, &pkt, msg_len);
+	__ds_send(dp->lp, &pkt, msg_len);
 }
 
 static void dr_cpu_send_error(struct ds_cap_state *cp, struct ds_data *data)
@@ -555,7 +555,7 @@
 	}
 
 	spin_lock_irqsave(&ds_lock, flags);
-	ds_send(ds_info->lp, resp, resp_len);
+	__ds_send(ds_info->lp, resp, resp_len);
 	spin_unlock_irqrestore(&ds_lock, flags);
 
 	kfree(resp);
@@ -596,7 +596,7 @@
 	}
 
 	spin_lock_irqsave(&ds_lock, flags);
-	ds_send(ds_info->lp, resp, resp_len);
+	__ds_send(ds_info->lp, resp, resp_len);
 	spin_unlock_irqrestore(&ds_lock, flags);
 
 	kfree(resp);
@@ -604,107 +604,49 @@
 	return 0;
 }
 
-static void process_dr_cpu_list(struct ds_cap_state *cp)
-{
-	struct dr_cpu_queue_entry *qp, *tmp;
-	unsigned long flags;
-	LIST_HEAD(todo);
-	cpumask_t mask;
-
-	spin_lock_irqsave(&ds_lock, flags);
-	list_splice(&dr_cpu_work_list, &todo);
-	INIT_LIST_HEAD(&dr_cpu_work_list);
-	spin_unlock_irqrestore(&ds_lock, flags);
-
-	list_for_each_entry_safe(qp, tmp, &todo, list) {
-		struct ds_data *data = (struct ds_data *) qp->req;
-		struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
-		u32 *cpu_list = (u32 *) (tag + 1);
-		u64 req_num = tag->req_num;
-		unsigned int i;
-		int err;
-
-		switch (tag->type) {
-		case DR_CPU_CONFIGURE:
-		case DR_CPU_UNCONFIGURE:
-		case DR_CPU_FORCE_UNCONFIGURE:
-			break;
-
-		default:
-			dr_cpu_send_error(cp, data);
-			goto next;
-		}
-
-		purge_dups(cpu_list, tag->num_records);
-
-		cpus_clear(mask);
-		for (i = 0; i < tag->num_records; i++) {
-			if (cpu_list[i] == CPU_SENTINEL)
-				continue;
-
-			if (cpu_list[i] < NR_CPUS)
-				cpu_set(cpu_list[i], mask);
-		}
-
-		if (tag->type == DR_CPU_CONFIGURE)
-			err = dr_cpu_configure(cp, req_num, &mask);
-		else
-			err = dr_cpu_unconfigure(cp, req_num, &mask);
-
-		if (err)
-			dr_cpu_send_error(cp, data);
-
-next:
-		list_del(&qp->list);
-		kfree(qp);
-	}
-}
-
-static int dr_cpu_thread(void *__unused)
-{
-	struct ds_cap_state *cp;
-	DEFINE_WAIT(wait);
-
-	cp = find_cap_by_string("dr-cpu");
-
-	while (1) {
-		prepare_to_wait(&dr_cpu_wait, &wait, TASK_INTERRUPTIBLE);
-		if (list_empty(&dr_cpu_work_list))
-			schedule();
-		finish_wait(&dr_cpu_wait, &wait);
-
-		if (kthread_should_stop())
-			break;
-
-		process_dr_cpu_list(cp);
-	}
-
-	return 0;
-}
-
 static void dr_cpu_data(struct ldc_channel *lp,
-			struct ds_cap_state *dp,
+			struct ds_cap_state *cp,
 			void *buf, int len)
 {
-	struct dr_cpu_queue_entry *qp;
-	struct ds_data *dpkt = buf;
-	struct dr_cpu_tag *rp;
+	struct ds_data *data = buf;
+	struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
+	u32 *cpu_list = (u32 *) (tag + 1);
+	u64 req_num = tag->req_num;
+	cpumask_t mask;
+	unsigned int i;
+	int err;
 
-	rp = (struct dr_cpu_tag *) (dpkt + 1);
+	switch (tag->type) {
+	case DR_CPU_CONFIGURE:
+	case DR_CPU_UNCONFIGURE:
+	case DR_CPU_FORCE_UNCONFIGURE:
+		break;
 
-	qp = kmalloc(sizeof(struct dr_cpu_queue_entry) + len, GFP_ATOMIC);
-	if (!qp) {
-		struct ds_cap_state *cp;
-
-		cp = find_cap_by_string("dr-cpu");
-		__dr_cpu_send_error(cp, dpkt);
-	} else {
-		memcpy(&qp->req, buf, len);
-		list_add_tail(&qp->list, &dr_cpu_work_list);
-		wake_up(&dr_cpu_wait);
+	default:
+		dr_cpu_send_error(cp, data);
+		return;
 	}
+
+	purge_dups(cpu_list, tag->num_records);
+
+	cpus_clear(mask);
+	for (i = 0; i < tag->num_records; i++) {
+		if (cpu_list[i] == CPU_SENTINEL)
+			continue;
+
+		if (cpu_list[i] < NR_CPUS)
+			cpu_set(cpu_list[i], mask);
+	}
+
+	if (tag->type == DR_CPU_CONFIGURE)
+		err = dr_cpu_configure(cp, req_num, &mask);
+	else
+		err = dr_cpu_unconfigure(cp, req_num, &mask);
+
+	if (err)
+		dr_cpu_send_error(cp, data);
 }
-#endif
+#endif /* CONFIG_HOTPLUG_CPU */
 
 struct ds_pri_msg {
 	__u64				req_num;
@@ -820,7 +762,7 @@
 		ds_var_doorbell = 0;
 		ds_var_response = -1;
 
-		ds_send(dp->lp, &pkt, msg_len);
+		__ds_send(dp->lp, &pkt, msg_len);
 		spin_unlock_irqrestore(&ds_lock, flags);
 
 		loops = 1000;
@@ -904,7 +846,7 @@
 		pbuf.req.minor = 0;
 		strcpy(pbuf.req.svc_id, cp->service_id);
 
-		err = ds_send(lp, &pbuf, msg_len);
+		err = __ds_send(lp, &pbuf, msg_len);
 		if (err > 0)
 			cp->state = CAP_STATE_REG_SENT;
 	}
@@ -960,27 +902,97 @@
 	return -ECONNRESET;
 }
 
+static void __send_ds_nack(struct ds_info *dp, u64 handle)
+{
+	struct ds_data_nack nack = {
+		.tag = {
+			.type = DS_NACK,
+			.len = (sizeof(struct ds_data_nack) -
+				sizeof(struct ds_msg_tag)),
+		},
+		.handle = handle,
+		.result = DS_INV_HDL,
+	};
+
+	__ds_send(dp->lp, &nack, sizeof(nack));
+}
+
+static LIST_HEAD(ds_work_list);
+static DECLARE_WAIT_QUEUE_HEAD(ds_wait);
+
+struct ds_queue_entry {
+	struct list_head		list;
+	int				req_len;
+	int				__pad;
+	u64				req[0];
+};
+
+static void process_ds_work(void)
+{
+	struct ds_queue_entry *qp, *tmp;
+	static struct ds_info *dp;
+	unsigned long flags;
+	LIST_HEAD(todo);
+
+	spin_lock_irqsave(&ds_lock, flags);
+	list_splice(&ds_work_list, &todo);
+	INIT_LIST_HEAD(&ds_work_list);
+	spin_unlock_irqrestore(&ds_lock, flags);
+
+	dp = ds_info;
+
+	list_for_each_entry_safe(qp, tmp, &todo, list) {
+		struct ds_data *dpkt = (struct ds_data *) qp->req;
+		struct ds_cap_state *cp = find_cap(dpkt->handle);
+		int req_len = qp->req_len;
+
+		if (!cp) {
+			printk(KERN_ERR PFX "Data for unknown handle %lu\n",
+			       dpkt->handle);
+
+			spin_lock_irqsave(&ds_lock, flags);
+			__send_ds_nack(dp, dpkt->handle);
+			spin_unlock_irqrestore(&ds_lock, flags);
+		} else {
+			cp->data(dp->lp, cp, dpkt, req_len);
+		}
+
+		list_del(&qp->list);
+		kfree(qp);
+	}
+}
+
+static int ds_thread(void *__unused)
+{
+	DEFINE_WAIT(wait);
+
+	while (1) {
+		prepare_to_wait(&ds_wait, &wait, TASK_INTERRUPTIBLE);
+		if (list_empty(&ds_work_list))
+			schedule();
+		finish_wait(&ds_wait, &wait);
+
+		if (kthread_should_stop())
+			break;
+
+		process_ds_work();
+	}
+
+	return 0;
+}
+
 static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len)
 {
 	struct ds_data *dpkt = (struct ds_data *) pkt;
-	struct ds_cap_state *cp = find_cap(dpkt->handle);
+	struct ds_queue_entry *qp;
 
-	if (!cp) {
-		struct ds_data_nack nack = {
-			.tag = {
-				.type = DS_NACK,
-				.len = (sizeof(struct ds_data_nack) -
-					sizeof(struct ds_msg_tag)),
-			},
-			.handle = dpkt->handle,
-			.result = DS_INV_HDL,
-		};
-
-		printk(KERN_ERR PFX "Data for unknown handle %lu\n",
-		       dpkt->handle);
-		ds_send(dp->lp, &nack, sizeof(nack));
+	qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC);
+	if (!qp) {
+		__send_ds_nack(dp, dpkt->handle);
 	} else {
-		cp->data(dp->lp, cp, dpkt, len);
+		memcpy(&qp->req, pkt, len);
+		list_add_tail(&qp->list, &ds_work_list);
+		wake_up(&ds_wait);
 	}
 	return 0;
 }
@@ -996,11 +1008,24 @@
 	req.ver.major = 1;
 	req.ver.minor = 0;
 
-	err = ds_send(lp, &req, sizeof(req));
+	err = __ds_send(lp, &req, sizeof(req));
 	if (err > 0)
 		dp->hs_state = DS_HS_START;
 }
 
+static void ds_reset(struct ds_info *dp)
+{
+	int i;
+
+	dp->hs_state = 0;
+
+	for (i = 0; i < ARRAY_SIZE(ds_states); i++) {
+		struct ds_cap_state *cp = &ds_states[i];
+
+		cp->state = CAP_STATE_UNKNOWN;
+	}
+}
+
 static void ds_event(void *arg, int event)
 {
 	struct ds_info *dp = arg;
@@ -1016,6 +1041,12 @@
 		return;
 	}
 
+	if (event == LDC_EVENT_RESET) {
+		ds_reset(dp);
+		spin_unlock_irqrestore(&ds_lock, flags);
+		return;
+	}
+
 	if (event != LDC_EVENT_DATA_READY) {
 		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
 		spin_unlock_irqrestore(&ds_lock, flags);
@@ -1148,9 +1179,7 @@
 	for (i = 0; i < ARRAY_SIZE(ds_states); i++)
 		ds_states[i].handle = ((u64)i << 32);
 
-#ifdef CONFIG_HOTPLUG_CPU
-	kthread_run(dr_cpu_thread, NULL, "kdrcpud");
-#endif
+	kthread_run(ds_thread, NULL, "kldomd");
 
 	return vio_register_driver(&ds_driver);
 }
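
The ds.c changes above split the send path into __ds_send(), which expects ds_lock to already be held by the caller, and a ds_send() wrapper that takes the lock itself. A short usage sketch, with call sites condensed from the hunks above:

	/* Paths already running under ds_lock use the raw variant: */
	spin_lock_irqsave(&ds_lock, flags);
	__ds_send(ds_info->lp, resp, resp_len);
	spin_unlock_irqrestore(&ds_lock, flags);

	/* Paths called without the lock use the wrapper, which acquires
	 * and releases ds_lock around a single __ds_send() call: */
	ds_send(lp, &pkt, sizeof(pkt));
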
diff --git a/arch/sparc64/kernel/hvtramp.S b/arch/sparc64/kernel/hvtramp.S
index 76a090e..a55c252 100644
--- a/arch/sparc64/kernel/hvtramp.S
+++ b/arch/sparc64/kernel/hvtramp.S
@@ -10,6 +10,7 @@
 #include <asm/hvtramp.h>
 #include <asm/pstate.h>
 #include <asm/ptrace.h>
+#include <asm/head.h>
 #include <asm/asi.h>
 
 	.text
@@ -28,7 +29,7 @@
 	 * First setup basic privileged cpu state.
 	 */
 hv_cpu_startup:
-	wrpr		%g0, 0, %gl
+	SET_GL(0)
 	wrpr		%g0, 15, %pil
 	wrpr		%g0, 0, %canrestore
 	wrpr		%g0, 0, %otherwin
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
index 62a3897..302ba5e 100644
--- a/arch/sparc64/kernel/mdesc.c
+++ b/arch/sparc64/kernel/mdesc.c
@@ -137,7 +137,7 @@
 		       sizeof(struct mdesc_hdr) +
 		       mdesc_size);
 
-	base = kmalloc(handle_size + 15, GFP_KERNEL);
+	base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
 	if (base) {
 		struct mdesc_handle *hp;
 		unsigned long addr;
@@ -214,18 +214,83 @@
 }
 EXPORT_SYMBOL(mdesc_release);
 
-static void do_mdesc_update(struct work_struct *work)
+static DEFINE_MUTEX(mdesc_mutex);
+static struct mdesc_notifier_client *client_list;
+
+void mdesc_register_notifier(struct mdesc_notifier_client *client)
+{
+	u64 node;
+
+	mutex_lock(&mdesc_mutex);
+	client->next = client_list;
+	client_list = client;
+
+	mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
+		client->add(cur_mdesc, node);
+
+	mutex_unlock(&mdesc_mutex);
+}
+
+/* Run 'func' on nodes which are in A but not in B.  */
+static void invoke_on_missing(const char *name,
+			      struct mdesc_handle *a,
+			      struct mdesc_handle *b,
+			      void (*func)(struct mdesc_handle *, u64))
+{
+	u64 node;
+
+	mdesc_for_each_node_by_name(a, node, name) {
+		const u64 *id = mdesc_get_property(a, node, "id", NULL);
+		int found = 0;
+		u64 fnode;
+
+		mdesc_for_each_node_by_name(b, fnode, name) {
+			const u64 *fid = mdesc_get_property(b, fnode,
+							    "id", NULL);
+
+			if (*id == *fid) {
+				found = 1;
+				break;
+			}
+		}
+		if (!found)
+			func(a, node);
+	}
+}
+
+static void notify_one(struct mdesc_notifier_client *p,
+		       struct mdesc_handle *old_hp,
+		       struct mdesc_handle *new_hp)
+{
+	invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
+	invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
+}
+
+static void mdesc_notify_clients(struct mdesc_handle *old_hp,
+				 struct mdesc_handle *new_hp)
+{
+	struct mdesc_notifier_client *p = client_list;
+
+	while (p) {
+		notify_one(p, old_hp, new_hp);
+		p = p->next;
+	}
+}
+
+void mdesc_update(void)
 {
 	unsigned long len, real_len, status;
 	struct mdesc_handle *hp, *orig_hp;
 	unsigned long flags;
 
+	mutex_lock(&mdesc_mutex);
+
 	(void) sun4v_mach_desc(0UL, 0UL, &len);
 
 	hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
 	if (!hp) {
 		printk(KERN_ERR "MD: mdesc alloc fails\n");
-		return;
+		goto out;
 	}
 
 	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
@@ -234,25 +299,25 @@
 		       status);
 		atomic_dec(&hp->refcnt);
 		mdesc_free(hp);
-		return;
+		goto out;
 	}
 
 	spin_lock_irqsave(&mdesc_lock, flags);
 	orig_hp = cur_mdesc;
 	cur_mdesc = hp;
+	spin_unlock_irqrestore(&mdesc_lock, flags);
 
+	mdesc_notify_clients(orig_hp, hp);
+
+	spin_lock_irqsave(&mdesc_lock, flags);
 	if (atomic_dec_and_test(&orig_hp->refcnt))
 		mdesc_free(orig_hp);
 	else
 		list_add(&orig_hp->list, &mdesc_zombie_list);
 	spin_unlock_irqrestore(&mdesc_lock, flags);
-}
 
-static DECLARE_WORK(mdesc_update_work, do_mdesc_update);
-
-void mdesc_update(void)
-{
-	schedule_work(&mdesc_update_work);
+out:
+	mutex_unlock(&mdesc_mutex);
 }
 
 static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
@@ -278,13 +343,14 @@
 	u64 last_node = hp->mdesc.node_sz / 16;
 	u64 ret;
 
-	if (from_node == MDESC_NODE_NULL)
-		from_node = 0;
-
-	if (from_node >= last_node)
+	if (from_node == MDESC_NODE_NULL) {
+		ret = from_node = 0;
+	} else if (from_node >= last_node) {
 		return MDESC_NODE_NULL;
+	} else {
+		ret = ep[from_node].d.val;
+	}
 
-	ret = ep[from_node].d.val;
 	while (ret < last_node) {
 		if (ep[ret].tag != MD_NODE)
 			return MDESC_NODE_NULL;
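
mdesc_update() now swaps in the new machine description and notifies registered clients about nodes that appeared or disappeared relative to the old one. A sketch of a client of this interface, modelled on the VIO bus registrations further down; the my_* names and the callback bodies are placeholders, not part of the patch:

	static void my_add(struct mdesc_handle *hp, u64 node)
	{
		/* called for a node of this name present in the new MD
		 * but absent from the old one */
	}

	static void my_remove(struct mdesc_handle *hp, u64 node)
	{
		/* called for a node present in the old MD but gone from
		 * the new one */
	}

	static struct mdesc_notifier_client my_client = {
		.add		= my_add,
		.remove		= my_remove,
		.node_name	= "virtual-device-port",
	};

	/* Invokes ->add() for every matching node already present, then
	 * chains the client onto client_list so future mdesc_update()
	 * calls notify it as well. */
	mdesc_register_notifier(&my_client);
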
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index dc928e4..aafde3d 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -454,9 +454,9 @@
 		   ncpus_probed,
 		   num_online_cpus(),
 		   dcache_parity_tl1_occurred,
-		   icache_parity_tl1_occurred,
+		   icache_parity_tl1_occurred
 #ifndef CONFIG_SMP
-		   cpu_data(0).clock_tick
+		   , cpu_data(0).clock_tick
 #endif
 		);
 #ifdef CONFIG_SMP
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index 203e873..fb13775 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -289,9 +289,7 @@
 	struct rt_signal_frame __user *sf;
 	unsigned long tpc, tnpc, tstate;
 	__siginfo_fpu_t __user *fpu_save;
-	mm_segment_t old_fs;
 	sigset_t set;
-	stack_t st;
 	int err;
 
 	/* Always make any pending restarted system calls return -EINTR */
@@ -327,20 +325,13 @@
 		err |= restore_fpu_state(regs, &sf->fpu_state);
 
 	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
-	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));
-	
+	err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
+
 	if (err)
 		goto segv;
-		
+
 	regs->tpc = tpc;
 	regs->tnpc = tnpc;
-	
-	/* It is more difficult to avoid calling this function than to
-	   call it and ignore errors.  */
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
-	set_fs(old_fs);
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
 	spin_lock_irq(&current->sighand->siglock);
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 00a9e32..6ef2d29 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2225,6 +2225,7 @@
 	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
 	__asm__ __volatile__("flushw");
 	__show_regs(regs);
+	add_taint(TAINT_DIE);
 	if (regs->tstate & TSTATE_PRIV) {
 		struct reg_window *rw = (struct reg_window *)
 			(regs->u_regs[UREG_FP] + STACK_BIAS);
diff --git a/arch/sparc64/kernel/vio.c b/arch/sparc64/kernel/vio.c
index 49569b4..8d3cc4f 100644
--- a/arch/sparc64/kernel/vio.c
+++ b/arch/sparc64/kernel/vio.c
@@ -201,10 +201,11 @@
 static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
 				      struct device *parent)
 {
-	const char *type, *compat;
+	const char *type, *compat, *bus_id_name;
 	struct device_node *dp;
 	struct vio_dev *vdev;
 	int err, tlen, clen;
+	const u64 *id;
 
 	type = mdesc_get_property(hp, mp, "device-type", &tlen);
 	if (!type) {
@@ -220,6 +221,16 @@
 		return NULL;
 	}
 
+	bus_id_name = type;
+	if (!strcmp(type, "domain-services-port"))
+		bus_id_name = "ds";
+
+	if (strlen(bus_id_name) >= KOBJ_NAME_LEN - 4) {
+		printk(KERN_ERR "VIO: bus_id_name [%s] is too long.\n",
+		       bus_id_name);
+		return NULL;
+	}
+
 	compat = mdesc_get_property(hp, mp, "device-type", &clen);
 	if (!compat) {
 		clen = 0;
@@ -249,7 +260,14 @@
 
 	vio_fill_channel_info(hp, mp, vdev);
 
-	snprintf(vdev->dev.bus_id, BUS_ID_SIZE, "%lx", mp);
+	id = mdesc_get_property(hp, mp, "id", NULL);
+	if (!id)
+		snprintf(vdev->dev.bus_id, BUS_ID_SIZE, "%s",
+			 bus_id_name);
+	else
+		snprintf(vdev->dev.bus_id, BUS_ID_SIZE, "%s-%lu",
+			 bus_id_name, *id);
+
 	vdev->dev.parent = parent;
 	vdev->dev.bus = &vio_bus_type;
 	vdev->dev.release = vio_dev_release;
@@ -269,6 +287,8 @@
 	}
 	vdev->dp = dp;
 
+	printk(KERN_ERR "VIO: Adding device %s\n", vdev->dev.bus_id);
+
 	err = device_register(&vdev->dev);
 	if (err) {
 		printk(KERN_ERR "VIO: Could not register device %s, err=%d\n",
@@ -283,45 +303,45 @@
 	return vdev;
 }
 
-static void walk_tree(struct mdesc_handle *hp, u64 n, struct vio_dev *parent)
+static void vio_add(struct mdesc_handle *hp, u64 node)
 {
-	u64 a;
+	(void) vio_create_one(hp, node, &root_vdev->dev);
+}
 
-	mdesc_for_each_arc(a, hp, n, MDESC_ARC_TYPE_FWD) {
-		struct vio_dev *vdev;
-		u64 target;
+static int vio_md_node_match(struct device *dev, void *arg)
+{
+	struct vio_dev *vdev = to_vio_dev(dev);
 
-		target = mdesc_arc_target(hp, a);
-		vdev = vio_create_one(hp, target, &parent->dev);
-		if (vdev)
-			walk_tree(hp, target, vdev);
+	if (vdev->mp == (u64) arg)
+		return 1;
+
+	return 0;
+}
+
+static void vio_remove(struct mdesc_handle *hp, u64 node)
+{
+	struct device *dev;
+
+	dev = device_find_child(&root_vdev->dev, (void *) node,
+				vio_md_node_match);
+	if (dev) {
+		printk(KERN_INFO "VIO: Removing device %s\n", dev->bus_id);
+
+		device_unregister(dev);
 	}
 }
 
-static void create_devices(struct mdesc_handle *hp, u64 root)
-{
-	u64 mp;
+static struct mdesc_notifier_client vio_device_notifier = {
+	.add		= vio_add,
+	.remove		= vio_remove,
+	.node_name	= "virtual-device-port",
+};
 
-	root_vdev = vio_create_one(hp, root, NULL);
-	if (!root_vdev) {
-		printk(KERN_ERR "VIO: Coult not create root device.\n");
-		return;
-	}
-
-	walk_tree(hp, root, root_vdev);
-
-	/* Domain services is odd as it doesn't sit underneath the
-	 * channel-devices node, so we plug it in manually.
-	 */
-	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "domain-services");
-	if (mp != MDESC_NODE_NULL) {
-		struct vio_dev *parent = vio_create_one(hp, mp,
-							&root_vdev->dev);
-
-		if (parent)
-			walk_tree(hp, mp, parent);
-	}
-}
+static struct mdesc_notifier_client vio_ds_notifier = {
+	.add		= vio_add,
+	.remove		= vio_remove,
+	.node_name	= "domain-services-port",
+};
 
 const char *channel_devices_node = "channel-devices";
 const char *channel_devices_compat = "SUNW,sun4v-channel-devices";
@@ -381,11 +401,19 @@
 
 	cdev_cfg_handle = *cfg_handle;
 
-	create_devices(hp, root);
+	root_vdev = vio_create_one(hp, root, NULL);
+	err = -ENODEV;
+	if (!root_vdev) {
+		printk(KERN_ERR "VIO: Could not create root device.\n");

+		goto out_release;
+	}
+
+	mdesc_register_notifier(&vio_device_notifier);
+	mdesc_register_notifier(&vio_ds_notifier);
 
 	mdesc_release(hp);
 
-	return 0;
+	return err;
 
 out_release:
 	mdesc_release(hp);
diff --git a/arch/sparc64/kernel/viohs.c b/arch/sparc64/kernel/viohs.c
index 15613ad..09126fc 100644
--- a/arch/sparc64/kernel/viohs.c
+++ b/arch/sparc64/kernel/viohs.c
@@ -78,6 +78,24 @@
 	return 0;
 }
 
+static void flush_rx_dring(struct vio_driver_state *vio)
+{
+	struct vio_dring_state *dr;
+	u64 ident;
+
+	BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));
+
+	dr = &vio->drings[VIO_DRIVER_RX_RING];
+	ident = dr->ident;
+
+	BUG_ON(!vio->desc_buf);
+	kfree(vio->desc_buf);
+	vio->desc_buf = NULL;
+
+	memset(dr, 0, sizeof(*dr));
+	dr->ident = ident;
+}
+
 void vio_link_state_change(struct vio_driver_state *vio, int event)
 {
 	if (event == LDC_EVENT_UP) {
@@ -98,6 +116,16 @@
 			break;
 		}
 		start_handshake(vio);
+	} else if (event == LDC_EVENT_RESET) {
+		vio->hs_state = VIO_HS_INVALID;
+
+		if (vio->dr_state & VIO_DR_STATE_RXREG)
+			flush_rx_dring(vio);
+
+		vio->dr_state = 0x00;
+		memset(&vio->ver, 0, sizeof(vio->ver));
+
+		ldc_disconnect(vio->lp);
 	}
 }
 EXPORT_SYMBOL(vio_link_state_change);
@@ -396,6 +424,8 @@
 	if (vio->dr_state & VIO_DR_STATE_RXREG)
 		goto send_nack;
 
+	BUG_ON(vio->desc_buf);
+
 	vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
 	if (!vio->desc_buf)
 		goto send_nack;
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 3ad10f3..4818617 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -90,10 +90,8 @@
   __initramfs_end = .;
 #endif
 
-  . = ALIGN(PAGE_SIZE);
-  __per_cpu_start = .;
-  .data.percpu  : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(PAGE_SIZE)
+
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
   __bss_start = .;
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index b582024..17123e9 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -278,7 +278,7 @@
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned int insn = 0;
-	int si_code, fault_code;
+	int si_code, fault_code, fault;
 	unsigned long address, mm_rss;
 
 	fault_code = get_thread_fault_code();
@@ -415,20 +415,18 @@
 			goto bad_area;
 	}
 
-	switch (handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE))) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 
 	up_read(&mm->mmap_sem);
 
diff --git a/arch/sparc64/solaris/socksys.c b/arch/sparc64/solaris/socksys.c
index e94f6e5..7736411 100644
--- a/arch/sparc64/solaris/socksys.c
+++ b/arch/sparc64/solaris/socksys.c
@@ -199,6 +199,5 @@
 
 void __exit cleanup_socksys(void)
 {
-	if (unregister_chrdev(30, "socksys"))
-		printk ("Couldn't unregister socksys character device\n");
+	unregister_chrdev(30, "socksys");
 }
diff --git a/arch/um/drivers/pcap_user.c b/arch/um/drivers/pcap_user.c
index 483aa15..1316456 100644
--- a/arch/um/drivers/pcap_user.c
+++ b/arch/um/drivers/pcap_user.c
@@ -53,7 +53,7 @@
 			return -EIO;
 		}
 
-		pri->compiled = um_kmalloc(sizeof(struct bpf_program));
+		pri->compiled = kmalloc(sizeof(struct bpf_program), UM_GFP_KERNEL);
 		if(pri->compiled == NULL){
 			printk(UM_KERN_ERR "pcap_open : kmalloc failed\n");
 			return -ENOMEM;
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index 627742d..6916c88 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -52,17 +52,9 @@
 	switch (request) {
 		/* when I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
-	case PTRACE_PEEKDATA: {
-		unsigned long tmp;
-		int copied;
-
-		ret = -EIO;
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		if (copied != sizeof(tmp))
-			break;
-		ret = put_user(tmp, p);
+	case PTRACE_PEEKDATA:
+		ret = generic_ptrace_peekdata(child, addr, data);
 		break;
-	}
 
 	/* read the word at location addr in the USER area. */
         case PTRACE_PEEKUSR:
@@ -72,11 +64,7 @@
 	/* when I and D space are separate, this will have to be fixed. */
 	case PTRACE_POKETEXT: /* write the word at location addr. */
 	case PTRACE_POKEDATA:
-		ret = -EIO;
-		if (access_process_vm(child, addr, &data, sizeof(data), 
-				      1) != sizeof(data))
-			break;
-		ret = 0;
+		ret = generic_ptrace_pokedata(child, addr, data);
 		break;
 
 	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
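
This is the first of several ptrace conversions in this merge (v850, x86_64 and xtensa follow) that replace open-coded PEEKDATA/POKEDATA handling with common helpers. Reconstructed from the per-arch bodies removed above, the generic helpers amount to roughly the following; the real definitions live in the core ptrace code, so treat this as an approximation:

	int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
	{
		unsigned long tmp;
		int copied;

		/* read one word from the tracee ... */
		copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
		if (copied != sizeof(tmp))
			return -EIO;
		/* ... and store it at the user-supplied address */
		return put_user(tmp, (unsigned long __user *)data);
	}

	int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
	{
		/* write one word into the tracee's address space */
		int copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);

		return (copied == sizeof(data)) ? 0 : -EIO;
	}
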
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index abab90c..3850d53 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -76,23 +76,24 @@
 		goto out;
 
 	do {
+		int fault;
 survive:
-		switch (handle_mm_fault(mm, vma, address, is_write)){
-		case VM_FAULT_MINOR:
-			current->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			current->maj_flt++;
-			break;
-		case VM_FAULT_SIGBUS:
-			err = -EACCES;
-			goto out;
-		case VM_FAULT_OOM:
-			err = -ENOMEM;
-			goto out_of_memory;
-		default:
+		fault = handle_mm_fault(mm, vma, address, is_write);
+		if (unlikely(fault & VM_FAULT_ERROR)) {
+			if (fault & VM_FAULT_OOM) {
+				err = -ENOMEM;
+				goto out_of_memory;
+			} else if (fault & VM_FAULT_SIGBUS) {
+				err = -EACCES;
+				goto out;
+			}
 			BUG();
 		}
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+
 		pgd = pgd_offset(mm, address);
 		pud = pud_offset(pgd, address);
 		pmd = pmd_offset(pud, address);
diff --git a/arch/v850/kernel/ptrace.c b/arch/v850/kernel/ptrace.c
index a9b0934..a458ac9 100644
--- a/arch/v850/kernel/ptrace.c
+++ b/arch/v850/kernel/ptrace.c
@@ -117,24 +117,16 @@
 	int rval;
 
 	switch (request) {
-		unsigned long val, copied;
+		unsigned long val;
 
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
 	case PTRACE_PEEKDATA:
-		copied = access_process_vm(child, addr, &val, sizeof(val), 0);
-		rval = -EIO;
-		if (copied != sizeof(val))
-			break;
-		rval = put_user(val, (unsigned long *)data);
+		rval = generic_ptrace_peekdata(child, addr, data);
 		goto out;
 
 	case PTRACE_POKETEXT: /* write the word at location addr. */
 	case PTRACE_POKEDATA:
-		rval = 0;
-		if (access_process_vm(child, addr, &data, sizeof(data), 1)
-		    == sizeof(data))
-			break;
-		rval = -EIO;
+		rval = generic_ptrace_pokedata(child, addr, data);
 		goto out;
 
 	/* Read/write the word at location ADDR in the registers.  */
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 8bdd25a..14bf8ce 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -774,8 +774,8 @@
 source "arch/x86_64/oprofile/Kconfig"
 
 config KPROBES
-	bool "Kprobes (EXPERIMENTAL)"
-	depends on KALLSYMS && EXPERIMENTAL && MODULES
+	bool "Kprobes"
+	depends on KALLSYMS && MODULES
 	help
 	  Kprobes allows you to trap at almost any kernel address and
 	  execute a callback function.  register_kprobe() establishes
diff --git a/arch/x86_64/ia32/ia32_aout.c b/arch/x86_64/ia32/ia32_aout.c
index fe83edb..0878137 100644
--- a/arch/x86_64/ia32/ia32_aout.c
+++ b/arch/x86_64/ia32/ia32_aout.c
@@ -404,7 +404,7 @@
 
 	set_brk(current->mm->start_brk, current->mm->brk);
 
-	retval = ia32_setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
+	retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
 	if (retval < 0) { 
 		/* Someone check-me: is this error path enough? */ 
 		send_sig(SIGKILL, current, 0); 
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index 185399b..ed56a88 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -232,9 +232,6 @@
 #define load_elf_binary load_elf32_binary
 
 #define ELF_PLAT_INIT(r, load_addr)	elf32_init(r)
-#define setup_arg_pages(bprm, stack_top, exec_stack) \
-	ia32_setup_arg_pages(bprm, stack_top, exec_stack)
-int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int executable_stack);
 
 #undef start_thread
 #define start_thread(regs,new_rip,new_rsp) do { \
@@ -286,61 +283,6 @@
 	me->thread.es = __USER_DS;
 }
 
-int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
-			 int executable_stack)
-{
-	unsigned long stack_base;
-	struct vm_area_struct *mpnt;
-	struct mm_struct *mm = current->mm;
-	int i, ret;
-
-	stack_base = stack_top - MAX_ARG_PAGES * PAGE_SIZE;
-	mm->arg_start = bprm->p + stack_base;
-
-	bprm->p += stack_base;
-	if (bprm->loader)
-		bprm->loader += stack_base;
-	bprm->exec += stack_base;
-
-	mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-	if (!mpnt) 
-		return -ENOMEM; 
-
-	down_write(&mm->mmap_sem);
-	{
-		mpnt->vm_mm = mm;
-		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
-		mpnt->vm_end = stack_top;
-		if (executable_stack == EXSTACK_ENABLE_X)
-			mpnt->vm_flags = VM_STACK_FLAGS |  VM_EXEC;
-		else if (executable_stack == EXSTACK_DISABLE_X)
-			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
-		else
-			mpnt->vm_flags = VM_STACK_FLAGS;
- 		mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC) ? 
- 			PAGE_COPY_EXEC : PAGE_COPY;
-		if ((ret = insert_vm_struct(mm, mpnt))) {
-			up_write(&mm->mmap_sem);
-			kmem_cache_free(vm_area_cachep, mpnt);
-			return ret;
-		}
-		mm->stack_vm = mm->total_vm = vma_pages(mpnt);
-	} 
-
-	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
-		struct page *page = bprm->page[i];
-		if (page) {
-			bprm->page[i] = NULL;
-			install_arg_page(mpnt, page, stack_base);
-		}
-		stack_base += PAGE_SIZE;
-	}
-	up_write(&mm->mmap_sem);
-	
-	return 0;
-}
-EXPORT_SYMBOL(ia32_setup_arg_pages);
-
 #ifdef CONFIG_SYSCTL
 /* Register vsyscall32 into the ABI table */
 #include <linux/sysctl.h>
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 782dea8..3f66e970 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -719,4 +719,5 @@
 	.quad compat_sys_signalfd
 	.quad compat_sys_timerfd
 	.quad sys_eventfd
+	.quad sys32_fallocate
 ia32_syscall_end:
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index 99a78a3..bee96d6 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -879,3 +879,11 @@
 	return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
 				len, advice);
 }
+
+asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
+				unsigned offset_hi, unsigned len_lo,
+				unsigned len_hi)
+{
+	return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
+			     ((u64)len_hi << 32) | len_lo);
+}
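
sys32_fallocate() follows the usual compat convention: each 64-bit argument arrives from 32-bit userspace as a low/high pair of 32-bit values and is reassembled in the wrapper. A worked example with made-up values:

	unsigned offset_lo = 0x00001000;	/* low 32 bits  */
	unsigned offset_hi = 0x00000002;	/* high 32 bits */

	/* ((u64)0x00000002 << 32) | 0x00001000 == 0x0000000200001000 */
	u64 offset = ((u64)offset_hi << 32) | offset_lo;
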
diff --git a/arch/x86_64/kernel/acpi/sleep.c b/arch/x86_64/kernel/acpi/sleep.c
index 195b703..4277f2b 100644
--- a/arch/x86_64/kernel/acpi/sleep.c
+++ b/arch/x86_64/kernel/acpi/sleep.c
@@ -55,7 +55,7 @@
 
 /* address in low memory of the wakeup routine. */
 unsigned long acpi_wakeup_address = 0;
-unsigned long acpi_video_flags;
+unsigned long acpi_realmode_flags;
 extern char wakeup_start, wakeup_end;
 
 extern unsigned long acpi_copy_wakeup_routine(unsigned long);
@@ -103,9 +103,11 @@
 {
 	while ((str != NULL) && (*str != '\0')) {
 		if (strncmp(str, "s3_bios", 7) == 0)
-			acpi_video_flags = 1;
+			acpi_realmode_flags |= 1;
 		if (strncmp(str, "s3_mode", 7) == 0)
-			acpi_video_flags |= 2;
+			acpi_realmode_flags |= 2;
+		if (strncmp(str, "s3_beep", 7) == 0)
+			acpi_realmode_flags |= 4;
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");
diff --git a/arch/x86_64/kernel/acpi/wakeup.S b/arch/x86_64/kernel/acpi/wakeup.S
index 8550a6f..13f1480 100644
--- a/arch/x86_64/kernel/acpi/wakeup.S
+++ b/arch/x86_64/kernel/acpi/wakeup.S
@@ -16,6 +16,21 @@
 # cs = 0x1234, eip = 0x05
 #
 
+#define BEEP \
+	inb	$97, %al; 	\
+	outb	%al, $0x80; 	\
+	movb	$3, %al; 	\
+	outb	%al, $97; 	\
+	outb	%al, $0x80; 	\
+	movb	$-74, %al; 	\
+	outb	%al, $67; 	\
+	outb	%al, $0x80; 	\
+	movb	$-119, %al; 	\
+	outb	%al, $66; 	\
+	outb	%al, $0x80; 	\
+	movb	$15, %al; 	\
+	outb	%al, $66;
+
 
 ALIGN
 	.align	16
@@ -33,6 +48,13 @@
 	movw	%cs, %ax
 	movw	%ax, %ds		# Make ds:0 point to wakeup_start
 	movw	%ax, %ss
+
+	# Data segment must be set up before we can see whether to beep.
+	testl   $4, realmode_flags - wakeup_code
+	jz      1f
+	BEEP
+1:
+
 					# Private stack is needed for ASUS board
 	mov	$(wakeup_stack - wakeup_code), %sp
 
@@ -48,7 +70,7 @@
 	testl	%eax, %eax
 	jnz	no_longmode
 
-	testl	$1, video_flags - wakeup_code
+	testl	$1, realmode_flags - wakeup_code
 	jz	1f
 	lcall   $0xc000,$3
 	movw	%cs, %ax
@@ -56,7 +78,7 @@
 	movw	%ax, %ss
 1:
 
-	testl	$2, video_flags - wakeup_code
+	testl	$2, realmode_flags - wakeup_code
 	jz	1f
 	mov	video_mode - wakeup_code, %ax
 	call	mode_seta
@@ -230,7 +252,7 @@
 	
 real_magic:	.quad 0
 video_mode:	.quad 0
-video_flags:	.quad 0
+realmode_flags:	.quad 0
 
 .code16
 bogus_real_magic:
@@ -346,8 +368,8 @@
 
 	movl	saved_video_mode, %edx
 	movl	%edx, video_mode - wakeup_start (,%rdi)
-	movl	acpi_video_flags, %edx
-	movl	%edx, video_flags - wakeup_start (,%rdi)
+	movl	acpi_realmode_flags, %edx
+	movl	%edx, realmode_flags - wakeup_start (,%rdi)
 	movq	$0x12345678, real_magic - wakeup_start (,%rdi)
 	movq	$0x123456789abcdef0, %rdx
 	movq	%rdx, saved_magic
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c
index 296d2b0..fd9aff3 100644
--- a/arch/x86_64/kernel/early_printk.c
+++ b/arch/x86_64/kernel/early_printk.c
@@ -6,6 +6,7 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/fcntl.h>
+#include <xen/hvc-console.h>
 
 /* Simple VGA output */
 
@@ -242,6 +243,10 @@
  		simnow_init(buf + 6);
  		early_console = &simnow_console;
  		keep_early = 1;
+#ifdef CONFIG_HVC_XEN
+	} else if (!strncmp(buf, "xen", 3)) {
+		early_console = &xenboot_console;
+#endif
 	}
 
 	if (keep_early)
diff --git a/arch/x86_64/kernel/init_task.c b/arch/x86_64/kernel/init_task.c
index 3dc5854..4ff33d4 100644
--- a/arch/x86_64/kernel/init_task.c
+++ b/arch/x86_64/kernel/init_task.c
@@ -44,7 +44,7 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */ 
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
 /* Copies of the original ist values from the tss are only accessed during
  * debugging, no special alignment required.
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index aa1d159..f3fb817 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -174,7 +174,7 @@
 	if (events != atomic_read(&mce_logged) && trigger[0]) {
 		/* Small race window, but should be harmless.  */
 		atomic_set(&mce_logged, events);
-		call_usermodehelper(trigger, trigger_argv, NULL, -1);
+		call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
 	}
 }
 
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 931c64b..edbbc59 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -296,7 +296,7 @@
 static DEFINE_PER_CPU(local_t, alert_counter);
 static DEFINE_PER_CPU(int, nmi_touch);
 
-void touch_nmi_watchdog (void)
+void touch_nmi_watchdog(void)
 {
 	if (nmi_watchdog > 0) {
 		unsigned cpu;
@@ -306,8 +306,10 @@
 		 * do it ourselves because the alert count increase is not
 		 * atomic.
 		 */
-		for_each_present_cpu (cpu)
-			per_cpu(nmi_touch, cpu) = 1;
+		for_each_present_cpu(cpu) {
+			if (per_cpu(nmi_touch, cpu) != 1)
+				per_cpu(nmi_touch, cpu) = 1;
+		}
 	}
 
  	touch_softlockup_watchdog();
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
index 9409117..e83cc67 100644
--- a/arch/x86_64/kernel/ptrace.c
+++ b/arch/x86_64/kernel/ptrace.c
@@ -102,16 +102,25 @@
 		u32 *desc;
 		unsigned long base;
 
-		down(&child->mm->context.sem);
-		desc = child->mm->context.ldt + (seg & ~7);
-		base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);
+		seg &= ~7UL;
 
-		/* 16-bit code segment? */
-		if (!((desc[1] >> 22) & 1))
-			addr &= 0xffff;
-		addr += base;
+		down(&child->mm->context.sem);
+		if (unlikely((seg >> 3) >= child->mm->context.size))
+			addr = -1L; /* bogus selector, access would fault */
+		else {
+			desc = child->mm->context.ldt + seg;
+			base = ((desc[0] >> 16) |
+				((desc[1] & 0xff) << 16) |
+				(desc[1] & 0xff000000));
+
+			/* 16-bit code segment? */
+			if (!((desc[1] >> 22) & 1))
+				addr &= 0xffff;
+			addr += base;
+		}
 		up(&child->mm->context.sem);
 	}
+
 	return addr;
 }
 
@@ -313,17 +322,9 @@
 	switch (request) {
 	/* when I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
-	case PTRACE_PEEKDATA: {
-		unsigned long tmp;
-		int copied;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		ret = -EIO;
-		if (copied != sizeof(tmp))
-			break;
-		ret = put_user(tmp,(unsigned long __user *) data);
+	case PTRACE_PEEKDATA:
+		ret = generic_ptrace_peekdata(child, addr, data);
 		break;
-	}
 
 	/* read the word at location addr in the USER area. */
 	case PTRACE_PEEKUSR: {
@@ -367,10 +368,7 @@
 	/* when I and D space are separate, this will have to be fixed. */
 	case PTRACE_POKETEXT: /* write the word at location addr. */
 	case PTRACE_POKEDATA:
-		ret = 0;
-		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
-			break;
-		ret = -EIO;
+		ret = generic_ptrace_pokedata(child, addr, data);
 		break;
 
 	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 2ff4685..0694940 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -357,7 +357,7 @@
 }
 
 /*
- * smp_call_function_single - Run a function on another CPU
+ * smp_call_function_single - Run a function on a specific CPU
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
  * @nonatomic: Currently unused.
@@ -374,14 +374,18 @@
 {
 	/* prevent preemption and reschedule on another processor */
 	int me = get_cpu();
-	if (cpu == me) {
-		put_cpu();
-		return 0;
-	}
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
 
+	if (cpu == me) {
+		local_irq_disable();
+		func(info);
+		local_irq_enable();
+		put_cpu();
+		return 0;
+	}
+
 	spin_lock_bh(&call_lock);
 	__smp_call_function_single(cpu, func, info, nonatomic, wait);
 	spin_unlock_bh(&call_lock);
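
With the smp.c change above, smp_call_function_single() now executes the function even when the requested CPU is the current one, running it locally with interrupts disabled instead of returning 0 without calling it. Callers therefore no longer need their own "already on that CPU" special case; a usage sketch, where my_func and my_data are placeholders:

	/* Runs my_func(my_data) on 'cpu' and waits for completion.  If
	 * 'cpu' is the current CPU, my_func now runs locally with
	 * interrupts disabled rather than being silently skipped. */
	err = smp_call_function_single(cpu, my_func, my_data, 0, 1);
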
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 7fa155c..8713ad4 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -34,6 +34,10 @@
 #include <linux/bug.h>
 #include <linux/kdebug.h>
 
+#if defined(CONFIG_EDAC)
+#include <linux/edac.h>
+#endif
+
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/atomic.h>
@@ -330,6 +334,7 @@
 
 static void print_trace_address(void *data, unsigned long addr)
 {
+	touch_nmi_watchdog();
 	printk_address(addr);
 }
 
@@ -518,6 +523,7 @@
 	printk("\n");
 	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
 	show_registers(regs);
+	add_taint(TAINT_DIE);
 	/* Executive summary in case the oops scrolled away */
 	printk(KERN_ALERT "RIP ");
 	printk_address(regs->rip); 
@@ -717,6 +723,13 @@
 		reason);
 	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
 
+#if defined(CONFIG_EDAC)
+	if(edac_handler_set()) {
+		edac_atomic_assert_error();
+		return;
+	}
+#endif
+
 	if (panic_on_unrecovered_nmi)
 		panic("NMI: Not continuing");
 
diff --git a/arch/x86_64/kernel/tsc.c b/arch/x86_64/kernel/tsc.c
index 48f9a8e..e850aa0 100644
--- a/arch/x86_64/kernel/tsc.c
+++ b/arch/x86_64/kernel/tsc.c
@@ -44,7 +44,7 @@
 
 static int tsc_unstable;
 
-static inline int check_tsc_unstable(void)
+inline int check_tsc_unstable(void)
 {
 	return tsc_unstable;
 }
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index dbccfda..5c57ea4 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -48,7 +48,9 @@
   __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
   __stop___ex_table = .;
 
-  BUG_TABLE
+  NOTES :text :note
+
+  BUG_TABLE :text
 
   RODATA
 
@@ -194,10 +196,8 @@
   __initramfs_end = .;
 #endif
 
-  . = ALIGN(4096);
-  __per_cpu_start = .;
-  .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(4096)
+
   . = ALIGN(4096);
   __init_end = .;
 
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 635e58d..84f1172 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -317,7 +317,7 @@
 	struct vm_area_struct * vma;
 	unsigned long address;
 	const struct exception_table_entry *fixup;
-	int write;
+	int write, fault;
 	unsigned long flags;
 	siginfo_t info;
 
@@ -450,19 +450,18 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	default:
-		goto out_of_memory;
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
-
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 	up_read(&mm->mmap_sem);
 	return;
 
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 14104ff..06a13d9 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -50,18 +50,8 @@
 	switch (request) {
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
 	case PTRACE_PEEKDATA:
-	{
-		unsigned long tmp;
-		int copied;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		ret = -EIO;
-		if (copied != sizeof(tmp))
-			break;
-		ret = put_user(tmp,(unsigned long *) data);
-
+		ret = generic_ptrace_peekdata(child, addr, data);
 		goto out;
-	}
 
 	/* Read the word at location addr in the USER area.  */
 
@@ -138,10 +128,7 @@
 
 	case PTRACE_POKETEXT: /* write the word at location addr. */
 	case PTRACE_POKEDATA:
-		if (access_process_vm(child, addr, &data, sizeof(data), 1)
-		    == sizeof(data))
-			break;
-		ret = -EIO;
+		ret = generic_ptrace_pokedata(child, addr, data);
 		goto out;
 
 	case PTRACE_POKEUSR:
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 693ab26..c5e62f9 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -482,6 +482,7 @@
 	if (!user_mode(regs))
 		show_stack(NULL, (unsigned long*)regs->areg[1]);
 
+	add_taint(TAINT_DIE);
 	spin_unlock_irq(&die_lock);
 
 	if (in_interrupt())
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index bb3f1f3..ac4ed52 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -191,10 +191,7 @@
   __initramfs_end = .;
 #endif
 
-  . = ALIGN(4096);
-  __per_cpu_start = .;
-  .data.percpu  : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(4096)
 
 
   /* We need this dummy segment here */
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 3dc6f2f..1600406 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -41,6 +41,7 @@
 	siginfo_t info;
 
 	int is_write, is_exec;
+	int fault;
 
 	info.si_code = SEGV_MAPERR;
 
@@ -102,20 +103,18 @@
 	 * the fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, is_write)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, is_write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/block/Kconfig b/block/Kconfig
index 1d16b08..0768741 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -52,11 +52,16 @@
 endif # BLOCK
 
 config BLK_DEV_BSG
-	bool "Block layer SG support"
-	depends on SCSI && EXPERIMENTAL
-	default y
+	bool "Block layer SG support v4 (EXPERIMENTAL)"
+	depends on (SCSI=y) && EXPERIMENTAL
 	---help---
-	Saying Y here will enable generic SG (SCSI generic) v4
-	support for any block device.
+	Saying Y here will enable generic SG (SCSI generic) v4 support
+	for any block device.
+
+	Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4
+	can handle complicated SCSI commands: tagged variable length cdbs
+	with bidirectional data transfers and generic request/response
+	protocols (e.g. Task Management Functions and SMP in Serial
+	Attached SCSI).
 
 source block/Kconfig.iosched
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 109e91b..3e316dd 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1322,10 +1322,9 @@
 {
 	struct as_data *ad;
 
-	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
+	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!ad)
 		return NULL;
-	memset(ad, 0, sizeof(*ad));
 
 	ad->q = q; /* Identify what queue the data belongs to */
 
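The as-iosched change above folds the explicit memset() into the allocation; the bsg.c hunk below does the same thing with kmem_cache_zalloc(). Condensed before/after from the hunk (error handling omitted):

	/* before: allocate, then zero by hand */
	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
	memset(ad, 0, sizeof(*ad));

	/* after: ask the allocator for zeroed memory directly */
	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
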
diff --git a/block/bsg.c b/block/bsg.c
index 576933f..baa04e7 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -33,7 +33,8 @@
 #include <scsi/scsi_driver.h>
 #include <scsi/sg.h>
 
-static char bsg_version[] = "block layer sg (bsg) 0.4";
+#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
+#define BSG_VERSION	"0.4"
 
 struct bsg_device {
 	request_queue_t *queue;
@@ -68,22 +69,15 @@
 #define dprintk(fmt, args...)
 #endif
 
-#define list_entry_bc(entry)	list_entry((entry), struct bsg_command, list)
-
-/*
- * just for testing
- */
-#define BSG_MAJOR	(240)
-
 static DEFINE_MUTEX(bsg_mutex);
 static int bsg_device_nr, bsg_minor_idx;
 
-#define BSG_LIST_SIZE	(8)
-#define bsg_list_idx(minor)	((minor) & (BSG_LIST_SIZE - 1))
-static struct hlist_head bsg_device_list[BSG_LIST_SIZE];
+#define BSG_LIST_ARRAY_SIZE	8
+static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
 
 static struct class *bsg_class;
 static LIST_HEAD(bsg_class_list);
+static int bsg_major;
 
 static struct kmem_cache *bsg_cmd_cachep;
 
@@ -128,7 +122,7 @@
 	bd->queued_cmds++;
 	spin_unlock_irq(&bd->lock);
 
-	bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
+	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
 	if (unlikely(!bc)) {
 		spin_lock_irq(&bd->lock);
 		bd->queued_cmds--;
@@ -136,7 +130,6 @@
 		goto out;
 	}
 
-	memset(bc, 0, sizeof(*bc));
 	bc->bd = bd;
 	INIT_LIST_HEAD(&bc->list);
 	dprintk("%s: returning free cmd %p\n", bd->name, bc);
@@ -146,22 +139,12 @@
 	return bc;
 }
 
-static inline void
-bsg_del_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
+static inline struct hlist_head *bsg_dev_idx_hash(int index)
 {
-	bd->done_cmds--;
-	list_del(&bc->list);
+	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
 }
 
-static inline void
-bsg_add_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
-{
-	bd->done_cmds++;
-	list_add_tail(&bc->list, &bd->done_list);
-	wake_up(&bd->wq_done);
-}
-
-static inline int bsg_io_schedule(struct bsg_device *bd, int state)
+static int bsg_io_schedule(struct bsg_device *bd)
 {
 	DEFINE_WAIT(wait);
 	int ret = 0;
@@ -186,14 +169,11 @@
 		goto unlock;
 	}
 
-	prepare_to_wait(&bd->wq_done, &wait, state);
+	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
 	spin_unlock_irq(&bd->lock);
 	io_schedule();
 	finish_wait(&bd->wq_done, &wait);
 
-	if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
-		ret = -ERESTARTSYS;
-
 	return ret;
 unlock:
 	spin_unlock_irq(&bd->lock);
@@ -272,7 +252,7 @@
 {
 	request_queue_t *q = bd->queue;
 	struct request *rq, *next_rq = NULL;
-	int ret, rw = 0; /* shut up gcc */
+	int ret, rw;
 	unsigned int dxfer_len;
 	void *dxferp = NULL;
 
@@ -354,9 +334,11 @@
 	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
 
 	spin_lock_irqsave(&bd->lock, flags);
-	list_del(&bc->list);
-	bsg_add_done_cmd(bd, bc);
+	list_move_tail(&bc->list, &bd->done_list);
+	bd->done_cmds++;
 	spin_unlock_irqrestore(&bd->lock, flags);
+
+	wake_up(&bd->wq_done);
 }
 
 /*
@@ -387,14 +369,15 @@
 	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
 }
 
-static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
+static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
 {
 	struct bsg_command *bc = NULL;
 
 	spin_lock_irq(&bd->lock);
 	if (bd->done_cmds) {
-		bc = list_entry_bc(bd->done_list.next);
-		bsg_del_done_cmd(bd, bc);
+		bc = list_entry(bd->done_list.next, struct bsg_command, list);
+		list_del(&bc->list);
+		bd->done_cmds--;
 	}
 	spin_unlock_irq(&bd->lock);
 
@@ -450,8 +433,8 @@
 	hdr->response_len = 0;
 
 	if (rq->sense_len && hdr->response) {
-		int len = min((unsigned int) hdr->max_response_len,
-			      rq->sense_len);
+		int len = min_t(unsigned int, hdr->max_response_len,
+					rq->sense_len);
 
 		ret = copy_to_user((void*)(unsigned long)hdr->response,
 				   rq->sense, len);
@@ -486,7 +469,7 @@
 	 */
 	ret = 0;
 	do {
-		ret = bsg_io_schedule(bd, TASK_UNINTERRUPTIBLE);
+		ret = bsg_io_schedule(bd);
 		/*
 		 * look for -ENODATA specifically -- we'll sometimes get
 		 * -ERESTARTSYS when we've taken a signal, but we can't
@@ -523,7 +506,7 @@
 	return ret;
 }
 
-static ssize_t
+static int
 __bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
 	   const struct iovec *iov, ssize_t *bytes_read)
 {
@@ -550,7 +533,7 @@
 		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
 					       bc->bidi_bio);
 
-		if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
+		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
 			ret = -EFAULT;
 
 		bsg_free_command(bc);
@@ -582,6 +565,9 @@
 		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
 }
 
+/*
+ * Check if the error is a "real" error that we should return.
+ */
 static inline int err_block_err(int ret)
 {
 	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
@@ -610,8 +596,8 @@
 	return bytes_read;
 }
 
-static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
-			   size_t count, ssize_t *bytes_read)
+static int __bsg_write(struct bsg_device *bd, const char __user *buf,
+		       size_t count, ssize_t *bytes_written)
 {
 	struct bsg_command *bc;
 	struct request *rq;
@@ -655,7 +641,7 @@
 		rq = NULL;
 		nr_commands--;
 		buf += sizeof(struct sg_io_v4);
-		*bytes_read += sizeof(struct sg_io_v4);
+		*bytes_written += sizeof(struct sg_io_v4);
 	}
 
 	if (bc)
@@ -668,7 +654,7 @@
 bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 {
 	struct bsg_device *bd = file->private_data;
-	ssize_t bytes_read;
+	ssize_t bytes_written;
 	int ret;
 
 	dprintk("%s: write %Zd bytes\n", bd->name, count);
@@ -676,18 +662,18 @@
 	bsg_set_block(bd, file);
 	bsg_set_write_perm(bd, file);
 
-	bytes_read = 0;
-	ret = __bsg_write(bd, buf, count, &bytes_read);
-	*ppos = bytes_read;
+	bytes_written = 0;
+	ret = __bsg_write(bd, buf, count, &bytes_written);
+	*ppos = bytes_written;
 
 	/*
 	 * return bytes written on non-fatal errors
 	 */
-	if (!bytes_read || (bytes_read && err_block_err(ret)))
-		bytes_read = ret;
+	if (!bytes_written || (bytes_written && err_block_err(ret)))
+		bytes_written = ret;
 
-	dprintk("%s: returning %Zd\n", bd->name, bytes_read);
-	return bytes_read;
+	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
+	return bytes_written;
 }
 
 static struct bsg_device *bsg_alloc_device(void)
@@ -746,7 +732,7 @@
 					 struct request_queue *rq,
 					 struct file *file)
 {
-	struct bsg_device *bd = NULL;
+	struct bsg_device *bd;
 #ifdef BSG_DEBUG
 	unsigned char buf[32];
 #endif
@@ -762,7 +748,7 @@
 	atomic_set(&bd->ref_count, 1);
 	bd->minor = iminor(inode);
 	mutex_lock(&bsg_mutex);
-	hlist_add_head(&bd->dev_list, &bsg_device_list[bsg_list_idx(bd->minor)]);
+	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(bd->minor));
 
 	strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
 	dprintk("bound to <%s>, max queue %d\n",
@@ -774,13 +760,12 @@
 
 static struct bsg_device *__bsg_get_device(int minor)
 {
-	struct hlist_head *list = &bsg_device_list[bsg_list_idx(minor)];
 	struct bsg_device *bd = NULL;
 	struct hlist_node *entry;
 
 	mutex_lock(&bsg_mutex);
 
-	hlist_for_each(entry, list) {
+	hlist_for_each(entry, bsg_dev_idx_hash(minor)) {
 		bd = hlist_entry(entry, struct bsg_device, dev_list);
 		if (bd->minor == minor) {
 			atomic_inc(&bd->ref_count);
@@ -858,16 +843,11 @@
 	return mask;
 }
 
-static int
-bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
-	  unsigned long arg)
+static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct bsg_device *bd = file->private_data;
 	int __user *uarg = (int __user *) arg;
 
-	if (!bd)
-		return -ENXIO;
-
 	switch (cmd) {
 		/*
 		 * our own ioctls
@@ -944,7 +924,7 @@
 	.poll		=	bsg_poll,
 	.open		=	bsg_open,
 	.release	=	bsg_release,
-	.ioctl		=	bsg_ioctl,
+	.unlocked_ioctl	=	bsg_ioctl,
 	.owner		=	THIS_MODULE,
 };
 
@@ -952,12 +932,11 @@
 {
 	struct bsg_class_device *bcd = &q->bsg_dev;
 
-	if (!bcd->class_dev)
-		return;
+	WARN_ON(!bcd->class_dev);
 
 	mutex_lock(&bsg_mutex);
 	sysfs_remove_link(&q->kobj, "bsg");
-	class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
+	class_device_destroy(bsg_class, MKDEV(bsg_major, bcd->minor));
 	bcd->class_dev = NULL;
 	list_del_init(&bcd->list);
 	bsg_device_nr--;
@@ -1003,7 +982,7 @@
 		bsg_minor_idx = 0;
 
 	bcd->queue = q;
-	dev = MKDEV(BSG_MAJOR, bcd->minor);
+	dev = MKDEV(bsg_major, bcd->minor);
 	class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev, "%s", name);
 	if (IS_ERR(class_dev)) {
 		ret = PTR_ERR(class_dev);
@@ -1024,7 +1003,7 @@
 	return 0;
 err:
 	if (class_dev)
-		class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
+		class_device_destroy(bsg_class, MKDEV(bsg_major, bcd->minor));
 	mutex_unlock(&bsg_mutex);
 	return ret;
 }
@@ -1061,6 +1040,7 @@
 static int __init bsg_init(void)
 {
 	int ret, i;
+	dev_t devid;
 
 	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
 				sizeof(struct bsg_command), 0, 0, NULL, NULL);
@@ -1069,46 +1049,47 @@
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < BSG_LIST_SIZE; i++)
+	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
 		INIT_HLIST_HEAD(&bsg_device_list[i]);
 
 	bsg_class = class_create(THIS_MODULE, "bsg");
 	if (IS_ERR(bsg_class)) {
-		kmem_cache_destroy(bsg_cmd_cachep);
-		return PTR_ERR(bsg_class);
+		ret = PTR_ERR(bsg_class);
+		goto destroy_kmemcache;
 	}
 
-	ret = register_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS, "bsg");
-	if (ret) {
-		kmem_cache_destroy(bsg_cmd_cachep);
-		class_destroy(bsg_class);
-		return ret;
-	}
+	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
+	if (ret)
+		goto destroy_bsg_class;
+
+	bsg_major = MAJOR(devid);
 
 	cdev_init(&bsg_cdev, &bsg_fops);
-	ret = cdev_add(&bsg_cdev, MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
-	if (ret) {
-		kmem_cache_destroy(bsg_cmd_cachep);
-		class_destroy(bsg_class);
-		unregister_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
-		return ret;
-	}
+	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+	if (ret)
+		goto unregister_chrdev;
 
 	ret = scsi_register_interface(&bsg_intf);
-	if (ret) {
-		printk(KERN_ERR "bsg: failed register scsi interface %d\n", ret);
-		kmem_cache_destroy(bsg_cmd_cachep);
-		class_destroy(bsg_class);
-		unregister_chrdev(BSG_MAJOR, "bsg");
-		return ret;
-	}
+	if (ret)
+		goto remove_cdev;
 
-	printk(KERN_INFO "%s loaded\n", bsg_version);
+	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
+	       " loaded (major %d)\n", bsg_major);
 	return 0;
+remove_cdev:
+	printk(KERN_ERR "bsg: failed register scsi interface %d\n", ret);
+	cdev_del(&bsg_cdev);
+unregister_chrdev:
+	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+destroy_bsg_class:
+	class_destroy(bsg_class);
+destroy_kmemcache:
+	kmem_cache_destroy(bsg_cmd_cachep);
+	return ret;
 }
 
 MODULE_AUTHOR("Jens Axboe");
-MODULE_DESCRIPTION("Block layer SGSI generic (sg) driver");
+MODULE_DESCRIPTION(BSG_DESCRIPTION);
 MODULE_LICENSE("GPL");
 
 device_initcall(bsg_init);
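
The reworked bsg_init() above folds four duplicated cleanup sequences into a single goto ladder and replaces the hardcoded BSG_MAJOR with a major allocated at load time. A minimal sketch of the same pattern, using only calls that appear in the hunk; the demo_* names are placeholders, not part of the patch:

    #include <linux/cdev.h>
    #include <linux/device.h>
    #include <linux/fs.h>
    #include <linux/init.h>
    #include <linux/module.h>

    #define DEMO_MAX_DEVS 32

    static struct class *demo_class;
    static struct cdev demo_cdev;
    static int demo_major;
    static struct file_operations demo_fops;    /* placeholder, normally filled in */

    static int __init demo_init(void)
    {
            dev_t devid;
            int ret;

            demo_class = class_create(THIS_MODULE, "demo");
            if (IS_ERR(demo_class))
                    return PTR_ERR(demo_class);

            /* ask for any free major instead of hardcoding one */
            ret = alloc_chrdev_region(&devid, 0, DEMO_MAX_DEVS, "demo");
            if (ret)
                    goto destroy_class;
            demo_major = MAJOR(devid);

            cdev_init(&demo_cdev, &demo_fops);
            ret = cdev_add(&demo_cdev, MKDEV(demo_major, 0), DEMO_MAX_DEVS);
            if (ret)
                    goto unregister_region;

            return 0;

    unregister_region:
            unregister_chrdev_region(MKDEV(demo_major, 0), DEMO_MAX_DEVS);
    destroy_class:
            class_destroy(demo_class);
            return ret;
    }
    module_init(demo_init);

Each label undoes only the steps that succeeded before the failure, so teardown runs in the reverse of setup order and nothing is cleaned up twice.
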
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e0aa4da..9755a3c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1251,9 +1251,9 @@
 {
 	struct cfq_io_context *cic;
 
-	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
+	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
+							cfqd->queue->node);
 	if (cic) {
-		memset(cic, 0, sizeof(*cic));
 		cic->last_end_request = jiffies;
 		INIT_LIST_HEAD(&cic->queue_list);
 		cic->dtor = cfq_free_io_context;
@@ -1376,17 +1376,19 @@
 			 * free memory.
 			 */
 			spin_unlock_irq(cfqd->queue->queue_lock);
-			new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
+			new_cfqq = kmem_cache_alloc_node(cfq_pool,
+					gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+					cfqd->queue->node);
 			spin_lock_irq(cfqd->queue->queue_lock);
 			goto retry;
 		} else {
-			cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
+			cfqq = kmem_cache_alloc_node(cfq_pool,
+					gfp_mask | __GFP_ZERO,
+					cfqd->queue->node);
 			if (!cfqq)
 				goto out;
 		}
 
-		memset(cfqq, 0, sizeof(*cfqq));
-
 		RB_CLEAR_NODE(&cfqq->rb_node);
 		INIT_LIST_HEAD(&cfqq->fifo);
 
@@ -2079,12 +2081,10 @@
 {
 	struct cfq_data *cfqd;
 
-	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
+	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!cfqd)
 		return NULL;
 
-	memset(cfqd, 0, sizeof(*cfqd));
-
 	cfqd->service_tree = CFQ_RB_ROOT;
 	INIT_LIST_HEAD(&cfqd->cic_list);
 
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 6d673e9..87ca02a 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -360,10 +360,9 @@
 {
 	struct deadline_data *dd;
 
-	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!dd)
 		return NULL;
-	memset(dd, 0, sizeof(*dd));
 
 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
diff --git a/block/elevator.c b/block/elevator.c
index 4769a25..d265963 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -177,11 +177,10 @@
 	elevator_t *eq;
 	int i;
 
-	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
+	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (unlikely(!eq))
 		goto err;
 
-	memset(eq, 0, sizeof(*eq));
 	eq->ops = &e->ops;
 	eq->elevator_type = e;
 	kobject_init(&eq->kobj);
diff --git a/block/genhd.c b/block/genhd.c
index 863a8c0..3af1e7a 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -108,28 +108,24 @@
 
 EXPORT_SYMBOL(register_blkdev);
 
-/* todo: make void - error printk here */
-int unregister_blkdev(unsigned int major, const char *name)
+void unregister_blkdev(unsigned int major, const char *name)
 {
 	struct blk_major_name **n;
 	struct blk_major_name *p = NULL;
 	int index = major_to_index(major);
-	int ret = 0;
 
 	mutex_lock(&block_subsys_lock);
 	for (n = &major_names[index]; *n; n = &(*n)->next)
 		if ((*n)->major == major)
 			break;
-	if (!*n || strcmp((*n)->name, name))
-		ret = -EINVAL;
-	else {
+	if (!*n || strcmp((*n)->name, name)) {
+		WARN_ON(1);
+	} else {
 		p = *n;
 		*n = p->next;
 	}
 	mutex_unlock(&block_subsys_lock);
 	kfree(p);
-
-	return ret;
 }
 
 EXPORT_SYMBOL(unregister_blkdev);
@@ -726,21 +722,21 @@
 {
 	struct gendisk *disk;
 
-	disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
+	disk = kmalloc_node(sizeof(struct gendisk),
+				GFP_KERNEL | __GFP_ZERO, node_id);
 	if (disk) {
-		memset(disk, 0, sizeof(struct gendisk));
 		if (!init_disk_stats(disk)) {
 			kfree(disk);
 			return NULL;
 		}
 		if (minors > 1) {
 			int size = (minors - 1) * sizeof(struct hd_struct *);
-			disk->part = kmalloc_node(size, GFP_KERNEL, node_id);
+			disk->part = kmalloc_node(size,
+				GFP_KERNEL | __GFP_ZERO, node_id);
 			if (!disk->part) {
 				kfree(disk);
 				return NULL;
 			}
-			memset(disk->part, 0, size);
 		}
 		disk->minors = minors;
 		kobj_set_kset_s(disk,block_subsys);
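
With unregister_blkdev() now returning void and reporting a name/major mismatch via WARN_ON(1) itself, callers can drop their own checks; the loop.c hunk later in this series does exactly that. A caller-side sketch (MY_MAJOR and "mydrv" are placeholders):

    /* before: a return value callers could not do anything useful with */
    if (unregister_blkdev(MY_MAJOR, "mydrv"))
            printk(KERN_WARNING "mydrv: cannot unregister blkdev\n");

    /* after: mismatches warn inside genhd.c, the caller just calls it */
    unregister_blkdev(MY_MAJOR, "mydrv");
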
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 11e4235..d7cadf3 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1829,11 +1829,11 @@
 {
 	request_queue_t *q;
 
-	q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
+	q = kmem_cache_alloc_node(requestq_cachep,
+				gfp_mask | __GFP_ZERO, node_id);
 	if (!q)
 		return NULL;
 
-	memset(q, 0, sizeof(*q));
 	init_timer(&q->unplug_timer);
 
 	snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a26ba07..71bdf88 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -436,11 +436,10 @@
 
 	bytes = max(in_len, out_len);
 	if (bytes) {
-		buffer = kmalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
+		buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
 		if (!buffer)
 			return -ENOMEM;
 
-		memset(buffer, 0, bytes);
 	}
 
 	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 7916f4b..707650a 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -84,4 +84,7 @@
 
 source "drivers/kvm/Kconfig"
 
+source "drivers/uio/Kconfig"
+
+source "drivers/lguest/Kconfig"
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 503d825..0ea8e32 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -15,6 +15,8 @@
 obj-$(CONFIG_PNP)		+= pnp/
 obj-$(CONFIG_ARM_AMBA)		+= amba/
 
+obj-$(CONFIG_XEN)		+= xen/
+
 # char/ comes before serial/ etc so that the VT console is the boot-time
 # default.
 obj-y				+= char/
@@ -38,6 +40,7 @@
 obj-$(CONFIG_FUSION)		+= message/
 obj-$(CONFIG_FIREWIRE)		+= firewire/
 obj-$(CONFIG_IEEE1394)		+= ieee1394/
+obj-$(CONFIG_UIO)		+= uio/
 obj-y				+= cdrom/
 obj-y				+= auxdisplay/
 obj-$(CONFIG_MTD)		+= mtd/
@@ -70,6 +73,7 @@
 obj-$(CONFIG_EDAC)		+= edac/
 obj-$(CONFIG_MCA)		+= mca/
 obj-$(CONFIG_EISA)		+= eisa/
+obj-$(CONFIG_LGUEST_GUEST)	+= lguest/
 obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
 obj-$(CONFIG_MMC)		+= mmc/
 obj-$(CONFIG_NEW_LEDS)		+= leds/
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index bc7e16e..42127c0 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -217,10 +217,26 @@
 	}
 }
 
+static int acpi_hibernation_pre_restore(void)
+{
+	acpi_status status;
+
+	status = acpi_hw_disable_all_gpes();
+
+	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
+}
+
+static void acpi_hibernation_restore_cleanup(void)
+{
+	acpi_hw_enable_all_runtime_gpes();
+}
+
 static struct hibernation_ops acpi_hibernation_ops = {
 	.prepare = acpi_hibernation_prepare,
 	.enter = acpi_hibernation_enter,
 	.finish = acpi_hibernation_finish,
+	.pre_restore = acpi_hibernation_pre_restore,
+	.restore_cleanup = acpi_hibernation_restore_cleanup,
 };
 #endif				/* CONFIG_SOFTWARE_SUSPEND */
 
diff --git a/drivers/acpi/sleep/poweroff.c b/drivers/acpi/sleep/poweroff.c
index d9801ef..39e40d5 100644
--- a/drivers/acpi/sleep/poweroff.c
+++ b/drivers/acpi/sleep/poweroff.c
@@ -39,7 +39,13 @@
 
 #ifdef CONFIG_PM
 
-void acpi_power_off(void)
+static void acpi_power_off_prepare(void)
+{
+	/* Prepare to power off the system */
+	acpi_sleep_prepare(ACPI_STATE_S5);
+}
+
+static void acpi_power_off(void)
 {
 	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
 	printk("%s called\n", __FUNCTION__);
@@ -48,30 +54,6 @@
 	acpi_enter_sleep_state(ACPI_STATE_S5);
 }
 
-static int acpi_shutdown(struct sys_device *x)
-{
-	switch (system_state) {
-	case SYSTEM_POWER_OFF:
-		/* Prepare to power off the system */
-		return acpi_sleep_prepare(ACPI_STATE_S5);
-	case SYSTEM_SUSPEND_DISK:
-		/* Prepare to suspend the system to disk */
-		return acpi_sleep_prepare(ACPI_STATE_S4);
-	default:
-		return 0;
-	}
-}
-
-static struct sysdev_class acpi_sysclass = {
-	set_kset_name("acpi"),
-	.shutdown = acpi_shutdown
-};
-
-static struct sys_device device_acpi = {
-	.id = 0,
-	.cls = &acpi_sysclass,
-};
-
 static int acpi_poweroff_init(void)
 {
 	if (!acpi_disabled) {
@@ -81,13 +63,8 @@
 		status =
 		    acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
 		if (ACPI_SUCCESS(status)) {
-			int error;
-			error = sysdev_class_register(&acpi_sysclass);
-			if (!error)
-				error = sysdev_register(&device_acpi);
-			if (!error)
-				pm_power_off = acpi_power_off;
-			return error;
+			pm_power_off_prepare = acpi_power_off_prepare;
+			pm_power_off = acpi_power_off;
 		}
 	}
 	return 0;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 88a6fc7..58f1338 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -40,6 +40,7 @@
 #include <linux/jiffies.h>
 #include <linux/kmod.h>
 #include <linux/seq_file.h>
+#include <linux/reboot.h>
 #include <asm/uaccess.h>
 
 #include <acpi/acpi_bus.h>
@@ -59,7 +60,6 @@
 #define ACPI_THERMAL_NOTIFY_CRITICAL	0xF0
 #define ACPI_THERMAL_NOTIFY_HOT		0xF1
 #define ACPI_THERMAL_MODE_ACTIVE	0x00
-#define ACPI_THERMAL_PATH_POWEROFF	"/sbin/poweroff"
 
 #define ACPI_THERMAL_MAX_ACTIVE	10
 #define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65
@@ -419,26 +419,6 @@
 	return 0;
 }
 
-static int acpi_thermal_call_usermode(char *path)
-{
-	char *argv[2] = { NULL, NULL };
-	char *envp[3] = { NULL, NULL, NULL };
-
-
-	if (!path)
-		return -EINVAL;
-
-	argv[0] = path;
-
-	/* minimal command environment */
-	envp[0] = "HOME=/";
-	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
-
-	call_usermodehelper(argv[0], argv, envp, 0);
-
-	return 0;
-}
-
 static int acpi_thermal_critical(struct acpi_thermal *tz)
 {
 	if (!tz || !tz->trips.critical.flags.valid)
@@ -456,7 +436,7 @@
 	acpi_bus_generate_event(tz->device, ACPI_THERMAL_NOTIFY_CRITICAL,
 				tz->trips.critical.flags.enabled);
 
-	acpi_thermal_call_usermode(ACPI_THERMAL_PATH_POWEROFF);
+	orderly_poweroff(true);
 
 	return 0;
 }
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 5d57643..fb8a749 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2666,7 +2666,7 @@
 	mv_print_info(host);
 
 	pci_set_master(pdev);
-	pci_set_mwi(pdev);
+	pci_try_set_mwi(pdev);
 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
 				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
 }
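
The sata_mv change swaps pci_set_mwi() for pci_try_set_mwi(); the intent, as I read it, is that enabling Memory-Write-Invalidate here is a best-effort optimization, and pci_try_set_mwi() exists for callers that may ignore the result, whereas pci_set_mwi() is meant to be checked. A sketch of the two forms in a probe path, with pdev being the usual struct pci_dev argument:

    pci_set_master(pdev);

    /* if MWI really matters to the driver, handle the failure */
    if (pci_set_mwi(pdev))
            dev_warn(&pdev->dev, "could not enable MWI\n");

    /* if it is only a nice-to-have, best effort is enough */
    pci_try_set_mwi(pdev);
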
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index bb4ae628..bed9f58 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -172,7 +172,7 @@
 
 config ATM_NICSTAR
 	tristate "IDT 77201 (NICStAR) (ForeRunnerLE)"
-	depends on PCI && !64BIT
+	depends on PCI && !64BIT && VIRT_TO_BUS
 	help
 	  The NICStAR chipset family is used in a large number of ATM NICs for
 	  25 and for 155 Mbps, including IDT cards and the Fore ForeRunnerLE
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 59651ab..b34b382 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1040,7 +1040,7 @@
   struct atm_qos * qos;
   struct atm_trafprm * txtp;
   struct atm_trafprm * rxtp;
-  u16 tx_rate_bits;
+  u16 tx_rate_bits = -1; // hush gcc
   u16 tx_vc_bits = -1; // hush gcc
   u16 tx_frame_bits = -1; // hush gcc
   
@@ -1096,6 +1096,8 @@
 	    r = round_up;
 	  }
 	  error = make_rate (pcr, r, &tx_rate_bits, NULL);
+	  if (error)
+	    return error;
 	  tx_vc_bits = TX_UBR_CAPPED;
 	  tx_frame_bits = TX_FRAME_CAPPED;
 	}
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 77637e7..41b2204 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1738,7 +1738,8 @@
 			printk(KERN_ERR KERN_ERR DEV_LABEL "(itf %d): bad "
 			    "magic - expected 0x%x, got 0x%x\n",dev->number,
 			    ENI155_MAGIC,(unsigned) readl(&eprom->magic));
-			return -EINVAL;
+			error = -EINVAL;
+			goto unmap;
 		}
 	}
 	eni_dev->phy = base+PHY_BASE;
@@ -1765,17 +1766,27 @@
 		printk(")\n");
 		printk(KERN_ERR DEV_LABEL "(itf %d): ERROR - wrong id 0x%x\n",
 		    dev->number,(unsigned) eni_in(MID_RES_ID_MCON));
-		return -EINVAL;
+		error = -EINVAL;
+		goto unmap;
 	}
 	error = eni_dev->asic ? get_esi_asic(dev) : get_esi_fpga(dev,base);
-	if (error) return error;
+	if (error)
+		goto unmap;
 	for (i = 0; i < ESI_LEN; i++)
 		printk("%s%02X",i ? "-" : "",dev->esi[i]);
 	printk(")\n");
 	printk(KERN_NOTICE DEV_LABEL "(itf %d): %s,%s\n",dev->number,
 	    eni_in(MID_RES_ID_MCON) & 0x200 ? "ASIC" : "FPGA",
 	    media_name[eni_in(MID_RES_ID_MCON) & DAUGTHER_ID]);
-	return suni_init(dev);
+
+	error = suni_init(dev);
+	if (error)
+		goto unmap;
+out:
+	return error;
+unmap:
+	iounmap(base);
+	goto out;
 }
 
 
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 38b688f..737cea4 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1710,7 +1710,7 @@
 		/* This bit is documented as "RESERVED" */
 		if (isr & ISR_INIT_ERR) {
 			printk (KERN_ERR "Error initializing the FS... \n");
-			return 1;
+			goto unmap;
 		}
 		if (isr & ISR_INIT) {
 			fs_dprintk (FS_DEBUG_INIT, "Ha! Initialized OK!\n");
@@ -1723,7 +1723,7 @@
 
 	if (!to) {
 		printk (KERN_ERR "timeout initializing the FS... \n");
-		return 1;
+		goto unmap;
 	}
 
 	/* XXX fix for fs155 */
@@ -1803,7 +1803,7 @@
 	if (!dev->atm_vccs) {
 		printk (KERN_WARNING "Couldn't allocate memory for VCC buffers. Woops!\n");
 		/* XXX Clean up..... */
-		return 1;
+		goto unmap;
 	}
 
 	dev->tx_inuse = kzalloc (dev->nchannels / 8 /* bits/byte */ , GFP_KERNEL);
@@ -1813,7 +1813,7 @@
 	if (!dev->tx_inuse) {
 		printk (KERN_WARNING "Couldn't allocate memory for tx_inuse bits!\n");
 		/* XXX Clean up..... */
-		return 1;
+		goto unmap;
 	}
 	/* -- RAS1 : FS155 and 50 differ. Default (0) should be OK for both */
 	/* -- RAS2 : FS50 only: Default is OK. */
@@ -1840,7 +1840,7 @@
 	if (request_irq (dev->irq, fs_irq, IRQF_SHARED, "firestream", dev)) {
 		printk (KERN_WARNING "couldn't get irq %d for firestream.\n", pci_dev->irq);
 		/* XXX undo all previous stuff... */
-		return 1;
+		goto unmap;
 	}
 	fs_dprintk (FS_DEBUG_INIT, "Grabbed irq %d for dev at %p.\n", dev->irq, dev);
   
@@ -1890,6 +1890,9 @@
   
 	func_exit ();
 	return 0;
+unmap:
+	iounmap(dev->base);
+	return 1;
 }
 
 static int __devinit firestream_init_one (struct pci_dev *pci_dev,
@@ -2012,6 +2015,7 @@
 		for (i=0;i < FS_NR_RX_QUEUES;i++)
 			free_queue (dev, &dev->rx_rq[i]);
 
+		iounmap(dev->base);
 		fs_dprintk (FS_DEBUG_ALLOC, "Free fs-dev: %p\n", dev);
 		nxtdev = dev->next;
 		kfree (dev);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 8f995ce..f8b1700 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -65,7 +65,7 @@
 static unsigned int vpibits = 1;
 
 
-#define CONFIG_ATM_IDT77252_SEND_IDLE 1
+#define ATM_IDT77252_SEND_IDLE 1
 
 
 /*
@@ -3404,7 +3404,7 @@
 	conf =	SAR_CFG_TX_FIFO_SIZE_9 |	/* Use maximum fifo size */
 		SAR_CFG_RXSTQ_SIZE_8k |		/* Receive Status Queue is 8k */
 		SAR_CFG_IDLE_CLP |		/* Set CLP on idle cells */
-#ifndef CONFIG_ATM_IDT77252_SEND_IDLE
+#ifndef ATM_IDT77252_SEND_IDLE
 		SAR_CFG_NO_IDLE |		/* Do not send idle cells */
 #endif
 		0;
@@ -3541,7 +3541,7 @@
 	printk("%s: Linkrate on ATM line : %u bit/s, %u cell/s.\n",
 	       card->name, linkrate, card->link_pcr);
 
-#ifdef CONFIG_ATM_IDT77252_SEND_IDLE
+#ifdef ATM_IDT77252_SEND_IDLE
 	card->utopia_pcr = card->link_pcr;
 #else
 	card->utopia_pcr = (160000000 / 8 / 54);
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 0e2c1ae..55fd1b4 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -552,8 +552,8 @@
 	writel(val, sram_addr(lanai, offset));
 }
 
-static int __init sram_test_word(
-	const struct lanai_dev *lanai, int offset, u32 pattern)
+static int __devinit sram_test_word(const struct lanai_dev *lanai,
+				    int offset, u32 pattern)
 {
 	u32 readback;
 	sram_write(lanai, pattern, offset);
diff --git a/drivers/atm/nicstarmac.c b/drivers/atm/nicstarmac.c
index 480947f..842e26c 100644
--- a/drivers/atm/nicstarmac.c
+++ b/drivers/atm/nicstarmac.c
@@ -134,7 +134,7 @@
    /* Send read instruction */
    val = NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE ) & 0xFFFFFFF0;
 
-   for (i=0; i<sizeof rdsrtab/sizeof rdsrtab[0]; i++)
+   for (i=0; i<ARRAY_SIZE(rdsrtab); i++)
    {
 	NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
 		(val | rdsrtab[i]) );
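
The open-coded sizeof rdsrtab/sizeof rdsrtab[0] bound becomes ARRAY_SIZE(), the <linux/kernel.h> helper that computes the same value. A tiny sketch with a placeholder table and consumer:

    static const u32 demo_tab[] = { 0x01, 0x02, 0x04 };
    int i;

    /* same as sizeof(demo_tab) / sizeof(demo_tab[0]), but self-describing */
    for (i = 0; i < ARRAY_SIZE(demo_tab); i++)
            demo_consume(demo_tab[i]);      /* demo_consume() is a placeholder */
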
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 020a87a..58583c6 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -915,7 +915,7 @@
 	unsigned long flags;
 	u32 *loop;
 	unsigned short chan;
-	int pcr,unlimited;
+	int unlimited;
 
 	DPRINTK("open_tx_first\n");
 	zatm_dev = ZATM_DEV(vcc->dev);
@@ -936,6 +936,8 @@
 	    vcc->qos.txtp.max_pcr >= ATM_OC3_PCR);
 	if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr;
 	else {
+		int uninitialized_var(pcr);
+
 		if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU;
 		if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr,
 		    vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited))
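
The zatm hunk narrows pcr to the block where it is used and declares it with uninitialized_var(), which silences gcc's false-positive "may be used uninitialized" warning for that one variable. From memory, the gcc flavour of the macro simply expands to "x = x"; that detail is not part of this patch. A placeholder sketch of the idiom:

    int uninitialized_var(val);     /* tells gcc: trust us, val is set before use */

    if (!demo_get_value(&val))      /* demo_get_value() is a placeholder */
            return -EINVAL;
    return val;                     /* would otherwise risk a -Wuninitialized warning */
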
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 0455aa7..3599ab2 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -24,6 +24,8 @@
 #include "base.h"
 #include "power/power.h"
 
+extern const char *kobject_actions[];
+
 int (*platform_notify)(struct device * dev) = NULL;
 int (*platform_notify_remove)(struct device * dev) = NULL;
 
@@ -303,10 +305,25 @@
 static ssize_t store_uevent(struct device *dev, struct device_attribute *attr,
 			    const char *buf, size_t count)
 {
-	if (memcmp(buf, "add", 3) != 0)
-		dev_err(dev, "uevent: unsupported action-string; this will "
-			"be ignored in a future kernel version");
+	size_t len = count;
+	enum kobject_action action;
+
+	if (len && buf[len-1] == '\n')
+		len--;
+
+	for (action = 0; action < KOBJ_MAX; action++) {
+		if (strncmp(kobject_actions[action], buf, len) != 0)
+			continue;
+		if (kobject_actions[action][len] != '\0')
+			continue;
+		kobject_uevent(&dev->kobj, action);
+		goto out;
+	}
+
+	dev_err(dev, "uevent: unsupported action-string; this will "
+		     "be ignored in a future kernel version\n");
 	kobject_uevent(&dev->kobj, KOBJ_ADD);
+out:
 	return count;
 }
 
@@ -643,6 +660,82 @@
 	return 0;
 }
 
+static int device_add_class_symlinks(struct device *dev)
+{
+	int error;
+
+	if (!dev->class)
+		return 0;
+	error = sysfs_create_link(&dev->kobj, &dev->class->subsys.kobj,
+				  "subsystem");
+	if (error)
+		goto out;
+	/*
+	 * If this is not a "fake" compatible device, then create the
+	 * symlink from the class to the device.
+	 */
+	if (dev->kobj.parent != &dev->class->subsys.kobj) {
+		error = sysfs_create_link(&dev->class->subsys.kobj, &dev->kobj,
+					  dev->bus_id);
+		if (error)
+			goto out_subsys;
+	}
+	/* only bus-device parents get a "device"-link */
+	if (dev->parent && dev->parent->bus) {
+		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
+					  "device");
+		if (error)
+			goto out_busid;
+#ifdef CONFIG_SYSFS_DEPRECATED
+		{
+			char * class_name = make_class_name(dev->class->name,
+								&dev->kobj);
+			if (class_name)
+				error = sysfs_create_link(&dev->parent->kobj,
+							&dev->kobj, class_name);
+			kfree(class_name);
+			if (error)
+				goto out_device;
+		}
+#endif
+	}
+	return 0;
+
+#ifdef CONFIG_SYSFS_DEPRECATED
+out_device:
+	if (dev->parent)
+		sysfs_remove_link(&dev->kobj, "device");
+#endif
+out_busid:
+	if (dev->kobj.parent != &dev->class->subsys.kobj)
+		sysfs_remove_link(&dev->class->subsys.kobj, dev->bus_id);
+out_subsys:
+	sysfs_remove_link(&dev->kobj, "subsystem");
+out:
+	return error;
+}
+
+static void device_remove_class_symlinks(struct device *dev)
+{
+	if (!dev->class)
+		return;
+	if (dev->parent) {
+#ifdef CONFIG_SYSFS_DEPRECATED
+		char *class_name;
+
+		class_name = make_class_name(dev->class->name, &dev->kobj);
+		if (class_name) {
+			sysfs_remove_link(&dev->parent->kobj, class_name);
+			kfree(class_name);
+		}
+#endif
+		sysfs_remove_link(&dev->kobj, "device");
+	}
+	if (dev->kobj.parent != &dev->class->subsys.kobj)
+		sysfs_remove_link(&dev->class->subsys.kobj, dev->bus_id);
+	sysfs_remove_link(&dev->kobj, "subsystem");
+}
+
 /**
  *	device_add - add device to device hierarchy.
  *	@dev:	device.
@@ -657,7 +750,6 @@
 int device_add(struct device *dev)
 {
 	struct device *parent = NULL;
-	char *class_name = NULL;
 	struct class_interface *class_intf;
 	int error = -EINVAL;
 
@@ -697,27 +789,9 @@
 			goto ueventattrError;
 	}
 
-	if (dev->class) {
-		sysfs_create_link(&dev->kobj, &dev->class->subsys.kobj,
-				  "subsystem");
-		/* If this is not a "fake" compatible device, then create the
-		 * symlink from the class to the device. */
-		if (dev->kobj.parent != &dev->class->subsys.kobj)
-			sysfs_create_link(&dev->class->subsys.kobj,
-					  &dev->kobj, dev->bus_id);
-		if (parent) {
-			sysfs_create_link(&dev->kobj, &dev->parent->kobj,
-							"device");
-#ifdef CONFIG_SYSFS_DEPRECATED
-			class_name = make_class_name(dev->class->name,
-							&dev->kobj);
-			if (class_name)
-				sysfs_create_link(&dev->parent->kobj,
-						  &dev->kobj, class_name);
-#endif
-		}
-	}
-
+	error = device_add_class_symlinks(dev);
+	if (error)
+		goto SymlinkError;
 	error = device_add_attrs(dev);
 	if (error)
 		goto AttrsError;
@@ -744,7 +818,6 @@
 		up(&dev->class->sem);
 	}
  Done:
-	kfree(class_name);
 	put_device(dev);
 	return error;
  BusError:
@@ -755,6 +828,8 @@
 					     BUS_NOTIFY_DEL_DEVICE, dev);
 	device_remove_attrs(dev);
  AttrsError:
+	device_remove_class_symlinks(dev);
+ SymlinkError:
 	if (MAJOR(dev->devt))
 		device_remove_file(dev, &devt_attr);
 
@@ -1139,7 +1214,7 @@
 {
 	char *old_class_name = NULL;
 	char *new_class_name = NULL;
-	char *old_symlink_name = NULL;
+	char *old_device_name = NULL;
 	int error;
 
 	dev = get_device(dev);
@@ -1153,42 +1228,49 @@
 		old_class_name = make_class_name(dev->class->name, &dev->kobj);
 #endif
 
-	if (dev->class) {
-		old_symlink_name = kmalloc(BUS_ID_SIZE, GFP_KERNEL);
-		if (!old_symlink_name) {
-			error = -ENOMEM;
-			goto out_free_old_class;
-		}
-		strlcpy(old_symlink_name, dev->bus_id, BUS_ID_SIZE);
+	old_device_name = kmalloc(BUS_ID_SIZE, GFP_KERNEL);
+	if (!old_device_name) {
+		error = -ENOMEM;
+		goto out;
 	}
-
+	strlcpy(old_device_name, dev->bus_id, BUS_ID_SIZE);
 	strlcpy(dev->bus_id, new_name, BUS_ID_SIZE);
 
 	error = kobject_rename(&dev->kobj, new_name);
+	if (error) {
+		strlcpy(dev->bus_id, old_device_name, BUS_ID_SIZE);
+		goto out;
+	}
 
 #ifdef CONFIG_SYSFS_DEPRECATED
 	if (old_class_name) {
 		new_class_name = make_class_name(dev->class->name, &dev->kobj);
 		if (new_class_name) {
-			sysfs_create_link(&dev->parent->kobj, &dev->kobj,
-					  new_class_name);
+			error = sysfs_create_link(&dev->parent->kobj,
+						  &dev->kobj, new_class_name);
+			if (error)
+				goto out;
 			sysfs_remove_link(&dev->parent->kobj, old_class_name);
 		}
 	}
 #endif
 
 	if (dev->class) {
-		sysfs_remove_link(&dev->class->subsys.kobj,
-				  old_symlink_name);
-		sysfs_create_link(&dev->class->subsys.kobj, &dev->kobj,
-				  dev->bus_id);
+		sysfs_remove_link(&dev->class->subsys.kobj, old_device_name);
+		error = sysfs_create_link(&dev->class->subsys.kobj, &dev->kobj,
+					  dev->bus_id);
+		if (error) {
+			/* Uh... how to unravel this if restoring can fail? */
+			dev_err(dev, "%s: sysfs_create_symlink failed (%d)\n",
+				__FUNCTION__, error);
+		}
 	}
+out:
 	put_device(dev);
 
 	kfree(new_class_name);
-	kfree(old_symlink_name);
- out_free_old_class:
 	kfree(old_class_name);
+	kfree(old_device_name);
 
 	return error;
 }
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 91f2309..966a5e2 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,10 +1,10 @@
 obj-y			:= shutdown.o
-obj-$(CONFIG_PM)	+= main.o suspend.o resume.o runtime.o sysfs.o
+obj-$(CONFIG_PM)	+= main.o suspend.o resume.o sysfs.o
 obj-$(CONFIG_PM_TRACE)	+= trace.o
 
 ifeq ($(CONFIG_DEBUG_DRIVER),y)
 EXTRA_CFLAGS += -DDEBUG
 endif
-ifeq ($(CONFIG_PM_DEBUG),y)
+ifeq ($(CONFIG_PM_VERBOSE),y)
 EXTRA_CFLAGS += -DDEBUG
 endif
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 2760f25..591a0dd 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -62,11 +62,6 @@
  */
 extern int suspend_device(struct device *, pm_message_t);
 
-
-/*
- * runtime.c
- */
-
 #else /* CONFIG_PM */
 
 
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
deleted file mode 100644
index df6174d..0000000
--- a/drivers/base/power/runtime.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * drivers/base/power/runtime.c - Handling dynamic device power management.
- *
- * Copyright (c) 2003 Patrick Mochel
- * Copyright (c) 2003 Open Source Development Lab
- *
- */
-
-#include <linux/device.h>
-#include "power.h"
-
-
-static void runtime_resume(struct device * dev)
-{
-	dev_dbg(dev, "resuming\n");
-	if (!dev->power.power_state.event)
-		return;
-	if (!resume_device(dev))
-		dev->power.power_state = PMSG_ON;
-}
-
-
-/**
- *	dpm_runtime_resume - Power one device back on.
- *	@dev:	Device.
- *
- *	Bring one device back to the on state by first powering it
- *	on, then restoring state. We only operate on devices that aren't
- *	already on.
- *	FIXME: We need to handle devices that are in an unknown state.
- */
-
-void dpm_runtime_resume(struct device * dev)
-{
-	mutex_lock(&dpm_mtx);
-	runtime_resume(dev);
-	mutex_unlock(&dpm_mtx);
-}
-EXPORT_SYMBOL(dpm_runtime_resume);
-
-
-/**
- *	dpm_runtime_suspend - Put one device in low-power state.
- *	@dev:	Device.
- *	@state:	State to enter.
- */
-
-int dpm_runtime_suspend(struct device * dev, pm_message_t state)
-{
-	int error = 0;
-
-	mutex_lock(&dpm_mtx);
-	if (dev->power.power_state.event == state.event)
-		goto Done;
-
-	if (dev->power.power_state.event)
-		runtime_resume(dev);
-
-	if (!(error = suspend_device(dev, state)))
-		dev->power.power_state = state;
- Done:
-	mutex_unlock(&dpm_mtx);
-	return error;
-}
-EXPORT_SYMBOL(dpm_runtime_suspend);
-
-
-#if 0
-/**
- *	dpm_set_power_state - Update power_state field.
- *	@dev:	Device.
- *	@state:	Power state device is in.
- *
- *	This is an update mechanism for drivers to notify the core
- *	what power state a device is in. Device probing code may not
- *	always be able to tell, but we need accurate information to
- *	work reliably.
- */
-void dpm_set_power_state(struct device * dev, pm_message_t state)
-{
-	mutex_lock(&dpm_mtx);
-	dev->power.power_state = state;
-	mutex_unlock(&dpm_mtx);
-}
-#endif  /*  0  */
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 2d47517..f2ed179 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -7,69 +7,6 @@
 #include "power.h"
 
 
-#ifdef	CONFIG_PM_SYSFS_DEPRECATED
-
-/**
- *	state - Control current power state of device
- *
- *	show() returns the current power state of the device. '0' indicates
- *	the device is on. Other values (2) indicate the device is in some low
- *	power state.
- *
- *	store() sets the current power state, which is an integer valued
- *	0, 2, or 3.  Devices with bus.suspend_late(), or bus.resume_early()
- *	methods fail this operation; those methods couldn't be called.
- *	Otherwise,
- *
- *	- If the recorded dev->power.power_state.event matches the
- *	  target value, nothing is done.
- *	- If the recorded event code is nonzero, the device is reactivated
- *	  by calling bus.resume() and/or class.resume().
- *	- If the target value is nonzero, the device is suspended by
- *	  calling class.suspend() and/or bus.suspend() with event code
- *	  PM_EVENT_SUSPEND.
- *
- *	This mechanism is DEPRECATED and should only be used for testing.
- */
-
-static ssize_t state_show(struct device * dev, struct device_attribute *attr, char * buf)
-{
-	if (dev->power.power_state.event)
-		return sprintf(buf, "2\n");
-	else
-		return sprintf(buf, "0\n");
-}
-
-static ssize_t state_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t n)
-{
-	pm_message_t state;
-	int error = -EINVAL;
-
-	/* disallow incomplete suspend sequences */
-	if (dev->bus && (dev->bus->suspend_late || dev->bus->resume_early))
-		return error;
-
-	state.event = PM_EVENT_SUSPEND;
-	/* Older apps expected to write "3" here - confused with PCI D3 */
-	if ((n == 1) && !strcmp(buf, "3"))
-		error = dpm_runtime_suspend(dev, state);
-
-	if ((n == 1) && !strcmp(buf, "2"))
-		error = dpm_runtime_suspend(dev, state);
-
-	if ((n == 1) && !strcmp(buf, "0")) {
-		dpm_runtime_resume(dev);
-		error = 0;
-	}
-
-	return error ? error : n;
-}
-
-static DEVICE_ATTR(state, 0644, state_show, state_store);
-
-
-#endif	/* CONFIG_PM_SYSFS_DEPRECATED */
-
 /*
  *	wakeup - Report/change current wakeup option for device
  *
@@ -143,9 +80,6 @@
 
 
 static struct attribute * power_attrs[] = {
-#ifdef	CONFIG_PM_SYSFS_DEPRECATED
-	&dev_attr_state.attr,
-#endif
 	&dev_attr_wakeup.attr,
 	NULL,
 };
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index c5a6157..a4a3119 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -421,4 +421,19 @@
 
 source "drivers/s390/block/Kconfig"
 
+config XILINX_SYSACE
+	tristate "Xilinx SystemACE support"
+	depends on 4xx
+	help
+	  Include support for the Xilinx SystemACE CompactFlash interface
+
+config XEN_BLKDEV_FRONTEND
+	tristate "Xen virtual block device support"
+	depends on XEN
+	default y
+	help
+	  This driver implements the front-end of the Xen virtual
+	  block device driver.  It communicates with a back-end driver
+	  in another domain which drives the actual block device.
+
 endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 7926be8..819c829 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -17,6 +17,7 @@
 obj-$(CONFIG_BLK_CPQ_DA)	+= cpqarray.o
 obj-$(CONFIG_BLK_CPQ_CISS_DA)  += cciss.o
 obj-$(CONFIG_BLK_DEV_DAC960)	+= DAC960.o
+obj-$(CONFIG_XILINX_SYSACE)	+= xsysace.o
 obj-$(CONFIG_CDROM_PKTCDVD)	+= pktcdvd.o
 obj-$(CONFIG_SUNVDC)		+= sunvdc.o
 
@@ -28,3 +29,5 @@
 obj-$(CONFIG_BLK_DEV_SX8)	+= sx8.o
 obj-$(CONFIG_BLK_DEV_UB)	+= ub.o
 
+obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= xen-blkfront.o
+obj-$(CONFIG_LGUEST_GUEST)	+= lguest_blk.o
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 0fcad43..a2d6612 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1170,7 +1170,7 @@
 	case SG_EMULATED_HOST:
 	case SG_IO:
 	case SCSI_IOCTL_SEND_COMMAND:
-		return scsi_cmd_ioctl(filep, disk, cmd, argp);
+		return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp);
 
 	/* scsi_cmd_ioctl would normally handle these, below, but */
 	/* they aren't a good fit for cciss, as CD-ROMs are */
diff --git a/drivers/block/lguest_blk.c b/drivers/block/lguest_blk.c
new file mode 100644
index 0000000..1634c2d
--- /dev/null
+++ b/drivers/block/lguest_blk.c
@@ -0,0 +1,275 @@
+/* A simple block driver for lguest.
+ *
+ * Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+//#define DEBUG
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/lguest_bus.h>
+
+static char next_block_index = 'a';
+
+struct blockdev
+{
+	spinlock_t lock;
+
+	/* The disk structure for the kernel. */
+	struct gendisk *disk;
+
+	/* The major number for this disk. */
+	int major;
+	int irq;
+
+	unsigned long phys_addr;
+	/* The mapped block page. */
+	struct lguest_block_page *lb_page;
+
+	/* We only have a single request outstanding at a time. */
+	struct lguest_dma dma;
+	struct request *req;
+};
+
+/* Jens gave me this nice helper to end all chunks of a request. */
+static void end_entire_request(struct request *req, int uptodate)
+{
+	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
+		BUG();
+	add_disk_randomness(req->rq_disk);
+	blkdev_dequeue_request(req);
+	end_that_request_last(req, uptodate);
+}
+
+static irqreturn_t lgb_irq(int irq, void *_bd)
+{
+	struct blockdev *bd = _bd;
+	unsigned long flags;
+
+	if (!bd->req) {
+		pr_debug("No work!\n");
+		return IRQ_NONE;
+	}
+
+	if (!bd->lb_page->result) {
+		pr_debug("No result!\n");
+		return IRQ_NONE;
+	}
+
+	spin_lock_irqsave(&bd->lock, flags);
+	end_entire_request(bd->req, bd->lb_page->result == 1);
+	bd->req = NULL;
+	bd->dma.used_len = 0;
+	blk_start_queue(bd->disk->queue);
+	spin_unlock_irqrestore(&bd->lock, flags);
+	return IRQ_HANDLED;
+}
+
+static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
+{
+	unsigned int i = 0, idx, len = 0;
+	struct bio *bio;
+
+	rq_for_each_bio(bio, req) {
+		struct bio_vec *bvec;
+		bio_for_each_segment(bvec, bio, idx) {
+			BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
+			BUG_ON(!bvec->bv_len);
+			dma->addr[i] = page_to_phys(bvec->bv_page)
+				+ bvec->bv_offset;
+			dma->len[i] = bvec->bv_len;
+			len += bvec->bv_len;
+			i++;
+		}
+	}
+	if (i < LGUEST_MAX_DMA_SECTIONS)
+		dma->len[i] = 0;
+	return len;
+}
+
+static void empty_dma(struct lguest_dma *dma)
+{
+	dma->len[0] = 0;
+}
+
+static void setup_req(struct blockdev *bd,
+		      int type, struct request *req, struct lguest_dma *dma)
+{
+	bd->lb_page->type = type;
+	bd->lb_page->sector = req->sector;
+	bd->lb_page->result = 0;
+	bd->req = req;
+	bd->lb_page->bytes = req_to_dma(req, dma);
+}
+
+static void do_write(struct blockdev *bd, struct request *req)
+{
+	struct lguest_dma send;
+
+	pr_debug("lgb: WRITE sector %li\n", (long)req->sector);
+	setup_req(bd, 1, req, &send);
+
+	lguest_send_dma(bd->phys_addr, &send);
+}
+
+static void do_read(struct blockdev *bd, struct request *req)
+{
+	struct lguest_dma ping;
+
+	pr_debug("lgb: READ sector %li\n", (long)req->sector);
+	setup_req(bd, 0, req, &bd->dma);
+
+	empty_dma(&ping);
+	lguest_send_dma(bd->phys_addr, &ping);
+}
+
+static void do_lgb_request(request_queue_t *q)
+{
+	struct blockdev *bd;
+	struct request *req;
+
+again:
+	req = elv_next_request(q);
+	if (!req)
+		return;
+
+	bd = req->rq_disk->private_data;
+	/* Sometimes we get repeated requests after blk_stop_queue. */
+	if (bd->req)
+		return;
+
+	if (!blk_fs_request(req)) {
+		pr_debug("Got non-command 0x%08x\n", req->cmd_type);
+		req->errors++;
+		end_entire_request(req, 0);
+		goto again;
+	}
+
+	if (rq_data_dir(req) == WRITE)
+		do_write(bd, req);
+	else
+		do_read(bd, req);
+
+	/* Wait for interrupt to tell us it's done. */
+	blk_stop_queue(q);
+}
+
+static struct block_device_operations lguestblk_fops = {
+	.owner = THIS_MODULE,
+};
+
+static int lguestblk_probe(struct lguest_device *lgdev)
+{
+	struct blockdev *bd;
+	int err;
+	int irqflags = IRQF_SHARED;
+
+	bd = kmalloc(sizeof(*bd), GFP_KERNEL);
+	if (!bd)
+		return -ENOMEM;
+
+	spin_lock_init(&bd->lock);
+	bd->irq = lgdev_irq(lgdev);
+	bd->req = NULL;
+	bd->dma.used_len = 0;
+	bd->dma.len[0] = 0;
+	bd->phys_addr = (lguest_devices[lgdev->index].pfn << PAGE_SHIFT);
+
+	bd->lb_page = lguest_map(bd->phys_addr, 1);
+	if (!bd->lb_page) {
+		err = -ENOMEM;
+		goto out_free_bd;
+	}
+
+	bd->major = register_blkdev(0, "lguestblk");
+	if (bd->major < 0) {
+		err = bd->major;
+		goto out_unmap;
+	}
+
+	bd->disk = alloc_disk(1);
+	if (!bd->disk) {
+		err = -ENOMEM;
+		goto out_unregister_blkdev;
+	}
+
+	bd->disk->queue = blk_init_queue(do_lgb_request, &bd->lock);
+	if (!bd->disk->queue) {
+		err = -ENOMEM;
+		goto out_put_disk;
+	}
+
+	/* We can only handle a certain number of sg entries */
+	blk_queue_max_hw_segments(bd->disk->queue, LGUEST_MAX_DMA_SECTIONS);
+	/* Buffers must not cross page boundaries */
+	blk_queue_segment_boundary(bd->disk->queue, PAGE_SIZE-1);
+
+	sprintf(bd->disk->disk_name, "lgb%c", next_block_index++);
+	if (lguest_devices[lgdev->index].features & LGUEST_DEVICE_F_RANDOMNESS)
+		irqflags |= IRQF_SAMPLE_RANDOM;
+	err = request_irq(bd->irq, lgb_irq, irqflags, bd->disk->disk_name, bd);
+	if (err)
+		goto out_cleanup_queue;
+
+	err = lguest_bind_dma(bd->phys_addr, &bd->dma, 1, bd->irq);
+	if (err)
+		goto out_free_irq;
+
+	bd->disk->major = bd->major;
+	bd->disk->first_minor = 0;
+	bd->disk->private_data = bd;
+	bd->disk->fops = &lguestblk_fops;
+	/* This is initialized to the disk size by the other end. */
+	set_capacity(bd->disk, bd->lb_page->num_sectors);
+	add_disk(bd->disk);
+
+	printk(KERN_INFO "%s: device %i at major %d\n",
+	       bd->disk->disk_name, lgdev->index, bd->major);
+
+	lgdev->private = bd;
+	return 0;
+
+out_free_irq:
+	free_irq(bd->irq, bd);
+out_cleanup_queue:
+	blk_cleanup_queue(bd->disk->queue);
+out_put_disk:
+	put_disk(bd->disk);
+out_unregister_blkdev:
+	unregister_blkdev(bd->major, "lguestblk");
+out_unmap:
+	lguest_unmap(bd->lb_page);
+out_free_bd:
+	kfree(bd);
+	return err;
+}
+
+static struct lguest_driver lguestblk_drv = {
+	.name = "lguestblk",
+	.owner = THIS_MODULE,
+	.device_type = LGUEST_DEVICE_T_BLOCK,
+	.probe = lguestblk_probe,
+};
+
+static __init int lguestblk_init(void)
+{
+	return register_lguest_driver(&lguestblk_drv);
+}
+module_init(lguestblk_init);
+
+MODULE_DESCRIPTION("Lguest block driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 4503290..e425daa 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -68,6 +68,7 @@
 #include <linux/loop.h>
 #include <linux/compat.h>
 #include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/writeback.h>
 #include <linux/buffer_head.h>		/* for invalidate_bdev() */
 #include <linux/completion.h>
@@ -600,13 +601,6 @@
 	struct loop_device *lo = data;
 	struct bio *bio;
 
-	/*
-	 * loop can be used in an encrypted device,
-	 * hence, it mustn't be stopped at all
-	 * because it could be indirectly used during suspension
-	 */
-	current->flags |= PF_NOFREEZE;
-
 	set_user_nice(current, -20);
 
 	while (!kthread_should_stop() || lo->lo_bio) {
@@ -1574,8 +1568,7 @@
 		loop_del_one(lo);
 
 	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
-	if (unregister_blkdev(LOOP_MAJOR, "loop"))
-		printk(KERN_WARNING "loop: cannot unregister blkdev\n");
+	unregister_blkdev(LOOP_MAJOR, "loop");
 }
 
 module_init(loop_init);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 7c294a4..31be33e 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1593,6 +1593,7 @@
 	long min_sleep_time, residue;
 
 	set_user_nice(current, -20);
+	set_freezable();
 
 	for (;;) {
 		DECLARE_WAITQUEUE(wait, current);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 0f5e3ca..2288b55 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -45,8 +45,6 @@
 struct vdc_port {
 	struct vio_driver_state	vio;
 
-	struct vdc		*vp;
-
 	struct gendisk		*disk;
 
 	struct vdc_completion	*cmp;
@@ -72,8 +70,6 @@
 
 	struct vio_disk_geom	geom;
 	struct vio_disk_vtoc	label;
-
-	struct list_head	list;
 };
 
 static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
@@ -81,15 +77,6 @@
 	return container_of(vio, struct vdc_port, vio);
 }
 
-struct vdc {
-	/* Protects prot_list.  */
-	spinlock_t		lock;
-
-	struct vio_dev		*dev;
-
-	struct list_head	port_list;
-};
-
 /* Ordered from largest major to lowest */
 static struct vio_version vdc_versions[] = {
 	{ .major = 1, .minor = 0 },
@@ -747,21 +734,23 @@
 	.handshake_complete	= vdc_handshake_complete,
 };
 
+static void print_version(void)
+{
+	static int version_printed;
+
+	if (version_printed++ == 0)
+		printk(KERN_INFO "%s", version);
+}
+
 static int __devinit vdc_port_probe(struct vio_dev *vdev,
 				    const struct vio_device_id *id)
 {
 	struct mdesc_handle *hp;
 	struct vdc_port *port;
-	unsigned long flags;
-	struct vdc *vp;
 	const u64 *port_id;
 	int err;
 
-	vp = dev_get_drvdata(vdev->dev.parent);
-	if (!vp) {
-		printk(KERN_ERR PFX "Cannot find port parent vdc.\n");
-		return -ENODEV;
-	}
+	print_version();
 
 	hp = mdesc_grab();
 
@@ -783,7 +772,6 @@
 		goto err_out_release_mdesc;
 	}
 
-	port->vp = vp;
 	port->dev_no = *port_id;
 
 	if (port->dev_no >= 26)
@@ -818,12 +806,6 @@
 	if (err)
 		goto err_out_free_tx_ring;
 
-	INIT_LIST_HEAD(&port->list);
-
-	spin_lock_irqsave(&vp->lock, flags);
-	list_add(&port->list, &vp->port_list);
-	spin_unlock_irqrestore(&vp->lock, flags);
-
 	dev_set_drvdata(&vdev->dev, port);
 
 	mdesc_release(hp);
@@ -879,58 +861,6 @@
 	}
 };
 
-static int __devinit vdc_probe(struct vio_dev *vdev,
-			       const struct vio_device_id *id)
-{
-	static int vdc_version_printed;
-	struct vdc *vp;
-
-	if (vdc_version_printed++ == 0)
-		printk(KERN_INFO "%s", version);
-
-	vp = kzalloc(sizeof(struct vdc), GFP_KERNEL);
-	if (!vp)
-		return -ENOMEM;
-
-	spin_lock_init(&vp->lock);
-	vp->dev = vdev;
-	INIT_LIST_HEAD(&vp->port_list);
-
-	dev_set_drvdata(&vdev->dev, vp);
-
-	return 0;
-}
-
-static int vdc_remove(struct vio_dev *vdev)
-{
-
-	struct vdc *vp = dev_get_drvdata(&vdev->dev);
-
-	if (vp) {
-		kfree(vp);
-		dev_set_drvdata(&vdev->dev, NULL);
-	}
-	return 0;
-}
-
-static struct vio_device_id vdc_match[] = {
-	{
-		.type = "block",
-	},
-	{},
-};
-MODULE_DEVICE_TABLE(vio, vdc_match);
-
-static struct vio_driver vdc_driver = {
-	.id_table	= vdc_match,
-	.probe		= vdc_probe,
-	.remove		= vdc_remove,
-	.driver		= {
-		.name	= "vdc",
-		.owner	= THIS_MODULE,
-	}
-};
-
 static int __init vdc_init(void)
 {
 	int err;
@@ -940,19 +870,13 @@
 		goto out_err;
 
 	vdc_major = err;
-	err = vio_register_driver(&vdc_driver);
-	if (err)
-		goto out_unregister_blkdev;
 
 	err = vio_register_driver(&vdc_port_driver);
 	if (err)
-		goto out_unregister_vdc;
+		goto out_unregister_blkdev;
 
 	return 0;
 
-out_unregister_vdc:
-	vio_unregister_driver(&vdc_driver);
-
 out_unregister_blkdev:
 	unregister_blkdev(vdc_major, VDCBLK_NAME);
 	vdc_major = 0;
@@ -964,7 +888,6 @@
 static void __exit vdc_exit(void)
 {
 	vio_unregister_driver(&vdc_port_driver);
-	vio_unregister_driver(&vdc_driver);
 	unregister_blkdev(vdc_major, VDCBLK_NAME);
 }
 
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 54509eb..949ae93 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1608,7 +1608,7 @@
 	}
 #endif
 
-	host = kmalloc(sizeof(*host), GFP_KERNEL);
+	host = kzalloc(sizeof(*host), GFP_KERNEL);
 	if (!host) {
 		printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n",
 		       pci_name(pdev));
@@ -1616,7 +1616,6 @@
 		goto err_out_regions;
 	}
 
-	memset(host, 0, sizeof(*host));
 	host->pdev = pdev;
 	host->flags = pci_dac ? FL_DAC : 0;
 	spin_lock_init(&host->lock);
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 68592c3..dae3991 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -252,10 +252,10 @@
 	struct gendisk *disk = bdev->bd_disk;
 	struct viodasd_device *d = disk->private_data;
 
-	geo->sectors = d->sectors ? d->sectors : 0;
+	geo->sectors = d->sectors ? d->sectors : 32;
 	geo->heads = d->tracks ? d->tracks  : 64;
 	geo->cylinders = d->cylinders ? d->cylinders :
-		get_capacity(disk) / (geo->cylinders * geo->heads);
+		get_capacity(disk) / (geo->sectors * geo->heads);
 
 	return 0;
 }
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
new file mode 100644
index 0000000..6746c29
--- /dev/null
+++ b/drivers/block/xen-blkfront.c
@@ -0,0 +1,988 @@
+/*
+ * blkfront.c
+ *
+ * XenLinux virtual block device driver.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004, Christian Limpach
+ * Copyright (c) 2004, Andrew Warfield
+ * Copyright (c) 2005, Christopher Clark
+ * Copyright (c) 2005, XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/module.h>
+
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/blkif.h>
+
+#include <asm/xen/hypervisor.h>
+
+enum blkif_state {
+	BLKIF_STATE_DISCONNECTED,
+	BLKIF_STATE_CONNECTED,
+	BLKIF_STATE_SUSPENDED,
+};
+
+struct blk_shadow {
+	struct blkif_request req;
+	unsigned long request;
+	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+
+static struct block_device_operations xlvbd_block_fops;
+
+#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
+
+/*
+ * We have one of these per vbd, whether ide, scsi or 'other'.  They
+ * hang in private_data off the gendisk structure. We may end up
+ * putting all kinds of interesting stuff here :-)
+ */
+struct blkfront_info
+{
+	struct xenbus_device *xbdev;
+	dev_t dev;
+	struct gendisk *gd;
+	int vdevice;
+	blkif_vdev_t handle;
+	enum blkif_state connected;
+	int ring_ref;
+	struct blkif_front_ring ring;
+	unsigned int evtchn, irq;
+	struct request_queue *rq;
+	struct work_struct work;
+	struct gnttab_free_callback callback;
+	struct blk_shadow shadow[BLK_RING_SIZE];
+	unsigned long shadow_free;
+	int feature_barrier;
+
+	/**
+	 * The number of people holding this device open.  We won't allow a
+	 * hot-unplug unless this is 0.
+	 */
+	int users;
+};
+
+static DEFINE_SPINLOCK(blkif_io_lock);
+
+#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
+	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
+#define GRANT_INVALID_REF	0
+
+#define PARTS_PER_DISK		16
+
+#define BLKIF_MAJOR(dev) ((dev)>>8)
+#define BLKIF_MINOR(dev) ((dev) & 0xff)
+
+#define DEV_NAME	"xvd"	/* name in /dev */
+
+/* Information about our VBDs. */
+#define MAX_VBDS 64
+static LIST_HEAD(vbds_list);
+
+static int get_id_from_freelist(struct blkfront_info *info)
+{
+	unsigned long free = info->shadow_free;
+	BUG_ON(free > BLK_RING_SIZE);
+	info->shadow_free = info->shadow[free].req.id;
+	info->shadow[free].req.id = 0x0fffffee; /* debug */
+	return free;
+}
+
+static void add_id_to_freelist(struct blkfront_info *info,
+			       unsigned long id)
+{
+	info->shadow[id].req.id  = info->shadow_free;
+	info->shadow[id].request = 0;
+	info->shadow_free = id;
+}
+
+static void blkif_restart_queue_callback(void *arg)
+{
+	struct blkfront_info *info = (struct blkfront_info *)arg;
+	schedule_work(&info->work);
+}
+
+/*
+ * blkif_queue_request
+ *
+ * request block io
+ *
+ * id: for guest use only.
+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
+ * buffer: buffer to read/write into. this should be a
+ *   virtual address in the guest os.
+ */
+static int blkif_queue_request(struct request *req)
+{
+	struct blkfront_info *info = req->rq_disk->private_data;
+	unsigned long buffer_mfn;
+	struct blkif_request *ring_req;
+	struct bio *bio;
+	struct bio_vec *bvec;
+	int idx;
+	unsigned long id;
+	unsigned int fsect, lsect;
+	int ref;
+	grant_ref_t gref_head;
+
+	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
+		return 1;
+
+	if (gnttab_alloc_grant_references(
+		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
+		gnttab_request_free_callback(
+			&info->callback,
+			blkif_restart_queue_callback,
+			info,
+			BLKIF_MAX_SEGMENTS_PER_REQUEST);
+		return 1;
+	}
+
+	/* Fill out a communications ring structure. */
+	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
+	id = get_id_from_freelist(info);
+	info->shadow[id].request = (unsigned long)req;
+
+	ring_req->id = id;
+	ring_req->sector_number = (blkif_sector_t)req->sector;
+	ring_req->handle = info->handle;
+
+	ring_req->operation = rq_data_dir(req) ?
+		BLKIF_OP_WRITE : BLKIF_OP_READ;
+	if (blk_barrier_rq(req))
+		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+
+	ring_req->nr_segments = 0;
+	rq_for_each_bio (bio, req) {
+		bio_for_each_segment (bvec, bio, idx) {
+			BUG_ON(ring_req->nr_segments
+			       == BLKIF_MAX_SEGMENTS_PER_REQUEST);
+			buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
+			fsect = bvec->bv_offset >> 9;
+			lsect = fsect + (bvec->bv_len >> 9) - 1;
+			/* install a grant reference. */
+			ref = gnttab_claim_grant_reference(&gref_head);
+			BUG_ON(ref == -ENOSPC);
+
+			gnttab_grant_foreign_access_ref(
+				ref,
+				info->xbdev->otherend_id,
+				buffer_mfn,
+				rq_data_dir(req) );
+
+			info->shadow[id].frame[ring_req->nr_segments] =
+				mfn_to_pfn(buffer_mfn);
+
+			ring_req->seg[ring_req->nr_segments] =
+				(struct blkif_request_segment) {
+					.gref       = ref,
+					.first_sect = fsect,
+					.last_sect  = lsect };
+
+			ring_req->nr_segments++;
+		}
+	}
+
+	info->ring.req_prod_pvt++;
+
+	/* Keep a private copy so we can reissue requests when recovering. */
+	info->shadow[id].req = *ring_req;
+
+	gnttab_free_grant_references(gref_head);
+
+	return 0;
+}
+
+
+static inline void flush_requests(struct blkfront_info *info)
+{
+	int notify;
+
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
+
+	if (notify)
+		notify_remote_via_irq(info->irq);
+}
+
+/*
+ * do_blkif_request
+ *  read a block; request is in a request queue
+ */
+static void do_blkif_request(request_queue_t *rq)
+{
+	struct blkfront_info *info = NULL;
+	struct request *req;
+	int queued;
+
+	pr_debug("Entered do_blkif_request\n");
+
+	queued = 0;
+
+	while ((req = elv_next_request(rq)) != NULL) {
+		info = req->rq_disk->private_data;
+		if (!blk_fs_request(req)) {
+			end_request(req, 0);
+			continue;
+		}
+
+		if (RING_FULL(&info->ring))
+			goto wait;
+
+		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
+			 "(%u/%li) buffer:%p [%s]\n",
+			 req, req->cmd, (unsigned long)req->sector,
+			 req->current_nr_sectors,
+			 req->nr_sectors, req->buffer,
+			 rq_data_dir(req) ? "write" : "read");
+
+
+		blkdev_dequeue_request(req);
+		if (blkif_queue_request(req)) {
+			blk_requeue_request(rq, req);
+wait:
+			/* Avoid pointless unplugs. */
+			blk_stop_queue(rq);
+			break;
+		}
+
+		queued++;
+	}
+
+	if (queued != 0)
+		flush_requests(info);
+}
+
+static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
+{
+	request_queue_t *rq;
+
+	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
+	if (rq == NULL)
+		return -1;
+
+	elevator_init(rq, "noop");
+
+	/* Hard sector size and max sectors impersonate the equiv. hardware. */
+	blk_queue_hardsect_size(rq, sector_size);
+	blk_queue_max_sectors(rq, 512);
+
+	/* Each segment in a request is up to an aligned page in size. */
+	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+	blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+	/* Ensure a merged request will fit in a single I/O ring slot. */
+	blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+	blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
+	/* Make sure buffer addresses are sector-aligned. */
+	blk_queue_dma_alignment(rq, 511);
+
+	gd->queue = rq;
+
+	return 0;
+}
+
+
+static int xlvbd_barrier(struct blkfront_info *info)
+{
+	int err;
+
+	err = blk_queue_ordered(info->rq,
+				info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE,
+				NULL);
+
+	if (err)
+		return err;
+
+	printk(KERN_INFO "blkfront: %s: barriers %s\n",
+	       info->gd->disk_name,
+	       info->feature_barrier ? "enabled" : "disabled");
+	return 0;
+}
+
+
+static int xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity,
+			       int vdevice, u16 vdisk_info, u16 sector_size,
+			       struct blkfront_info *info)
+{
+	struct gendisk *gd;
+	int nr_minors = 1;
+	int err = -ENODEV;
+
+	BUG_ON(info->gd != NULL);
+	BUG_ON(info->rq != NULL);
+
+	if ((minor % PARTS_PER_DISK) == 0)
+		nr_minors = PARTS_PER_DISK;
+
+	gd = alloc_disk(nr_minors);
+	if (gd == NULL)
+		goto out;
+
+	if (nr_minors > 1)
+		sprintf(gd->disk_name, "%s%c", DEV_NAME,
+			'a' + minor / PARTS_PER_DISK);
+	else
+		sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
+			'a' + minor / PARTS_PER_DISK,
+			minor % PARTS_PER_DISK);
+
+	gd->major = XENVBD_MAJOR;
+	gd->first_minor = minor;
+	gd->fops = &xlvbd_block_fops;
+	gd->private_data = info;
+	gd->driverfs_dev = &(info->xbdev->dev);
+	set_capacity(gd, capacity);
+
+	if (xlvbd_init_blk_queue(gd, sector_size)) {
+		del_gendisk(gd);
+		goto out;
+	}
+
+	info->rq = gd->queue;
+	info->gd = gd;
+
+	if (info->feature_barrier)
+		xlvbd_barrier(info);
+
+	if (vdisk_info & VDISK_READONLY)
+		set_disk_ro(gd, 1);
+
+	if (vdisk_info & VDISK_REMOVABLE)
+		gd->flags |= GENHD_FL_REMOVABLE;
+
+	if (vdisk_info & VDISK_CDROM)
+		gd->flags |= GENHD_FL_CD;
+
+	return 0;
+
+ out:
+	return err;
+}
+
+static void kick_pending_request_queues(struct blkfront_info *info)
+{
+	if (!RING_FULL(&info->ring)) {
+		/* Re-enable calldowns. */
+		blk_start_queue(info->rq);
+		/* Kick things off immediately. */
+		do_blkif_request(info->rq);
+	}
+}
+
+static void blkif_restart_queue(struct work_struct *work)
+{
+	struct blkfront_info *info = container_of(work, struct blkfront_info, work);
+
+	spin_lock_irq(&blkif_io_lock);
+	if (info->connected == BLKIF_STATE_CONNECTED)
+		kick_pending_request_queues(info);
+	spin_unlock_irq(&blkif_io_lock);
+}
+
+static void blkif_free(struct blkfront_info *info, int suspend)
+{
+	/* Prevent new requests being issued until we fix things up. */
+	spin_lock_irq(&blkif_io_lock);
+	info->connected = suspend ?
+		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
+	/* No more blkif_request(). */
+	if (info->rq)
+		blk_stop_queue(info->rq);
+	/* No more gnttab callback work. */
+	gnttab_cancel_free_callback(&info->callback);
+	spin_unlock_irq(&blkif_io_lock);
+
+	/* Flush gnttab callback work. Must be done with no locks held. */
+	flush_scheduled_work();
+
+	/* Free resources associated with old device channel. */
+	if (info->ring_ref != GRANT_INVALID_REF) {
+		gnttab_end_foreign_access(info->ring_ref, 0,
+					  (unsigned long)info->ring.sring);
+		info->ring_ref = GRANT_INVALID_REF;
+		info->ring.sring = NULL;
+	}
+	if (info->irq)
+		unbind_from_irqhandler(info->irq, info);
+	info->evtchn = info->irq = 0;
+
+}
+
+static void blkif_completion(struct blk_shadow *s)
+{
+	int i;
+	for (i = 0; i < s->req.nr_segments; i++)
+		gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
+}
+
+static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+{
+	struct request *req;
+	struct blkif_response *bret;
+	RING_IDX i, rp;
+	unsigned long flags;
+	struct blkfront_info *info = (struct blkfront_info *)dev_id;
+	int uptodate;
+
+	spin_lock_irqsave(&blkif_io_lock, flags);
+
+	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
+		spin_unlock_irqrestore(&blkif_io_lock, flags);
+		return IRQ_HANDLED;
+	}
+
+ again:
+	rp = info->ring.sring->rsp_prod;
+	rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+	for (i = info->ring.rsp_cons; i != rp; i++) {
+		unsigned long id;
+		int ret;
+
+		bret = RING_GET_RESPONSE(&info->ring, i);
+		id   = bret->id;
+		req  = (struct request *)info->shadow[id].request;
+
+		blkif_completion(&info->shadow[id]);
+
+		add_id_to_freelist(info, id);
+
+		uptodate = (bret->status == BLKIF_RSP_OKAY);
+		switch (bret->operation) {
+		case BLKIF_OP_WRITE_BARRIER:
+			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
+				       info->gd->disk_name);
+				uptodate = -EOPNOTSUPP;
+				info->feature_barrier = 0;
+				xlvbd_barrier(info);
+			}
+			/* fall through */
+		case BLKIF_OP_READ:
+		case BLKIF_OP_WRITE:
+			if (unlikely(bret->status != BLKIF_RSP_OKAY))
+				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
+					"request: %x\n", bret->status);
+
+			ret = end_that_request_first(req, uptodate,
+				req->hard_nr_sectors);
+			BUG_ON(ret);
+			end_that_request_last(req, uptodate);
+			break;
+		default:
+			BUG();
+		}
+	}
+
+	info->ring.rsp_cons = i;
+
+	if (i != info->ring.req_prod_pvt) {
+		int more_to_do;
+		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
+		if (more_to_do)
+			goto again;
+	} else
+		info->ring.sring->rsp_event = i + 1;
+
+	kick_pending_request_queues(info);
+
+	spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+
+static int setup_blkring(struct xenbus_device *dev,
+			 struct blkfront_info *info)
+{
+	struct blkif_sring *sring;
+	int err;
+
+	info->ring_ref = GRANT_INVALID_REF;
+
+	sring = (struct blkif_sring *)__get_free_page(GFP_KERNEL);
+	if (!sring) {
+		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
+		return -ENOMEM;
+	}
+	SHARED_RING_INIT(sring);
+	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
+	if (err < 0) {
+		free_page((unsigned long)sring);
+		info->ring.sring = NULL;
+		goto fail;
+	}
+	info->ring_ref = err;
+
+	err = xenbus_alloc_evtchn(dev, &info->evtchn);
+	if (err)
+		goto fail;
+
+	err = bind_evtchn_to_irqhandler(info->evtchn,
+					blkif_interrupt,
+					IRQF_SAMPLE_RANDOM, "blkif", info);
+	if (err <= 0) {
+		xenbus_dev_fatal(dev, err,
+				 "bind_evtchn_to_irqhandler failed");
+		goto fail;
+	}
+	info->irq = err;
+
+	return 0;
+fail:
+	blkif_free(info, 0);
+	return err;
+}
+
+
+/* Common code used when first setting up, and when resuming. */
+static int talk_to_backend(struct xenbus_device *dev,
+			   struct blkfront_info *info)
+{
+	const char *message = NULL;
+	struct xenbus_transaction xbt;
+	int err;
+
+	/* Create shared ring, alloc event channel. */
+	err = setup_blkring(dev, info);
+	if (err)
+		goto out;
+
+again:
+	err = xenbus_transaction_start(&xbt);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "starting transaction");
+		goto destroy_blkring;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename,
+			    "ring-ref", "%u", info->ring_ref);
+	if (err) {
+		message = "writing ring-ref";
+		goto abort_transaction;
+	}
+	err = xenbus_printf(xbt, dev->nodename,
+			    "event-channel", "%u", info->evtchn);
+	if (err) {
+		message = "writing event-channel";
+		goto abort_transaction;
+	}
+
+	err = xenbus_transaction_end(xbt, 0);
+	if (err) {
+		if (err == -EAGAIN)
+			goto again;
+		xenbus_dev_fatal(dev, err, "completing transaction");
+		goto destroy_blkring;
+	}
+
+	xenbus_switch_state(dev, XenbusStateInitialised);
+
+	return 0;
+
+ abort_transaction:
+	xenbus_transaction_end(xbt, 1);
+	if (message)
+		xenbus_dev_fatal(dev, err, "%s", message);
+ destroy_blkring:
+	blkif_free(info, 0);
+ out:
+	return err;
+}
+
+
+/**
+ * Entry point to this code when a new device is created.  Allocate the basic
+ * structures and the ring buffer for communication with the backend, and
+ * inform the backend of the appropriate details for those.  Switch to
+ * Initialised state.
+ */
+static int blkfront_probe(struct xenbus_device *dev,
+			  const struct xenbus_device_id *id)
+{
+	int err, vdevice, i;
+	struct blkfront_info *info;
+
+	/* FIXME: Use dynamic device id if this is not set. */
+	err = xenbus_scanf(XBT_NIL, dev->nodename,
+			   "virtual-device", "%i", &vdevice);
+	if (err != 1) {
+		xenbus_dev_fatal(dev, err, "reading virtual-device");
+		return err;
+	}
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
+		return -ENOMEM;
+	}
+
+	info->xbdev = dev;
+	info->vdevice = vdevice;
+	info->connected = BLKIF_STATE_DISCONNECTED;
+	INIT_WORK(&info->work, blkif_restart_queue);
+
+	for (i = 0; i < BLK_RING_SIZE; i++)
+		info->shadow[i].req.id = i+1;
+	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+
+	/* Front end dir is a number, which is used as the id. */
+	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
+	dev->dev.driver_data = info;
+
+	err = talk_to_backend(dev, info);
+	if (err) {
+		kfree(info);
+		dev->dev.driver_data = NULL;
+		return err;
+	}
+
+	return 0;
+}
+
+
+static int blkif_recover(struct blkfront_info *info)
+{
+	int i;
+	struct blkif_request *req;
+	struct blk_shadow *copy;
+	int j;
+
+	/* Stage 1: Make a safe copy of the shadow state. */
+	copy = kmalloc(sizeof(info->shadow), GFP_KERNEL);
+	if (!copy)
+		return -ENOMEM;
+	memcpy(copy, info->shadow, sizeof(info->shadow));
+
+	/* Stage 2: Set up free list. */
+	memset(&info->shadow, 0, sizeof(info->shadow));
+	for (i = 0; i < BLK_RING_SIZE; i++)
+		info->shadow[i].req.id = i+1;
+	info->shadow_free = info->ring.req_prod_pvt;
+	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+
+	/* Stage 3: Find pending requests and requeue them. */
+	for (i = 0; i < BLK_RING_SIZE; i++) {
+		/* Not in use? */
+		if (copy[i].request == 0)
+			continue;
+
+		/* Grab a request slot and copy shadow state into it. */
+		req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
+		*req = copy[i].req;
+
+		/* We get a new request id, and must reset the shadow state. */
+		req->id = get_id_from_freelist(info);
+		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
+
+		/* Rewrite any grant references invalidated by susp/resume. */
+		for (j = 0; j < req->nr_segments; j++)
+			gnttab_grant_foreign_access_ref(
+				req->seg[j].gref,
+				info->xbdev->otherend_id,
+				pfn_to_mfn(info->shadow[req->id].frame[j]),
+				rq_data_dir(
+					(struct request *)
+					info->shadow[req->id].request));
+		info->shadow[req->id].req = *req;
+
+		info->ring.req_prod_pvt++;
+	}
+
+	kfree(copy);
+
+	xenbus_switch_state(info->xbdev, XenbusStateConnected);
+
+	spin_lock_irq(&blkif_io_lock);
+
+	/* Now safe for us to use the shared ring */
+	info->connected = BLKIF_STATE_CONNECTED;
+
+	/* Send off requeued requests */
+	flush_requests(info);
+
+	/* Kick any other new requests queued since we resumed */
+	kick_pending_request_queues(info);
+
+	spin_unlock_irq(&blkif_io_lock);
+
+	return 0;
+}
+
+/**
+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
+ * driver restart.  We tear down our blkif structure and recreate it, but
+ * leave the device-layer structures intact so that this is transparent to the
+ * rest of the kernel.
+ */
+static int blkfront_resume(struct xenbus_device *dev)
+{
+	struct blkfront_info *info = dev->dev.driver_data;
+	int err;
+
+	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
+
+	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
+
+	err = talk_to_backend(dev, info);
+	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
+		err = blkif_recover(info);
+
+	return err;
+}
+
+
+/*
+ * Invoked when the backend is finally 'ready' (and has provided the
+ * details about the physical device - #sectors, size, etc.).
+ */
+static void blkfront_connect(struct blkfront_info *info)
+{
+	unsigned long long sectors;
+	unsigned long sector_size;
+	unsigned int binfo;
+	int err;
+
+	if ((info->connected == BLKIF_STATE_CONNECTED) ||
+	    (info->connected == BLKIF_STATE_SUSPENDED) )
+		return;
+
+	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
+		__func__, info->xbdev->otherend);
+
+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			    "sectors", "%llu", &sectors,
+			    "info", "%u", &binfo,
+			    "sector-size", "%lu", &sector_size,
+			    NULL);
+	if (err) {
+		xenbus_dev_fatal(info->xbdev, err,
+				 "reading backend fields at %s",
+				 info->xbdev->otherend);
+		return;
+	}
+
+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			    "feature-barrier", "%lu", &info->feature_barrier,
+			    NULL);
+	if (err)
+		info->feature_barrier = 0;
+
+	err = xlvbd_alloc_gendisk(BLKIF_MINOR(info->vdevice),
+				  sectors, info->vdevice,
+				  binfo, sector_size, info);
+	if (err) {
+		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
+				 info->xbdev->otherend);
+		return;
+	}
+
+	xenbus_switch_state(info->xbdev, XenbusStateConnected);
+
+	/* Kick pending requests. */
+	spin_lock_irq(&blkif_io_lock);
+	info->connected = BLKIF_STATE_CONNECTED;
+	kick_pending_request_queues(info);
+	spin_unlock_irq(&blkif_io_lock);
+
+	add_disk(info->gd);
+}
+
+/**
+ * Handle the change of state of the backend to Closing.  We must delete our
+ * device-layer structures now, to ensure that writes are flushed through to
+ * the backend.  Once this is done, we can switch to Closed in
+ * acknowledgement.
+ */
+static void blkfront_closing(struct xenbus_device *dev)
+{
+	struct blkfront_info *info = dev->dev.driver_data;
+	unsigned long flags;
+
+	dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);
+
+	if (info->rq == NULL)
+		goto out;
+
+	spin_lock_irqsave(&blkif_io_lock, flags);
+
+	del_gendisk(info->gd);
+
+	/* No more blkif_request(). */
+	blk_stop_queue(info->rq);
+
+	/* No more gnttab callback work. */
+	gnttab_cancel_free_callback(&info->callback);
+	spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+	/* Flush gnttab callback work. Must be done with no locks held. */
+	flush_scheduled_work();
+
+	blk_cleanup_queue(info->rq);
+	info->rq = NULL;
+
+ out:
+	xenbus_frontend_closed(dev);
+}
+
+/**
+ * Callback received when the backend's state changes.
+ */
+static void backend_changed(struct xenbus_device *dev,
+			    enum xenbus_state backend_state)
+{
+	struct blkfront_info *info = dev->dev.driver_data;
+	struct block_device *bd;
+
+	dev_dbg(&dev->dev, "blkfront:backend_changed.\n");
+
+	switch (backend_state) {
+	case XenbusStateInitialising:
+	case XenbusStateInitWait:
+	case XenbusStateInitialised:
+	case XenbusStateUnknown:
+	case XenbusStateClosed:
+		break;
+
+	case XenbusStateConnected:
+		blkfront_connect(info);
+		break;
+
+	case XenbusStateClosing:
+		bd = bdget(info->dev);
+		if (bd == NULL)
+			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
+
+		mutex_lock(&bd->bd_mutex);
+		if (info->users > 0)
+			xenbus_dev_error(dev, -EBUSY,
+					 "Device in use; refusing to close");
+		else
+			blkfront_closing(dev);
+		mutex_unlock(&bd->bd_mutex);
+		bdput(bd);
+		break;
+	}
+}
+
+static int blkfront_remove(struct xenbus_device *dev)
+{
+	struct blkfront_info *info = dev->dev.driver_data;
+
+	dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);
+
+	blkif_free(info, 0);
+
+	kfree(info);
+
+	return 0;
+}
+
+static int blkif_open(struct inode *inode, struct file *filep)
+{
+	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
+	info->users++;
+	return 0;
+}
+
+static int blkif_release(struct inode *inode, struct file *filep)
+{
+	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
+	info->users--;
+	if (info->users == 0) {
+		/* Check whether we have been instructed to close.  We will
+		   have ignored this request initially, as the device was
+		   still mounted. */
+		struct xenbus_device *dev = info->xbdev;
+		enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
+
+		if (state == XenbusStateClosing)
+			blkfront_closing(dev);
+	}
+	return 0;
+}
+
+static struct block_device_operations xlvbd_block_fops =
+{
+	.owner = THIS_MODULE,
+	.open = blkif_open,
+	.release = blkif_release,
+};
+
+
+static struct xenbus_device_id blkfront_ids[] = {
+	{ "vbd" },
+	{ "" }
+};
+
+static struct xenbus_driver blkfront = {
+	.name = "vbd",
+	.owner = THIS_MODULE,
+	.ids = blkfront_ids,
+	.probe = blkfront_probe,
+	.remove = blkfront_remove,
+	.resume = blkfront_resume,
+	.otherend_changed = backend_changed,
+};
+
+static int __init xlblk_init(void)
+{
+	if (!is_running_on_xen())
+		return -ENODEV;
+
+	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
+		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
+		       XENVBD_MAJOR, DEV_NAME);
+		return -ENODEV;
+	}
+
+	return xenbus_register_frontend(&blkfront);
+}
+module_init(xlblk_init);
+
+
+static void xlblk_exit(void)
+{
+	return xenbus_unregister_driver(&blkfront);
+}
+module_exit(xlblk_exit);
+
+MODULE_DESCRIPTION("Xen virtual block device frontend");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
new file mode 100644
index 0000000..732ec63
--- /dev/null
+++ b/drivers/block/xsysace.c
@@ -0,0 +1,1164 @@
+/*
+ * Xilinx SystemACE device driver
+ *
+ * Copyright 2007 Secret Lab Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+/*
+ * The SystemACE chip is designed to configure FPGAs by loading an FPGA
+ * bitstream from a file on a CF card and squirting it into FPGAs connected
+ * to the SystemACE JTAG chain.  It also has the advantage of providing an
+ * MPU interface which can be used to control the FPGA configuration process
+ * and to use the attached CF card for general purpose storage.
+ *
+ * This driver is a block device driver for the SystemACE.
+ *
+ * Initialization:
+ *    The driver registers itself as a platform_device driver at module
+ *    load time.  The platform bus will take care of calling the
+ *    ace_probe() method for all SystemACE instances in the system.  Any
+ *    number of SystemACE instances are supported.  ace_probe() calls
+ *    ace_setup() which initialized all data structures, reads the CF
+ *    id structure and registers the device.
+ *
+ * Processing:
+ *    Just about all of the heavy lifting in this driver is performed by
+ *    a Finite State Machine (FSM).  The driver needs to wait on a number
+ *    of events; some are raised by interrupts, others need to be polled
+ *    for.  Describing all of the behaviour in an FSM seems to be the
+ *    easiest way to keep the complexity low and make it easy to
+ *    understand what the driver is doing.  If the block ops or the
+ *    request function need to interact with the hardware, then they
+ *    simply need to flag the request and kick off FSM processing.
+ *
+ *    The FSM itself is atomic-safe code which can be run from any
+ *    context.  The general process flow is:
+ *    1. obtain the ace->lock spinlock.
+ *    2. loop on ace_fsm_dostate() until the ace->fsm_continue flag is
+ *       cleared.
+ *    3. release the lock.
+ *
+ *    Individual states do not sleep in any way.  If a condition needs to
+ *    be waited for, then the state must clear the fsm_continue flag and
+ *    either schedule the FSM to be run again at a later time, or expect
+ *    an interrupt to call the FSM when the desired condition is met.
+ *
+ *    In normal operation, the FSM is processed at interrupt context
+ *    either when the driver's tasklet is scheduled, or when an irq is
+ *    raised by the hardware.  The tasklet can be scheduled at any time.
+ *    The request method in particular schedules the tasklet when a new
+ *    request has been indicated by the block layer.  Once started, the
+ *    FSM proceeds as far as it can with the request until it needs to
+ *    wait on a hardware event.  At this point, it must yield execution.
+ *
+ *    A state has two options when yielding execution:
+ *    1. ace_fsm_yield()
+ *       - Call if the state needs to poll for an event.
+ *       - clears the fsm_continue flag to exit the processing loop
+ *       - reschedules the tasklet to run again as soon as possible
+ *    2. ace_fsm_yieldirq()
+ *       - Call if an irq is expected from the HW
+ *       - clears the fsm_continue flag to exit the processing loop
+ *       - does not reschedule the tasklet so the FSM will not be processed
+ *         again until an irq is received.
+ *    After calling a yield function, the state must return control back
+ *    to the FSM main loop.
+ *
+ *    Additionally, the driver maintains a kernel timer which can process
+ *    the FSM.  If the FSM gets stalled, typically due to a missed
+ *    interrupt, then the kernel timer will expire and the driver can
+ *    continue where it left off.
+ *
+ * To Do:
+ *    - Add FPGA configuration control interface.
+ *    - Request major number from lanana
+ */
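+
+/*
+ * Illustrative sketch only (it mirrors the flow described above and is
+ * not additional driver code): the tasklet, the stall timer and the
+ * interrupt handler below all run the FSM with the same pattern:
+ *
+ *        spin_lock_irqsave(&ace->lock, flags);
+ *        ace->fsm_continue_flag = 1;
+ *        while (ace->fsm_continue_flag)
+ *                ace_fsm_dostate(ace);
+ *        spin_unlock_irqrestore(&ace->lock, flags);
+ *
+ * Individual states end the loop by clearing fsm_continue_flag through
+ * ace_fsm_yield() (poll again via the tasklet) or ace_fsm_yieldirq()
+ * (wait for the next interrupt).
+ */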
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/platform_device.h>
+
+MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
+MODULE_DESCRIPTION("Xilinx SystemACE device driver");
+MODULE_LICENSE("GPL");
+
+/* SystemACE register definitions */
+#define ACE_BUSMODE (0x00)
+
+#define ACE_STATUS (0x04)
+#define ACE_STATUS_CFGLOCK      (0x00000001)
+#define ACE_STATUS_MPULOCK      (0x00000002)
+#define ACE_STATUS_CFGERROR     (0x00000004)	/* config controller error */
+#define ACE_STATUS_CFCERROR     (0x00000008)	/* CF controller error */
+#define ACE_STATUS_CFDETECT     (0x00000010)
+#define ACE_STATUS_DATABUFRDY   (0x00000020)
+#define ACE_STATUS_DATABUFMODE  (0x00000040)
+#define ACE_STATUS_CFGDONE      (0x00000080)
+#define ACE_STATUS_RDYFORCFCMD  (0x00000100)
+#define ACE_STATUS_CFGMODEPIN   (0x00000200)
+#define ACE_STATUS_CFGADDR_MASK (0x0000e000)
+#define ACE_STATUS_CFBSY        (0x00020000)
+#define ACE_STATUS_CFRDY        (0x00040000)
+#define ACE_STATUS_CFDWF        (0x00080000)
+#define ACE_STATUS_CFDSC        (0x00100000)
+#define ACE_STATUS_CFDRQ        (0x00200000)
+#define ACE_STATUS_CFCORR       (0x00400000)
+#define ACE_STATUS_CFERR        (0x00800000)
+
+#define ACE_ERROR (0x08)
+#define ACE_CFGLBA (0x0c)
+#define ACE_MPULBA (0x10)
+
+#define ACE_SECCNTCMD (0x14)
+#define ACE_SECCNTCMD_RESET      (0x0100)
+#define ACE_SECCNTCMD_IDENTIFY   (0x0200)
+#define ACE_SECCNTCMD_READ_DATA  (0x0300)
+#define ACE_SECCNTCMD_WRITE_DATA (0x0400)
+#define ACE_SECCNTCMD_ABORT      (0x0600)
+
+#define ACE_VERSION (0x16)
+#define ACE_VERSION_REVISION_MASK (0x00FF)
+#define ACE_VERSION_MINOR_MASK    (0x0F00)
+#define ACE_VERSION_MAJOR_MASK    (0xF000)
+
+#define ACE_CTRL (0x18)
+#define ACE_CTRL_FORCELOCKREQ   (0x0001)
+#define ACE_CTRL_LOCKREQ        (0x0002)
+#define ACE_CTRL_FORCECFGADDR   (0x0004)
+#define ACE_CTRL_FORCECFGMODE   (0x0008)
+#define ACE_CTRL_CFGMODE        (0x0010)
+#define ACE_CTRL_CFGSTART       (0x0020)
+#define ACE_CTRL_CFGSEL         (0x0040)
+#define ACE_CTRL_CFGRESET       (0x0080)
+#define ACE_CTRL_DATABUFRDYIRQ  (0x0100)
+#define ACE_CTRL_ERRORIRQ       (0x0200)
+#define ACE_CTRL_CFGDONEIRQ     (0x0400)
+#define ACE_CTRL_RESETIRQ       (0x0800)
+#define ACE_CTRL_CFGPROG        (0x1000)
+#define ACE_CTRL_CFGADDR_MASK   (0xe000)
+
+#define ACE_FATSTAT (0x1c)
+
+#define ACE_NUM_MINORS 16
+#define ACE_SECTOR_SIZE (512)
+#define ACE_FIFO_SIZE (32)
+#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE)
+
+struct ace_reg_ops;
+
+struct ace_device {
+	/* driver state data */
+	int id;
+	int media_change;
+	int users;
+	struct list_head list;
+
+	/* finite state machine data */
+	struct tasklet_struct fsm_tasklet;
+	uint fsm_task;		/* Current activity (ACE_TASK_*) */
+	uint fsm_state;		/* Current state (ACE_FSM_STATE_*) */
+	uint fsm_continue_flag;	/* cleared to exit FSM mainloop */
+	uint fsm_iter_num;
+	struct timer_list stall_timer;
+
+	/* Transfer state/result, use for both id and block request */
+	struct request *req;	/* request being processed */
+	void *data_ptr;		/* pointer to I/O buffer */
+	int data_count;		/* number of buffers remaining */
+	int data_result;	/* Result of transfer; 0 := success */
+
+	int id_req_count;	/* count of id requests */
+	int id_result;
+	struct completion id_completion;	/* used when id req finishes */
+	int in_irq;
+
+	/* Details of hardware device */
+	unsigned long physaddr;
+	void *baseaddr;
+	int irq;
+	int bus_width;		/* 0 := 8 bit; 1 := 16 bit */
+	struct ace_reg_ops *reg_ops;
+	int lock_count;
+
+	/* Block device data structures */
+	spinlock_t lock;
+	struct device *dev;
+	struct request_queue *queue;
+	struct gendisk *gd;
+
+	/* Inserted CF card parameters */
+	struct hd_driveid cf_id;
+};
+
+static int ace_major;
+
+/* ---------------------------------------------------------------------
+ * Low level register access
+ */
+
+struct ace_reg_ops {
+	u16(*in) (struct ace_device * ace, int reg);
+	void (*out) (struct ace_device * ace, int reg, u16 val);
+	void (*datain) (struct ace_device * ace);
+	void (*dataout) (struct ace_device * ace);
+};
+
+/* 8 Bit bus width */
+static u16 ace_in_8(struct ace_device *ace, int reg)
+{
+	void *r = ace->baseaddr + reg;
+	return in_8(r) | (in_8(r + 1) << 8);
+}
+
+static void ace_out_8(struct ace_device *ace, int reg, u16 val)
+{
+	void *r = ace->baseaddr + reg;
+	out_8(r, val);
+	out_8(r + 1, val >> 8);
+}
+
+static void ace_datain_8(struct ace_device *ace)
+{
+	void *r = ace->baseaddr + 0x40;
+	u8 *dst = ace->data_ptr;
+	int i = ACE_FIFO_SIZE;
+	while (i--)
+		*dst++ = in_8(r++);
+	ace->data_ptr = dst;
+}
+
+static void ace_dataout_8(struct ace_device *ace)
+{
+	void *r = ace->baseaddr + 0x40;
+	u8 *src = ace->data_ptr;
+	int i = ACE_FIFO_SIZE;
+	while (i--)
+		out_8(r++, *src++);
+	ace->data_ptr = src;
+}
+
+static struct ace_reg_ops ace_reg_8_ops = {
+	.in = ace_in_8,
+	.out = ace_out_8,
+	.datain = ace_datain_8,
+	.dataout = ace_dataout_8,
+};
+
+/* 16 bit big endian bus attachment */
+static u16 ace_in_be16(struct ace_device *ace, int reg)
+{
+	return in_be16(ace->baseaddr + reg);
+}
+
+static void ace_out_be16(struct ace_device *ace, int reg, u16 val)
+{
+	out_be16(ace->baseaddr + reg, val);
+}
+
+static void ace_datain_be16(struct ace_device *ace)
+{
+	int i = ACE_FIFO_SIZE / 2;
+	u16 *dst = ace->data_ptr;
+	while (i--)
+		*dst++ = in_le16(ace->baseaddr + 0x40);
+	ace->data_ptr = dst;
+}
+
+static void ace_dataout_be16(struct ace_device *ace)
+{
+	int i = ACE_FIFO_SIZE / 2;
+	u16 *src = ace->data_ptr;
+	while (i--)
+		out_le16(ace->baseaddr + 0x40, *src++);
+	ace->data_ptr = src;
+}
+
+/* 16 bit little endian bus attachment */
+static u16 ace_in_le16(struct ace_device *ace, int reg)
+{
+	return in_le16(ace->baseaddr + reg);
+}
+
+static void ace_out_le16(struct ace_device *ace, int reg, u16 val)
+{
+	out_le16(ace->baseaddr + reg, val);
+}
+
+static void ace_datain_le16(struct ace_device *ace)
+{
+	int i = ACE_FIFO_SIZE / 2;
+	u16 *dst = ace->data_ptr;
+	while (i--)
+		*dst++ = in_be16(ace->baseaddr + 0x40);
+	ace->data_ptr = dst;
+}
+
+static void ace_dataout_le16(struct ace_device *ace)
+{
+	int i = ACE_FIFO_SIZE / 2;
+	u16 *src = ace->data_ptr;
+	while (i--)
+		out_be16(ace->baseaddr + 0x40, *src++);
+	ace->data_ptr = src;
+}
+
+static struct ace_reg_ops ace_reg_be16_ops = {
+	.in = ace_in_be16,
+	.out = ace_out_be16,
+	.datain = ace_datain_be16,
+	.dataout = ace_dataout_be16,
+};
+
+static struct ace_reg_ops ace_reg_le16_ops = {
+	.in = ace_in_le16,
+	.out = ace_out_le16,
+	.datain = ace_datain_le16,
+	.dataout = ace_dataout_le16,
+};
+
+static inline u16 ace_in(struct ace_device *ace, int reg)
+{
+	return ace->reg_ops->in(ace, reg);
+}
+
+static inline u32 ace_in32(struct ace_device *ace, int reg)
+{
+	return ace_in(ace, reg) | (ace_in(ace, reg + 2) << 16);
+}
+
+static inline void ace_out(struct ace_device *ace, int reg, u16 val)
+{
+	ace->reg_ops->out(ace, reg, val);
+}
+
+static inline void ace_out32(struct ace_device *ace, int reg, u32 val)
+{
+	ace_out(ace, reg, val);
+	ace_out(ace, reg + 2, val >> 16);
+}
+
+/* ---------------------------------------------------------------------
+ * Debug support functions
+ */
+
+#if defined(DEBUG)
+static void ace_dump_mem(void *base, int len)
+{
+	const char *ptr = base;
+	int i, j;
+
+	for (i = 0; i < len; i += 16) {
+		printk(KERN_INFO "%.8x:", i);
+		for (j = 0; j < 16; j++) {
+			if (!(j % 4))
+				printk(" ");
+			printk("%.2x", ptr[i + j]);
+		}
+		printk(" ");
+		for (j = 0; j < 16; j++)
+			printk("%c", isprint(ptr[i + j]) ? ptr[i + j] : '.');
+		printk("\n");
+	}
+}
+#else
+static inline void ace_dump_mem(void *base, int len)
+{
+}
+#endif
+
+static void ace_dump_regs(struct ace_device *ace)
+{
+	dev_info(ace->dev, "    ctrl:  %.8x  seccnt/cmd: %.4x      ver:%.4x\n"
+		 "    status:%.8x  mpu_lba:%.8x  busmode:%4x\n"
+		 "    error: %.8x  cfg_lba:%.8x  fatstat:%.4x\n",
+		 ace_in32(ace, ACE_CTRL),
+		 ace_in(ace, ACE_SECCNTCMD),
+		 ace_in(ace, ACE_VERSION),
+		 ace_in32(ace, ACE_STATUS),
+		 ace_in32(ace, ACE_MPULBA),
+		 ace_in(ace, ACE_BUSMODE),
+		 ace_in32(ace, ACE_ERROR),
+		 ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT));
+}
+
+void ace_fix_driveid(struct hd_driveid *id)
+{
+#if defined(__BIG_ENDIAN)
+	u16 *buf = (void *)id;
+	int i;
+
+	/* All half words have wrong byte order; swap the bytes */
+	for (i = 0; i < sizeof(struct hd_driveid); i += 2, buf++)
+		*buf = le16_to_cpu(*buf);
+
+	/* Some of the data values are 32bit; swap the half words  */
+	id->lba_capacity = ((id->lba_capacity >> 16) & 0x0000FFFF) |
+	    ((id->lba_capacity << 16) & 0xFFFF0000);
+	id->spg = ((id->spg >> 16) & 0x0000FFFF) |
+	    ((id->spg << 16) & 0xFFFF0000);
+#endif
+}
+
+/* ---------------------------------------------------------------------
+ * Finite State Machine (FSM) implementation
+ */
+
+/* FSM tasks; used to direct state transitions */
+#define ACE_TASK_IDLE      0
+#define ACE_TASK_IDENTIFY  1
+#define ACE_TASK_READ      2
+#define ACE_TASK_WRITE     3
+#define ACE_FSM_NUM_TASKS  4
+
+/* FSM state definitions */
+#define ACE_FSM_STATE_IDLE               0
+#define ACE_FSM_STATE_REQ_LOCK           1
+#define ACE_FSM_STATE_WAIT_LOCK          2
+#define ACE_FSM_STATE_WAIT_CFREADY       3
+#define ACE_FSM_STATE_IDENTIFY_PREPARE   4
+#define ACE_FSM_STATE_IDENTIFY_TRANSFER  5
+#define ACE_FSM_STATE_IDENTIFY_COMPLETE  6
+#define ACE_FSM_STATE_REQ_PREPARE        7
+#define ACE_FSM_STATE_REQ_TRANSFER       8
+#define ACE_FSM_STATE_REQ_COMPLETE       9
+#define ACE_FSM_STATE_ERROR             10
+#define ACE_FSM_NUM_STATES              11
+
+/* Set flag to exit FSM loop and reschedule tasklet */
+static inline void ace_fsm_yield(struct ace_device *ace)
+{
+	dev_dbg(ace->dev, "ace_fsm_yield()\n");
+	tasklet_schedule(&ace->fsm_tasklet);
+	ace->fsm_continue_flag = 0;
+}
+
+/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
+static inline void ace_fsm_yieldirq(struct ace_device *ace)
+{
+	dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
+
+	if (ace->irq == NO_IRQ)
+		/* No IRQ assigned, so need to poll */
+		tasklet_schedule(&ace->fsm_tasklet);
+	ace->fsm_continue_flag = 0;
+}
+
+/* Get the next read/write request; ending requests that we don't handle */
+struct request *ace_get_next_request(request_queue_t * q)
+{
+	struct request *req;
+
+	while ((req = elv_next_request(q)) != NULL) {
+		if (blk_fs_request(req))
+			break;
+		end_request(req, 0);
+	}
+	return req;
+}
+
+static void ace_fsm_dostate(struct ace_device *ace)
+{
+	struct request *req;
+	u32 status;
+	u16 val;
+	int count;
+	int i;
+
+#if defined(DEBUG)
+	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
+		ace->fsm_state, ace->id_req_count);
+#endif
+
+	switch (ace->fsm_state) {
+	case ACE_FSM_STATE_IDLE:
+		/* See if there is anything to do */
+		if (ace->id_req_count || ace_get_next_request(ace->queue)) {
+			ace->fsm_iter_num++;
+			ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
+			mod_timer(&ace->stall_timer, jiffies + HZ);
+			if (!timer_pending(&ace->stall_timer))
+				add_timer(&ace->stall_timer);
+			break;
+		}
+		del_timer(&ace->stall_timer);
+		ace->fsm_continue_flag = 0;
+		break;
+
+	case ACE_FSM_STATE_REQ_LOCK:
+		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
+			/* Already have the lock, jump to next state */
+			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
+			break;
+		}
+
+		/* Request the lock */
+		val = ace_in(ace, ACE_CTRL);
+		ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
+		ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
+		break;
+
+	case ACE_FSM_STATE_WAIT_LOCK:
+		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
+			/* got the lock; move to next state */
+			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
+			break;
+		}
+
+		/* wait a bit for the lock */
+		ace_fsm_yield(ace);
+		break;
+
+	case ACE_FSM_STATE_WAIT_CFREADY:
+		status = ace_in32(ace, ACE_STATUS);
+		if (!(status & ACE_STATUS_RDYFORCFCMD) ||
+		    (status & ACE_STATUS_CFBSY)) {
+			/* CF card isn't ready; it needs to be polled */
+			ace_fsm_yield(ace);
+			break;
+		}
+
+		/* Device is ready for command; determine what to do next */
+		if (ace->id_req_count)
+			ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
+		else
+			ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
+		break;
+
+	case ACE_FSM_STATE_IDENTIFY_PREPARE:
+		/* Send identify command */
+		ace->fsm_task = ACE_TASK_IDENTIFY;
+		ace->data_ptr = &ace->cf_id;
+		ace->data_count = ACE_BUF_PER_SECTOR;
+		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);
+
+		/* As per datasheet, put config controller in reset */
+		val = ace_in(ace, ACE_CTRL);
+		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
+
+		/* irq handler takes over from this point; wait for the
+		 * transfer to complete */
+		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
+		ace_fsm_yieldirq(ace);
+		break;
+
+	case ACE_FSM_STATE_IDENTIFY_TRANSFER:
+		/* Check that the sysace is ready to receive data */
+		status = ace_in32(ace, ACE_STATUS);
+		if (status & ACE_STATUS_CFBSY) {
+			dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
+				ace->fsm_task, ace->fsm_iter_num,
+				ace->data_count);
+			ace_fsm_yield(ace);
+			break;
+		}
+		if (!(status & ACE_STATUS_DATABUFRDY)) {
+			ace_fsm_yield(ace);
+			break;
+		}
+
+		/* Transfer the next buffer */
+		ace->reg_ops->datain(ace);
+		ace->data_count--;
+
+		/* If there are still buffers to be transferred, jump out here */
+		if (ace->data_count != 0) {
+			ace_fsm_yieldirq(ace);
+			break;
+		}
+
+		/* transfer finished; kick state machine */
+		dev_dbg(ace->dev, "identify finished\n");
+		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
+		break;
+
+	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
+		ace_fix_driveid(&ace->cf_id);
+		ace_dump_mem(&ace->cf_id, 512);	/* Debug: Dump out disk ID */
+
+		if (ace->data_result) {
+			/* Error occurred, disable the disk */
+			ace->media_change = 1;
+			set_capacity(ace->gd, 0);
+			dev_err(ace->dev, "error fetching CF id (%i)\n",
+				ace->data_result);
+		} else {
+			ace->media_change = 0;
+
+			/* Record disk parameters */
+			set_capacity(ace->gd, ace->cf_id.lba_capacity);
+			dev_info(ace->dev, "capacity: %i sectors\n",
+				 ace->cf_id.lba_capacity);
+		}
+
+		/* We're done, drop to IDLE state and notify waiters */
+		ace->fsm_state = ACE_FSM_STATE_IDLE;
+		ace->id_result = ace->data_result;
+		while (ace->id_req_count) {
+			complete(&ace->id_completion);
+			ace->id_req_count--;
+		}
+		break;
+
+	case ACE_FSM_STATE_REQ_PREPARE:
+		req = ace_get_next_request(ace->queue);
+		if (!req) {
+			ace->fsm_state = ACE_FSM_STATE_IDLE;
+			break;
+		}
+
+		/* Okay, it's a data request, set it up for transfer */
+		dev_dbg(ace->dev,
+			"request: sec=%lx hcnt=%lx, ccnt=%x, dir=%i\n",
+			req->sector, req->hard_nr_sectors,
+			req->current_nr_sectors, rq_data_dir(req));
+
+		ace->req = req;
+		ace->data_ptr = req->buffer;
+		ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
+		ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
+
+		count = req->hard_nr_sectors;
+		if (rq_data_dir(req)) {
+			/* Kick off write request */
+			dev_dbg(ace->dev, "write data\n");
+			ace->fsm_task = ACE_TASK_WRITE;
+			ace_out(ace, ACE_SECCNTCMD,
+				count | ACE_SECCNTCMD_WRITE_DATA);
+		} else {
+			/* Kick off read request */
+			dev_dbg(ace->dev, "read data\n");
+			ace->fsm_task = ACE_TASK_READ;
+			ace_out(ace, ACE_SECCNTCMD,
+				count | ACE_SECCNTCMD_READ_DATA);
+		}
+
+		/* As per datasheet, put config controller in reset */
+		val = ace_in(ace, ACE_CTRL);
+		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
+
+		/* Move to the transfer state.  The systemace will raise
+		 * an interrupt once there is something to do
+		 */
+		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
+		if (ace->fsm_task == ACE_TASK_READ)
+			ace_fsm_yieldirq(ace);	/* wait for data ready */
+		break;
+
+	case ACE_FSM_STATE_REQ_TRANSFER:
+		/* Check that the sysace is ready to receive data */
+		status = ace_in32(ace, ACE_STATUS);
+		if (status & ACE_STATUS_CFBSY) {
+			dev_dbg(ace->dev,
+				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
+				ace->fsm_task, ace->fsm_iter_num,
+				ace->req->current_nr_sectors * 16,
+				ace->data_count, ace->in_irq);
+			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
+			break;
+		}
+		if (!(status & ACE_STATUS_DATABUFRDY)) {
+			dev_dbg(ace->dev,
+				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
+				ace->fsm_task, ace->fsm_iter_num,
+				ace->req->current_nr_sectors * 16,
+				ace->data_count, ace->in_irq);
+			ace_fsm_yieldirq(ace);
+			break;
+		}
+
+		/* Transfer the next buffer */
+		i = 16;
+		if (ace->fsm_task == ACE_TASK_WRITE)
+			ace->reg_ops->dataout(ace);
+		else
+			ace->reg_ops->datain(ace);
+		ace->data_count--;
+
+		/* If there are still buffers to be transferred, jump out here */
+		if (ace->data_count != 0) {
+			ace_fsm_yieldirq(ace);
+			break;
+		}
+
+		/* bio finished; is there another one? */
+		i = ace->req->current_nr_sectors;
+		if (end_that_request_first(ace->req, 1, i)) {
+			/* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
+			 *      ace->req->hard_nr_sectors,
+			 *      ace->req->current_nr_sectors);
+			 */
+			ace->data_ptr = ace->req->buffer;
+			ace->data_count = ace->req->current_nr_sectors * 16;
+			ace_fsm_yieldirq(ace);
+			break;
+		}
+
+		ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
+		break;
+
+	case ACE_FSM_STATE_REQ_COMPLETE:
+		/* Complete the block request */
+		blkdev_dequeue_request(ace->req);
+		end_that_request_last(ace->req, 1);
+		ace->req = NULL;
+
+		/* Finished request; go to idle state */
+		ace->fsm_state = ACE_FSM_STATE_IDLE;
+		break;
+
+	default:
+		ace->fsm_state = ACE_FSM_STATE_IDLE;
+		break;
+	}
+}
+
+static void ace_fsm_tasklet(unsigned long data)
+{
+	struct ace_device *ace = (void *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ace->lock, flags);
+
+	/* Loop over state machine until told to stop */
+	ace->fsm_continue_flag = 1;
+	while (ace->fsm_continue_flag)
+		ace_fsm_dostate(ace);
+
+	spin_unlock_irqrestore(&ace->lock, flags);
+}
+
+static void ace_stall_timer(unsigned long data)
+{
+	struct ace_device *ace = (void *)data;
+	unsigned long flags;
+
+	dev_warn(ace->dev,
+		 "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n",
+		 ace->fsm_state, ace->fsm_task, ace->fsm_iter_num,
+		 ace->data_count);
+	spin_lock_irqsave(&ace->lock, flags);
+
+	/* Rearm the stall timer *before* entering FSM (which may then
+	 * delete the timer) */
+	mod_timer(&ace->stall_timer, jiffies + HZ);
+
+	/* Loop over state machine until told to stop */
+	ace->fsm_continue_flag = 1;
+	while (ace->fsm_continue_flag)
+		ace_fsm_dostate(ace);
+
+	spin_unlock_irqrestore(&ace->lock, flags);
+}
+
+/* ---------------------------------------------------------------------
+ * Interrupt handling routines
+ */
+static int ace_interrupt_checkstate(struct ace_device *ace)
+{
+	u32 sreg = ace_in32(ace, ACE_STATUS);
+	u16 creg = ace_in(ace, ACE_CTRL);
+
+	/* Check for error occurrence */
+	if ((sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR)) &&
+	    (creg & ACE_CTRL_ERRORIRQ)) {
+		dev_err(ace->dev, "transfer failure\n");
+		ace_dump_regs(ace);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static irqreturn_t ace_interrupt(int irq, void *dev_id)
+{
+	u16 creg;
+	struct ace_device *ace = dev_id;
+
+	/* be safe and get the lock */
+	spin_lock(&ace->lock);
+	ace->in_irq = 1;
+
+	/* clear the interrupt */
+	creg = ace_in(ace, ACE_CTRL);
+	ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ);
+	ace_out(ace, ACE_CTRL, creg);
+
+	/* check for IO failures */
+	if (ace_interrupt_checkstate(ace))
+		ace->data_result = -EIO;
+
+	if (ace->fsm_task == 0) {
+		dev_err(ace->dev,
+			"spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n",
+			ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL),
+			ace_in(ace, ACE_SECCNTCMD));
+		dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n",
+			ace->fsm_task, ace->fsm_state, ace->data_count);
+	}
+
+	/* Loop over state machine until told to stop */
+	ace->fsm_continue_flag = 1;
+	while (ace->fsm_continue_flag)
+		ace_fsm_dostate(ace);
+
+	/* done with interrupt; drop the lock */
+	ace->in_irq = 0;
+	spin_unlock(&ace->lock);
+
+	return IRQ_HANDLED;
+}
+
+/* ---------------------------------------------------------------------
+ * Block ops
+ */
+static void ace_request(request_queue_t * q)
+{
+	struct request *req;
+	struct ace_device *ace;
+
+	req = ace_get_next_request(q);
+
+	if (req) {
+		ace = req->rq_disk->private_data;
+		tasklet_schedule(&ace->fsm_tasklet);
+	}
+}
+
+static int ace_media_changed(struct gendisk *gd)
+{
+	struct ace_device *ace = gd->private_data;
+	dev_dbg(ace->dev, "ace_media_changed(): %i\n", ace->media_change);
+
+	return ace->media_change;
+}
+
+static int ace_revalidate_disk(struct gendisk *gd)
+{
+	struct ace_device *ace = gd->private_data;
+	unsigned long flags;
+
+	dev_dbg(ace->dev, "ace_revalidate_disk()\n");
+
+	if (ace->media_change) {
+		dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");
+
+		spin_lock_irqsave(&ace->lock, flags);
+		ace->id_req_count++;
+		spin_unlock_irqrestore(&ace->lock, flags);
+
+		tasklet_schedule(&ace->fsm_tasklet);
+		wait_for_completion(&ace->id_completion);
+	}
+
+	dev_dbg(ace->dev, "revalidate complete\n");
+	return ace->id_result;
+}
+
+static int ace_open(struct inode *inode, struct file *filp)
+{
+	struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
+	unsigned long flags;
+
+	dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);
+
+	filp->private_data = ace;
+	spin_lock_irqsave(&ace->lock, flags);
+	ace->users++;
+	spin_unlock_irqrestore(&ace->lock, flags);
+
+	check_disk_change(inode->i_bdev);
+	return 0;
+}
+
+static int ace_release(struct inode *inode, struct file *filp)
+{
+	struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
+	unsigned long flags;
+	u16 val;
+
+	dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);
+
+	spin_lock_irqsave(&ace->lock, flags);
+	ace->users--;
+	if (ace->users == 0) {
+		val = ace_in(ace, ACE_CTRL);
+		ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
+	}
+	spin_unlock_irqrestore(&ace->lock, flags);
+	return 0;
+}
+
+static int ace_ioctl(struct inode *inode, struct file *filp,
+		     unsigned int cmd, unsigned long arg)
+{
+	struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
+	struct hd_geometry __user *geo = (struct hd_geometry __user *)arg;
+	struct hd_geometry g;
+	dev_dbg(ace->dev, "ace_ioctl()\n");
+
+	switch (cmd) {
+	case HDIO_GETGEO:
+		g.heads = ace->cf_id.heads;
+		g.sectors = ace->cf_id.sectors;
+		g.cylinders = ace->cf_id.cyls;
+		g.start = 0;
+		return copy_to_user(geo, &g, sizeof(g)) ? -EFAULT : 0;
+
+	default:
+		return -ENOTTY;
+	}
+	return -ENOTTY;
+}
+
+static struct block_device_operations ace_fops = {
+	.owner = THIS_MODULE,
+	.open = ace_open,
+	.release = ace_release,
+	.media_changed = ace_media_changed,
+	.revalidate_disk = ace_revalidate_disk,
+	.ioctl = ace_ioctl,
+};
+
+/* --------------------------------------------------------------------
+ * SystemACE device setup/teardown code
+ */
+static int __devinit ace_setup(struct ace_device *ace)
+{
+	u16 version;
+	u16 val;
+
+	int rc;
+
+	spin_lock_init(&ace->lock);
+	init_completion(&ace->id_completion);
+
+	/*
+	 * Map the device
+	 */
+	ace->baseaddr = ioremap(ace->physaddr, 0x80);
+	if (!ace->baseaddr)
+		goto err_ioremap;
+
+	if (ace->irq != NO_IRQ) {
+		rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
+		if (rc) {
+			/* Failure - fall back to polled mode */
+			dev_err(ace->dev, "request_irq failed\n");
+			ace->irq = NO_IRQ;
+		}
+	}
+
+	/*
+	 * Initialize the state machine tasklet and stall timer
+	 */
+	tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
+	setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);
+
+	/*
+	 * Initialize the request queue
+	 */
+	ace->queue = blk_init_queue(ace_request, &ace->lock);
+	if (ace->queue == NULL)
+		goto err_blk_initq;
+	blk_queue_hardsect_size(ace->queue, 512);
+
+	/*
+	 * Allocate and initialize GD structure
+	 */
+	ace->gd = alloc_disk(ACE_NUM_MINORS);
+	if (!ace->gd)
+		goto err_alloc_disk;
+
+	ace->gd->major = ace_major;
+	ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
+	ace->gd->fops = &ace_fops;
+	ace->gd->queue = ace->queue;
+	ace->gd->private_data = ace;
+	snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
+
+	/* set bus width */
+	if (ace->bus_width == 1) {
+		/* 0x0101 should work regardless of endianness */
+		ace_out_le16(ace, ACE_BUSMODE, 0x0101);
+
+		/* read it back to determine endianness */
+		if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
+			ace->reg_ops = &ace_reg_le16_ops;
+		else
+			ace->reg_ops = &ace_reg_be16_ops;
+	} else {
+		ace_out_8(ace, ACE_BUSMODE, 0x00);
+		ace->reg_ops = &ace_reg_8_ops;
+	}
+
+	/* Make sure version register is sane */
+	version = ace_in(ace, ACE_VERSION);
+	if ((version == 0) || (version == 0xFFFF))
+		goto err_read;
+
+	/* Put sysace in a sane state by clearing most control reg bits */
+	ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
+		ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
+
+	/* Enable interrupts */
+	val = ace_in(ace, ACE_CTRL);
+	val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
+	ace_out(ace, ACE_CTRL, val);
+
+	/* Print the identification */
+	dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
+		 (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
+	dev_dbg(ace->dev, "physaddr 0x%lx, mapped to 0x%p, irq=%i\n",
+		ace->physaddr, ace->baseaddr, ace->irq);
+
+	ace->media_change = 1;
+	ace_revalidate_disk(ace->gd);
+
+	/* Make the sysace device 'live' */
+	add_disk(ace->gd);
+
+	return 0;
+
+      err_read:
+	put_disk(ace->gd);
+      err_alloc_disk:
+	blk_cleanup_queue(ace->queue);
+      err_blk_initq:
+	iounmap(ace->baseaddr);
+	if (ace->irq != NO_IRQ)
+		free_irq(ace->irq, ace);
+      err_ioremap:
+	printk(KERN_INFO "xsysace: error initializing device at 0x%lx\n",
+	       ace->physaddr);
+	return -ENOMEM;
+}
+
+static void __devexit ace_teardown(struct ace_device *ace)
+{
+	if (ace->gd) {
+		del_gendisk(ace->gd);
+		put_disk(ace->gd);
+	}
+
+	if (ace->queue)
+		blk_cleanup_queue(ace->queue);
+
+	tasklet_kill(&ace->fsm_tasklet);
+
+	if (ace->irq != NO_IRQ)
+		free_irq(ace->irq, ace);
+
+	iounmap(ace->baseaddr);
+}
+
+/* ---------------------------------------------------------------------
+ * Platform Bus Support
+ */
+
+static int __devinit ace_probe(struct device *device)
+{
+	struct platform_device *dev = to_platform_device(device);
+	struct ace_device *ace;
+	int i;
+
+	dev_dbg(device, "ace_probe(%p)\n", device);
+
+	/*
+	 * Allocate the ace device structure
+	 */
+	ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
+	if (!ace)
+		goto err_alloc;
+
+	ace->dev = device;
+	ace->id = dev->id;
+	ace->irq = NO_IRQ;
+
+	for (i = 0; i < dev->num_resources; i++) {
+		if (dev->resource[i].flags & IORESOURCE_MEM)
+			ace->physaddr = dev->resource[i].start;
+		if (dev->resource[i].flags & IORESOURCE_IRQ)
+			ace->irq = dev->resource[i].start;
+	}
+
+	/* FIXME: Should get bus_width from the platform_device struct */
+	ace->bus_width = 1;
+
+	dev_set_drvdata(&dev->dev, ace);
+
+	/* Call the bus-independent setup code */
+	if (ace_setup(ace) != 0)
+		goto err_setup;
+
+	return 0;
+
+      err_setup:
+	dev_set_drvdata(&dev->dev, NULL);
+	kfree(ace);
+      err_alloc:
+	printk(KERN_ERR "xsysace: could not initialize device\n");
+	return -ENOMEM;
+}
+
+/*
+ * Platform bus remove() method
+ */
+static int __devexit ace_remove(struct device *device)
+{
+	struct ace_device *ace = dev_get_drvdata(device);
+
+	dev_dbg(device, "ace_remove(%p)\n", device);
+
+	if (ace) {
+		ace_teardown(ace);
+		kfree(ace);
+	}
+
+	return 0;
+}
+
+static struct device_driver ace_driver = {
+	.name = "xsysace",
+	.bus = &platform_bus_type,
+	.probe = ace_probe,
+	.remove = __devexit_p(ace_remove),
+};
+
+/* ---------------------------------------------------------------------
+ * Module init/exit routines
+ */
+static int __init ace_init(void)
+{
+	ace_major = register_blkdev(ace_major, "xsysace");
+	if (ace_major <= 0) {
+		printk(KERN_WARNING "xsysace: register_blkdev() failed\n");
+		return ace_major;
+	}
+
+	pr_debug("Registering Xilinx SystemACE driver, major=%i\n", ace_major);
+	return driver_register(&ace_driver);
+}
+
+static void __exit ace_exit(void)
+{
+	pr_debug("Unregistering Xilinx SystemACE driver\n");
+	driver_unregister(&ace_driver);
+	unregister_blkdev(ace_major, "xsysace");
+}
+
+module_init(ace_init);
+module_exit(ace_exit);
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 2abf94c..e40fa98 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -371,9 +371,7 @@
 {
     int i, j;
     blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), 256);
-    if ( unregister_blkdev( Z2RAM_MAJOR, DEVICE_NAME ) != 0 )
-	printk( KERN_ERR DEVICE_NAME ": unregister of device failed\n");
-
+    unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
     del_gendisk(z2ram_gendisk);
     put_disk(z2ram_gendisk);
     blk_cleanup_queue(z2_queue);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ec9dc3d..9e8f214 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -114,7 +114,7 @@
 
 config ROCKETPORT
 	tristate "Comtrol RocketPort support"
-	depends on SERIAL_NONSTANDARD
+	depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
 	help
 	  This driver supports Comtrol RocketPort and RocketModem PCI boards.   
           These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
@@ -157,7 +157,7 @@
 
 config DIGIEPCA
 	tristate "Digiboard Intelligent Async Support"
-	depends on SERIAL_NONSTANDARD
+	depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
 	---help---
 	  This is a driver for Digi International's Xx, Xeve, and Xem series
 	  of cards which provide multiple serial ports. You would need
@@ -213,8 +213,6 @@
 	  This is upgraded (1.9.1) driver from original Moxa drivers with
 	  changes finally resulting in PCI probing.
 
-	  Use at your own risk.
-
 	  This driver can also be built as a module. The module will be called
 	  mxser_new. If you want to do that, say M here.
 
@@ -354,7 +352,7 @@
 
 config STALLION
 	tristate "Stallion EasyIO or EC8/32 support"
-	depends on STALDRV && BROKEN_ON_SMP
+	depends on STALDRV && BROKEN_ON_SMP && (ISA || EISA || PCI)
 	help
 	  If you have an EasyIO or EasyConnection 8/32 multiport Stallion
 	  card, then this is for you; say Y.  Make sure to read
@@ -365,7 +363,7 @@
 
 config ISTALLION
 	tristate "Stallion EC8/64, ONboard, Brumby support"
-	depends on STALDRV && BROKEN_ON_SMP
+	depends on STALDRV && BROKEN_ON_SMP && (ISA || EISA || PCI)
 	help
 	  If you have an EasyConnection 8/64, ONboard, Brumby or Stallion
 	  serial multiport card, say Y here. Make sure to read
@@ -374,39 +372,6 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called istallion.
 
-config SERIAL_DEC
-	bool "DECstation serial support"
-	depends on MACH_DECSTATION
-	default y
-	help
-	  This selects whether you want to be asked about drivers for
-	  DECstation serial ports.
-
-	  Note that the answer to this question won't directly affect the
-	  kernel: saying N will just cause the configurator to skip all
-	  the questions about DECstation serial ports.
-
-config SERIAL_DEC_CONSOLE
-	bool "Support for console on a DECstation serial port"
-	depends on SERIAL_DEC
-	default y
-	help
-	  If you say Y here, it will be possible to use a serial port as the
-	  system console (the system console is the device which receives all
-	  kernel messages and warnings and which allows logins in single user
-	  mode).  Note that the firmware uses ttyS0 as the serial console on
-	  the Maxine and ttyS2 on the others.
-
-	  If unsure, say Y.
-
-config ZS
-	bool "Z85C30 Serial Support"
-	depends on SERIAL_DEC
-	default y
-	help
-	  Documentation on the Zilog 85C350 serial communications controller
-	  is downloadable at <http://www.zilog.com/pdfs/serial/z85c30.pdf>
-
 config A2232
 	tristate "Commodore A2232 serial support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP
@@ -639,6 +604,14 @@
 	help
 	  Toshiba's Cell Reference Set Beat Console device driver
 
+config HVC_XEN
+	bool "Xen Hypervisor Console support"
+	depends on XEN
+	select HVC_DRIVER
+	default y
+	help
+	  Xen virtual console device driver
+
 config HVCS
 	tristate "IBM Hypervisor Virtual Console Server support"
 	depends on PPC_PSERIES
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index f2996a9..4e6f387 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -42,12 +42,14 @@
 obj-$(CONFIG_N_HDLC)		+= n_hdlc.o
 obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX)		+= sx.o generic_serial.o
+obj-$(CONFIG_LGUEST_GUEST)	+= hvc_lguest.o
 obj-$(CONFIG_RIO)		+= rio/ generic_serial.o
 obj-$(CONFIG_HVC_CONSOLE)	+= hvc_vio.o hvsi.o
 obj-$(CONFIG_HVC_ISERIES)	+= hvc_iseries.o
 obj-$(CONFIG_HVC_RTAS)		+= hvc_rtas.o
 obj-$(CONFIG_HVC_BEAT)		+= hvc_beat.o
 obj-$(CONFIG_HVC_DRIVER)	+= hvc_console.o
+obj-$(CONFIG_HVC_XEN)		+= hvc_xen.o
 obj-$(CONFIG_RAW_DRIVER)	+= raw.o
 obj-$(CONFIG_SGI_SNSC)		+= snsc.o snsc_event.o
 obj-$(CONFIG_MSPEC)		+= mspec.o
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 7b02bf1..3d468f5 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -1721,12 +1721,11 @@
 		*ret_info = sstate->info;
 		return 0;
 	}
-	info = kmalloc(sizeof(struct async_struct), GFP_KERNEL);
+	info = kzalloc(sizeof(struct async_struct), GFP_KERNEL);
 	if (!info) {
 		sstate->count--;
 		return -ENOMEM;
 	}
-	memset(info, 0, sizeof(struct async_struct));
 #ifdef DECLARE_WAITQUEUE
 	init_waitqueue_head(&info->open_wait);
 	init_waitqueue_head(&info->close_wait);
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 179c7a3..ec116df 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -20,6 +20,7 @@
 #include <linux/sched.h>
 #include <linux/pm.h>
 #include <linux/apm-emulation.h>
+#include <linux/freezer.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
@@ -329,13 +330,8 @@
 			/*
 			 * Wait for the suspend/resume to complete.  If there
 			 * are pending acknowledges, we wait here for them.
-			 *
-			 * Note: we need to ensure that the PM subsystem does
-			 * not kick us out of the wait when it suspends the
-			 * threads.
 			 */
 			flags = current->flags;
-			current->flags |= PF_NOFREEZE;
 
 			wait_event(apm_suspend_waitqueue,
 				   as->suspend_state == SUSPEND_DONE);
@@ -365,13 +361,8 @@
 			/*
 			 * Wait for the suspend/resume to complete.  If there
 			 * are pending acknowledges, we wait here for them.
-			 *
-			 * Note: we need to ensure that the PM subsystem does
-			 * not kick us out of the wait when it suspends the
-			 * threads.
 			 */
 			flags = current->flags;
-			current->flags |= PF_NOFREEZE;
 
 			wait_event_interruptible(apm_suspend_waitqueue,
 					 as->suspend_state == SUSPEND_DONE);
@@ -598,7 +589,6 @@
 		kapmd_tsk = NULL;
 		return ret;
 	}
-	kapmd_tsk->flags |= PF_NOFREEZE;
 	wake_up_process(kapmd_tsk);
 
 #ifdef CONFIG_PROC_FS
diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
index ed53f54..b6f2639 100644
--- a/drivers/char/briq_panel.c
+++ b/drivers/char/briq_panel.c
@@ -91,11 +91,6 @@
 	unsigned short c;
 	unsigned char cp;
 
-#if 0	/*  Can't seek (pread) on this device  */
-	if (ppos != &file->f_pos)
-		return -ESPIPE;
-#endif
-
 	if (!vfd_is_open)
 		return -ENODEV;
 
@@ -139,11 +134,6 @@
 	size_t indx = len;
 	int i, esc = 0;
 
-#if 0	/*  Can't seek (pwrite) on this device  */
-	if (ppos != &file->f_pos)
-		return -ESPIPE;
-#endif
-
 	if (!vfd_is_open)
 		return -EBUSY;
 
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index e04005b..9e0adfe 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -646,6 +646,7 @@
 #include <linux/delay.h>
 #include <linux/spinlock.h>
 #include <linux/bitops.h>
+#include <linux/firmware.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -680,6 +681,44 @@
 
 #define STD_COM_FLAGS (0)
 
+/* firmware stuff */
+#define ZL_MAX_BLOCKS	16
+#define DRIVER_VERSION	0x02010203
+#define RAM_SIZE 0x80000
+
+#define Z_FPGA_LOADED(X)	((readl(&(X)->init_ctrl) & (1<<17)) != 0)
+
+enum zblock_type {
+	ZBLOCK_PRG = 0,
+	ZBLOCK_FPGA = 1
+};
+
+struct zfile_header {
+	char name[64];
+	char date[32];
+	char aux[32];
+	u32 n_config;
+	u32 config_offset;
+	u32 n_blocks;
+	u32 block_offset;
+	u32 reserved[9];
+} __attribute__ ((packed));
+
+struct zfile_config {
+	char name[64];
+	u32 mailbox;
+	u32 function;
+	u32 n_blocks;
+	u32 block_list[ZL_MAX_BLOCKS];
+} __attribute__ ((packed));
+
+struct zfile_block {
+	u32 type;
+	u32 file_offset;
+	u32 ram_offset;
+	u32 size;
+} __attribute__ ((packed));
+
 static struct tty_driver *cy_serial_driver;
 
 #ifdef CONFIG_ISA
@@ -1851,11 +1890,11 @@
 	struct cyclades_card *cinfo;
 	struct cyclades_port *info;
 	struct tty_struct *tty;
-	static struct FIRM_ID *firm_id;
-	static struct ZFW_CTRL *zfw_ctrl;
-	static struct BOARD_CTRL *board_ctrl;
-	static struct CH_CTRL *ch_ctrl;
-	static struct BUF_CTRL *buf_ctrl;
+	struct FIRM_ID __iomem *firm_id;
+	struct ZFW_CTRL __iomem *zfw_ctrl;
+	struct BOARD_CTRL __iomem *board_ctrl;
+	struct CH_CTRL __iomem *ch_ctrl;
+	struct BUF_CTRL __iomem *buf_ctrl;
 	unsigned long expires = jiffies + HZ;
 	int card, port;
 
@@ -1999,7 +2038,6 @@
 		struct ZFW_CTRL __iomem *zfw_ctrl;
 		struct BOARD_CTRL __iomem *board_ctrl;
 		struct CH_CTRL __iomem *ch_ctrl;
-		int retval;
 
 		base_addr = card->base_addr;
 
@@ -2371,7 +2409,6 @@
 		struct ZFW_CTRL __iomem *zfw_ctrl;
 		struct BOARD_CTRL __iomem *board_ctrl;
 		struct CH_CTRL __iomem *ch_ctrl;
-		int retval;
 
 		base_addr = cinfo->base_addr;
 		firm_id = base_addr + ID_ADDRESS;
@@ -4429,10 +4466,10 @@
 static int __devinit cy_init_card(struct cyclades_card *cinfo)
 {
 	struct cyclades_port *info;
-	u32 mailbox;
+	u32 uninitialized_var(mailbox);
 	unsigned int nports;
 	unsigned short chip_number;
-	int index, port;
+	int uninitialized_var(index), port;
 
 	spin_lock_init(&cinfo->card_lock);
 
@@ -4735,17 +4772,295 @@
 }				/* cy_detect_isa */
 
 #ifdef CONFIG_PCI
-static void __devinit plx_init(void __iomem * addr, __u32 initctl)
+static inline int __devinit cyc_isfwstr(const char *str, unsigned int size)
+{
+	unsigned int a;
+
+	for (a = 0; a < size && *str; a++, str++)
+		if (*str & 0x80)
+			return -EINVAL;
+
+	for (; a < size; a++, str++)
+		if (*str)
+			return -EINVAL;
+
+	return 0;
+}
+
+static inline void __devinit cyz_fpga_copy(void __iomem *fpga, u8 *data,
+		unsigned int size)
+{
+	for (; size > 0; size--) {
+		cy_writel(fpga, *data++);
+		udelay(10);
+	}
+}
+
+static void __devinit plx_init(struct pci_dev *pdev, int irq,
+		struct RUNTIME_9060 __iomem *addr)
 {
 	/* Reset PLX */
-	cy_writel(addr + initctl, readl(addr + initctl) | 0x40000000);
+	cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x40000000);
 	udelay(100L);
-	cy_writel(addr + initctl, readl(addr + initctl) & ~0x40000000);
+	cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x40000000);
 
 	/* Reload Config. Registers from EEPROM */
-	cy_writel(addr + initctl, readl(addr + initctl) | 0x20000000);
+	cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x20000000);
 	udelay(100L);
-	cy_writel(addr + initctl, readl(addr + initctl) & ~0x20000000);
+	cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x20000000);
+
+	/* For some yet unknown reason, once the PLX9060 reloads the EEPROM,
+	 * the IRQ is lost and, thus, we have to re-write it to the PCI config.
+	 * registers. This will remain here until we find a permanent fix.
+	 */
+	pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
+}
+
+static int __devinit __cyz_load_fw(const struct firmware *fw,
+		const char *name, const u32 mailbox, void __iomem *base,
+		void __iomem *fpga)
+{
+	void *ptr = fw->data;
+	struct zfile_header *h = ptr;
+	struct zfile_config *c, *cs;
+	struct zfile_block *b, *bs;
+	unsigned int a, tmp, len = fw->size;
+#define BAD_FW KERN_ERR "Bad firmware: "
+	if (len < sizeof(*h)) {
+		printk(BAD_FW "too short: %u<%zu\n", len, sizeof(*h));
+		return -EINVAL;
+	}
+
+	cs = ptr + h->config_offset;
+	bs = ptr + h->block_offset;
+
+	if ((void *)(cs + h->n_config) > ptr + len ||
+			(void *)(bs + h->n_blocks) > ptr + len) {
+		printk(BAD_FW "too short");
+		return  -EINVAL;
+	}
+
+	if (cyc_isfwstr(h->name, sizeof(h->name)) ||
+			cyc_isfwstr(h->date, sizeof(h->date))) {
+		printk(BAD_FW "bad formatted header string\n");
+		return -EINVAL;
+	}
+
+	if (strncmp(name, h->name, sizeof(h->name))) {
+		printk(BAD_FW "bad name '%s' (expected '%s')\n", h->name, name);
+		return -EINVAL;
+	}
+
+	tmp = 0;
+	for (c = cs; c < cs + h->n_config; c++) {
+		for (a = 0; a < c->n_blocks; a++)
+			if (c->block_list[a] > h->n_blocks) {
+				printk(BAD_FW "bad block ref number in cfgs\n");
+				return -EINVAL;
+			}
+		if (c->mailbox == mailbox && c->function == 0) /* 0 is normal */
+			tmp++;
+	}
+	if (!tmp) {
+		printk(BAD_FW "nothing appropriate\n");
+		return -EINVAL;
+	}
+
+	for (b = bs; b < bs + h->n_blocks; b++)
+		if (b->file_offset + b->size > len) {
+			printk(BAD_FW "bad block data offset\n");
+			return -EINVAL;
+		}
+
+	/* everything is OK, let's seek'n'load it */
+	for (c = cs; c < cs + h->n_config; c++)
+		if (c->mailbox == mailbox && c->function == 0)
+			break;
+
+	for (a = 0; a < c->n_blocks; a++) {
+		b = &bs[c->block_list[a]];
+		if (b->type == ZBLOCK_FPGA) {
+			if (fpga != NULL)
+				cyz_fpga_copy(fpga, ptr + b->file_offset,
+						b->size);
+		} else {
+			if (base != NULL)
+				memcpy_toio(base + b->ram_offset,
+					       ptr + b->file_offset, b->size);
+		}
+	}
+#undef BAD_FW
+	return 0;
+}
+
+static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
+		struct RUNTIME_9060 __iomem *ctl_addr, int irq)
+{
+	const struct firmware *fw;
+	struct FIRM_ID __iomem *fid = base_addr + ID_ADDRESS;
+	struct CUSTOM_REG __iomem *cust = base_addr;
+	struct ZFW_CTRL __iomem *pt_zfwctrl;
+	void __iomem *tmp;
+	u32 mailbox, status;
+	unsigned int i;
+	int retval;
+
+	retval = request_firmware(&fw, "cyzfirm.bin", &pdev->dev);
+	if (retval) {
+		dev_err(&pdev->dev, "can't get firmware\n");
+		goto err;
+	}
+
+	/* Check whether the firmware is already loaded and running. If
+	   positive, skip this board */
+	if (Z_FPGA_LOADED(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) {
+		u32 cntval = readl(base_addr + 0x190);
+
+		udelay(100);
+		if (cntval != readl(base_addr + 0x190)) {
+			/* FW counter is working, FW is running */
+			dev_dbg(&pdev->dev, "Cyclades-Z FW already loaded. "
+					"Skipping board.\n");
+			retval = 0;
+			goto err_rel;
+		}
+	}
+
+	/* start boot */
+	cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) &
+			~0x00030800UL);
+
+	mailbox = readl(&ctl_addr->mail_box_0);
+
+	if (mailbox == 0 || Z_FPGA_LOADED(ctl_addr)) {
+		/* stops CPU and set window to beginning of RAM */
+		cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
+		cy_writel(&cust->cpu_stop, 0);
+		cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+		udelay(100);
+	}
+
+	plx_init(pdev, irq, ctl_addr);
+
+	if (mailbox != 0) {
+		/* load FPGA */
+		retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, NULL,
+				base_addr);
+		if (retval)
+			goto err_rel;
+		if (!Z_FPGA_LOADED(ctl_addr)) {
+			dev_err(&pdev->dev, "fw upload successful, but fw is "
+					"not loaded\n");
+			goto err_rel;
+		}
+	}
+
+	/* stops CPU and set window to beginning of RAM */
+	cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
+	cy_writel(&cust->cpu_stop, 0);
+	cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+	udelay(100);
+
+	/* clear memory */
+	for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
+		cy_writeb(tmp, 255);
+	if (mailbox != 0) {
+		/* set window to last 512K of RAM */
+		cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE);
+		//sleep(1);
+		for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
+			cy_writeb(tmp, 255);
+		/* set window to beginning of RAM */
+		cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+		//sleep(1);
+	}
+
+	retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL);
+	release_firmware(fw);
+	if (retval)
+		goto err;
+
+	/* finish boot and start boards */
+	cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
+	cy_writel(&cust->cpu_start, 0);
+	cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+	i = 0;
+	while ((status = readl(&fid->signature)) != ZFIRM_ID && i++ < 40)
+		msleep(100);
+	if (status != ZFIRM_ID) {
+		if (status == ZFIRM_HLT) {
+			dev_err(&pdev->dev, "you need an external power supply "
+				"for this number of ports. Firmware halted and "
+				"board reset.\n");
+			retval = -EIO;
+			goto err;
+		}
+		dev_warn(&pdev->dev, "fid->signature = 0x%x... Waiting "
+				"some more time\n", status);
+		while ((status = readl(&fid->signature)) != ZFIRM_ID &&
+				i++ < 200)
+			msleep(100);
+		if (status != ZFIRM_ID) {
+			dev_err(&pdev->dev, "Board not started in 20 seconds! "
+					"Giving up. (fid->signature = 0x%x)\n",
+					status);
+			dev_info(&pdev->dev, "*** Warning ***: if you are "
+				"upgrading the FW, please power cycle the "
+				"system before loading the new FW to the "
+				"Cyclades-Z.\n");
+
+			if (Z_FPGA_LOADED(ctl_addr))
+				plx_init(pdev, irq, ctl_addr);
+
+			retval = -EIO;
+			goto err;
+		}
+		dev_dbg(&pdev->dev, "Firmware started after %d seconds.\n",
+				i / 10);
+	}
+	pt_zfwctrl = base_addr + readl(&fid->zfwctrl_addr);
+
+	dev_dbg(&pdev->dev, "fid=> %p, zfwctrl_addr=> %x, npt_zfwctrl=> %p\n",
+			base_addr + ID_ADDRESS, readl(&fid->zfwctrl_addr),
+			base_addr + readl(&fid->zfwctrl_addr));
+
+	dev_info(&pdev->dev, "Cyclades-Z FW loaded: version = %x, ports = %u\n",
+		readl(&pt_zfwctrl->board_ctrl.fw_version),
+		readl(&pt_zfwctrl->board_ctrl.n_channel));
+
+	if (readl(&pt_zfwctrl->board_ctrl.n_channel) == 0) {
+		dev_warn(&pdev->dev, "no Cyclades-Z ports were found. Please "
+			"check the connection between the Z host card and the "
+			"serial expanders.\n");
+
+		if (Z_FPGA_LOADED(ctl_addr))
+			plx_init(pdev, irq, ctl_addr);
+
+		dev_info(&pdev->dev, "Null number of ports detected. Board "
+				"reset.\n");
+		retval = 0;
+		goto err;
+	}
+
+	cy_writel(&pt_zfwctrl->board_ctrl.op_system, C_OS_LINUX);
+	cy_writel(&pt_zfwctrl->board_ctrl.dr_version, DRIVER_VERSION);
+
+	/*
+	   Early firmware failed to start looking for commands.
+	   This enables firmware interrupts for those commands.
+	 */
+	cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
+			(1 << 17));
+	cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
+			0x00030800UL);
+
+	plx_init(pdev, irq, ctl_addr);
+
+	return 0;
+err_rel:
+	release_firmware(fw);
+err:
+	return retval;
 }
 
 static int __devinit cy_pci_probe(struct pci_dev *pdev,
@@ -4827,16 +5142,9 @@
 		}
 
 		/* Disable interrupts on the PLX before resetting it */
-		cy_writew(addr0 + 0x68,
-			readw(addr0 + 0x68) & ~0x0900);
+		cy_writew(addr0 + 0x68, readw(addr0 + 0x68) & ~0x0900);
 
-		plx_init(addr0, 0x6c);
-		/* For some yet unknown reason, once the PLX9060 reloads
-		   the EEPROM, the IRQ is lost and, thus, we have to
-		   re-write it to the PCI config. registers.
-		   This will remain here until we find a permanent
-		   fix. */
-		pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
+		plx_init(pdev, irq, addr0);
 
 		mailbox = (u32)readl(&ctl_addr->mail_box_0);
 
@@ -4877,6 +5185,9 @@
 			if ((mailbox == ZO_V1) || (mailbox == ZO_V2))
 				cy_writel(addr2 + ID_ADDRESS, 0L);
 
+			retval = cyz_load_fw(pdev, addr2, addr0, irq);
+			if (retval)
+				goto err_unmap;
 			/* This must be a Cyclades-8Zo/PCI.  The extendable
 			   version will have a different device_id and will
 			   be allocated its maximum number of ports. */
@@ -4953,15 +5264,7 @@
 		case PLX_9060:
 		case PLX_9080:
 		default:	/* Old boards, use PLX_9060 */
-
-			plx_init(addr0, 0x6c);
-		/* For some yet unknown reason, once the PLX9060 reloads
-		   the EEPROM, the IRQ is lost and, thus, we have to
-		   re-write it to the PCI config. registers.
-		   This will remain here until we find a permanent
-		   fix. */
-			pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
-
+			plx_init(pdev, irq, addr0);
 			cy_writew(addr0 + 0x68, readw(addr0 + 0x68) | 0x0900);
 			break;
 		}
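
[Editor's aside on the cyclades changes above: they move the Cyclades-Z boot code ("cyzfirm.bin") onto the firmware loader interface from <linux/firmware.h>. A minimal sketch of that request/release pattern, assuming a valid struct device pointer and that userspace firmware loading (e.g. udev) can supply the image; cyz_example_load() is an illustrative name, not part of the patch:

static int cyz_example_load(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "cyzfirm.bin", dev);
	if (ret)
		return ret;	/* image not available */

	/* fw->data and fw->size stay valid only until release_firmware() */
	/* ... validate headers, then copy the blocks to the board ... */

	release_firmware(fw);
	return 0;
}
]
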
diff --git a/drivers/char/decserial.c b/drivers/char/decserial.c
deleted file mode 100644
index 8ea2bea..0000000
--- a/drivers/char/decserial.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * sercons.c
- *      choose the right serial device at boot time
- *
- * triemer 6-SEP-1998
- *      sercons.c is designed to allow the three different kinds 
- *      of serial devices under the decstation world to co-exist
- *      in the same kernel.  The idea here is to abstract 
- *      the pieces of the drivers that are common to this file
- *      so that they do not clash at compile time and runtime.
- *
- * HK 16-SEP-1998 v0.002
- *      removed the PROM console as this is not a real serial
- *      device. Added support for PROM console in drivers/char/tty_io.c
- *      instead. Although it may work to enable more than one 
- *      console device I strongly recommend to use only one.
- */
-
-#include <linux/init.h>
-#include <asm/dec/machtype.h>
-
-#ifdef CONFIG_ZS
-extern int zs_init(void);
-#endif
-
-#ifdef CONFIG_SERIAL_CONSOLE
-
-#ifdef CONFIG_ZS
-extern void zs_serial_console_init(void);
-#endif
-
-#endif
-
-/* rs_init - starts up the serial interface -
-   handle normal case of starting up the serial interface */
-
-#ifdef CONFIG_SERIAL
-
-int __init rs_init(void)
-{
-#ifdef CONFIG_ZS
-    if (IOASIC)
-	return zs_init();
-#endif
-    return -ENXIO;
-}
-
-__initcall(rs_init);
-
-#endif
-
-#ifdef CONFIG_SERIAL_CONSOLE
-
-/* serial_console_init handles the special case of starting
- *   up the console on the serial port
- */
-static int __init decserial_console_init(void)
-{
-#ifdef CONFIG_ZS
-    if (IOASIC)
-	zs_serial_console_init();
-#endif
-    return 0;
-}
-console_initcall(decserial_console_init);
-
-#endif
diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c
index 5b91bc0..3345641 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/char/drm/ati_pcigart.c
@@ -73,9 +73,9 @@
 	free_pages((unsigned long)address, order);
 }
 
-int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
+int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
 {
-	drm_sg_mem_t *entry = dev->sg;
+	struct drm_sg_mem *entry = dev->sg;
 	unsigned long pages;
 	int i;
 	int order;
@@ -122,9 +122,9 @@
 }
 EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
 
-int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
+int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
 {
-	drm_sg_mem_t *entry = dev->sg;
+	struct drm_sg_mem *entry = dev->sg;
 	void *address = NULL;
 	unsigned long pages;
 	u32 *pci_gart, page_base, bus_address = 0;
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 0891984..2d6f2d0 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -109,31 +109,31 @@
  * \note KW: Actually it's illegal to change either for
  * backwards-compatibility reasons.
  */
-typedef struct drm_clip_rect {
+struct drm_clip_rect {
 	unsigned short x1;
 	unsigned short y1;
 	unsigned short x2;
 	unsigned short y2;
-} drm_clip_rect_t;
+};
 
 /**
  * Drawable information.
  */
-typedef struct drm_drawable_info {
+struct drm_drawable_info {
 	unsigned int num_rects;
-	drm_clip_rect_t *rects;
-} drm_drawable_info_t;
+	struct drm_clip_rect *rects;
+};
 
 /**
  * Texture region,
  */
-typedef struct drm_tex_region {
+struct drm_tex_region {
 	unsigned char next;
 	unsigned char prev;
 	unsigned char in_use;
 	unsigned char padding;
 	unsigned int age;
-} drm_tex_region_t;
+};
 
 /**
  * Hardware lock.
@@ -142,17 +142,17 @@
  * processor bus contention on a multiprocessor system, there should not be any
  * other data stored in the same cache line.
  */
-typedef struct drm_hw_lock {
+struct drm_hw_lock {
 	__volatile__ unsigned int lock;		/**< lock variable */
 	char padding[60];			/**< Pad to cache line */
-} drm_hw_lock_t;
+};
 
 /**
  * DRM_IOCTL_VERSION ioctl argument type.
  *
  * \sa drmGetVersion().
  */
-typedef struct drm_version {
+struct drm_version {
 	int version_major;	  /**< Major version */
 	int version_minor;	  /**< Minor version */
 	int version_patchlevel;	  /**< Patch level */
@@ -162,33 +162,33 @@
 	char __user *date;	  /**< User-space buffer to hold date */
 	size_t desc_len;	  /**< Length of desc buffer */
 	char __user *desc;	  /**< User-space buffer to hold desc */
-} drm_version_t;
+};
 
 /**
  * DRM_IOCTL_GET_UNIQUE ioctl argument type.
  *
  * \sa drmGetBusid() and drmSetBusId().
  */
-typedef struct drm_unique {
+struct drm_unique {
 	size_t unique_len;	  /**< Length of unique */
 	char __user *unique;	  /**< Unique name for driver instantiation */
-} drm_unique_t;
+};
 
-typedef struct drm_list {
+struct drm_list {
 	int count;		  /**< Length of user-space structures */
-	drm_version_t __user *version;
-} drm_list_t;
+	struct drm_version __user *version;
+};
 
-typedef struct drm_block {
+struct drm_block {
 	int unused;
-} drm_block_t;
+};
 
 /**
  * DRM_IOCTL_CONTROL ioctl argument type.
  *
  * \sa drmCtlInstHandler() and drmCtlUninstHandler().
  */
-typedef struct drm_control {
+struct drm_control {
 	enum {
 		DRM_ADD_COMMAND,
 		DRM_RM_COMMAND,
@@ -196,24 +196,24 @@
 		DRM_UNINST_HANDLER
 	} func;
 	int irq;
-} drm_control_t;
+};
 
 /**
  * Type of memory to map.
  */
-typedef enum drm_map_type {
+enum drm_map_type {
 	_DRM_FRAME_BUFFER = 0,	  /**< WC (no caching), no core dump */
 	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
 	_DRM_SHM = 2,		  /**< shared, cached */
 	_DRM_AGP = 3,		  /**< AGP/GART */
 	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
 	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
-} drm_map_type_t;
+};
 
 /**
  * Memory mapping flags.
  */
-typedef enum drm_map_flags {
+enum drm_map_flags {
 	_DRM_RESTRICTED = 0x01,	     /**< Cannot be mapped to user-virtual */
 	_DRM_READ_ONLY = 0x02,
 	_DRM_LOCKED = 0x04,	     /**< shared, cached, locked */
@@ -221,12 +221,12 @@
 	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
 	_DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
 	_DRM_REMOVABLE = 0x40	     /**< Removable mapping */
-} drm_map_flags_t;
+};
 
-typedef struct drm_ctx_priv_map {
+struct drm_ctx_priv_map {
 	unsigned int ctx_id;	 /**< Context requesting private mapping */
 	void *handle;		 /**< Handle of map */
-} drm_ctx_priv_map_t;
+};
 
 /**
  * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
@@ -234,30 +234,30 @@
  *
  * \sa drmAddMap().
  */
-typedef struct drm_map {
+struct drm_map {
 	unsigned long offset;	 /**< Requested physical address (0 for SAREA)*/
 	unsigned long size;	 /**< Requested physical size (bytes) */
-	drm_map_type_t type;	 /**< Type of memory to map */
-	drm_map_flags_t flags;	 /**< Flags */
+	enum drm_map_type type;	 /**< Type of memory to map */
+	enum drm_map_flags flags;	 /**< Flags */
 	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
 				 /**< Kernel-space: kernel-virtual address */
 	int mtrr;		 /**< MTRR slot used */
 	/*   Private data */
-} drm_map_t;
+};
 
 /**
  * DRM_IOCTL_GET_CLIENT ioctl argument type.
  */
-typedef struct drm_client {
+struct drm_client {
 	int idx;		/**< Which client desired? */
 	int auth;		/**< Is client authenticated? */
 	unsigned long pid;	/**< Process ID */
 	unsigned long uid;	/**< User ID */
 	unsigned long magic;	/**< Magic */
 	unsigned long iocs;	/**< Ioctl count */
-} drm_client_t;
+};
 
-typedef enum {
+enum drm_stat_type {
 	_DRM_STAT_LOCK,
 	_DRM_STAT_OPENS,
 	_DRM_STAT_CLOSES,
@@ -275,23 +275,23 @@
 	_DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
 	_DRM_STAT_MISSED	/**< Missed DMA opportunity */
 	    /* Add to the *END* of the list */
-} drm_stat_type_t;
+};
 
 /**
  * DRM_IOCTL_GET_STATS ioctl argument type.
  */
-typedef struct drm_stats {
+struct drm_stats {
 	unsigned long count;
 	struct {
 		unsigned long value;
-		drm_stat_type_t type;
+		enum drm_stat_type type;
 	} data[15];
-} drm_stats_t;
+};
 
 /**
  * Hardware locking flags.
  */
-typedef enum drm_lock_flags {
+enum drm_lock_flags {
 	_DRM_LOCK_READY = 0x01,	     /**< Wait until hardware is ready for DMA */
 	_DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
 	_DRM_LOCK_FLUSH = 0x04,	     /**< Flush this context's DMA queue first */
@@ -301,17 +301,17 @@
 	   full-screen DGA-like mode. */
 	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
 	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
-} drm_lock_flags_t;
+};
 
 /**
  * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
  *
  * \sa drmGetLock() and drmUnlock().
  */
-typedef struct drm_lock {
+struct drm_lock {
 	int context;
-	drm_lock_flags_t flags;
-} drm_lock_t;
+	enum drm_lock_flags flags;
+};
 
 /**
  * DMA flags
@@ -321,7 +321,7 @@
  *
  * \sa drm_dma.
  */
-typedef enum drm_dma_flags {
+enum drm_dma_flags {
 	/* Flags for DMA buffer dispatch */
 	_DRM_DMA_BLOCK = 0x01,	      /**<
 				       * Block until buffer dispatched.
@@ -340,14 +340,14 @@
 	_DRM_DMA_WAIT = 0x10,	      /**< Wait for free buffers */
 	_DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
 	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
-} drm_dma_flags_t;
+};
 
 /**
  * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
  *
  * \sa drmAddBufs().
  */
-typedef struct drm_buf_desc {
+struct drm_buf_desc {
 	int count;		 /**< Number of buffers of this size */
 	int size;		 /**< Size in bytes */
 	int low_mark;		 /**< Low water mark */
@@ -363,44 +363,44 @@
 				  * Start address of where the AGP buffers are
 				  * in the AGP aperture
 				  */
-} drm_buf_desc_t;
+};
 
 /**
  * DRM_IOCTL_INFO_BUFS ioctl argument type.
  */
-typedef struct drm_buf_info {
+struct drm_buf_info {
 	int count;		/**< Entries in list */
-	drm_buf_desc_t __user *list;
-} drm_buf_info_t;
+	struct drm_buf_desc __user *list;
+};
 
 /**
  * DRM_IOCTL_FREE_BUFS ioctl argument type.
  */
-typedef struct drm_buf_free {
+struct drm_buf_free {
 	int count;
 	int __user *list;
-} drm_buf_free_t;
+};
 
 /**
  * Buffer information
  *
  * \sa drm_buf_map.
  */
-typedef struct drm_buf_pub {
+struct drm_buf_pub {
 	int idx;		       /**< Index into the master buffer list */
 	int total;		       /**< Buffer size */
 	int used;		       /**< Amount of buffer in use (for DMA) */
 	void __user *address;	       /**< Address of buffer */
-} drm_buf_pub_t;
+};
 
 /**
  * DRM_IOCTL_MAP_BUFS ioctl argument type.
  */
-typedef struct drm_buf_map {
+struct drm_buf_map {
 	int count;		/**< Length of the buffer list */
 	void __user *virtual;		/**< Mmap'd area in user-virtual */
-	drm_buf_pub_t __user *list;	/**< Buffer information */
-} drm_buf_map_t;
+	struct drm_buf_pub __user *list;	/**< Buffer information */
+};
 
 /**
  * DRM_IOCTL_DMA ioctl argument type.
@@ -409,48 +409,48 @@
  *
  * \sa drmDMA().
  */
-typedef struct drm_dma {
+struct drm_dma {
 	int context;			  /**< Context handle */
 	int send_count;			  /**< Number of buffers to send */
 	int __user *send_indices;	  /**< List of handles to buffers */
 	int __user *send_sizes;		  /**< Lengths of data to send */
-	drm_dma_flags_t flags;		  /**< Flags */
+	enum drm_dma_flags flags;	  /**< Flags */
 	int request_count;		  /**< Number of buffers requested */
 	int request_size;		  /**< Desired size for buffers */
 	int __user *request_indices;	  /**< Buffer information */
 	int __user *request_sizes;
 	int granted_count;		  /**< Number of buffers granted */
-} drm_dma_t;
+};
 
-typedef enum {
+enum drm_ctx_flags {
 	_DRM_CONTEXT_PRESERVED = 0x01,
 	_DRM_CONTEXT_2DONLY = 0x02
-} drm_ctx_flags_t;
+};
 
 /**
  * DRM_IOCTL_ADD_CTX ioctl argument type.
  *
  * \sa drmCreateContext() and drmDestroyContext().
  */
-typedef struct drm_ctx {
+struct drm_ctx {
 	drm_context_t handle;
-	drm_ctx_flags_t flags;
-} drm_ctx_t;
+	enum drm_ctx_flags flags;
+};
 
 /**
  * DRM_IOCTL_RES_CTX ioctl argument type.
  */
-typedef struct drm_ctx_res {
+struct drm_ctx_res {
 	int count;
-	drm_ctx_t __user *contexts;
-} drm_ctx_res_t;
+	struct drm_ctx __user *contexts;
+};
 
 /**
  * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
  */
-typedef struct drm_draw {
+struct drm_draw {
 	drm_drawable_t handle;
-} drm_draw_t;
+};
 
 /**
  * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
@@ -459,52 +459,52 @@
 	DRM_DRAWABLE_CLIPRECTS,
 } drm_drawable_info_type_t;
 
-typedef struct drm_update_draw {
+struct drm_update_draw {
 	drm_drawable_t handle;
 	unsigned int type;
 	unsigned int num;
 	unsigned long long data;
-} drm_update_draw_t;
+};
 
 /**
  * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
  */
-typedef struct drm_auth {
+struct drm_auth {
 	drm_magic_t magic;
-} drm_auth_t;
+};
 
 /**
  * DRM_IOCTL_IRQ_BUSID ioctl argument type.
  *
  * \sa drmGetInterruptFromBusID().
  */
-typedef struct drm_irq_busid {
+struct drm_irq_busid {
 	int irq;	/**< IRQ number */
 	int busnum;	/**< bus number */
 	int devnum;	/**< device number */
 	int funcnum;	/**< function number */
-} drm_irq_busid_t;
+};
 
-typedef enum {
+enum drm_vblank_seq_type {
 	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
 	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
 	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
 	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
 	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking */
-} drm_vblank_seq_type_t;
+};
 
 #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
 #define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
 				_DRM_VBLANK_NEXTONMISS)
 
 struct drm_wait_vblank_request {
-	drm_vblank_seq_type_t type;
+	enum drm_vblank_seq_type type;
 	unsigned int sequence;
 	unsigned long signal;
 };
 
 struct drm_wait_vblank_reply {
-	drm_vblank_seq_type_t type;
+	enum drm_vblank_seq_type type;
 	unsigned int sequence;
 	long tval_sec;
 	long tval_usec;
@@ -515,41 +515,41 @@
  *
  * \sa drmWaitVBlank().
  */
-typedef union drm_wait_vblank {
+union drm_wait_vblank {
 	struct drm_wait_vblank_request request;
 	struct drm_wait_vblank_reply reply;
-} drm_wait_vblank_t;
+};
 
 /**
  * DRM_IOCTL_AGP_ENABLE ioctl argument type.
  *
  * \sa drmAgpEnable().
  */
-typedef struct drm_agp_mode {
+struct drm_agp_mode {
 	unsigned long mode;	/**< AGP mode */
-} drm_agp_mode_t;
+};
 
 /**
  * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
  *
  * \sa drmAgpAlloc() and drmAgpFree().
  */
-typedef struct drm_agp_buffer {
+struct drm_agp_buffer {
 	unsigned long size;	/**< In bytes -- will round to page boundary */
 	unsigned long handle;	/**< Used for binding / unbinding */
 	unsigned long type;	/**< Type of memory to allocate */
 	unsigned long physical;	/**< Physical used by i810 */
-} drm_agp_buffer_t;
+};
 
 /**
  * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
  *
  * \sa drmAgpBind() and drmAgpUnbind().
  */
-typedef struct drm_agp_binding {
+struct drm_agp_binding {
 	unsigned long handle;	/**< From drm_agp_buffer */
 	unsigned long offset;	/**< In bytes -- will round to page boundary */
-} drm_agp_binding_t;
+};
 
 /**
  * DRM_IOCTL_AGP_INFO ioctl argument type.
@@ -558,7 +558,7 @@
  * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
  * drmAgpVendorId() and drmAgpDeviceId().
  */
-typedef struct drm_agp_info {
+struct drm_agp_info {
 	int agp_version_major;
 	int agp_version_minor;
 	unsigned long mode;
@@ -570,25 +570,25 @@
 	/* PCI information */
 	unsigned short id_vendor;
 	unsigned short id_device;
-} drm_agp_info_t;
+};
 
 /**
  * DRM_IOCTL_SG_ALLOC ioctl argument type.
  */
-typedef struct drm_scatter_gather {
+struct drm_scatter_gather {
 	unsigned long size;	/**< In bytes -- will round to page boundary */
 	unsigned long handle;	/**< Used for mapping / unmapping */
-} drm_scatter_gather_t;
+};
 
 /**
  * DRM_IOCTL_SET_VERSION ioctl argument type.
  */
-typedef struct drm_set_version {
+struct drm_set_version {
 	int drm_di_major;
 	int drm_di_minor;
 	int drm_dd_major;
 	int drm_dd_minor;
-} drm_set_version_t;
+};
 
 #define DRM_IOCTL_BASE			'd'
 #define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
@@ -596,61 +596,61 @@
 #define DRM_IOW(nr,type)		_IOW(DRM_IOCTL_BASE,nr,type)
 #define DRM_IOWR(nr,type)		_IOWR(DRM_IOCTL_BASE,nr,type)
 
-#define DRM_IOCTL_VERSION		DRM_IOWR(0x00, drm_version_t)
-#define DRM_IOCTL_GET_UNIQUE		DRM_IOWR(0x01, drm_unique_t)
-#define DRM_IOCTL_GET_MAGIC		DRM_IOR( 0x02, drm_auth_t)
-#define DRM_IOCTL_IRQ_BUSID		DRM_IOWR(0x03, drm_irq_busid_t)
-#define DRM_IOCTL_GET_MAP               DRM_IOWR(0x04, drm_map_t)
-#define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, drm_client_t)
-#define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, drm_stats_t)
-#define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, drm_set_version_t)
+#define DRM_IOCTL_VERSION		DRM_IOWR(0x00, struct drm_version)
+#define DRM_IOCTL_GET_UNIQUE		DRM_IOWR(0x01, struct drm_unique)
+#define DRM_IOCTL_GET_MAGIC		DRM_IOR( 0x02, struct drm_auth)
+#define DRM_IOCTL_IRQ_BUSID		DRM_IOWR(0x03, struct drm_irq_busid)
+#define DRM_IOCTL_GET_MAP               DRM_IOWR(0x04, struct drm_map)
+#define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
+#define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
+#define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
 
-#define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, drm_unique_t)
-#define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, drm_auth_t)
-#define DRM_IOCTL_BLOCK			DRM_IOWR(0x12, drm_block_t)
-#define DRM_IOCTL_UNBLOCK		DRM_IOWR(0x13, drm_block_t)
-#define DRM_IOCTL_CONTROL		DRM_IOW( 0x14, drm_control_t)
-#define DRM_IOCTL_ADD_MAP		DRM_IOWR(0x15, drm_map_t)
-#define DRM_IOCTL_ADD_BUFS		DRM_IOWR(0x16, drm_buf_desc_t)
-#define DRM_IOCTL_MARK_BUFS		DRM_IOW( 0x17, drm_buf_desc_t)
-#define DRM_IOCTL_INFO_BUFS		DRM_IOWR(0x18, drm_buf_info_t)
-#define DRM_IOCTL_MAP_BUFS		DRM_IOWR(0x19, drm_buf_map_t)
-#define DRM_IOCTL_FREE_BUFS		DRM_IOW( 0x1a, drm_buf_free_t)
+#define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
+#define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
+#define DRM_IOCTL_BLOCK			DRM_IOWR(0x12, struct drm_block)
+#define DRM_IOCTL_UNBLOCK		DRM_IOWR(0x13, struct drm_block)
+#define DRM_IOCTL_CONTROL		DRM_IOW( 0x14, struct drm_control)
+#define DRM_IOCTL_ADD_MAP		DRM_IOWR(0x15, struct drm_map)
+#define DRM_IOCTL_ADD_BUFS		DRM_IOWR(0x16, struct drm_buf_desc)
+#define DRM_IOCTL_MARK_BUFS		DRM_IOW( 0x17, struct drm_buf_desc)
+#define DRM_IOCTL_INFO_BUFS		DRM_IOWR(0x18, struct drm_buf_info)
+#define DRM_IOCTL_MAP_BUFS		DRM_IOWR(0x19, struct drm_buf_map)
+#define DRM_IOCTL_FREE_BUFS		DRM_IOW( 0x1a, struct drm_buf_free)
 
-#define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, drm_map_t)
+#define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)
 
-#define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, drm_ctx_priv_map_t)
-#define DRM_IOCTL_GET_SAREA_CTX 	DRM_IOWR(0x1d, drm_ctx_priv_map_t)
+#define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
+#define DRM_IOCTL_GET_SAREA_CTX 	DRM_IOWR(0x1d, struct drm_ctx_priv_map)
 
-#define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, drm_ctx_t)
-#define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, drm_ctx_t)
-#define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, drm_ctx_t)
-#define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, drm_ctx_t)
-#define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, drm_ctx_t)
-#define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, drm_ctx_t)
-#define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, drm_ctx_res_t)
-#define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, drm_draw_t)
-#define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, drm_draw_t)
-#define DRM_IOCTL_DMA			DRM_IOWR(0x29, drm_dma_t)
-#define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, drm_lock_t)
-#define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, drm_lock_t)
-#define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, drm_lock_t)
+#define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
+#define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
+#define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
+#define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, struct drm_ctx)
+#define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, struct drm_ctx)
+#define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, struct drm_ctx)
+#define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, struct drm_ctx_res)
+#define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, struct drm_draw)
+#define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, struct drm_draw)
+#define DRM_IOCTL_DMA			DRM_IOWR(0x29, struct drm_dma)
+#define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, struct drm_lock)
+#define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
+#define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)
 
 #define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
 #define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
-#define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, drm_agp_mode_t)
-#define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, drm_agp_info_t)
-#define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, drm_agp_binding_t)
-#define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, drm_agp_binding_t)
+#define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
+#define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, struct drm_agp_info)
+#define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, struct drm_agp_binding)
+#define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, struct drm_agp_binding)
 
-#define DRM_IOCTL_SG_ALLOC		DRM_IOW( 0x38, drm_scatter_gather_t)
-#define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, drm_scatter_gather_t)
+#define DRM_IOCTL_SG_ALLOC		DRM_IOW( 0x38, struct drm_scatter_gather)
+#define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, struct drm_scatter_gather)
 
-#define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, drm_wait_vblank_t)
+#define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)
 
-#define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, drm_update_draw_t)
+#define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)
 
 /**
  * Device specific ioctls should only be in their respective headers
@@ -663,4 +663,49 @@
 #define DRM_COMMAND_BASE                0x40
 #define DRM_COMMAND_END			0xA0
 
+/* typedef area */
+#ifndef __KERNEL__
+typedef struct drm_clip_rect drm_clip_rect_t;
+typedef struct drm_drawable_info drm_drawable_info_t;
+typedef struct drm_tex_region drm_tex_region_t;
+typedef struct drm_hw_lock drm_hw_lock_t;
+typedef struct drm_version drm_version_t;
+typedef struct drm_unique drm_unique_t;
+typedef struct drm_list drm_list_t;
+typedef struct drm_block drm_block_t;
+typedef struct drm_control drm_control_t;
+typedef enum drm_map_type drm_map_type_t;
+typedef enum drm_map_flags drm_map_flags_t;
+typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
+typedef struct drm_map drm_map_t;
+typedef struct drm_client drm_client_t;
+typedef enum drm_stat_type drm_stat_type_t;
+typedef struct drm_stats drm_stats_t;
+typedef enum drm_lock_flags drm_lock_flags_t;
+typedef struct drm_lock drm_lock_t;
+typedef enum drm_dma_flags drm_dma_flags_t;
+typedef struct drm_buf_desc drm_buf_desc_t;
+typedef struct drm_buf_info drm_buf_info_t;
+typedef struct drm_buf_free drm_buf_free_t;
+typedef struct drm_buf_pub drm_buf_pub_t;
+typedef struct drm_buf_map drm_buf_map_t;
+typedef struct drm_dma drm_dma_t;
+typedef union drm_wait_vblank drm_wait_vblank_t;
+typedef struct drm_agp_mode drm_agp_mode_t;
+typedef enum drm_ctx_flags drm_ctx_flags_t;
+typedef struct drm_ctx drm_ctx_t;
+typedef struct drm_ctx_res drm_ctx_res_t;
+typedef struct drm_draw drm_draw_t;
+typedef struct drm_update_draw drm_update_draw_t;
+typedef struct drm_auth drm_auth_t;
+typedef struct drm_irq_busid drm_irq_busid_t;
+typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
+
+typedef struct drm_agp_buffer drm_agp_buffer_t;
+typedef struct drm_agp_binding drm_agp_binding_t;
+typedef struct drm_agp_info drm_agp_info_t;
+typedef struct drm_scatter_gather drm_scatter_gather_t;
+typedef struct drm_set_version drm_set_version_t;
+#endif
+
 #endif
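
[Editor's aside on the drm.h rewrite above: it drops the typedef names inside the kernel but preserves them for userspace in the "#ifndef __KERNEL__" compatibility block. A sketch of what that means for a userspace consumer, assuming drm.h is on the include path; the function below is purely illustrative:

#include "drm.h"

/* Outside the kernel the old and new spellings name the same type, so
 * existing userspace keeps compiling while new code can use the struct tag. */
static void example_init_version(drm_version_t *old_style)
{
	struct drm_version *v = old_style;	/* same type, new spelling */

	v->version_major = 0;
	v->version_minor = 0;
	v->version_patchlevel = 0;
}
]
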
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index d494315..0df87fc 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -75,6 +75,8 @@
 #include <asm/pgalloc.h>
 #include "drm.h"
 
+#include <linux/idr.h>
+
 #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
 #define __OS_HAS_MTRR (defined(CONFIG_MTRR))
 
@@ -274,32 +276,23 @@
 	int flags;
 } drm_ioctl_desc_t;
 
-typedef struct drm_devstate {
-	pid_t owner;			/**< X server pid holding x_lock */
-} drm_devstate_t;
-
-typedef struct drm_magic_entry {
-	drm_hash_item_t hash_item;
+struct drm_magic_entry {
 	struct list_head head;
+	struct drm_hash_item hash_item;
 	struct drm_file *priv;
 	struct drm_magic_entry *next;
-} drm_magic_entry_t;
+};
 
-typedef struct drm_magic_head {
-	struct drm_magic_entry *head;
-	struct drm_magic_entry *tail;
-} drm_magic_head_t;
-
-typedef struct drm_vma_entry {
+struct drm_vma_entry {
+	struct list_head head;
 	struct vm_area_struct *vma;
-	struct drm_vma_entry *next;
 	pid_t pid;
-} drm_vma_entry_t;
+};
 
 /**
  * DMA buffer.
  */
-typedef struct drm_buf {
+struct drm_buf {
 	int idx;		       /**< Index into master buflist */
 	int total;		       /**< Buffer size */
 	int order;		       /**< log-base-2(total) */
@@ -325,30 +318,30 @@
 
 	int dev_priv_size;		 /**< Size of buffer private storage */
 	void *dev_private;		 /**< Per-buffer private storage */
-} drm_buf_t;
+};
 
 /** bufs is one longer than it has to be */
-typedef struct drm_waitlist {
+struct drm_waitlist {
 	int count;			/**< Number of possible buffers */
-	drm_buf_t **bufs;		/**< List of pointers to buffers */
-	drm_buf_t **rp;			/**< Read pointer */
-	drm_buf_t **wp;			/**< Write pointer */
-	drm_buf_t **end;		/**< End pointer */
+	struct drm_buf **bufs;		/**< List of pointers to buffers */
+	struct drm_buf **rp;			/**< Read pointer */
+	struct drm_buf **wp;			/**< Write pointer */
+	struct drm_buf **end;		/**< End pointer */
 	spinlock_t read_lock;
 	spinlock_t write_lock;
-} drm_waitlist_t;
+};
 
-typedef struct drm_freelist {
+struct drm_freelist {
 	int initialized;	       /**< Freelist in use */
 	atomic_t count;		       /**< Number of free buffers */
-	drm_buf_t *next;	       /**< End pointer */
+	struct drm_buf *next;	       /**< End pointer */
 
 	wait_queue_head_t waiting;     /**< Processes waiting on free bufs */
 	int low_mark;		       /**< Low water mark */
 	int high_mark;		       /**< High water mark */
 	atomic_t wfh;		       /**< If waiting for high mark */
 	spinlock_t lock;
-} drm_freelist_t;
+};
 
 typedef struct drm_dma_handle {
 	dma_addr_t busaddr;
@@ -359,19 +352,19 @@
 /**
  * Buffer entry.  There is one of this for each buffer size order.
  */
-typedef struct drm_buf_entry {
+struct drm_buf_entry {
 	int buf_size;			/**< size */
 	int buf_count;			/**< number of buffers */
-	drm_buf_t *buflist;		/**< buffer list */
+	struct drm_buf *buflist;		/**< buffer list */
 	int seg_count;
 	int page_order;
-	drm_dma_handle_t **seglist;
+	struct drm_dma_handle **seglist;
 
-	drm_freelist_t freelist;
-} drm_buf_entry_t;
+	struct drm_freelist freelist;
+};
 
 /** File private data */
-typedef struct drm_file {
+struct drm_file {
 	int authenticated;
 	int master;
 	int minor;
@@ -379,16 +372,15 @@
 	uid_t uid;
 	drm_magic_t magic;
 	unsigned long ioctl_count;
-	struct drm_file *next;
-	struct drm_file *prev;
+	struct list_head lhead;
 	struct drm_head *head;
 	int remove_auth_on_close;
 	unsigned long lock_count;
 	void *driver_priv;
-} drm_file_t;
+};
 
 /** Wait queue */
-typedef struct drm_queue {
+struct drm_queue {
 	atomic_t use_count;		/**< Outstanding uses (+1) */
 	atomic_t finalization;		/**< Finalization in progress */
 	atomic_t block_count;		/**< Count of processes waiting */
@@ -401,16 +393,16 @@
 	atomic_t total_flushed;		/**< Total flushes statistic */
 	atomic_t total_locks;		/**< Total locks statistics */
 #endif
-	drm_ctx_flags_t flags;		/**< Context preserving and 2D-only */
-	drm_waitlist_t waitlist;	/**< Pending buffers */
+	enum drm_ctx_flags flags;	/**< Context preserving and 2D-only */
+	struct drm_waitlist waitlist;	/**< Pending buffers */
 	wait_queue_head_t flush_queue;	/**< Processes waiting until flush */
-} drm_queue_t;
+};
 
 /**
  * Lock data.
  */
-typedef struct drm_lock_data {
-	drm_hw_lock_t *hw_lock;		/**< Hardware lock */
+struct drm_lock_data {
+	struct drm_hw_lock *hw_lock;	/**< Hardware lock */
 	struct file *filp;		/**< File descr of lock holder (0=kernel) */
 	wait_queue_head_t lock_queue;	/**< Queue of blocked processes */
 	unsigned long lock_time;	/**< Time of last lock in jiffies */
@@ -418,16 +410,16 @@
 	uint32_t kernel_waiters;
 	uint32_t user_waiters;
 	int idle_has_lock;
-} drm_lock_data_t;
+};
 
 /**
  * DMA data.
  */
-typedef struct drm_device_dma {
+struct drm_device_dma {
 
-	drm_buf_entry_t bufs[DRM_MAX_ORDER + 1];	/**< buffers, grouped by their size order */
+	struct drm_buf_entry bufs[DRM_MAX_ORDER + 1];	/**< buffers, grouped by their size order */
 	int buf_count;			/**< total number of buffers */
-	drm_buf_t **buflist;		/**< Vector of pointers into drm_device_dma::bufs */
+	struct drm_buf **buflist;		/**< Vector of pointers into drm_device_dma::bufs */
 	int seg_count;
 	int page_count;			/**< number of pages */
 	unsigned long *pagelist;	/**< page list */
@@ -439,28 +431,27 @@
 		_DRM_DMA_USE_PCI_RO = 0x08
 	} flags;
 
-} drm_device_dma_t;
+};
 
 /**
  * AGP memory entry.  Stored as a doubly linked list.
  */
-typedef struct drm_agp_mem {
+struct drm_agp_mem {
 	unsigned long handle;		/**< handle */
 	DRM_AGP_MEM *memory;
 	unsigned long bound;		/**< address */
 	int pages;
-	struct drm_agp_mem *prev;	/**< previous entry */
-	struct drm_agp_mem *next;	/**< next entry */
-} drm_agp_mem_t;
+	struct list_head head;
+};
 
 /**
  * AGP data.
  *
  * \sa drm_agp_init() and drm_device::agp.
  */
-typedef struct drm_agp_head {
+struct drm_agp_head {
 	DRM_AGP_KERN agp_info;		/**< AGP device information */
-	drm_agp_mem_t *memory;		/**< memory entries */
+	struct list_head memory;
 	unsigned long mode;		/**< AGP mode */
 	struct agp_bridge_data *bridge;
 	int enabled;			/**< whether the AGP bus as been enabled */
@@ -469,51 +460,51 @@
 	int agp_mtrr;
 	int cant_use_aperture;
 	unsigned long page_mask;
-} drm_agp_head_t;
+};
 
 /**
  * Scatter-gather memory.
  */
-typedef struct drm_sg_mem {
+struct drm_sg_mem {
 	unsigned long handle;
 	void *virtual;
 	int pages;
 	struct page **pagelist;
 	dma_addr_t *busaddr;
-} drm_sg_mem_t;
+};
 
-typedef struct drm_sigdata {
+struct drm_sigdata {
 	int context;
-	drm_hw_lock_t *lock;
-} drm_sigdata_t;
+	struct drm_hw_lock *lock;
+};
 
 /**
  * Mappings list
  */
-typedef struct drm_map_list {
+struct drm_map_list {
 	struct list_head head;		/**< list head */
-	drm_hash_item_t hash;
-	drm_map_t *map;			/**< mapping */
+	struct drm_hash_item hash;
+	struct drm_map *map;			/**< mapping */
 	unsigned int user_token;
-} drm_map_list_t;
+};
 
-typedef drm_map_t drm_local_map_t;
+typedef struct drm_map drm_local_map_t;
 
 /**
  * Context handle list
  */
-typedef struct drm_ctx_list {
+struct drm_ctx_list {
 	struct list_head head;		/**< list head */
 	drm_context_t handle;		/**< context handle */
-	drm_file_t *tag;		/**< associated fd private data */
-} drm_ctx_list_t;
+	struct drm_file *tag;		/**< associated fd private data */
+};
 
-typedef struct drm_vbl_sig {
+struct drm_vbl_sig {
 	struct list_head head;
 	unsigned int sequence;
 	struct siginfo info;
 	struct task_struct *task;
-} drm_vbl_sig_t;
+};
 
 /* location of GART table */
 #define DRM_ATI_GART_MAIN 1
@@ -523,19 +514,19 @@
 #define DRM_ATI_GART_PCIE 2
 #define DRM_ATI_GART_IGP 3
 
-typedef struct ati_pcigart_info {
+struct drm_ati_pcigart_info {
 	int gart_table_location;
 	int gart_reg_if;
 	void *addr;
 	dma_addr_t bus_addr;
 	drm_local_map_t mapping;
 	int table_size;
-} drm_ati_pcigart_info;
+};
 
 /*
  * Generic memory manager structs
  */
-typedef struct drm_mm_node {
+struct drm_mm_node {
 	struct list_head fl_entry;
 	struct list_head ml_entry;
 	int free;
@@ -543,12 +534,12 @@
 	unsigned long size;
 	struct drm_mm *mm;
 	void *private;
-} drm_mm_node_t;
+};
 
-typedef struct drm_mm {
+struct drm_mm {
 	struct list_head fl_entry;
 	struct list_head ml_entry;
-} drm_mm_t;
+};
 
 /**
  * DRM driver structure. This structure represent the common code for
@@ -560,21 +551,21 @@
 struct drm_driver {
 	int (*load) (struct drm_device *, unsigned long flags);
 	int (*firstopen) (struct drm_device *);
-	int (*open) (struct drm_device *, drm_file_t *);
+	int (*open) (struct drm_device *, struct drm_file *);
 	void (*preclose) (struct drm_device *, struct file * filp);
-	void (*postclose) (struct drm_device *, drm_file_t *);
+	void (*postclose) (struct drm_device *, struct drm_file *);
 	void (*lastclose) (struct drm_device *);
 	int (*unload) (struct drm_device *);
 	int (*dma_ioctl) (DRM_IOCTL_ARGS);
 	void (*dma_ready) (struct drm_device *);
 	int (*dma_quiescent) (struct drm_device *);
-	int (*context_ctor) (struct drm_device * dev, int context);
-	int (*context_dtor) (struct drm_device * dev, int context);
-	int (*kernel_context_switch) (struct drm_device * dev, int old,
+	int (*context_ctor) (struct drm_device *dev, int context);
+	int (*context_dtor) (struct drm_device *dev, int context);
+	int (*kernel_context_switch) (struct drm_device *dev, int old,
 				      int new);
-	void (*kernel_context_switch_unlock) (struct drm_device * dev);
-	int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence);
-	int (*vblank_wait2) (struct drm_device * dev, unsigned int *sequence);
+	void (*kernel_context_switch_unlock) (struct drm_device *dev);
+	int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
+	int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
 	int (*dri_library_name) (struct drm_device *dev, char *buf);
 
 	/**
@@ -588,22 +579,23 @@
 	 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
 	 * (return of 1), or may or may not be AGP (return of 2).
 	 */
-	int (*device_is_agp) (struct drm_device * dev);
+	int (*device_is_agp) (struct drm_device *dev);
 
 	/* these have to be filled in */
 
 	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
-	void (*irq_preinstall) (struct drm_device * dev);
-	void (*irq_postinstall) (struct drm_device * dev);
-	void (*irq_uninstall) (struct drm_device * dev);
-	void (*reclaim_buffers) (struct drm_device * dev, struct file * filp);
+	void (*irq_preinstall) (struct drm_device *dev);
+	void (*irq_postinstall) (struct drm_device *dev);
+	void (*irq_uninstall) (struct drm_device *dev);
+	void (*reclaim_buffers) (struct drm_device *dev, struct file * filp);
 	void (*reclaim_buffers_locked) (struct drm_device *dev,
 					struct file *filp);
 	void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
 					struct file * filp);
-	unsigned long (*get_map_ofs) (drm_map_t * map);
-	unsigned long (*get_reg_ofs) (struct drm_device * dev);
-	void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
+	unsigned long (*get_map_ofs) (struct drm_map * map);
+	unsigned long (*get_reg_ofs) (struct drm_device *dev);
+	void (*set_version) (struct drm_device *dev,
+			     struct drm_set_version *sv);
 
 	int major;
 	int minor;
@@ -625,19 +617,19 @@
  * that may contain multiple heads. Embed one per head of these in the
  * private drm_device structure.
  */
-typedef struct drm_head {
+struct drm_head {
 	int minor;			/**< Minor device number */
 	struct drm_device *dev;
 	struct proc_dir_entry *dev_root;  /**< proc directory entry */
 	dev_t device;			/**< Device number for mknod */
 	struct class_device *dev_class;
-} drm_head_t;
+};
 
 /**
  * DRM device structure. This structure represent a complete card that
  * may contain multiple heads.
  */
-typedef struct drm_device {
+struct drm_device {
 	char *unique;			/**< Unique identifier: e.g., busid */
 	int unique_len;			/**< Length of unique field */
 	char *devname;			/**< For /proc/interrupts */
@@ -663,35 +655,33 @@
 	/** \name Performance counters */
 	/*@{ */
 	unsigned long counters;
-	drm_stat_type_t types[15];
+	enum drm_stat_type types[15];
 	atomic_t counts[15];
 	/*@} */
 
 	/** \name Authentication */
 	/*@{ */
-	drm_file_t *file_first;		/**< file list head */
-	drm_file_t *file_last;		/**< file list tail */
-	drm_open_hash_t magiclist;	/**< magic hash table */
+	struct list_head filelist;
+	struct drm_open_hash magiclist;	/**< magic hash table */
 	struct list_head magicfree;
 	/*@} */
 
 	/** \name Memory management */
 	/*@{ */
-	drm_map_list_t *maplist;	/**< Linked list of regions */
+	struct list_head maplist;	/**< Linked list of regions */
 	int map_count;			/**< Number of mappable regions */
-	drm_open_hash_t map_hash;	/**< User token hash table for maps */
+	struct drm_open_hash map_hash;	/**< User token hash table for maps */
 
 	/** \name Context handle management */
 	/*@{ */
-	drm_ctx_list_t *ctxlist;	/**< Linked list of context handles */
+	struct list_head ctxlist;	/**< Linked list of context handles */
 	int ctx_count;			/**< Number of context handles */
 	struct mutex ctxlist_mutex;	/**< For ctxlist */
 
-	drm_map_t **context_sareas;	    /**< per-context SAREA's */
-	int max_context;
+	struct idr ctx_idr;
 
-	drm_vma_entry_t *vmalist;	/**< List of vmas (for debugging) */
-	drm_lock_data_t lock;		/**< Information on hardware lock */
+	struct list_head vmalist;	/**< List of vmas (for debugging) */
+	struct drm_lock_data lock;	/**< Information on hardware lock */
 	/*@} */
 
 	/** \name DMA queues (contexts) */
@@ -699,8 +689,8 @@
 	int queue_count;		/**< Number of active DMA queues */
 	int queue_reserved;		  /**< Number of reserved DMA queues */
 	int queue_slots;		/**< Actual length of queuelist */
-	drm_queue_t **queuelist;	/**< Vector of pointers to DMA queues */
-	drm_device_dma_t *dma;		/**< Optional pointer for DMA support */
+	struct drm_queue **queuelist;	/**< Vector of pointers to DMA queues */
+	struct drm_device_dma *dma;		/**< Optional pointer for DMA support */
 	/*@} */
 
 	/** \name Context support */
@@ -725,8 +715,8 @@
 	atomic_t vbl_received;
 	atomic_t vbl_received2;		/**< number of secondary VBLANK interrupts */
 	spinlock_t vbl_lock;
-	drm_vbl_sig_t vbl_sigs;		/**< signal list to send on VBLANK */
-	drm_vbl_sig_t vbl_sigs2;	/**< signals to send on secondary VBLANK */
+	struct list_head vbl_sigs;		/**< signal list to send on VBLANK */
+	struct list_head vbl_sigs2;	/**< signals to send on secondary VBLANK */
 	unsigned int vbl_pending;
 	spinlock_t tasklet_lock;	/**< For drm_locked_tasklet */
 	void (*locked_tasklet_func)(struct drm_device *dev);
@@ -739,7 +729,7 @@
 	wait_queue_head_t buf_readers;	/**< Processes waiting to read */
 	wait_queue_head_t buf_writers;	/**< Processes waiting to ctx switch */
 
-	drm_agp_head_t *agp;	/**< AGP data */
+	struct drm_agp_head *agp;	/**< AGP data */
 
 	struct pci_dev *pdev;		/**< PCI device structure */
 	int pci_vendor;			/**< PCI vendor id */
@@ -747,26 +737,23 @@
 #ifdef __alpha__
 	struct pci_controller *hose;
 #endif
-	drm_sg_mem_t *sg;	/**< Scatter gather memory */
+	struct drm_sg_mem *sg;	/**< Scatter gather memory */
 	unsigned long *ctx_bitmap;	/**< context bitmap */
 	void *dev_private;		/**< device private data */
-	drm_sigdata_t sigdata;	   /**< For block_all_signals */
+	struct drm_sigdata sigdata;	   /**< For block_all_signals */
 	sigset_t sigmask;
 
 	struct drm_driver *driver;
 	drm_local_map_t *agp_buffer_map;
 	unsigned int agp_buffer_token;
-	drm_head_t primary;		/**< primary screen head */
+	struct drm_head primary;		/**< primary screen head */
 
 	/** \name Drawable information */
 	/*@{ */
 	spinlock_t drw_lock;
-	unsigned int drw_bitfield_length;
-	u32 *drw_bitfield;
-	unsigned int drw_info_length;
-	drm_drawable_info_t **drw_info;
+	struct idr drw_idr;
 	/*@} */
-} drm_device_t;
+};
 
 static __inline__ int drm_core_check_feature(struct drm_device *dev,
 					     int feature)
@@ -838,7 +825,7 @@
 		     unsigned int cmd, unsigned long arg);
 extern long drm_compat_ioctl(struct file *filp,
 			     unsigned int cmd, unsigned long arg);
-extern int drm_lastclose(drm_device_t *dev);
+extern int drm_lastclose(struct drm_device *dev);
 
 				/* Device support (drm_fops.h) */
 extern int drm_open(struct inode *inode, struct file *filp);
@@ -857,7 +844,7 @@
 			int request, int *eof, void *data);
 extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
 
-extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type);
+extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
 extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
 extern int drm_unbind_agp(DRM_AGP_MEM * handle);
@@ -896,9 +883,9 @@
 extern int drm_rmctx(struct inode *inode, struct file *filp,
 		     unsigned int cmd, unsigned long arg);
 
-extern int drm_ctxbitmap_init(drm_device_t * dev);
-extern void drm_ctxbitmap_cleanup(drm_device_t * dev);
-extern void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle);
+extern int drm_ctxbitmap_init(struct drm_device *dev);
+extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
+extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
 
 extern int drm_setsareactx(struct inode *inode, struct file *filp,
 			   unsigned int cmd, unsigned long arg);
@@ -912,8 +899,9 @@
 		      unsigned int cmd, unsigned long arg);
 extern int drm_update_drawable_info(struct inode *inode, struct file *filp,
 		       unsigned int cmd, unsigned long arg);
-extern drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev,
+extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
 						  drm_drawable_t id);
+extern void drm_drawable_free_all(struct drm_device *dev);
 
 				/* Authentication IOCTL support (drm_auth.h) */
 extern int drm_getmagic(struct inode *inode, struct file *filp,
@@ -926,10 +914,10 @@
 		    unsigned int cmd, unsigned long arg);
 extern int drm_unlock(struct inode *inode, struct file *filp,
 		      unsigned int cmd, unsigned long arg);
-extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context);
-extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context);
-extern void drm_idlelock_take(drm_lock_data_t *lock_data);
-extern void drm_idlelock_release(drm_lock_data_t *lock_data);
+extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
+extern void drm_idlelock_take(struct drm_lock_data *lock_data);
+extern void drm_idlelock_release(struct drm_lock_data *lock_data);
 
 /*
  * These are exported to drivers so that they can implement fencing using
@@ -940,15 +928,15 @@
 extern int drm_kernel_take_hw_lock(struct file *filp);
 
 				/* Buffer management support (drm_bufs.h) */
-extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
-extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request);
-extern int drm_addmap(drm_device_t * dev, unsigned int offset,
-		      unsigned int size, drm_map_type_t type,
-		      drm_map_flags_t flags, drm_local_map_t ** map_ptr);
+extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
+extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
+extern int drm_addmap(struct drm_device *dev, unsigned int offset,
+		      unsigned int size, enum drm_map_type type,
+		      enum drm_map_flags flags, drm_local_map_t ** map_ptr);
 extern int drm_addmap_ioctl(struct inode *inode, struct file *filp,
 			    unsigned int cmd, unsigned long arg);
-extern int drm_rmmap(drm_device_t * dev, drm_local_map_t * map);
-extern int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map);
+extern int drm_rmmap(struct drm_device *dev, drm_local_map_t * map);
+extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t * map);
 extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
 			   unsigned int cmd, unsigned long arg);
 
@@ -963,56 +951,56 @@
 			unsigned int cmd, unsigned long arg);
 extern int drm_mapbufs(struct inode *inode, struct file *filp,
 		       unsigned int cmd, unsigned long arg);
-extern unsigned long drm_get_resource_start(drm_device_t * dev,
+extern unsigned long drm_get_resource_start(struct drm_device *dev,
 					    unsigned int resource);
-extern unsigned long drm_get_resource_len(drm_device_t * dev,
+extern unsigned long drm_get_resource_len(struct drm_device *dev,
 					  unsigned int resource);
 
 				/* DMA support (drm_dma.h) */
-extern int drm_dma_setup(drm_device_t * dev);
-extern void drm_dma_takedown(drm_device_t * dev);
-extern void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf);
-extern void drm_core_reclaim_buffers(drm_device_t * dev, struct file *filp);
+extern int drm_dma_setup(struct drm_device *dev);
+extern void drm_dma_takedown(struct drm_device *dev);
+extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
+extern void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp);
 
 				/* IRQ support (drm_irq.h) */
 extern int drm_control(struct inode *inode, struct file *filp,
 		       unsigned int cmd, unsigned long arg);
 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
-extern int drm_irq_uninstall(drm_device_t * dev);
-extern void drm_driver_irq_preinstall(drm_device_t * dev);
-extern void drm_driver_irq_postinstall(drm_device_t * dev);
-extern void drm_driver_irq_uninstall(drm_device_t * dev);
+extern int drm_irq_uninstall(struct drm_device *dev);
+extern void drm_driver_irq_preinstall(struct drm_device *dev);
+extern void drm_driver_irq_postinstall(struct drm_device *dev);
+extern void drm_driver_irq_uninstall(struct drm_device *dev);
 
 extern int drm_wait_vblank(struct inode *inode, struct file *filp,
 			   unsigned int cmd, unsigned long arg);
-extern int drm_vblank_wait(drm_device_t * dev, unsigned int *vbl_seq);
-extern void drm_vbl_send_signals(drm_device_t * dev);
-extern void drm_locked_tasklet(drm_device_t *dev, void(*func)(drm_device_t*));
+extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
+extern void drm_vbl_send_signals(struct drm_device *dev);
+extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
 
 				/* AGP/GART support (drm_agpsupport.h) */
-extern drm_agp_head_t *drm_agp_init(drm_device_t * dev);
-extern int drm_agp_acquire(drm_device_t * dev);
+extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
+extern int drm_agp_acquire(struct drm_device *dev);
 extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
 				 unsigned int cmd, unsigned long arg);
-extern int drm_agp_release(drm_device_t * dev);
+extern int drm_agp_release(struct drm_device *dev);
 extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
 				 unsigned int cmd, unsigned long arg);
-extern int drm_agp_enable(drm_device_t * dev, drm_agp_mode_t mode);
+extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
 extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
 				unsigned int cmd, unsigned long arg);
-extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t * info);
+extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info * info);
 extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
 			      unsigned int cmd, unsigned long arg);
-extern int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request);
+extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
 extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg);
-extern int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request);
+extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
 extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg);
-extern int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request);
+extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
 extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
 			  unsigned int cmd, unsigned long arg);
-extern int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request);
+extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
 extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg);
 extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge,
@@ -1024,16 +1012,18 @@
 				/* Stub support (drm_stub.h) */
 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 		       struct drm_driver *driver);
-extern int drm_put_dev(drm_device_t * dev);
-extern int drm_put_head(drm_head_t * head);
+extern int drm_put_dev(struct drm_device *dev);
+extern int drm_put_head(struct drm_head *head);
 extern unsigned int drm_debug;
 extern unsigned int drm_cards_limit;
-extern drm_head_t **drm_heads;
+extern struct drm_head **drm_heads;
 extern struct class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
 
+extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
+
 				/* Proc support (drm_proc.h) */
-extern int drm_proc_init(drm_device_t * dev,
+extern int drm_proc_init(struct drm_device *dev,
 			 int minor,
 			 struct proc_dir_entry *root,
 			 struct proc_dir_entry **dev_root);
@@ -1042,45 +1032,45 @@
 			    struct proc_dir_entry *dev_root);
 
 				/* Scatter Gather Support (drm_scatter.h) */
-extern void drm_sg_cleanup(drm_sg_mem_t * entry);
+extern void drm_sg_cleanup(struct drm_sg_mem * entry);
 extern int drm_sg_alloc(struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg);
 extern int drm_sg_free(struct inode *inode, struct file *filp,
 		       unsigned int cmd, unsigned long arg);
 
 			       /* ATI PCIGART support (ati_pcigart.h) */
-extern int drm_ati_pcigart_init(drm_device_t * dev,
-				drm_ati_pcigart_info * gart_info);
-extern int drm_ati_pcigart_cleanup(drm_device_t * dev,
-				   drm_ati_pcigart_info * gart_info);
+extern int drm_ati_pcigart_init(struct drm_device *dev,
+				struct drm_ati_pcigart_info * gart_info);
+extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
+				   struct drm_ati_pcigart_info * gart_info);
 
-extern drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size,
+extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
 				       size_t align, dma_addr_t maxaddr);
-extern void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t * dmah);
-extern void drm_pci_free(drm_device_t * dev, drm_dma_handle_t * dmah);
+extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
+extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
 
 			       /* sysfs support (drm_sysfs.c) */
 extern struct class *drm_sysfs_create(struct module *owner, char *name);
 extern void drm_sysfs_destroy(struct class *cs);
 extern struct class_device *drm_sysfs_device_add(struct class *cs,
-						 drm_head_t *head);
+						 struct drm_head *head);
 extern void drm_sysfs_device_remove(struct class_device *class_dev);
 
 /*
  * Basic memory manager support (drm_mm.c)
  */
-extern drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
+extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
 				       unsigned long size,
 				       unsigned alignment);
-void drm_mm_put_block(drm_mm_node_t * cur);
-extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size,
+void drm_mm_put_block(struct drm_mm_node * cur);
+extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
 					 unsigned alignment, int best_match);
-extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
-extern void drm_mm_takedown(drm_mm_t *mm);
-extern int drm_mm_clean(drm_mm_t *mm);
-extern unsigned long drm_mm_tail_space(drm_mm_t *mm);
-extern int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size);
-extern int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size);
+extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
+extern void drm_mm_takedown(struct drm_mm *mm);
+extern int drm_mm_clean(struct drm_mm *mm);
+extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
+extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
+extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
 
 extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
 extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
@@ -1088,14 +1078,14 @@
 static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
 						   unsigned int token)
 {
-	drm_map_list_t *_entry;
-	list_for_each_entry(_entry, &dev->maplist->head, head)
+	struct drm_map_list *_entry;
+	list_for_each_entry(_entry, &dev->maplist, head)
 	    if (_entry->user_token == token)
 		return _entry->map;
 	return NULL;
 }
 
-static __inline__ int drm_device_is_agp(drm_device_t * dev)
+static __inline__ int drm_device_is_agp(struct drm_device *dev)
 {
 	if (dev->driver->device_is_agp != NULL) {
 		int err = (*dev->driver->device_is_agp) (dev);
@@ -1108,7 +1098,7 @@
 	return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
 }
 
-static __inline__ int drm_device_is_pcie(drm_device_t * dev)
+static __inline__ int drm_device_is_pcie(struct drm_device *dev)
 {
 	return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
 }
@@ -1143,7 +1133,7 @@
 
 /*@}*/
 
-extern unsigned long drm_core_get_map_ofs(drm_map_t * map);
+extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
 extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
 
 #endif				/* __KERNEL__ */
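
To see what the drmP.h rework above means for callers: the drm_*_t typedefs give way to plain struct tags, and dev->maplist is now an embedded struct list_head walked directly with list_for_each_entry(). A minimal sketch of a driver-side lookup written against the new header (example_find_shm_map is a hypothetical helper, not part of the patch):

#include "drmP.h"

/* Hypothetical helper: return the first shared-memory map, if any. */
static struct drm_map *example_find_shm_map(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM)
			return entry->map;
	}
	return NULL;
}
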
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 40bfd9b..354f0e3 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -48,7 +48,7 @@
  * Verifies the AGP device has been initialized and acquired and fills in the
  * drm_agp_info structure with the information in drm_agp_head::agp_info.
  */
-int drm_agp_info(drm_device_t * dev, drm_agp_info_t * info)
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
 {
 	DRM_AGP_KERN *kern;
 
@@ -74,16 +74,16 @@
 int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
 		       unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_agp_info_t info;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_agp_info info;
 	int err;
 
 	err = drm_agp_info(dev, &info);
 	if (err)
 		return err;
 
-	if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info)))
+	if (copy_to_user((struct drm_agp_info __user *) arg, &info, sizeof(info)))
 		return -EFAULT;
 	return 0;
 }
@@ -97,7 +97,7 @@
  * Verifies the AGP device hasn't been acquired before and calls
  * \c agp_backend_acquire.
  */
-int drm_agp_acquire(drm_device_t * dev)
+int drm_agp_acquire(struct drm_device * dev)
 {
 	if (!dev->agp)
 		return -ENODEV;
@@ -126,9 +126,9 @@
 int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
 			  unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
+	struct drm_file *priv = filp->private_data;
 
-	return drm_agp_acquire((drm_device_t *) priv->head->dev);
+	return drm_agp_acquire((struct drm_device *) priv->head->dev);
 }
 
 /**
@@ -139,7 +139,7 @@
  *
  * Verifies the AGP device has been acquired and calls \c agp_backend_release.
  */
-int drm_agp_release(drm_device_t * dev)
+int drm_agp_release(struct drm_device * dev)
 {
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
@@ -152,8 +152,8 @@
 int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
 			  unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 
 	return drm_agp_release(dev);
 }
@@ -168,7 +168,7 @@
  * Verifies the AGP device has been acquired but not enabled, and calls
  * \c agp_enable.
  */
-int drm_agp_enable(drm_device_t * dev, drm_agp_mode_t mode)
+int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
 {
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
@@ -185,11 +185,11 @@
 int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_agp_mode_t mode;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_agp_mode mode;
 
-	if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode)))
+	if (copy_from_user(&mode, (struct drm_agp_mode __user *) arg, sizeof(mode)))
 		return -EFAULT;
 
 	return drm_agp_enable(dev, mode);
@@ -207,9 +207,9 @@
  * Verifies the AGP device is present and has been acquired, allocates the
  * memory via alloc_agp() and creates a drm_agp_mem entry for it.
  */
-int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 {
-	drm_agp_mem_t *entry;
+	struct drm_agp_mem *entry;
 	DRM_AGP_MEM *memory;
 	unsigned long pages;
 	u32 type;
@@ -232,11 +232,7 @@
 	entry->memory = memory;
 	entry->bound = 0;
 	entry->pages = pages;
-	entry->prev = NULL;
-	entry->next = dev->agp->memory;
-	if (dev->agp->memory)
-		dev->agp->memory->prev = entry;
-	dev->agp->memory = entry;
+	list_add(&entry->head, &dev->agp->memory);
 
 	request->handle = entry->handle;
 	request->physical = memory->physical;
@@ -248,10 +244,10 @@
 int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_agp_buffer_t request;
-	drm_agp_buffer_t __user *argp = (void __user *)arg;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_agp_buffer request;
+	struct drm_agp_buffer __user *argp = (void __user *)arg;
 	int err;
 
 	if (copy_from_user(&request, argp, sizeof(request)))
@@ -262,10 +258,12 @@
 		return err;
 
 	if (copy_to_user(argp, &request, sizeof(request))) {
-		drm_agp_mem_t *entry = dev->agp->memory;
-
-		dev->agp->memory = entry->next;
-		dev->agp->memory->prev = NULL;
+		struct drm_agp_mem *entry;
+		list_for_each_entry(entry, &dev->agp->memory, head) {
+			if (entry->handle == request.handle)
+				break;
+		}
+		list_del(&entry->head);
 		drm_free_agp(entry->memory, entry->pages);
 		drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
 		return -EFAULT;
@@ -283,12 +281,12 @@
  *
  * Walks through drm_agp_head::memory until finding a matching handle.
  */
-static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
+static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
 					   unsigned long handle)
 {
-	drm_agp_mem_t *entry;
+	struct drm_agp_mem *entry;
 
-	for (entry = dev->agp->memory; entry; entry = entry->next) {
+	list_for_each_entry(entry, &dev->agp->memory, head) {
 		if (entry->handle == handle)
 			return entry;
 	}
@@ -307,9 +305,9 @@
  * Verifies the AGP device is present and acquired, looks-up the AGP memory
  * entry and passes it to the unbind_agp() function.
  */
-int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request)
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
 {
-	drm_agp_mem_t *entry;
+	struct drm_agp_mem *entry;
 	int ret;
 
 	if (!dev->agp || !dev->agp->acquired)
@@ -328,12 +326,12 @@
 int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_agp_binding_t request;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_agp_binding request;
 
 	if (copy_from_user
-	    (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
+	    (&request, (struct drm_agp_binding __user *) arg, sizeof(request)))
 		return -EFAULT;
 
 	return drm_agp_unbind(dev, &request);
@@ -352,9 +350,9 @@
  * is currently bound into the GATT. Looks-up the AGP memory entry and passes
  * it to bind_agp() function.
  */
-int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request)
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
 {
-	drm_agp_mem_t *entry;
+	struct drm_agp_mem *entry;
 	int retcode;
 	int page;
 
@@ -377,12 +375,12 @@
 int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
 		       unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_agp_binding_t request;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_agp_binding request;
 
 	if (copy_from_user
-	    (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
+	    (&request, (struct drm_agp_binding __user *) arg, sizeof(request)))
 		return -EFAULT;
 
 	return drm_agp_bind(dev, &request);
@@ -402,9 +400,9 @@
  * unbind_agp(). Frees it via free_agp() as well as the entry itself
  * and unlinks from the doubly linked list it's inserted in.
  */
-int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
 {
-	drm_agp_mem_t *entry;
+	struct drm_agp_mem *entry;
 
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
@@ -413,13 +411,7 @@
 	if (entry->bound)
 		drm_unbind_agp(entry->memory);
 
-	if (entry->prev)
-		entry->prev->next = entry->next;
-	else
-		dev->agp->memory = entry->next;
-
-	if (entry->next)
-		entry->next->prev = entry->prev;
+	list_del(&entry->head);
 
 	drm_free_agp(entry->memory, entry->pages);
 	drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
@@ -430,12 +422,12 @@
 int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
 		       unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_agp_buffer_t request;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_agp_buffer request;
 
 	if (copy_from_user
-	    (&request, (drm_agp_buffer_t __user *) arg, sizeof(request)))
+	    (&request, (struct drm_agp_buffer __user *) arg, sizeof(request)))
 		return -EFAULT;
 
 	return drm_agp_free(dev, &request);
@@ -450,9 +442,9 @@
  * via the inter_module_* functions. Creates and initializes a drm_agp_head
  * structure.
  */
-drm_agp_head_t *drm_agp_init(drm_device_t * dev)
+struct drm_agp_head *drm_agp_init(struct drm_device *dev)
 {
-	drm_agp_head_t *head = NULL;
+	struct drm_agp_head *head = NULL;
 
 	if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
 		return NULL;
@@ -472,7 +464,7 @@
 		drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
 		return NULL;
 	}
-	head->memory = NULL;
+	INIT_LIST_HEAD(&head->memory);
 	head->cant_use_aperture = head->agp_info.cant_use_aperture;
 	head->page_mask = head->agp_info.page_mask;
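
The AGP bookkeeping above likewise moves from hand-rolled prev/next pointers to a struct list_head threaded through struct drm_agp_mem (INIT_LIST_HEAD(), list_add(), list_del()). A sketch of the resulting teardown idiom, as a hypothetical helper mirroring the alloc/free paths above (not part of the patch):

/* Hypothetical helper: drop every AGP allocation still tracked on the list. */
static void example_agp_release_all(struct drm_device *dev)
{
	struct drm_agp_mem *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &dev->agp->memory, head) {
		if (entry->bound)
			drm_unbind_agp(entry->memory);
		list_del(&entry->head);
		drm_free_agp(entry->memory, entry->pages);
		drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
	}
}
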
 
diff --git a/drivers/char/drm/drm_auth.c b/drivers/char/drm/drm_auth.c
index c7b19d3..7f777da 100644
--- a/drivers/char/drm/drm_auth.c
+++ b/drivers/char/drm/drm_auth.c
@@ -45,15 +45,15 @@
  * the one with matching magic number, while holding the drm_device::struct_mutex
  * lock.
  */
-static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
+static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic)
 {
-	drm_file_t *retval = NULL;
-	drm_magic_entry_t *pt;
-	drm_hash_item_t *hash;
+	struct drm_file *retval = NULL;
+	struct drm_magic_entry *pt;
+	struct drm_hash_item *hash;
 
 	mutex_lock(&dev->struct_mutex);
 	if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
-		pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item);
+		pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
 		retval = pt->priv;
 	}
 	mutex_unlock(&dev->struct_mutex);
@@ -71,10 +71,10 @@
  * associated the magic number hash key in drm_device::magiclist, while holding
  * the drm_device::struct_mutex lock.
  */
-static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
+static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
 			 drm_magic_t magic)
 {
-	drm_magic_entry_t *entry;
+	struct drm_magic_entry *entry;
 
 	DRM_DEBUG("%d\n", magic);
 
@@ -102,10 +102,10 @@
  * Searches and unlinks the entry in drm_device::magiclist with the magic
  * number hash key, while holding the drm_device::struct_mutex lock.
  */
-static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
+static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
 {
-	drm_magic_entry_t *pt;
-	drm_hash_item_t *hash;
+	struct drm_magic_entry *pt;
+	struct drm_hash_item *hash;
 
 	DRM_DEBUG("%d\n", magic);
 
@@ -114,7 +114,7 @@
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
-	pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item);
+	pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
 	drm_ht_remove_item(&dev->magiclist, hash);
 	list_del(&pt->head);
 	mutex_unlock(&dev->struct_mutex);
@@ -142,9 +142,9 @@
 {
 	static drm_magic_t sequence = 0;
 	static DEFINE_SPINLOCK(lock);
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_auth_t auth;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_auth auth;
 
 	/* Find unique magic */
 	if (priv->magic) {
@@ -162,7 +162,7 @@
 	}
 
 	DRM_DEBUG("%u\n", auth.magic);
-	if (copy_to_user((drm_auth_t __user *) arg, &auth, sizeof(auth)))
+	if (copy_to_user((struct drm_auth __user *) arg, &auth, sizeof(auth)))
 		return -EFAULT;
 	return 0;
 }
@@ -181,12 +181,12 @@
 int drm_authmagic(struct inode *inode, struct file *filp,
 		  unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_auth_t auth;
-	drm_file_t *file;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_auth auth;
+	struct drm_file *file;
 
-	if (copy_from_user(&auth, (drm_auth_t __user *) arg, sizeof(auth)))
+	if (copy_from_user(&auth, (struct drm_auth __user *) arg, sizeof(auth)))
 		return -EFAULT;
 	DRM_DEBUG("%u\n", auth.magic);
 	if ((file = drm_find_file(dev, auth.magic))) {
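
A note on the lookup used by drm_find_file() above: drm_hash_entry() is simply container_of() over the embedded struct drm_hash_item, so the hash table stores only the link and the lookup recovers the enclosing entry. The equivalent open-coded form would be (sketch, assuming the usual container_of semantics):

	pt = container_of(hash, struct drm_magic_entry, hash_item);
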
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index c113458..923174c 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -36,26 +36,24 @@
 #include <linux/vmalloc.h>
 #include "drmP.h"
 
-unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
+unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
 {
 	return pci_resource_start(dev->pdev, resource);
 }
 EXPORT_SYMBOL(drm_get_resource_start);
 
-unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
+unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
 {
 	return pci_resource_len(dev->pdev, resource);
 }
 
 EXPORT_SYMBOL(drm_get_resource_len);
 
-static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
+static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
 					     drm_local_map_t *map)
 {
-	struct list_head *list;
-
-	list_for_each(list, &dev->maplist->head) {
-		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
+	struct drm_map_list *entry;
+	list_for_each_entry(entry, &dev->maplist, head) {
 		if (entry->map && map->type == entry->map->type &&
 		    ((entry->map->offset == map->offset) ||
 		     (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
@@ -66,7 +64,7 @@
 	return NULL;
 }
 
-static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
+static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
 			  unsigned long user_token, int hashed_handle)
 {
 	int use_hashed_handle;
@@ -103,12 +101,13 @@
  * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
  * applicable and if supported by the kernel.
  */
-static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
-			   unsigned int size, drm_map_type_t type,
-			   drm_map_flags_t flags, drm_map_list_t ** maplist)
+static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
+			   unsigned int size, enum drm_map_type type,
+			   enum drm_map_flags flags,
+			   struct drm_map_list ** maplist)
 {
-	drm_map_t *map;
-	drm_map_list_t *list;
+	struct drm_map *map;
+	struct drm_map_list *list;
 	drm_dma_handle_t *dmah;
 	unsigned long user_token;
 	int ret;
@@ -214,7 +213,7 @@
 		}
 		break;
 	case _DRM_AGP: {
-		drm_agp_mem_t *entry;
+		struct drm_agp_mem *entry;
 		int valid = 0;
 
 		if (!drm_core_has_AGP(dev)) {
@@ -237,14 +236,14 @@
 		 * skipped and we double check that dev->agp->memory is
 		 * actually set as well as being invalid before EPERM'ing
 		 */
-		for (entry = dev->agp->memory; entry; entry = entry->next) {
+		list_for_each_entry(entry, &dev->agp->memory, head) {
 			if ((map->offset >= entry->bound) &&
 			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
 				valid = 1;
 				break;
 			}
 		}
-		if (dev->agp->memory && !valid) {
+		if (!list_empty(&dev->agp->memory) && !valid) {
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 			return -EPERM;
 		}
@@ -289,7 +288,7 @@
 	list->map = map;
 
 	mutex_lock(&dev->struct_mutex);
-	list_add(&list->head, &dev->maplist->head);
+	list_add(&list->head, &dev->maplist);
 
 	/* Assign a 32-bit handle */
 	/* We do it here so that dev->struct_mutex protects the increment */
@@ -312,11 +311,11 @@
 	return 0;
 	}
 
-int drm_addmap(drm_device_t * dev, unsigned int offset,
-	       unsigned int size, drm_map_type_t type,
-	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
+int drm_addmap(struct drm_device * dev, unsigned int offset,
+	       unsigned int size, enum drm_map_type type,
+	       enum drm_map_flags flags, drm_local_map_t ** map_ptr)
 {
-	drm_map_list_t *list;
+	struct drm_map_list *list;
 	int rc;
 
 	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
@@ -330,11 +329,11 @@
 int drm_addmap_ioctl(struct inode *inode, struct file *filp,
 		     unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_map_t map;
-	drm_map_list_t *maplist;
-	drm_map_t __user *argp = (void __user *)arg;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_map map;
+	struct drm_map_list *maplist;
+	struct drm_map __user *argp = (void __user *)arg;
 	int err;
 
 	if (!(filp->f_mode & 3))
@@ -353,7 +352,7 @@
 	if (err)
 		return err;
 
-	if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
+	if (copy_to_user(argp, maplist->map, sizeof(struct drm_map)))
 		return -EFAULT;
 
 	/* avoid a warning on 64-bit; this cast isn't very nice, but the API was set too long ago to change now */
@@ -369,7 +368,7 @@
  * \param inode device inode.
  * \param filp file pointer.
  * \param cmd command.
- * \param arg pointer to a drm_map_t structure.
+ * \param arg pointer to a struct drm_map structure.
  * \return zero on success or a negative value on error.
  *
  * Searches the map on drm_device::maplist, removes it from the list, see if
@@ -378,31 +377,26 @@
  *
  * \sa drm_addmap
  */
-int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
+int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
 {
-	struct list_head *list;
-	drm_map_list_t *r_list = NULL;
+	struct drm_map_list *r_list = NULL, *list_t;
 	drm_dma_handle_t dmah;
+	int found = 0;
 
 	/* Find the list entry for the map and remove it */
-	list_for_each(list, &dev->maplist->head) {
-		r_list = list_entry(list, drm_map_list_t, head);
-
+	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
 		if (r_list->map == map) {
-			list_del(list);
+			list_del(&r_list->head);
 			drm_ht_remove_key(&dev->map_hash,
 					  r_list->user_token >> PAGE_SHIFT);
-			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
+			found = 1;
 			break;
 		}
 	}
 
-	/* List has wrapped around to the head pointer, or it's empty and we
-	 * didn't find anything.
-	 */
-	if (list == (&dev->maplist->head)) {
+	if (!found)
 		return -EINVAL;
-	}
 
 	switch (map->type) {
 	case _DRM_REGISTERS:
@@ -433,7 +427,7 @@
 	return 0;
 }
 
-int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
+int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
 {
 	int ret;
 
@@ -456,21 +450,19 @@
 int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
 		    unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_map_t request;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_map request;
 	drm_local_map_t *map = NULL;
-	struct list_head *list;
+	struct drm_map_list *r_list;
 	int ret;
 
-	if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
+	if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) {
 		return -EFAULT;
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	list_for_each(list, &dev->maplist->head) {
-		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
-
+	list_for_each_entry(r_list, &dev->maplist, head) {
 		if (r_list->map &&
 		    r_list->user_token == (unsigned long)request.handle &&
 		    r_list->map->flags & _DRM_REMOVABLE) {
@@ -482,7 +474,7 @@
 	/* List has wrapped around to the head pointer, or it's empty and we
 	 * didn't find anything.
 	 */
-	if (list == (&dev->maplist->head)) {
+	if (list_empty(&dev->maplist) || !map) {
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
@@ -513,7 +505,8 @@
  *
  * Frees any pages and buffers associated with the given entry.
  */
-static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
+static void drm_cleanup_buf_error(struct drm_device * dev,
+				  struct drm_buf_entry * entry)
 {
 	int i;
 
@@ -550,20 +543,20 @@
 /**
  * Add AGP buffers for DMA transfers.
  *
- * \param dev drm_device_t to which the buffers are to be added.
- * \param request pointer to a drm_buf_desc_t describing the request.
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
  * \return zero on success or a negative number on failure.
  *
  * After some sanity checks creates a drm_buf structure for each buffer and
  * reallocates the buffer list of the same size order to accommodate the new
  * buffers.
  */
-int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
+int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 {
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_entry_t *entry;
-	drm_agp_mem_t *agp_entry;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_entry *entry;
+	struct drm_agp_mem *agp_entry;
+	struct drm_buf *buf;
 	unsigned long offset;
 	unsigned long agp_offset;
 	int count;
@@ -574,7 +567,7 @@
 	int total;
 	int byte_count;
 	int i, valid;
-	drm_buf_t **temp_buflist;
+	struct drm_buf **temp_buflist;
 
 	if (!dma)
 		return -EINVAL;
@@ -606,14 +599,14 @@
 
 	/* Make sure buffers are located in AGP memory that we own */
 	valid = 0;
-	for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) {
+	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
 		if ((agp_offset >= agp_entry->bound) &&
 		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
 			valid = 1;
 			break;
 		}
 	}
-	if (dev->agp->memory && !valid) {
+	if (!list_empty(&dev->agp->memory) && !valid) {
 		DRM_DEBUG("zone invalid\n");
 		return -EINVAL;
 	}
@@ -728,24 +721,24 @@
 EXPORT_SYMBOL(drm_addbufs_agp);
 #endif				/* __OS_HAS_AGP */
 
-int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
+int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int count;
 	int order;
 	int size;
 	int total;
 	int page_order;
-	drm_buf_entry_t *entry;
+	struct drm_buf_entry *entry;
 	drm_dma_handle_t *dmah;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 	int alignment;
 	unsigned long offset;
 	int i;
 	int byte_count;
 	int page_count;
 	unsigned long *temp_pagelist;
-	drm_buf_t **temp_buflist;
+	struct drm_buf **temp_buflist;
 
 	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
 		return -EINVAL;
@@ -954,11 +947,11 @@
 }
 EXPORT_SYMBOL(drm_addbufs_pci);
 
-static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
+static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
 {
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_entry_t *entry;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_entry *entry;
+	struct drm_buf *buf;
 	unsigned long offset;
 	unsigned long agp_offset;
 	int count;
@@ -969,7 +962,7 @@
 	int total;
 	int byte_count;
 	int i;
-	drm_buf_t **temp_buflist;
+	struct drm_buf **temp_buflist;
 
 	if (!drm_core_check_feature(dev, DRIVER_SG))
 		return -EINVAL;
@@ -1116,11 +1109,11 @@
 	return 0;
 }
 
-static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
+static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
 {
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_entry_t *entry;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_entry *entry;
+	struct drm_buf *buf;
 	unsigned long offset;
 	unsigned long agp_offset;
 	int count;
@@ -1131,7 +1124,7 @@
 	int total;
 	int byte_count;
 	int i;
-	drm_buf_t **temp_buflist;
+	struct drm_buf **temp_buflist;
 
 	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
 		return -EINVAL;
@@ -1283,7 +1276,7 @@
  * \param inode device inode.
  * \param filp file pointer.
  * \param cmd command.
- * \param arg pointer to a drm_buf_desc_t request.
+ * \param arg pointer to a struct drm_buf_desc request.
  * \return zero on success or a negative number on failure.
  *
  * According with the memory type specified in drm_buf_desc::flags and the
@@ -1294,15 +1287,15 @@
 int drm_addbufs(struct inode *inode, struct file *filp,
 		unsigned int cmd, unsigned long arg)
 {
-	drm_buf_desc_t request;
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_buf_desc request;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	int ret;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		return -EINVAL;
 
-	if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
+	if (copy_from_user(&request, (struct drm_buf_desc __user *) arg,
 			   sizeof(request)))
 		return -EFAULT;
 
@@ -1346,11 +1339,11 @@
 int drm_infobufs(struct inode *inode, struct file *filp,
 		 unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_info_t request;
-	drm_buf_info_t __user *argp = (void __user *)arg;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_info request;
+	struct drm_buf_info __user *argp = (void __user *)arg;
 	int i;
 	int count;
 
@@ -1381,10 +1374,10 @@
 	if (request.count >= count) {
 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
 			if (dma->bufs[i].buf_count) {
-				drm_buf_desc_t __user *to =
+				struct drm_buf_desc __user *to =
 				    &request.list[count];
-				drm_buf_entry_t *from = &dma->bufs[i];
-				drm_freelist_t *list = &dma->bufs[i].freelist;
+				struct drm_buf_entry *from = &dma->bufs[i];
+				struct drm_freelist *list = &dma->bufs[i].freelist;
 				if (copy_to_user(&to->count,
 						 &from->buf_count,
 						 sizeof(from->buf_count)) ||
@@ -1434,12 +1427,12 @@
 int drm_markbufs(struct inode *inode, struct file *filp,
 		 unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_desc_t request;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_desc request;
 	int order;
-	drm_buf_entry_t *entry;
+	struct drm_buf_entry *entry;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		return -EINVAL;
@@ -1448,7 +1441,7 @@
 		return -EINVAL;
 
 	if (copy_from_user(&request,
-			   (drm_buf_desc_t __user *) arg, sizeof(request)))
+			   (struct drm_buf_desc __user *) arg, sizeof(request)))
 		return -EFAULT;
 
 	DRM_DEBUG("%d, %d, %d\n",
@@ -1484,13 +1477,13 @@
 int drm_freebufs(struct inode *inode, struct file *filp,
 		 unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_free_t request;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_free request;
 	int i;
 	int idx;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		return -EINVAL;
@@ -1499,7 +1492,7 @@
 		return -EINVAL;
 
 	if (copy_from_user(&request,
-			   (drm_buf_free_t __user *) arg, sizeof(request)))
+			   (struct drm_buf_free __user *) arg, sizeof(request)))
 		return -EFAULT;
 
 	DRM_DEBUG("%d\n", request.count);
@@ -1540,15 +1533,15 @@
 int drm_mapbufs(struct inode *inode, struct file *filp,
 		unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_map_t __user *argp = (void __user *)arg;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_map __user *argp = (void __user *)arg;
 	int retcode = 0;
 	const int zero = 0;
 	unsigned long virtual;
 	unsigned long address;
-	drm_buf_map_t request;
+	struct drm_buf_map request;
 	int i;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1574,7 +1567,7 @@
 			&& (dma->flags & _DRM_DMA_USE_SG))
 		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
 			&& (dma->flags & _DRM_DMA_USE_FB))) {
-			drm_map_t *map = dev->agp_buffer_map;
+			struct drm_map *map = dev->agp_buffer_map;
 			unsigned long token = dev->agp_buffer_token;
 
 			if (!map) {
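
With drm_bufs.c converted, drm_addmap() and friends take struct drm_device plus the plain enum drm_map_type / enum drm_map_flags. A hedged sketch of a caller after the change; the helper name and BAR index are invented for illustration and are not part of the patch:

/* Hypothetical driver init fragment: map the register BAR. */
static int example_map_registers(struct drm_device *dev)
{
	drm_local_map_t *regs;
	int ret;

	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
			 drm_get_resource_len(dev, 0),
			 _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
	if (ret)
		return ret;

	DRM_DEBUG("registers mapped at %p\n", regs->handle);
	return 0;
}
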
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index 83094c7..61ad986 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -53,26 +53,14 @@
  * \param ctx_handle context handle.
  *
  * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
- * in drm_device::context_sareas, while holding the drm_device::struct_mutex
+ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
  * lock.
  */
-void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 {
-	if (ctx_handle < 0)
-		goto failed;
-	if (!dev->ctx_bitmap)
-		goto failed;
-
-	if (ctx_handle < DRM_MAX_CTXBITMAP) {
-		mutex_lock(&dev->struct_mutex);
-		clear_bit(ctx_handle, dev->ctx_bitmap);
-		dev->context_sareas[ctx_handle] = NULL;
-		mutex_unlock(&dev->struct_mutex);
-		return;
-	}
-      failed:
-	DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle);
-	return;
+	mutex_lock(&dev->struct_mutex);
+	idr_remove(&dev->ctx_idr, ctx_handle);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 /**
@@ -81,62 +69,28 @@
  * \param dev DRM device.
  * \return (non-negative) context handle on success or a negative number on failure.
  *
- * Find the first zero bit in drm_device::ctx_bitmap and (re)allocates
- * drm_device::context_sareas to accommodate the new entry while holding the
+ * Allocate a new idr from drm_device::ctx_idr while holding the
  * drm_device::struct_mutex lock.
  */
-static int drm_ctxbitmap_next(drm_device_t * dev)
+static int drm_ctxbitmap_next(struct drm_device * dev)
 {
-	int bit;
+	int new_id;
+	int ret;
 
-	if (!dev->ctx_bitmap)
-		return -1;
-
+again:
+	if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
+		DRM_ERROR("Out of memory expanding drawable idr\n");
+		return -ENOMEM;
+	}
 	mutex_lock(&dev->struct_mutex);
-	bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
-	if (bit < DRM_MAX_CTXBITMAP) {
-		set_bit(bit, dev->ctx_bitmap);
-		DRM_DEBUG("drm_ctxbitmap_next bit : %d\n", bit);
-		if ((bit + 1) > dev->max_context) {
-			dev->max_context = (bit + 1);
-			if (dev->context_sareas) {
-				drm_map_t **ctx_sareas;
-
-				ctx_sareas = drm_realloc(dev->context_sareas,
-							 (dev->max_context -
-							  1) *
-							 sizeof(*dev->
-								context_sareas),
-							 dev->max_context *
-							 sizeof(*dev->
-								context_sareas),
-							 DRM_MEM_MAPS);
-				if (!ctx_sareas) {
-					clear_bit(bit, dev->ctx_bitmap);
-					mutex_unlock(&dev->struct_mutex);
-					return -1;
-				}
-				dev->context_sareas = ctx_sareas;
-				dev->context_sareas[bit] = NULL;
-			} else {
-				/* max_context == 1 at this point */
-				dev->context_sareas =
-				    drm_alloc(dev->max_context *
-					      sizeof(*dev->context_sareas),
-					      DRM_MEM_MAPS);
-				if (!dev->context_sareas) {
-					clear_bit(bit, dev->ctx_bitmap);
-					mutex_unlock(&dev->struct_mutex);
-					return -1;
-				}
-				dev->context_sareas[bit] = NULL;
-			}
-		}
+	ret = idr_get_new_above(&dev->ctx_idr, NULL,
+				DRM_RESERVED_CONTEXTS, &new_id);
+	if (ret == -EAGAIN) {
 		mutex_unlock(&dev->struct_mutex);
-		return bit;
+		goto again;
 	}
 	mutex_unlock(&dev->struct_mutex);
-	return -1;
+	return new_id;
 }
 
 /**
@@ -144,31 +98,11 @@
  *
  * \param dev DRM device.
  *
- * Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding
- * the drm_device::struct_mutex lock.
+ * Initialise the drm_device::ctx_idr
  */
-int drm_ctxbitmap_init(drm_device_t * dev)
+int drm_ctxbitmap_init(struct drm_device * dev)
 {
-	int i;
-	int temp;
-
-	mutex_lock(&dev->struct_mutex);
-	dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE,
-						     DRM_MEM_CTXBITMAP);
-	if (dev->ctx_bitmap == NULL) {
-		mutex_unlock(&dev->struct_mutex);
-		return -ENOMEM;
-	}
-	memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE);
-	dev->context_sareas = NULL;
-	dev->max_context = -1;
-	mutex_unlock(&dev->struct_mutex);
-
-	for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
-		temp = drm_ctxbitmap_next(dev);
-		DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp);
-	}
-
+	idr_init(&dev->ctx_idr);
 	return 0;
 }
 
@@ -177,17 +111,13 @@
  *
  * \param dev DRM device.
  *
- * Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
- * the drm_device::struct_mutex lock.
+ * Free all idr members using drm_ctx_sarea_free helper function
+ * while holding the drm_device::struct_mutex lock.
  */
-void drm_ctxbitmap_cleanup(drm_device_t * dev)
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
 {
 	mutex_lock(&dev->struct_mutex);
-	if (dev->context_sareas)
-		drm_free(dev->context_sareas,
-			 sizeof(*dev->context_sareas) *
-			 dev->max_context, DRM_MEM_MAPS);
-	drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP);
+	idr_remove_all(&dev->ctx_idr);
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -206,34 +136,34 @@
  * \param arg user argument pointing to a drm_ctx_priv_map structure.
  * \return zero on success or a negative number on failure.
  *
- * Gets the map from drm_device::context_sareas with the handle specified and
+ * Gets the map from drm_device::ctx_idr with the handle specified and
  * returns its handle.
  */
 int drm_getsareactx(struct inode *inode, struct file *filp,
 		    unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_ctx_priv_map_t __user *argp = (void __user *)arg;
-	drm_ctx_priv_map_t request;
-	drm_map_t *map;
-	drm_map_list_t *_entry;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_ctx_priv_map __user *argp = (void __user *)arg;
+	struct drm_ctx_priv_map request;
+	struct drm_map *map;
+	struct drm_map_list *_entry;
 
 	if (copy_from_user(&request, argp, sizeof(request)))
 		return -EFAULT;
 
 	mutex_lock(&dev->struct_mutex);
-	if (dev->max_context < 0
-	    || request.ctx_id >= (unsigned)dev->max_context) {
+
+	map = idr_find(&dev->ctx_idr, request.ctx_id);
+	if (!map) {
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 
-	map = dev->context_sareas[request.ctx_id];
 	mutex_unlock(&dev->struct_mutex);
 
 	request.handle = NULL;
-	list_for_each_entry(_entry, &dev->maplist->head, head) {
+	list_for_each_entry(_entry, &dev->maplist, head) {
 		if (_entry->map == map) {
 			request.handle =
 			    (void *)(unsigned long)_entry->user_token;
@@ -258,25 +188,24 @@
  * \return zero on success or a negative number on failure.
  *
  * Searches the mapping specified in \p arg and update the entry in
- * drm_device::context_sareas with it.
+ * drm_device::ctx_idr with it.
  */
 int drm_setsareactx(struct inode *inode, struct file *filp,
 		    unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_ctx_priv_map_t request;
-	drm_map_t *map = NULL;
-	drm_map_list_t *r_list = NULL;
-	struct list_head *list;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_ctx_priv_map request;
+	struct drm_map *map = NULL;
+	struct drm_map_list *r_list = NULL;
 
 	if (copy_from_user(&request,
-			   (drm_ctx_priv_map_t __user *) arg, sizeof(request)))
+			   (struct drm_ctx_priv_map __user *) arg,
+			   sizeof(request)))
 		return -EFAULT;
 
 	mutex_lock(&dev->struct_mutex);
-	list_for_each(list, &dev->maplist->head) {
-		r_list = list_entry(list, drm_map_list_t, head);
+	list_for_each_entry(r_list, &dev->maplist, head) {
 		if (r_list->map
 		    && r_list->user_token == (unsigned long)request.handle)
 			goto found;
@@ -289,11 +218,10 @@
 	map = r_list->map;
 	if (!map)
 		goto bad;
-	if (dev->max_context < 0)
+
+	if (IS_ERR(idr_replace(&dev->ctx_idr, map, request.ctx_id)))
 		goto bad;
-	if (request.ctx_id >= (unsigned)dev->max_context)
-		goto bad;
-	dev->context_sareas[request.ctx_id] = map;
+
 	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
@@ -314,7 +242,7 @@
  *
  * Attempt to set drm_device::context_flag.
  */
-static int drm_context_switch(drm_device_t * dev, int old, int new)
+static int drm_context_switch(struct drm_device * dev, int old, int new)
 {
 	if (test_and_set_bit(0, &dev->context_flag)) {
 		DRM_ERROR("Reentering -- FIXME\n");
@@ -342,7 +270,7 @@
  * hardware lock is held, clears the drm_device::context_flag and wakes up
  * drm_device::context_wait.
  */
-static int drm_context_switch_complete(drm_device_t * dev, int new)
+static int drm_context_switch_complete(struct drm_device * dev, int new)
 {
 	dev->last_context = new;	/* PRE/POST: This is the _only_ writer. */
 	dev->last_switch = jiffies;
@@ -372,9 +300,9 @@
 int drm_resctx(struct inode *inode, struct file *filp,
 	       unsigned int cmd, unsigned long arg)
 {
-	drm_ctx_res_t res;
-	drm_ctx_t __user *argp = (void __user *)arg;
-	drm_ctx_t ctx;
+	struct drm_ctx_res res;
+	struct drm_ctx_res __user *argp = (void __user *)arg;
+	struct drm_ctx ctx;
 	int i;
 
 	if (copy_from_user(&res, argp, sizeof(res)))
@@ -409,11 +337,11 @@
 int drm_addctx(struct inode *inode, struct file *filp,
 	       unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_ctx_list_t *ctx_entry;
-	drm_ctx_t __user *argp = (void __user *)arg;
-	drm_ctx_t ctx;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_ctx_list *ctx_entry;
+	struct drm_ctx __user *argp = (void __user *)arg;
+	struct drm_ctx ctx;
 
 	if (copy_from_user(&ctx, argp, sizeof(ctx)))
 		return -EFAULT;
@@ -449,7 +377,7 @@
 	ctx_entry->tag = priv;
 
 	mutex_lock(&dev->ctxlist_mutex);
-	list_add(&ctx_entry->head, &dev->ctxlist->head);
+	list_add(&ctx_entry->head, &dev->ctxlist);
 	++dev->ctx_count;
 	mutex_unlock(&dev->ctxlist_mutex);
 
@@ -477,8 +405,8 @@
 int drm_getctx(struct inode *inode, struct file *filp,
 	       unsigned int cmd, unsigned long arg)
 {
-	drm_ctx_t __user *argp = (void __user *)arg;
-	drm_ctx_t ctx;
+	struct drm_ctx __user *argp = (void __user *)arg;
+	struct drm_ctx ctx;
 
 	if (copy_from_user(&ctx, argp, sizeof(ctx)))
 		return -EFAULT;
@@ -505,11 +433,11 @@
 int drm_switchctx(struct inode *inode, struct file *filp,
 		  unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_ctx_t ctx;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_ctx ctx;
 
-	if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
+	if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx)))
 		return -EFAULT;
 
 	DRM_DEBUG("%d\n", ctx.handle);
@@ -530,11 +458,11 @@
 int drm_newctx(struct inode *inode, struct file *filp,
 	       unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_ctx_t ctx;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_ctx ctx;
 
-	if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
+	if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx)))
 		return -EFAULT;
 
 	DRM_DEBUG("%d\n", ctx.handle);
@@ -557,11 +485,11 @@
 int drm_rmctx(struct inode *inode, struct file *filp,
 	      unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_ctx_t ctx;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_ctx ctx;
 
-	if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
+	if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx)))
 		return -EFAULT;
 
 	DRM_DEBUG("%d\n", ctx.handle);
@@ -575,10 +503,10 @@
 	}
 
 	mutex_lock(&dev->ctxlist_mutex);
-	if (!list_empty(&dev->ctxlist->head)) {
-		drm_ctx_list_t *pos, *n;
+	if (!list_empty(&dev->ctxlist)) {
+		struct drm_ctx_list *pos, *n;
 
-		list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
+		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
 			if (pos->handle == ctx.handle) {
 				list_del(&pos->head);
 				drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
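
The context code above (and the drawable code further down) now leans on the kernel's generic idr allocator instead of a private bitmap plus a realloc'd pointer array. The pattern is: preallocate with idr_pre_get(), retry idr_get_new_above() if it returns -EAGAIN, attach data later with idr_replace(), and tear down with idr_find()/idr_remove(). A self-contained sketch of that lifecycle with the 2007-era idr API (the example_* names are invented; not part of the patch):

#include <linux/idr.h>
#include <linux/err.h>
#include <linux/slab.h>

static DEFINE_IDR(example_idr);

/* Allocate a handle >= 1 with no data attached yet (cf. drm_ctxbitmap_next). */
static int example_alloc_handle(void)
{
	int id, ret;

again:
	if (idr_pre_get(&example_idr, GFP_KERNEL) == 0)
		return -ENOMEM;			/* could not preallocate idr layers */

	ret = idr_get_new_above(&example_idr, NULL, 1, &id);
	if (ret == -EAGAIN)
		goto again;			/* raced with another allocator */

	return ret ? ret : id;
}

/* Attach data to an existing handle (cf. drm_setsareactx above). */
static int example_attach(int id, void *data)
{
	if (IS_ERR(idr_replace(&example_idr, data, id)))
		return -EINVAL;			/* handle was never allocated */
	return 0;
}

/* Drop a handle and whatever was attached to it. */
static void example_release(int id)
{
	kfree(idr_find(&example_idr, id));	/* kfree(NULL) is a no-op */
	idr_remove(&example_idr, id);
}
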
diff --git a/drivers/char/drm/drm_dma.c b/drivers/char/drm/drm_dma.c
index 32ed19c..802fbdb 100644
--- a/drivers/char/drm/drm_dma.c
+++ b/drivers/char/drm/drm_dma.c
@@ -43,7 +43,7 @@
  *
  * Allocate and initialize a drm_device_dma structure.
  */
-int drm_dma_setup(drm_device_t * dev)
+int drm_dma_setup(struct drm_device *dev)
 {
 	int i;
 
@@ -67,9 +67,9 @@
  * Free all pages associated with DMA buffers, the buffers and pages lists, and
  * finally the drm_device::dma structure itself.
  */
-void drm_dma_takedown(drm_device_t * dev)
+void drm_dma_takedown(struct drm_device *dev)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int i, j;
 
 	if (!dma)
@@ -129,7 +129,7 @@
  *
  * Resets the fields of \p buf.
  */
-void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf)
+void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
 {
 	if (!buf)
 		return;
@@ -152,9 +152,9 @@
  *
  * Frees each buffer associated with \p filp not already on the hardware.
  */
-void drm_core_reclaim_buffers(drm_device_t * dev, struct file *filp)
+void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int i;
 
 	if (!dma)
diff --git a/drivers/char/drm/drm_drawable.c b/drivers/char/drm/drm_drawable.c
index b33313b..d6cdba5 100644
--- a/drivers/char/drm/drm_drawable.c
+++ b/drivers/char/drm/drm_drawable.c
@@ -44,83 +44,30 @@
 {
 	DRM_DEVICE;
 	unsigned long irqflags;
-	int i, j;
-	u32 *bitfield = dev->drw_bitfield;
-	unsigned int bitfield_length = dev->drw_bitfield_length;
-	drm_drawable_info_t **info = dev->drw_info;
-	unsigned int info_length = dev->drw_info_length;
-	drm_draw_t draw;
+	struct drm_draw draw;
+	int new_id = 0;
+	int ret;
 
-	for (i = 0, j = 0; i < bitfield_length; i++) {
-		if (bitfield[i] == ~0)
-			continue;
-
-		for (; j < 8 * sizeof(*bitfield); j++)
-			if (!(bitfield[i] & (1 << j)))
-				goto done;
+again:
+	if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
+		DRM_ERROR("Out of memory expanding drawable idr\n");
+		return -ENOMEM;
 	}
-done:
-
-	if (i == bitfield_length) {
-		bitfield_length++;
-
-		bitfield = drm_alloc(bitfield_length * sizeof(*bitfield),
-				     DRM_MEM_BUFS);
-
-		if (!bitfield) {
-			DRM_ERROR("Failed to allocate new drawable bitfield\n");
-			return DRM_ERR(ENOMEM);
-		}
-
-		if (8 * sizeof(*bitfield) * bitfield_length > info_length) {
-			info_length += 8 * sizeof(*bitfield);
-
-			info = drm_alloc(info_length * sizeof(*info),
-					 DRM_MEM_BUFS);
-
-			if (!info) {
-				DRM_ERROR("Failed to allocate new drawable info"
-					  " array\n");
-
-				drm_free(bitfield,
-					 bitfield_length * sizeof(*bitfield),
-					 DRM_MEM_BUFS);
-				return DRM_ERR(ENOMEM);
-			}
-		}
-
-		bitfield[i] = 0;
-	}
-
-	draw.handle = i * 8 * sizeof(*bitfield) + j + 1;
-	DRM_DEBUG("%d\n", draw.handle);
 
 	spin_lock_irqsave(&dev->drw_lock, irqflags);
-
-	bitfield[i] |= 1 << j;
-	info[draw.handle - 1] = NULL;
-
-	if (bitfield != dev->drw_bitfield) {
-		memcpy(bitfield, dev->drw_bitfield, dev->drw_bitfield_length *
-		       sizeof(*bitfield));
-		drm_free(dev->drw_bitfield, sizeof(*bitfield) *
-			 dev->drw_bitfield_length, DRM_MEM_BUFS);
-		dev->drw_bitfield = bitfield;
-		dev->drw_bitfield_length = bitfield_length;
-	}
-
-	if (info != dev->drw_info) {
-		memcpy(info, dev->drw_info, dev->drw_info_length *
-		       sizeof(*info));
-		drm_free(dev->drw_info, sizeof(*info) * dev->drw_info_length,
-			 DRM_MEM_BUFS);
-		dev->drw_info = info;
-		dev->drw_info_length = info_length;
+	ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
+	if (ret == -EAGAIN) {
+		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+		goto again;
 	}
 
 	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
 
-	DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw));
+	draw.handle = new_id;
+
+	DRM_DEBUG("%d\n", draw.handle);
+
+	DRM_COPY_TO_USER_IOCTL((struct drm_draw __user *)data, draw, sizeof(draw));
 
 	return 0;
 }
@@ -131,141 +78,52 @@
 int drm_rmdraw(DRM_IOCTL_ARGS)
 {
 	DRM_DEVICE;
-	drm_draw_t draw;
- 	int id, idx;
- 	unsigned int shift;
+	struct drm_draw draw;
 	unsigned long irqflags;
-	u32 *bitfield = dev->drw_bitfield;
-	unsigned int bitfield_length = dev->drw_bitfield_length;
-	drm_drawable_info_t **info = dev->drw_info;
-	unsigned int info_length = dev->drw_info_length;
 
-	DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data,
+	DRM_COPY_FROM_USER_IOCTL(draw, (struct drm_draw __user *) data,
 				 sizeof(draw));
 
-	id = draw.handle - 1;
-	idx = id / (8 * sizeof(*bitfield));
-	shift = id % (8 * sizeof(*bitfield));
-
-	if (idx < 0 || idx >= bitfield_length ||
-	    !(bitfield[idx] & (1 << shift))) {
-		DRM_DEBUG("No such drawable %d\n", draw.handle);
-		return 0;
-	}
-
 	spin_lock_irqsave(&dev->drw_lock, irqflags);
 
-	bitfield[idx] &= ~(1 << shift);
+	drm_free(drm_get_drawable_info(dev, draw.handle),
+		 sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
+
+	idr_remove(&dev->drw_idr, draw.handle);
 
 	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
-	if (info[id]) {
-		drm_free(info[id]->rects, info[id]->num_rects *
-			 sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
-		drm_free(info[id], sizeof(**info), DRM_MEM_BUFS);
-	}
-
-	/* Can we shrink the arrays? */
-	if (idx == bitfield_length - 1) {
-		while (idx >= 0 && !bitfield[idx])
-			--idx;
-
-		bitfield_length = idx + 1;
-
-		bitfield = NULL;
-
-		if (bitfield_length) {
-			if (bitfield_length != dev->drw_bitfield_length)
-				bitfield = drm_alloc(bitfield_length *
-						     sizeof(*bitfield),
-						     DRM_MEM_BUFS);
-
-			if (!bitfield) {
-				bitfield = dev->drw_bitfield;
-				bitfield_length = dev->drw_bitfield_length;
-			}
-		}
-	}
-
-	if (bitfield != dev->drw_bitfield) {
-		info_length = 8 * sizeof(*bitfield) * bitfield_length;
-
-		if (info_length) {
-			info = drm_alloc(info_length * sizeof(*info),
-					 DRM_MEM_BUFS);
-
-			if (!info) {
-				info = dev->drw_info;
-				info_length = dev->drw_info_length;
-			}
-		} else
-			info = NULL;
-
-		spin_lock_irqsave(&dev->drw_lock, irqflags);
-
-		if (bitfield)
-			memcpy(bitfield, dev->drw_bitfield, bitfield_length *
-			       sizeof(*bitfield));
-		drm_free(dev->drw_bitfield, sizeof(*bitfield) *
-			 dev->drw_bitfield_length, DRM_MEM_BUFS);
-		dev->drw_bitfield = bitfield;
-		dev->drw_bitfield_length = bitfield_length;
-
-		if (info != dev->drw_info) {
-			if (info)
-				memcpy(info, dev->drw_info, info_length *
-				       sizeof(*info));
-			drm_free(dev->drw_info, sizeof(*info) *
-				 dev->drw_info_length, DRM_MEM_BUFS);
-			dev->drw_info = info;
-			dev->drw_info_length = info_length;
-		}
-
-		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-	}
-
 	DRM_DEBUG("%d\n", draw.handle);
 	return 0;
 }
 
-int drm_update_drawable_info(DRM_IOCTL_ARGS) {
+int drm_update_drawable_info(DRM_IOCTL_ARGS)
+{
 	DRM_DEVICE;
-	drm_update_draw_t update;
-	unsigned int id, idx, shift;
-	u32 *bitfield = dev->drw_bitfield;
-	unsigned long irqflags, bitfield_length = dev->drw_bitfield_length;
-	drm_drawable_info_t *info;
-	drm_clip_rect_t *rects;
+	struct drm_update_draw update;
+	unsigned long irqflags;
+	struct drm_clip_rect *rects;
+	struct drm_drawable_info *info;
 	int err;
 
-	DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data,
+	DRM_COPY_FROM_USER_IOCTL(update, (struct drm_update_draw __user *) data,
 				 sizeof(update));
 
-	id = update.handle - 1;
-	idx = id / (8 * sizeof(*bitfield));
-	shift = id % (8 * sizeof(*bitfield));
-
-	if (idx < 0 || idx >= bitfield_length ||
-	    !(bitfield[idx] & (1 << shift))) {
-		DRM_ERROR("No such drawable %d\n", update.handle);
-		return DRM_ERR(EINVAL);
-	}
-
-	info = dev->drw_info[id];
-
+	info = idr_find(&dev->drw_idr, update.handle);
 	if (!info) {
-		info = drm_calloc(1, sizeof(drm_drawable_info_t), DRM_MEM_BUFS);
-
-		if (!info) {
-			DRM_ERROR("Failed to allocate drawable info memory\n");
-			return DRM_ERR(ENOMEM);
+		info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS);
+		if (!info)
+			return -ENOMEM;
+		if (IS_ERR(idr_replace(&dev->drw_idr, info, update.handle))) {
+			DRM_ERROR("No such drawable %d\n", update.handle);
+			drm_free(info, sizeof(*info), DRM_MEM_BUFS);
+			return -EINVAL;
 		}
 	}
 
 	switch (update.type) {
 	case DRM_DRAWABLE_CLIPRECTS:
 		if (update.num != info->num_rects) {
-			rects = drm_alloc(update.num * sizeof(drm_clip_rect_t),
+			rects = drm_alloc(update.num * sizeof(struct drm_clip_rect),
 					 DRM_MEM_BUFS);
 		} else
 			rects = info->rects;
@@ -277,7 +135,7 @@
 		}
 
 		if (update.num && DRM_COPY_FROM_USER(rects,
-						     (drm_clip_rect_t __user *)
+						     (struct drm_clip_rect __user *)
 						     (unsigned long)update.data,
 						     update.num *
 						     sizeof(*rects))) {
@@ -290,17 +148,16 @@
 
 		if (rects != info->rects) {
 			drm_free(info->rects, info->num_rects *
-				 sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
+				 sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
 		}
 
 		info->rects = rects;
 		info->num_rects = update.num;
-		dev->drw_info[id] = info;
 
 		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
 
 		DRM_DEBUG("Updated %d cliprects for drawable %d\n",
-			  info->num_rects, id);
+			  info->num_rects, update.handle);
 		break;
 	default:
 		DRM_ERROR("Invalid update type %d\n", update.type);
@@ -310,11 +167,9 @@
 	return 0;
 
 error:
-	if (!dev->drw_info[id])
-		drm_free(info, sizeof(*info), DRM_MEM_BUFS);
-	else if (rects != dev->drw_info[id]->rects)
-		drm_free(rects, update.num *
-			 sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
+	if (rects != info->rects)
+		drm_free(rects, update.num * sizeof(struct drm_clip_rect),
+			 DRM_MEM_BUFS);
 
 	return err;
 }
@@ -322,20 +177,27 @@
 /**
  * Caller must hold the drawable spinlock!
  */
-drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) {
-	u32 *bitfield = dev->drw_bitfield;
-	unsigned int idx, shift;
-
-	id--;
-	idx = id / (8 * sizeof(*bitfield));
-	shift = id % (8 * sizeof(*bitfield));
-
-	if (idx < 0 || idx >= dev->drw_bitfield_length ||
-	    !(bitfield[idx] & (1 << shift))) {
-		DRM_DEBUG("No such drawable %d\n", id);
-		return NULL;
-	}
-
-	return dev->drw_info[id];
+struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
+{
+	return idr_find(&dev->drw_idr, id);
 }
 EXPORT_SYMBOL(drm_get_drawable_info);
+
+static int drm_drawable_free(int idr, void *p, void *data)
+{
+	struct drm_drawable_info *info = p;
+
+	if (info) {
+		drm_free(info->rects, info->num_rects *
+			 sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
+		drm_free(info, sizeof(*info), DRM_MEM_BUFS);
+	}
+
+	return 0;
+}
+
+void drm_drawable_free_all(struct drm_device *dev)
+{
+	idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
+	idr_remove_all(&dev->drw_idr);
+}
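
The drm_drawable.c hunks above are a textbook switch from a hand-grown bitfield
allocator to the kernel's idr facility.  A minimal sketch of the same
pre-allocate/allocate-under-lock pattern follows; it targets the 2007-era idr
API used in the patch (idr_pre_get()/idr_get_new_above()), and the names
my_idr, my_lock and obj are illustrative only, not part of the patch.

#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

/*
 * Sketch only: allocate a 1-based handle for "obj".
 * idr_pre_get() reserves memory with GFP_KERNEL outside the spinlock;
 * idr_get_new_above() then hands out an id >= 1 under the lock and
 * returns -EAGAIN if the reservation raced away, in which case we retry.
 */
static int alloc_handle(struct idr *my_idr, spinlock_t *my_lock, void *obj)
{
	unsigned long flags;
	int id, ret;

again:
	if (idr_pre_get(my_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	spin_lock_irqsave(my_lock, flags);
	ret = idr_get_new_above(my_idr, obj, 1, &id);
	spin_unlock_irqrestore(my_lock, flags);

	if (ret == -EAGAIN)
		goto again;
	if (ret)
		return ret;

	return id;
}

Lookup and teardown then reduce to idr_find(), idr_for_each() and
idr_remove_all(), exactly as drm_get_drawable_info() and
drm_drawable_free_all() do above.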
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 8e77b7e..19994cd 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -129,11 +129,11 @@
  *
  * \sa drm_device
  */
-int drm_lastclose(drm_device_t * dev)
+int drm_lastclose(struct drm_device * dev)
 {
-	drm_magic_entry_t *pt, *next;
-	drm_map_list_t *r_list;
-	drm_vma_entry_t *vma, *vma_next;
+	struct drm_magic_entry *pt, *next;
+	struct drm_map_list *r_list, *list_t;
+	struct drm_vma_entry *vma, *vma_temp;
 	int i;
 
 	DRM_DEBUG("\n");
@@ -151,19 +151,10 @@
 	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
-	/* Free drawable information memory */
-	for (i = 0; i < dev->drw_bitfield_length / sizeof(*dev->drw_bitfield);
-	     i++) {
-		drm_drawable_info_t *info = drm_get_drawable_info(dev, i);
-
-		if (info) {
-			drm_free(info->rects, info->num_rects *
-				 sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
-			drm_free(info, sizeof(*info), DRM_MEM_BUFS);
-		}
-	}
-
 	mutex_lock(&dev->struct_mutex);
+
+	/* Free drawable information memory */
+	drm_drawable_free_all(dev);
 	del_timer(&dev->timer);
 
 	/* Clear pid list */
@@ -178,19 +169,17 @@
 
 	/* Clear AGP information */
 	if (drm_core_has_AGP(dev) && dev->agp) {
-		drm_agp_mem_t *entry;
-		drm_agp_mem_t *nexte;
+		struct drm_agp_mem *entry, *tempe;
 
 		/* Remove AGP resources, but leave dev->agp
 		   intact until drv_cleanup is called. */
-		for (entry = dev->agp->memory; entry; entry = nexte) {
-			nexte = entry->next;
+		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
 			if (entry->bound)
 				drm_unbind_agp(entry->memory);
 			drm_free_agp(entry->memory, entry->pages);
 			drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
 		}
-		dev->agp->memory = NULL;
+		INIT_LIST_HEAD(&dev->agp->memory);
 
 		if (dev->agp->acquired)
 			drm_agp_release(dev);
@@ -204,20 +193,14 @@
 	}
 
 	/* Clear vma list (only built for debugging) */
-	if (dev->vmalist) {
-		for (vma = dev->vmalist; vma; vma = vma_next) {
-			vma_next = vma->next;
-			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
-		}
-		dev->vmalist = NULL;
+	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+		list_del(&vma->head);
+		drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
 	}
 
-	if (dev->maplist) {
-		while (!list_empty(&dev->maplist->head)) {
-			struct list_head *list = dev->maplist->head.next;
-			r_list = list_entry(list, drm_map_list_t, head);
-			drm_rmmap_locked(dev, r_list->map);
-		}
+	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
+		drm_rmmap_locked(dev, r_list->map);
+		r_list = NULL;
 	}
 
 	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
@@ -298,7 +281,7 @@
  *
  * \sa drm_init
  */
-static void drm_cleanup(drm_device_t * dev)
+static void drm_cleanup(struct drm_device * dev)
 {
 	DRM_DEBUG("\n");
 
@@ -309,11 +292,7 @@
 
 	drm_lastclose(dev);
 
-	if (dev->maplist) {
-		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
-		dev->maplist = NULL;
-		drm_ht_remove(&dev->map_hash);
-	}
+	drm_ht_remove(&dev->map_hash);
 
 	drm_ctxbitmap_cleanup(dev);
 
@@ -342,8 +321,8 @@
 void drm_exit(struct drm_driver *driver)
 {
 	int i;
-	drm_device_t *dev = NULL;
-	drm_head_t *head;
+	struct drm_device *dev = NULL;
+	struct drm_head *head;
 
 	DRM_DEBUG("\n");
 
@@ -442,10 +421,10 @@
 static int drm_version(struct inode *inode, struct file *filp,
 		       unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_version_t __user *argp = (void __user *)arg;
-	drm_version_t version;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_version __user *argp = (void __user *)arg;
+	struct drm_version version;
 	int len;
 
 	if (copy_from_user(&version, argp, sizeof(version)))
@@ -478,8 +457,8 @@
 int drm_ioctl(struct inode *inode, struct file *filp,
 	      unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_ioctl_desc_t *ioctl;
 	drm_ioctl_t *func;
 	unsigned int nr = DRM_IOCTL_NR(cmd);
@@ -529,3 +508,17 @@
 }
 
 EXPORT_SYMBOL(drm_ioctl);
+
+drm_local_map_t *drm_getsarea(struct drm_device *dev)
+{
+	struct drm_map_list *entry;
+
+	list_for_each_entry(entry, &dev->maplist, head) {
+		if (entry->map && entry->map->type == _DRM_SHM &&
+		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+			return entry->map;
+		}
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(drm_getsarea);
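
drm_lastclose() now walks dev->vmalist, dev->maplist and dev->agp->memory
with list_for_each_entry_safe(), which caches the next element so the current
one can be unlinked and freed mid-walk.  A small stand-alone sketch of that
idiom (struct item and free_all_items() are made-up names, and kfree() stands
in for drm_free()):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head head;
	int payload;
};

/* Sketch: delete and free every element while iterating. */
static void free_all_items(struct list_head *items)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, items, head) {
		list_del(&it->head);	/* unlink before the memory goes away */
		kfree(it);
	}
}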
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 3b159ca..7bc51ba 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -39,9 +39,9 @@
 #include <linux/poll.h>
 
 static int drm_open_helper(struct inode *inode, struct file *filp,
-			   drm_device_t * dev);
+			   struct drm_device * dev);
 
-static int drm_setup(drm_device_t * dev)
+static int drm_setup(struct drm_device * dev)
 {
 	drm_local_map_t *map;
 	int i;
@@ -79,13 +79,6 @@
 	drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
 	INIT_LIST_HEAD(&dev->magicfree);
 
-	dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
-	if (dev->ctxlist == NULL)
-		return -ENOMEM;
-	memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
-	INIT_LIST_HEAD(&dev->ctxlist->head);
-
-	dev->vmalist = NULL;
 	dev->sigdata.lock = NULL;
 	init_waitqueue_head(&dev->lock.lock_queue);
 	dev->queue_count = 0;
@@ -135,7 +128,7 @@
  */
 int drm_open(struct inode *inode, struct file *filp)
 {
-	drm_device_t *dev = NULL;
+	struct drm_device *dev = NULL;
 	int minor = iminor(inode);
 	int retcode = 0;
 
@@ -174,7 +167,7 @@
  */
 int drm_stub_open(struct inode *inode, struct file *filp)
 {
-	drm_device_t *dev = NULL;
+	struct drm_device *dev = NULL;
 	int minor = iminor(inode);
 	int err = -ENODEV;
 	const struct file_operations *old_fops;
@@ -230,10 +223,10 @@
  * filp and add it into the double linked list in \p dev.
  */
 static int drm_open_helper(struct inode *inode, struct file *filp,
-			   drm_device_t * dev)
+			   struct drm_device * dev)
 {
 	int minor = iminor(inode);
-	drm_file_t *priv;
+	struct drm_file *priv;
 	int ret;
 
 	if (filp->f_flags & O_EXCL)
@@ -258,6 +251,8 @@
 	priv->authenticated = capable(CAP_SYS_ADMIN);
 	priv->lock_count = 0;
 
+	INIT_LIST_HEAD(&priv->lhead);
+
 	if (dev->driver->open) {
 		ret = dev->driver->open(dev, priv);
 		if (ret < 0)
@@ -265,19 +260,10 @@
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	if (!dev->file_last) {
-		priv->next = NULL;
-		priv->prev = NULL;
-		dev->file_first = priv;
-		dev->file_last = priv;
-		/* first opener automatically becomes master */
+	if (list_empty(&dev->filelist))
 		priv->master = 1;
-	} else {
-		priv->next = NULL;
-		priv->prev = dev->file_last;
-		dev->file_last->next = priv;
-		dev->file_last = priv;
-	}
+
+	list_add(&priv->lhead, &dev->filelist);
 	mutex_unlock(&dev->struct_mutex);
 
 #ifdef __alpha__
@@ -309,8 +295,8 @@
 /** No-op. */
 int drm_fasync(int fd, struct file *filp, int on)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	int retcode;
 
 	DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
@@ -336,8 +322,8 @@
  */
 int drm_release(struct inode *inode, struct file *filp)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev;
 	int retcode = 0;
 
 	lock_kernel();
@@ -414,10 +400,10 @@
 	drm_fasync(-1, filp, 0);
 
 	mutex_lock(&dev->ctxlist_mutex);
-	if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
-		drm_ctx_list_t *pos, *n;
+	if (!list_empty(&dev->ctxlist)) {
+		struct drm_ctx_list *pos, *n;
 
-		list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
+		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
 			if (pos->tag == priv &&
 			    pos->handle != DRM_KERNEL_CONTEXT) {
 				if (dev->driver->context_dtor)
@@ -436,22 +422,12 @@
 
 	mutex_lock(&dev->struct_mutex);
 	if (priv->remove_auth_on_close == 1) {
-		drm_file_t *temp = dev->file_first;
-		while (temp) {
+		struct drm_file *temp;
+
+		list_for_each_entry(temp, &dev->filelist, lhead)
 			temp->authenticated = 0;
-			temp = temp->next;
-		}
 	}
-	if (priv->prev) {
-		priv->prev->next = priv->next;
-	} else {
-		dev->file_first = priv->next;
-	}
-	if (priv->next) {
-		priv->next->prev = priv->prev;
-	} else {
-		dev->file_last = priv->prev;
-	}
+	list_del(&priv->lhead);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (dev->driver->postclose)
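
The drm_fops.c changes drop the open-coded file_first/file_last pointer pair
in favour of a list_head (dev->filelist, with the node embedded as
drm_file::lhead), so insertion, removal and the "first opener becomes master"
test each collapse to a one-liner.  A sketch of the equivalent operations,
with invented names (struct opener, dev_list):

#include <linux/list.h>

struct opener {
	struct list_head node;
	int master;
};

/* Sketch: the three list operations the patch relies on. */
static void add_opener(struct list_head *dev_list, struct opener *priv)
{
	INIT_LIST_HEAD(&priv->node);		/* safe even before linking */
	priv->master = list_empty(dev_list);	/* first opener becomes master */
	list_add(&priv->node, dev_list);
}

static void remove_opener(struct opener *priv)
{
	list_del(&priv->node);			/* no head/tail pointer fix-ups needed */
}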
diff --git a/drivers/char/drm/drm_hashtab.c b/drivers/char/drm/drm_hashtab.c
index 31acb62..3ad3190 100644
--- a/drivers/char/drm/drm_hashtab.c
+++ b/drivers/char/drm/drm_hashtab.c
@@ -36,7 +36,7 @@
 #include "drm_hashtab.h"
 #include <linux/hash.h>
 
-int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
 {
 	unsigned int i;
 
@@ -63,9 +63,9 @@
 	return 0;
 }
 
-void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
 {
-	drm_hash_item_t *entry;
+	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
 	struct hlist_node *list;
 	unsigned int hashed_key;
@@ -75,15 +75,15 @@
 	DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
 	h_list = &ht->table[hashed_key];
 	hlist_for_each(list, h_list) {
-		entry = hlist_entry(list, drm_hash_item_t, head);
+		entry = hlist_entry(list, struct drm_hash_item, head);
 		DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
 	}
 }
 
-static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht, 
+static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, 
 					  unsigned long key)
 {
-	drm_hash_item_t *entry;
+	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
 	struct hlist_node *list;
 	unsigned int hashed_key;
@@ -91,7 +91,7 @@
 	hashed_key = hash_long(key, ht->order);
 	h_list = &ht->table[hashed_key];
 	hlist_for_each(list, h_list) {
-		entry = hlist_entry(list, drm_hash_item_t, head);
+		entry = hlist_entry(list, struct drm_hash_item, head);
 		if (entry->key == key)
 			return list;
 		if (entry->key > key)
@@ -101,9 +101,9 @@
 }
 
 
-int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 {
-	drm_hash_item_t *entry;
+	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
 	struct hlist_node *list, *parent;
 	unsigned int hashed_key;
@@ -113,7 +113,7 @@
 	h_list = &ht->table[hashed_key];
 	parent = NULL;
 	hlist_for_each(list, h_list) {
-		entry = hlist_entry(list, drm_hash_item_t, head);
+		entry = hlist_entry(list, struct drm_hash_item, head);
 		if (entry->key == key)
 			return -EINVAL;
 		if (entry->key > key)
@@ -132,7 +132,7 @@
  * Just insert an item and return any "bits" bit key that hasn't been 
  * used before.
  */
-int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
+int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
 			      unsigned long seed, int bits, int shift,
 			      unsigned long add)
 {
@@ -156,8 +156,8 @@
 	return 0;
 }
 
-int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key,
-		     drm_hash_item_t **item)
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
+		     struct drm_hash_item **item)
 {
 	struct hlist_node *list;
 
@@ -165,11 +165,11 @@
 	if (!list)
 		return -EINVAL;
 
-	*item = hlist_entry(list, drm_hash_item_t, head);
+	*item = hlist_entry(list, struct drm_hash_item, head);
 	return 0;
 }
 
-int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
 {
 	struct hlist_node *list;
 
@@ -182,14 +182,14 @@
 	return -EINVAL;
 }
 
-int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item)
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 {
 	hlist_del_init(&item->head);
 	ht->fill--;
 	return 0;
 }
 
-void drm_ht_remove(drm_open_hash_t *ht)
+void drm_ht_remove(struct drm_open_hash *ht)
 {
 	if (ht->table) {
 		if (ht->use_vmalloc)
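
drm_hashtab keeps its behaviour; only the typedef spellings change.  For
reference, a caller embeds a struct drm_hash_item in its own object, inserts
it with the key filled in, and recovers the container on lookup via
drm_hash_entry().  The sketch below is illustrative only; struct thing and
find_thing() are not part of the DRM code.

#include "drm_hashtab.h"

struct thing {
	struct drm_hash_item hash;	/* hash node lives inside the object */
	void *data;
};

/* Sketch: look a thing up by key in an already-created drm_open_hash. */
static struct thing *find_thing(struct drm_open_hash *ht, unsigned long key)
{
	struct drm_hash_item *item;

	if (drm_ht_find_item(ht, key, &item))
		return NULL;			/* -EINVAL means no such key */
	return drm_hash_entry(item, struct thing, hash);
}

Insertion is the mirror image: set thing->hash.key and call
drm_ht_insert_item(ht, &thing->hash), which rejects duplicate keys.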
diff --git a/drivers/char/drm/drm_hashtab.h b/drivers/char/drm/drm_hashtab.h
index 613091c..0f13767 100644
--- a/drivers/char/drm/drm_hashtab.h
+++ b/drivers/char/drm/drm_hashtab.h
@@ -37,31 +37,31 @@
 
 #define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
 
-typedef struct drm_hash_item{
+struct drm_hash_item {
 	struct hlist_node head;
 	unsigned long key;
-} drm_hash_item_t;
+};
 
-typedef struct drm_open_hash{
+struct drm_open_hash {
 	unsigned int size;
 	unsigned int order;
 	unsigned int fill;
 	struct hlist_head *table;
 	int use_vmalloc;
-} drm_open_hash_t;
+};
 
 
-extern int drm_ht_create(drm_open_hash_t *ht, unsigned int order);
-extern int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item);
-extern int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
+extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
+extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
 				     unsigned long seed, int bits, int shift,
 				     unsigned long add);
-extern int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key, drm_hash_item_t **item);
+extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
 
-extern void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key);
-extern int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key);
-extern int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item);
-extern void drm_ht_remove(drm_open_hash_t *ht);
+extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
+extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
+extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+extern void drm_ht_remove(struct drm_open_hash *ht);
 
 
 #endif
diff --git a/drivers/char/drm/drm_ioc32.c b/drivers/char/drm/drm_ioc32.c
index fafeb34..462f46f 100644
--- a/drivers/char/drm/drm_ioc32.c
+++ b/drivers/char/drm/drm_ioc32.c
@@ -82,7 +82,7 @@
 			      unsigned long arg)
 {
 	drm_version32_t v32;
-	drm_version_t __user *version;
+	struct drm_version __user *version;
 	int err;
 
 	if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
@@ -129,7 +129,7 @@
 				unsigned long arg)
 {
 	drm_unique32_t uq32;
-	drm_unique_t __user *u;
+	struct drm_unique __user *u;
 	int err;
 
 	if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
@@ -159,7 +159,7 @@
 				unsigned long arg)
 {
 	drm_unique32_t uq32;
-	drm_unique_t __user *u;
+	struct drm_unique __user *u;
 
 	if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
 		return -EFAULT;
@@ -179,8 +179,8 @@
 typedef struct drm_map32 {
 	u32 offset;		/**< Requested physical address (0 for SAREA)*/
 	u32 size;		/**< Requested physical size (bytes) */
-	drm_map_type_t type;	/**< Type of memory to map */
-	drm_map_flags_t flags;	/**< Flags */
+	enum drm_map_type type;	/**< Type of memory to map */
+	enum drm_map_flags flags;	/**< Flags */
 	u32 handle;		/**< User-space: "Handle" to pass to mmap() */
 	int mtrr;		/**< MTRR slot used */
 } drm_map32_t;
@@ -190,7 +190,7 @@
 {
 	drm_map32_t __user *argp = (void __user *)arg;
 	drm_map32_t m32;
-	drm_map_t __user *map;
+	struct drm_map __user *map;
 	int idx, err;
 	void *handle;
 
@@ -228,7 +228,7 @@
 {
 	drm_map32_t __user *argp = (void __user *)arg;
 	drm_map32_t m32;
-	drm_map_t __user *map;
+	struct drm_map __user *map;
 	int err;
 	void *handle;
 
@@ -270,7 +270,7 @@
 			    unsigned long arg)
 {
 	drm_map32_t __user *argp = (void __user *)arg;
-	drm_map_t __user *map;
+	struct drm_map __user *map;
 	u32 handle;
 
 	if (get_user(handle, &argp->handle))
@@ -300,7 +300,7 @@
 {
 	drm_client32_t c32;
 	drm_client32_t __user *argp = (void __user *)arg;
-	drm_client_t __user *client;
+	struct drm_client __user *client;
 	int idx, err;
 
 	if (get_user(idx, &argp->idx))
@@ -333,7 +333,7 @@
 	u32 count;
 	struct {
 		u32 value;
-		drm_stat_type_t type;
+		enum drm_stat_type type;
 	} data[15];
 } drm_stats32_t;
 
@@ -342,7 +342,7 @@
 {
 	drm_stats32_t s32;
 	drm_stats32_t __user *argp = (void __user *)arg;
-	drm_stats_t __user *stats;
+	struct drm_stats __user *stats;
 	int i, err;
 
 	stats = compat_alloc_user_space(sizeof(*stats));
@@ -379,7 +379,7 @@
 			      unsigned long arg)
 {
 	drm_buf_desc32_t __user *argp = (void __user *)arg;
-	drm_buf_desc_t __user *buf;
+	struct drm_buf_desc __user *buf;
 	int err;
 	unsigned long agp_start;
 
@@ -411,7 +411,7 @@
 {
 	drm_buf_desc32_t b32;
 	drm_buf_desc32_t __user *argp = (void __user *)arg;
-	drm_buf_desc_t __user *buf;
+	struct drm_buf_desc __user *buf;
 
 	if (copy_from_user(&b32, argp, sizeof(b32)))
 		return -EFAULT;
@@ -440,8 +440,8 @@
 	drm_buf_info32_t req32;
 	drm_buf_info32_t __user *argp = (void __user *)arg;
 	drm_buf_desc32_t __user *to;
-	drm_buf_info_t __user *request;
-	drm_buf_desc_t __user *list;
+	struct drm_buf_info __user *request;
+	struct drm_buf_desc __user *list;
 	size_t nbytes;
 	int i, err;
 	int count, actual;
@@ -457,11 +457,11 @@
 	    && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
 		return -EFAULT;
 
-	nbytes = sizeof(*request) + count * sizeof(drm_buf_desc_t);
+	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
 	request = compat_alloc_user_space(nbytes);
 	if (!access_ok(VERIFY_WRITE, request, nbytes))
 		return -EFAULT;
-	list = (drm_buf_desc_t *) (request + 1);
+	list = (struct drm_buf_desc *) (request + 1);
 
 	if (__put_user(count, &request->count)
 	    || __put_user(list, &request->list))
@@ -477,7 +477,7 @@
 	if (count >= actual)
 		for (i = 0; i < actual; ++i)
 			if (__copy_in_user(&to[i], &list[i],
-					   offsetof(drm_buf_desc_t, flags)))
+					   offsetof(struct drm_buf_desc, flags)))
 				return -EFAULT;
 
 	if (__put_user(actual, &argp->count))
@@ -505,8 +505,8 @@
 	drm_buf_map32_t __user *argp = (void __user *)arg;
 	drm_buf_map32_t req32;
 	drm_buf_pub32_t __user *list32;
-	drm_buf_map_t __user *request;
-	drm_buf_pub_t __user *list;
+	struct drm_buf_map __user *request;
+	struct drm_buf_pub __user *list;
 	int i, err;
 	int count, actual;
 	size_t nbytes;
@@ -519,11 +519,11 @@
 
 	if (count < 0)
 		return -EINVAL;
-	nbytes = sizeof(*request) + count * sizeof(drm_buf_pub_t);
+	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
 	request = compat_alloc_user_space(nbytes);
 	if (!access_ok(VERIFY_WRITE, request, nbytes))
 		return -EFAULT;
-	list = (drm_buf_pub_t *) (request + 1);
+	list = (struct drm_buf_pub *) (request + 1);
 
 	if (__put_user(count, &request->count)
 	    || __put_user(list, &request->list))
@@ -539,7 +539,7 @@
 	if (count >= actual)
 		for (i = 0; i < actual; ++i)
 			if (__copy_in_user(&list32[i], &list[i],
-					   offsetof(drm_buf_pub_t, address))
+					   offsetof(struct drm_buf_pub, address))
 			    || __get_user(addr, &list[i].address)
 			    || __put_user((unsigned long)addr,
 					  &list32[i].address))
@@ -562,7 +562,7 @@
 			       unsigned long arg)
 {
 	drm_buf_free32_t req32;
-	drm_buf_free_t __user *request;
+	struct drm_buf_free __user *request;
 	drm_buf_free32_t __user *argp = (void __user *)arg;
 
 	if (copy_from_user(&req32, argp, sizeof(req32)))
@@ -589,7 +589,7 @@
 				  unsigned long arg)
 {
 	drm_ctx_priv_map32_t req32;
-	drm_ctx_priv_map_t __user *request;
+	struct drm_ctx_priv_map __user *request;
 	drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
 
 	if (copy_from_user(&req32, argp, sizeof(req32)))
@@ -610,7 +610,7 @@
 static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
 				  unsigned long arg)
 {
-	drm_ctx_priv_map_t __user *request;
+	struct drm_ctx_priv_map __user *request;
 	drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
 	int err;
 	unsigned int ctx_id;
@@ -648,7 +648,7 @@
 {
 	drm_ctx_res32_t __user *argp = (void __user *)arg;
 	drm_ctx_res32_t res32;
-	drm_ctx_res_t __user *res;
+	struct drm_ctx_res __user *res;
 	int err;
 
 	if (copy_from_user(&res32, argp, sizeof(res32)))
@@ -658,7 +658,7 @@
 	if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
 		return -EFAULT;
 	if (__put_user(res32.count, &res->count)
-	    || __put_user((drm_ctx_t __user *) (unsigned long)res32.contexts,
+	    || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
 			  &res->contexts))
 		return -EFAULT;
 
@@ -679,7 +679,7 @@
 	int send_count;		  /**< Number of buffers to send */
 	u32 send_indices;	  /**< List of handles to buffers */
 	u32 send_sizes;		  /**< Lengths of data to send */
-	drm_dma_flags_t flags;		  /**< Flags */
+	enum drm_dma_flags flags;		  /**< Flags */
 	int request_count;	  /**< Number of buffers requested */
 	int request_size;	  /**< Desired size for buffers */
 	u32 request_indices;	  /**< Buffer information */
@@ -692,7 +692,7 @@
 {
 	drm_dma32_t d32;
 	drm_dma32_t __user *argp = (void __user *)arg;
-	drm_dma_t __user *d;
+	struct drm_dma __user *d;
 	int err;
 
 	if (copy_from_user(&d32, argp, sizeof(d32)))
@@ -740,7 +740,7 @@
 {
 	drm_agp_mode32_t __user *argp = (void __user *)arg;
 	drm_agp_mode32_t m32;
-	drm_agp_mode_t __user *mode;
+	struct drm_agp_mode __user *mode;
 
 	if (get_user(m32.mode, &argp->mode))
 		return -EFAULT;
@@ -772,7 +772,7 @@
 {
 	drm_agp_info32_t __user *argp = (void __user *)arg;
 	drm_agp_info32_t i32;
-	drm_agp_info_t __user *info;
+	struct drm_agp_info __user *info;
 	int err;
 
 	info = compat_alloc_user_space(sizeof(*info));
@@ -813,7 +813,7 @@
 {
 	drm_agp_buffer32_t __user *argp = (void __user *)arg;
 	drm_agp_buffer32_t req32;
-	drm_agp_buffer_t __user *request;
+	struct drm_agp_buffer __user *request;
 	int err;
 
 	if (copy_from_user(&req32, argp, sizeof(req32)))
@@ -845,7 +845,7 @@
 			       unsigned long arg)
 {
 	drm_agp_buffer32_t __user *argp = (void __user *)arg;
-	drm_agp_buffer_t __user *request;
+	struct drm_agp_buffer __user *request;
 	u32 handle;
 
 	request = compat_alloc_user_space(sizeof(*request));
@@ -868,7 +868,7 @@
 {
 	drm_agp_binding32_t __user *argp = (void __user *)arg;
 	drm_agp_binding32_t req32;
-	drm_agp_binding_t __user *request;
+	struct drm_agp_binding __user *request;
 
 	if (copy_from_user(&req32, argp, sizeof(req32)))
 		return -EFAULT;
@@ -887,7 +887,7 @@
 				 unsigned long arg)
 {
 	drm_agp_binding32_t __user *argp = (void __user *)arg;
-	drm_agp_binding_t __user *request;
+	struct drm_agp_binding __user *request;
 	u32 handle;
 
 	request = compat_alloc_user_space(sizeof(*request));
@@ -910,7 +910,7 @@
 			       unsigned long arg)
 {
 	drm_scatter_gather32_t __user *argp = (void __user *)arg;
-	drm_scatter_gather_t __user *request;
+	struct drm_scatter_gather __user *request;
 	int err;
 	unsigned long x;
 
@@ -938,7 +938,7 @@
 			      unsigned long arg)
 {
 	drm_scatter_gather32_t __user *argp = (void __user *)arg;
-	drm_scatter_gather_t __user *request;
+	struct drm_scatter_gather __user *request;
 	unsigned long x;
 
 	request = compat_alloc_user_space(sizeof(*request));
@@ -953,13 +953,13 @@
 }
 
 struct drm_wait_vblank_request32 {
-	drm_vblank_seq_type_t type;
+	enum drm_vblank_seq_type type;
 	unsigned int sequence;
 	u32 signal;
 };
 
 struct drm_wait_vblank_reply32 {
-	drm_vblank_seq_type_t type;
+	enum drm_vblank_seq_type type;
 	unsigned int sequence;
 	s32 tval_sec;
 	s32 tval_usec;
@@ -975,7 +975,7 @@
 {
 	drm_wait_vblank32_t __user *argp = (void __user *)arg;
 	drm_wait_vblank32_t req32;
-	drm_wait_vblank_t __user *request;
+	union drm_wait_vblank __user *request;
 	int err;
 
 	if (copy_from_user(&req32, argp, sizeof(req32)))
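
All of the drm_ioc32.c thunks follow the same shape: copy the packed 32-bit
argument in, rebuild the native structure in scratch space obtained from
compat_alloc_user_space(), hand that to the 64-bit ioctl path, and translate
the result back.  A condensed, hypothetical example of that shape follows;
struct foo32/foo, compat_foo() and the placeholder comment are assumptions,
not code from this series.

#include <linux/compat.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct foo32 { u32 count; u32 list; };			/* 32-bit userspace view */
struct foo   { int count; void __user *list; };		/* native kernel view */

static int compat_foo(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct foo32 f32;
	struct foo __user *f;

	if (copy_from_user(&f32, (void __user *)arg, sizeof(f32)))
		return -EFAULT;

	/* Scratch area on the user stack, usable by the native handler. */
	f = compat_alloc_user_space(sizeof(*f));
	if (!access_ok(VERIFY_WRITE, f, sizeof(*f)))
		return -EFAULT;

	if (__put_user(f32.count, &f->count)
	    || __put_user((void __user *)(unsigned long)f32.list, &f->list))
		return -EFAULT;

	/* ... invoke the native ioctl handler with (unsigned long)f here ... */
	return 0;
}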
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index 5658955..b195e10 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -52,10 +52,10 @@
 int drm_getunique(struct inode *inode, struct file *filp,
 		  unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_unique_t __user *argp = (void __user *)arg;
-	drm_unique_t u;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_unique __user *argp = (void __user *)arg;
+	struct drm_unique u;
 
 	if (copy_from_user(&u, argp, sizeof(u)))
 		return -EFAULT;
@@ -86,15 +86,15 @@
 int drm_setunique(struct inode *inode, struct file *filp,
 		  unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_unique_t u;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_unique u;
 	int domain, bus, slot, func, ret;
 
 	if (dev->unique_len || dev->unique)
 		return -EBUSY;
 
-	if (copy_from_user(&u, (drm_unique_t __user *) arg, sizeof(u)))
+	if (copy_from_user(&u, (struct drm_unique __user *) arg, sizeof(u)))
 		return -EFAULT;
 
 	if (!u.unique_len || u.unique_len > 1024)
@@ -136,7 +136,7 @@
 	return 0;
 }
 
-static int drm_set_busid(drm_device_t * dev)
+static int drm_set_busid(struct drm_device * dev)
 {
 	int len;
 
@@ -184,11 +184,11 @@
 int drm_getmap(struct inode *inode, struct file *filp,
 	       unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_map_t __user *argp = (void __user *)arg;
-	drm_map_t map;
-	drm_map_list_t *r_list = NULL;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_map __user *argp = (void __user *)arg;
+	struct drm_map map;
+	struct drm_map_list *r_list = NULL;
 	struct list_head *list;
 	int idx;
 	int i;
@@ -204,9 +204,9 @@
 	}
 
 	i = 0;
-	list_for_each(list, &dev->maplist->head) {
+	list_for_each(list, &dev->maplist) {
 		if (i == idx) {
-			r_list = list_entry(list, drm_map_list_t, head);
+			r_list = list_entry(list, struct drm_map_list, head);
 			break;
 		}
 		i++;
@@ -245,11 +245,11 @@
 int drm_getclient(struct inode *inode, struct file *filp,
 		  unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_client_t __user *argp = (drm_client_t __user *)arg;
-	drm_client_t client;
-	drm_file_t *pt;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_client __user *argp = (struct drm_client __user *)arg;
+	struct drm_client client;
+	struct drm_file *pt;
 	int idx;
 	int i;
 
@@ -257,12 +257,18 @@
 		return -EFAULT;
 	idx = client.idx;
 	mutex_lock(&dev->struct_mutex);
-	for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ;
-
-	if (!pt) {
+	
+	if (list_empty(&dev->filelist)) {
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
+
+	i = 0;
+	list_for_each_entry(pt, &dev->filelist, lhead) {
+		if (i++ >= idx)
+			break;
+	}
+
 	client.auth = pt->authenticated;
 	client.pid = pt->pid;
 	client.uid = pt->uid;
@@ -288,9 +294,9 @@
 int drm_getstats(struct inode *inode, struct file *filp,
 		 unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_stats_t stats;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_stats stats;
 	int i;
 
 	memset(&stats, 0, sizeof(stats));
@@ -310,7 +316,7 @@
 
 	mutex_unlock(&dev->struct_mutex);
 
-	if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats)))
+	if (copy_to_user((struct drm_stats __user *) arg, &stats, sizeof(stats)))
 		return -EFAULT;
 	return 0;
 }
@@ -329,10 +335,10 @@
 int drm_setversion(DRM_IOCTL_ARGS)
 {
 	DRM_DEVICE;
-	drm_set_version_t sv;
-	drm_set_version_t retv;
+	struct drm_set_version sv;
+	struct drm_set_version retv;
 	int if_version;
-	drm_set_version_t __user *argp = (void __user *)data;
+	struct drm_set_version __user *argp = (void __user *)data;
 	int ret;
 
 	if (copy_from_user(&sv, argp, sizeof(sv)))
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index 2e75331..871d2fd 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -53,10 +53,10 @@
 int drm_irq_by_busid(struct inode *inode, struct file *filp,
 		     unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_irq_busid_t __user *argp = (void __user *)arg;
-	drm_irq_busid_t p;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_irq_busid __user *argp = (void __user *)arg;
+	struct drm_irq_busid p;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return -EINVAL;
@@ -87,7 +87,7 @@
  * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
  * before and after the installation.
  */
-static int drm_irq_install(drm_device_t * dev)
+static int drm_irq_install(struct drm_device * dev)
 {
 	int ret;
 	unsigned long sh_flags = 0;
@@ -120,8 +120,8 @@
 
 		spin_lock_init(&dev->vbl_lock);
 
-		INIT_LIST_HEAD(&dev->vbl_sigs.head);
-		INIT_LIST_HEAD(&dev->vbl_sigs2.head);
+		INIT_LIST_HEAD(&dev->vbl_sigs);
+		INIT_LIST_HEAD(&dev->vbl_sigs2);
 
 		dev->vbl_pending = 0;
 	}
@@ -155,7 +155,7 @@
  *
  * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
  */
-int drm_irq_uninstall(drm_device_t * dev)
+int drm_irq_uninstall(struct drm_device * dev)
 {
 	int irq_enabled;
 
@@ -197,13 +197,13 @@
 int drm_control(struct inode *inode, struct file *filp,
 		unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_control_t ctl;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_control ctl;
 
 	/* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
 
-	if (copy_from_user(&ctl, (drm_control_t __user *) arg, sizeof(ctl)))
+	if (copy_from_user(&ctl, (struct drm_control __user *) arg, sizeof(ctl)))
 		return -EFAULT;
 
 	switch (ctl.func) {
@@ -244,10 +244,10 @@
  */
 int drm_wait_vblank(DRM_IOCTL_ARGS)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_wait_vblank_t __user *argp = (void __user *)data;
-	drm_wait_vblank_t vblwait;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	union drm_wait_vblank __user *argp = (void __user *)data;
+	union drm_wait_vblank vblwait;
 	struct timeval now;
 	int ret = 0;
 	unsigned int flags, seq;
@@ -292,9 +292,9 @@
 
 	if (flags & _DRM_VBLANK_SIGNAL) {
 		unsigned long irqflags;
-		drm_vbl_sig_t *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
+		struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
 				      ? &dev->vbl_sigs2 : &dev->vbl_sigs;
-		drm_vbl_sig_t *vbl_sig;
+		struct drm_vbl_sig *vbl_sig;
 
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 
@@ -302,7 +302,7 @@
 		 * for the same vblank sequence number; nothing to be done in
 		 * that case
 		 */
-		list_for_each_entry(vbl_sig, &vbl_sigs->head, head) {
+		list_for_each_entry(vbl_sig, vbl_sigs, head) {
 			if (vbl_sig->sequence == vblwait.request.sequence
 			    && vbl_sig->info.si_signo == vblwait.request.signal
 			    && vbl_sig->task == current) {
@@ -324,7 +324,7 @@
 
 		if (!
 		    (vbl_sig =
-		     drm_alloc(sizeof(drm_vbl_sig_t), DRM_MEM_DRIVER))) {
+		     drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
 			return -ENOMEM;
 		}
 
@@ -336,7 +336,7 @@
 
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 
-		list_add_tail((struct list_head *)vbl_sig, &vbl_sigs->head);
+		list_add_tail(&vbl_sig->head, vbl_sigs);
 
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
@@ -371,7 +371,7 @@
  *
  * If a signal is not requested, then calls vblank_wait().
  */
-void drm_vbl_send_signals(drm_device_t * dev)
+void drm_vbl_send_signals(struct drm_device * dev)
 {
 	unsigned long flags;
 	int i;
@@ -379,20 +379,18 @@
 	spin_lock_irqsave(&dev->vbl_lock, flags);
 
 	for (i = 0; i < 2; i++) {
-		struct list_head *list, *tmp;
-		drm_vbl_sig_t *vbl_sig;
-		drm_vbl_sig_t *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+		struct drm_vbl_sig *vbl_sig, *tmp;
+		struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
 		unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
 						   &dev->vbl_received);
 
-		list_for_each_safe(list, tmp, &vbl_sigs->head) {
-			vbl_sig = list_entry(list, drm_vbl_sig_t, head);
+		list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
 			if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
 				vbl_sig->info.si_code = vbl_seq;
 				send_sig_info(vbl_sig->info.si_signo,
 					      &vbl_sig->info, vbl_sig->task);
 
-				list_del(list);
+				list_del(&vbl_sig->head);
 
 				drm_free(vbl_sig, sizeof(*vbl_sig),
 					 DRM_MEM_DRIVER);
@@ -418,7 +416,7 @@
  */
 static void drm_locked_tasklet_func(unsigned long data)
 {
-	drm_device_t *dev = (drm_device_t*)data;
+	struct drm_device *dev = (struct drm_device *)data;
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
@@ -455,7 +453,7 @@
  * context, it must not make any assumptions about this. Also, the HW lock will
  * be held with the kernel context or any client context.
  */
-void drm_locked_tasklet(drm_device_t *dev, void (*func)(drm_device_t*))
+void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
 {
 	unsigned long irqflags;
 	static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
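
The vblank-signal change is more than a rename: the old code linked entries
with list_add_tail((struct list_head *)vbl_sig, ...), a cast that only works
while the list node happens to be the first member of struct drm_vbl_sig.
Linking through the named member removes that silent layout dependency.
Sketch below; the field order of sig_entry is illustrative, not the real
drmP.h layout.

#include <linux/list.h>

struct sig_entry {
	struct list_head head;		/* may sit anywhere in the struct */
	unsigned int sequence;
};

static void queue_sig(struct list_head *sigs, struct sig_entry *e)
{
	/* Correct regardless of where 'head' is placed inside sig_entry. */
	list_add_tail(&e->head, sigs);
}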
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c
index befd1af..c0534b5 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/char/drm/drm_lock.c
@@ -51,15 +51,15 @@
 int drm_lock(struct inode *inode, struct file *filp,
 	     unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	DECLARE_WAITQUEUE(entry, current);
-	drm_lock_t lock;
+	struct drm_lock lock;
 	int ret = 0;
 
 	++priv->lock_count;
 
-	if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
+	if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock)))
 		return -EFAULT;
 
 	if (lock.context == DRM_KERNEL_CONTEXT) {
@@ -152,12 +152,12 @@
 int drm_unlock(struct inode *inode, struct file *filp,
 	       unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_lock_t lock;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_lock lock;
 	unsigned long irqflags;
 
-	if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
+	if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock)))
 		return -EFAULT;
 
 	if (lock.context == DRM_KERNEL_CONTEXT) {
@@ -202,7 +202,7 @@
  *
  * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
  */
-int drm_lock_take(drm_lock_data_t *lock_data,
+int drm_lock_take(struct drm_lock_data *lock_data,
 		  unsigned int context)
 {
 	unsigned int old, new, prev;
@@ -251,7 +251,7 @@
  * Resets the lock file pointer.
  * Marks the lock as held by the given context, via the \p cmpxchg instruction.
  */
-static int drm_lock_transfer(drm_lock_data_t *lock_data,
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
 			     unsigned int context)
 {
 	unsigned int old, new, prev;
@@ -277,7 +277,7 @@
  * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
  * waiting on the lock queue.
  */
-int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
+int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
 {
 	unsigned int old, new, prev;
 	volatile unsigned int *lock = &lock_data->hw_lock->lock;
@@ -319,7 +319,7 @@
  */
 static int drm_notifier(void *priv)
 {
-	drm_sigdata_t *s = (drm_sigdata_t *) priv;
+	struct drm_sigdata *s = (struct drm_sigdata *) priv;
 	unsigned int old, new, prev;
 
 	/* Allow signal delivery if lock isn't held */
@@ -350,7 +350,7 @@
  * having to worry about starvation.
  */
 
-void drm_idlelock_take(drm_lock_data_t *lock_data)
+void drm_idlelock_take(struct drm_lock_data *lock_data)
 {
 	int ret = 0;
 
@@ -369,7 +369,7 @@
 }
 EXPORT_SYMBOL(drm_idlelock_take);
 
-void drm_idlelock_release(drm_lock_data_t *lock_data)
+void drm_idlelock_release(struct drm_lock_data *lock_data)
 {
 	unsigned int old, prev;
 	volatile unsigned int *lock = &lock_data->hw_lock->lock;
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c
index 92a8670..9301990 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/char/drm/drm_memory.c
@@ -80,7 +80,7 @@
 
 #if __OS_HAS_AGP
 static void *agp_remap(unsigned long offset, unsigned long size,
-		       drm_device_t * dev)
+		       struct drm_device * dev)
 {
 	unsigned long *phys_addr_map, i, num_pages =
 	    PAGE_ALIGN(size) / PAGE_SIZE;
@@ -94,7 +94,7 @@
 	offset -= dev->hose->mem_space->start;
 #endif
 
-	for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
+	list_for_each_entry(agpmem, &dev->agp->memory, head)
 		if (agpmem->bound <= offset
 		    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
 		    (offset + size))
@@ -123,7 +123,7 @@
 }
 
 /** Wrapper around agp_allocate_memory() */
-DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type)
+DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
 {
 	return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
 }
@@ -148,7 +148,7 @@
 
 #else  /*  __OS_HAS_AGP  */
 static inline void *agp_remap(unsigned long offset, unsigned long size,
-			      drm_device_t * dev)
+			      struct drm_device * dev)
 {
 	return NULL;
 }
diff --git a/drivers/char/drm/drm_mm.c b/drivers/char/drm/drm_mm.c
index 2ec1d9f..3e6bc14 100644
--- a/drivers/char/drm/drm_mm.c
+++ b/drivers/char/drm/drm_mm.c
@@ -44,26 +44,26 @@
 #include "drmP.h"
 #include <linux/slab.h>
 
-unsigned long drm_mm_tail_space(drm_mm_t *mm)
+unsigned long drm_mm_tail_space(struct drm_mm *mm)
 {
 	struct list_head *tail_node;
-	drm_mm_node_t *entry;
+	struct drm_mm_node *entry;
 
 	tail_node = mm->ml_entry.prev;
-	entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
 	if (!entry->free)
 		return 0;
 
 	return entry->size;
 }
 
-int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size)
+int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
 {
 	struct list_head *tail_node;
-	drm_mm_node_t *entry;
+	struct drm_mm_node *entry;
 
 	tail_node = mm->ml_entry.prev;
-	entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
 	if (!entry->free)
 		return -ENOMEM;
 
@@ -75,13 +75,13 @@
 }
 
 
-static int drm_mm_create_tail_node(drm_mm_t *mm,
+static int drm_mm_create_tail_node(struct drm_mm *mm,
 			    unsigned long start,
 			    unsigned long size)
 {
-	drm_mm_node_t *child;
+	struct drm_mm_node *child;
 
-	child = (drm_mm_node_t *)
+	child = (struct drm_mm_node *)
 		drm_alloc(sizeof(*child), DRM_MEM_MM);
 	if (!child)
 		return -ENOMEM;
@@ -98,13 +98,13 @@
 }
 
 
-int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
+int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
 {
 	struct list_head *tail_node;
-	drm_mm_node_t *entry;
+	struct drm_mm_node *entry;
 
 	tail_node = mm->ml_entry.prev;
-	entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
 	if (!entry->free) {
 		return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
 	}
@@ -112,12 +112,12 @@
 	return 0;
 }
 
-static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
+static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
 					    unsigned long size)
 {
-	drm_mm_node_t *child;
+	struct drm_mm_node *child;
 
-	child = (drm_mm_node_t *)
+	child = (struct drm_mm_node *)
 		drm_alloc(sizeof(*child), DRM_MEM_MM);
 	if (!child)
 		return NULL;
@@ -139,12 +139,12 @@
 
 
 
-drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
+struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
 				unsigned long size, unsigned alignment)
 {
 
-	drm_mm_node_t *align_splitoff = NULL;
-	drm_mm_node_t *child;
+	struct drm_mm_node *align_splitoff = NULL;
+	struct drm_mm_node *child;
 	unsigned tmp = 0;
 
 	if (alignment)
@@ -175,26 +175,26 @@
  * Otherwise add to the free stack.
  */
 
-void drm_mm_put_block(drm_mm_node_t * cur)
+void drm_mm_put_block(struct drm_mm_node * cur)
 {
 
-	drm_mm_t *mm = cur->mm;
+	struct drm_mm *mm = cur->mm;
 	struct list_head *cur_head = &cur->ml_entry;
 	struct list_head *root_head = &mm->ml_entry;
-	drm_mm_node_t *prev_node = NULL;
-	drm_mm_node_t *next_node;
+	struct drm_mm_node *prev_node = NULL;
+	struct drm_mm_node *next_node;
 
 	int merged = 0;
 
 	if (cur_head->prev != root_head) {
-		prev_node = list_entry(cur_head->prev, drm_mm_node_t, ml_entry);
+		prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
 		if (prev_node->free) {
 			prev_node->size += cur->size;
 			merged = 1;
 		}
 	}
 	if (cur_head->next != root_head) {
-		next_node = list_entry(cur_head->next, drm_mm_node_t, ml_entry);
+		next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
 		if (next_node->free) {
 			if (merged) {
 				prev_node->size += next_node->size;
@@ -218,14 +218,14 @@
 	}
 }
 
-drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
 				  unsigned long size,
 				  unsigned alignment, int best_match)
 {
 	struct list_head *list;
 	const struct list_head *free_stack = &mm->fl_entry;
-	drm_mm_node_t *entry;
-	drm_mm_node_t *best;
+	struct drm_mm_node *entry;
+	struct drm_mm_node *best;
 	unsigned long best_size;
 	unsigned wasted;
 
@@ -233,7 +233,7 @@
 	best_size = ~0UL;
 
 	list_for_each(list, free_stack) {
-		entry = list_entry(list, drm_mm_node_t, fl_entry);
+		entry = list_entry(list, struct drm_mm_node, fl_entry);
 		wasted = 0;
 
 		if (entry->size < size)
@@ -259,14 +259,14 @@
 	return best;
 }
 
-int drm_mm_clean(drm_mm_t * mm)
+int drm_mm_clean(struct drm_mm * mm)
 {
 	struct list_head *head = &mm->ml_entry;
 
 	return (head->next->next == head);
 }
 
-int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
+int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
 	INIT_LIST_HEAD(&mm->ml_entry);
 	INIT_LIST_HEAD(&mm->fl_entry);
@@ -275,12 +275,12 @@
 }
 
 
-void drm_mm_takedown(drm_mm_t * mm)
+void drm_mm_takedown(struct drm_mm * mm)
 {
 	struct list_head *bnode = mm->fl_entry.next;
-	drm_mm_node_t *entry;
+	struct drm_mm_node *entry;
 
-	entry = list_entry(bnode, drm_mm_node_t, fl_entry);
+	entry = list_entry(bnode, struct drm_mm_node, fl_entry);
 
 	if (entry->ml_entry.next != &mm->ml_entry ||
 	    entry->fl_entry.next != &mm->fl_entry) {
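
drm_mm.c is a pure typedef-to-struct rename, but the hunks do spell out the
allocator's public surface.  A hedged usage sketch of that interface follows;
the sizes are made up, and the assumption is that the declarations live in
drmP.h in this tree.

#include "drmP.h"

/* Sketch: manage a 1 MiB range and carve out one page-aligned 4 KiB block. */
static int mm_demo(void)
{
	struct drm_mm mm;
	struct drm_mm_node *hole, *block;

	if (drm_mm_init(&mm, 0, 1024 * 1024))
		return -ENOMEM;

	hole = drm_mm_search_free(&mm, 4096, 4096, 0 /* first fit */);
	if (hole) {
		block = drm_mm_get_block(hole, 4096, 4096);
		if (block) {
			/* block->start and block->size describe the allocation */
			drm_mm_put_block(block);	/* free, merging with neighbours */
		}
	}

	drm_mm_takedown(&mm);
	return 0;
}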
diff --git a/drivers/char/drm/drm_os_linux.h b/drivers/char/drm/drm_os_linux.h
index 0fe7b44..0b8d343 100644
--- a/drivers/char/drm/drm_os_linux.h
+++ b/drivers/char/drm/drm_os_linux.h
@@ -34,8 +34,8 @@
 /** Read/write memory barrier */
 #define DRM_MEMORYBARRIER()		mb()
 /** DRM device local declaration */
-#define DRM_DEVICE	drm_file_t	*priv	= filp->private_data; \
-			drm_device_t	*dev	= priv->head->dev
+#define DRM_DEVICE	struct drm_file	*priv	= filp->private_data; \
+			struct drm_device *dev	= priv->head->dev
 
 /** IRQ handler arguments and return type and values */
 #define DRM_IRQ_ARGS		int irq, void *arg
@@ -96,24 +96,6 @@
 
 #define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data
 
-/**
- * Get the pointer to the SAREA.
- *
- * Searches the SAREA on the mapping lists and points drm_device::sarea to it.
- */
-#define DRM_GETSAREA()							 \
-do { 									 \
-	drm_map_list_t *entry;						 \
-	list_for_each_entry( entry, &dev->maplist->head, head ) {	 \
-		if ( entry->map &&					 \
-		     entry->map->type == _DRM_SHM &&			 \
-		     (entry->map->flags & _DRM_CONTAINS_LOCK) ) {	 \
-			dev_priv->sarea = entry->map;			 \
- 			break;						 \
- 		}							 \
- 	}								 \
-} while (0)
-
 #define DRM_HZ HZ
 
 #define DRM_WAIT_ON( ret, queue, timeout, condition )		\
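
With the DRM_GETSAREA() macro removed, drivers that previously expanded it
call the exported drm_getsarea() added in drm_drv.c above, which returns NULL
when no SAREA map exists.  A typical caller might look like the fragment
below; dev_priv is a driver-private structure used here only for illustration.

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		return -EINVAL;
	}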
diff --git a/drivers/char/drm/drm_pci.c b/drivers/char/drm/drm_pci.c
index 86a0f1c..e292bb0 100644
--- a/drivers/char/drm/drm_pci.c
+++ b/drivers/char/drm/drm_pci.c
@@ -47,7 +47,7 @@
 /**
  * \brief Allocate a PCI consistent memory block, for DMA.
  */
-drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
 				dma_addr_t maxaddr)
 {
 	drm_dma_handle_t *dmah;
@@ -126,7 +126,7 @@
  *
  * This function is for internal use in the Linux-specific DRM core code.
  */
-void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t * dmah)
+void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 {
 #if 1
 	unsigned long addr;
@@ -172,7 +172,7 @@
 /**
  * \brief Free a PCI consistent memory block
  */
-void drm_pci_free(drm_device_t * dev, drm_dma_handle_t * dmah)
+void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 {
 	__drm_pci_free(dev, dmah);
 	kfree(dmah);
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index b204498..12dfea8 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -87,7 +87,7 @@
  * "/proc/dri/%minor%/", and each entry in proc_list as
  * "/proc/dri/%minor%/%name%".
  */
-int drm_proc_init(drm_device_t * dev, int minor,
+int drm_proc_init(struct drm_device * dev, int minor,
 		  struct proc_dir_entry *root, struct proc_dir_entry **dev_root)
 {
 	struct proc_dir_entry *ent;
@@ -163,7 +163,7 @@
 static int drm_name_info(char *buf, char **start, off_t offset, int request,
 			 int *eof, void *data)
 {
-	drm_device_t *dev = (drm_device_t *) data;
+	struct drm_device *dev = (struct drm_device *) data;
 	int len = 0;
 
 	if (offset > DRM_PROC_LIMIT) {
@@ -205,11 +205,10 @@
 static int drm__vm_info(char *buf, char **start, off_t offset, int request,
 			int *eof, void *data)
 {
-	drm_device_t *dev = (drm_device_t *) data;
+	struct drm_device *dev = (struct drm_device *) data;
 	int len = 0;
-	drm_map_t *map;
-	drm_map_list_t *r_list;
-	struct list_head *list;
+	struct drm_map *map;
+	struct drm_map_list *r_list;
 
 	/* Hardcoded from _DRM_FRAME_BUFFER,
 	   _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
@@ -229,9 +228,7 @@
 	DRM_PROC_PRINT("slot	 offset	      size type flags	 "
 		       "address mtrr\n\n");
 	i = 0;
-	if (dev->maplist != NULL)
-		list_for_each(list, &dev->maplist->head) {
-		r_list = list_entry(list, drm_map_list_t, head);
+	list_for_each_entry(r_list, &dev->maplist, head) {
 		map = r_list->map;
 		if (!map)
 			continue;
@@ -242,14 +239,15 @@
 		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08x ",
 			       i,
 			       map->offset,
-			       map->size, type, map->flags, r_list->user_token);
+			       map->size, type, map->flags,
+			       r_list->user_token);
 		if (map->mtrr < 0) {
 			DRM_PROC_PRINT("none\n");
 		} else {
 			DRM_PROC_PRINT("%4d\n", map->mtrr);
 		}
 		i++;
-		}
+	}
 
 	if (len > request + offset)
 		return request;
@@ -263,7 +261,7 @@
 static int drm_vm_info(char *buf, char **start, off_t offset, int request,
 		       int *eof, void *data)
 {
-	drm_device_t *dev = (drm_device_t *) data;
+	struct drm_device *dev = (struct drm_device *) data;
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
@@ -286,10 +284,10 @@
 static int drm__queues_info(char *buf, char **start, off_t offset,
 			    int request, int *eof, void *data)
 {
-	drm_device_t *dev = (drm_device_t *) data;
+	struct drm_device *dev = (struct drm_device *) data;
 	int len = 0;
 	int i;
-	drm_queue_t *q;
+	struct drm_queue *q;
 
 	if (offset > DRM_PROC_LIMIT) {
 		*eof = 1;
@@ -336,7 +334,7 @@
 static int drm_queues_info(char *buf, char **start, off_t offset, int request,
 			   int *eof, void *data)
 {
-	drm_device_t *dev = (drm_device_t *) data;
+	struct drm_device *dev = (struct drm_device *) data;
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
@@ -359,9 +357,9 @@
 static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
 			  int *eof, void *data)
 {
-	drm_device_t *dev = (drm_device_t *) data;
+	struct drm_device *dev = (struct drm_device *) data;
 	int len = 0;
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int i;
 
 	if (!dma || offset > DRM_PROC_LIMIT) {
@@ -408,7 +406,7 @@
 static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
 			 int *eof, void *data)
 {
-	drm_device_t *dev = (drm_device_t *) data;
+	struct drm_device *dev = (struct drm_device *) data;
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
@@ -431,9 +429,9 @@
 static int drm__clients_info(char *buf, char **start, off_t offset,
 			     int request, int *eof, void *data)
 {
-	drm_device_t *dev = (drm_device_t *) data;
+	struct drm_device *dev = (struct drm_device *) data;
 	int len = 0;
-	drm_file_t *priv;
+	struct drm_file *priv;
 
 	if (offset > DRM_PROC_LIMIT) {
 		*eof = 1;
@@ -444,7 +442,7 @@
 	*eof = 0;
 
 	DRM_PROC_PRINT("a dev	pid    uid	magic	  ioctls\n\n");
-	for (priv = dev->file_first; priv; priv = priv->next) {
+	list_for_each_entry(priv, &dev->filelist, lhead) {
 		DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
 			       priv->authenticated ? 'y' : 'n',
 			       priv->minor,
@@ -464,7 +462,7 @@
 static int drm_clients_info(char *buf, char **start, off_t offset,
 			    int request, int *eof, void *data)
 {
-	drm_device_t *dev = (drm_device_t *) data;
+	struct drm_device *dev = (struct drm_device *) data;
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
@@ -478,9 +476,9 @@
 static int drm__vma_info(char *buf, char **start, off_t offset, int request,
 			 int *eof, void *data)
 {
-	drm_device_t *dev = (drm_device_t *) data;
+	struct drm_device *dev = (struct drm_device *) data;
 	int len = 0;
-	drm_vma_entry_t *pt;
+	struct drm_vma_entry *pt;
 	struct vm_area_struct *vma;
 #if defined(__i386__)
 	unsigned int pgprot;
@@ -497,7 +495,7 @@
 	DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
 		       atomic_read(&dev->vma_count),
 		       high_memory, virt_to_phys(high_memory));
-	for (pt = dev->vmalist; pt; pt = pt->next) {
+	list_for_each_entry(pt, &dev->vmalist, head) {
 		if (!(vma = pt->vma))
 			continue;
 		DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
@@ -537,7 +535,7 @@
 static int drm_vma_info(char *buf, char **start, off_t offset, int request,
 			int *eof, void *data)
 {
-	drm_device_t *dev = (drm_device_t *) data;
+	struct drm_device *dev = (struct drm_device *) data;
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
diff --git a/drivers/char/drm/drm_sarea.h b/drivers/char/drm/drm_sarea.h
index e94297b..f546696 100644
--- a/drivers/char/drm/drm_sarea.h
+++ b/drivers/char/drm/drm_sarea.h
@@ -50,29 +50,35 @@
 #define SAREA_DRAWABLE_CLAIMED_ENTRY    0x80000000
 
 /** SAREA drawable */
-typedef struct drm_sarea_drawable {
+struct drm_sarea_drawable {
 	unsigned int stamp;
 	unsigned int flags;
-} drm_sarea_drawable_t;
+};
 
 /** SAREA frame */
-typedef struct drm_sarea_frame {
+struct drm_sarea_frame {
 	unsigned int x;
 	unsigned int y;
 	unsigned int width;
 	unsigned int height;
 	unsigned int fullscreen;
-} drm_sarea_frame_t;
+};
 
 /** SAREA */
-typedef struct drm_sarea {
+struct drm_sarea {
     /** first thing is always the DRM locking structure */
-	drm_hw_lock_t lock;
+	struct drm_hw_lock lock;
     /** \todo Use readers/writer lock for drm_sarea::drawable_lock */
-	drm_hw_lock_t drawable_lock;
-	drm_sarea_drawable_t drawableTable[SAREA_MAX_DRAWABLES];	/**< drawables */
-	drm_sarea_frame_t frame;	/**< frame */
+	struct drm_hw_lock drawable_lock;
+	struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES];	/**< drawables */
+	struct drm_sarea_frame frame;	/**< frame */
 	drm_context_t dummy_context;
-} drm_sarea_t;
+};
+
+#ifndef __KERNEL__
+typedef struct drm_sarea_drawable drm_sarea_drawable_t;
+typedef struct drm_sarea_frame drm_sarea_frame_t;
+typedef struct drm_sarea drm_sarea_t;
+#endif
 
 #endif				/* _DRM_SAREA_H_ */
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/char/drm/drm_scatter.c
index 06ef7dd..067d25d 100644
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/char/drm/drm_scatter.c
@@ -36,7 +36,7 @@
 
 #define DEBUG_SCATTER 0
 
-void drm_sg_cleanup(drm_sg_mem_t * entry)
+void drm_sg_cleanup(struct drm_sg_mem * entry)
 {
 	struct page *page;
 	int i;
@@ -65,11 +65,11 @@
 int drm_sg_alloc(struct inode *inode, struct file *filp,
 		 unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_scatter_gather_t __user *argp = (void __user *)arg;
-	drm_scatter_gather_t request;
-	drm_sg_mem_t *entry;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_scatter_gather __user *argp = (void __user *)arg;
+	struct drm_scatter_gather request;
+	struct drm_sg_mem *entry;
 	unsigned long pages, i, j;
 
 	DRM_DEBUG("%s\n", __FUNCTION__);
@@ -201,16 +201,16 @@
 int drm_sg_free(struct inode *inode, struct file *filp,
 		unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_scatter_gather_t request;
-	drm_sg_mem_t *entry;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_scatter_gather request;
+	struct drm_sg_mem *entry;
 
 	if (!drm_core_check_feature(dev, DRIVER_SG))
 		return -EINVAL;
 
 	if (copy_from_user(&request,
-			   (drm_scatter_gather_t __user *) arg,
+			   (struct drm_scatter_gather __user *) arg,
 			   sizeof(request)))
 		return -EFAULT;
 
diff --git a/drivers/char/drm/drm_sman.c b/drivers/char/drm/drm_sman.c
index e15db6d..8421a93 100644
--- a/drivers/char/drm/drm_sman.c
+++ b/drivers/char/drm/drm_sman.c
@@ -38,13 +38,13 @@
 
 #include "drm_sman.h"
 
-typedef struct drm_owner_item {
-	drm_hash_item_t owner_hash;
+struct drm_owner_item {
+	struct drm_hash_item owner_hash;
 	struct list_head sman_list;
 	struct list_head mem_blocks;
-} drm_owner_item_t;
+};
 
-void drm_sman_takedown(drm_sman_t * sman)
+void drm_sman_takedown(struct drm_sman * sman)
 {
 	drm_ht_remove(&sman->user_hash_tab);
 	drm_ht_remove(&sman->owner_hash_tab);
@@ -56,12 +56,12 @@
 EXPORT_SYMBOL(drm_sman_takedown);
 
 int
-drm_sman_init(drm_sman_t * sman, unsigned int num_managers,
+drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
 	      unsigned int user_order, unsigned int owner_order)
 {
 	int ret = 0;
 
-	sman->mm = (drm_sman_mm_t *) drm_calloc(num_managers, sizeof(*sman->mm),
+	sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm),
 						DRM_MEM_MM);
 	if (!sman->mm) {
 		ret = -ENOMEM;
@@ -88,8 +88,8 @@
 static void *drm_sman_mm_allocate(void *private, unsigned long size,
 				  unsigned alignment)
 {
-	drm_mm_t *mm = (drm_mm_t *) private;
-	drm_mm_node_t *tmp;
+	struct drm_mm *mm = (struct drm_mm *) private;
+	struct drm_mm_node *tmp;
 
 	tmp = drm_mm_search_free(mm, size, alignment, 1);
 	if (!tmp) {
@@ -101,30 +101,30 @@
 
 static void drm_sman_mm_free(void *private, void *ref)
 {
-	drm_mm_node_t *node = (drm_mm_node_t *) ref;
+	struct drm_mm_node *node = (struct drm_mm_node *) ref;
 
 	drm_mm_put_block(node);
 }
 
 static void drm_sman_mm_destroy(void *private)
 {
-	drm_mm_t *mm = (drm_mm_t *) private;
+	struct drm_mm *mm = (struct drm_mm *) private;
 	drm_mm_takedown(mm);
 	drm_free(mm, sizeof(*mm), DRM_MEM_MM);
 }
 
 static unsigned long drm_sman_mm_offset(void *private, void *ref)
 {
-	drm_mm_node_t *node = (drm_mm_node_t *) ref;
+	struct drm_mm_node *node = (struct drm_mm_node *) ref;
 	return node->start;
 }
 
 int
-drm_sman_set_range(drm_sman_t * sman, unsigned int manager,
+drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
 		   unsigned long start, unsigned long size)
 {
-	drm_sman_mm_t *sman_mm;
-	drm_mm_t *mm;
+	struct drm_sman_mm *sman_mm;
+	struct drm_mm *mm;
 	int ret;
 
 	BUG_ON(manager >= sman->num_managers);
@@ -153,8 +153,8 @@
 EXPORT_SYMBOL(drm_sman_set_range);
 
 int
-drm_sman_set_manager(drm_sman_t * sman, unsigned int manager,
-		     drm_sman_mm_t * allocator)
+drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
+		     struct drm_sman_mm * allocator)
 {
 	BUG_ON(manager >= sman->num_managers);
 	sman->mm[manager] = *allocator;
@@ -163,16 +163,16 @@
 }
 EXPORT_SYMBOL(drm_sman_set_manager);
 
-static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman,
+static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
 						 unsigned long owner)
 {
 	int ret;
-	drm_hash_item_t *owner_hash_item;
-	drm_owner_item_t *owner_item;
+	struct drm_hash_item *owner_hash_item;
+	struct drm_owner_item *owner_item;
 
 	ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
 	if (!ret) {
-		return drm_hash_entry(owner_hash_item, drm_owner_item_t,
+		return drm_hash_entry(owner_hash_item, struct drm_owner_item,
 				      owner_hash);
 	}
 
@@ -194,14 +194,14 @@
 	return NULL;
 }
 
-drm_memblock_item_t *drm_sman_alloc(drm_sman_t *sman, unsigned int manager,
+struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
 				    unsigned long size, unsigned alignment,
 				    unsigned long owner)
 {
 	void *tmp;
-	drm_sman_mm_t *sman_mm;
-	drm_owner_item_t *owner_item;
-	drm_memblock_item_t *memblock;
+	struct drm_sman_mm *sman_mm;
+	struct drm_owner_item *owner_item;
+	struct drm_memblock_item *memblock;
 
 	BUG_ON(manager >= sman->num_managers);
 
@@ -246,9 +246,9 @@
 
 EXPORT_SYMBOL(drm_sman_alloc);
 
-static void drm_sman_free(drm_memblock_item_t *item)
+static void drm_sman_free(struct drm_memblock_item *item)
 {
-	drm_sman_t *sman = item->sman;
+	struct drm_sman *sman = item->sman;
 
 	list_del(&item->owner_list);
 	drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
@@ -256,40 +256,41 @@
 	drm_free(item, sizeof(*item), DRM_MEM_MM);
 }
 
-int drm_sman_free_key(drm_sman_t *sman, unsigned int key)
+int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
 {
-	drm_hash_item_t *hash_item;
-	drm_memblock_item_t *memblock_item;
+	struct drm_hash_item *hash_item;
+	struct drm_memblock_item *memblock_item;
 
 	if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
 		return -EINVAL;
 
-	memblock_item = drm_hash_entry(hash_item, drm_memblock_item_t, user_hash);
+	memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
+				       user_hash);
 	drm_sman_free(memblock_item);
 	return 0;
 }
 
 EXPORT_SYMBOL(drm_sman_free_key);
 
-static void drm_sman_remove_owner(drm_sman_t *sman,
-				  drm_owner_item_t *owner_item)
+static void drm_sman_remove_owner(struct drm_sman *sman,
+				  struct drm_owner_item *owner_item)
 {
 	list_del(&owner_item->sman_list);
 	drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
 	drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
 }
 
-int drm_sman_owner_clean(drm_sman_t *sman, unsigned long owner)
+int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
 {
 
-	drm_hash_item_t *hash_item;
-	drm_owner_item_t *owner_item;
+	struct drm_hash_item *hash_item;
+	struct drm_owner_item *owner_item;
 
 	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
 		return -1;
 	}
 
-	owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash);
+	owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
 	if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
 		drm_sman_remove_owner(sman, owner_item);
 		return -1;
@@ -300,10 +301,10 @@
 
 EXPORT_SYMBOL(drm_sman_owner_clean);
 
-static void drm_sman_do_owner_cleanup(drm_sman_t *sman,
-				      drm_owner_item_t *owner_item)
+static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
+				      struct drm_owner_item *owner_item)
 {
-	drm_memblock_item_t *entry, *next;
+	struct drm_memblock_item *entry, *next;
 
 	list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
 				 owner_list) {
@@ -312,28 +313,28 @@
 	drm_sman_remove_owner(sman, owner_item);
 }
 
-void drm_sman_owner_cleanup(drm_sman_t *sman, unsigned long owner)
+void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
 {
 
-	drm_hash_item_t *hash_item;
-	drm_owner_item_t *owner_item;
+	struct drm_hash_item *hash_item;
+	struct drm_owner_item *owner_item;
 
 	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
 
 		return;
 	}
 
-	owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash);
+	owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
 	drm_sman_do_owner_cleanup(sman, owner_item);
 }
 
 EXPORT_SYMBOL(drm_sman_owner_cleanup);
 
-void drm_sman_cleanup(drm_sman_t *sman)
+void drm_sman_cleanup(struct drm_sman *sman)
 {
-	drm_owner_item_t *entry, *next;
+	struct drm_owner_item *entry, *next;
 	unsigned int i;
-	drm_sman_mm_t *sman_mm;
+	struct drm_sman_mm *sman_mm;
 
 	list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
 		drm_sman_do_owner_cleanup(sman, entry);
diff --git a/drivers/char/drm/drm_sman.h b/drivers/char/drm/drm_sman.h
index ddc732a..39a39fe 100644
--- a/drivers/char/drm/drm_sman.h
+++ b/drivers/char/drm/drm_sman.h
@@ -50,7 +50,7 @@
  * for memory management.
  */
 
-typedef struct drm_sman_mm {
+struct drm_sman_mm {
 	/* private info. If allocated, needs to be destroyed by the destroy
 	   function */
 	void *private;
@@ -74,30 +74,30 @@
 	   "alloc" function */
 
 	unsigned long (*offset) (void *private, void *ref);
-} drm_sman_mm_t;
+};
 
-typedef struct drm_memblock_item {
+struct drm_memblock_item {
 	struct list_head owner_list;
-	drm_hash_item_t user_hash;
+	struct drm_hash_item user_hash;
 	void *mm_info;
-	drm_sman_mm_t *mm;
+	struct drm_sman_mm *mm;
 	struct drm_sman *sman;
-} drm_memblock_item_t;
+};
 
-typedef struct drm_sman {
-	drm_sman_mm_t *mm;
+struct drm_sman {
+	struct drm_sman_mm *mm;
 	int num_managers;
-	drm_open_hash_t owner_hash_tab;
-	drm_open_hash_t user_hash_tab;
+	struct drm_open_hash owner_hash_tab;
+	struct drm_open_hash user_hash_tab;
 	struct list_head owner_items;
-} drm_sman_t;
+};
 
 /*
  * Take down a memory manager. This function should only be called after a
  * successful init and after a call to drm_sman_cleanup.
  */
 
-extern void drm_sman_takedown(drm_sman_t * sman);
+extern void drm_sman_takedown(struct drm_sman * sman);
 
 /*
  * Allocate structures for a manager.
@@ -112,7 +112,7 @@
  *
  */
 
-extern int drm_sman_init(drm_sman_t * sman, unsigned int num_managers,
+extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
 			 unsigned int user_order, unsigned int owner_order);
 
 /*
@@ -120,7 +120,7 @@
  * manager unless a customized allocator is used.
  */
 
-extern int drm_sman_set_range(drm_sman_t * sman, unsigned int manager,
+extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
 			      unsigned long start, unsigned long size);
 
 /*
@@ -129,23 +129,23 @@
  * so it can be destroyed after this call.
  */
 
-extern int drm_sman_set_manager(drm_sman_t * sman, unsigned int mananger,
-				drm_sman_mm_t * allocator);
+extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger,
+				struct drm_sman_mm * allocator);
 
 /*
  * Allocate a memory block. Alignment is not implemented yet.
  */
 
-extern drm_memblock_item_t *drm_sman_alloc(drm_sman_t * sman,
-					   unsigned int manager,
-					   unsigned long size,
-					   unsigned alignment,
-					   unsigned long owner);
+extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
+						unsigned int manager,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long owner);
 /*
  * Free a memory block identified by its user hash key.
  */
 
-extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key);
+extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
 
 /*
  * returns 1 iff there are no stale memory blocks associated with this owner.
@@ -154,7 +154,7 @@
  * resources associated with owner.
  */
 
-extern int drm_sman_owner_clean(drm_sman_t * sman, unsigned long owner);
+extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
 
 /*
  * Frees all stale memory blocks associated with this owner. Note that this
@@ -164,13 +164,13 @@
  * is not going to be referenced anymore.
  */
 
-extern void drm_sman_owner_cleanup(drm_sman_t * sman, unsigned long owner);
+extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
 
 /*
  * Frees all stale memory blocks associated with the memory manager.
  * See idling above.
  */
 
-extern void drm_sman_cleanup(drm_sman_t * sman);
+extern void drm_sman_cleanup(struct drm_sman * sman);
 
 #endif
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 19408ad..ee83ff9 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -49,16 +49,21 @@
 module_param_named(cards_limit, drm_cards_limit, int, 0444);
 module_param_named(debug, drm_debug, int, 0600);
 
-drm_head_t **drm_heads;
+struct drm_head **drm_heads;
 struct class *drm_class;
 struct proc_dir_entry *drm_proc_root;
 
-static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
+static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
 			   const struct pci_device_id *ent,
 			   struct drm_driver *driver)
 {
 	int retcode;
 
+	INIT_LIST_HEAD(&dev->filelist);
+	INIT_LIST_HEAD(&dev->ctxlist);
+	INIT_LIST_HEAD(&dev->vmalist);
+	INIT_LIST_HEAD(&dev->maplist);
+
 	spin_lock_init(&dev->count_lock);
 	spin_lock_init(&dev->drw_lock);
 	spin_lock_init(&dev->tasklet_lock);
@@ -67,6 +72,8 @@
 	mutex_init(&dev->struct_mutex);
 	mutex_init(&dev->ctxlist_mutex);
 
+	idr_init(&dev->drw_idr);
+
 	dev->pdev = pdev;
 	dev->pci_device = pdev->device;
 	dev->pci_vendor = pdev->vendor;
@@ -76,12 +83,7 @@
 #endif
 	dev->irq = pdev->irq;
 
-	dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
-	if (dev->maplist == NULL)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&dev->maplist->head);
 	if (drm_ht_create(&dev->map_hash, 12)) {
-		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
 		return -ENOMEM;
 	}
 
@@ -143,9 +145,9 @@
  * create the proc init entry via proc_init(). This routine assigns
  * minor numbers to secondary heads of multi-headed cards
  */
-static int drm_get_head(drm_device_t * dev, drm_head_t * head)
+static int drm_get_head(struct drm_device * dev, struct drm_head * head)
 {
-	drm_head_t **heads = drm_heads;
+	struct drm_head **heads = drm_heads;
 	int ret;
 	int minor;
 
@@ -154,7 +156,7 @@
 	for (minor = 0; minor < drm_cards_limit; minor++, heads++) {
 		if (!*heads) {
 
-			*head = (drm_head_t) {
+			*head = (struct drm_head) {
 			.dev = dev,.device =
 				    MKDEV(DRM_MAJOR, minor),.minor = minor,};
 
@@ -184,7 +186,7 @@
       err_g2:
 	drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
       err_g1:
-	*head = (drm_head_t) {
+	*head = (struct drm_head) {
 	.dev = NULL};
 	return ret;
 }
@@ -203,7 +205,7 @@
 int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 		struct drm_driver *driver)
 {
-	drm_device_t *dev;
+	struct drm_device *dev;
 	int ret;
 
 	DRM_DEBUG("\n");
@@ -246,7 +248,7 @@
  * "drm" data, otherwise unregisters the "drm" data, frees the dev list and
  * unregisters the character device.
  */
-int drm_put_dev(drm_device_t * dev)
+int drm_put_dev(struct drm_device * dev)
 {
 	DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);
 
@@ -274,7 +276,7 @@
  * last minor released.
  *
  */
-int drm_put_head(drm_head_t * head)
+int drm_put_head(struct drm_head * head)
 {
 	int minor = head->minor;
 
@@ -283,7 +285,7 @@
 	drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
 	drm_sysfs_device_remove(head->dev_class);
 
-	*head = (drm_head_t) {.dev = NULL};
+	*head = (struct drm_head) {.dev = NULL};
 
 	drm_heads[minor] = NULL;
 
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c
index cc8e2eb..cf4349b 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/char/drm/drm_sysfs.c
@@ -80,7 +80,7 @@
 
 static ssize_t show_dri(struct class_device *class_device, char *buf)
 {
-	drm_device_t * dev = ((drm_head_t *)class_get_devdata(class_device))->dev;
+	struct drm_device * dev = ((struct drm_head *)class_get_devdata(class_device))->dev;
 	if (dev->driver->dri_library_name)
 		return dev->driver->dri_library_name(dev, buf);
 	return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name);
@@ -104,7 +104,7 @@
  * Note: the struct class passed to this function must have previously been
  * created with a call to drm_sysfs_create().
  */
-struct class_device *drm_sysfs_device_add(struct class *cs, drm_head_t *head)
+struct class_device *drm_sysfs_device_add(struct class *cs, struct drm_head *head)
 {
 	struct class_device *class_dev;
 	int i, j, err;
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index b5c5b9f..68e36e5 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -79,11 +79,11 @@
 static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 						unsigned long address)
 {
-	drm_file_t *priv = vma->vm_file->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_map_t *map = NULL;
-	drm_map_list_t *r_list;
-	drm_hash_item_t *hash;
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_map *map = NULL;
+	struct drm_map_list *r_list;
+	struct drm_hash_item *hash;
 
 	/*
 	 * Find the right map
@@ -97,7 +97,7 @@
 	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
 		goto vm_nopage_error;
 
-	r_list = drm_hash_entry(hash, drm_map_list_t, hash);
+	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
 	map = r_list->map;
 
 	if (map && map->type == _DRM_AGP) {
@@ -116,7 +116,7 @@
 		/*
 		 * It's AGP memory - find the real physical page to map
 		 */
-		for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
+		list_for_each_entry(agpmem, &dev->agp->memory, head) {
 			if (agpmem->bound <= baddr &&
 			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
 				break;
@@ -163,7 +163,7 @@
 static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
 						    unsigned long address)
 {
-	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
+	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
 	unsigned long offset;
 	unsigned long i;
 	struct page *page;
@@ -194,12 +194,11 @@
  */
 static void drm_vm_shm_close(struct vm_area_struct *vma)
 {
-	drm_file_t *priv = vma->vm_file->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_vma_entry_t *pt, *prev, *next;
-	drm_map_t *map;
-	drm_map_list_t *r_list;
-	struct list_head *list;
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_vma_entry *pt, *temp;
+	struct drm_map *map;
+	struct drm_map_list *r_list;
 	int found_maps = 0;
 
 	DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -209,30 +208,22 @@
 	map = vma->vm_private_data;
 
 	mutex_lock(&dev->struct_mutex);
-	for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
-		next = pt->next;
+	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
 		if (pt->vma->vm_private_data == map)
 			found_maps++;
 		if (pt->vma == vma) {
-			if (prev) {
-				prev->next = pt->next;
-			} else {
-				dev->vmalist = pt->next;
-			}
+			list_del(&pt->head);
 			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
-		} else {
-			prev = pt;
 		}
 	}
+
 	/* We were the only map that was found */
 	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
 		/* Check to see if we are in the maplist, if we are not, then
 		 * we delete this mapping's information.
 		 */
 		found_maps = 0;
-		list = &dev->maplist->head;
-		list_for_each(list, &dev->maplist->head) {
-			r_list = list_entry(list, drm_map_list_t, head);
+		list_for_each_entry(r_list, &dev->maplist, head) {
 			if (r_list->map == map)
 				found_maps++;
 		}
@@ -283,9 +274,9 @@
 static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
 						    unsigned long address)
 {
-	drm_file_t *priv = vma->vm_file->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_device_dma *dma = dev->dma;
 	unsigned long offset;
 	unsigned long page_nr;
 	struct page *page;
@@ -319,10 +310,10 @@
 static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
 						   unsigned long address)
 {
-	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
-	drm_file_t *priv = vma->vm_file->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_sg_mem_t *entry = dev->sg;
+	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_sg_mem *entry = dev->sg;
 	unsigned long offset;
 	unsigned long map_offset;
 	unsigned long page_offset;
@@ -414,9 +405,9 @@
  */
 static void drm_vm_open_locked(struct vm_area_struct *vma)
 {
-	drm_file_t *priv = vma->vm_file->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_vma_entry_t *vma_entry;
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_vma_entry *vma_entry;
 
 	DRM_DEBUG("0x%08lx,0x%08lx\n",
 		  vma->vm_start, vma->vm_end - vma->vm_start);
@@ -425,16 +416,15 @@
 	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
 	if (vma_entry) {
 		vma_entry->vma = vma;
-		vma_entry->next = dev->vmalist;
 		vma_entry->pid = current->pid;
-		dev->vmalist = vma_entry;
+		list_add(&vma_entry->head, &dev->vmalist);
 	}
 }
 
 static void drm_vm_open(struct vm_area_struct *vma)
 {
-	drm_file_t *priv = vma->vm_file->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->head->dev;
 
 	mutex_lock(&dev->struct_mutex);
 	drm_vm_open_locked(vma);
@@ -451,22 +441,18 @@
  */
 static void drm_vm_close(struct vm_area_struct *vma)
 {
-	drm_file_t *priv = vma->vm_file->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_vma_entry_t *pt, *prev;
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_vma_entry *pt, *temp;
 
 	DRM_DEBUG("0x%08lx,0x%08lx\n",
 		  vma->vm_start, vma->vm_end - vma->vm_start);
 	atomic_dec(&dev->vma_count);
 
 	mutex_lock(&dev->struct_mutex);
-	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
+	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
 		if (pt->vma == vma) {
-			if (prev) {
-				prev->next = pt->next;
-			} else {
-				dev->vmalist = pt->next;
-			}
+			list_del(&pt->head);
 			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
 			break;
 		}
@@ -486,9 +472,9 @@
  */
 static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev;
-	drm_device_dma_t *dma;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev;
+	struct drm_device_dma *dma;
 	unsigned long length = vma->vm_end - vma->vm_start;
 
 	dev = priv->head->dev;
@@ -526,7 +512,7 @@
 	return 0;
 }
 
-unsigned long drm_core_get_map_ofs(drm_map_t * map)
+unsigned long drm_core_get_map_ofs(struct drm_map * map)
 {
 	return map->offset;
 }
@@ -559,11 +545,11 @@
  */
 static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_map_t *map = NULL;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_map *map = NULL;
 	unsigned long offset = 0;
-	drm_hash_item_t *hash;
+	struct drm_hash_item *hash;
 
 	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
 		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
@@ -588,7 +574,7 @@
 		return -EINVAL;
 	}
 
-	map = drm_hash_entry(hash, drm_map_list_t, hash)->map;
+	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
 	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
 		return -EPERM;
 
@@ -677,8 +663,8 @@
 
 int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index 603d17f..cb44999 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -45,16 +45,16 @@
 #define I810_BUF_UNMAPPED 0
 #define I810_BUF_MAPPED   1
 
-static drm_buf_t *i810_freelist_get(drm_device_t * dev)
+static struct drm_buf *i810_freelist_get(struct drm_device * dev)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int i;
 	int used;
 
 	/* Linear search might not be the best solution */
 
 	for (i = 0; i < dma->buf_count; i++) {
-		drm_buf_t *buf = dma->buflist[i];
+		struct drm_buf *buf = dma->buflist[i];
 		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
 		/* In use is already a pointer */
 		used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
@@ -70,7 +70,7 @@
  * yet, the hardware updates in use for us once it's on the ring buffer.
  */
 
-static int i810_freelist_put(drm_device_t * dev, drm_buf_t * buf)
+static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf)
 {
 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
 	int used;
@@ -87,10 +87,10 @@
 
 static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev;
 	drm_i810_private_t *dev_priv;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 	drm_i810_buf_priv_t *buf_priv;
 
 	lock_kernel();
@@ -120,10 +120,10 @@
 	.fasync = drm_fasync,
 };
 
-static int i810_map_buffer(drm_buf_t * buf, struct file *filp)
+static int i810_map_buffer(struct drm_buf * buf, struct file *filp)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
 	drm_i810_private_t *dev_priv = dev->dev_private;
 	const struct file_operations *old_fops;
@@ -152,7 +152,7 @@
 	return retcode;
 }
 
-static int i810_unmap_buffer(drm_buf_t * buf)
+static int i810_unmap_buffer(struct drm_buf * buf)
 {
 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
 	int retcode = 0;
@@ -172,10 +172,10 @@
 	return retcode;
 }
 
-static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d,
+static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
 			       struct file *filp)
 {
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 	drm_i810_buf_priv_t *buf_priv;
 	int retcode = 0;
 
@@ -202,9 +202,9 @@
 	return retcode;
 }
 
-static int i810_dma_cleanup(drm_device_t * dev)
+static int i810_dma_cleanup(struct drm_device * dev)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 
 	/* Make sure interrupts are disabled here because the uninstall ioctl
 	 * may not have been called from userspace and after dev_private
@@ -233,7 +233,7 @@
 		dev->dev_private = NULL;
 
 		for (i = 0; i < dma->buf_count; i++) {
-			drm_buf_t *buf = dma->buflist[i];
+			struct drm_buf *buf = dma->buflist[i];
 			drm_i810_buf_priv_t *buf_priv = buf->dev_private;
 
 			if (buf_priv->kernel_virtual && buf->total)
@@ -243,7 +243,7 @@
 	return 0;
 }
 
-static int i810_wait_ring(drm_device_t * dev, int n)
+static int i810_wait_ring(struct drm_device * dev, int n)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
 	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
@@ -276,7 +276,7 @@
 	return iters;
 }
 
-static void i810_kernel_lost_context(drm_device_t * dev)
+static void i810_kernel_lost_context(struct drm_device * dev)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
 	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
@@ -288,9 +288,9 @@
 		ring->space += ring->Size;
 }
 
-static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv)
+static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int my_idx = 24;
 	u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
 	int i;
@@ -301,7 +301,7 @@
 	}
 
 	for (i = 0; i < dma->buf_count; i++) {
-		drm_buf_t *buf = dma->buflist[i];
+		struct drm_buf *buf = dma->buflist[i];
 		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
 
 		buf_priv->in_use = hw_status++;
@@ -323,16 +323,14 @@
 	return 0;
 }
 
-static int i810_dma_initialize(drm_device_t * dev,
+static int i810_dma_initialize(struct drm_device * dev,
 			       drm_i810_private_t * dev_priv,
 			       drm_i810_init_t * init)
 {
-	struct list_head *list;
-
+	struct drm_map_list *r_list;
 	memset(dev_priv, 0, sizeof(drm_i810_private_t));
 
-	list_for_each(list, &dev->maplist->head) {
-		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
+	list_for_each_entry(r_list, &dev->maplist, head) {
 		if (r_list->map &&
 		    r_list->map->type == _DRM_SHM &&
 		    r_list->map->flags & _DRM_CONTAINS_LOCK) {
@@ -478,8 +476,8 @@
 static int i810_dma_init(struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i810_private_t *dev_priv;
 	drm_i810_init_t init;
 	int retcode = 0;
@@ -536,7 +534,7 @@
  * Use 'volatile' & local var tmp to force the emitted values to be
  * identical to the verified ones.
  */
-static void i810EmitContextVerified(drm_device_t * dev,
+static void i810EmitContextVerified(struct drm_device * dev,
 				    volatile unsigned int *code)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
@@ -569,7 +567,7 @@
 	ADVANCE_LP_RING();
 }
 
-static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code)
+static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
 	int i, j = 0;
@@ -602,7 +600,7 @@
 
 /* Need to do some additional checking when setting the dest buffer.
  */
-static void i810EmitDestVerified(drm_device_t * dev,
+static void i810EmitDestVerified(struct drm_device * dev,
 				 volatile unsigned int *code)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
@@ -637,7 +635,7 @@
 	ADVANCE_LP_RING();
 }
 
-static void i810EmitState(drm_device_t * dev)
+static void i810EmitState(struct drm_device * dev)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
 	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -668,14 +666,14 @@
 
 /* need to verify
  */
-static void i810_dma_dispatch_clear(drm_device_t * dev, int flags,
+static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
 				    unsigned int clear_color,
 				    unsigned int clear_zval)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
 	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	int nbox = sarea_priv->nbox;
-	drm_clip_rect_t *pbox = sarea_priv->boxes;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
 	int pitch = dev_priv->pitch;
 	int cpp = 2;
 	int i;
@@ -743,12 +741,12 @@
 	}
 }
 
-static void i810_dma_dispatch_swap(drm_device_t * dev)
+static void i810_dma_dispatch_swap(struct drm_device * dev)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
 	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	int nbox = sarea_priv->nbox;
-	drm_clip_rect_t *pbox = sarea_priv->boxes;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
 	int pitch = dev_priv->pitch;
 	int cpp = 2;
 	int i;
@@ -789,13 +787,13 @@
 	}
 }
 
-static void i810_dma_dispatch_vertex(drm_device_t * dev,
-				     drm_buf_t * buf, int discard, int used)
+static void i810_dma_dispatch_vertex(struct drm_device * dev,
+				     struct drm_buf * buf, int discard, int used)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
 	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_clip_rect_t *box = sarea_priv->boxes;
+	struct drm_clip_rect *box = sarea_priv->boxes;
 	int nbox = sarea_priv->nbox;
 	unsigned long address = (unsigned long)buf->bus_address;
 	unsigned long start = address - dev->agp->base;
@@ -869,7 +867,7 @@
 	}
 }
 
-static void i810_dma_dispatch_flip(drm_device_t * dev)
+static void i810_dma_dispatch_flip(struct drm_device * dev)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
 	int pitch = dev_priv->pitch;
@@ -916,7 +914,7 @@
 
 }
 
-static void i810_dma_quiescent(drm_device_t * dev)
+static void i810_dma_quiescent(struct drm_device * dev)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
@@ -935,10 +933,10 @@
 	i810_wait_ring(dev, dev_priv->ring.Size - 8);
 }
 
-static int i810_flush_queue(drm_device_t * dev)
+static int i810_flush_queue(struct drm_device * dev)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int i, ret = 0;
 	RING_LOCALS;
 
@@ -954,7 +952,7 @@
 	i810_wait_ring(dev, dev_priv->ring.Size - 8);
 
 	for (i = 0; i < dma->buf_count; i++) {
-		drm_buf_t *buf = dma->buflist[i];
+		struct drm_buf *buf = dma->buflist[i];
 		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
 
 		int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
@@ -970,9 +968,9 @@
 }
 
 /* Must be called with the lock held */
-static void i810_reclaim_buffers(drm_device_t * dev, struct file *filp)
+static void i810_reclaim_buffers(struct drm_device * dev, struct file *filp)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int i;
 
 	if (!dma)
@@ -985,7 +983,7 @@
 	i810_flush_queue(dev);
 
 	for (i = 0; i < dma->buf_count; i++) {
-		drm_buf_t *buf = dma->buflist[i];
+		struct drm_buf *buf = dma->buflist[i];
 		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
 
 		if (buf->filp == filp && buf_priv) {
@@ -1003,8 +1001,8 @@
 static int i810_flush_ioctl(struct inode *inode, struct file *filp,
 			    unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 
 	LOCK_TEST_WITH_RETURN(dev, filp);
 
@@ -1015,9 +1013,9 @@
 static int i810_dma_vertex(struct inode *inode, struct file *filp,
 			   unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_device_dma *dma = dev->dma;
 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
 	u32 *hw_status = dev_priv->hw_status_page;
 	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
@@ -1051,8 +1049,8 @@
 static int i810_clear_bufs(struct inode *inode, struct file *filp,
 			   unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i810_clear_t clear;
 
 	if (copy_from_user
@@ -1074,8 +1072,8 @@
 static int i810_swap_bufs(struct inode *inode, struct file *filp,
 			  unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 
 	DRM_DEBUG("i810_swap_bufs\n");
 
@@ -1088,8 +1086,8 @@
 static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
 		       unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
 	u32 *hw_status = dev_priv->hw_status_page;
 	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
@@ -1102,8 +1100,8 @@
 static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
 		       unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	int retcode = 0;
 	drm_i810_dma_t d;
 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
@@ -1123,7 +1121,7 @@
 	DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
 		  current->pid, retcode, d.granted);
 
-	if (copy_to_user((drm_dma_t __user *) arg, &d, sizeof(d)))
+	if (copy_to_user((void __user *) arg, &d, sizeof(d)))
 		return -EFAULT;
 	sarea_priv->last_dispatch = (int)hw_status[5];
 
@@ -1144,7 +1142,7 @@
 	return 0;
 }
 
-static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used,
+static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used,
 				 unsigned int last_render)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1207,9 +1205,9 @@
 static int i810_dma_mc(struct inode *inode, struct file *filp,
 		       unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_device_dma *dma = dev->dma;
 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
 	u32 *hw_status = dev_priv->hw_status_page;
 	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
@@ -1238,8 +1236,8 @@
 static int i810_rstatus(struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
 
 	return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
@@ -1248,8 +1246,8 @@
 static int i810_ov0_info(struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
 	drm_i810_overlay_t data;
 
@@ -1264,8 +1262,8 @@
 static int i810_fstatus(struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
 
 	LOCK_TEST_WITH_RETURN(dev, filp);
@@ -1276,8 +1274,8 @@
 static int i810_ov0_flip(struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
 
 	LOCK_TEST_WITH_RETURN(dev, filp);
@@ -1290,7 +1288,7 @@
 
 /* Not sure why this isn't set all the time:
  */
-static void i810_do_init_pageflip(drm_device_t * dev)
+static void i810_do_init_pageflip(struct drm_device * dev)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
 
@@ -1300,7 +1298,7 @@
 	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
 }
 
-static int i810_do_cleanup_pageflip(drm_device_t * dev)
+static int i810_do_cleanup_pageflip(struct drm_device * dev)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
 
@@ -1315,8 +1313,8 @@
 static int i810_flip_bufs(struct inode *inode, struct file *filp,
 			  unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i810_private_t *dev_priv = dev->dev_private;
 
 	DRM_DEBUG("%s\n", __FUNCTION__);
@@ -1330,7 +1328,7 @@
 	return 0;
 }
 
-int i810_driver_load(drm_device_t *dev, unsigned long flags)
+int i810_driver_load(struct drm_device *dev, unsigned long flags)
 {
 	/* i810 has 4 more counters */
 	dev->counters += 4;
@@ -1342,12 +1340,12 @@
 	return 0;
 }
 
-void i810_driver_lastclose(drm_device_t * dev)
+void i810_driver_lastclose(struct drm_device * dev)
 {
 	i810_dma_cleanup(dev);
 }
 
-void i810_driver_preclose(drm_device_t * dev, DRMFILE filp)
+void i810_driver_preclose(struct drm_device * dev, DRMFILE filp)
 {
 	if (dev->dev_private) {
 		drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1357,12 +1355,12 @@
 	}
 }
 
-void i810_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
+void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp)
 {
 	i810_reclaim_buffers(dev, filp);
 }
 
-int i810_driver_dma_quiescent(drm_device_t * dev)
+int i810_driver_dma_quiescent(struct drm_device * dev)
 {
 	i810_dma_quiescent(dev);
 	return 0;
@@ -1399,7 +1397,7 @@
  * \returns
  * A value of 1 is always returned to indicate every i810 is AGP.
  */
-int i810_driver_device_is_agp(drm_device_t * dev)
+int i810_driver_device_is_agp(struct drm_device * dev)
 {
 	return 1;
 }
diff --git a/drivers/char/drm/i810_drm.h b/drivers/char/drm/i810_drm.h
index 2deb925..614977d 100644
--- a/drivers/char/drm/i810_drm.h
+++ b/drivers/char/drm/i810_drm.h
@@ -158,7 +158,7 @@
 	unsigned int dirty;
 
 	unsigned int nbox;
-	drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS];
+	struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS];
 
 	/* Maintain an LRU of contiguous regions of texture space.  If
 	 * you think you own a region of texture memory, and it has an
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index e6df49f4..6488338 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -77,8 +77,8 @@
 } drm_i810_ring_buffer_t;
 
 typedef struct drm_i810_private {
-	drm_map_t *sarea_map;
-	drm_map_t *mmio_map;
+	struct drm_map *sarea_map;
+	struct drm_map *mmio_map;
 
 	drm_i810_sarea_t *sarea_priv;
 	drm_i810_ring_buffer_t ring;
@@ -88,7 +88,7 @@
 
 	dma_addr_t dma_status_page;
 
-	drm_buf_t *mmap_buffer;
+	struct drm_buf *mmap_buffer;
 
 	u32 front_di1, back_di1, zi1;
 
@@ -115,15 +115,15 @@
 } drm_i810_private_t;
 
 				/* i810_dma.c */
-extern int i810_driver_dma_quiescent(drm_device_t * dev);
-extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
+extern int i810_driver_dma_quiescent(struct drm_device * dev);
+extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
 					       struct file *filp);
 extern int i810_driver_load(struct drm_device *, unsigned long flags);
-extern void i810_driver_lastclose(drm_device_t * dev);
-extern void i810_driver_preclose(drm_device_t * dev, DRMFILE filp);
-extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
+extern void i810_driver_lastclose(struct drm_device * dev);
+extern void i810_driver_preclose(struct drm_device * dev, DRMFILE filp);
+extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
 					       struct file *filp);
-extern int i810_driver_device_is_agp(drm_device_t * dev);
+extern int i810_driver_device_is_agp(struct drm_device * dev);
 
 extern drm_ioctl_desc_t i810_ioctls[];
 extern int i810_max_ioctl;
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index 3314a9f..dc20c1a 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -47,16 +47,16 @@
 #define I830_BUF_UNMAPPED 0
 #define I830_BUF_MAPPED   1
 
-static drm_buf_t *i830_freelist_get(drm_device_t * dev)
+static struct drm_buf *i830_freelist_get(struct drm_device * dev)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int i;
 	int used;
 
 	/* Linear search might not be the best solution */
 
 	for (i = 0; i < dma->buf_count; i++) {
-		drm_buf_t *buf = dma->buflist[i];
+		struct drm_buf *buf = dma->buflist[i];
 		drm_i830_buf_priv_t *buf_priv = buf->dev_private;
 		/* In use is already a pointer */
 		used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
@@ -72,7 +72,7 @@
  * yet, the hardware updates in use for us once it's on the ring buffer.
  */
 
-static int i830_freelist_put(drm_device_t * dev, drm_buf_t * buf)
+static int i830_freelist_put(struct drm_device * dev, struct drm_buf * buf)
 {
 	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
 	int used;
@@ -89,10 +89,10 @@
 
 static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev;
 	drm_i830_private_t *dev_priv;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 	drm_i830_buf_priv_t *buf_priv;
 
 	lock_kernel();
@@ -122,10 +122,10 @@
 	.fasync = drm_fasync,
 };
 
-static int i830_map_buffer(drm_buf_t * buf, struct file *filp)
+static int i830_map_buffer(struct drm_buf * buf, struct file *filp)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	const struct file_operations *old_fops;
@@ -156,7 +156,7 @@
 	return retcode;
 }
 
-static int i830_unmap_buffer(drm_buf_t * buf)
+static int i830_unmap_buffer(struct drm_buf * buf)
 {
 	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
 	int retcode = 0;
@@ -176,10 +176,10 @@
 	return retcode;
 }
 
-static int i830_dma_get_buffer(drm_device_t * dev, drm_i830_dma_t * d,
+static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d,
 			       struct file *filp)
 {
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 	drm_i830_buf_priv_t *buf_priv;
 	int retcode = 0;
 
@@ -206,9 +206,9 @@
 	return retcode;
 }
 
-static int i830_dma_cleanup(drm_device_t * dev)
+static int i830_dma_cleanup(struct drm_device * dev)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 
 	/* Make sure interrupts are disabled here because the uninstall ioctl
 	 * may not have been called from userspace and after dev_private
@@ -238,7 +238,7 @@
 		dev->dev_private = NULL;
 
 		for (i = 0; i < dma->buf_count; i++) {
-			drm_buf_t *buf = dma->buflist[i];
+			struct drm_buf *buf = dma->buflist[i];
 			drm_i830_buf_priv_t *buf_priv = buf->dev_private;
 			if (buf_priv->kernel_virtual && buf->total)
 				drm_core_ioremapfree(&buf_priv->map, dev);
@@ -247,7 +247,7 @@
 	return 0;
 }
 
-int i830_wait_ring(drm_device_t * dev, int n, const char *caller)
+int i830_wait_ring(struct drm_device * dev, int n, const char *caller)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
@@ -281,7 +281,7 @@
 	return iters;
 }
 
-static void i830_kernel_lost_context(drm_device_t * dev)
+static void i830_kernel_lost_context(struct drm_device * dev)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
@@ -296,9 +296,9 @@
 		dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
 }
 
-static int i830_freelist_init(drm_device_t * dev, drm_i830_private_t * dev_priv)
+static int i830_freelist_init(struct drm_device * dev, drm_i830_private_t * dev_priv)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int my_idx = 36;
 	u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
 	int i;
@@ -309,7 +309,7 @@
 	}
 
 	for (i = 0; i < dma->buf_count; i++) {
-		drm_buf_t *buf = dma->buflist[i];
+		struct drm_buf *buf = dma->buflist[i];
 		drm_i830_buf_priv_t *buf_priv = buf->dev_private;
 
 		buf_priv->in_use = hw_status++;
@@ -330,16 +330,15 @@
 	return 0;
 }
 
-static int i830_dma_initialize(drm_device_t * dev,
+static int i830_dma_initialize(struct drm_device * dev,
 			       drm_i830_private_t * dev_priv,
 			       drm_i830_init_t * init)
 {
-	struct list_head *list;
+	struct drm_map_list *r_list;
 
 	memset(dev_priv, 0, sizeof(drm_i830_private_t));
 
-	list_for_each(list, &dev->maplist->head) {
-		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
+	list_for_each_entry(r_list, &dev->maplist, head) {
 		if (r_list->map &&
 		    r_list->map->type == _DRM_SHM &&
 		    r_list->map->flags & _DRM_CONTAINS_LOCK) {
@@ -455,8 +454,8 @@
 static int i830_dma_init(struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i830_private_t *dev_priv;
 	drm_i830_init_t init;
 	int retcode = 0;
@@ -490,7 +489,7 @@
 /* Most efficient way to verify state for the i830 is as it is
  * emitted.  Non-conformant state is silently dropped.
  */
-static void i830EmitContextVerified(drm_device_t * dev, unsigned int *code)
+static void i830EmitContextVerified(struct drm_device * dev, unsigned int *code)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	int i, j = 0;
@@ -535,7 +534,7 @@
 	ADVANCE_LP_RING();
 }
 
-static void i830EmitTexVerified(drm_device_t * dev, unsigned int *code)
+static void i830EmitTexVerified(struct drm_device * dev, unsigned int *code)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	int i, j = 0;
@@ -569,7 +568,7 @@
 		printk("rejected packet %x\n", code[0]);
 }
 
-static void i830EmitTexBlendVerified(drm_device_t * dev,
+static void i830EmitTexBlendVerified(struct drm_device * dev,
 				     unsigned int *code, unsigned int num)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
@@ -594,7 +593,7 @@
 	ADVANCE_LP_RING();
 }
 
-static void i830EmitTexPalette(drm_device_t * dev,
+static void i830EmitTexPalette(struct drm_device * dev,
 			       unsigned int *palette, int number, int is_shared)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
@@ -621,7 +620,7 @@
 
 /* Need to do some additional checking when setting the dest buffer.
  */
-static void i830EmitDestVerified(drm_device_t * dev, unsigned int *code)
+static void i830EmitDestVerified(struct drm_device * dev, unsigned int *code)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	unsigned int tmp;
@@ -682,7 +681,7 @@
 	ADVANCE_LP_RING();
 }
 
-static void i830EmitStippleVerified(drm_device_t * dev, unsigned int *code)
+static void i830EmitStippleVerified(struct drm_device * dev, unsigned int *code)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
@@ -693,7 +692,7 @@
 	ADVANCE_LP_RING();
 }
 
-static void i830EmitState(drm_device_t * dev)
+static void i830EmitState(struct drm_device * dev)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -796,7 +795,7 @@
  * Performance monitoring functions
  */
 
-static void i830_fill_box(drm_device_t * dev,
+static void i830_fill_box(struct drm_device * dev,
 			  int x, int y, int w, int h, int r, int g, int b)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
@@ -834,7 +833,7 @@
 	ADVANCE_LP_RING();
 }
 
-static void i830_cp_performance_boxes(drm_device_t * dev)
+static void i830_cp_performance_boxes(struct drm_device * dev)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 
@@ -879,7 +878,7 @@
 	dev_priv->sarea_priv->perf_boxes = 0;
 }
 
-static void i830_dma_dispatch_clear(drm_device_t * dev, int flags,
+static void i830_dma_dispatch_clear(struct drm_device * dev, int flags,
 				    unsigned int clear_color,
 				    unsigned int clear_zval,
 				    unsigned int clear_depthmask)
@@ -887,7 +886,7 @@
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	int nbox = sarea_priv->nbox;
-	drm_clip_rect_t *pbox = sarea_priv->boxes;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
 	int pitch = dev_priv->pitch;
 	int cpp = dev_priv->cpp;
 	int i;
@@ -974,12 +973,12 @@
 	}
 }
 
-static void i830_dma_dispatch_swap(drm_device_t * dev)
+static void i830_dma_dispatch_swap(struct drm_device * dev)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	int nbox = sarea_priv->nbox;
-	drm_clip_rect_t *pbox = sarea_priv->boxes;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
 	int pitch = dev_priv->pitch;
 	int cpp = dev_priv->cpp;
 	int i;
@@ -1044,7 +1043,7 @@
 	}
 }
 
-static void i830_dma_dispatch_flip(drm_device_t * dev)
+static void i830_dma_dispatch_flip(struct drm_device * dev)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
@@ -1087,13 +1086,13 @@
 	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
 }
 
-static void i830_dma_dispatch_vertex(drm_device_t * dev,
-				     drm_buf_t * buf, int discard, int used)
+static void i830_dma_dispatch_vertex(struct drm_device * dev,
+				     struct drm_buf * buf, int discard, int used)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
 	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_clip_rect_t *box = sarea_priv->boxes;
+	struct drm_clip_rect *box = sarea_priv->boxes;
 	int nbox = sarea_priv->nbox;
 	unsigned long address = (unsigned long)buf->bus_address;
 	unsigned long start = address - dev->agp->base;
@@ -1199,7 +1198,7 @@
 	}
 }
 
-static void i830_dma_quiescent(drm_device_t * dev)
+static void i830_dma_quiescent(struct drm_device * dev)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
@@ -1216,10 +1215,10 @@
 	i830_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
 }
 
-static int i830_flush_queue(drm_device_t * dev)
+static int i830_flush_queue(struct drm_device * dev)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int i, ret = 0;
 	RING_LOCALS;
 
@@ -1233,7 +1232,7 @@
 	i830_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
 
 	for (i = 0; i < dma->buf_count; i++) {
-		drm_buf_t *buf = dma->buflist[i];
+		struct drm_buf *buf = dma->buflist[i];
 		drm_i830_buf_priv_t *buf_priv = buf->dev_private;
 
 		int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
@@ -1249,9 +1248,9 @@
 }
 
 /* Must be called with the lock held */
-static void i830_reclaim_buffers(drm_device_t * dev, struct file *filp)
+static void i830_reclaim_buffers(struct drm_device * dev, struct file *filp)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int i;
 
 	if (!dma)
@@ -1264,7 +1263,7 @@
 	i830_flush_queue(dev);
 
 	for (i = 0; i < dma->buf_count; i++) {
-		drm_buf_t *buf = dma->buflist[i];
+		struct drm_buf *buf = dma->buflist[i];
 		drm_i830_buf_priv_t *buf_priv = buf->dev_private;
 
 		if (buf->filp == filp && buf_priv) {
@@ -1282,8 +1281,8 @@
 static int i830_flush_ioctl(struct inode *inode, struct file *filp,
 			    unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 
 	LOCK_TEST_WITH_RETURN(dev, filp);
 
@@ -1294,9 +1293,9 @@
 static int i830_dma_vertex(struct inode *inode, struct file *filp,
 			   unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
+	struct drm_device_dma *dma = dev->dma;
 	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
 	u32 *hw_status = dev_priv->hw_status_page;
 	drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
@@ -1328,8 +1327,8 @@
 static int i830_clear_bufs(struct inode *inode, struct file *filp,
 			   unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i830_clear_t clear;
 
 	if (copy_from_user
@@ -1352,8 +1351,8 @@
 static int i830_swap_bufs(struct inode *inode, struct file *filp,
 			  unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 
 	DRM_DEBUG("i830_swap_bufs\n");
 
@@ -1365,7 +1364,7 @@
 
 /* Not sure why this isn't set all the time:
  */
-static void i830_do_init_pageflip(drm_device_t * dev)
+static void i830_do_init_pageflip(struct drm_device * dev)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 
@@ -1375,7 +1374,7 @@
 	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
 }
 
-static int i830_do_cleanup_pageflip(drm_device_t * dev)
+static int i830_do_cleanup_pageflip(struct drm_device * dev)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 
@@ -1390,8 +1389,8 @@
 static int i830_flip_bufs(struct inode *inode, struct file *filp,
 			  unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i830_private_t *dev_priv = dev->dev_private;
 
 	DRM_DEBUG("%s\n", __FUNCTION__);
@@ -1408,8 +1407,8 @@
 static int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
 		       unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
 	u32 *hw_status = dev_priv->hw_status_page;
 	drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
@@ -1422,8 +1421,8 @@
 static int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
 		       unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	int retcode = 0;
 	drm_i830_dma_t d;
 	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
@@ -1444,7 +1443,7 @@
 	DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
 		  current->pid, retcode, d.granted);
 
-	if (copy_to_user((drm_dma_t __user *) arg, &d, sizeof(d)))
+	if (copy_to_user((void __user *) arg, &d, sizeof(d)))
 		return -EFAULT;
 	sarea_priv->last_dispatch = (int)hw_status[5];
 
@@ -1467,8 +1466,8 @@
 static int i830_getparam(struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	drm_i830_getparam_t param;
 	int value;
@@ -1501,8 +1500,8 @@
 static int i830_setparam(struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	drm_i830_setparam_t param;
 
@@ -1526,7 +1525,7 @@
 	return 0;
 }
 
-int i830_driver_load(drm_device_t *dev, unsigned long flags)
+int i830_driver_load(struct drm_device *dev, unsigned long flags)
 {
 	/* i830 has 4 more counters */
 	dev->counters += 4;
@@ -1538,12 +1537,12 @@
 	return 0;
 }
 
-void i830_driver_lastclose(drm_device_t * dev)
+void i830_driver_lastclose(struct drm_device * dev)
 {
 	i830_dma_cleanup(dev);
 }
 
-void i830_driver_preclose(drm_device_t * dev, DRMFILE filp)
+void i830_driver_preclose(struct drm_device * dev, DRMFILE filp)
 {
 	if (dev->dev_private) {
 		drm_i830_private_t *dev_priv = dev->dev_private;
@@ -1553,12 +1552,12 @@
 	}
 }
 
-void i830_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
+void i830_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp)
 {
 	i830_reclaim_buffers(dev, filp);
 }
 
-int i830_driver_dma_quiescent(drm_device_t * dev)
+int i830_driver_dma_quiescent(struct drm_device * dev)
 {
 	i830_dma_quiescent(dev);
 	return 0;
@@ -1594,7 +1593,7 @@
  * \returns
 * A value of 1 is always returned to indicate every i8xx is AGP.
  */
-int i830_driver_device_is_agp(drm_device_t * dev)
+int i830_driver_device_is_agp(struct drm_device * dev)
 {
 	return 1;
 }
diff --git a/drivers/char/drm/i830_drm.h b/drivers/char/drm/i830_drm.h
index 66dd750..968a6d9 100644
--- a/drivers/char/drm/i830_drm.h
+++ b/drivers/char/drm/i830_drm.h
@@ -191,7 +191,7 @@
 	unsigned int dirty;
 
 	unsigned int nbox;
-	drm_clip_rect_t boxes[I830_NR_SAREA_CLIPRECTS];
+	struct drm_clip_rect boxes[I830_NR_SAREA_CLIPRECTS];
 
 	/* Maintain an LRU of contiguous regions of texture space.  If
 	 * you think you own a region of texture memory, and it has an
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index e91f94a..ddda679 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -84,8 +84,8 @@
 } drm_i830_ring_buffer_t;
 
 typedef struct drm_i830_private {
-	drm_map_t *sarea_map;
-	drm_map_t *mmio_map;
+	struct drm_map *sarea_map;
+	struct drm_map *mmio_map;
 
 	drm_i830_sarea_t *sarea_priv;
 	drm_i830_ring_buffer_t ring;
@@ -95,7 +95,7 @@
 
 	dma_addr_t dma_status_page;
 
-	drm_buf_t *mmap_buffer;
+	struct drm_buf *mmap_buffer;
 
 	u32 front_di1, back_di1, zi1;
 
@@ -132,16 +132,16 @@
 			 unsigned int cmd, unsigned long arg);
 
 extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i830_driver_irq_preinstall(drm_device_t * dev);
-extern void i830_driver_irq_postinstall(drm_device_t * dev);
-extern void i830_driver_irq_uninstall(drm_device_t * dev);
+extern void i830_driver_irq_preinstall(struct drm_device * dev);
+extern void i830_driver_irq_postinstall(struct drm_device * dev);
+extern void i830_driver_irq_uninstall(struct drm_device * dev);
 extern int i830_driver_load(struct drm_device *, unsigned long flags);
-extern void i830_driver_preclose(drm_device_t * dev, DRMFILE filp);
-extern void i830_driver_lastclose(drm_device_t * dev);
-extern void i830_driver_reclaim_buffers_locked(drm_device_t * dev,
+extern void i830_driver_preclose(struct drm_device * dev, DRMFILE filp);
+extern void i830_driver_lastclose(struct drm_device * dev);
+extern void i830_driver_reclaim_buffers_locked(struct drm_device * dev,
 					       struct file *filp);
-extern int i830_driver_dma_quiescent(drm_device_t * dev);
-extern int i830_driver_device_is_agp(drm_device_t * dev);
+extern int i830_driver_dma_quiescent(struct drm_device * dev);
+extern int i830_driver_device_is_agp(struct drm_device * dev);
 
 #define I830_READ(reg)          DRM_READ32(dev_priv->mmio_map, reg)
 #define I830_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, reg, val)
@@ -180,7 +180,7 @@
 	I830_WRITE(LP_RING + RING_TAIL, outring);			\
 } while(0)
 
-extern int i830_wait_ring(drm_device_t * dev, int n, const char *caller);
+extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define GFX_OP_USER_INTERRUPT 		((0<<29)|(2<<23))
 #define GFX_OP_BREAKPOINT_INTERRUPT	((0<<29)|(1<<23))
diff --git a/drivers/char/drm/i830_irq.c b/drivers/char/drm/i830_irq.c
index 5841f76..a1b5c63 100644
--- a/drivers/char/drm/i830_irq.c
+++ b/drivers/char/drm/i830_irq.c
@@ -35,7 +35,7 @@
 
 irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
 {
-	drm_device_t *dev = (drm_device_t *) arg;
+	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
 	u16 temp;
 
@@ -53,7 +53,7 @@
 	return IRQ_HANDLED;
 }
 
-static int i830_emit_irq(drm_device_t * dev)
+static int i830_emit_irq(struct drm_device * dev)
 {
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
@@ -70,7 +70,7 @@
 	return atomic_read(&dev_priv->irq_emitted);
 }
 
-static int i830_wait_irq(drm_device_t * dev, int irq_nr)
+static int i830_wait_irq(struct drm_device * dev, int irq_nr)
 {
 	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
 	DECLARE_WAITQUEUE(entry, current);
@@ -117,8 +117,8 @@
 int i830_irq_emit(struct inode *inode, struct file *filp, unsigned int cmd,
 		  unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	drm_i830_irq_emit_t emit;
 	int result;
@@ -149,8 +149,8 @@
 int i830_irq_wait(struct inode *inode, struct file *filp, unsigned int cmd,
 		  unsigned long arg)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->head->dev;
 	drm_i830_private_t *dev_priv = dev->dev_private;
 	drm_i830_irq_wait_t irqwait;
 
@@ -168,7 +168,7 @@
 
 /* drm_dma.h hooks
 */
-void i830_driver_irq_preinstall(drm_device_t * dev)
+void i830_driver_irq_preinstall(struct drm_device * dev)
 {
 	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
 
@@ -180,14 +180,14 @@
 	init_waitqueue_head(&dev_priv->irq_queue);
 }
 
-void i830_driver_irq_postinstall(drm_device_t * dev)
+void i830_driver_irq_postinstall(struct drm_device * dev)
 {
 	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
 
 	I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
 }
 
-void i830_driver_irq_uninstall(drm_device_t * dev)
+void i830_driver_irq_uninstall(struct drm_device * dev)
 {
 	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
 	if (!dev_priv)
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index ea52740a..3359cc2 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -47,7 +47,7 @@
  * the head pointer changes, so that EBUSY only happens if the ring
  * actually stalls for (eg) 3 seconds.
  */
-int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
+int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
@@ -73,7 +73,7 @@
 	return DRM_ERR(EBUSY);
 }
 
-void i915_kernel_lost_context(drm_device_t * dev)
+void i915_kernel_lost_context(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
@@ -88,7 +88,7 @@
 		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
 
-static int i915_dma_cleanup(drm_device_t * dev)
+static int i915_dma_cleanup(struct drm_device * dev)
 {
 	/* Make sure interrupts are disabled here because the uninstall ioctl
 	 * may not have been called from userspace and after dev_private
@@ -126,13 +126,13 @@
 	return 0;
 }
 
-static int i915_initialize(drm_device_t * dev,
+static int i915_initialize(struct drm_device * dev,
 			   drm_i915_private_t * dev_priv,
 			   drm_i915_init_t * init)
 {
 	memset(dev_priv, 0, sizeof(drm_i915_private_t));
 
-	DRM_GETSAREA();
+	dev_priv->sarea = drm_getsarea(dev);
 	if (!dev_priv->sarea) {
 		DRM_ERROR("can not find sarea!\n");
 		dev->dev_private = (void *)dev_priv;
@@ -211,7 +211,7 @@
 	return 0;
 }
 
-static int i915_dma_resume(drm_device_t * dev)
+static int i915_dma_resume(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -357,7 +357,7 @@
 	return ret;
 }
 
-static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
+static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
@@ -396,12 +396,12 @@
 	return 0;
 }
 
-static int i915_emit_box(drm_device_t * dev,
-			 drm_clip_rect_t __user * boxes,
+static int i915_emit_box(struct drm_device * dev,
+			 struct drm_clip_rect __user * boxes,
 			 int i, int DR1, int DR4)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_clip_rect_t box;
+	struct drm_clip_rect box;
 	RING_LOCALS;
 
 	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
@@ -439,7 +439,7 @@
  * emit. For now, do it in both places:
  */
 
-static void i915_emit_breadcrumb(drm_device_t *dev)
+static void i915_emit_breadcrumb(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
@@ -457,7 +457,7 @@
 	ADVANCE_LP_RING();
 }
 
-static int i915_dispatch_cmdbuffer(drm_device_t * dev,
+static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 				   drm_i915_cmdbuffer_t * cmd)
 {
 	int nbox = cmd->num_cliprects;
@@ -489,11 +489,11 @@
 	return 0;
 }
 
-static int i915_dispatch_batchbuffer(drm_device_t * dev,
+static int i915_dispatch_batchbuffer(struct drm_device * dev,
 				     drm_i915_batchbuffer_t * batch)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_clip_rect_t __user *boxes = batch->cliprects;
+	struct drm_clip_rect __user *boxes = batch->cliprects;
 	int nbox = batch->num_cliprects;
 	int i = 0, count;
 	RING_LOCALS;
@@ -535,7 +535,7 @@
 	return 0;
 }
 
-static int i915_dispatch_flip(drm_device_t * dev)
+static int i915_dispatch_flip(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
@@ -583,7 +583,7 @@
 	return 0;
 }
 
-static int i915_quiescent(drm_device_t * dev)
+static int i915_quiescent(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
@@ -625,7 +625,7 @@
 
 	if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
 						       batch.num_cliprects *
-						       sizeof(drm_clip_rect_t)))
+						       sizeof(struct drm_clip_rect)))
 		return DRM_ERR(EFAULT);
 
 	ret = i915_dispatch_batchbuffer(dev, &batch);
@@ -655,7 +655,7 @@
 	if (cmdbuf.num_cliprects &&
 	    DRM_VERIFYAREA_READ(cmdbuf.cliprects,
 				cmdbuf.num_cliprects *
-				sizeof(drm_clip_rect_t))) {
+				sizeof(struct drm_clip_rect))) {
 		DRM_ERROR("Fault accessing cliprects\n");
 		return DRM_ERR(EFAULT);
 	}
@@ -792,7 +792,7 @@
 	return 0;
 }
 
-int i915_driver_load(drm_device_t *dev, unsigned long flags)
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
 	/* i915 has 4 more counters */
 	dev->counters += 4;
@@ -804,7 +804,7 @@
 	return 0;
 }
 
-void i915_driver_lastclose(drm_device_t * dev)
+void i915_driver_lastclose(struct drm_device * dev)
 {
 	if (dev->dev_private) {
 		drm_i915_private_t *dev_priv = dev->dev_private;
@@ -813,7 +813,7 @@
 	i915_dma_cleanup(dev);
 }
 
-void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
+void i915_driver_preclose(struct drm_device * dev, DRMFILE filp)
 {
 	if (dev->dev_private) {
 		drm_i915_private_t *dev_priv = dev->dev_private;
@@ -854,7 +854,7 @@
  * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
  */
-int i915_driver_device_is_agp(drm_device_t * dev)
+int i915_driver_device_is_agp(struct drm_device * dev)
 {
 	return 1;
 }
diff --git a/drivers/char/drm/i915_drm.h b/drivers/char/drm/i915_drm.h
index 7b7b68b9..05c66cf 100644
--- a/drivers/char/drm/i915_drm.h
+++ b/drivers/char/drm/i915_drm.h
@@ -64,7 +64,7 @@
 } drm_i915_init_t;
 
 typedef struct _drm_i915_sarea {
-	drm_tex_region_t texList[I915_NR_TEX_REGIONS + 1];
+	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
 	int last_upload;	/* last time texture was uploaded */
 	int last_enqueue;	/* last time a buffer was enqueued */
 	int last_dispatch;	/* age of the most recently dispatched buffer */
@@ -170,7 +170,7 @@
 	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
 	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
 	int num_cliprects;	/* multipass with multiple cliprects? */
-	drm_clip_rect_t __user *cliprects;	/* pointer to userspace cliprects */
+	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
 } drm_i915_batchbuffer_t;
 
 /* As above, but pass a pointer to userspace buffer which can be
@@ -182,7 +182,7 @@
 	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
 	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
 	int num_cliprects;	/* multipass with multiple cliprects? */
-	drm_clip_rect_t __user *cliprects;	/* pointer to userspace cliprects */
+	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
 } drm_i915_cmdbuffer_t;
 
 /* Userspace can request & wait on irq's:
@@ -259,7 +259,7 @@
  */
 typedef struct drm_i915_vblank_swap {
 	drm_drawable_t drawable;
-	drm_vblank_seq_type_t seqtype;
+	enum drm_vblank_seq_type seqtype;
 	unsigned int sequence;
 } drm_i915_vblank_swap_t;
 
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 85e323a..fd91856 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -120,11 +120,11 @@
 extern int i915_max_ioctl;
 
 				/* i915_dma.c */
-extern void i915_kernel_lost_context(drm_device_t * dev);
+extern void i915_kernel_lost_context(struct drm_device * dev);
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
-extern void i915_driver_lastclose(drm_device_t * dev);
-extern void i915_driver_preclose(drm_device_t * dev, DRMFILE filp);
-extern int i915_driver_device_is_agp(drm_device_t * dev);
+extern void i915_driver_lastclose(struct drm_device * dev);
+extern void i915_driver_preclose(struct drm_device * dev, DRMFILE filp);
+extern int i915_driver_device_is_agp(struct drm_device * dev);
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
 
@@ -132,12 +132,12 @@
 extern int i915_irq_emit(DRM_IOCTL_ARGS);
 extern int i915_irq_wait(DRM_IOCTL_ARGS);
 
-extern int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
-extern int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence);
+extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i915_driver_irq_preinstall(drm_device_t * dev);
-extern void i915_driver_irq_postinstall(drm_device_t * dev);
-extern void i915_driver_irq_uninstall(drm_device_t * dev);
+extern void i915_driver_irq_preinstall(struct drm_device * dev);
+extern void i915_driver_irq_postinstall(struct drm_device * dev);
+extern void i915_driver_irq_uninstall(struct drm_device * dev);
 extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS);
 extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS);
 extern int i915_vblank_swap(DRM_IOCTL_ARGS);
@@ -148,7 +148,7 @@
 extern int i915_mem_init_heap(DRM_IOCTL_ARGS);
 extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS);
 extern void i915_mem_takedown(struct mem_block **heap);
-extern void i915_mem_release(drm_device_t * dev,
+extern void i915_mem_release(struct drm_device * dev,
 			     DRMFILE filp, struct mem_block *heap);
 
 #define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, (reg))
@@ -188,7 +188,7 @@
 	I915_WRITE(LP_RING + RING_TAIL, outring);			\
 } while(0)
 
-extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
+extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define GFX_OP_USER_INTERRUPT 		((0<<29)|(2<<23))
 #define GFX_OP_BREAKPOINT_INTERRUPT	((0<<29)|(1<<23))
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index b92062a..4b4b2ce 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -42,7 +42,7 @@
  *
  * This function will be called with the HW lock held.
  */
-static void i915_vblank_tasklet(drm_device_t *dev)
+static void i915_vblank_tasklet(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
@@ -50,7 +50,7 @@
 	int nhits, nrects, slice[2], upper[2], lower[2], i;
 	unsigned counter[2] = { atomic_read(&dev->vbl_received),
 				atomic_read(&dev->vbl_received2) };
-	drm_drawable_info_t *drw;
+	struct drm_drawable_info *drw;
 	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	u32 cpp = dev_priv->cpp;
 	u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
@@ -95,7 +95,7 @@
 		list_for_each(hit, &hits) {
 			drm_i915_vbl_swap_t *swap_cmp =
 				list_entry(hit, drm_i915_vbl_swap_t, head);
-			drm_drawable_info_t *drw_cmp =
+			struct drm_drawable_info *drw_cmp =
 				drm_get_drawable_info(dev, swap_cmp->drw_id);
 
 			if (drw_cmp &&
@@ -160,7 +160,7 @@
 		list_for_each(hit, &hits) {
 			drm_i915_vbl_swap_t *swap_hit =
 				list_entry(hit, drm_i915_vbl_swap_t, head);
-			drm_clip_rect_t *rect;
+			struct drm_clip_rect *rect;
 			int num_rects, pipe;
 			unsigned short top, bottom;
 
@@ -211,7 +211,7 @@
 
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
-	drm_device_t *dev = (drm_device_t *) arg;
+	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u16 temp;
 
@@ -257,7 +257,7 @@
 	return IRQ_HANDLED;
 }
 
-static int i915_emit_irq(drm_device_t * dev)
+static int i915_emit_irq(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
@@ -283,7 +283,7 @@
 	return dev_priv->counter;
 }
 
-static int i915_wait_irq(drm_device_t * dev, int irq_nr)
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int ret = 0;
@@ -309,7 +309,7 @@
 	return ret;
 }
 
-static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence,
+static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
 				      atomic_t *counter)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -331,12 +331,12 @@
 }
 
 
-int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
+int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
 {
 	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
 }
 
-int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence)
+int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
 {
 	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
 }
@@ -389,7 +389,7 @@
 	return i915_wait_irq(dev, irqwait.irq_seq);
 }
 
-static void i915_enable_interrupt (drm_device_t *dev)
+static void i915_enable_interrupt (struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u16 flag;
@@ -569,7 +569,7 @@
 
 /* drm_dma.h hooks
 */
-void i915_driver_irq_preinstall(drm_device_t * dev)
+void i915_driver_irq_preinstall(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -578,7 +578,7 @@
 	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
 }
 
-void i915_driver_irq_postinstall(drm_device_t * dev)
+void i915_driver_irq_postinstall(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -592,7 +592,7 @@
 	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
 }
 
-void i915_driver_irq_uninstall(drm_device_t * dev)
+void i915_driver_irq_uninstall(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u16 temp;
diff --git a/drivers/char/drm/i915_mem.c b/drivers/char/drm/i915_mem.c
index 52c6732..50b4bac 100644
--- a/drivers/char/drm/i915_mem.c
+++ b/drivers/char/drm/i915_mem.c
@@ -43,11 +43,11 @@
  * block to allocate, and the ring is drained prior to allocations --
  * in other words allocation is expensive.
  */
-static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
+static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_tex_region_t *list;
+	struct drm_tex_region *list;
 	unsigned shift, nr;
 	unsigned start;
 	unsigned end;
@@ -208,7 +208,7 @@
 
 /* Free all blocks associated with the releasing file.
  */
-void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
+void i915_mem_release(struct drm_device * dev, DRMFILE filp, struct mem_block *heap)
 {
 	struct mem_block *p;
 
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index c2a4bac..9c73a6e 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -46,7 +46,7 @@
 
 #define MINIMAL_CLEANUP 0
 #define FULL_CLEANUP 1
-static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup);
+static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
 
 /* ================================================================
  * Engine control
@@ -224,7 +224,7 @@
 #define MGA_BUFFER_FREE		0
 
 #if MGA_FREELIST_DEBUG
-static void mga_freelist_print(drm_device_t * dev)
+static void mga_freelist_print(struct drm_device * dev)
 {
 	drm_mga_private_t *dev_priv = dev->dev_private;
 	drm_mga_freelist_t *entry;
@@ -245,10 +245,10 @@
 }
 #endif
 
-static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv)
+static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv)
 {
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
 	drm_mga_buf_priv_t *buf_priv;
 	drm_mga_freelist_t *entry;
 	int i;
@@ -291,7 +291,7 @@
 	return 0;
 }
 
-static void mga_freelist_cleanup(drm_device_t * dev)
+static void mga_freelist_cleanup(struct drm_device * dev)
 {
 	drm_mga_private_t *dev_priv = dev->dev_private;
 	drm_mga_freelist_t *entry;
@@ -311,10 +311,10 @@
 #if 0
 /* FIXME: Still needed?
  */
-static void mga_freelist_reset(drm_device_t * dev)
+static void mga_freelist_reset(struct drm_device * dev)
 {
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
 	drm_mga_buf_priv_t *buf_priv;
 	int i;
 
@@ -326,7 +326,7 @@
 }
 #endif
 
-static drm_buf_t *mga_freelist_get(drm_device_t * dev)
+static struct drm_buf *mga_freelist_get(struct drm_device * dev)
 {
 	drm_mga_private_t *dev_priv = dev->dev_private;
 	drm_mga_freelist_t *next;
@@ -359,7 +359,7 @@
 	return NULL;
 }
 
-int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf)
+int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
 {
 	drm_mga_private_t *dev_priv = dev->dev_private;
 	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@@ -393,7 +393,7 @@
  * DMA initialization, cleanup
  */
 
-int mga_driver_load(drm_device_t * dev, unsigned long flags)
+int mga_driver_load(struct drm_device * dev, unsigned long flags)
 {
 	drm_mga_private_t *dev_priv;
 
@@ -434,7 +434,7 @@
  *
  * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
  */
-static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
+static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
 				    drm_mga_dma_bootstrap_t * dma_bs)
 {
 	drm_mga_private_t *const dev_priv =
@@ -445,11 +445,11 @@
 	const unsigned secondary_size = dma_bs->secondary_bin_count
 	    * dma_bs->secondary_bin_size;
 	const unsigned agp_size = (dma_bs->agp_size << 20);
-	drm_buf_desc_t req;
-	drm_agp_mode_t mode;
-	drm_agp_info_t info;
-	drm_agp_buffer_t agp_req;
-	drm_agp_binding_t bind_req;
+	struct drm_buf_desc req;
+	struct drm_agp_mode mode;
+	struct drm_agp_info info;
+	struct drm_agp_buffer agp_req;
+	struct drm_agp_binding bind_req;
 
 	/* Acquire AGP. */
 	err = drm_agp_acquire(dev);
@@ -548,10 +548,10 @@
 	}
 
 	{
-		drm_map_list_t *_entry;
+		struct drm_map_list *_entry;
 		unsigned long agp_token = 0;
 		
-		list_for_each_entry(_entry, &dev->maplist->head, head) {
+		list_for_each_entry(_entry, &dev->maplist, head) {
 			if (_entry->map == dev->agp_buffer_map)
 				agp_token = _entry->user_token;
 		}
@@ -588,7 +588,7 @@
 	return 0;
 }
 #else
-static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
+static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
 				    drm_mga_dma_bootstrap_t * dma_bs)
 {
 	return -EINVAL;
@@ -609,7 +609,7 @@
  *
  * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
  */
-static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
+static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
 				    drm_mga_dma_bootstrap_t * dma_bs)
 {
 	drm_mga_private_t *const dev_priv =
@@ -618,7 +618,7 @@
 	unsigned int primary_size;
 	unsigned int bin_count;
 	int err;
-	drm_buf_desc_t req;
+	struct drm_buf_desc req;
 
 	if (dev->dma == NULL) {
 		DRM_ERROR("dev->dma is NULL\n");
@@ -699,7 +699,7 @@
 	return 0;
 }
 
-static int mga_do_dma_bootstrap(drm_device_t * dev,
+static int mga_do_dma_bootstrap(struct drm_device * dev,
 				drm_mga_dma_bootstrap_t * dma_bs)
 {
 	const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
@@ -793,7 +793,7 @@
 	return err;
 }
 
-static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
+static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
 {
 	drm_mga_private_t *dev_priv;
 	int ret;
@@ -823,8 +823,7 @@
 	dev_priv->texture_offset = init->texture_offset[0];
 	dev_priv->texture_size = init->texture_size[0];
 
-	DRM_GETSAREA();
-
+	dev_priv->sarea = drm_getsarea(dev);
 	if (!dev_priv->sarea) {
 		DRM_ERROR("failed to find sarea!\n");
 		return DRM_ERR(EINVAL);
@@ -934,7 +933,7 @@
 	return 0;
 }
 
-static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup)
+static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
 {
 	int err = 0;
 	DRM_DEBUG("\n");
@@ -963,8 +962,8 @@
 		if (dev_priv->used_new_dma_init) {
 #if __OS_HAS_AGP
 			if (dev_priv->agp_handle != 0) {
-				drm_agp_binding_t unbind_req;
-				drm_agp_buffer_t free_req;
+				struct drm_agp_binding unbind_req;
+				struct drm_agp_buffer free_req;
 
 				unbind_req.handle = dev_priv->agp_handle;
 				drm_agp_unbind(dev, &unbind_req);
@@ -1041,11 +1040,11 @@
 {
 	DRM_DEVICE;
 	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
-	drm_lock_t lock;
+	struct drm_lock lock;
 
 	LOCK_TEST_WITH_RETURN(dev, filp);
 
-	DRM_COPY_FROM_USER_IOCTL(lock, (drm_lock_t __user *) data,
+	DRM_COPY_FROM_USER_IOCTL(lock, (struct drm_lock __user *) data,
 				 sizeof(lock));
 
 	DRM_DEBUG("%s%s%s\n",
@@ -1087,9 +1086,9 @@
  * DMA buffer management
  */
 
-static int mga_dma_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d)
+static int mga_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d)
 {
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 	int i;
 
 	for (i = d->granted_count; i < d->request_count; i++) {
@@ -1114,10 +1113,10 @@
 int mga_dma_buffers(DRM_IOCTL_ARGS)
 {
 	DRM_DEVICE;
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
-	drm_dma_t __user *argp = (void __user *)data;
-	drm_dma_t d;
+	struct drm_dma __user *argp = (void __user *)data;
+	struct drm_dma d;
 	int ret = 0;
 
 	LOCK_TEST_WITH_RETURN(dev, filp);
@@ -1156,7 +1155,7 @@
 /**
  * Called just before the module is unloaded.
  */
-int mga_driver_unload(drm_device_t * dev)
+int mga_driver_unload(struct drm_device * dev)
 {
 	drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
 	dev->dev_private = NULL;
@@ -1167,12 +1166,12 @@
 /**
  * Called when the last opener of the device is closed.
  */
-void mga_driver_lastclose(drm_device_t * dev)
+void mga_driver_lastclose(struct drm_device * dev)
 {
 	mga_do_cleanup_dma(dev, FULL_CLEANUP);
 }
 
-int mga_driver_dma_quiescent(drm_device_t * dev)
+int mga_driver_dma_quiescent(struct drm_device * dev)
 {
 	drm_mga_private_t *dev_priv = dev->dev_private;
 	return mga_do_wait_for_idle(dev_priv);
diff --git a/drivers/char/drm/mga_drm.h b/drivers/char/drm/mga_drm.h
index 44d1293..944b50a 100644
--- a/drivers/char/drm/mga_drm.h
+++ b/drivers/char/drm/mga_drm.h
@@ -181,7 +181,7 @@
 
 	/* The current cliprects, or a subset thereof.
 	 */
-	drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS];
+	struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
 	unsigned int nbox;
 
 	/* Information about the most recently used 3d drawable.  The
@@ -202,7 +202,7 @@
 	unsigned int exported_nback;
 	int exported_back_x, exported_front_x, exported_w;
 	int exported_back_y, exported_front_y, exported_h;
-	drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS];
+	struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
 
 	/* Counters for aging textures and for client-side throttling.
 	 */
@@ -216,7 +216,7 @@
 
 	/* LRU lists for texture memory in agp space and on the card.
 	 */
-	drm_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
+	struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
 	unsigned int texAge[MGA_NR_TEX_HEAPS];
 
 	/* Mechanism to validate card state.
diff --git a/drivers/char/drm/mga_drv.c b/drivers/char/drm/mga_drv.c
index be49dbb..5572939 100644
--- a/drivers/char/drm/mga_drv.c
+++ b/drivers/char/drm/mga_drv.c
@@ -36,7 +36,7 @@
 
 #include "drm_pciids.h"
 
-static int mga_driver_device_is_agp(drm_device_t * dev);
+static int mga_driver_device_is_agp(struct drm_device * dev);
 
 static struct pci_device_id pciidlist[] = {
 	mga_PCI_IDS
@@ -118,7 +118,7 @@
  * \returns
  * If the device is a PCI G450, zero is returned.  Otherwise 2 is returned.
  */
-static int mga_driver_device_is_agp(drm_device_t * dev)
+static int mga_driver_device_is_agp(struct drm_device * dev)
 {
 	const struct pci_dev *const pdev = dev->pdev;
 
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index 6b0c531..49253af 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -65,7 +65,7 @@
 	struct drm_mga_freelist *next;
 	struct drm_mga_freelist *prev;
 	drm_mga_age_t age;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 } drm_mga_freelist_t;
 
 typedef struct {
@@ -157,10 +157,10 @@
 extern int mga_dma_flush(DRM_IOCTL_ARGS);
 extern int mga_dma_reset(DRM_IOCTL_ARGS);
 extern int mga_dma_buffers(DRM_IOCTL_ARGS);
-extern int mga_driver_load(drm_device_t *dev, unsigned long flags);
-extern int mga_driver_unload(drm_device_t * dev);
-extern void mga_driver_lastclose(drm_device_t * dev);
-extern int mga_driver_dma_quiescent(drm_device_t * dev);
+extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
+extern int mga_driver_unload(struct drm_device * dev);
+extern void mga_driver_lastclose(struct drm_device * dev);
+extern int mga_driver_dma_quiescent(struct drm_device * dev);
 
 extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
 
@@ -168,7 +168,7 @@
 extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
 extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
 
-extern int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf);
+extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf);
 
 				/* mga_warp.c */
 extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
@@ -176,12 +176,12 @@
 extern int mga_warp_init(drm_mga_private_t * dev_priv);
 
 				/* mga_irq.c */
-extern int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence);
-extern int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
+extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
+extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
 extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
-extern void mga_driver_irq_preinstall(drm_device_t * dev);
-extern void mga_driver_irq_postinstall(drm_device_t * dev);
-extern void mga_driver_irq_uninstall(drm_device_t * dev);
+extern void mga_driver_irq_preinstall(struct drm_device * dev);
+extern void mga_driver_irq_postinstall(struct drm_device * dev);
+extern void mga_driver_irq_uninstall(struct drm_device * dev);
 extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
 			     unsigned long arg);
 
diff --git a/drivers/char/drm/mga_irq.c b/drivers/char/drm/mga_irq.c
index eb96440..9302cb8 100644
--- a/drivers/char/drm/mga_irq.c
+++ b/drivers/char/drm/mga_irq.c
@@ -37,7 +37,7 @@
 
 irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
 {
-	drm_device_t *dev = (drm_device_t *) arg;
+	struct drm_device *dev = (struct drm_device *) arg;
 	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
 	int status;
 	int handled = 0;
@@ -78,7 +78,7 @@
 	return IRQ_NONE;
 }
 
-int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
+int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
 {
 	unsigned int cur_vblank;
 	int ret = 0;
@@ -96,7 +96,7 @@
 	return ret;
 }
 
-int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence)
+int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
 {
 	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
 	unsigned int cur_fence;
@@ -115,7 +115,7 @@
 	return ret;
 }
 
-void mga_driver_irq_preinstall(drm_device_t * dev)
+void mga_driver_irq_preinstall(struct drm_device * dev)
 {
 	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
 
@@ -125,7 +125,7 @@
 	MGA_WRITE(MGA_ICLEAR, ~0);
 }
 
-void mga_driver_irq_postinstall(drm_device_t * dev)
+void mga_driver_irq_postinstall(struct drm_device * dev)
 {
 	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
 
@@ -135,7 +135,7 @@
 	MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
 }
 
-void mga_driver_irq_uninstall(drm_device_t * dev)
+void mga_driver_irq_uninstall(struct drm_device * dev)
 {
 	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
 	if (!dev_priv)
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c
index 2837e66..d448b0a 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/char/drm/mga_state.c
@@ -42,7 +42,7 @@
  */
 
 static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
-			       drm_clip_rect_t * box)
+			       struct drm_clip_rect * box)
 {
 	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -480,12 +480,12 @@
  *
  */
 
-static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear)
+static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear)
 {
 	drm_mga_private_t *dev_priv = dev->dev_private;
 	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
-	drm_clip_rect_t *pbox = sarea_priv->boxes;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
 	int nbox = sarea_priv->nbox;
 	int i;
 	DMA_LOCALS;
@@ -500,7 +500,7 @@
 	ADVANCE_DMA();
 
 	for (i = 0; i < nbox; i++) {
-		drm_clip_rect_t *box = &pbox[i];
+		struct drm_clip_rect *box = &pbox[i];
 		u32 height = box->y2 - box->y1;
 
 		DRM_DEBUG("   from=%d,%d to=%d,%d\n",
@@ -568,12 +568,12 @@
 	FLUSH_DMA();
 }
 
-static void mga_dma_dispatch_swap(drm_device_t * dev)
+static void mga_dma_dispatch_swap(struct drm_device * dev)
 {
 	drm_mga_private_t *dev_priv = dev->dev_private;
 	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
-	drm_clip_rect_t *pbox = sarea_priv->boxes;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
 	int nbox = sarea_priv->nbox;
 	int i;
 	DMA_LOCALS;
@@ -598,7 +598,7 @@
 		  MGA_PLNWT, 0xffffffff, MGA_DWGCTL, MGA_DWGCTL_COPY);
 
 	for (i = 0; i < nbox; i++) {
-		drm_clip_rect_t *box = &pbox[i];
+		struct drm_clip_rect *box = &pbox[i];
 		u32 height = box->y2 - box->y1;
 		u32 start = box->y1 * dev_priv->front_pitch;
 
@@ -622,7 +622,7 @@
 	DRM_DEBUG("%s... done.\n", __FUNCTION__);
 }
 
-static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
+static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
 {
 	drm_mga_private_t *dev_priv = dev->dev_private;
 	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@@ -669,7 +669,7 @@
 	FLUSH_DMA();
 }
 
-static void mga_dma_dispatch_indices(drm_device_t * dev, drm_buf_t * buf,
+static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf,
 				     unsigned int start, unsigned int end)
 {
 	drm_mga_private_t *dev_priv = dev->dev_private;
@@ -718,7 +718,7 @@
 /* This copies a 64 byte aligned agp region to the framebuffer with a
  * standard blit, the ioctl needs to do checking.
  */
-static void mga_dma_dispatch_iload(drm_device_t * dev, drm_buf_t * buf,
+static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf,
 				   unsigned int dstorg, unsigned int length)
 {
 	drm_mga_private_t *dev_priv = dev->dev_private;
@@ -766,12 +766,12 @@
 	FLUSH_DMA();
 }
 
-static void mga_dma_dispatch_blit(drm_device_t * dev, drm_mga_blit_t * blit)
+static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit)
 {
 	drm_mga_private_t *dev_priv = dev->dev_private;
 	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
-	drm_clip_rect_t *pbox = sarea_priv->boxes;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
 	int nbox = sarea_priv->nbox;
 	u32 scandir = 0, i;
 	DMA_LOCALS;
@@ -880,8 +880,8 @@
 {
 	DRM_DEVICE;
 	drm_mga_private_t *dev_priv = dev->dev_private;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
 	drm_mga_buf_priv_t *buf_priv;
 	drm_mga_vertex_t vertex;
 
@@ -920,8 +920,8 @@
 {
 	DRM_DEVICE;
 	drm_mga_private_t *dev_priv = dev->dev_private;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
 	drm_mga_buf_priv_t *buf_priv;
 	drm_mga_indices_t indices;
 
@@ -959,9 +959,9 @@
 static int mga_dma_iload(DRM_IOCTL_ARGS)
 {
 	DRM_DEVICE;
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	drm_mga_private_t *dev_priv = dev->dev_private;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 	drm_mga_buf_priv_t *buf_priv;
 	drm_mga_iload_t iload;
 	DRM_DEBUG("\n");
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index 1014602..b163ed0 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -81,7 +81,7 @@
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
 };
 
-static int R128_READ_PLL(drm_device_t * dev, int addr)
+static int R128_READ_PLL(struct drm_device * dev, int addr)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
 
@@ -271,7 +271,7 @@
 
 /* Reset the engine.  This will stop the CCE if it is running.
  */
-static int r128_do_engine_reset(drm_device_t * dev)
+static int r128_do_engine_reset(struct drm_device * dev)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
@@ -308,7 +308,7 @@
 	return 0;
 }
 
-static void r128_cce_init_ring_buffer(drm_device_t * dev,
+static void r128_cce_init_ring_buffer(struct drm_device * dev,
 				      drm_r128_private_t * dev_priv)
 {
 	u32 ring_start;
@@ -347,7 +347,7 @@
 	R128_WRITE(R128_BUS_CNTL, tmp);
 }
 
-static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
+static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
 {
 	drm_r128_private_t *dev_priv;
 
@@ -456,8 +456,7 @@
 	dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
 					 (dev_priv->span_offset >> 5));
 
-	DRM_GETSAREA();
-
+	dev_priv->sarea = drm_getsarea(dev);
 	if (!dev_priv->sarea) {
 		DRM_ERROR("could not find sarea!\n");
 		dev->dev_private = (void *)dev_priv;
@@ -585,7 +584,7 @@
 	return 0;
 }
 
-int r128_do_cleanup_cce(drm_device_t * dev)
+int r128_do_cleanup_cce(struct drm_device * dev)
 {
 
 	/* Make sure interrupts are disabled here because the uninstall ioctl
@@ -770,11 +769,11 @@
 #define R128_BUFFER_FREE	0
 
 #if 0
-static int r128_freelist_init(drm_device_t * dev)
+static int r128_freelist_init(struct drm_device * dev)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	drm_r128_private_t *dev_priv = dev->dev_private;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 	drm_r128_buf_priv_t *buf_priv;
 	drm_r128_freelist_t *entry;
 	int i;
@@ -816,12 +815,12 @@
 }
 #endif
 
-static drm_buf_t *r128_freelist_get(drm_device_t * dev)
+static struct drm_buf *r128_freelist_get(struct drm_device * dev)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	drm_r128_buf_priv_t *buf_priv;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 	int i, t;
 
 	/* FIXME: Optimize -- use freelist code */
@@ -854,13 +853,13 @@
 	return NULL;
 }
 
-void r128_freelist_reset(drm_device_t * dev)
+void r128_freelist_reset(struct drm_device * dev)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int i;
 
 	for (i = 0; i < dma->buf_count; i++) {
-		drm_buf_t *buf = dma->buflist[i];
+		struct drm_buf *buf = dma->buflist[i];
 		drm_r128_buf_priv_t *buf_priv = buf->dev_private;
 		buf_priv->age = 0;
 	}
@@ -887,10 +886,10 @@
 	return DRM_ERR(EBUSY);
 }
 
-static int r128_cce_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d)
+static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d)
 {
 	int i;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 
 	for (i = d->granted_count; i < d->request_count; i++) {
 		buf = r128_freelist_get(dev);
@@ -914,10 +913,10 @@
 int r128_cce_buffers(DRM_IOCTL_ARGS)
 {
 	DRM_DEVICE;
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int ret = 0;
-	drm_dma_t __user *argp = (void __user *)data;
-	drm_dma_t d;
+	struct drm_dma __user *argp = (void __user *)data;
+	struct drm_dma d;
 
 	LOCK_TEST_WITH_RETURN(dev, filp);
 
diff --git a/drivers/char/drm/r128_drm.h b/drivers/char/drm/r128_drm.h
index 6e8af31..e94a39c 100644
--- a/drivers/char/drm/r128_drm.h
+++ b/drivers/char/drm/r128_drm.h
@@ -153,7 +153,7 @@
 
 	/* The current cliprects, or a subset thereof.
 	 */
-	drm_clip_rect_t boxes[R128_NR_SAREA_CLIPRECTS];
+	struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
 	unsigned int nbox;
 
 	/* Counters for client-side throttling of rendering clients.
@@ -161,7 +161,7 @@
 	unsigned int last_frame;
 	unsigned int last_dispatch;
 
-	drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
+	struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
 	unsigned int tex_age[R128_NR_TEX_HEAPS];
 	int ctx_owner;
 	int pfAllowPageFlip;	/* number of 3d windows (0,1,2 or more) */
diff --git a/drivers/char/drm/r128_drv.h b/drivers/char/drm/r128_drv.h
index 9086835..72249fb 100644
--- a/drivers/char/drm/r128_drv.h
+++ b/drivers/char/drm/r128_drv.h
@@ -57,7 +57,7 @@
 
 typedef struct drm_r128_freelist {
 	unsigned int age;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 	struct drm_r128_freelist *next;
 	struct drm_r128_freelist *prev;
 } drm_r128_freelist_t;
@@ -118,7 +118,7 @@
 	drm_local_map_t *cce_ring;
 	drm_local_map_t *ring_rptr;
 	drm_local_map_t *agp_textures;
-	drm_ati_pcigart_info gart_info;
+	struct drm_ati_pcigart_info gart_info;
 } drm_r128_private_t;
 
 typedef struct drm_r128_buf_priv {
@@ -142,21 +142,21 @@
 extern int r128_fullscreen(DRM_IOCTL_ARGS);
 extern int r128_cce_buffers(DRM_IOCTL_ARGS);
 
-extern void r128_freelist_reset(drm_device_t * dev);
+extern void r128_freelist_reset(struct drm_device * dev);
 
 extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
 
 extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
-extern int r128_do_cleanup_cce(drm_device_t * dev);
+extern int r128_do_cleanup_cce(struct drm_device * dev);
 
-extern int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
+extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
 
 extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
-extern void r128_driver_irq_preinstall(drm_device_t * dev);
-extern void r128_driver_irq_postinstall(drm_device_t * dev);
-extern void r128_driver_irq_uninstall(drm_device_t * dev);
-extern void r128_driver_lastclose(drm_device_t * dev);
-extern void r128_driver_preclose(drm_device_t * dev, DRMFILE filp);
+extern void r128_driver_irq_preinstall(struct drm_device * dev);
+extern void r128_driver_irq_postinstall(struct drm_device * dev);
+extern void r128_driver_irq_uninstall(struct drm_device * dev);
+extern void r128_driver_lastclose(struct drm_device * dev);
+extern void r128_driver_preclose(struct drm_device * dev, DRMFILE filp);
 
 extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
diff --git a/drivers/char/drm/r128_irq.c b/drivers/char/drm/r128_irq.c
index 87f8ca2..c76fdca 100644
--- a/drivers/char/drm/r128_irq.c
+++ b/drivers/char/drm/r128_irq.c
@@ -37,7 +37,7 @@
 
 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
 {
-	drm_device_t *dev = (drm_device_t *) arg;
+	struct drm_device *dev = (struct drm_device *) arg;
 	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
 	int status;
 
@@ -54,7 +54,7 @@
 	return IRQ_NONE;
 }
 
-int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
+int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
 {
 	unsigned int cur_vblank;
 	int ret = 0;
@@ -72,7 +72,7 @@
 	return ret;
 }
 
-void r128_driver_irq_preinstall(drm_device_t * dev)
+void r128_driver_irq_preinstall(struct drm_device * dev)
 {
 	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
 
@@ -82,7 +82,7 @@
 	R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
 }
 
-void r128_driver_irq_postinstall(drm_device_t * dev)
+void r128_driver_irq_postinstall(struct drm_device * dev)
 {
 	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
 
@@ -90,7 +90,7 @@
 	R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
 }
 
-void r128_driver_irq_uninstall(drm_device_t * dev)
+void r128_driver_irq_uninstall(struct drm_device * dev)
 {
 	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
 	if (!dev_priv)
diff --git a/drivers/char/drm/r128_state.c b/drivers/char/drm/r128_state.c
index 17b11e7..7b334fb 100644
--- a/drivers/char/drm/r128_state.c
+++ b/drivers/char/drm/r128_state.c
@@ -38,7 +38,7 @@
  */
 
 static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
-				 drm_clip_rect_t * boxes, int count)
+				 struct drm_clip_rect * boxes, int count)
 {
 	u32 aux_sc_cntl = 0x00000000;
 	RING_LOCALS;
@@ -352,13 +352,13 @@
 		 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
 }
 
-static void r128_cce_dispatch_clear(drm_device_t * dev,
+static void r128_cce_dispatch_clear(struct drm_device * dev,
 				    drm_r128_clear_t * clear)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	int nbox = sarea_priv->nbox;
-	drm_clip_rect_t *pbox = sarea_priv->boxes;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
 	unsigned int flags = clear->flags;
 	int i;
 	RING_LOCALS;
@@ -458,12 +458,12 @@
 	}
 }
 
-static void r128_cce_dispatch_swap(drm_device_t * dev)
+static void r128_cce_dispatch_swap(struct drm_device * dev)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	int nbox = sarea_priv->nbox;
-	drm_clip_rect_t *pbox = sarea_priv->boxes;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
 	int i;
 	RING_LOCALS;
 	DRM_DEBUG("%s\n", __FUNCTION__);
@@ -524,7 +524,7 @@
 	ADVANCE_RING();
 }
 
-static void r128_cce_dispatch_flip(drm_device_t * dev)
+static void r128_cce_dispatch_flip(struct drm_device * dev)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
@@ -567,7 +567,7 @@
 	ADVANCE_RING();
 }
 
-static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
+static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
@@ -637,8 +637,8 @@
 	sarea_priv->nbox = 0;
 }
 
-static void r128_cce_dispatch_indirect(drm_device_t * dev,
-				       drm_buf_t * buf, int start, int end)
+static void r128_cce_dispatch_indirect(struct drm_device * dev,
+				       struct drm_buf * buf, int start, int end)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
@@ -692,8 +692,8 @@
 	dev_priv->sarea_priv->last_dispatch++;
 }
 
-static void r128_cce_dispatch_indices(drm_device_t * dev,
-				      drm_buf_t * buf,
+static void r128_cce_dispatch_indices(struct drm_device * dev,
+				      struct drm_buf * buf,
 				      int start, int end, int count)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
@@ -777,11 +777,11 @@
 }
 
 static int r128_cce_dispatch_blit(DRMFILE filp,
-				  drm_device_t * dev, drm_r128_blit_t * blit)
+				  struct drm_device * dev, drm_r128_blit_t * blit)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
 	drm_r128_buf_priv_t *buf_priv;
 	u32 *data;
 	int dword_shift, dwords;
@@ -887,7 +887,7 @@
  * have hardware stencil support.
  */
 
-static int r128_cce_dispatch_write_span(drm_device_t * dev,
+static int r128_cce_dispatch_write_span(struct drm_device * dev,
 					drm_r128_depth_t * depth)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
@@ -983,7 +983,7 @@
 	return 0;
 }
 
-static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
+static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
 					  drm_r128_depth_t * depth)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1105,7 +1105,7 @@
 	return 0;
 }
 
-static int r128_cce_dispatch_read_span(drm_device_t * dev,
+static int r128_cce_dispatch_read_span(struct drm_device * dev,
 				       drm_r128_depth_t * depth)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1148,7 +1148,7 @@
 	return 0;
 }
 
-static int r128_cce_dispatch_read_pixels(drm_device_t * dev,
+static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
 					 drm_r128_depth_t * depth)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1220,7 +1220,7 @@
  * Polygon stipple
  */
 
-static void r128_cce_dispatch_stipple(drm_device_t * dev, u32 * stipple)
+static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	int i;
@@ -1269,7 +1269,7 @@
 	return 0;
 }
 
-static int r128_do_init_pageflip(drm_device_t * dev)
+static int r128_do_init_pageflip(struct drm_device * dev)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	DRM_DEBUG("\n");
@@ -1288,7 +1288,7 @@
 	return 0;
 }
 
-static int r128_do_cleanup_pageflip(drm_device_t * dev)
+static int r128_do_cleanup_pageflip(struct drm_device * dev)
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	DRM_DEBUG("\n");
@@ -1354,8 +1354,8 @@
 {
 	DRM_DEVICE;
 	drm_r128_private_t *dev_priv = dev->dev_private;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
 	drm_r128_buf_priv_t *buf_priv;
 	drm_r128_vertex_t vertex;
 
@@ -1413,8 +1413,8 @@
 {
 	DRM_DEVICE;
 	drm_r128_private_t *dev_priv = dev->dev_private;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
 	drm_r128_buf_priv_t *buf_priv;
 	drm_r128_indices_t elts;
 	int count;
@@ -1483,7 +1483,7 @@
 static int r128_cce_blit(DRM_IOCTL_ARGS)
 {
 	DRM_DEVICE;
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	drm_r128_blit_t blit;
 	int ret;
@@ -1571,8 +1571,8 @@
 {
 	DRM_DEVICE;
 	drm_r128_private_t *dev_priv = dev->dev_private;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
 	drm_r128_buf_priv_t *buf_priv;
 	drm_r128_indirect_t indirect;
 #if 0
@@ -1675,7 +1675,7 @@
 	return 0;
 }
 
-void r128_driver_preclose(drm_device_t * dev, DRMFILE filp)
+void r128_driver_preclose(struct drm_device * dev, DRMFILE filp)
 {
 	if (dev->dev_private) {
 		drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1685,7 +1685,7 @@
 	}
 }
 
-void r128_driver_lastclose(drm_device_t * dev)
+void r128_driver_lastclose(struct drm_device * dev)
 {
 	r128_do_cleanup_cce(dev);
 }
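
The r128_state.c hunks above are mechanical: each use of the old drm_device_t, drm_buf_t and drm_device_dma_t typedefs is replaced by its explicit struct tag. Below is a minimal, self-contained sketch of why the struct-tag form is convenient; the example_* names are hypothetical stand-ins, not DRM types.

/* Sketch only: hypothetical example_* types, not part of the DRM code. */
#include <stdio.h>

struct example_device;			/* forward declaration needs only the tag */

struct example_buf {
	int idx;
	struct example_device *dev;	/* pointer to an incomplete type is fine */
};

struct example_device {
	int primary;
	struct example_buf *bufs;
};

/* With struct tags a header can forward-declare the type instead of
 * pulling in the header that defines a typedef. */
static void example_dispatch(struct example_device *dev, struct example_buf *buf)
{
	printf("dev %d, buf %d\n", dev->primary, buf->idx);
}

int main(void)
{
	struct example_device dev = { .primary = 0, .bufs = NULL };
	struct example_buf buf = { .idx = 3, .dev = &dev };

	example_dispatch(&dev, &buf);
	return 0;
}
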
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
index 032a022..4e5aca6 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -55,7 +55,7 @@
 static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
 			       drm_radeon_kcmd_buffer_t *cmdbuf, int n)
 {
-	drm_clip_rect_t box;
+	struct drm_clip_rect box;
 	int nr;
 	int i;
 	RING_LOCALS;
@@ -148,15 +148,16 @@
 
 	/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
 	ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
-	ADD_RANGE(0x2080, 1);
+	ADD_RANGE(R300_VAP_CNTL, 1);
 	ADD_RANGE(R300_SE_VTE_CNTL, 2);
 	ADD_RANGE(0x2134, 2);
-	ADD_RANGE(0x2140, 1);
+	ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
 	ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
 	ADD_RANGE(0x21DC, 1);
-	ADD_RANGE(0x221C, 1);
-	ADD_RANGE(0x2220, 4);
-	ADD_RANGE(0x2288, 1);
+	ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
+	ADD_RANGE(R300_VAP_CLIP_X_0, 4);
+	ADD_RANGE(R300_VAP_PVS_WAITIDLE, 1);
+	ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
 	ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
 	ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
 	ADD_RANGE(R300_GB_ENABLE, 1);
@@ -168,13 +169,13 @@
 	ADD_RANGE(R300_RE_POINTSIZE, 1);
 	ADD_RANGE(0x4230, 3);
 	ADD_RANGE(R300_RE_LINE_CNT, 1);
-	ADD_RANGE(0x4238, 1);
+	ADD_RANGE(R300_RE_UNK4238, 1);
 	ADD_RANGE(0x4260, 3);
-	ADD_RANGE(0x4274, 4);
-	ADD_RANGE(0x4288, 5);
-	ADD_RANGE(0x42A0, 1);
+	ADD_RANGE(R300_RE_SHADE, 4);
+	ADD_RANGE(R300_RE_POLYGON_MODE, 5);
+	ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
 	ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
-	ADD_RANGE(0x42B4, 1);
+	ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
 	ADD_RANGE(R300_RE_CULL_CNTL, 1);
 	ADD_RANGE(0x42C0, 2);
 	ADD_RANGE(R300_RS_CNTL_0, 2);
@@ -190,22 +191,22 @@
 	ADD_RANGE(R300_PFS_INSTR1_0, 64);
 	ADD_RANGE(R300_PFS_INSTR2_0, 64);
 	ADD_RANGE(R300_PFS_INSTR3_0, 64);
-	ADD_RANGE(0x4BC0, 1);
-	ADD_RANGE(0x4BC8, 3);
+	ADD_RANGE(R300_RE_FOG_STATE, 1);
+	ADD_RANGE(R300_FOG_COLOR_R, 3);
 	ADD_RANGE(R300_PP_ALPHA_TEST, 2);
 	ADD_RANGE(0x4BD8, 1);
 	ADD_RANGE(R300_PFS_PARAM_0_X, 64);
 	ADD_RANGE(0x4E00, 1);
 	ADD_RANGE(R300_RB3D_CBLEND, 2);
 	ADD_RANGE(R300_RB3D_COLORMASK, 1);
-	ADD_RANGE(0x4E10, 3);
+	ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
 	ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET);	/* check offset */
 	ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
 	ADD_RANGE(0x4E50, 9);
 	ADD_RANGE(0x4E88, 1);
 	ADD_RANGE(0x4EA0, 2);
 	ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
-	ADD_RANGE(0x4F10, 4);
+	ADD_RANGE(R300_RB3D_ZSTENCIL_FORMAT, 4);
 	ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);	/* check offset */
 	ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
 	ADD_RANGE(0x4F28, 1);
@@ -224,7 +225,7 @@
 	ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
 
 	/* Sporadic registers used as primitives are emitted */
-	ADD_RANGE(0x4f18, 1);
+	ADD_RANGE(R300_RB3D_ZCACHE_CTLSTAT, 1);
 	ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
 	ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
 	ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
@@ -692,9 +693,9 @@
 
 	BEGIN_RING(6);
 	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
-	OUT_RING(0xa);
-	OUT_RING(CP_PACKET0(0x4f18, 0));
-	OUT_RING(0x3);
+	OUT_RING(R300_RB3D_DSTCACHE_UNKNOWN_0A);
+	OUT_RING(CP_PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+	OUT_RING(R300_RB3D_ZCACHE_UNKNOWN_03);
 	OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
 	OUT_RING(0x0);
 	ADVANCE_RING();
@@ -705,7 +706,7 @@
  * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
  * be careful about how this function is called.
  */
-static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
+static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
@@ -766,8 +767,8 @@
 	}
 	
 	BEGIN_RING(2);
-	OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0));
-	OUT_RING(dev_priv->scratch_ages[header.scratch.reg]);
+	OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
+	OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
 	ADVANCE_RING();
 	
 	return 0;
@@ -778,14 +779,14 @@
  * commands on the DMA ring buffer.
  * Called by the ioctl handler function radeon_cp_cmdbuf.
  */
-int r300_do_cp_cmdbuf(drm_device_t *dev,
+int r300_do_cp_cmdbuf(struct drm_device *dev,
 		      DRMFILE filp,
-		      drm_file_t *filp_priv,
+		      struct drm_file *filp_priv,
 		      drm_radeon_kcmd_buffer_t *cmdbuf)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf = NULL;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf = NULL;
 	int emit_dispatch_age = 0;
 	int ret = 0;
 
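
The r300_cmdbuf.c hunks above only swap raw offsets in the ADD_RANGE() table for named register constants; the whitelist logic itself is unchanged. Below is a minimal sketch of how such a range whitelist can be checked; the reg_range/range_allowed/EXAMPLE_* names are hypothetical illustrations, not the driver's actual implementation.

/* Sketch only: hypothetical names, not the driver's actual code. */
#include <stdio.h>

struct reg_range {
	unsigned int start;	/* byte offset of the first register in the range */
	unsigned int count;	/* number of consecutive 32-bit registers */
};

/* A tiny whitelist in the spirit of the ADD_RANGE() table; named constants
 * make each entry self-documenting compared with bare hex offsets. */
#define EXAMPLE_VAP_CNTL	0x2080
#define EXAMPLE_SE_VTE_CNTL	0x20b0

static const struct reg_range allowed[] = {
	{ EXAMPLE_VAP_CNTL,    1 },
	{ EXAMPLE_SE_VTE_CNTL, 2 },
};

/* Return 1 if all n registers starting at byte offset reg are whitelisted. */
static int range_allowed(unsigned int reg, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < sizeof(allowed) / sizeof(allowed[0]); i++) {
		if (reg >= allowed[i].start &&
		    reg + n * 4 <= allowed[i].start + allowed[i].count * 4)
			return 1;
	}
	return 0;
}

int main(void)
{
	/* 0x2080 is allowed, 0x4f18 is not in this toy table. */
	printf("%d %d\n", range_allowed(EXAMPLE_VAP_CNTL, 1),
	       range_allowed(0x4f18, 1));
	return 0;
}
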
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
index ecda760..3ae57ec 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/char/drm/r300_reg.h
@@ -47,12 +47,12 @@
 #	define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT	28
 
 /*
-This file contains registers and constants for the R300. They have been
-found mostly by examining command buffers captured using glxtest, as well
-as by extrapolating some known registers and constants from the R200.
-
-I am fairly certain that they are correct unless stated otherwise in comments.
-*/
+ * This file contains registers and constants for the R300. They have been
+ * found mostly by examining command buffers captured using glxtest, as well
+ * as by extrapolating some known registers and constants from the R200.
+ * I am fairly certain that they are correct unless stated otherwise
+ * in comments.
+ */
 
 #define R300_SE_VPORT_XSCALE                0x1D98
 #define R300_SE_VPORT_XOFFSET               0x1D9C
@@ -61,49 +61,60 @@
 #define R300_SE_VPORT_ZSCALE                0x1DA8
 #define R300_SE_VPORT_ZOFFSET               0x1DAC
 
-/* This register is written directly and also starts data section in many 3d CP_PACKET3's */
+
+/*
+ * Vertex Array Processing (VAP) Control
+ * Stolen from r200 code by Christoph Brill (it's a guess!)
+ */
+#define R300_VAP_CNTL	0x2080
+
+/* This register is written directly and also starts data section
+ * in many 3d CP_PACKET3's
+ */
 #define R300_VAP_VF_CNTL	0x2084
+#	define	R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT              0
+#	define  R300_VAP_VF_CNTL__PRIM_NONE                     (0<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_POINTS                   (1<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINES                    (2<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINE_STRIP               (3<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLES                (4<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN             (5<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP           (6<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINE_LOOP                (12<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_QUADS                    (13<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_QUAD_STRIP               (14<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_POLYGON                  (15<<0)
 
-#	define	R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT                       0
-#	define  R300_VAP_VF_CNTL__PRIM_NONE				 (0<<0)
-#	define  R300_VAP_VF_CNTL__PRIM_POINTS				 (1<<0)
-#	define  R300_VAP_VF_CNTL__PRIM_LINES				 (2<<0)
-#	define  R300_VAP_VF_CNTL__PRIM_LINE_STRIP			 (3<<0)
-#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLES			 (4<<0)
-#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN			 (5<<0)
-#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP			 (6<<0)
-#	define  R300_VAP_VF_CNTL__PRIM_LINE_LOOP			 (12<<0)
-#	define  R300_VAP_VF_CNTL__PRIM_QUADS			 	 (13<<0)
-#	define  R300_VAP_VF_CNTL__PRIM_QUAD_STRIP			 (14<<0)
-#	define  R300_VAP_VF_CNTL__PRIM_POLYGON			 	 (15<<0)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK__SHIFT              4
+	/* State based - direct writes to registers trigger vertex
+           generation */
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED         (0<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_INDICES             (1<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST         (2<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED     (3<<4)
 
-#	define	R300_VAP_VF_CNTL__PRIM_WALK__SHIFT                       4
-	/* State based - direct writes to registers trigger vertex generation */
-#	define	R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED                      (0<<4)
-#	define	R300_VAP_VF_CNTL__PRIM_WALK_INDICES                          (1<<4)
-#	define	R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST                      (2<<4)
-#	define	R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED                  (3<<4)
+	/* I don't think I saw these three used.. */
+#	define	R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT            6
+#	define	R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT     9
+#	define	R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT        10
 
-		/* I don't think I saw these three used.. */
-#	define	R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT                     6
-#	define	R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT              9
-#	define	R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT                 10
-
-		/* index size - when not set the indices are assumed to be 16 bit */
-#	define	R300_VAP_VF_CNTL__INDEX_SIZE_32bit                      (1<<11)
-		/* number of vertices */
-#	define	R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT                    16
+	/* index size - when not set the indices are assumed to be 16 bit */
+#	define	R300_VAP_VF_CNTL__INDEX_SIZE_32bit              (1<<11)
+	/* number of vertices */
+#	define	R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT           16
 
 /* BEGIN: Wild guesses */
 #define R300_VAP_OUTPUT_VTX_FMT_0           0x2090
 #       define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT     (1<<0)
 #       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT   (1<<1)
-#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2)	/* GUESS */
-#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3)	/* GUESS */
-#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4)	/* GUESS */
-#       define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16)	/* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
 
 #define R300_VAP_OUTPUT_VTX_FMT_1           0x2094
+	/* each of the following is 3 bits wide, specifies number
+	   of components */
 #       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
 #       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
 #       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
@@ -112,7 +123,7 @@
 #       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
 #       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
 #       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
-/* END */
+/* END: Wild guesses */
 
 #define R300_SE_VTE_CNTL                  0x20b0
 #	define     R300_VPORT_X_SCALE_ENA                0x00000001
@@ -128,43 +139,54 @@
 #	define     R300_VTX_ST_DENORMALIZED              0x00001000
 
 /* BEGIN: Vertex data assembly - lots of uncertainties */
-/* gap */
-/* Where do we get our vertex data?
-//
-// Vertex data either comes either from immediate mode registers or from
-// vertex arrays.
-// There appears to be no mixed mode (though we can force the pitch of
-// vertex arrays to 0, effectively reusing the same element over and over
-// again).
-//
-// Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
-// if these registers influence vertex array processing.
-//
-// Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
-//
-// In both cases, vertex attributes are then passed through INPUT_ROUTE.
 
-// Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
-// into the vertex processor's input registers.
-// The first word routes the first input, the second word the second, etc.
-// The corresponding input is routed into the register with the given index.
-// The list is ended by a word with INPUT_ROUTE_END set.
-//
-// Always set COMPONENTS_4 in immediate mode. */
+/* gap */
+
+#define R300_VAP_CNTL_STATUS              0x2140
+#	define R300_VC_NO_SWAP                  (0 << 0)
+#	define R300_VC_16BIT_SWAP               (1 << 0)
+#	define R300_VC_32BIT_SWAP               (2 << 0)
+#	define R300_VAP_TCL_BYPASS		(1 << 8)
+
+/* gap */
+
+/* Where do we get our vertex data?
+ *
+ * Vertex data either comes either from immediate mode registers or from
+ * vertex arrays.
+ * There appears to be no mixed mode (though we can force the pitch of
+ * vertex arrays to 0, effectively reusing the same element over and over
+ * again).
+ *
+ * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
+ * if these registers influence vertex array processing.
+ *
+ * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
+ *
+ * In both cases, vertex attributes are then passed through INPUT_ROUTE.
+ *
+ * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
+ * into the vertex processor's input registers.
+ * The first word routes the first input, the second word the second, etc.
+ * The corresponding input is routed into the register with the given index.
+ * The list is ended by a word with INPUT_ROUTE_END set.
+ *
+ * Always set COMPONENTS_4 in immediate mode.
+ */
 
 #define R300_VAP_INPUT_ROUTE_0_0            0x2150
 #       define R300_INPUT_ROUTE_COMPONENTS_1     (0 << 0)
 #       define R300_INPUT_ROUTE_COMPONENTS_2     (1 << 0)
 #       define R300_INPUT_ROUTE_COMPONENTS_3     (2 << 0)
 #       define R300_INPUT_ROUTE_COMPONENTS_4     (3 << 0)
-#       define R300_INPUT_ROUTE_COMPONENTS_RGBA  (4 << 0)	/* GUESS */
+#       define R300_INPUT_ROUTE_COMPONENTS_RGBA  (4 << 0) /* GUESS */
 #       define R300_VAP_INPUT_ROUTE_IDX_SHIFT    8
-#       define R300_VAP_INPUT_ROUTE_IDX_MASK     (31 << 8)	/* GUESS */
+#       define R300_VAP_INPUT_ROUTE_IDX_MASK     (31 << 8) /* GUESS */
 #       define R300_VAP_INPUT_ROUTE_END          (1 << 13)
-#       define R300_INPUT_ROUTE_IMMEDIATE_MODE   (0 << 14)	/* GUESS */
-#       define R300_INPUT_ROUTE_FLOAT            (1 << 14)	/* GUESS */
-#       define R300_INPUT_ROUTE_UNSIGNED_BYTE    (2 << 14)	/* GUESS */
-#       define R300_INPUT_ROUTE_FLOAT_COLOR      (3 << 14)	/* GUESS */
+#       define R300_INPUT_ROUTE_IMMEDIATE_MODE   (0 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_FLOAT            (1 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_UNSIGNED_BYTE    (2 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_FLOAT_COLOR      (3 << 14) /* GUESS */
 #define R300_VAP_INPUT_ROUTE_0_1            0x2154
 #define R300_VAP_INPUT_ROUTE_0_2            0x2158
 #define R300_VAP_INPUT_ROUTE_0_3            0x215C
@@ -174,10 +196,12 @@
 #define R300_VAP_INPUT_ROUTE_0_7            0x216C
 
 /* gap */
+
 /* Notes:
-//  - always set up to produce at least two attributes:
-//    if vertex program uses only position, fglrx will set normal, too
-//  - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal */
+ *  - always set up to produce at least two attributes:
+ *    if vertex program uses only position, fglrx will set normal, too
+ *  - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal.
+ */
 #define R300_VAP_INPUT_CNTL_0               0x2180
 #       define R300_INPUT_CNTL_0_COLOR           0x00000001
 #define R300_VAP_INPUT_CNTL_1               0x2184
@@ -186,20 +210,22 @@
 #       define R300_INPUT_CNTL_COLOR             0x00000004
 #       define R300_INPUT_CNTL_TC0               0x00000400
 #       define R300_INPUT_CNTL_TC1               0x00000800
-#       define R300_INPUT_CNTL_TC2               0x00001000	/* GUESS */
-#       define R300_INPUT_CNTL_TC3               0x00002000	/* GUESS */
-#       define R300_INPUT_CNTL_TC4               0x00004000	/* GUESS */
-#       define R300_INPUT_CNTL_TC5               0x00008000	/* GUESS */
-#       define R300_INPUT_CNTL_TC6               0x00010000	/* GUESS */
-#       define R300_INPUT_CNTL_TC7               0x00020000	/* GUESS */
+#       define R300_INPUT_CNTL_TC2               0x00001000 /* GUESS */
+#       define R300_INPUT_CNTL_TC3               0x00002000 /* GUESS */
+#       define R300_INPUT_CNTL_TC4               0x00004000 /* GUESS */
+#       define R300_INPUT_CNTL_TC5               0x00008000 /* GUESS */
+#       define R300_INPUT_CNTL_TC6               0x00010000 /* GUESS */
+#       define R300_INPUT_CNTL_TC7               0x00020000 /* GUESS */
 
 /* gap */
+
 /* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0
-// are set to a swizzling bit pattern, other words are 0.
-//
-// In immediate mode, the pattern is always set to xyzw. In vertex array
-// mode, the swizzling pattern is e.g. used to set zw components in texture
-// coordinates with only tweo components. */
+ * are set to a swizzling bit pattern, other words are 0.
+ *
+ * In immediate mode, the pattern is always set to xyzw. In vertex array
+ * mode, the swizzling pattern is e.g. used to set zw components in texture
+ * coordinates with only two components.
+ */
 #define R300_VAP_INPUT_ROUTE_1_0            0x21E0
 #       define R300_INPUT_ROUTE_SELECT_X    0
 #       define R300_INPUT_ROUTE_SELECT_Y    1
@@ -208,11 +234,11 @@
 #       define R300_INPUT_ROUTE_SELECT_ZERO 4
 #       define R300_INPUT_ROUTE_SELECT_ONE  5
 #       define R300_INPUT_ROUTE_SELECT_MASK 7
-#       define R300_INPUT_ROUTE_X_SHIFT          0
-#       define R300_INPUT_ROUTE_Y_SHIFT          3
-#       define R300_INPUT_ROUTE_Z_SHIFT          6
-#       define R300_INPUT_ROUTE_W_SHIFT          9
-#       define R300_INPUT_ROUTE_ENABLE           (15 << 12)
+#       define R300_INPUT_ROUTE_X_SHIFT     0
+#       define R300_INPUT_ROUTE_Y_SHIFT     3
+#       define R300_INPUT_ROUTE_Z_SHIFT     6
+#       define R300_INPUT_ROUTE_W_SHIFT     9
+#       define R300_INPUT_ROUTE_ENABLE      (15 << 12)
 #define R300_VAP_INPUT_ROUTE_1_1            0x21E4
 #define R300_VAP_INPUT_ROUTE_1_2            0x21E8
 #define R300_VAP_INPUT_ROUTE_1_3            0x21EC
@@ -221,79 +247,107 @@
 #define R300_VAP_INPUT_ROUTE_1_6            0x21F8
 #define R300_VAP_INPUT_ROUTE_1_7            0x21FC
 
-/* END */
+/* END: Vertex data assembly */
 
 /* gap */
-/* BEGIN: Upload vertex program and data
-// The programmable vertex shader unit has a memory bank of unknown size
-// that can be written to in 16 byte units by writing the address into
-// UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
-//
-// Pointers into the memory bank are always in multiples of 16 bytes.
-//
-// The memory bank is divided into areas with fixed meaning.
-//
-// Starting at address UPLOAD_PROGRAM: Vertex program instructions.
-// Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
-// whereas the difference between known addresses suggests size 512.
-//
-// Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
-// Native reported limits and the VPI layout suggest size 256, whereas
-// difference between known addresses suggests size 512.
-//
-// At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
-// floating point pointsize. The exact purpose of this state is uncertain,
-// as there is also the R300_RE_POINTSIZE register.
-//
-// Multiple vertex programs and parameter sets can be loaded at once,
-// which could explain the size discrepancy. */
+
+/* BEGIN: Upload vertex program and data */
+
+/*
+ * The programmable vertex shader unit has a memory bank of unknown size
+ * that can be written to in 16 byte units by writing the address into
+ * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
+ *
+ * Pointers into the memory bank are always in multiples of 16 bytes.
+ *
+ * The memory bank is divided into areas with fixed meaning.
+ *
+ * Starting at address UPLOAD_PROGRAM: Vertex program instructions.
+ * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
+ * whereas the difference between known addresses suggests size 512.
+ *
+ * Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
+ * Native reported limits and the VPI layout suggest size 256, whereas
+ * difference between known addresses suggests size 512.
+ *
+ * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
+ * floating point pointsize. The exact purpose of this state is uncertain,
+ * as there is also the R300_RE_POINTSIZE register.
+ *
+ * Multiple vertex programs and parameter sets can be loaded at once,
+ * which could explain the size discrepancy.
+ */
 #define R300_VAP_PVS_UPLOAD_ADDRESS         0x2200
 #       define R300_PVS_UPLOAD_PROGRAM           0x00000000
 #       define R300_PVS_UPLOAD_PARAMETERS        0x00000200
 #       define R300_PVS_UPLOAD_POINTSIZE         0x00000406
-/* gap */
-#define R300_VAP_PVS_UPLOAD_DATA            0x2208
-/* END */
 
 /* gap */
+
+#define R300_VAP_PVS_UPLOAD_DATA            0x2208
+
+/* END: Upload vertex program and data */
+
+/* gap */
+
 /* I do not know the purpose of this register. However, I do know that
-// it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
-// for normal rendering. */
+ * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
+ * for normal rendering.
+ */
 #define R300_VAP_UNKNOWN_221C               0x221C
 #       define R300_221C_NORMAL                  0x00000000
 #       define R300_221C_CLEAR                   0x0001C000
 
+/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first
+ * plane is per-pixel and the second plane is per-vertex.
+ *
+ * This was determined by experimentation alone but I believe it is correct.
+ *
+ * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest.
+ */
+#define R300_VAP_CLIP_X_0                   0x2220
+#define R300_VAP_CLIP_X_1                   0x2224
+#define R300_VAP_CLIP_Y_0                   0x2228
+#define R300_VAP_CLIP_Y_1                   0x2230
+
 /* gap */
+
 /* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
-// rendering commands and overwriting vertex program parameters.
-// Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
-// avoids bugs caused by still running shaders reading bad data from memory. */
-#define R300_VAP_PVS_WAITIDLE               0x2284	/* GUESS */
+ * rendering commands and overwriting vertex program parameters.
+ * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
+ * avoids bugs caused by still running shaders reading bad data from memory.
+ */
+#define R300_VAP_PVS_WAITIDLE               0x2284 /* GUESS */
 
 /* Absolutely no clue what this register is about. */
 #define R300_VAP_UNKNOWN_2288               0x2288
-#       define R300_2288_R300                    0x00750000	/* -- nh */
-#       define R300_2288_RV350                   0x0000FFFF	/* -- Vladimir */
+#       define R300_2288_R300                    0x00750000 /* -- nh */
+#       define R300_2288_RV350                   0x0000FFFF /* -- Vladimir */
 
 /* gap */
+
 /* Addresses are relative to the vertex program instruction area of the
-// memory bank. PROGRAM_END points to the last instruction of the active
-// program
-//
-// The meaning of the two UNKNOWN fields is obviously not known. However,
-// experiments so far have shown that both *must* point to an instruction
-// inside the vertex program, otherwise the GPU locks up.
-// fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
-// CNTL_1_UNKNOWN points to instruction where last write to position takes place.
-// Most likely this is used to ignore rest of the program in cases where group of verts arent visible.
-// For some reason this "section" is sometimes accepted other instruction that have
-// no relationship with position calculations.
-*/
+ * memory bank. PROGRAM_END points to the last instruction of the active
+ * program.
+ *
+ * The meaning of the two UNKNOWN fields is obviously not known. However,
+ * experiments so far have shown that both *must* point to an instruction
+ * inside the vertex program, otherwise the GPU locks up.
+ *
+ * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
+ * R300_PVS_CNTL_1_POS_END_SHIFT points to the instruction where the last
+ * write to position takes place.
+ *
+ * Most likely this is used to ignore the rest of the program in cases
+ * where a group of verts isn't visible. For some reason this "section"
+ * sometimes accepts other instructions that have no relationship with
+ * position calculations.
+ */
 #define R300_VAP_PVS_CNTL_1                 0x22D0
 #       define R300_PVS_CNTL_1_PROGRAM_START_SHIFT   0
 #       define R300_PVS_CNTL_1_POS_END_SHIFT         10
 #       define R300_PVS_CNTL_1_PROGRAM_END_SHIFT     20
-/* Addresses are relative to the vertex program parameters area. */
+/* Addresses are relative to the vertex program parameters area. */
 #define R300_VAP_PVS_CNTL_2                 0x22D4
 #       define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
 #       define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT  16
@@ -302,23 +356,26 @@
 #       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
 
 /* The entire range from 0x2300 to 0x2AC inclusive seems to be used for
-// immediate vertices */
+ * immediate vertices
+ */
 #define R300_VAP_VTX_COLOR_R                0x2464
 #define R300_VAP_VTX_COLOR_G                0x2468
 #define R300_VAP_VTX_COLOR_B                0x246C
-#define R300_VAP_VTX_POS_0_X_1              0x2490	/* used for glVertex2*() */
+#define R300_VAP_VTX_POS_0_X_1              0x2490 /* used for glVertex2*() */
 #define R300_VAP_VTX_POS_0_Y_1              0x2494
-#define R300_VAP_VTX_COLOR_PKD              0x249C	/* RGBA */
-#define R300_VAP_VTX_POS_0_X_2              0x24A0	/* used for glVertex3*() */
+#define R300_VAP_VTX_COLOR_PKD              0x249C /* RGBA */
+#define R300_VAP_VTX_POS_0_X_2              0x24A0 /* used for glVertex3*() */
 #define R300_VAP_VTX_POS_0_Y_2              0x24A4
 #define R300_VAP_VTX_POS_0_Z_2              0x24A8
-#define R300_VAP_VTX_END_OF_PKT             0x24AC	/* write 0 to indicate end of packet? */
+/* write 0 to indicate end of packet? */
+#define R300_VAP_VTX_END_OF_PKT             0x24AC
 
 /* gap */
 
 /* These are values from r300_reg/r300_reg.h - they are known to be correct
-   and are here so we can use one register file instead of several
-   - Vladimir */
+ * and are here so we can use one register file instead of several
+ * - Vladimir
+ */
 #define R300_GB_VAP_RASTER_VTX_FMT_0	0x4000
 #	define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT	(1<<0)
 #	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT	(1<<1)
@@ -341,14 +398,16 @@
 #	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT	21
 
 /* UNK30 seems to enables point to quad transformation on textures
-   (or something closely related to that).
-   This bit is rather fatal at the time being due to lackings at pixel shader side */
+ * (or something closely related to that).
+ * At the time being this bit is rather fatal due to shortcomings on the
+ * pixel shader side.
+ */
 #define R300_GB_ENABLE	0x4008
 #	define R300_GB_POINT_STUFF_ENABLE	(1<<0)
 #	define R300_GB_LINE_STUFF_ENABLE	(1<<1)
 #	define R300_GB_TRIANGLE_STUFF_ENABLE	(1<<2)
 #	define R300_GB_STENCIL_AUTO_ENABLE	(1<<4)
-#	define R300_GB_UNK30			(1<<30)
+#	define R300_GB_UNK31			(1<<31)
 	/* each of the following is 2 bits wide */
 #define R300_GB_TEX_REPLICATE	0
 #define R300_GB_TEX_ST		1
@@ -383,11 +442,13 @@
 #	define R300_GB_MSPOS1__MS_Y5_SHIFT	20
 #	define R300_GB_MSPOS1__MSBD1		24
 
+
 #define R300_GB_TILE_CONFIG	0x4018
 #	define R300_GB_TILE_ENABLE	(1<<0)
 #	define R300_GB_TILE_PIPE_COUNT_RV300	0
 #	define R300_GB_TILE_PIPE_COUNT_R300	(3<<1)
 #	define R300_GB_TILE_PIPE_COUNT_R420	(7<<1)
+#	define R300_GB_TILE_PIPE_COUNT_RV410	(3<<1)
 #	define R300_GB_TILE_SIZE_8		0
 #	define R300_GB_TILE_SIZE_16		(1<<4)
 #	define R300_GB_TILE_SIZE_32		(2<<4)
@@ -442,17 +503,18 @@
 #	define R300_GB_W_SELECT_1		(1<<4)
 
 #define R300_GB_AA_CONFIG		0x4020
+#	define R300_AA_DISABLE			0x00
 #	define R300_AA_ENABLE			0x01
 #	define R300_AA_SUBSAMPLES_2		0
 #	define R300_AA_SUBSAMPLES_3		(1<<1)
 #	define R300_AA_SUBSAMPLES_4		(2<<1)
 #	define R300_AA_SUBSAMPLES_6		(3<<1)
 
-/* END */
-
 /* gap */
+
 /* Zero to flush caches. */
 #define R300_TX_CNTL                        0x4100
+#define R300_TX_FLUSH                       0x0
 
 /* The upper enable bits are guessed, based on fglrx reported limits. */
 #define R300_TX_ENABLE                      0x4104
@@ -474,24 +536,25 @@
 #       define R300_TX_ENABLE_15                 (1 << 15)
 
 /* The pointsize is given in multiples of 6. The pointsize can be
-// enormous: Clear() renders a single point that fills the entire
-// framebuffer. */
+ * enormous: Clear() renders a single point that fills the entire
+ * framebuffer.
+ */
 #define R300_RE_POINTSIZE                   0x421C
 #       define R300_POINTSIZE_Y_SHIFT            0
-#       define R300_POINTSIZE_Y_MASK             (0xFFFF << 0)	/* GUESS */
+#       define R300_POINTSIZE_Y_MASK             (0xFFFF << 0) /* GUESS */
 #       define R300_POINTSIZE_X_SHIFT            16
-#       define R300_POINTSIZE_X_MASK             (0xFFFF << 16)	/* GUESS */
+#       define R300_POINTSIZE_X_MASK             (0xFFFF << 16) /* GUESS */
 #       define R300_POINTSIZE_MAX             (R300_POINTSIZE_Y_MASK / 6)
 
 /* The line width is given in multiples of 6.
-   In default mode lines are classified as vertical lines.
-   HO: horizontal
-   VE: vertical or horizontal
-   HO & VE: no classification
-*/
+ * In default mode lines are classified as vertical lines.
+ * HO: horizontal
+ * VE: vertical or horizontal
+ * HO & VE: no classification
+ */
 #define R300_RE_LINE_CNT                      0x4234
 #       define R300_LINESIZE_SHIFT            0
-#       define R300_LINESIZE_MASK             (0xFFFF << 0)	/* GUESS */
+#       define R300_LINESIZE_MASK             (0xFFFF << 0) /* GUESS */
 #       define R300_LINESIZE_MAX             (R300_LINESIZE_MASK / 6)
 #       define R300_LINE_CNT_HO               (1 << 16)
 #       define R300_LINE_CNT_VE               (1 << 17)
@@ -499,6 +562,9 @@
 /* Some sort of scale or clamp value for texcoordless textures. */
 #define R300_RE_UNK4238                       0x4238
 
+/* Something shade related */
+#define R300_RE_SHADE                         0x4274
+
 #define R300_RE_SHADE_MODEL                   0x4278
 #	define R300_RE_SHADE_MODEL_SMOOTH     0x3aaaa
 #	define R300_RE_SHADE_MODEL_FLAT       0x39595
@@ -513,24 +579,31 @@
 #	define R300_PM_BACK_LINE              (1 << 7)
 #	define R300_PM_BACK_FILL              (1 << 8)
 
+/* Fog parameters */
+#define R300_RE_FOG_SCALE                     0x4294
+#define R300_RE_FOG_START                     0x4298
+
 /* Not sure why there are duplicate of factor and constant values.
-   My best guess so far is that there are seperate zbiases for test and write.
-   Ordering might be wrong.
-   Some of the tests indicate that fgl has a fallback implementation of zbias
-   via pixel shaders. */
+ * My best guess so far is that there are separate zbiases for test and write.
+ * Ordering might be wrong.
+ * Some of the tests indicate that fgl has a fallback implementation of zbias
+ * via pixel shaders.
+ */
+#define R300_RE_ZBIAS_CNTL                    0x42A0 /* GUESS */
 #define R300_RE_ZBIAS_T_FACTOR                0x42A4
 #define R300_RE_ZBIAS_T_CONSTANT              0x42A8
 #define R300_RE_ZBIAS_W_FACTOR                0x42AC
 #define R300_RE_ZBIAS_W_CONSTANT              0x42B0
 
 /* This register needs to be set to (1<<1) for RV350 to correctly
-   perform depth test (see --vb-triangles in r300_demo)
-   Don't know about other chips. - Vladimir
-   This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
-   My guess is that there are two bits for each zbias primitive (FILL, LINE, POINT).
-   One to enable depth test and one for depth write.
-   Yet this doesnt explain why depth writes work ...
-    */
+ * perform depth test (see --vb-triangles in r300_demo)
+ * Don't know about other chips. - Vladimir
+ * This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
+ * My guess is that there are two bits for each zbias primitive
+ * (FILL, LINE, POINT): one to enable depth test and one for depth write.
+ * Yet this doesn't explain why depth writes work ...
+ */
 #define R300_RE_OCCLUSION_CNTL		    0x42B4
 #	define R300_OCCLUSION_ON		(1<<1)
 
@@ -540,30 +613,38 @@
 #       define R300_FRONT_FACE_CCW               (0 << 2)
 #       define R300_FRONT_FACE_CW                (1 << 2)
 
-/* BEGIN: Rasterization / Interpolators - many guesses
-// 0_UNKNOWN_18 has always been set except for clear operations.
-// TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
-// on the vertex program, *not* the fragment program) */
+
+/* BEGIN: Rasterization / Interpolators - many guesses */
+
+/* 0_UNKNOWN_18 has always been set except for clear operations.
+ * TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
+ * on the vertex program, *not* the fragment program)
+ */
 #define R300_RS_CNTL_0                      0x4300
 #       define R300_RS_CNTL_TC_CNT_SHIFT         2
 #       define R300_RS_CNTL_TC_CNT_MASK          (7 << 2)
-#		define R300_RS_CNTL_CI_CNT_SHIFT         7	/* number of color interpolators used */
+	/* number of color interpolators used */
+#	define R300_RS_CNTL_CI_CNT_SHIFT         7
 #       define R300_RS_CNTL_0_UNKNOWN_18         (1 << 18)
-/* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n register. */
+	/* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n
+	   register. */
 #define R300_RS_CNTL_1                      0x4304
 
 /* gap */
+
 /* Only used for texture coordinates.
-// Use the source field to route texture coordinate input from the vertex program
-// to the desired interpolator. Note that the source field is relative to the
-// outputs the vertex program *actually* writes. If a vertex program only writes
-// texcoord[1], this will be source index 0.
-// Set INTERP_USED on all interpolators that produce data used by the
-// fragment program. INTERP_USED looks like a swizzling mask, but
-// I haven't seen it used that way.
-//
-// Note: The _UNKNOWN constants are always set in their respective register.
-// I don't know if this is necessary. */
+ * Use the source field to route texture coordinate input from the
+ * vertex program to the desired interpolator. Note that the source
+ * field is relative to the outputs the vertex program *actually*
+ * writes. If a vertex program only writes texcoord[1], this will
+ * be source index 0.
+ * Set INTERP_USED on all interpolators that produce data used by
+ * the fragment program. INTERP_USED looks like a swizzling mask,
+ * but I haven't seen it used that way.
+ *
+ * Note: The _UNKNOWN constants are always set in their respective
+ * register. I don't know if this is necessary.
+ */
 #define R300_RS_INTERP_0                    0x4310
 #define R300_RS_INTERP_1                    0x4314
 #       define R300_RS_INTERP_1_UNKNOWN          0x40
@@ -580,54 +661,63 @@
 #       define R300_RS_INTERP_USED               0x00D10000
 
 /* These DWORDs control how vertex data is routed into fragment program
-// registers, after interpolators. */
+ * registers, after interpolators.
+ */
 #define R300_RS_ROUTE_0                     0x4330
 #define R300_RS_ROUTE_1                     0x4334
 #define R300_RS_ROUTE_2                     0x4338
-#define R300_RS_ROUTE_3                     0x433C	/* GUESS */
-#define R300_RS_ROUTE_4                     0x4340	/* GUESS */
-#define R300_RS_ROUTE_5                     0x4344	/* GUESS */
-#define R300_RS_ROUTE_6                     0x4348	/* GUESS */
-#define R300_RS_ROUTE_7                     0x434C	/* GUESS */
+#define R300_RS_ROUTE_3                     0x433C /* GUESS */
+#define R300_RS_ROUTE_4                     0x4340 /* GUESS */
+#define R300_RS_ROUTE_5                     0x4344 /* GUESS */
+#define R300_RS_ROUTE_6                     0x4348 /* GUESS */
+#define R300_RS_ROUTE_7                     0x434C /* GUESS */
 #       define R300_RS_ROUTE_SOURCE_INTERP_0     0
 #       define R300_RS_ROUTE_SOURCE_INTERP_1     1
 #       define R300_RS_ROUTE_SOURCE_INTERP_2     2
 #       define R300_RS_ROUTE_SOURCE_INTERP_3     3
 #       define R300_RS_ROUTE_SOURCE_INTERP_4     4
-#       define R300_RS_ROUTE_SOURCE_INTERP_5     5	/* GUESS */
-#       define R300_RS_ROUTE_SOURCE_INTERP_6     6	/* GUESS */
-#       define R300_RS_ROUTE_SOURCE_INTERP_7     7	/* GUESS */
-#       define R300_RS_ROUTE_ENABLE              (1 << 3)	/* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_5     5 /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_6     6 /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_7     7 /* GUESS */
+#       define R300_RS_ROUTE_ENABLE              (1 << 3) /* GUESS */
 #       define R300_RS_ROUTE_DEST_SHIFT          6
-#       define R300_RS_ROUTE_DEST_MASK           (31 << 6)	/* GUESS */
+#       define R300_RS_ROUTE_DEST_MASK           (31 << 6) /* GUESS */
 
 /* Special handling for color: When the fragment program uses color,
-// the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
-// color register index. */
+ * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
+ * color register index.
+ *
+ * Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
+ * R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state.
+ * See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly
+ * correct or not. - Oliver.
+ */
 #       define R300_RS_ROUTE_0_COLOR             (1 << 14)
 #       define R300_RS_ROUTE_0_COLOR_DEST_SHIFT  17
-#       define R300_RS_ROUTE_0_COLOR_DEST_MASK   (31 << 17)	/* GUESS */
+#       define R300_RS_ROUTE_0_COLOR_DEST_MASK   (31 << 17) /* GUESS */
 /* As above, but for secondary color */
 #		define R300_RS_ROUTE_1_COLOR1            (1 << 14)
 #		define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
 #		define R300_RS_ROUTE_1_COLOR1_DEST_MASK  (31 << 17)
 #		define R300_RS_ROUTE_1_UNKNOWN11         (1 << 11)
-/* END */
+/* END: Rasterization / Interpolators - many guesses */
 
-/* BEGIN: Scissors and cliprects
-// There are four clipping rectangles. Their corner coordinates are inclusive.
-// Every pixel is assigned a number from 0 and 15 by setting bits 0-3 depending
-// on whether the pixel is inside cliprects 0-3, respectively. For example,
-// if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
-// the number 3 (binary 0011).
-// Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
-// the pixel is rasterized.
-//
-// In addition to this, there is a scissors rectangle. Only pixels inside the
-// scissors rectangle are drawn. (coordinates are inclusive)
-//
-// For some reason, the top-left corner of the framebuffer is at (1440, 1440)
-// for the purpose of clipping and scissors. */
+/* BEGIN: Scissors and cliprects */
+
+/* There are four clipping rectangles. Their corner coordinates are inclusive.
+ * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
+ * on whether the pixel is inside cliprects 0-3, respectively. For example,
+ * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
+ * the number 3 (binary 0011).
+ * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
+ * the pixel is rasterized.
+ *
+ * In addition to this, there is a scissors rectangle. Only pixels inside the
+ * scissors rectangle are drawn. (coordinates are inclusive)
+ *
+ * For some reason, the top-left corner of the framebuffer is at (1440, 1440)
+ * for the purpose of clipping and scissors.
+ */
 #define R300_RE_CLIPRECT_TL_0               0x43B0
 #define R300_RE_CLIPRECT_BR_0               0x43B4
 #define R300_RE_CLIPRECT_TL_1               0x43B8
@@ -661,6 +751,7 @@
 #       define R300_CLIP_3210                    (1 << 15)
 
 /* gap */
+
 #define R300_RE_SCISSORS_TL                 0x43E0
 #define R300_RE_SCISSORS_BR                 0x43E4
 #       define R300_SCISSORS_OFFSET              1440
@@ -668,12 +759,15 @@
 #       define R300_SCISSORS_X_MASK              (0x1FFF << 0)
 #       define R300_SCISSORS_Y_SHIFT             13
 #       define R300_SCISSORS_Y_MASK              (0x1FFF << 13)
-/* END */
+/* END: Scissors and cliprects */
 
-/* BEGIN: Texture specification
-// The texture specification dwords are grouped by meaning and not by texture unit.
-// This means that e.g. the offset for texture image unit N is found in register
-// TX_OFFSET_0 + (4*N) */
+/* BEGIN: Texture specification */
+
+/*
+ * The texture specification dwords are grouped by meaning and not by texture
+ * unit. This means that e.g. the offset for texture image unit N is found in
+ * register TX_OFFSET_0 + (4*N)
+ */
 #define R300_TX_FILTER_0                    0x4400
 #       define R300_TX_REPEAT                    0
 #       define R300_TX_MIRRORED                  1
@@ -697,13 +791,14 @@
 #	define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR         (10 <<  11)
 
 /* NOTE: NEAREST doesnt seem to exist.
-   Im not seting MAG_FILTER_MASK and (3 << 11) on for all
-   anisotropy modes because that would void selected mag filter */
-#	define R300_TX_MIN_FILTER_ANISO_NEAREST             ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
-#	define R300_TX_MIN_FILTER_ANISO_LINEAR              ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
-#	define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST ((1 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
-#	define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR  ((2 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
-#       define R300_TX_MIN_FILTER_MASK           ( (15 << 11) | (3 << 13) )
+ * I'm not setting MAG_FILTER_MASK and (3 << 11) on for all
+ * anisotropy modes because that would void the selected mag filter
+ */
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST             (0 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_LINEAR              (0 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR  (2 << 13)
+#       define R300_TX_MIN_FILTER_MASK   ( (15 << 11) | (3 << 13) )
 #	define R300_TX_MAX_ANISO_1_TO_1  (0 << 21)
 #	define R300_TX_MAX_ANISO_2_TO_1  (2 << 21)
 #	define R300_TX_MAX_ANISO_4_TO_1  (4 << 21)
@@ -734,10 +829,10 @@
 #       define R300_TX_HEIGHTMASK_SHIFT          11
 #       define R300_TX_HEIGHTMASK_MASK           (2047 << 11)
 #       define R300_TX_UNK23                     (1 << 23)
-#       define R300_TX_SIZE_SHIFT                26	/* largest of width, height */
-#       define R300_TX_SIZE_MASK                 (15 << 26)
-#       define R300_TX_SIZE_PROJECTED                     (1<<30)
-#       define R300_TX_SIZE_TXPITCH_EN                     (1<<31)
+#       define R300_TX_MAX_MIP_LEVEL_SHIFT       26
+#       define R300_TX_MAX_MIP_LEVEL_MASK        (0xf << 26)
+#       define R300_TX_SIZE_PROJECTED            (1<<30)
+#       define R300_TX_SIZE_TXPITCH_EN           (1<<31)
 #define R300_TX_FORMAT_0                    0x44C0
 	/* The interpretation of the format word by Wladimir van der Laan */
 	/* The X, Y, Z and W refer to the layout of the components.
@@ -761,11 +856,11 @@
 #	define R300_TX_FORMAT_DXT1	    	    0xF
 #	define R300_TX_FORMAT_DXT3	    	    0x10
 #	define R300_TX_FORMAT_DXT5	    	    0x11
-#	define R300_TX_FORMAT_D3DMFT_CxV8U8	    0x12	/* no swizzle */
-#	define R300_TX_FORMAT_A8R8G8B8	    	    0x13	/* no swizzle */
-#	define R300_TX_FORMAT_B8G8_B8G8	    	    0x14	/* no swizzle */
-#	define R300_TX_FORMAT_G8R8_G8B8	    	    0x15	/* no swizzle */
-						  /* 0x16 - some 16 bit green format.. ?? */
+#	define R300_TX_FORMAT_D3DMFT_CxV8U8	    0x12     /* no swizzle */
+#	define R300_TX_FORMAT_A8R8G8B8	    	    0x13     /* no swizzle */
+#	define R300_TX_FORMAT_B8G8_B8G8	    	    0x14     /* no swizzle */
+#	define R300_TX_FORMAT_G8R8_G8B8	    	    0x15     /* no swizzle */
+	/* 0x16 - some 16 bit green format.. ?? */
 #	define R300_TX_FORMAT_UNK25		   (1 << 25) /* no swizzle */
 #	define R300_TX_FORMAT_CUBIC_MAP		   (1 << 26)
 
@@ -793,23 +888,26 @@
 #	define R300_TX_FORMAT_W		3
 #	define R300_TX_FORMAT_ZERO	4
 #	define R300_TX_FORMAT_ONE	5
-#	define R300_TX_FORMAT_CUT_Z	6	/* 2.0*Z, everything above 1.0 is set to 0.0 */
-#	define R300_TX_FORMAT_CUT_W	7	/* 2.0*W, everything above 1.0 is set to 0.0 */
+	/* 2.0*Z, everything above 1.0 is set to 0.0 */
+#	define R300_TX_FORMAT_CUT_Z	6
+	/* 2.0*W, everything above 1.0 is set to 0.0 */
+#	define R300_TX_FORMAT_CUT_W	7
 
 #	define R300_TX_FORMAT_B_SHIFT	18
 #	define R300_TX_FORMAT_G_SHIFT	15
 #	define R300_TX_FORMAT_R_SHIFT	12
 #	define R300_TX_FORMAT_A_SHIFT	9
 	/* Convenience macro to take care of layout and swizzling */
-#	define R300_EASY_TX_FORMAT(B, G, R, A, FMT)	(\
-	  ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \
-	| ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \
-	| ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \
-	| ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \
-	| (R300_TX_FORMAT_##FMT) \
-	  )
-	/* These can be ORed with result of R300_EASY_TX_FORMAT() */
-	/* We don't really know what they do. Take values from a constant color ? */
+#	define R300_EASY_TX_FORMAT(B, G, R, A, FMT)	(		\
+		((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT)		\
+		| ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT)	\
+		| ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT)	\
+		| ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT)	\
+		| (R300_TX_FORMAT_##FMT)				\
+		)
+	/* These can be ORed with the result of R300_EASY_TX_FORMAT().
+	   We don't really know what they do. Take values from a
+	   constant color ? */
 #	define R300_TX_FORMAT_CONST_X		(1<<5)
 #	define R300_TX_FORMAT_CONST_Y		(2<<5)
 #	define R300_TX_FORMAT_CONST_Z		(4<<5)
@@ -819,7 +917,7 @@
 
 #define R300_TX_PITCH_0			    0x4500 /* obvious missing in gap */
 #define R300_TX_OFFSET_0                    0x4540
-/* BEGIN: Guess from R200 */
+	/* BEGIN: Guess from R200 */
 #       define R300_TXO_ENDIAN_NO_SWAP           (0 << 0)
 #       define R300_TXO_ENDIAN_BYTE_SWAP         (1 << 0)
 #       define R300_TXO_ENDIAN_WORD_SWAP         (2 << 0)
@@ -828,53 +926,61 @@
 #       define R300_TXO_MICRO_TILE               (1 << 3)
 #       define R300_TXO_OFFSET_MASK              0xffffffe0
 #       define R300_TXO_OFFSET_SHIFT             5
-/* END */
-#define R300_TX_CHROMA_KEY_0                      0x4580 /* 32 bit chroma key */
-#define R300_TX_BORDER_COLOR_0              0x45C0 //ff00ff00 == { 0, 1.0, 0, 1.0 }
+	/* END: Guess from R200 */
 
-/* END */
+/* 32 bit chroma key */
+#define R300_TX_CHROMA_KEY_0                      0x4580
+/* ff00ff00 == { 0, 1.0, 0, 1.0 } */
+#define R300_TX_BORDER_COLOR_0              0x45C0
 
-/* BEGIN: Fragment program instruction set
-// Fragment programs are written directly into register space.
-// There are separate instruction streams for texture instructions and ALU
-// instructions.
-// In order to synchronize these streams, the program is divided into up
-// to 4 nodes. Each node begins with a number of TEX operations, followed
-// by a number of ALU operations.
-// The first node can have zero TEX ops, all subsequent nodes must have at least
-// one TEX ops.
-// All nodes must have at least one ALU op.
-//
-// The index of the last node is stored in PFS_CNTL_0: A value of 0 means
-// 1 node, a value of 3 means 4 nodes.
-// The total amount of instructions is defined in PFS_CNTL_2. The offsets are
-// offsets into the respective instruction streams, while *_END points to the
-// last instruction relative to this offset. */
+/* END: Texture specification */
+
+/* BEGIN: Fragment program instruction set */
+
+/* Fragment programs are written directly into register space.
+ * There are separate instruction streams for texture instructions and ALU
+ * instructions.
+ * In order to synchronize these streams, the program is divided into up
+ * to 4 nodes. Each node begins with a number of TEX operations, followed
+ * by a number of ALU operations.
+ * The first node can have zero TEX ops, all subsequent nodes must have at
+ * least one TEX op.
+ * All nodes must have at least one ALU op.
+ *
+ * The index of the last node is stored in PFS_CNTL_0: A value of 0 means
+ * 1 node, a value of 3 means 4 nodes.
+ * The total amount of instructions is defined in PFS_CNTL_2. The offsets are
+ * offsets into the respective instruction streams, while *_END points to the
+ * last instruction relative to this offset.
+ */
 #define R300_PFS_CNTL_0                     0x4600
 #       define R300_PFS_CNTL_LAST_NODES_SHIFT    0
 #       define R300_PFS_CNTL_LAST_NODES_MASK     (3 << 0)
 #       define R300_PFS_CNTL_FIRST_NODE_HAS_TEX  (1 << 3)
 #define R300_PFS_CNTL_1                     0x4604
 /* There is an unshifted value here which has so far always been equal to the
-// index of the highest used temporary register. */
+ * index of the highest used temporary register.
+ */
 #define R300_PFS_CNTL_2                     0x4608
 #       define R300_PFS_CNTL_ALU_OFFSET_SHIFT    0
 #       define R300_PFS_CNTL_ALU_OFFSET_MASK     (63 << 0)
 #       define R300_PFS_CNTL_ALU_END_SHIFT       6
-#       define R300_PFS_CNTL_ALU_END_MASK        (63 << 0)
+#       define R300_PFS_CNTL_ALU_END_MASK        (63 << 6)
 #       define R300_PFS_CNTL_TEX_OFFSET_SHIFT    12
-#       define R300_PFS_CNTL_TEX_OFFSET_MASK     (31 << 12)	/* GUESS */
+#       define R300_PFS_CNTL_TEX_OFFSET_MASK     (31 << 12) /* GUESS */
 #       define R300_PFS_CNTL_TEX_END_SHIFT       18
-#       define R300_PFS_CNTL_TEX_END_MASK        (31 << 18)	/* GUESS */
+#       define R300_PFS_CNTL_TEX_END_MASK        (31 << 18) /* GUESS */
 
 /* gap */
+
 /* Nodes are stored backwards. The last active node is always stored in
-// PFS_NODE_3.
-// Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
-// first node is stored in NODE_2, the second node is stored in NODE_3.
-//
-// Offsets are relative to the master offset from PFS_CNTL_2.
-// LAST_NODE is set for the last node, and only for the last node. */
+ * PFS_NODE_3.
+ * Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
+ * first node is stored in NODE_2, the second node is stored in NODE_3.
+ *
+ * Offsets are relative to the master offset from PFS_CNTL_2.
+ */
 #define R300_PFS_NODE_0                     0x4610
 #define R300_PFS_NODE_1                     0x4614
 #define R300_PFS_NODE_2                     0x4618
@@ -887,91 +993,98 @@
 #       define R300_PFS_NODE_TEX_OFFSET_MASK     (31 << 12)
 #       define R300_PFS_NODE_TEX_END_SHIFT       17
 #       define R300_PFS_NODE_TEX_END_MASK        (31 << 17)
-/*#       define R300_PFS_NODE_LAST_NODE           (1 << 22) */
 #		define R300_PFS_NODE_OUTPUT_COLOR        (1 << 22)
 #		define R300_PFS_NODE_OUTPUT_DEPTH        (1 << 23)
 
 /* TEX
-// As far as I can tell, texture instructions cannot write into output
-// registers directly. A subsequent ALU instruction is always necessary,
-// even if it's just MAD o0, r0, 1, 0 */
+ * As far as I can tell, texture instructions cannot write into output
+ * registers directly. A subsequent ALU instruction is always necessary,
+ * even if it's just MAD o0, r0, 1, 0
+ */
 #define R300_PFS_TEXI_0                     0x4620
-#       define R300_FPITX_SRC_SHIFT              0
-#       define R300_FPITX_SRC_MASK               (31 << 0)
-#       define R300_FPITX_SRC_CONST              (1 << 5)	/* GUESS */
-#       define R300_FPITX_DST_SHIFT              6
-#       define R300_FPITX_DST_MASK               (31 << 6)
-#       define R300_FPITX_IMAGE_SHIFT            11
-#       define R300_FPITX_IMAGE_MASK             (15 << 11)	/* GUESS based on layout and native limits */
+#	define R300_FPITX_SRC_SHIFT              0
+#	define R300_FPITX_SRC_MASK               (31 << 0)
+	/* GUESS */
+#	define R300_FPITX_SRC_CONST              (1 << 5)
+#	define R300_FPITX_DST_SHIFT              6
+#	define R300_FPITX_DST_MASK               (31 << 6)
+#	define R300_FPITX_IMAGE_SHIFT            11
+	/* GUESS based on layout and native limits */
+#       define R300_FPITX_IMAGE_MASK             (15 << 11)
 /* Unsure if these are opcodes, or some kind of bitfield, but this is how
  * they were set when I checked
  */
-#		define R300_FPITX_OPCODE_SHIFT			15
-#			define R300_FPITX_OP_TEX			1
-#			define R300_FPITX_OP_KIL			2
-#			define R300_FPITX_OP_TXP			3
-#			define R300_FPITX_OP_TXB			4
+#	define R300_FPITX_OPCODE_SHIFT		15
+#		define R300_FPITX_OP_TEX	1
+#		define R300_FPITX_OP_KIL	2
+#		define R300_FPITX_OP_TXP	3
+#		define R300_FPITX_OP_TXB	4
+#	define R300_FPITX_OPCODE_MASK           (7 << 15)
 
 /* ALU
-// The ALU instructions register blocks are enumerated according to the order
-// in which fglrx. I assume there is space for 64 instructions, since
-// each block has space for a maximum of 64 DWORDs, and this matches reported
-// native limits.
-//
-// The basic functional block seems to be one MAD for each color and alpha,
-// and an adder that adds all components after the MUL.
-//  - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
-//  - DP4: Use OUTC_DP4, OUTA_DP4
-//  - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
-//  - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
-//  - CMP: If ARG2 < 0, return ARG1, else return ARG0
-//  - FLR: use FRC+MAD
-//  - XPD: use MAD+MAD
-//  - SGE, SLT: use MAD+CMP
-//  - RSQ: use ABS modifier for argument
-//  - Use OUTC_REPL_ALPHA to write results of an alpha-only operation (e.g. RCP)
-//    into color register
-//  - apparently, there's no quick DST operation
-//  - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
-//  - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
-//  - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
-//
-// Operand selection
-// First stage selects three sources from the available registers and
-// constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
-// fglrx sorts the three source fields: Registers before constants,
-// lower indices before higher indices; I do not know whether this is necessary.
-// fglrx fills unused sources with "read constant 0"
-// According to specs, you cannot select more than two different constants.
-//
-// Second stage selects the operands from the sources. This is defined in
-// INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
-// zero and one.
-// Swizzling and negation happens in this stage, as well.
-//
-// Important: Color and alpha seem to be mostly separate, i.e. their sources
-// selection appears to be fully independent (the register storage is probably
-// physically split into a color and an alpha section).
-// However (because of the apparent physical split), there is some interaction
-// WRT swizzling. If, for example, you want to load an R component into an
-// Alpha operand, this R component is taken from a *color* source, not from
-// an alpha source. The corresponding register doesn't even have to appear in
-// the alpha sources list. (I hope this alll makes sense to you)
-//
-// Destination selection
-// The destination register index is in FPI1 (color) and FPI3 (alpha) together
-// with enable bits.
-// There are separate enable bits for writing into temporary registers
-// (DSTC_REG_* /DSTA_REG) and and program output registers (DSTC_OUTPUT_* /DSTA_OUTPUT).
-// You can write to both at once, or not write at all (the same index
-// must be used for both).
-//
-// Note: There is a special form for LRP
-//  - Argument order is the same as in ARB_fragment_program.
-//  - Operation is MAD
-//  - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
-//  - Set FPI0/FPI2_SPECIAL_LRP
-// Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD */
+ * The ALU instruction register blocks are enumerated according to the
+ * order in which fglrx emits them. I assume there is space for 64
+ * instructions, since
+ * each block has space for a maximum of 64 DWORDs, and this matches reported
+ * native limits.
+ *
+ * The basic functional block seems to be one MAD for each color and alpha,
+ * and an adder that adds all components after the MUL.
+ *  - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
+ *  - DP4: Use OUTC_DP4, OUTA_DP4
+ *  - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
+ *  - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
+ *  - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1
+ *  - CMP: If ARG2 < 0, return ARG1, else return ARG0
+ *  - FLR: use FRC+MAD
+ *  - XPD: use MAD+MAD
+ *  - SGE, SLT: use MAD+CMP
+ *  - RSQ: use ABS modifier for argument
+ *  - Use OUTC_REPL_ALPHA to write results of an alpha-only operation
+ *    (e.g. RCP) into color register
+ *  - apparently, there's no quick DST operation
+ *  - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
+ *  - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
+ *  - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
+ *
+ * Operand selection
+ * First stage selects three sources from the available registers and
+ * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
+ * fglrx sorts the three source fields: Registers before constants,
+ * lower indices before higher indices; I do not know whether this is
+ * necessary.
+ *
+ * fglrx fills unused sources with "read constant 0"
+ * According to specs, you cannot select more than two different constants.
+ *
+ * Second stage selects the operands from the sources. This is defined in
+ * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
+ * zero and one.
+ * Swizzling and negation happens in this stage, as well.
+ *
+ * Important: Color and alpha seem to be mostly separate, i.e. their source
+ * selection appears to be fully independent (the register storage is probably
+ * physically split into a color and an alpha section).
+ * However (because of the apparent physical split), there is some interaction
+ * WRT swizzling. If, for example, you want to load an R component into an
+ * Alpha operand, this R component is taken from a *color* source, not from
+ * an alpha source. The corresponding register doesn't even have to appear in
+ * the alpha sources list. (I hope this all makes sense to you)
+ *
+ * Destination selection
+ * The destination register index is in FPI1 (color) and FPI3 (alpha)
+ * together with enable bits.
+ * There are separate enable bits for writing into temporary registers
+ * (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_*
+ * /DSTA_OUTPUT). You can write to both at once, or not write at all (the
+ * same index must be used for both).
+ *
+ * Note: There is a special form for LRP
+ *  - Argument order is the same as in ARB_fragment_program.
+ *  - Operation is MAD
+ *  - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
+ *  - Set FPI0/FPI2_SPECIAL_LRP
+ * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD
+ */
 #define R300_PFS_INSTR1_0                   0x46C0
 #       define R300_FPI1_SRC0C_SHIFT             0
 #       define R300_FPI1_SRC0C_MASK              (31 << 0)
@@ -982,6 +1095,7 @@
 #       define R300_FPI1_SRC2C_SHIFT             12
 #       define R300_FPI1_SRC2C_MASK              (31 << 12)
 #       define R300_FPI1_SRC2C_CONST             (1 << 17)
+#       define R300_FPI1_SRC_MASK                0x0003ffff
 #       define R300_FPI1_DSTC_SHIFT              18
 #       define R300_FPI1_DSTC_MASK               (31 << 18)
 #		define R300_FPI1_DSTC_REG_MASK_SHIFT     23
@@ -1003,6 +1117,7 @@
 #       define R300_FPI3_SRC2A_SHIFT             12
 #       define R300_FPI3_SRC2A_MASK              (31 << 12)
 #       define R300_FPI3_SRC2A_CONST             (1 << 17)
+#       define R300_FPI3_SRC_MASK                0x0003ffff
 #       define R300_FPI3_DSTA_SHIFT              18
 #       define R300_FPI3_DSTA_MASK               (31 << 18)
 #       define R300_FPI3_DSTA_REG                (1 << 23)
@@ -1028,7 +1143,8 @@
 #       define R300_FPI0_ARGC_SRC1C_LRP          15
 #       define R300_FPI0_ARGC_ZERO               20
 #       define R300_FPI0_ARGC_ONE                21
-#       define R300_FPI0_ARGC_HALF               22	/* GUESS */
+	/* GUESS */
+#       define R300_FPI0_ARGC_HALF               22
 #       define R300_FPI0_ARGC_SRC0C_YZX          23
 #       define R300_FPI0_ARGC_SRC1C_YZX          24
 #       define R300_FPI0_ARGC_SRC2C_YZX          25
@@ -1057,6 +1173,7 @@
 #       define R300_FPI0_OUTC_DP4                (2 << 23)
 #       define R300_FPI0_OUTC_MIN                (4 << 23)
 #       define R300_FPI0_OUTC_MAX                (5 << 23)
+#       define R300_FPI0_OUTC_CMPH               (7 << 23)
 #       define R300_FPI0_OUTC_CMP                (8 << 23)
 #       define R300_FPI0_OUTC_FRC                (9 << 23)
 #       define R300_FPI0_OUTC_REPL_ALPHA         (10 << 23)
@@ -1079,20 +1196,23 @@
 #       define R300_FPI2_ARGA_SRC1A_LRP          15
 #       define R300_FPI2_ARGA_ZERO               16
 #       define R300_FPI2_ARGA_ONE                17
-#       define R300_FPI2_ARGA_HALF               18	/* GUESS */
-
+	/* GUESS */
+#       define R300_FPI2_ARGA_HALF               18
 #       define R300_FPI2_ARG0A_SHIFT             0
 #       define R300_FPI2_ARG0A_MASK              (31 << 0)
 #       define R300_FPI2_ARG0A_NEG               (1 << 5)
-#		define R300_FPI2_ARG0A_ABS				 (1 << 6)	/* GUESS */
+	/* GUESS */
+#	define R300_FPI2_ARG0A_ABS		 (1 << 6)
 #       define R300_FPI2_ARG1A_SHIFT             7
 #       define R300_FPI2_ARG1A_MASK              (31 << 7)
 #       define R300_FPI2_ARG1A_NEG               (1 << 12)
-#		define R300_FPI2_ARG1A_ABS				 (1 << 13)	/* GUESS */
+	/* GUESS */
+#	define R300_FPI2_ARG1A_ABS		 (1 << 13)
 #       define R300_FPI2_ARG2A_SHIFT             14
 #       define R300_FPI2_ARG2A_MASK              (31 << 14)
 #       define R300_FPI2_ARG2A_NEG               (1 << 19)
-#		define R300_FPI2_ARG2A_ABS				 (1 << 20)	/* GUESS */
+	/* GUESS */
+#	define R300_FPI2_ARG2A_ABS		 (1 << 20)
 #       define R300_FPI2_SPECIAL_LRP             (1 << 21)
 #       define R300_FPI2_OUTA_MAD                (0 << 23)
 #       define R300_FPI2_OUTA_DP4                (1 << 23)
@@ -1106,9 +1226,19 @@
 #       define R300_FPI2_OUTA_RSQ                (11 << 23)
 #       define R300_FPI2_OUTA_SAT                (1 << 30)
 #       define R300_FPI2_UNKNOWN_31              (1 << 31)
-/* END */
+/* END: Fragment program instruction set */
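As a sketch only (not part of this patch), the packing scheme spelled out in the ALU comment above can be illustrated with the FPI2/FPI3 defines: a MOV on the alpha pipe is a MAD with neutral operands, and arg0a below stands for whichever R300_FPI2_ARGA_* selector picks the desired source.

#include <stdint.h>
#include "r300_reg.h"

/* Pack the alpha half of one ALU slot as dst = arg0 * 1.0 + 0.0. */
static void r300_pack_alpha_mov(uint32_t *fpi2, uint32_t *fpi3,
				uint32_t arg0a, unsigned int dst_temp)
{
	*fpi2 = R300_FPI2_OUTA_MAD
	      | (arg0a               << R300_FPI2_ARG0A_SHIFT)
	      | (R300_FPI2_ARGA_ONE  << R300_FPI2_ARG1A_SHIFT)
	      | (R300_FPI2_ARGA_ZERO << R300_FPI2_ARG2A_SHIFT);
	*fpi3 = (dst_temp << R300_FPI3_DSTA_SHIFT) | R300_FPI3_DSTA_REG;
}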
 
-/* gap */
+/* Fog state and color */
+#define R300_RE_FOG_STATE                   0x4BC0
+#       define R300_FOG_ENABLE                   (1 << 0)
+#	define R300_FOG_MODE_LINEAR              (0 << 1)
+#	define R300_FOG_MODE_EXP                 (1 << 1)
+#	define R300_FOG_MODE_EXP2                (2 << 1)
+#	define R300_FOG_MODE_MASK                (3 << 1)
+#define R300_FOG_COLOR_R                    0x4BC8
+#define R300_FOG_COLOR_G                    0x4BCC
+#define R300_FOG_COLOR_B                    0x4BD0
+
 #define R300_PP_ALPHA_TEST                  0x4BD4
 #       define R300_REF_ALPHA_MASK               0x000000ff
 #       define R300_ALPHA_TEST_FAIL              (0 << 8)
@@ -1123,6 +1253,7 @@
 #       define R300_ALPHA_TEST_ENABLE            (1 << 11)
 
 /* gap */
+
 /* Fragment program parameters in 7.16 floating point */
 #define R300_PFS_PARAM_0_X                  0x4C00
 #define R300_PFS_PARAM_0_Y                  0x4C04
@@ -1135,45 +1266,48 @@
 #define R300_PFS_PARAM_31_W                 0x4DFC
 
 /* Notes:
-// - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in the application
-// - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND are set to the same
-//   function (both registers are always set up completely in any case)
-// - Most blend flags are simply copied from R200 and not tested yet */
+ * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in
+ *   the application
+ * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND
+ *   are set to the same function (both registers are always set up
+ *   completely in any case)
+ * - Most blend flags are simply copied from R200 and not tested yet
+ */
 #define R300_RB3D_CBLEND                    0x4E04
 #define R300_RB3D_ABLEND                    0x4E08
- /* the following only appear in CBLEND */
+/* the following only appear in CBLEND */
 #       define R300_BLEND_ENABLE                     (1 << 0)
 #       define R300_BLEND_UNKNOWN                    (3 << 1)
 #       define R300_BLEND_NO_SEPARATE                (1 << 3)
- /* the following are shared between CBLEND and ABLEND */
+/* the following are shared between CBLEND and ABLEND */
 #       define R300_FCN_MASK                         (3  << 12)
 #       define R300_COMB_FCN_ADD_CLAMP               (0  << 12)
 #       define R300_COMB_FCN_ADD_NOCLAMP             (1  << 12)
 #       define R300_COMB_FCN_SUB_CLAMP               (2  << 12)
 #       define R300_COMB_FCN_SUB_NOCLAMP             (3  << 12)
-#       define R300_SRC_BLEND_GL_ZERO                (32 << 16)
-#       define R300_SRC_BLEND_GL_ONE                 (33 << 16)
-#       define R300_SRC_BLEND_GL_SRC_COLOR           (34 << 16)
-#       define R300_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
-#       define R300_SRC_BLEND_GL_DST_COLOR           (36 << 16)
-#       define R300_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
-#       define R300_SRC_BLEND_GL_SRC_ALPHA           (38 << 16)
-#       define R300_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
-#       define R300_SRC_BLEND_GL_DST_ALPHA           (40 << 16)
-#       define R300_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
-#       define R300_SRC_BLEND_GL_SRC_ALPHA_SATURATE  (42 << 16)
-#       define R300_SRC_BLEND_MASK                   (63 << 16)
-#       define R300_DST_BLEND_GL_ZERO                (32 << 24)
-#       define R300_DST_BLEND_GL_ONE                 (33 << 24)
-#       define R300_DST_BLEND_GL_SRC_COLOR           (34 << 24)
-#       define R300_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
-#       define R300_DST_BLEND_GL_DST_COLOR           (36 << 24)
-#       define R300_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
-#       define R300_DST_BLEND_GL_SRC_ALPHA           (38 << 24)
-#       define R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
-#       define R300_DST_BLEND_GL_DST_ALPHA           (40 << 24)
-#       define R300_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
-#       define R300_DST_BLEND_MASK                   (63 << 24)
+#       define R300_COMB_FCN_MIN                     (4  << 12)
+#       define R300_COMB_FCN_MAX                     (5  << 12)
+#       define R300_COMB_FCN_RSUB_CLAMP              (6  << 12)
+#       define R300_COMB_FCN_RSUB_NOCLAMP            (7  << 12)
+#       define R300_BLEND_GL_ZERO                    (32)
+#       define R300_BLEND_GL_ONE                     (33)
+#       define R300_BLEND_GL_SRC_COLOR               (34)
+#       define R300_BLEND_GL_ONE_MINUS_SRC_COLOR     (35)
+#       define R300_BLEND_GL_DST_COLOR               (36)
+#       define R300_BLEND_GL_ONE_MINUS_DST_COLOR     (37)
+#       define R300_BLEND_GL_SRC_ALPHA               (38)
+#       define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA     (39)
+#       define R300_BLEND_GL_DST_ALPHA               (40)
+#       define R300_BLEND_GL_ONE_MINUS_DST_ALPHA     (41)
+#       define R300_BLEND_GL_SRC_ALPHA_SATURATE      (42)
+#       define R300_BLEND_GL_CONST_COLOR             (43)
+#       define R300_BLEND_GL_ONE_MINUS_CONST_COLOR   (44)
+#       define R300_BLEND_GL_CONST_ALPHA             (45)
+#       define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA   (46)
+#       define R300_BLEND_MASK                       (63)
+#       define R300_SRC_BLEND_SHIFT                  (16)
+#       define R300_DST_BLEND_SHIFT                  (24)
+#define R300_RB3D_BLEND_COLOR               0x4E10
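To show how the reworked encoding is meant to be combined (unshifted R300_BLEND_GL_* factors plus the new shift constants), here is an illustrative sketch of a GL_SRC_ALPHA / GL_ONE_MINUS_SRC_ALPHA color blender value; it is not part of the patch itself.

#include <stdint.h>
#include "r300_reg.h"

/* CBLEND dword: enable blending, src*alpha + dst*(1-alpha), clamped. */
static uint32_t r300_cblend_src_alpha(void)
{
	return R300_BLEND_ENABLE
	     | R300_COMB_FCN_ADD_CLAMP
	     | (R300_BLEND_GL_SRC_ALPHA           << R300_SRC_BLEND_SHIFT)
	     | (R300_BLEND_GL_ONE_MINUS_SRC_ALPHA << R300_DST_BLEND_SHIFT);
}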
 #define R300_RB3D_COLORMASK                 0x4E0C
 #       define R300_COLORMASK0_B                 (1<<0)
 #       define R300_COLORMASK0_G                 (1<<1)
@@ -1181,41 +1315,49 @@
 #       define R300_COLORMASK0_A                 (1<<3)
 
 /* gap */
+
 #define R300_RB3D_COLOROFFSET0              0x4E28
-#       define R300_COLOROFFSET_MASK             0xFFFFFFF0	/* GUESS */
-#define R300_RB3D_COLOROFFSET1              0x4E2C	/* GUESS */
-#define R300_RB3D_COLOROFFSET2              0x4E30	/* GUESS */
-#define R300_RB3D_COLOROFFSET3              0x4E34	/* GUESS */
+#       define R300_COLOROFFSET_MASK             0xFFFFFFF0 /* GUESS */
+#define R300_RB3D_COLOROFFSET1              0x4E2C /* GUESS */
+#define R300_RB3D_COLOROFFSET2              0x4E30 /* GUESS */
+#define R300_RB3D_COLOROFFSET3              0x4E34 /* GUESS */
+
 /* gap */
+
 /* Bit 16: Larger tiles
-// Bit 17: 4x2 tiles
-// Bit 18: Extremely weird tile like, but some pixels duplicated? */
+ * Bit 17: 4x2 tiles
+ * Bit 18: Extremely weird tile like, but some pixels duplicated?
+ */
 #define R300_RB3D_COLORPITCH0               0x4E38
-#       define R300_COLORPITCH_MASK              0x00001FF8	/* GUESS */
-#       define R300_COLOR_TILE_ENABLE            (1 << 16)	/* GUESS */
-#       define R300_COLOR_MICROTILE_ENABLE       (1 << 17)	/* GUESS */
-#       define R300_COLOR_ENDIAN_NO_SWAP         (0 << 18)	/* GUESS */
-#       define R300_COLOR_ENDIAN_WORD_SWAP       (1 << 18)	/* GUESS */
-#       define R300_COLOR_ENDIAN_DWORD_SWAP      (2 << 18)	/* GUESS */
+#       define R300_COLORPITCH_MASK              0x00001FF8 /* GUESS */
+#       define R300_COLOR_TILE_ENABLE            (1 << 16) /* GUESS */
+#       define R300_COLOR_MICROTILE_ENABLE       (1 << 17) /* GUESS */
+#       define R300_COLOR_ENDIAN_NO_SWAP         (0 << 18) /* GUESS */
+#       define R300_COLOR_ENDIAN_WORD_SWAP       (1 << 18) /* GUESS */
+#       define R300_COLOR_ENDIAN_DWORD_SWAP      (2 << 18) /* GUESS */
 #       define R300_COLOR_FORMAT_RGB565          (2 << 22)
 #       define R300_COLOR_FORMAT_ARGB8888        (3 << 22)
-#define R300_RB3D_COLORPITCH1               0x4E3C	/* GUESS */
-#define R300_RB3D_COLORPITCH2               0x4E40	/* GUESS */
-#define R300_RB3D_COLORPITCH3               0x4E44	/* GUESS */
+#define R300_RB3D_COLORPITCH1               0x4E3C /* GUESS */
+#define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
+#define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
 
 /* gap */
+
 /* Guess by Vladimir.
-// Set to 0A before 3D operations, set to 02 afterwards. */
+ * Set to 0A before 3D operations, set to 02 afterwards.
+ */
 #define R300_RB3D_DSTCACHE_CTLSTAT          0x4E4C
-#       define R300_RB3D_DSTCACHE_02             0x00000002
-#       define R300_RB3D_DSTCACHE_0A             0x0000000A
+#       define R300_RB3D_DSTCACHE_UNKNOWN_02             0x00000002
+#       define R300_RB3D_DSTCACHE_UNKNOWN_0A             0x0000000A
 
 /* gap */
-/* There seems to be no "write only" setting, so use Z-test = ALWAYS for this. */
-/* Bit (1<<8) is the "test" bit. so plain write is 6  - vd */
+/* There seems to be no "write only" setting, so use Z-test = ALWAYS
+ * for this.
+ * Bit (1<<8) is the "test" bit, so plain write is 6 - vd
+ */
 #define R300_RB3D_ZSTENCIL_CNTL_0                   0x4F00
-#       define R300_RB3D_Z_DISABLED_1            0x00000010	/* GUESS */
-#       define R300_RB3D_Z_DISABLED_2            0x00000014	/* GUESS */
+#       define R300_RB3D_Z_DISABLED_1            0x00000010
+#       define R300_RB3D_Z_DISABLED_2            0x00000014
 #       define R300_RB3D_Z_TEST                  0x00000012
 #       define R300_RB3D_Z_TEST_AND_WRITE        0x00000016
 #       define R300_RB3D_Z_WRITE_ONLY        	 0x00000006
@@ -1226,7 +1368,7 @@
 #	define R300_RB3D_STENCIL_ENABLE		 0x00000001
 
 #define R300_RB3D_ZSTENCIL_CNTL_1                   0x4F04
-		/* functions */
+	/* functions */
 #	define R300_ZS_NEVER			0
 #	define R300_ZS_LESS			1
 #	define R300_ZS_LEQUAL			2
@@ -1236,7 +1378,7 @@
 #	define R300_ZS_NOTEQUAL			6
 #	define R300_ZS_ALWAYS			7
 #       define R300_ZS_MASK                     7
-		/* operations */
+	/* operations */
 #	define R300_ZS_KEEP			0
 #	define R300_ZS_ZERO			1
 #	define R300_ZS_REPLACE			2
@@ -1245,9 +1387,8 @@
 #	define R300_ZS_INVERT			5
 #	define R300_ZS_INCR_WRAP		6
 #	define R300_ZS_DECR_WRAP		7
-
-       /* front and back refer to operations done for front
-          and back faces, i.e. separate stencil function support */
+	/* front and back refer to operations done for front
+	   and back faces, i.e. separate stencil function support */
 #	define R300_RB3D_ZS1_DEPTH_FUNC_SHIFT		0
 #	define R300_RB3D_ZS1_FRONT_FUNC_SHIFT		3
 #	define R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT	6
@@ -1269,45 +1410,64 @@
 #define R300_RB3D_ZSTENCIL_FORMAT                   0x4F10
 #	define R300_DEPTH_FORMAT_16BIT_INT_Z     (0 << 0)
 #	define R300_DEPTH_FORMAT_24BIT_INT_Z     (2 << 0)
+	/* 16-bit format or some additional bit? */
+#	define R300_DEPTH_FORMAT_UNK32          (32 << 0)
+
+#define R300_RB3D_EARLY_Z                           0x4F14
+#	define R300_EARLY_Z_DISABLE              (0 << 0)
+#	define R300_EARLY_Z_ENABLE               (1 << 0)
 
 /* gap */
+
+#define R300_RB3D_ZCACHE_CTLSTAT            0x4F18 /* GUESS */
+#       define R300_RB3D_ZCACHE_UNKNOWN_01  0x1
+#       define R300_RB3D_ZCACHE_UNKNOWN_03  0x3
+
+/* gap */
+
 #define R300_RB3D_DEPTHOFFSET               0x4F20
 #define R300_RB3D_DEPTHPITCH                0x4F24
-#       define R300_DEPTHPITCH_MASK              0x00001FF8	/* GUESS */
-#       define R300_DEPTH_TILE_ENABLE            (1 << 16)	/* GUESS */
-#       define R300_DEPTH_MICROTILE_ENABLE       (1 << 17)	/* GUESS */
-#       define R300_DEPTH_ENDIAN_NO_SWAP         (0 << 18)	/* GUESS */
-#       define R300_DEPTH_ENDIAN_WORD_SWAP       (1 << 18)	/* GUESS */
-#       define R300_DEPTH_ENDIAN_DWORD_SWAP      (2 << 18)	/* GUESS */
+#       define R300_DEPTHPITCH_MASK              0x00001FF8 /* GUESS */
+#       define R300_DEPTH_TILE_ENABLE            (1 << 16) /* GUESS */
+#       define R300_DEPTH_MICROTILE_ENABLE       (1 << 17) /* GUESS */
+#       define R300_DEPTH_ENDIAN_NO_SWAP         (0 << 18) /* GUESS */
+#       define R300_DEPTH_ENDIAN_WORD_SWAP       (1 << 18) /* GUESS */
+#       define R300_DEPTH_ENDIAN_DWORD_SWAP      (2 << 18) /* GUESS */
 
-/* BEGIN: Vertex program instruction set
-// Every instruction is four dwords long:
-//  DWORD 0: output and opcode
-//  DWORD 1: first argument
-//  DWORD 2: second argument
-//  DWORD 3: third argument
-//
-// Notes:
-//  - ABS r, a is implemented as MAX r, a, -a
-//  - MOV is implemented as ADD to zero
-//  - XPD is implemented as MUL + MAD
-//  - FLR is implemented as FRC + ADD
-//  - apparently, fglrx tries to schedule instructions so that there is at least
-//    one instruction between the write to a temporary and the first read
-//    from said temporary; however, violations of this scheduling are allowed
-//  - register indices seem to be unrelated with OpenGL aliasing to conventional state
-//  - only one attribute and one parameter can be loaded at a time; however, the
-//    same attribute/parameter can be used for more than one argument
-//  - the second software argument for POW is the third hardware argument (no idea why)
-//  - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
-//
-// There is some magic surrounding LIT:
-//  The single argument is replicated across all three inputs, but swizzled:
-//   First argument: xyzy
-//   Second argument: xyzx
-//   Third argument: xyzw
-//  Whenever the result is used later in the fragment program, fglrx forces x and w
-//  to be 1.0 in the input selection; I don't know whether this is strictly necessary */
+/* BEGIN: Vertex program instruction set */
+
+/* Every instruction is four dwords long:
+ *  DWORD 0: output and opcode
+ *  DWORD 1: first argument
+ *  DWORD 2: second argument
+ *  DWORD 3: third argument
+ *
+ * Notes:
+ *  - ABS r, a is implemented as MAX r, a, -a
+ *  - MOV is implemented as ADD to zero
+ *  - XPD is implemented as MUL + MAD
+ *  - FLR is implemented as FRC + ADD
+ *  - apparently, fglrx tries to schedule instructions so that there is at
+ *    least one instruction between the write to a temporary and the first
+ *    read from said temporary; however, violations of this scheduling are
+ *    allowed
+ *  - register indices seem to be unrelated with OpenGL aliasing to
+ *    conventional state
+ *  - only one attribute and one parameter can be loaded at a time; however,
+ *    the same attribute/parameter can be used for more than one argument
+ *  - the second software argument for POW is the third hardware argument
+ *    (no idea why)
+ *  - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
+ *
+ * There is some magic surrounding LIT:
+ *   The single argument is replicated across all three inputs, but swizzled:
+ *     First argument: xyzy
+ *     Second argument: xyzx
+ *     Third argument: xyzw
+ *   Whenever the result is used later in the fragment program, fglrx forces
+ *   x and w to be 1.0 in the input selection; I don't know whether this is
+ *   strictly necessary
+ */
 #define R300_VPI_OUT_OP_DOT                     (1 << 0)
 #define R300_VPI_OUT_OP_MUL                     (2 << 0)
 #define R300_VPI_OUT_OP_ADD                     (3 << 0)
@@ -1318,26 +1478,33 @@
 #define R300_VPI_OUT_OP_MIN                     (8 << 0)
 #define R300_VPI_OUT_OP_SGE                     (9 << 0)
 #define R300_VPI_OUT_OP_SLT                     (10 << 0)
-#define R300_VPI_OUT_OP_UNK12                   (12 << 0)	/* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
+	/* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
+#define R300_VPI_OUT_OP_UNK12                   (12 << 0)
+#define R300_VPI_OUT_OP_ARL                     (13 << 0)
 #define R300_VPI_OUT_OP_EXP                     (65 << 0)
 #define R300_VPI_OUT_OP_LOG                     (66 << 0)
-#define R300_VPI_OUT_OP_UNK67                   (67 << 0)	/* Used in fog computations, scalar(scalar) */
+	/* Used in fog computations, scalar(scalar) */
+#define R300_VPI_OUT_OP_UNK67                   (67 << 0)
 #define R300_VPI_OUT_OP_LIT                     (68 << 0)
 #define R300_VPI_OUT_OP_POW                     (69 << 0)
 #define R300_VPI_OUT_OP_RCP                     (70 << 0)
 #define R300_VPI_OUT_OP_RSQ                     (72 << 0)
-#define R300_VPI_OUT_OP_UNK73                   (73 << 0)	/* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
+	/* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
+#define R300_VPI_OUT_OP_UNK73                   (73 << 0)
 #define R300_VPI_OUT_OP_EX2                     (75 << 0)
 #define R300_VPI_OUT_OP_LG2                     (76 << 0)
 #define R300_VPI_OUT_OP_MAD_2                   (128 << 0)
-#define R300_VPI_OUT_OP_UNK129                  (129 << 0)	/* all temps, vector(scalar, vector, vector) */
+	/* all temps, vector(scalar, vector, vector) */
+#define R300_VPI_OUT_OP_UNK129                  (129 << 0)
 
 #define R300_VPI_OUT_REG_CLASS_TEMPORARY        (0 << 8)
+#define R300_VPI_OUT_REG_CLASS_ADDR             (1 << 8)
 #define R300_VPI_OUT_REG_CLASS_RESULT           (2 << 8)
 #define R300_VPI_OUT_REG_CLASS_MASK             (31 << 8)
 
 #define R300_VPI_OUT_REG_INDEX_SHIFT            13
-#define R300_VPI_OUT_REG_INDEX_MASK             (31 << 13)	/* GUESS based on fglrx native limits */
+	/* GUESS based on fglrx native limits */
+#define R300_VPI_OUT_REG_INDEX_MASK             (31 << 13)
 
 #define R300_VPI_OUT_WRITE_X                    (1 << 20)
 #define R300_VPI_OUT_WRITE_Y                    (1 << 21)
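A small sketch (not from the patch) of the DWORD 0 layout described above: an ADD that writes the x and y components of temporary register 1, built only from defines in this header.

#include <stdint.h>
#include "r300_reg.h"

/* DWORD 0 of a vertex ALU slot: opcode, destination class/index, write mask. */
static uint32_t r300_vpi_dw0_add_temp1_xy(void)
{
	return R300_VPI_OUT_OP_ADD
	     | R300_VPI_OUT_REG_CLASS_TEMPORARY
	     | (1 << R300_VPI_OUT_REG_INDEX_SHIFT)
	     | R300_VPI_OUT_WRITE_X
	     | R300_VPI_OUT_WRITE_Y;
}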
@@ -1348,14 +1515,16 @@
 #define R300_VPI_IN_REG_CLASS_ATTRIBUTE         (1 << 0)
 #define R300_VPI_IN_REG_CLASS_PARAMETER         (2 << 0)
 #define R300_VPI_IN_REG_CLASS_NONE              (9 << 0)
-#define R300_VPI_IN_REG_CLASS_MASK              (31 << 0)	/* GUESS */
+#define R300_VPI_IN_REG_CLASS_MASK              (31 << 0)
 
 #define R300_VPI_IN_REG_INDEX_SHIFT             5
-#define R300_VPI_IN_REG_INDEX_MASK              (255 << 5)	/* GUESS based on fglrx native limits */
+	/* GUESS based on fglrx native limits */
+#define R300_VPI_IN_REG_INDEX_MASK              (255 << 5)
 
 /* The R300 can select components from the input register arbitrarily.
-// Use the following constants, shifted by the component shift you
-// want to select */
+ * Use the following constants, shifted by the component shift you
+ * want to select
+ */
 #define R300_VPI_IN_SELECT_X    0
 #define R300_VPI_IN_SELECT_Y    1
 #define R300_VPI_IN_SELECT_Z    2
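For illustration, one source argument's swizzle could be assembled from these selectors as below; note that the per-component field shifts (R300_VPI_IN_X_SHIFT and friends) are assumed from the rest of this header and do not appear in this hunk.

#include <stdint.h>
#include "r300_reg.h"

/* Select .yzx for one source argument and negate whatever lands in the
 * second component; the *_SHIFT names are assumed from elsewhere in
 * this header. */
static uint32_t r300_vpi_arg_swizzle_yzx(void)
{
	return (R300_VPI_IN_SELECT_Y << R300_VPI_IN_X_SHIFT)
	     | (R300_VPI_IN_SELECT_Z << R300_VPI_IN_Y_SHIFT)
	     | (R300_VPI_IN_SELECT_X << R300_VPI_IN_Z_SHIFT)
	     | R300_VPI_IN_NEG_Y;
}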
@@ -1373,11 +1542,11 @@
 #define R300_VPI_IN_NEG_Y                       (1 << 26)
 #define R300_VPI_IN_NEG_Z                       (1 << 27)
 #define R300_VPI_IN_NEG_W                       (1 << 28)
-/* END */
+/* END: Vertex program instruction set */
 
-//BEGIN: Packet 3 commands
+/* BEGIN: Packet 3 commands */
 
-// A primitive emission dword.
+/* A primitive emission dword. */
 #define R300_PRIM_TYPE_NONE                     (0 << 0)
 #define R300_PRIM_TYPE_POINT                    (1 << 0)
 #define R300_PRIM_TYPE_LINE                     (2 << 0)
@@ -1389,7 +1558,8 @@
 #define R300_PRIM_TYPE_RECT_LIST                (8 << 0)
 #define R300_PRIM_TYPE_3VRT_POINT_LIST          (9 << 0)
 #define R300_PRIM_TYPE_3VRT_LINE_LIST           (10 << 0)
-#define R300_PRIM_TYPE_POINT_SPRITES            (11 << 0)	// GUESS (based on r200)
+	/* GUESS (based on r200) */
+#define R300_PRIM_TYPE_POINT_SPRITES            (11 << 0)
 #define R300_PRIM_TYPE_LINE_LOOP                (12 << 0)
 #define R300_PRIM_TYPE_QUADS                    (13 << 0)
 #define R300_PRIM_TYPE_QUAD_STRIP               (14 << 0)
@@ -1399,37 +1569,58 @@
 #define R300_PRIM_WALK_LIST                     (2 << 4)
 #define R300_PRIM_WALK_RING                     (3 << 4)
 #define R300_PRIM_WALK_MASK                     (3 << 4)
-#define R300_PRIM_COLOR_ORDER_BGRA              (0 << 6)	// GUESS (based on r200)
-#define R300_PRIM_COLOR_ORDER_RGBA              (1 << 6)	// GUESS
+	/* GUESS (based on r200) */
+#define R300_PRIM_COLOR_ORDER_BGRA              (0 << 6)
+#define R300_PRIM_COLOR_ORDER_RGBA              (1 << 6)
 #define R300_PRIM_NUM_VERTICES_SHIFT            16
+#define R300_PRIM_NUM_VERTICES_MASK             0xffff
 
-// Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
-// Two parameter dwords:
-// 0. The first parameter appears to be always 0
-// 1. The second parameter is a standard primitive emission dword.
+/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
+ * Two parameter dwords:
+ * 0. The first parameter always appears to be 0
+ * 1. The second parameter is a standard primitive emission dword.
+ */
 #define R300_PACKET3_3D_DRAW_VBUF           0x00002800
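Purely as an example (not part of the patch), the "standard primitive emission dword" used as the second 3D_DRAW_VBUF parameter might be assembled like this for a single quad; the COLOR_ORDER value is still marked as a guess above.

#include <stdint.h>
#include "r300_reg.h"

/* Primitive emission dword: quad list, list walk, RGBA order, 4 vertices. */
static uint32_t r300_prim_dword_one_quad(void)
{
	return R300_PRIM_TYPE_QUADS
	     | R300_PRIM_WALK_LIST
	     | R300_PRIM_COLOR_ORDER_RGBA
	     | (4 << R300_PRIM_NUM_VERTICES_SHIFT);
}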
 
-// Specify the full set of vertex arrays as (address, stride).
-// The first parameter is the number of vertex arrays specified.
-// The rest of the command is a variable length list of blocks, where
-// each block is three dwords long and specifies two arrays.
-// The first dword of a block is split into two words, the lower significant
-// word refers to the first array, the more significant word to the second
-// array in the block.
-// The low byte of each word contains the size of an array entry in dwords,
-// the high byte contains the stride of the array.
-// The second dword of a block contains the pointer to the first array,
-// the third dword of a block contains the pointer to the second array.
-// Note that if the total number of arrays is odd, the third dword of
-// the last block is omitted.
+/* Specify the full set of vertex arrays as (address, stride).
+ * The first parameter is the number of vertex arrays specified.
+ * The rest of the command is a variable length list of blocks, where
+ * each block is three dwords long and specifies two arrays.
+ * The first dword of a block is split into two words: the less significant
+ * word refers to the first array, the more significant word to the second
+ * array in the block.
+ * The low byte of each word contains the size of an array entry in dwords,
+ * the high byte contains the stride of the array.
+ * The second dword of a block contains the pointer to the first array,
+ * the third dword of a block contains the pointer to the second array.
+ * Note that if the total number of arrays is odd, the third dword of
+ * the last block is omitted.
+ */
 #define R300_PACKET3_3D_LOAD_VBPNTR         0x00002F00
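Again only as a sketch of the block layout described above: one three-dword 3D_LOAD_VBPNTR block describing two arrays, with the per-entry size in the low byte of each word and the stride in the high byte.

#include <stdint.h>

/* Fill one 3D_LOAD_VBPNTR block: packed size/stride words, then pointers. */
static void r300_vbpntr_block(uint32_t out[3],
			      uint8_t size0, uint8_t stride0, uint32_t addr0,
			      uint8_t size1, uint8_t stride1, uint32_t addr1)
{
	out[0] = (uint32_t)size0
	       | ((uint32_t)stride0 << 8)
	       | ((uint32_t)size1   << 16)
	       | ((uint32_t)stride1 << 24);
	out[1] = addr0;		/* pointer to the first array */
	out[2] = addr1;		/* pointer to the second array */
}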
 
 #define R300_PACKET3_INDX_BUFFER            0x00003300
 #    define R300_EB_UNK1_SHIFT                      24
 #    define R300_EB_UNK1                    (0x80<<24)
 #    define R300_EB_UNK2                        0x0810
+#define R300_PACKET3_3D_DRAW_VBUF_2         0x00003400
 #define R300_PACKET3_3D_DRAW_INDX_2         0x00003600
 
-//END
+/* END: Packet 3 commands */
 
-#endif				/* _R300_REG_H */
+
+/* Color formats for 2d packets
+ */
+#define R300_CP_COLOR_FORMAT_CI8	2
+#define R300_CP_COLOR_FORMAT_ARGB1555	3
+#define R300_CP_COLOR_FORMAT_RGB565	4
+#define R300_CP_COLOR_FORMAT_ARGB8888	6
+#define R300_CP_COLOR_FORMAT_RGB332	7
+#define R300_CP_COLOR_FORMAT_RGB8	9
+#define R300_CP_COLOR_FORMAT_ARGB4444	15
+
+/*
+ * CP type-3 packets
+ */
+#define R300_CP_CMD_BITBLT_MULTI	0xC0009B00
+
+#endif /* _R300_REG_H */
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 68338389..af5790f 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -36,7 +36,7 @@
 
 #define RADEON_FIFO_DEBUG	0
 
-static int radeon_do_cleanup_cp(drm_device_t * dev);
+static int radeon_do_cleanup_cp(struct drm_device * dev);
 
 /* CP microcode (from ATI) */
 static const u32 R200_cp_microcode[][2] = {
@@ -816,7 +816,7 @@
 	{0000000000, 0000000000},
 };
 
-static int RADEON_READ_PLL(drm_device_t * dev, int addr)
+static int RADEON_READ_PLL(struct drm_device * dev, int addr)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 
@@ -1066,7 +1066,7 @@
 
 /* Reset the engine.  This will stop the CP if it is running.
  */
-static int radeon_do_engine_reset(drm_device_t * dev)
+static int radeon_do_engine_reset(struct drm_device * dev)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset;
@@ -1122,7 +1122,7 @@
 	return 0;
 }
 
-static void radeon_cp_init_ring_buffer(drm_device_t * dev,
+static void radeon_cp_init_ring_buffer(struct drm_device * dev,
 				       drm_radeon_private_t * dev_priv)
 {
 	u32 ring_start, cur_read_ptr;
@@ -1174,7 +1174,7 @@
 	} else
 #endif
 	{
-		drm_sg_mem_t *entry = dev->sg;
+		struct drm_sg_mem *entry = dev->sg;
 		unsigned long tmp_ofs, page_ofs;
 
 		tmp_ofs = dev_priv->ring_rptr->offset -
@@ -1384,7 +1384,7 @@
 	}
 }
 
-static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
+static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 
@@ -1420,6 +1420,10 @@
 		return DRM_ERR(EINVAL);
 	}
 
+	/* Enable vblank on CRTC1 for older X servers
+	 */
+	dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
+
 	switch(init->func) {
 	case RADEON_INIT_R200_CP:
 		dev_priv->microcode_version = UCODE_R200;
@@ -1501,13 +1505,13 @@
 					 RADEON_ROUND_MODE_TRUNC |
 					 RADEON_ROUND_PREC_8TH_PIX);
 
-	DRM_GETSAREA();
 
 	dev_priv->ring_offset = init->ring_offset;
 	dev_priv->ring_rptr_offset = init->ring_rptr_offset;
 	dev_priv->buffers_offset = init->buffers_offset;
 	dev_priv->gart_textures_offset = init->gart_textures_offset;
 
+	dev_priv->sarea = drm_getsarea(dev);
 	if (!dev_priv->sarea) {
 		DRM_ERROR("could not find sarea!\n");
 		radeon_do_cleanup_cp(dev);
@@ -1731,7 +1735,7 @@
 	return 0;
 }
 
-static int radeon_do_cleanup_cp(drm_device_t * dev)
+static int radeon_do_cleanup_cp(struct drm_device * dev)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	DRM_DEBUG("\n");
@@ -1787,7 +1791,7 @@
  *
  * Charl P. Botha <http://cpbotha.net>
  */
-static int radeon_do_resume_cp(drm_device_t * dev)
+static int radeon_do_resume_cp(struct drm_device * dev)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 
@@ -1914,7 +1918,7 @@
 	return 0;
 }
 
-void radeon_do_release(drm_device_t * dev)
+void radeon_do_release(struct drm_device * dev)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	int i, ret;
@@ -2042,12 +2046,12 @@
  * they can't get the lock.
  */
 
-drm_buf_t *radeon_freelist_get(drm_device_t * dev)
+struct drm_buf *radeon_freelist_get(struct drm_device * dev)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	drm_radeon_buf_priv_t *buf_priv;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 	int i, t;
 	int start;
 
@@ -2082,12 +2086,12 @@
 }
 
 #if 0
-drm_buf_t *radeon_freelist_get(drm_device_t * dev)
+struct drm_buf *radeon_freelist_get(struct drm_device * dev)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	drm_radeon_buf_priv_t *buf_priv;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 	int i, t;
 	int start;
 	u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1));
@@ -2116,15 +2120,15 @@
 }
 #endif
 
-void radeon_freelist_reset(drm_device_t * dev)
+void radeon_freelist_reset(struct drm_device * dev)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	int i;
 
 	dev_priv->last_buf = 0;
 	for (i = 0; i < dma->buf_count; i++) {
-		drm_buf_t *buf = dma->buflist[i];
+		struct drm_buf *buf = dma->buflist[i];
 		drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
 		buf_priv->age = 0;
 	}
@@ -2166,11 +2170,11 @@
 	return DRM_ERR(EBUSY);
 }
 
-static int radeon_cp_get_buffers(DRMFILE filp, drm_device_t * dev,
-				 drm_dma_t * d)
+static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev,
+				 struct drm_dma * d)
 {
 	int i;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 
 	for (i = d->granted_count; i < d->request_count; i++) {
 		buf = radeon_freelist_get(dev);
@@ -2194,10 +2198,10 @@
 int radeon_cp_buffers(DRM_IOCTL_ARGS)
 {
 	DRM_DEVICE;
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	int ret = 0;
-	drm_dma_t __user *argp = (void __user *)data;
-	drm_dma_t d;
+	struct drm_dma __user *argp = (void __user *)data;
+	struct drm_dma d;
 
 	LOCK_TEST_WITH_RETURN(dev, filp);
 
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
index 66c4b6f..5a8e23f 100644
--- a/drivers/char/drm/radeon_drm.h
+++ b/drivers/char/drm/radeon_drm.h
@@ -417,7 +417,7 @@
 
 	/* The current cliprects, or a subset thereof.
 	 */
-	drm_clip_rect_t boxes[RADEON_NR_SAREA_CLIPRECTS];
+	struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
 	unsigned int nbox;
 
 	/* Counters for client-side throttling of rendering clients.
@@ -426,7 +426,7 @@
 	unsigned int last_dispatch;
 	unsigned int last_clear;
 
-	drm_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
+	struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
 						       1];
 	unsigned int tex_age[RADEON_NR_TEX_HEAPS];
 	int ctx_owner;
@@ -604,7 +604,7 @@
 	int bufsz;
 	char __user *buf;
 	int nbox;
-	drm_clip_rect_t __user *boxes;
+	struct drm_clip_rect __user *boxes;
 } drm_radeon_cmd_buffer_t;
 
 typedef struct drm_radeon_tex_image {
@@ -655,6 +655,7 @@
 #define RADEON_PARAM_GART_TEX_HANDLE       10
 #define RADEON_PARAM_SCRATCH_OFFSET        11
 #define RADEON_PARAM_CARD_TYPE             12
+#define RADEON_PARAM_VBLANK_CRTC           13   /* VBLANK CRTC */
 
 typedef struct drm_radeon_getparam {
 	int param;
@@ -708,7 +709,7 @@
 #define RADEON_SETPARAM_PCIGART_LOCATION 3	/* PCI Gart Location */
 #define RADEON_SETPARAM_NEW_MEMMAP 4		/* Use new memory map */
 #define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5    /* PCI GART Table Size */
-
+#define RADEON_SETPARAM_VBLANK_CRTC 6           /* VBLANK CRTC */
 /* 1.14: Clients can allocate/free a surface
  */
 typedef struct drm_radeon_surface_alloc {
@@ -721,4 +722,7 @@
 	unsigned int address;
 } drm_radeon_surface_free_t;
 
+#define	DRM_RADEON_VBLANK_CRTC1 	1
+#define	DRM_RADEON_VBLANK_CRTC2 	2
+
 #endif
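From the userspace side, a client could opt into the new behaviour roughly as follows; this is a hedged sketch that assumes libdrm's drmCommandWrite() and the existing drm_radeon_setparam_t / DRM_RADEON_SETPARAM definitions, none of which are changed by this patch.

#include <string.h>
#include <xf86drm.h>
#include "radeon_drm.h"

/* Ask the DRM to count vblanks for CRTC2 instead of the default CRTC1. */
static int radeon_use_crtc2_vblank(int fd)
{
	drm_radeon_setparam_t sp;

	memset(&sp, 0, sizeof(sp));
	sp.param = RADEON_SETPARAM_VBLANK_CRTC;
	sp.value = DRM_RADEON_VBLANK_CRTC2;

	return drmCommandWrite(fd, DRM_RADEON_SETPARAM, &sp, sizeof(sp));
}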
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c
index 2eb652e..349ac3d 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/char/drm/radeon_drv.c
@@ -60,7 +60,7 @@
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
 	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
-	    DRIVER_IRQ_VBL,
+	    DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
 	.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
 	.load = radeon_driver_load,
 	.firstopen = radeon_driver_firstopen,
@@ -70,6 +70,7 @@
 	.lastclose = radeon_driver_lastclose,
 	.unload = radeon_driver_unload,
 	.vblank_wait = radeon_driver_vblank_wait,
+	.vblank_wait2 = radeon_driver_vblank_wait2,
 	.dri_library_name = dri_library_name,
 	.irq_preinstall = radeon_driver_irq_preinstall,
 	.irq_postinstall = radeon_driver_irq_postinstall,
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 54f49ef..3b3d935 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -97,9 +97,10 @@
  *       new packet type)
  * 1.26- Add support for variable size PCI(E) gart aperture
  * 1.27- Add support for IGP GART
+ * 1.28- Add support for VBL on CRTC2
  */
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		27
+#define DRIVER_MINOR		28
 #define DRIVER_PATCHLEVEL	0
 
 /*
@@ -154,7 +155,7 @@
 
 typedef struct drm_radeon_freelist {
 	unsigned int age;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 	struct drm_radeon_freelist *next;
 	struct drm_radeon_freelist *prev;
 } drm_radeon_freelist_t;
@@ -277,13 +278,16 @@
 	/* SW interrupt */
 	wait_queue_head_t swi_queue;
 	atomic_t swi_emitted;
+	int vblank_crtc;
+	uint32_t irq_enable_reg;
+	int irq_enabled;
 
 	struct radeon_surface surfaces[RADEON_MAX_SURFACES];
 	struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
 
 	unsigned long pcigart_offset;
 	unsigned int pcigart_offset_set;
-	drm_ati_pcigart_info gart_info;
+	struct drm_ati_pcigart_info gart_info;
 
 	u32 scratch_ages[5];
 
@@ -299,7 +303,7 @@
 	int bufsz;
 	char *buf;
 	int nbox;
-	drm_clip_rect_t __user *boxes;
+	struct drm_clip_rect __user *boxes;
 } drm_radeon_kcmd_buffer_t;
 
 extern int radeon_no_wb;
@@ -332,8 +336,8 @@
 extern int radeon_fullscreen(DRM_IOCTL_ARGS);
 extern int radeon_cp_buffers(DRM_IOCTL_ARGS);
 
-extern void radeon_freelist_reset(drm_device_t * dev);
-extern drm_buf_t *radeon_freelist_get(drm_device_t * dev);
+extern void radeon_freelist_reset(struct drm_device * dev);
+extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
 
 extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);
 
@@ -353,29 +357,33 @@
 extern int radeon_irq_emit(DRM_IOCTL_ARGS);
 extern int radeon_irq_wait(DRM_IOCTL_ARGS);
 
-extern void radeon_do_release(drm_device_t * dev);
-extern int radeon_driver_vblank_wait(drm_device_t * dev,
+extern void radeon_do_release(struct drm_device * dev);
+extern int radeon_driver_vblank_wait(struct drm_device * dev,
 				     unsigned int *sequence);
+extern int radeon_driver_vblank_wait2(struct drm_device * dev,
+				      unsigned int *sequence);
 extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
-extern void radeon_driver_irq_preinstall(drm_device_t * dev);
-extern void radeon_driver_irq_postinstall(drm_device_t * dev);
-extern void radeon_driver_irq_uninstall(drm_device_t * dev);
+extern void radeon_driver_irq_preinstall(struct drm_device * dev);
+extern void radeon_driver_irq_postinstall(struct drm_device * dev);
+extern void radeon_driver_irq_uninstall(struct drm_device * dev);
+extern int radeon_vblank_crtc_get(struct drm_device *dev);
+extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
 
 extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
 extern int radeon_driver_unload(struct drm_device *dev);
 extern int radeon_driver_firstopen(struct drm_device *dev);
-extern void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp);
-extern void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp);
-extern void radeon_driver_lastclose(drm_device_t * dev);
-extern int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv);
+extern void radeon_driver_preclose(struct drm_device * dev, DRMFILE filp);
+extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp);
+extern void radeon_driver_lastclose(struct drm_device * dev);
+extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv);
 extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
 				unsigned long arg);
 
 /* r300_cmdbuf.c */
 extern void r300_init_reg_flags(void);
 
-extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
-			     drm_file_t * filp_priv,
+extern int r300_do_cp_cmdbuf(struct drm_device * dev, DRMFILE filp,
+			     struct drm_file * filp_priv,
 			     drm_radeon_kcmd_buffer_t * cmdbuf);
 
 /* Flags for stats.boxes
@@ -496,12 +504,15 @@
 
 #define RADEON_GEN_INT_CNTL		0x0040
 #	define RADEON_CRTC_VBLANK_MASK		(1 << 0)
+#	define RADEON_CRTC2_VBLANK_MASK		(1 << 9)
 #	define RADEON_GUI_IDLE_INT_ENABLE	(1 << 19)
 #	define RADEON_SW_INT_ENABLE		(1 << 25)
 
 #define RADEON_GEN_INT_STATUS		0x0044
 #	define RADEON_CRTC_VBLANK_STAT		(1 << 0)
 #	define RADEON_CRTC_VBLANK_STAT_ACK   	(1 << 0)
+#	define RADEON_CRTC2_VBLANK_STAT		(1 << 9)
+#	define RADEON_CRTC2_VBLANK_STAT_ACK   	(1 << 9)
 #	define RADEON_GUI_IDLE_INT_TEST_ACK     (1 << 19)
 #	define RADEON_SW_INT_TEST		(1 << 25)
 #	define RADEON_SW_INT_TEST_ACK   	(1 << 25)
diff --git a/drivers/char/drm/radeon_irq.c b/drivers/char/drm/radeon_irq.c
index 3ff0baa..ad8a0ac 100644
--- a/drivers/char/drm/radeon_irq.c
+++ b/drivers/char/drm/radeon_irq.c
@@ -64,7 +64,7 @@
 
 irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
 {
-	drm_device_t *dev = (drm_device_t *) arg;
+	struct drm_device *dev = (struct drm_device *) arg;
 	drm_radeon_private_t *dev_priv =
 	    (drm_radeon_private_t *) dev->dev_private;
 	u32 stat;
@@ -73,18 +73,35 @@
 	 * outside the DRM
 	 */
 	stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
-						  RADEON_CRTC_VBLANK_STAT));
+						  RADEON_CRTC_VBLANK_STAT |
+						  RADEON_CRTC2_VBLANK_STAT));
 	if (!stat)
 		return IRQ_NONE;
 
+	stat &= dev_priv->irq_enable_reg;
+
 	/* SW interrupt */
 	if (stat & RADEON_SW_INT_TEST) {
 		DRM_WAKEUP(&dev_priv->swi_queue);
 	}
 
 	/* VBLANK interrupt */
-	if (stat & RADEON_CRTC_VBLANK_STAT) {
-		atomic_inc(&dev->vbl_received);
+	if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) {
+		int vblank_crtc = dev_priv->vblank_crtc;
+
+		if ((vblank_crtc &
+		     (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
+		    (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
+			if (stat & RADEON_CRTC_VBLANK_STAT)
+				atomic_inc(&dev->vbl_received);
+			if (stat & RADEON_CRTC2_VBLANK_STAT)
+				atomic_inc(&dev->vbl_received2);
+		} else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
+			   (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
+			   ((stat & RADEON_CRTC2_VBLANK_STAT) &&
+			    (vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
+			atomic_inc(&dev->vbl_received);
+
 		DRM_WAKEUP(&dev->vbl_queue);
 		drm_vbl_send_signals(dev);
 	}
@@ -92,7 +109,7 @@
 	return IRQ_HANDLED;
 }
 
-static int radeon_emit_irq(drm_device_t * dev)
+static int radeon_emit_irq(struct drm_device * dev)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	unsigned int ret;
@@ -110,7 +127,7 @@
 	return ret;
 }
 
-static int radeon_wait_irq(drm_device_t * dev, int swi_nr)
+static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
 {
 	drm_radeon_private_t *dev_priv =
 	    (drm_radeon_private_t *) dev->dev_private;
@@ -127,19 +144,30 @@
 	return ret;
 }
 
-int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
+int radeon_driver_vblank_do_wait(struct drm_device * dev, unsigned int *sequence,
+				 int crtc)
 {
 	drm_radeon_private_t *dev_priv =
 	    (drm_radeon_private_t *) dev->dev_private;
 	unsigned int cur_vblank;
 	int ret = 0;
-
+	int ack = 0;
+	atomic_t *counter;
 	if (!dev_priv) {
 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
 		return DRM_ERR(EINVAL);
 	}
 
-	radeon_acknowledge_irqs(dev_priv, RADEON_CRTC_VBLANK_STAT);
+	if (crtc == DRM_RADEON_VBLANK_CRTC1) {
+		counter = &dev->vbl_received;
+		ack |= RADEON_CRTC_VBLANK_STAT;
+	} else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
+		counter = &dev->vbl_received2;
+		ack |= RADEON_CRTC2_VBLANK_STAT;
+	} else
+		return DRM_ERR(EINVAL);
+
+	radeon_acknowledge_irqs(dev_priv, ack);
 
 	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
 
@@ -148,7 +176,7 @@
 	 * using vertical blanks...
 	 */
 	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(&dev->vbl_received))
+		    (((cur_vblank = atomic_read(counter))
 		      - *sequence) <= (1 << 23)));
 
 	*sequence = cur_vblank;
@@ -156,6 +184,16 @@
 	return ret;
 }
 
+int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
+{
+	return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
+}
+
+int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
+{
+	return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
+}
+
 /* Needs the lock as it touches the ring.
  */
 int radeon_irq_emit(DRM_IOCTL_ARGS)
@@ -204,9 +242,24 @@
 	return radeon_wait_irq(dev, irqwait.irq_seq);
 }
 
+static void radeon_enable_interrupt(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+
+	dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
+	if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
+		dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;
+
+	if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
+		dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;
+
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+	dev_priv->irq_enabled = 1;
+}
+
 /* drm_dma.h hooks
 */
-void radeon_driver_irq_preinstall(drm_device_t * dev)
+void radeon_driver_irq_preinstall(struct drm_device * dev)
 {
 	drm_radeon_private_t *dev_priv =
 	    (drm_radeon_private_t *) dev->dev_private;
@@ -216,10 +269,11 @@
 
 	/* Clear bits if they're already high */
 	radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
-					   RADEON_CRTC_VBLANK_STAT));
+					   RADEON_CRTC_VBLANK_STAT |
+					   RADEON_CRTC2_VBLANK_STAT));
 }
 
-void radeon_driver_irq_postinstall(drm_device_t * dev)
+void radeon_driver_irq_postinstall(struct drm_device * dev)
 {
 	drm_radeon_private_t *dev_priv =
 	    (drm_radeon_private_t *) dev->dev_private;
@@ -227,18 +281,48 @@
 	atomic_set(&dev_priv->swi_emitted, 0);
 	DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
 
-	/* Turn on SW and VBL ints */
-	RADEON_WRITE(RADEON_GEN_INT_CNTL,
-		     RADEON_CRTC_VBLANK_MASK | RADEON_SW_INT_ENABLE);
+	radeon_enable_interrupt(dev);
 }
 
-void radeon_driver_irq_uninstall(drm_device_t * dev)
+void radeon_driver_irq_uninstall(struct drm_device * dev)
 {
 	drm_radeon_private_t *dev_priv =
 	    (drm_radeon_private_t *) dev->dev_private;
 	if (!dev_priv)
 		return;
 
+	dev_priv->irq_enabled = 0;
+
 	/* Disable *all* interrupts */
 	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
 }
+
+
+int radeon_vblank_crtc_get(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+	u32 flag;
+	u32 value;
+
+	flag = RADEON_READ(RADEON_GEN_INT_CNTL);
+	value = 0;
+
+	if (flag & RADEON_CRTC_VBLANK_MASK)
+		value |= DRM_RADEON_VBLANK_CRTC1;
+
+	if (flag & RADEON_CRTC2_VBLANK_MASK)
+		value |= DRM_RADEON_VBLANK_CRTC2;
+	return value;
+}
+
+int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
+{
+	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+	if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
+		DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
+		return DRM_ERR(EINVAL);
+	}
+	dev_priv->vblank_crtc = (unsigned int)value;
+	radeon_enable_interrupt(dev);
+	return 0;
+}
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 98c5f1d..3ddf86f 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -39,7 +39,7 @@
 
 static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
 						    dev_priv,
-						    drm_file_t * filp_priv,
+						    struct drm_file * filp_priv,
 						    u32 *offset)
 {
 	u64 off = *offset;
@@ -90,7 +90,7 @@
 
 static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
 						     dev_priv,
-						     drm_file_t * filp_priv,
+						     struct drm_file * filp_priv,
 						     int id, u32 *data)
 {
 	switch (id) {
@@ -264,7 +264,7 @@
 
 static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
 						     dev_priv,
-						     drm_file_t *filp_priv,
+						     struct drm_file *filp_priv,
 						     drm_radeon_kcmd_buffer_t *
 						     cmdbuf,
 						     unsigned int *cmdsz)
@@ -421,7 +421,7 @@
  */
 
 static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
-					     drm_clip_rect_t * box)
+					     struct drm_clip_rect * box)
 {
 	RING_LOCALS;
 
@@ -439,7 +439,7 @@
 /* Emit 1.1 state
  */
 static int radeon_emit_state(drm_radeon_private_t * dev_priv,
-			     drm_file_t * filp_priv,
+			     struct drm_file * filp_priv,
 			     drm_radeon_context_regs_t * ctx,
 			     drm_radeon_texture_regs_t * tex,
 			     unsigned int dirty)
@@ -608,7 +608,7 @@
 /* Emit 1.2 state
  */
 static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
-			      drm_file_t * filp_priv,
+			      struct drm_file * filp_priv,
 			      drm_radeon_state_t * state)
 {
 	RING_LOCALS;
@@ -844,7 +844,7 @@
  * CP command dispatch functions
  */
 
-static void radeon_cp_dispatch_clear(drm_device_t * dev,
+static void radeon_cp_dispatch_clear(struct drm_device * dev,
 				     drm_radeon_clear_t * clear,
 				     drm_radeon_clear_rect_t * depth_boxes)
 {
@@ -852,7 +852,7 @@
 	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
 	int nbox = sarea_priv->nbox;
-	drm_clip_rect_t *pbox = sarea_priv->boxes;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
 	unsigned int flags = clear->flags;
 	u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
 	int i;
@@ -1335,12 +1335,12 @@
 	ADVANCE_RING();
 }
 
-static void radeon_cp_dispatch_swap(drm_device_t * dev)
+static void radeon_cp_dispatch_swap(struct drm_device * dev)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	int nbox = sarea_priv->nbox;
-	drm_clip_rect_t *pbox = sarea_priv->boxes;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
 	int i;
 	RING_LOCALS;
 	DRM_DEBUG("\n");
@@ -1412,10 +1412,10 @@
 	ADVANCE_RING();
 }
 
-static void radeon_cp_dispatch_flip(drm_device_t * dev)
+static void radeon_cp_dispatch_flip(struct drm_device * dev)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_sarea_t *sarea = (drm_sarea_t *) dev_priv->sarea->handle;
+	struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle;
 	int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
 	    ? dev_priv->front_offset : dev_priv->back_offset;
 	RING_LOCALS;
@@ -1491,8 +1491,8 @@
 	unsigned int vc_format;
 } drm_radeon_tcl_prim_t;
 
-static void radeon_cp_dispatch_vertex(drm_device_t * dev,
-				      drm_buf_t * buf,
+static void radeon_cp_dispatch_vertex(struct drm_device * dev,
+				      struct drm_buf * buf,
 				      drm_radeon_tcl_prim_t * prim)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -1537,7 +1537,7 @@
 	} while (i < nbox);
 }
 
-static void radeon_cp_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
+static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
@@ -1554,8 +1554,8 @@
 	buf->used = 0;
 }
 
-static void radeon_cp_dispatch_indirect(drm_device_t * dev,
-					drm_buf_t * buf, int start, int end)
+static void radeon_cp_dispatch_indirect(struct drm_device * dev,
+					struct drm_buf * buf, int start, int end)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
@@ -1588,8 +1588,8 @@
 	}
 }
 
-static void radeon_cp_dispatch_indices(drm_device_t * dev,
-				       drm_buf_t * elt_buf,
+static void radeon_cp_dispatch_indices(struct drm_device * dev,
+				       struct drm_buf * elt_buf,
 				       drm_radeon_tcl_prim_t * prim)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -1647,13 +1647,13 @@
 #define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
 
 static int radeon_cp_dispatch_texture(DRMFILE filp,
-				      drm_device_t * dev,
+				      struct drm_device * dev,
 				      drm_radeon_texture_t * tex,
 				      drm_radeon_tex_image_t * image)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_file_t *filp_priv;
-	drm_buf_t *buf;
+	struct drm_file *filp_priv;
+	struct drm_buf *buf;
 	u32 format;
 	u32 *buffer;
 	const u8 __user *data;
@@ -1881,7 +1881,7 @@
 	return 0;
 }
 
-static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
+static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	int i;
@@ -2134,7 +2134,7 @@
 
 /* Not sure why this isn't set all the time:
  */
-static int radeon_do_init_pageflip(drm_device_t * dev)
+static int radeon_do_init_pageflip(struct drm_device * dev)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
@@ -2206,10 +2206,10 @@
 {
 	DRM_DEVICE;
 	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_file_t *filp_priv;
+	struct drm_file *filp_priv;
 	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
 	drm_radeon_vertex_t vertex;
 	drm_radeon_tcl_prim_t prim;
 
@@ -2289,10 +2289,10 @@
 {
 	DRM_DEVICE;
 	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_file_t *filp_priv;
+	struct drm_file *filp_priv;
 	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
 	drm_radeon_indices_t elts;
 	drm_radeon_tcl_prim_t prim;
 	int count;
@@ -2438,8 +2438,8 @@
 {
 	DRM_DEVICE;
 	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
 	drm_radeon_indirect_t indirect;
 	RING_LOCALS;
 
@@ -2507,10 +2507,10 @@
 {
 	DRM_DEVICE;
 	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_file_t *filp_priv;
+	struct drm_file *filp_priv;
 	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
 	drm_radeon_vertex2_t vertex;
 	int i;
 	unsigned char laststate;
@@ -2603,7 +2603,7 @@
 }
 
 static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
-			       drm_file_t * filp_priv,
+			       struct drm_file * filp_priv,
 			       drm_radeon_cmd_header_t header,
 			       drm_radeon_kcmd_buffer_t *cmdbuf)
 {
@@ -2728,8 +2728,8 @@
 	return 0;
 }
 
-static int radeon_emit_packet3(drm_device_t * dev,
-			       drm_file_t * filp_priv,
+static int radeon_emit_packet3(struct drm_device * dev,
+			       struct drm_file * filp_priv,
 			       drm_radeon_kcmd_buffer_t *cmdbuf)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -2754,16 +2754,16 @@
 	return 0;
 }
 
-static int radeon_emit_packet3_cliprect(drm_device_t *dev,
-					drm_file_t *filp_priv,
+static int radeon_emit_packet3_cliprect(struct drm_device *dev,
+					struct drm_file *filp_priv,
 					drm_radeon_kcmd_buffer_t *cmdbuf,
 					int orig_nbox)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_clip_rect_t box;
+	struct drm_clip_rect box;
 	unsigned int cmdsz;
 	int ret;
-	drm_clip_rect_t __user *boxes = cmdbuf->boxes;
+	struct drm_clip_rect __user *boxes = cmdbuf->boxes;
 	int i = 0;
 	RING_LOCALS;
 
@@ -2816,7 +2816,7 @@
 	return 0;
 }
 
-static int radeon_emit_wait(drm_device_t * dev, int flags)
+static int radeon_emit_wait(struct drm_device * dev, int flags)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
@@ -2849,9 +2849,9 @@
 {
 	DRM_DEVICE;
 	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_file_t *filp_priv;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf = NULL;
+	struct drm_file *filp_priv;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf = NULL;
 	int idx;
 	drm_radeon_kcmd_buffer_t cmdbuf;
 	drm_radeon_cmd_header_t header;
@@ -3085,6 +3085,9 @@
 		else
 			value = RADEON_CARD_PCI;
 		break;
+	case RADEON_PARAM_VBLANK_CRTC:
+		value = radeon_vblank_crtc_get(dev);
+		break;
 	default:
 		DRM_DEBUG("Invalid parameter %d\n", param.param);
 		return DRM_ERR(EINVAL);
@@ -3102,7 +3105,7 @@
 {
 	DRM_DEVICE;
 	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_file_t *filp_priv;
+	struct drm_file *filp_priv;
 	drm_radeon_setparam_t sp;
 	struct drm_radeon_driver_file_fields *radeon_priv;
 
@@ -3141,6 +3144,9 @@
 		if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
 			dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
 		break;
+	case RADEON_SETPARAM_VBLANK_CRTC:
+		return radeon_vblank_crtc_set(dev, sp.value);
+		break;
 	default:
 		DRM_DEBUG("Invalid parameter %d\n", sp.param);
 		return DRM_ERR(EINVAL);
@@ -3156,7 +3162,7 @@
  *
  * DRM infrastructure takes care of reclaiming dma buffers.
  */
-void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp)
+void radeon_driver_preclose(struct drm_device *dev, DRMFILE filp)
 {
 	if (dev->dev_private) {
 		drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -3167,7 +3173,7 @@
 	}
 }
 
-void radeon_driver_lastclose(drm_device_t * dev)
+void radeon_driver_lastclose(struct drm_device *dev)
 {
 	if (dev->dev_private) {
 		drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -3180,7 +3186,7 @@
 	radeon_do_release(dev);
 }
 
-int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
+int radeon_driver_open(struct drm_device *dev, struct drm_file *filp_priv)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	struct drm_radeon_driver_file_fields *radeon_priv;
@@ -3202,7 +3208,7 @@
 	return 0;
 }
 
-void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp_priv)
+void radeon_driver_postclose(struct drm_device *dev, struct drm_file *filp_priv)
 {
 	struct drm_radeon_driver_file_fields *radeon_priv =
 	    filp_priv->driver_priv;
diff --git a/drivers/char/drm/savage_bci.c b/drivers/char/drm/savage_bci.c
index b94fab5..18c7235 100644
--- a/drivers/char/drm/savage_bci.c
+++ b/drivers/char/drm/savage_bci.c
@@ -32,7 +32,7 @@
 #define SAVAGE_EVENT_USEC_TIMEOUT	5000000	/* 5s */
 #define SAVAGE_FREELIST_DEBUG		0
 
-static int savage_do_cleanup_bci(drm_device_t *dev);
+static int savage_do_cleanup_bci(struct drm_device *dev);
 
 static int
 savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
@@ -203,11 +203,11 @@
 /*
  * Freelist management
  */
-static int savage_freelist_init(drm_device_t * dev)
+static int savage_freelist_init(struct drm_device * dev)
 {
 	drm_savage_private_t *dev_priv = dev->dev_private;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *buf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
 	drm_savage_buf_priv_t *entry;
 	int i;
 	DRM_DEBUG("count=%d\n", dma->buf_count);
@@ -236,7 +236,7 @@
 	return 0;
 }
 
-static drm_buf_t *savage_freelist_get(drm_device_t * dev)
+static struct drm_buf *savage_freelist_get(struct drm_device * dev)
 {
 	drm_savage_private_t *dev_priv = dev->dev_private;
 	drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
@@ -269,7 +269,7 @@
 	return NULL;
 }
 
-void savage_freelist_put(drm_device_t * dev, drm_buf_t * buf)
+void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf)
 {
 	drm_savage_private_t *dev_priv = dev->dev_private;
 	drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
@@ -535,7 +535,7 @@
 	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
 }
 
-int savage_driver_load(drm_device_t *dev, unsigned long chipset)
+int savage_driver_load(struct drm_device *dev, unsigned long chipset)
 {
 	drm_savage_private_t *dev_priv;
 
@@ -558,7 +558,7 @@
  * in drm_addmap. Therefore we add them manually before the maps are
  * initialized, and tear them down on last close.
  */
-int savage_driver_firstopen(drm_device_t *dev)
+int savage_driver_firstopen(struct drm_device *dev)
 {
 	drm_savage_private_t *dev_priv = dev->dev_private;
 	unsigned long mmio_base, fb_base, fb_size, aperture_base;
@@ -655,7 +655,7 @@
 /*
  * Delete MTRRs and free device-private data.
  */
-void savage_driver_lastclose(drm_device_t *dev)
+void savage_driver_lastclose(struct drm_device *dev)
 {
 	drm_savage_private_t *dev_priv = dev->dev_private;
 	int i;
@@ -667,7 +667,7 @@
 				 dev_priv->mtrr[i].size, DRM_MTRR_WC);
 }
 
-int savage_driver_unload(drm_device_t *dev)
+int savage_driver_unload(struct drm_device *dev)
 {
 	drm_savage_private_t *dev_priv = dev->dev_private;
 
@@ -676,7 +676,7 @@
 	return 0;
 }
 
-static int savage_do_init_bci(drm_device_t * dev, drm_savage_init_t * init)
+static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
 {
 	drm_savage_private_t *dev_priv = dev->dev_private;
 
@@ -711,7 +711,7 @@
 	dev_priv->texture_offset = init->texture_offset;
 	dev_priv->texture_size = init->texture_size;
 
-	DRM_GETSAREA();
+	dev_priv->sarea = drm_getsarea(dev);
 	if (!dev_priv->sarea) {
 		DRM_ERROR("could not find sarea!\n");
 		savage_do_cleanup_bci(dev);
@@ -898,7 +898,7 @@
 	return 0;
 }
 
-static int savage_do_cleanup_bci(drm_device_t * dev)
+static int savage_do_cleanup_bci(struct drm_device * dev)
 {
 	drm_savage_private_t *dev_priv = dev->dev_private;
 
@@ -1007,9 +1007,9 @@
  * DMA buffer management
  */
 
-static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
+static int savage_bci_get_buffers(DRMFILE filp, struct drm_device *dev, struct drm_dma *d)
 {
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 	int i;
 
 	for (i = d->granted_count; i < d->request_count; i++) {
@@ -1034,13 +1034,13 @@
 int savage_bci_buffers(DRM_IOCTL_ARGS)
 {
 	DRM_DEVICE;
-	drm_device_dma_t *dma = dev->dma;
-	drm_dma_t d;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_dma d;
 	int ret = 0;
 
 	LOCK_TEST_WITH_RETURN(dev, filp);
 
-	DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *) data, sizeof(d));
+	DRM_COPY_FROM_USER_IOCTL(d, (struct drm_dma __user *) data, sizeof(d));
 
 	/* Please don't send us buffers.
 	 */
@@ -1064,14 +1064,14 @@
 		ret = savage_bci_get_buffers(filp, dev, &d);
 	}
 
-	DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *) data, d, sizeof(d));
+	DRM_COPY_TO_USER_IOCTL((struct drm_dma __user *) data, d, sizeof(d));
 
 	return ret;
 }
 
-void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
+void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp)
 {
-	drm_device_dma_t *dma = dev->dma;
+	struct drm_device_dma *dma = dev->dma;
 	drm_savage_private_t *dev_priv = dev->dev_private;
 	int i;
 
@@ -1085,7 +1085,7 @@
 	/*i830_flush_queue(dev); */
 
 	for (i = 0; i < dma->buf_count; i++) {
-		drm_buf_t *buf = dma->buflist[i];
+		struct drm_buf *buf = dma->buflist[i];
 		drm_savage_buf_priv_t *buf_priv = buf->dev_private;
 
 		if (buf->filp == filp && buf_priv &&
diff --git a/drivers/char/drm/savage_drm.h b/drivers/char/drm/savage_drm.h
index e1148e8..8a576ef 100644
--- a/drivers/char/drm/savage_drm.h
+++ b/drivers/char/drm/savage_drm.h
@@ -47,7 +47,7 @@
 typedef struct _drm_savage_sarea {
 	/* LRU lists for texture memory in agp space and on the card.
 	 */
-	drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS +
+	struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS +
 						      1];
 	unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
 
@@ -113,7 +113,7 @@
 	unsigned int vb_size;	/* size of client vertex buffer in bytes */
 	unsigned int vb_stride;	/* stride of vertices in 32bit words */
 	/* boxes in client's address space */
-	drm_clip_rect_t __user *box_addr;
+	struct drm_clip_rect __user *box_addr;
 	unsigned int nbox;	/* number of clipping boxes */
 } drm_savage_cmdbuf_t;
 
diff --git a/drivers/char/drm/savage_drv.h b/drivers/char/drm/savage_drv.h
index 8f04b3d..5fd54de 100644
--- a/drivers/char/drm/savage_drv.h
+++ b/drivers/char/drm/savage_drv.h
@@ -58,7 +58,7 @@
 	struct drm_savage_buf_priv *next;
 	struct drm_savage_buf_priv *prev;
 	drm_savage_age_t age;
-	drm_buf_t *buf;
+	struct drm_buf *buf;
 } drm_savage_buf_priv_t;
 
 typedef struct drm_savage_dma_page {
@@ -192,7 +192,7 @@
 	/* Err, there is a macro wait_event in include/linux/wait.h.
 	 * Avoid unwanted macro expansion. */
 	void (*emit_clip_rect) (struct drm_savage_private * dev_priv,
-				const drm_clip_rect_t * pbox);
+				const struct drm_clip_rect * pbox);
 	void (*dma_flush) (struct drm_savage_private * dev_priv);
 } drm_savage_private_t;
 
@@ -203,22 +203,22 @@
 /* BCI functions */
 extern uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
 				      unsigned int flags);
-extern void savage_freelist_put(drm_device_t * dev, drm_buf_t * buf);
+extern void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf);
 extern void savage_dma_reset(drm_savage_private_t * dev_priv);
 extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page);
 extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv,
 				  unsigned int n);
-extern int savage_driver_load(drm_device_t *dev, unsigned long chipset);
-extern int savage_driver_firstopen(drm_device_t *dev);
-extern void savage_driver_lastclose(drm_device_t *dev);
-extern int savage_driver_unload(drm_device_t *dev);
-extern void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp);
+extern int savage_driver_load(struct drm_device *dev, unsigned long chipset);
+extern int savage_driver_firstopen(struct drm_device *dev);
+extern void savage_driver_lastclose(struct drm_device *dev);
+extern int savage_driver_unload(struct drm_device *dev);
+extern void savage_reclaim_buffers(struct drm_device * dev, DRMFILE filp);
 
 /* state functions */
 extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
-				      const drm_clip_rect_t * pbox);
+				      const struct drm_clip_rect * pbox);
 extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
-				     const drm_clip_rect_t * pbox);
+				     const struct drm_clip_rect * pbox);
 
 #define SAVAGE_FB_SIZE_S3	0x01000000	/*  16MB */
 #define SAVAGE_FB_SIZE_S4	0x02000000	/*  32MB */
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c
index 1ca1e9c..7749784 100644
--- a/drivers/char/drm/savage_state.c
+++ b/drivers/char/drm/savage_state.c
@@ -27,7 +27,7 @@
 #include "savage_drv.h"
 
 void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
-			       const drm_clip_rect_t * pbox)
+			       const struct drm_clip_rect * pbox)
 {
 	uint32_t scstart = dev_priv->state.s3d.new_scstart;
 	uint32_t scend = dev_priv->state.s3d.new_scend;
@@ -53,7 +53,7 @@
 }
 
 void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
-			      const drm_clip_rect_t * pbox)
+			      const struct drm_clip_rect * pbox)
 {
 	uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
 	uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
@@ -277,7 +277,7 @@
 
 static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
 				    const drm_savage_cmd_header_t * cmd_header,
-				    const drm_buf_t * dmabuf)
+				    const struct drm_buf * dmabuf)
 {
 	unsigned char reorder = 0;
 	unsigned int prim = cmd_header->prim.prim;
@@ -536,7 +536,7 @@
 static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
 				   const drm_savage_cmd_header_t * cmd_header,
 				   const uint16_t *idx,
-				   const drm_buf_t * dmabuf)
+				   const struct drm_buf * dmabuf)
 {
 	unsigned char reorder = 0;
 	unsigned int prim = cmd_header->idx.prim;
@@ -792,7 +792,7 @@
 				 const drm_savage_cmd_header_t * cmd_header,
 				 const drm_savage_cmd_header_t *data,
 				 unsigned int nbox,
-				 const drm_clip_rect_t *boxes)
+				 const struct drm_clip_rect *boxes)
 {
 	unsigned int flags = cmd_header->clear0.flags;
 	unsigned int clear_cmd;
@@ -861,7 +861,7 @@
 }
 
 static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
-				unsigned int nbox, const drm_clip_rect_t *boxes)
+				unsigned int nbox, const struct drm_clip_rect *boxes)
 {
 	unsigned int swap_cmd;
 	unsigned int i;
@@ -892,11 +892,11 @@
 static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
 				const drm_savage_cmd_header_t *start,
 				const drm_savage_cmd_header_t *end,
-				const drm_buf_t * dmabuf,
+				const struct drm_buf * dmabuf,
 				const unsigned int *vtxbuf,
 				unsigned int vb_size, unsigned int vb_stride,
 				unsigned int nbox,
-				const drm_clip_rect_t *boxes)
+				const struct drm_clip_rect *boxes)
 {
 	unsigned int i, j;
 	int ret;
@@ -957,13 +957,13 @@
 {
 	DRM_DEVICE;
 	drm_savage_private_t *dev_priv = dev->dev_private;
-	drm_device_dma_t *dma = dev->dma;
-	drm_buf_t *dmabuf;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *dmabuf;
 	drm_savage_cmdbuf_t cmdbuf;
 	drm_savage_cmd_header_t *kcmd_addr = NULL;
 	drm_savage_cmd_header_t *first_draw_cmd;
 	unsigned int *kvb_addr = NULL;
-	drm_clip_rect_t *kbox_addr = NULL;
+	struct drm_clip_rect *kbox_addr = NULL;
 	unsigned int i, j;
 	int ret = 0;
 
@@ -1019,7 +1019,7 @@
 		cmdbuf.vb_addr = kvb_addr;
 	}
 	if (cmdbuf.nbox) {
-		kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(drm_clip_rect_t),
+		kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(struct drm_clip_rect),
 				       DRM_MEM_DRIVER);
 		if (kbox_addr == NULL) {
 			ret = DRM_ERR(ENOMEM);
@@ -1027,7 +1027,7 @@
 		}
 
 		if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr,
-				       cmdbuf.nbox * sizeof(drm_clip_rect_t))) {
+				       cmdbuf.nbox * sizeof(struct drm_clip_rect))) {
 			ret = DRM_ERR(EFAULT);
 			goto done;
 		}
@@ -1158,7 +1158,7 @@
 	/* If we didn't need to allocate them, these'll be NULL */
 	drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
 	drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER);
-	drm_free(kbox_addr, cmdbuf.nbox * sizeof(drm_clip_rect_t),
+	drm_free(kbox_addr, cmdbuf.nbox * sizeof(struct drm_clip_rect),
 		 DRM_MEM_DRIVER);
 
 	return ret;
diff --git a/drivers/char/drm/sis_drv.c b/drivers/char/drm/sis_drv.c
index 690e0af..1912f58 100644
--- a/drivers/char/drm/sis_drv.c
+++ b/drivers/char/drm/sis_drv.c
@@ -35,7 +35,7 @@
 	sisdrv_PCI_IDS
 };
 
-static int sis_driver_load(drm_device_t *dev, unsigned long chipset)
+static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
 {
 	drm_sis_private_t *dev_priv;
 	int ret;
@@ -54,7 +54,7 @@
 	return ret;
 }
 
-static int sis_driver_unload(drm_device_t *dev)
+static int sis_driver_unload(struct drm_device *dev)
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 
diff --git a/drivers/char/drm/sis_drv.h b/drivers/char/drm/sis_drv.h
index 70d4ede..5630df8 100644
--- a/drivers/char/drm/sis_drv.h
+++ b/drivers/char/drm/sis_drv.h
@@ -46,6 +46,7 @@
 
 #include "drm_sman.h"
 
+
 #define SIS_BASE (dev_priv->mmio)
 #define SIS_READ(reg)         DRM_READ32(SIS_BASE, reg);
 #define SIS_WRITE(reg, val)   DRM_WRITE32(SIS_BASE, reg, val);
@@ -53,7 +54,7 @@
 typedef struct drm_sis_private {
 	drm_local_map_t *mmio;
 	unsigned int idle_fault;
-	drm_sman_t sman;
+	struct drm_sman sman;
 	unsigned int chipset;
 	int vram_initialized;
 	int agp_initialized;
@@ -61,9 +62,9 @@
 	unsigned long agp_offset;
 } drm_sis_private_t;
 
-extern int sis_idle(drm_device_t *dev);
-extern void sis_reclaim_buffers_locked(drm_device_t *dev, struct file *filp);
-extern void sis_lastclose(drm_device_t *dev);
+extern int sis_idle(struct drm_device *dev);
+extern void sis_reclaim_buffers_locked(struct drm_device *dev, struct file *filp);
+extern void sis_lastclose(struct drm_device *dev);
 
 extern drm_ioctl_desc_t sis_ioctls[];
 extern int sis_max_ioctl;
diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c
index d26f5db..441bbdb 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/char/drm/sis_mm.c
@@ -94,7 +94,7 @@
 	mutex_lock(&dev->struct_mutex);
 #if defined(CONFIG_FB_SIS)
 	{
-		drm_sman_mm_t sman_mm;
+		struct drm_sman_mm sman_mm;
 		sman_mm.private = (void *)0xFFFFFFFF;
 		sman_mm.allocate = sis_sman_mm_allocate;
 		sman_mm.free = sis_sman_mm_free;
@@ -123,14 +123,14 @@
 	return 0;
 }
 
-static int sis_drm_alloc(drm_device_t * dev, drm_file_t * priv,
+static int sis_drm_alloc(struct drm_device *dev, struct drm_file * priv,
 			 unsigned long data, int pool)
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 	drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *) data;
 	drm_sis_mem_t mem;
 	int retval = 0;
-	drm_memblock_item_t *item;
+	struct drm_memblock_item *item;
 
 	DRM_COPY_FROM_USER_IOCTL(mem, argp, sizeof(mem));
 
@@ -229,12 +229,12 @@
 	return sis_drm_alloc(dev, priv, data, AGP_TYPE);
 }
 
-static drm_local_map_t *sis_reg_init(drm_device_t *dev)
+static drm_local_map_t *sis_reg_init(struct drm_device *dev)
 {
-	drm_map_list_t *entry;
+	struct drm_map_list *entry;
 	drm_local_map_t *map;
 
-	list_for_each_entry(entry, &dev->maplist->head, head) {
+	list_for_each_entry(entry, &dev->maplist, head) {
 		map = entry->map;
 		if (!map)
 			continue;
@@ -245,7 +245,7 @@
 	return NULL;
 }
 
-int sis_idle(drm_device_t *dev)
+int sis_idle(struct drm_device *dev)
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 	uint32_t idle_reg;
@@ -314,10 +314,10 @@
 	mutex_unlock(&dev->struct_mutex);
 }
 
-void sis_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
+void sis_reclaim_buffers_locked(struct drm_device * dev, struct file *filp)
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
-	drm_file_t *priv = filp->private_data;
+	struct drm_file *priv = filp->private_data;
 
 	mutex_lock(&dev->struct_mutex);
 	if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) {
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c
index 13a9c5c..7ff2b62 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/char/drm/via_dma.c
@@ -151,7 +151,7 @@
 	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
 }
 
-int via_dma_cleanup(drm_device_t * dev)
+int via_dma_cleanup(struct drm_device * dev)
 {
 	if (dev->dev_private) {
 		drm_via_private_t *dev_priv =
@@ -169,7 +169,7 @@
 	return 0;
 }
 
-static int via_initialize(drm_device_t * dev,
+static int via_initialize(struct drm_device * dev,
 			  drm_via_private_t * dev_priv,
 			  drm_via_dma_init_t * init)
 {
@@ -262,7 +262,7 @@
 	return retcode;
 }
 
-static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
+static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd)
 {
 	drm_via_private_t *dev_priv;
 	uint32_t *vb;
@@ -316,7 +316,7 @@
 	return 0;
 }
 
-int via_driver_dma_quiescent(drm_device_t * dev)
+int via_driver_dma_quiescent(struct drm_device * dev)
 {
 	drm_via_private_t *dev_priv = dev->dev_private;
 
@@ -356,7 +356,7 @@
 	return 0;
 }
 
-static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
+static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
 				      drm_via_cmdbuffer_t * cmd)
 {
 	drm_via_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index 2881a06..832de1d 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -207,7 +207,7 @@
  */
 
 static void
-via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
+via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
 
@@ -273,10 +273,9 @@
 	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) / 
 		vsg->descriptors_per_page;
 
-	if (NULL ==  (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL))) 
+	if (NULL ==  (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
 		return DRM_ERR(ENOMEM);
 	
-	memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
 	vsg->state = dr_via_desc_pages_alloc;
 	for (i=0; i<vsg->num_desc_pages; ++i) {
 		if (NULL == (vsg->desc_pages[i] = 
@@ -289,7 +288,7 @@
 }
 			
 static void
-via_abort_dmablit(drm_device_t *dev, int engine)
+via_abort_dmablit(struct drm_device *dev, int engine)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
 
@@ -297,7 +296,7 @@
 }
 
 static void
-via_dmablit_engine_off(drm_device_t *dev, int engine)
+via_dmablit_engine_off(struct drm_device *dev, int engine)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
 
@@ -314,7 +313,7 @@
  */
 		
 void
-via_dmablit_handler(drm_device_t *dev, int engine, int from_irq)
+via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
 	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
@@ -433,7 +432,7 @@
  */
 
 static int
-via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine) 
+via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine) 
 {
 
 	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -466,7 +465,7 @@
 via_dmablit_timer(unsigned long data)
 {
 	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
-	drm_device_t *dev = blitq->dev;
+	struct drm_device *dev = blitq->dev;
 	int engine = (int)
 		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
 		
@@ -502,7 +501,7 @@
 via_dmablit_workqueue(struct work_struct *work)
 {
 	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
-	drm_device_t *dev = blitq->dev;
+	struct drm_device *dev = blitq->dev;
 	unsigned long irqsave;
 	drm_via_sg_info_t *cur_sg;
 	int cur_released;
@@ -545,7 +544,7 @@
 
 
 void
-via_init_dmablit(drm_device_t *dev)
+via_init_dmablit(struct drm_device *dev)
 {
 	int i,j;
 	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -582,7 +581,7 @@
 		
 
 static int
-via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
+via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 {
 	int draw = xfer->to_fb;
 	int ret = 0;
@@ -730,7 +729,7 @@
 
 
 static int 
-via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)	 
+via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)	 
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
 	drm_via_sg_info_t *vsg;
diff --git a/drivers/char/drm/via_dmablit.h b/drivers/char/drm/via_dmablit.h
index f4036cd..6f6a513 100644
--- a/drivers/char/drm/via_dmablit.h
+++ b/drivers/char/drm/via_dmablit.h
@@ -59,7 +59,7 @@
 } drm_via_sg_info_t;
 
 typedef struct _drm_via_blitq {
-	drm_device_t *dev;
+	struct drm_device *dev;
 	uint32_t cur_blit_handle;
 	uint32_t done_blit_handle;
 	unsigned serviced;
diff --git a/drivers/char/drm/via_drm.h b/drivers/char/drm/via_drm.h
index e4ee97d..8f53c76 100644
--- a/drivers/char/drm/via_drm.h
+++ b/drivers/char/drm/via_drm.h
@@ -40,7 +40,7 @@
 #define VIA_NR_XVMC_LOCKS               5
 #define VIA_MAX_CACHELINE_SIZE          64
 #define XVMCLOCKPTR(saPriv,lockNo)					\
-	((volatile drm_hw_lock_t *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
+	((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
 				      (VIA_MAX_CACHELINE_SIZE - 1)) &	\
 				     ~(VIA_MAX_CACHELINE_SIZE - 1)) +	\
 				    VIA_MAX_CACHELINE_SIZE*(lockNo)))
@@ -182,7 +182,7 @@
 typedef struct _drm_via_sarea {
 	unsigned int dirty;
 	unsigned int nbox;
-	drm_clip_rect_t boxes[VIA_NR_SAREA_CLIPRECTS];
+	struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
 	drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
 	int texAge;		/* last time texture was uploaded */
 	int ctxOwner;		/* last context to upload state */
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index b46ca8e6..5767115 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -87,7 +87,7 @@
 	uint32_t irq_pending_mask;
 	int *irq_map;
 	unsigned int idle_fault;
-	drm_sman_t sman;
+	struct drm_sman sman;
 	int vram_initialized;
 	int agp_initialized;
 	unsigned long vram_offset;
@@ -123,31 +123,31 @@
 extern int via_dma_blit_sync( DRM_IOCTL_ARGS );
 extern int via_dma_blit( DRM_IOCTL_ARGS );
 
-extern int via_driver_load(drm_device_t *dev, unsigned long chipset);
-extern int via_driver_unload(drm_device_t *dev);
+extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
+extern int via_driver_unload(struct drm_device *dev);
 
-extern int via_init_context(drm_device_t * dev, int context);
-extern int via_final_context(drm_device_t * dev, int context);
+extern int via_init_context(struct drm_device * dev, int context);
+extern int via_final_context(struct drm_device * dev, int context);
 
-extern int via_do_cleanup_map(drm_device_t * dev);
-extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
+extern int via_do_cleanup_map(struct drm_device * dev);
+extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
 
 extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
-extern void via_driver_irq_preinstall(drm_device_t * dev);
-extern void via_driver_irq_postinstall(drm_device_t * dev);
-extern void via_driver_irq_uninstall(drm_device_t * dev);
+extern void via_driver_irq_preinstall(struct drm_device * dev);
+extern void via_driver_irq_postinstall(struct drm_device * dev);
+extern void via_driver_irq_uninstall(struct drm_device * dev);
 
-extern int via_dma_cleanup(drm_device_t * dev);
+extern int via_dma_cleanup(struct drm_device * dev);
 extern void via_init_command_verifier(void);
-extern int via_driver_dma_quiescent(drm_device_t * dev);
+extern int via_driver_dma_quiescent(struct drm_device * dev);
 extern void via_init_futex(drm_via_private_t * dev_priv);
 extern void via_cleanup_futex(drm_via_private_t * dev_priv);
 extern void via_release_futex(drm_via_private_t * dev_priv, int context);
 
-extern void via_reclaim_buffers_locked(drm_device_t *dev, struct file *filp);
-extern void via_lastclose(drm_device_t *dev);
+extern void via_reclaim_buffers_locked(struct drm_device *dev, struct file *filp);
+extern void via_lastclose(struct drm_device *dev);
 
-extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq);
-extern void via_init_dmablit(drm_device_t *dev);
+extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
+extern void via_init_dmablit(struct drm_device *dev);
 
 #endif
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index 1ac5941..8dc99b5 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -98,7 +98,7 @@
 
 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
 {
-	drm_device_t *dev = (drm_device_t *) arg;
+	struct drm_device *dev = (struct drm_device *) arg;
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	u32 status;
 	int handled = 0;
@@ -163,7 +163,7 @@
 	}
 }
 
-int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
+int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	unsigned int cur_vblank;
@@ -191,7 +191,7 @@
 }
 
 static int
-via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
+via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence,
 		    unsigned int *sequence)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -244,7 +244,7 @@
  * drm_dma.h hooks
  */
 
-void via_driver_irq_preinstall(drm_device_t * dev)
+void via_driver_irq_preinstall(struct drm_device * dev)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	u32 status;
@@ -293,7 +293,7 @@
 	}
 }
 
-void via_driver_irq_postinstall(drm_device_t * dev)
+void via_driver_irq_postinstall(struct drm_device * dev)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	u32 status;
@@ -312,7 +312,7 @@
 	}
 }
 
-void via_driver_irq_uninstall(drm_device_t * dev)
+void via_driver_irq_uninstall(struct drm_device * dev)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	u32 status;
diff --git a/drivers/char/drm/via_map.c b/drivers/char/drm/via_map.c
index 4e3fc07..7fb9d2a 100644
--- a/drivers/char/drm/via_map.c
+++ b/drivers/char/drm/via_map.c
@@ -25,13 +25,13 @@
 #include "via_drm.h"
 #include "via_drv.h"
 
-static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
+static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
 {
 	drm_via_private_t *dev_priv = dev->dev_private;
 
 	DRM_DEBUG("%s\n", __FUNCTION__);
 
-	DRM_GETSAREA();
+	dev_priv->sarea = drm_getsarea(dev);
 	if (!dev_priv->sarea) {
 		DRM_ERROR("could not find sarea!\n");
 		dev->dev_private = (void *)dev_priv;
@@ -68,7 +68,7 @@
 	return 0;
 }
 
-int via_do_cleanup_map(drm_device_t * dev)
+int via_do_cleanup_map(struct drm_device * dev)
 {
 	via_dma_cleanup(dev);
 
@@ -95,7 +95,7 @@
 	return -EINVAL;
 }
 
-int via_driver_load(drm_device_t *dev, unsigned long chipset)
+int via_driver_load(struct drm_device *dev, unsigned long chipset)
 {
 	drm_via_private_t *dev_priv;
 	int ret = 0;
@@ -115,7 +115,7 @@
 	return ret;
 }
 
-int via_driver_unload(drm_device_t *dev)
+int via_driver_unload(struct drm_device *dev)
 {
 	drm_via_private_t *dev_priv = dev->dev_private;
 
diff --git a/drivers/char/drm/via_mm.c b/drivers/char/drm/via_mm.c
index 2fcf057..85d56ac 100644
--- a/drivers/char/drm/via_mm.c
+++ b/drivers/char/drm/via_mm.c
@@ -127,7 +127,7 @@
 
 	drm_via_mem_t mem;
 	int retval = 0;
-	drm_memblock_item_t *item;
+	struct drm_memblock_item *item;
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	unsigned long tmpSize;
 
@@ -188,10 +188,10 @@
 }
 
 
-void via_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
+void via_reclaim_buffers_locked(struct drm_device * dev, struct file *filp)
 {
 	drm_via_private_t *dev_priv = dev->dev_private;
-	drm_file_t *priv = filp->private_data;
+	struct drm_file *priv = filp->private_data;
 
 	mutex_lock(&dev->struct_mutex);
 	if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) {
diff --git a/drivers/char/drm/via_verifier.c b/drivers/char/drm/via_verifier.c
index 2e7e080..832d483 100644
--- a/drivers/char/drm/via_verifier.c
+++ b/drivers/char/drm/via_verifier.c
@@ -252,10 +252,9 @@
 static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
 						    unsigned long offset,
 						    unsigned long size,
-						    drm_device_t * dev)
+						    struct drm_device * dev)
 {
-	struct list_head *list;
-	drm_map_list_t *r_list;
+	struct drm_map_list *r_list;
 	drm_local_map_t *map = seq->map_cache;
 
 	if (map && map->offset <= offset
@@ -263,8 +262,7 @@
 		return map;
 	}
 
-	list_for_each(list, &dev->maplist->head) {
-		r_list = (drm_map_list_t *) list;
+	list_for_each_entry(r_list, &dev->maplist, head) {
 		map = r_list->map;
 		if (!map)
 			continue;
@@ -964,7 +962,7 @@
 
 int
 via_verify_command_stream(const uint32_t * buf, unsigned int size,
-			  drm_device_t * dev, int agp)
+			  struct drm_device * dev, int agp)
 {
 
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -1039,7 +1037,7 @@
 }
 
 int
-via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
+via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
 			 unsigned int size)
 {
 
diff --git a/drivers/char/drm/via_verifier.h b/drivers/char/drm/via_verifier.h
index b77f59d..28b5029 100644
--- a/drivers/char/drm/via_verifier.h
+++ b/drivers/char/drm/via_verifier.h
@@ -47,7 +47,7 @@
 	drm_via_sequence_t unfinished;
 	int agp_texture;
 	int multitex;
-	drm_device_t *dev;
+	struct drm_device *dev;
 	drm_local_map_t *map_cache;
 	uint32_t vertex_count;
 	int agp;
@@ -55,8 +55,8 @@
 } drm_via_state_t;
 
 extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
-				     drm_device_t * dev, int agp);
-extern int via_parse_command_stream(drm_device_t *dev, const uint32_t *buf,
+				     struct drm_device * dev, int agp);
+extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
 				    unsigned int size);
 
 #endif
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index 74cd511..2e7ae42 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -2459,7 +2459,7 @@
 		return 1;
 	}
 
-	info = kmalloc(sizeof(struct esp_struct), GFP_KERNEL);
+	info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL);
 
 	if (!info)
 	{
@@ -2469,7 +2469,6 @@
 		return 1;
 	}
 
-	memset((void *)info, 0, sizeof(struct esp_struct));
 	spin_lock_init(&info->lock);
 	/* rx_trigger, tx_trigger are needed by autoconfig */
 	info->config.rx_trigger = rx_trigger;
@@ -2527,7 +2526,7 @@
 		if (!dma)
 			info->stat_flags |= ESP_STAT_NEVER_DMA;
 
-		info = kmalloc(sizeof(struct esp_struct), GFP_KERNEL);
+		info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL);
 		if (!info)
 		{
 			printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n"); 
@@ -2536,7 +2535,6 @@
 			return 0;
 		}
 
-		memset((void *)info, 0, sizeof(struct esp_struct));
 		/* rx_trigger, tx_trigger are needed by autoconfig */
 		info->config.rx_trigger = rx_trigger;
 		info->config.tx_trigger = tx_trigger;
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 322bc5f..83c1151 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -674,11 +674,12 @@
  * calling hvc_poll() who determines whether a console adapter support
  * interrupts.
  */
-int khvcd(void *unused)
+static int khvcd(void *unused)
 {
 	int poll_mask;
 	struct hvc_struct *hp;
 
+	set_freezable();
 	__set_current_state(TASK_RUNNING);
 	do {
 		poll_mask = 0;
diff --git a/drivers/char/hvc_lguest.c b/drivers/char/hvc_lguest.c
new file mode 100644
index 0000000..e7b889e
--- /dev/null
+++ b/drivers/char/hvc_lguest.c
@@ -0,0 +1,102 @@
+/* Simple console for lguest.
+ *
+ * Copyright (C) 2006 Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/lguest_bus.h>
+#include "hvc_console.h"
+
+static char inbuf[256];
+static struct lguest_dma cons_input = { .used_len = 0,
+					.addr[0] = __pa(inbuf),
+					.len[0] = sizeof(inbuf),
+					.len[1] = 0 };
+
+static int put_chars(u32 vtermno, const char *buf, int count)
+{
+	struct lguest_dma dma;
+
+	/* FIXME: what if it's over a page boundary? */
+	dma.len[0] = count;
+	dma.len[1] = 0;
+	dma.addr[0] = __pa(buf);
+
+	lguest_send_dma(LGUEST_CONSOLE_DMA_KEY, &dma);
+	return count;
+}
+
+static int get_chars(u32 vtermno, char *buf, int count)
+{
+	static int cons_offset;
+
+	if (!cons_input.used_len)
+		return 0;
+
+	if (cons_input.used_len - cons_offset < count)
+		count = cons_input.used_len - cons_offset;
+
+	memcpy(buf, inbuf + cons_offset, count);
+	cons_offset += count;
+	if (cons_offset == cons_input.used_len) {
+		cons_offset = 0;
+		cons_input.used_len = 0;
+	}
+	return count;
+}
+
+static struct hv_ops lguest_cons = {
+	.get_chars = get_chars,
+	.put_chars = put_chars,
+};
+
+static int __init cons_init(void)
+{
+	if (strcmp(paravirt_ops.name, "lguest") != 0)
+		return 0;
+
+	return hvc_instantiate(0, 0, &lguest_cons);
+}
+console_initcall(cons_init);
+
+static int lguestcons_probe(struct lguest_device *lgdev)
+{
+	int err;
+
+	lgdev->private = hvc_alloc(0, lgdev_irq(lgdev), &lguest_cons, 256);
+	if (IS_ERR(lgdev->private))
+		return PTR_ERR(lgdev->private);
+
+	err = lguest_bind_dma(LGUEST_CONSOLE_DMA_KEY, &cons_input, 1,
+			      lgdev_irq(lgdev));
+	if (err)
+		printk("lguest console: failed to bind buffer.\n");
+	return err;
+}
+
+static struct lguest_driver lguestcons_drv = {
+	.name = "lguestcons",
+	.owner = THIS_MODULE,
+	.device_type = LGUEST_DEVICE_T_CONSOLE,
+	.probe = lguestcons_probe,
+};
+
+static int __init hvc_lguest_init(void)
+{
+	return register_lguest_driver(&lguestcons_drv);
+}
+module_init(hvc_lguest_init);
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
new file mode 100644
index 0000000..dd68f85
--- /dev/null
+++ b/drivers/char/hvc_xen.c
@@ -0,0 +1,159 @@
+/*
+ * xen console driver interface to hvc_console.c
+ *
+ * (c) 2007 Gerd Hoffmann <kraxel@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <asm/xen/hypervisor.h>
+#include <xen/page.h>
+#include <xen/events.h>
+#include <xen/interface/io/console.h>
+#include <xen/hvc-console.h>
+
+#include "hvc_console.h"
+
+#define HVC_COOKIE   0x58656e /* "Xen" in hex */
+
+static struct hvc_struct *hvc;
+static int xencons_irq;
+
+/* ------------------------------------------------------------------ */
+
+static inline struct xencons_interface *xencons_interface(void)
+{
+	return mfn_to_virt(xen_start_info->console.domU.mfn);
+}
+
+static inline void notify_daemon(void)
+{
+	/* Use evtchn: this is called early, before irq is set up. */
+	notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
+}
+
+static int write_console(uint32_t vtermno, const char *data, int len)
+{
+	struct xencons_interface *intf = xencons_interface();
+	XENCONS_RING_IDX cons, prod;
+	int sent = 0;
+
+	cons = intf->out_cons;
+	prod = intf->out_prod;
+	mb();			/* update queue values before going on */
+	BUG_ON((prod - cons) > sizeof(intf->out));
+
+	while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
+		intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
+
+	wmb();			/* write ring before updating pointer */
+	intf->out_prod = prod;
+
+	notify_daemon();
+	return sent;
+}
+
+static int read_console(uint32_t vtermno, char *buf, int len)
+{
+	struct xencons_interface *intf = xencons_interface();
+	XENCONS_RING_IDX cons, prod;
+	int recv = 0;
+
+	cons = intf->in_cons;
+	prod = intf->in_prod;
+	mb();			/* get pointers before reading ring */
+	BUG_ON((prod - cons) > sizeof(intf->in));
+
+	while (cons != prod && recv < len)
+		buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
+
+	mb();			/* read ring before consuming */
+	intf->in_cons = cons;
+
+	notify_daemon();
+	return recv;
+}
+
+static struct hv_ops hvc_ops = {
+	.get_chars = read_console,
+	.put_chars = write_console,
+};
+
+static int __init xen_init(void)
+{
+	struct hvc_struct *hp;
+
+	if (!is_running_on_xen())
+		return 0;
+
+	xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
+	if (xencons_irq < 0)
+		xencons_irq = 0 /* NO_IRQ */;
+	hp = hvc_alloc(HVC_COOKIE, xencons_irq, &hvc_ops, 256);
+	if (IS_ERR(hp))
+		return PTR_ERR(hp);
+
+	hvc = hp;
+	return 0;
+}
+
+static void __exit xen_fini(void)
+{
+	if (hvc)
+		hvc_remove(hvc);
+}
+
+static int xen_cons_init(void)
+{
+	if (!is_running_on_xen())
+		return 0;
+
+	hvc_instantiate(HVC_COOKIE, 0, &hvc_ops);
+	return 0;
+}
+
+module_init(xen_init);
+module_exit(xen_fini);
+console_initcall(xen_cons_init);
+
+static void xenboot_write_console(struct console *console, const char *string,
+				  unsigned len)
+{
+	unsigned int linelen, off = 0;
+	const char *pos;
+
+	while (off < len && NULL != (pos = strchr(string+off, '\n'))) {
+		linelen = pos-string+off;
+		if (off + linelen > len)
+			break;
+		write_console(0, string+off, linelen);
+		write_console(0, "\r\n", 2);
+		off += linelen + 1;
+	}
+	if (off < len)
+		write_console(0, string+off, len-off);
+}
+
+struct console xenboot_console = {
+	.name		= "xenboot",
+	.write		= xenboot_write_console,
+	.flags		= CON_PRINTBUFFER | CON_BOOT,
+};
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 207f734..17f96e0 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -784,12 +784,10 @@
 		return -EFAULT;
 	}
 
-	hvcsd = kmalloc(sizeof(*hvcsd), GFP_KERNEL);
+	hvcsd = kzalloc(sizeof(*hvcsd), GFP_KERNEL);
 	if (!hvcsd)
 		return -ENODEV;
 
-	/* hvcsd->tty is zeroed out with the memset */
-	memset(hvcsd, 0x00, sizeof(*hvcsd));
 
 	spin_lock_init(&hvcsd->lock);
 	/* Automatically incs the refcount the first time */
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 83c7258..6005b52 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -425,9 +425,7 @@
 		printk(KERN_ERR "IP2: failed to unregister tty driver (%d)\n", err);
 	}
 	put_tty_driver(ip2_tty_driver);
-	if ( ( err = unregister_chrdev ( IP2_IPL_MAJOR, pcIpl ) ) ) {
-		printk(KERN_ERR "IP2: failed to unregister IPL driver (%d)\n", err);
-	}
+	unregister_chrdev(IP2_IPL_MAJOR, pcIpl);
 	remove_proc_entry("ip2mem", &proc_root);
 
 	// free memory
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index b5df7e6..6a01dd9 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2639,10 +2639,9 @@
 			return -ENODEV;
 	}
 
-	intf = kmalloc(sizeof(*intf), GFP_KERNEL);
+	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
 	if (!intf)
 		return -ENOMEM;
-	memset(intf, 0, sizeof(*intf));
 
 	intf->ipmi_version_major = ipmi_version_major(device_id);
 	intf->ipmi_version_minor = ipmi_version_minor(device_id);
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 761f777..77a7a4a 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -171,9 +171,6 @@
 static int prev_card = 3;	/*	start servicing isi_card[0]	*/
 static struct tty_driver *isicom_normal;
 
-static DECLARE_COMPLETION(isi_timerdone);
-static char re_schedule = 1;
-
 static void isicom_tx(unsigned long _data);
 static void isicom_start(struct tty_struct *tty);
 
@@ -187,7 +184,7 @@
 
 struct	isi_board {
 	unsigned long		base;
-	unsigned char		irq;
+	int			irq;
 	unsigned char		port_count;
 	unsigned short		status;
 	unsigned short		port_status; /* each bit for each port */
@@ -227,7 +224,7 @@
  *	it wants to talk.
  */
 
-static inline int WaitTillCardIsFree(u16 base)
+static inline int WaitTillCardIsFree(unsigned long base)
 {
 	unsigned int count = 0;
 	unsigned int a = in_atomic(); /* do we run under spinlock? */
@@ -243,17 +240,18 @@
 
 static int lock_card(struct isi_board *card)
 {
-	char		retries;
 	unsigned long base = card->base;
+	unsigned int retries, a;
 
-	for (retries = 0; retries < 100; retries++) {
+	for (retries = 0; retries < 10; retries++) {
 		spin_lock_irqsave(&card->card_lock, card->flags);
-		if (inw(base + 0xe) & 0x1) {
-			return 1;
-		} else {
-			spin_unlock_irqrestore(&card->card_lock, card->flags);
-			udelay(1000);   /* 1ms */
+		for (a = 0; a < 10; a++) {
+			if (inw(base + 0xe) & 0x1)
+				return 1;
+			udelay(10);
 		}
+		spin_unlock_irqrestore(&card->card_lock, card->flags);
+		msleep(10);
 	}
 	printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%lx)\n",
 		card->base);
@@ -261,23 +259,6 @@
 	return 0;	/* Failed to acquire the card! */
 }
 
-static int lock_card_at_interrupt(struct isi_board *card)
-{
-	unsigned char		retries;
-	unsigned long base = card->base;
-
-	for (retries = 0; retries < 200; retries++) {
-		spin_lock_irqsave(&card->card_lock, card->flags);
-
-		if (inw(base + 0xe) & 0x1)
-			return 1;
-		else
-			spin_unlock_irqrestore(&card->card_lock, card->flags);
-	}
-	/* Failing in interrupt is an acceptable event */
-	return 0;	/* Failed to acquire the card! */
-}
-
 static void unlock_card(struct isi_board *card)
 {
 	spin_unlock_irqrestore(&card->card_lock, card->flags);
@@ -415,7 +396,9 @@
 
 static void isicom_tx(unsigned long _data)
 {
-	short count = (BOARD_COUNT-1), card, base;
+	unsigned long flags, base;
+	unsigned int retries;
+	short count = (BOARD_COUNT-1), card;
 	short txcount, wrd, residue, word_count, cnt;
 	struct isi_port *port;
 	struct tty_struct *tty;
@@ -435,32 +418,34 @@
 	count = isi_card[card].port_count;
 	port = isi_card[card].ports;
 	base = isi_card[card].base;
+
+	spin_lock_irqsave(&isi_card[card].card_lock, flags);
+	for (retries = 0; retries < 100; retries++) {
+		if (inw(base + 0xe) & 0x1)
+			break;
+		udelay(2);
+	}
+	if (retries >= 100)
+		goto unlock;
+
 	for (;count > 0;count--, port++) {
-		if (!lock_card_at_interrupt(&isi_card[card]))
-			continue;
 		/* port not active or tx disabled to force flow control */
 		if (!(port->flags & ASYNC_INITIALIZED) ||
 				!(port->status & ISI_TXOK))
-			unlock_card(&isi_card[card]);
 			continue;
 
 		tty = port->tty;
 
-
-		if (tty == NULL) {
-			unlock_card(&isi_card[card]);
+		if (tty == NULL)
 			continue;
-		}
 
 		txcount = min_t(short, TX_SIZE, port->xmit_cnt);
-		if (txcount <= 0 || tty->stopped || tty->hw_stopped) {
-			unlock_card(&isi_card[card]);
+		if (txcount <= 0 || tty->stopped || tty->hw_stopped)
 			continue;
-		}
-		if (!(inw(base + 0x02) & (1 << port->channel))) {
-			unlock_card(&isi_card[card]);
+
+		if (!(inw(base + 0x02) & (1 << port->channel)))
 			continue;
-		}
+
 		pr_dbg("txing %d bytes, port%d.\n", txcount,
 			port->channel + 1);
 		outw((port->channel << isi_card[card].shift_count) | txcount,
@@ -508,16 +493,12 @@
 			port->status &= ~ISI_TXOK;
 		if (port->xmit_cnt <= WAKEUP_CHARS)
 			tty_wakeup(tty);
-		unlock_card(&isi_card[card]);
 	}
 
+unlock:
+	spin_unlock_irqrestore(&isi_card[card].card_lock, flags);
 	/*	schedule another tx for hopefully in about 10ms	*/
 sched_again:
-	if (!re_schedule) {
-		complete(&isi_timerdone);
- 		return;
-	}
-
 	mod_timer(&tx, jiffies + msecs_to_jiffies(10));
 }
 
@@ -1749,17 +1730,13 @@
 static int __devinit isicom_probe(struct pci_dev *pdev,
 	const struct pci_device_id *ent)
 {
-	unsigned int ioaddr, signature, index;
+	unsigned int signature, index;
 	int retval = -EPERM;
-	u8 pciirq;
 	struct isi_board *board = NULL;
 
 	if (card_count >= BOARD_COUNT)
 		goto err;
 
-	ioaddr = pci_resource_start(pdev, 3);
-	/* i.e at offset 0x1c in the PCI configuration register space. */
-	pciirq = pdev->irq;
 	dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device);
 
 	/* allot the first empty slot in the array */
@@ -1770,8 +1747,8 @@
 		}
 
 	board->index = index;
-	board->base = ioaddr;
-	board->irq = pciirq;
+	board->base = pci_resource_start(pdev, 3);
+	board->irq = pdev->irq;
 	card_count++;
 
 	pci_set_drvdata(pdev, board);
@@ -1901,9 +1878,7 @@
 
 static void __exit isicom_exit(void)
 {
-	re_schedule = 0;
-
-	wait_for_completion_timeout(&isi_timerdone, HZ);
+	del_timer_sync(&tx);
 
 	pci_unregister_driver(&isicom_driver);
 	tty_unregister_driver(isicom_normal);
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 8094099..3c66f40 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -2163,14 +2163,10 @@
 	cdkhdr_t __iomem *hdrp;
 	cdkctrl_t __iomem *cp;
 	unsigned char __iomem *bits;
-	unsigned long flags;
-
-	spin_lock_irqsave(&brd_lock, flags);
 
 	if (test_bit(ST_CMDING, &portp->state)) {
 		printk(KERN_ERR "STALLION: command already busy, cmd=%x!\n",
 				(int) cmd);
-		spin_unlock_irqrestore(&brd_lock, flags);
 		return;
 	}
 
@@ -2191,7 +2187,6 @@
 	writeb(readb(bits) | portp->portbit, bits);
 	set_bit(ST_CMDING, &portp->state);
 	EBRDDISABLE(brdp);
-	spin_unlock_irqrestore(&brd_lock, flags);
 }
 
 static void stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback)
@@ -3215,13 +3210,13 @@
 		goto err;
 	}
 
+	brdp->iosize = ECP_IOSIZE;
+
 	if (!request_region(brdp->iobase, brdp->iosize, "istallion")) {
 		retval = -EIO;
 		goto err;
 	}
 
-	brdp->iosize = ECP_IOSIZE;
-
 /*
  *	Based on the specific board type setup the common vars to access
  *	and enable shared memory. Set all board specific information now
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 57f9115..7ee5d944 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -39,14 +39,14 @@
 #else
 #define DBG(fmt...)
 #endif
-int mbcs_major;
+static int mbcs_major;
 
-LIST_HEAD(soft_list);
+static LIST_HEAD(soft_list);
 
 /*
  * file operations
  */
-const struct file_operations mbcs_ops = {
+static const struct file_operations mbcs_ops = {
 	.open = mbcs_open,
 	.llseek = mbcs_sram_llseek,
 	.read = mbcs_sram_read,
@@ -377,7 +377,7 @@
 	return rv;
 }
 
-int mbcs_open(struct inode *ip, struct file *fp)
+static int mbcs_open(struct inode *ip, struct file *fp)
 {
 	struct mbcs_soft *soft;
 	int minor;
@@ -394,7 +394,7 @@
 	return -ENODEV;
 }
 
-ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off)
+static ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off)
 {
 	struct cx_dev *cx_dev = fp->private_data;
 	struct mbcs_soft *soft = cx_dev->soft;
@@ -418,7 +418,7 @@
 	return rv;
 }
 
-ssize_t
+static ssize_t
 mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * off)
 {
 	struct cx_dev *cx_dev = fp->private_data;
@@ -443,7 +443,7 @@
 	return rv;
 }
 
-loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
+static loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
 {
 	loff_t newpos;
 
@@ -491,7 +491,7 @@
 	soft->gscr_addr = mbcs_pioaddr(soft, MBCS_GSCR_START);
 }
 
-int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
+static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
 {
 	struct cx_dev *cx_dev = fp->private_data;
 	struct mbcs_soft *soft = cx_dev->soft;
@@ -793,7 +793,7 @@
 	return 0;
 }
 
-const struct cx_device_id __devinitdata mbcs_id_table[] = {
+static const struct cx_device_id __devinitdata mbcs_id_table[] = {
 	{
 	 .part_num = MBCS_PART_NUM,
 	 .mfg_num = MBCS_MFG_NUM,
@@ -807,7 +807,7 @@
 
 MODULE_DEVICE_TABLE(cx, mbcs_id_table);
 
-struct cx_drv mbcs_driver = {
+static struct cx_drv mbcs_driver = {
 	.name = DEVICE_NAME,
 	.id_table = mbcs_id_table,
 	.probe = mbcs_probe,
@@ -816,12 +816,7 @@
 
 static void __exit mbcs_exit(void)
 {
-	int rv;
-
-	rv = unregister_chrdev(mbcs_major, DEVICE_NAME);
-	if (rv < 0)
-		DBG(KERN_ALERT "Error in unregister_chrdev: %d\n", rv);
-
+	unregister_chrdev(mbcs_major, DEVICE_NAME);
 	cx_driver_unregister(&mbcs_driver);
 }
 
diff --git a/drivers/char/mbcs.h b/drivers/char/mbcs.h
index e7fd47e..c9905a3 100644
--- a/drivers/char/mbcs.h
+++ b/drivers/char/mbcs.h
@@ -542,12 +542,12 @@
 	struct semaphore algolock;
 };
 
-extern int mbcs_open(struct inode *ip, struct file *fp);
-extern ssize_t mbcs_sram_read(struct file *fp, char __user *buf, size_t len,
+static int mbcs_open(struct inode *ip, struct file *fp);
+static ssize_t mbcs_sram_read(struct file *fp, char __user *buf, size_t len,
 			      loff_t * off);
-extern ssize_t mbcs_sram_write(struct file *fp, const char __user *buf, size_t len,
+static ssize_t mbcs_sram_write(struct file *fp, const char __user *buf, size_t len,
 			       loff_t * off);
-extern loff_t mbcs_sram_llseek(struct file *filp, loff_t off, int whence);
-extern int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma);
+static loff_t mbcs_sram_llseek(struct file *filp, loff_t off, int whence);
+static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma);
 
 #endif				// __MBCS_H__
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index e0d35c2..ed76f0a 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -1405,7 +1405,6 @@
 static struct mon_str moxaLog;
 static int moxaFuncTout = HZ / 2;
 
-static void moxadelay(int);
 static void moxafunc(void __iomem *, int, ushort);
 static void wait_finish(void __iomem *);
 static void low_water_check(void __iomem *);
@@ -2404,10 +2403,10 @@
 	ofsAddr = moxa_ports[port].tableAddr;
 	if (ms100) {
 		moxafunc(ofsAddr, FC_SendBreak, Magic_code);
-		moxadelay(ms100 * (HZ / 10));
+		msleep(ms100 * 10);
 	} else {
 		moxafunc(ofsAddr, FC_SendBreak, Magic_code);
-		moxadelay(HZ / 4);	/* 250 ms */
+		msleep(250);
 	}
 	moxafunc(ofsAddr, FC_StopBreak, Magic_code);
 }
@@ -2476,18 +2475,6 @@
 /*****************************************************************************
  *	Static local functions: 					     *
  *****************************************************************************/
-/*
- * moxadelay - delays a specified number ticks
- */
-static void moxadelay(int tick)
-{
-	unsigned long st, et;
-
-	st = jiffies;
-	et = st + tick;
-	while (time_before(jiffies, et));
-}
-
 static void moxafunc(void __iomem *ofsAddr, int cmd, ushort arg)
 {
 
@@ -2535,7 +2522,7 @@
 		return -EFAULT;
 	baseAddr = moxa_boards[cardno].basemem;
 	writeb(HW_reset, baseAddr + Control_reg);	/* reset */
-	moxadelay(1);		/* delay 10 ms */
+	msleep(10);
 	for (i = 0; i < 4096; i++)
 		writeb(0, baseAddr + i);	/* clear fix page */
 	for (i = 0; i < len; i++)
@@ -2713,7 +2700,7 @@
 			for (i = 0; i < 100; i++) {
 				if (readw(baseAddr + C218_key) == keycode)
 					break;
-				moxadelay(1);	/* delay 10 ms */
+				msleep(10);
 			}
 			if (readw(baseAddr + C218_key) != keycode) {
 				return (-1);
@@ -2725,7 +2712,7 @@
 		for (i = 0; i < 100; i++) {
 			if (readw(baseAddr + C218_key) == keycode)
 				break;
-			moxadelay(1);	/* delay 10 ms */
+			msleep(10);
 		}
 		retry++;
 	} while ((readb(baseAddr + C218chksum_ok) != 1) && (retry < 3));
@@ -2736,7 +2723,7 @@
 	for (i = 0; i < 100; i++) {
 		if (readw(baseAddr + Magic_no) == Magic_code)
 			break;
-		moxadelay(1);	/* delay 10 ms */
+		msleep(10);
 	}
 	if (readw(baseAddr + Magic_no) != Magic_code) {
 		return (-1);
@@ -2746,7 +2733,7 @@
 	for (i = 0; i < 100; i++) {
 		if (readw(baseAddr + Magic_no) == Magic_code)
 			break;
-		moxadelay(1);	/* delay 10 ms */
+		msleep(10);
 	}
 	if (readw(baseAddr + Magic_no) != Magic_code) {
 		return (-1);
@@ -2788,7 +2775,7 @@
 			for (i = 0; i < 10; i++) {
 				if (readw(baseAddr + C320_key) == C320_KeyCode)
 					break;
-				moxadelay(1);
+				msleep(10);
 			}
 			if (readw(baseAddr + C320_key) != C320_KeyCode)
 				return (-1);
@@ -2799,7 +2786,7 @@
 		for (i = 0; i < 10; i++) {
 			if (readw(baseAddr + C320_key) == C320_KeyCode)
 				break;
-			moxadelay(1);
+			msleep(10);
 		}
 		retry++;
 	} while ((readb(baseAddr + C320chksum_ok) != 1) && (retry < 3));
@@ -2809,7 +2796,7 @@
 	for (i = 0; i < 600; i++) {
 		if (readw(baseAddr + Magic_no) == Magic_code)
 			break;
-		moxadelay(1);
+		msleep(10);
 	}
 	if (readw(baseAddr + Magic_no) != Magic_code)
 		return (-100);
@@ -2828,7 +2815,7 @@
 	for (i = 0; i < 500; i++) {
 		if (readw(baseAddr + Magic_no) == Magic_code)
 			break;
-		moxadelay(1);
+		msleep(10);
 	}
 	if (readw(baseAddr + Magic_no) != Magic_code)
 		return (-102);
@@ -2842,7 +2829,7 @@
 	for (i = 0; i < 600; i++) {
 		if (readw(baseAddr + Magic_no) == Magic_code)
 			break;
-		moxadelay(1);
+		msleep(10);
 	}
 	if (readw(baseAddr + Magic_no) != Magic_code)
 		return (-102);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 13808f6..2b88931 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -540,13 +540,12 @@
     if (debug_level >= DEBUG_LEVEL_INFO)
 	    printk("mgslpc_attach\n");
 
-    info = kmalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
+    info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
     if (!info) {
 	    printk("Error can't allocate device instance data\n");
 	    return -ENOMEM;
     }
 
-    memset(info, 0, sizeof(MGSLPC_INFO));
     info->magic = MGSLPC_MAGIC;
     INIT_WORK(&info->task, bh_handler);
     info->max_frame_size = 4096;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 7f52712..397c714 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -693,9 +693,14 @@
 
 	if (r->pull && r->entropy_count < nbytes * 8 &&
 	    r->entropy_count < r->poolinfo->POOLBITS) {
-		int bytes = max_t(int, random_read_wakeup_thresh / 8,
-				min_t(int, nbytes, sizeof(tmp)));
+		/* If we're limited, always leave two wakeup worth's BITS */
 		int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
+		int bytes = nbytes;
+
+		/* pull at least as many as BYTES as wakeup BITS */
+		bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
+		/* but never more than the buffer size */
+		bytes = min_t(int, bytes, sizeof(tmp));
 
 		DEBUG_ENT("going to reseed %s with %d bits "
 			  "(%d of %d requested)\n",
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c
index 294e9cb..0ce9667 100644
--- a/drivers/char/rio/rio_linux.c
+++ b/drivers/char/rio/rio_linux.c
@@ -803,9 +803,7 @@
 {
 	void *p;
 
-	p = kmalloc(size, GFP_KERNEL);
-	if (p)
-		memset(p, 0, size);
+	p = kzalloc(size, GFP_KERNEL);
 	return p;
 }
 
diff --git a/drivers/char/rio/riocmd.c b/drivers/char/rio/riocmd.c
index 8cc60b6..7321d00 100644
--- a/drivers/char/rio/riocmd.c
+++ b/drivers/char/rio/riocmd.c
@@ -556,9 +556,7 @@
 {
 	struct CmdBlk *CmdBlkP;
 
-	CmdBlkP = kmalloc(sizeof(struct CmdBlk), GFP_ATOMIC);
-	if (CmdBlkP)
-		memset(CmdBlkP, 0, sizeof(struct CmdBlk));
+	CmdBlkP = kzalloc(sizeof(struct CmdBlk), GFP_ATOMIC);
 	return CmdBlkP;
 }
 
diff --git a/drivers/char/rio/riotable.c b/drivers/char/rio/riotable.c
index 7e98835..991119c 100644
--- a/drivers/char/rio/riotable.c
+++ b/drivers/char/rio/riotable.c
@@ -863,8 +863,7 @@
 		if (PortP->TxRingBuffer)
 			memset(PortP->TxRingBuffer, 0, p->RIOBufferSize);
 		else if (p->RIOBufferSize) {
-			PortP->TxRingBuffer = kmalloc(p->RIOBufferSize, GFP_KERNEL);
-			memset(PortP->TxRingBuffer, 0, p->RIOBufferSize);
+			PortP->TxRingBuffer = kzalloc(p->RIOBufferSize, GFP_KERNEL);
 		}
 		PortP->TxBufferOut = 0;
 		PortP->TxBufferIn = 0;
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 3494e3f..b37e626 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -213,14 +213,6 @@
 		release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1);
 }
 	
-/* Must be called with enabled interrupts */
-static inline void rc_long_delay(unsigned long delay)
-{
-	unsigned long i;
-	
-	for (i = jiffies + delay; time_after(i,jiffies); ) ;
-}
-
 /* Reset and setup CD180 chip */
 static void __init rc_init_CD180(struct riscom_board const * bp)
 {
@@ -231,7 +223,7 @@
 	rc_wait_CCR(bp);			   /* Wait for CCR ready        */
 	rc_out(bp, CD180_CCR, CCR_HARDRESET);      /* Reset CD180 chip          */
 	sti();
-	rc_long_delay(HZ/20);                      /* Delay 0.05 sec            */
+	msleep(50);				   /* Delay 0.05 sec            */
 	cli();
 	rc_out(bp, CD180_GIVR, RC_ID);             /* Set ID for this chip      */
 	rc_out(bp, CD180_GICR, 0);                 /* Clear all bits            */
@@ -280,7 +272,7 @@
 		rc_wait_CCR(bp);
 		rc_out(bp, CD180_CCR, CCR_TXEN);        /* Enable transmitter     */
 		rc_out(bp, CD180_IER, IER_TXRDY);       /* Enable tx empty intr   */
-		rc_long_delay(HZ/20);	       		
+		msleep(50);
 		irqs = probe_irq_off(irqs);
 		val1 = rc_in(bp, RC_BSR);		/* Get Board Status reg   */
 		val2 = rc_in(bp, RC_ACK_TINT);          /* ACK interrupt          */
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index 0270080..56cbba7 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -635,12 +635,11 @@
 	ctlp = sCtlNumToCtlPtr(board);
 
 	/*  Get a r_port struct for the port, fill it in and save it globally, indexed by line number */
-	info = kmalloc(sizeof (struct r_port), GFP_KERNEL);
+	info = kzalloc(sizeof (struct r_port), GFP_KERNEL);
 	if (!info) {
 		printk(KERN_INFO "Couldn't allocate info struct for line #%d\n", line);
 		return;
 	}
-	memset(info, 0, sizeof (struct r_port));
 
 	info->magic = RPORT_MAGIC;
 	info->line = line;
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index baf7234..4558556 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -345,18 +345,6 @@
 }
 
 
-/* Must be called with enabled interrupts */
-/* Ugly. Very ugly. Don't use this for anything else than initialization
-   code */
-static inline void sx_long_delay(unsigned long delay)
-{
-	unsigned long i;
-
-	for (i = jiffies + delay; time_after(i, jiffies); ) ;
-}
-
-
-
 /* Set the IRQ using the RTS lines that run to the PAL on the board.... */
 static int sx_set_irq ( struct specialix_board *bp)
 {
@@ -397,7 +385,7 @@
 	spin_lock_irqsave(&bp->lock, flags);
 	sx_out_off(bp, CD186x_CCR, CCR_HARDRESET);      /* Reset CD186x chip          */
 	spin_unlock_irqrestore(&bp->lock, flags);
-	sx_long_delay(HZ/20);                      /* Delay 0.05 sec            */
+	msleep(50);					/* Delay 0.05 sec            */
 	spin_lock_irqsave(&bp->lock, flags);
 	sx_out_off(bp, CD186x_GIVR, SX_ID);             /* Set ID for this chip      */
 	sx_out_off(bp, CD186x_GICR, 0);                 /* Clear all bits            */
@@ -533,7 +521,7 @@
 		sx_wait_CCR(bp);
 		sx_out(bp, CD186x_CCR, CCR_TXEN);        /* Enable transmitter     */
 		sx_out(bp, CD186x_IER, IER_TXRDY);       /* Enable tx empty intr   */
-		sx_long_delay(HZ/20);
+		msleep(50);
 		irqs = probe_irq_off(irqs);
 
 		dprintk (SX_DEBUG_INIT, "SRSR = %02x, ", sx_in(bp, CD186x_SRSR));
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 8c73ccb..4a80b2f 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -1788,7 +1788,6 @@
 	if (tty == NULL)
 		return;
 
-	lock_kernel();
 	if (test_bit(ASYI_TXLOW, &portp->istate))
 		tty_wakeup(tty);
 
@@ -1802,7 +1801,6 @@
 			if (portp->flags & ASYNC_CHECK_CD)
 				tty_hangup(tty);	/* FIXME: module removal race here - AKPM */
 	}
-	unlock_kernel();
 }
 
 /*****************************************************************************/
@@ -2357,9 +2355,6 @@
 	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE)
 		goto err;
 
-	dev_info(&pdev->dev, "please, report this to LKML: %x/%x/%x\n",
-			pdev->vendor, pdev->device, pdev->class);
-
 	retval = pci_enable_device(pdev);
 	if (retval)
 		goto err;
@@ -4800,7 +4795,6 @@
 {
 	struct stlbrd *brdp;
 	unsigned int i, j;
-	int retval;
 
 	pr_debug("cleanup_module()\n");
 
@@ -4823,9 +4817,7 @@
 
 	for (i = 0; i < 4; i++)
 		class_device_destroy(stallion_class, MKDEV(STL_SIOMEMMAJOR, i));
-	if ((retval = unregister_chrdev(STL_SIOMEMMAJOR, "staliomem")))
-		printk("STALLION: failed to un-register serial memory device, "
-			"errno=%d\n", -retval);
+	unregister_chrdev(STL_SIOMEMMAJOR, "staliomem");
 	class_destroy(stallion_class);
 
 	pci_unregister_driver(&stl_pcidriver);
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index f53e51d..fdc256b 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -4324,13 +4324,12 @@
 {
 	struct mgsl_struct *info;
 	
-	info = kmalloc(sizeof(struct mgsl_struct),
+	info = kzalloc(sizeof(struct mgsl_struct),
 		 GFP_KERNEL);
 		 
 	if (!info) {
 		printk("Error can't allocate device instance data\n");
 	} else {
-		memset(info, 0, sizeof(struct mgsl_struct));
 		info->magic = MGSL_MAGIC;
 		INIT_WORK(&info->task, mgsl_bh_handler);
 		info->max_frame_size = 4096;
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 428b514..372a37e 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -3414,13 +3414,12 @@
 {
 	struct slgt_info *info;
 
-	info = kmalloc(sizeof(struct slgt_info), GFP_KERNEL);
+	info = kzalloc(sizeof(struct slgt_info), GFP_KERNEL);
 
 	if (!info) {
 		DBGERR(("%s device alloc failed adapter=%d port=%d\n",
 			driver_name, adapter_num, port_num));
 	} else {
-		memset(info, 0, sizeof(struct slgt_info));
 		info->magic = MGSL_MAGIC;
 		INIT_WORK(&info->task, bh_handler);
 		info->max_frame_size = 4096;
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index a65407b..c63013b 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -3786,14 +3786,13 @@
 {
 	SLMP_INFO *info;
 
-	info = kmalloc(sizeof(SLMP_INFO),
+	info = kzalloc(sizeof(SLMP_INFO),
 		 GFP_KERNEL);
 
 	if (!info) {
 		printk("%s(%d) Error can't allocate device instance data for adapter %d, port %d\n",
 			__FILE__,__LINE__, adapter_num, port_num);
 	} else {
-		memset(info, 0, sizeof(SLMP_INFO));
 		info->magic = MGSL_MAGIC;
 		INIT_WORK(&info->task, bh_handler);
 		info->max_frame_size = 4096;
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index 13faf8d..e12275d 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -873,12 +873,12 @@
 }
 
 const struct file_operations viotap_fops = {
-	owner: THIS_MODULE,
-	read: viotap_read,
-	write: viotap_write,
-	ioctl: viotap_ioctl,
-	open: viotap_open,
-	release: viotap_release,
+	.owner =	THIS_MODULE,
+	.read =		viotap_read,
+	.write =	viotap_write,
+	.ioctl =	viotap_ioctl,
+	.open =		viotap_open,
+	.release =	viotap_release,
 };
 
 /* Handle interrupt events for tape */
@@ -1098,15 +1098,10 @@
 /* Cleanup */
 static void __exit viotap_exit(void)
 {
-	int ret;
-
 	remove_proc_entry("iSeries/viotape", NULL);
 	vio_unregister_driver(&viotape_driver);
 	class_destroy(tape_class);
-	ret = unregister_chrdev(VIOTAPE_MAJOR, "viotape");
-	if (ret < 0)
-		printk(VIOTAPE_KERN_WARN "Error unregistering device: %d\n",
-				ret);
+	unregister_chrdev(VIOTAPE_MAJOR, "viotape");
 	if (viotape_unitinfo)
 		dma_free_coherent(iSeries_vio_dev,
 				sizeof(viotape_unitinfo[0]) * VIOTAPE_MAX_TAPE,
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 6650ae1..edb7002 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -729,10 +729,9 @@
 	    /* although the numbers above are not valid since long ago, the
 	       point is still up-to-date and the comment still has its value
 	       even if only as a historical artifact.  --mj, July 1998 */
-	    vc = kmalloc(sizeof(struct vc_data), GFP_KERNEL);
+	    vc = kzalloc(sizeof(struct vc_data), GFP_KERNEL);
 	    if (!vc)
 		return -ENOMEM;
-	    memset(vc, 0, sizeof(*vc));
 	    vc_cons[currcons].d = vc;
 	    INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
 	    visual_init(vc, currcons, 1);
@@ -1991,8 +1990,7 @@
 		{ 0xFE10, 0xFE19 }, { 0xFE30, 0xFE6F }, { 0xFF00, 0xFF60 },
 		{ 0xFFE0, 0xFFE6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD }
 	};
-	return bisearch(ucs, double_width,
-		sizeof(double_width) / sizeof(*double_width) - 1);
+	return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
 }
 
 /* acquires console_sem */
@@ -2989,8 +2987,24 @@
 	return retval;
 }
 
-static int unbind_con_driver(const struct consw *csw, int first, int last,
-			     int deflt)
+/**
+ * unbind_con_driver - unbind a console driver
+ * @csw: pointer to console driver to unregister
+ * @first: first in range of consoles that @csw should be unbound from
+ * @last: last in range of consoles that @csw should be unbound from
+ * @deflt: should next bound console driver be default after @csw is unbound?
+ *
+ * To unbind a driver from all possible consoles, pass 0 as @first and
+ * %MAX_NR_CONSOLES as @last.
+ *
+ * @deflt controls whether the console that ends up replacing @csw should be
+ * the default console.
+ *
+ * RETURNS:
+ * -ENODEV if @csw isn't a registered console driver or can't be unregistered
+ * or 0 on success.
+ */
+int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
 {
 	struct module *owner = csw->owner;
 	const struct consw *defcsw = NULL;
@@ -3075,6 +3089,7 @@
 	return retval;
 
 }
+EXPORT_SYMBOL(unbind_con_driver);
 
 static int vt_bind(struct con_driver *con)
 {
@@ -3491,9 +3506,6 @@
 		}
 		return;
 	}
-	if (blank_state != blank_normal_wait)
-		return;
-	blank_state = blank_off;
 
 	/* entering graphics mode? */
 	if (entering_gfx) {
@@ -3501,10 +3513,15 @@
 		save_screen(vc);
 		vc->vc_sw->con_blank(vc, -1, 1);
 		console_blanked = fg_console + 1;
+		blank_state = blank_off;
 		set_origin(vc);
 		return;
 	}
 
+	if (blank_state != blank_normal_wait)
+		return;
+	blank_state = blank_off;
+
 	/* don't blank graphics */
 	if (vc->vc_mode != KD_TEXT) {
 		console_blanked = fg_console + 1;
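
Because unbind_con_driver() is now non-static and exported, a console driver module can
call it directly on unload.  A hedged sketch of such a caller, where my_consw and
my_console_unload() are hypothetical names and the header declaration of
unbind_con_driver() is assumed to be provided elsewhere in this series:

	#include <linux/console.h>	/* struct consw */
	#include <linux/vt.h>		/* MAX_NR_CONSOLES */
	#include <linux/kernel.h>	/* printk */

	/* Prototype matches the definition above; the exported symbol's header
	 * declaration is assumed to live outside this hunk. */
	extern int unbind_con_driver(const struct consw *csw, int first, int last,
				     int deflt);

	extern const struct consw my_consw;	/* hypothetical driver-owned consw */

	static void my_console_unload(void)
	{
		/* Per the kerneldoc: first=0, last=MAX_NR_CONSOLES unbinds from every
		 * console; deflt=1 lets the replacement driver become the default. */
		if (unbind_con_driver(&my_consw, 0, MAX_NR_CONSOLES, 1) == -ENODEV)
			printk(KERN_WARNING "my_console: driver was not bound\n");
	}
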
diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig
index 53f5538..2f48ba3 100644
--- a/drivers/char/watchdog/Kconfig
+++ b/drivers/char/watchdog/Kconfig
@@ -187,6 +187,15 @@
 
 	  Say N if you are unsure.
 
+# AVR32 Architecture
+
+config AT32AP700X_WDT
+	tristate "AT32AP700x watchdog"
+	depends on WATCHDOG && CPU_AT32AP7000
+	help
+	  Watchdog timer embedded into AT32AP700x devices. This will reboot
+	  your system when the timeout is reached.
+
 # X86 (i386 + ia64 + x86_64) Architecture
 
 config ACQUIRE_WDT
diff --git a/drivers/char/watchdog/Makefile b/drivers/char/watchdog/Makefile
index d90f649..3907ec0 100644
--- a/drivers/char/watchdog/Makefile
+++ b/drivers/char/watchdog/Makefile
@@ -36,6 +36,9 @@
 obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
 obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
 
+# AVR32 Architecture
+obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
+
 # X86 (i386 + ia64 + x86_64) Architecture
 obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
 obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
diff --git a/drivers/char/watchdog/at32ap700x_wdt.c b/drivers/char/watchdog/at32ap700x_wdt.c
new file mode 100644
index 0000000..54a5161
--- /dev/null
+++ b/drivers/char/watchdog/at32ap700x_wdt.c
@@ -0,0 +1,386 @@
+/*
+ * Watchdog driver for Atmel AT32AP700X devices
+ *
+ * Copyright (C) 2005-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+#define TIMEOUT_MIN		1
+#define TIMEOUT_MAX		2
+#define TIMEOUT_DEFAULT		TIMEOUT_MAX
+
+/* module parameters */
+static int timeout =  TIMEOUT_DEFAULT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,
+		"Timeout value. Limited to be 1 or 2 seconds. (default="
+		__MODULE_STRING(TIMEOUT_DEFAULT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+		__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+/* Watchdog registers and write/read macro */
+#define WDT_CTRL		0x00
+#define WDT_CTRL_EN		   0
+#define WDT_CTRL_PSEL		   8
+#define WDT_CTRL_KEY		  24
+
+#define WDT_CLR			0x04
+
+#define WDT_BIT(name)		(1 << WDT_##name)
+#define WDT_BF(name, value)	((value) << WDT_##name)
+
+#define wdt_readl(dev, reg)				\
+	__raw_readl((dev)->regs + WDT_##reg)
+#define wdt_writel(dev, reg, value)			\
+	__raw_writel((value), (dev)->regs + WDT_##reg)
+
+struct wdt_at32ap700x {
+	void __iomem		*regs;
+	spinlock_t		io_lock;
+	int			timeout;
+	unsigned long		users;
+	struct miscdevice	miscdev;
+};
+
+static struct wdt_at32ap700x *wdt;
+static char expect_release;
+
+/*
+ * Disable the watchdog.
+ */
+static inline void at32_wdt_stop(void)
+{
+	unsigned long psel;
+
+	spin_lock(&wdt->io_lock);
+	psel = wdt_readl(wdt, CTRL) & WDT_BF(CTRL_PSEL, 0x0f);
+	wdt_writel(wdt, CTRL, psel | WDT_BF(CTRL_KEY, 0x55));
+	wdt_writel(wdt, CTRL, psel | WDT_BF(CTRL_KEY, 0xaa));
+	spin_unlock(&wdt->io_lock);
+}
+
+/*
+ * Enable and reset the watchdog.
+ */
+static inline void at32_wdt_start(void)
+{
+	/* 0xf is 2^16 divider = 2 sec, 0xe is 2^15 divider = 1 sec */
+	unsigned long psel = (wdt->timeout > 1) ? 0xf : 0xe;
+
+	spin_lock(&wdt->io_lock);
+	wdt_writel(wdt, CTRL, WDT_BIT(CTRL_EN)
+			| WDT_BF(CTRL_PSEL, psel)
+			| WDT_BF(CTRL_KEY, 0x55));
+	wdt_writel(wdt, CTRL, WDT_BIT(CTRL_EN)
+			| WDT_BF(CTRL_PSEL, psel)
+			| WDT_BF(CTRL_KEY, 0xaa));
+	spin_unlock(&wdt->io_lock);
+}
+
+/*
+ * Pat the watchdog timer.
+ */
+static inline void at32_wdt_pat(void)
+{
+	spin_lock(&wdt->io_lock);
+	wdt_writel(wdt, CLR, 0x42);
+	spin_unlock(&wdt->io_lock);
+}
+
+/*
+ * Watchdog device is opened, and watchdog starts running.
+ */
+static int at32_wdt_open(struct inode *inode, struct file *file)
+{
+	if (test_and_set_bit(1, &wdt->users))
+		return -EBUSY;
+
+	at32_wdt_start();
+	return nonseekable_open(inode, file);
+}
+
+/*
+ * Close the watchdog device.
+ */
+static int at32_wdt_close(struct inode *inode, struct file *file)
+{
+	if (expect_release == 42) {
+		at32_wdt_stop();
+	} else {
+		dev_dbg(wdt->miscdev.parent,
+			"Unexpected close, not stopping watchdog!\n");
+		at32_wdt_pat();
+	}
+	clear_bit(1, &wdt->users);
+	expect_release = 0;
+	return 0;
+}
+
+/*
+ * Change the watchdog time interval.
+ */
+static int at32_wdt_settimeout(int time)
+{
+	/*
+	 * All counting occurs at 1 / SLOW_CLOCK (32 kHz) and max prescaler is
+	 * 2 ^ 16 allowing up to 2 seconds timeout.
+	 */
+	if ((time < TIMEOUT_MIN) || (time > TIMEOUT_MAX))
+		return -EINVAL;
+
+	/*
+	 * Set new watchdog time. It will be used when at32_wdt_start() is
+	 * called.
+	 */
+	wdt->timeout = time;
+	return 0;
+}
+
+static struct watchdog_info at32_wdt_info = {
+	.identity	= "at32ap700x watchdog",
+	.options	= WDIOF_SETTIMEOUT |
+			  WDIOF_KEEPALIVEPING |
+			  WDIOF_MAGICCLOSE,
+};
+
+/*
+ * Handle commands from user-space.
+ */
+static int at32_wdt_ioctl(struct inode *inode, struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	int ret = -ENOTTY;
+	int time;
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+
+	switch (cmd) {
+	case WDIOC_KEEPALIVE:
+		at32_wdt_pat();
+		ret = 0;
+		break;
+	case WDIOC_GETSUPPORT:
+		ret = copy_to_user(argp, &at32_wdt_info,
+				sizeof(at32_wdt_info)) ? -EFAULT : 0;
+		break;
+	case WDIOC_SETTIMEOUT:
+		ret = get_user(time, p);
+		if (ret)
+			break;
+		ret = at32_wdt_settimeout(time);
+		if (ret)
+			break;
+		/* Enable new time value */
+		at32_wdt_start();
+		/* fall through */
+	case WDIOC_GETTIMEOUT:
+		ret = put_user(wdt->timeout, p);
+		break;
+	case WDIOC_GETSTATUS: /* fall through */
+	case WDIOC_GETBOOTSTATUS:
+		ret = put_user(0, p);
+		break;
+	case WDIOC_SETOPTIONS:
+		ret = get_user(time, p);
+		if (ret)
+			break;
+		if (time & WDIOS_DISABLECARD)
+			at32_wdt_stop();
+		if (time & WDIOS_ENABLECARD)
+			at32_wdt_start();
+		ret = 0;
+		break;
+	}
+
+	return ret;
+}
+
+static ssize_t at32_wdt_write(struct file *file, const char __user *data,
+				size_t len, loff_t *ppos)
+{
+	/* See if we got the magic character 'V' and reload the timer */
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			/*
+			 * note: just in case someone wrote the magic
+			 * character five months ago...
+			 */
+			expect_release = 0;
+
+			/*
+			 * scan to see whether or not we got the magic
+			 * character
+			 */
+			for (i = 0; i != len; i++) {
+				char c;
+				if (get_user(c, data+i))
+					return -EFAULT;
+				if (c == 'V')
+					expect_release = 42;
+			}
+		}
+		/* someone wrote to us, we should pat the watchdog */
+		at32_wdt_pat();
+	}
+	return len;
+}
+
+static const struct file_operations at32_wdt_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.ioctl		= at32_wdt_ioctl,
+	.open		= at32_wdt_open,
+	.release	= at32_wdt_close,
+	.write		= at32_wdt_write,
+};
+
+static int __init at32_wdt_probe(struct platform_device *pdev)
+{
+	struct resource	*regs;
+	int ret;
+
+	if (wdt) {
+		dev_dbg(&pdev->dev, "only 1 wdt instance supported.\n");
+		return -EBUSY;
+	}
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs) {
+		dev_dbg(&pdev->dev, "missing mmio resource\n");
+		return -ENXIO;
+	}
+
+	wdt = kzalloc(sizeof(struct wdt_at32ap700x), GFP_KERNEL);
+	if (!wdt) {
+		dev_dbg(&pdev->dev, "no memory for wdt structure\n");
+		return -ENOMEM;
+	}
+
+	wdt->regs = ioremap(regs->start, regs->end - regs->start + 1);
+	if (!wdt->regs) {
+		ret = -ENOMEM;
+		dev_dbg(&pdev->dev, "could not map I/O memory\n");
+		goto err_free;
+	}
+	spin_lock_init(&wdt->io_lock);
+	wdt->users = 0;
+	wdt->miscdev.minor = WATCHDOG_MINOR;
+	wdt->miscdev.name = "watchdog";
+	wdt->miscdev.fops = &at32_wdt_fops;
+
+	if (at32_wdt_settimeout(timeout)) {
+		at32_wdt_settimeout(TIMEOUT_DEFAULT);
+		dev_dbg(&pdev->dev,
+			"invalid timeout value, using default of %d sec.\n",
+			TIMEOUT_DEFAULT);
+	}
+
+	ret = misc_register(&wdt->miscdev);
+	if (ret) {
+		dev_dbg(&pdev->dev, "failed to register wdt miscdev\n");
+		goto err_iounmap;
+	}
+
+	platform_set_drvdata(pdev, wdt);
+	wdt->miscdev.parent = &pdev->dev;
+	dev_info(&pdev->dev,
+		"AT32AP700X WDT at 0x%p, timeout %d sec (nowayout=%d)\n",
+		wdt->regs, wdt->timeout, nowayout);
+
+	return 0;
+
+err_iounmap:
+	iounmap(wdt->regs);
+err_free:
+	kfree(wdt);
+	wdt = NULL;
+	return ret;
+}
+
+static int __exit at32_wdt_remove(struct platform_device *pdev)
+{
+	if (wdt && platform_get_drvdata(pdev) == wdt) {
+		/* Stop the timer before we leave */
+		if (!nowayout)
+			at32_wdt_stop();
+
+		misc_deregister(&wdt->miscdev);
+		iounmap(wdt->regs);
+		kfree(wdt);
+		wdt = NULL;
+		platform_set_drvdata(pdev, NULL);
+	}
+
+	return 0;
+}
+
+static void at32_wdt_shutdown(struct platform_device *pdev)
+{
+	at32_wdt_stop();
+}
+
+#ifdef CONFIG_PM
+static int at32_wdt_suspend(struct platform_device *pdev, pm_message_t message)
+{
+	at32_wdt_stop();
+	return 0;
+}
+
+static int at32_wdt_resume(struct platform_device *pdev)
+{
+	if (wdt->users)
+		at32_wdt_start();
+	return 0;
+}
+#else
+#define at32_wdt_suspend NULL
+#define at32_wdt_resume NULL
+#endif
+
+static struct platform_driver at32_wdt_driver = {
+	.remove		= __exit_p(at32_wdt_remove),
+	.suspend	= at32_wdt_suspend,
+	.resume		= at32_wdt_resume,
+	.driver		= {
+		.name	= "at32_wdt",
+		.owner	= THIS_MODULE,
+	},
+	.shutdown	= at32_wdt_shutdown,
+};
+
+static int __init at32_wdt_init(void)
+{
+	return platform_driver_probe(&at32_wdt_driver, at32_wdt_probe);
+}
+module_init(at32_wdt_init);
+
+static void __exit at32_wdt_exit(void)
+{
+	platform_driver_unregister(&at32_wdt_driver);
+}
+module_exit(at32_wdt_exit);
+
+MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
+MODULE_DESCRIPTION("Watchdog driver for Atmel AT32AP700X");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
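
The timeout limits in at32_wdt_settimeout() follow directly from the comments above:
the counter runs from the 32 kHz slow clock, so a 2^16 prescaler gives 65536 / 32768 Hz
= 2 s and 2^15 gives 1 s, hence the 1..2 second range.  From user space the device
behaves like a standard watchdog misc device with magic close (unless nowayout is set,
in which case the 'V' is ignored and close keeps the timer running).  A hedged
user-space sketch; the device path and sleep interval are illustrative only:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int timeout = 2;			/* driver accepts 1 or 2 seconds */
		int fd = open("/dev/watchdog", O_WRONLY);

		if (fd < 0) {
			perror("open /dev/watchdog");
			return 1;
		}

		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* also (re)starts the timer */
		ioctl(fd, WDIOC_KEEPALIVE, 0);		/* pat the watchdog */
		sleep(1);				/* stay under the timeout */
		ioctl(fd, WDIOC_KEEPALIVE, 0);

		write(fd, "V", 1);	/* magic character: allow a clean stop on close */
		close(fd);		/* at32_wdt_close() then disables the watchdog */
		return 0;
	}
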
diff --git a/drivers/char/watchdog/ep93xx_wdt.c b/drivers/char/watchdog/ep93xx_wdt.c
index 01cf123..0e4787a 100644
--- a/drivers/char/watchdog/ep93xx_wdt.c
+++ b/drivers/char/watchdog/ep93xx_wdt.c
@@ -107,10 +107,6 @@
 ep93xx_wdt_write(struct file *file, const char __user *data, size_t len,
 		 loff_t *ppos)
 {
-	/* Can't seek (pwrite) on this device */
-	if (*ppos != file->f_pos)
-		return -ESPIPE;
-
 	if (len) {
 		if (!nowayout) {
 			size_t i;
diff --git a/drivers/char/watchdog/mixcomwd.c b/drivers/char/watchdog/mixcomwd.c
index f35e284..db2ccb8 100644
--- a/drivers/char/watchdog/mixcomwd.c
+++ b/drivers/char/watchdog/mixcomwd.c
@@ -29,11 +29,18 @@
  *		- support for one more type board
  *
  * Version 0.5 (2001/12/14) Matt Domsch <Matt_Domsch@dell.com>
- *              - added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
+ *		- added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
+ *
+ * Version 0.6 (2002/04/12): Rob Radez <rob@osinvestor.com>
+ *		- make mixcomwd_opened unsigned,
+ *		  removed lock_kernel/unlock_kernel from mixcomwd_release,
+ *		  modified ioctl a bit to conform to API
  *
  */
 
-#define VERSION "0.5"
+#define VERSION "0.6"
+#define WATCHDOG_NAME "mixcomwd"
+#define PFX WATCHDOG_NAME ": "
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -49,12 +56,46 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
-static int mixcomwd_ioports[] = { 0x180, 0x280, 0x380, 0x000 };
-
-#define MIXCOM_WATCHDOG_OFFSET 0xc10
+/*
+ * We have two types of cards that can be probed:
+ * 1) The Mixcom cards: these cards can be found at addresses
+ *    0x180, 0x280, 0x380 with an additional offset of 0xc10.
+ *    (Or 0xd90, 0xe90, 0xf90).
+ * 2) The FlashCOM cards: these cards can be set up at
+ *    0x300 -> 0x378, in 0x8 jumps with an offset of 0x04.
+ *    (Or 0x304 -> 0x37c in 0x8 jumps).
+ *    Each card has its own ID.
+ */
 #define MIXCOM_ID 0x11
-#define FLASHCOM_WATCHDOG_OFFSET 0x4
 #define FLASHCOM_ID 0x18
+static struct {
+	int ioport;
+	int id;
+} mixcomwd_io_info[] __devinitdata = {
+	/* The Mixcom cards */
+	{0x0d90, MIXCOM_ID},
+	{0x0e90, MIXCOM_ID},
+	{0x0f90, MIXCOM_ID},
+	/* The FlashCOM cards */
+	{0x0304, FLASHCOM_ID},
+	{0x030c, FLASHCOM_ID},
+	{0x0314, FLASHCOM_ID},
+	{0x031c, FLASHCOM_ID},
+	{0x0324, FLASHCOM_ID},
+	{0x032c, FLASHCOM_ID},
+	{0x0334, FLASHCOM_ID},
+	{0x033c, FLASHCOM_ID},
+	{0x0344, FLASHCOM_ID},
+	{0x034c, FLASHCOM_ID},
+	{0x0354, FLASHCOM_ID},
+	{0x035c, FLASHCOM_ID},
+	{0x0364, FLASHCOM_ID},
+	{0x036c, FLASHCOM_ID},
+	{0x0374, FLASHCOM_ID},
+	{0x037c, FLASHCOM_ID},
+	/* The end of the list */
+	{0x0000, 0},
+};
 
 static void mixcomwd_timerfun(unsigned long d);
 
@@ -113,13 +154,13 @@
 {
 	if (expect_close == 42) {
 		if(mixcomwd_timer_alive) {
-			printk(KERN_ERR "mixcomwd: release called while internal timer alive");
+			printk(KERN_ERR PFX "release called while internal timer alive");
 			return -EBUSY;
 		}
 		mixcomwd_timer_alive=1;
 		mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
 	} else {
-		printk(KERN_CRIT "mixcomwd: WDT device closed unexpectedly.  WDT will not stop!\n");
+		printk(KERN_CRIT PFX "WDT device closed unexpectedly.  WDT will not stop!\n");
 	}
 
 	clear_bit(0,&mixcomwd_opened);
@@ -188,8 +229,7 @@
 	return 0;
 }
 
-static const struct file_operations mixcomwd_fops=
-{
+static const struct file_operations mixcomwd_fops = {
 	.owner		= THIS_MODULE,
 	.llseek		= no_llseek,
 	.write		= mixcomwd_write,
@@ -198,46 +238,30 @@
 	.release	= mixcomwd_release,
 };
 
-static struct miscdevice mixcomwd_miscdev=
-{
+static struct miscdevice mixcomwd_miscdev = {
 	.minor	= WATCHDOG_MINOR,
 	.name	= "watchdog",
 	.fops	= &mixcomwd_fops,
 };
 
-static int __init mixcomwd_checkcard(int port)
+static int __init checkcard(int port, int card_id)
 {
 	int id;
 
-	port += MIXCOM_WATCHDOG_OFFSET;
-	if (!request_region(port, 1, "MixCOM watchdog")) {
-		return 0;
-	}
-
-	id=inb_p(port) & 0x3f;
-	if(id!=MIXCOM_ID) {
-		release_region(port, 1);
-		return 0;
-	}
-	return port;
-}
-
-static int __init flashcom_checkcard(int port)
-{
-	int id;
-
-	port += FLASHCOM_WATCHDOG_OFFSET;
 	if (!request_region(port, 1, "MixCOM watchdog")) {
 		return 0;
 	}
 
 	id=inb_p(port);
- 	if(id!=FLASHCOM_ID) {
+	if (card_id==MIXCOM_ID)
+		id &= 0x3f;
+
+	if (id!=card_id) {
 		release_region(port, 1);
 		return 0;
 	}
- 	return port;
- }
+	return 1;
+}
 
 static int __init mixcomwd_init(void)
 {
@@ -245,50 +269,50 @@
 	int ret;
 	int found=0;
 
-	for (i = 0; !found && mixcomwd_ioports[i] != 0; i++) {
-		watchdog_port = mixcomwd_checkcard(mixcomwd_ioports[i]);
-		if (watchdog_port) {
+	for (i = 0; !found && mixcomwd_io_info[i].ioport != 0; i++) {
+		if (checkcard(mixcomwd_io_info[i].ioport,
+			      mixcomwd_io_info[i].id)) {
 			found = 1;
-		}
-	}
-
-	/* The FlashCOM card can be set up at 0x300 -> 0x378, in 0x8 jumps */
-	for (i = 0x300; !found && i < 0x380; i+=0x8) {
-		watchdog_port = flashcom_checkcard(i);
-		if (watchdog_port) {
-			found = 1;
+			watchdog_port = mixcomwd_io_info[i].ioport;
 		}
 	}
 
 	if (!found) {
-		printk("mixcomwd: No card detected, or port not available.\n");
+		printk(KERN_ERR PFX "No card detected, or port not available.\n");
 		return -ENODEV;
 	}
 
 	ret = misc_register(&mixcomwd_miscdev);
 	if (ret)
 	{
-		release_region(watchdog_port, 1);
-		return ret;
+		printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
+			WATCHDOG_MINOR, ret);
+		goto error_misc_register_watchdog;
 	}
 
-	printk(KERN_INFO "MixCOM watchdog driver v%s, watchdog port at 0x%3x\n",VERSION,watchdog_port);
+	printk(KERN_INFO "MixCOM watchdog driver v%s, watchdog port at 0x%3x\n",
+		VERSION, watchdog_port);
 
 	return 0;
+
+error_misc_register_watchdog:
+	release_region(watchdog_port, 1);
+	watchdog_port = 0x0000;
+	return ret;
 }
 
 static void __exit mixcomwd_exit(void)
 {
 	if (!nowayout) {
 		if(mixcomwd_timer_alive) {
-			printk(KERN_WARNING "mixcomwd: I quit now, hardware will"
+			printk(KERN_WARNING PFX "I quit now, hardware will"
 			       " probably reboot!\n");
 			del_timer_sync(&mixcomwd_timer);
 			mixcomwd_timer_alive=0;
 		}
 	}
-	release_region(watchdog_port,1);
 	misc_deregister(&mixcomwd_miscdev);
+	release_region(watchdog_port,1);
 }
 
 module_init(mixcomwd_init);
@@ -296,5 +320,6 @@
 
 MODULE_AUTHOR("Gergely Madarasz <gorgo@itc.hu>");
 MODULE_DESCRIPTION("MixCom Watchdog driver");
+MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
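
The static mixcomwd_io_info[] table replaces the two old probe loops, and its entries
are just the base addresses from the comment block plus the per-family watchdog offset:
0x180/0x280/0x380 + 0xc10 for the MixCOM cards, and 0x300..0x378 in steps of 8 + 0x04
for the FlashCOM cards.  A throwaway user-space sketch that regenerates the same port
list (an arithmetic check only; nothing here touches hardware):

	#include <stdio.h>

	int main(void)
	{
		int base;

		/* MixCOM: three bases plus the 0xc10 watchdog offset */
		for (base = 0x180; base <= 0x380; base += 0x100)
			printf("MixCOM   port 0x%04x\n", base + 0xc10);	/* 0xd90, 0xe90, 0xf90 */

		/* FlashCOM: 0x300..0x378 in 0x8 steps plus the 0x04 offset */
		for (base = 0x300; base <= 0x378; base += 0x8)
			printf("FlashCOM port 0x%04x\n", base + 0x04);	/* 0x304..0x37c */

		return 0;
	}
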
diff --git a/drivers/char/watchdog/mpcore_wdt.c b/drivers/char/watchdog/mpcore_wdt.c
index e88947f..0d2b277 100644
--- a/drivers/char/watchdog/mpcore_wdt.c
+++ b/drivers/char/watchdog/mpcore_wdt.c
@@ -328,12 +328,11 @@
 		goto err_out;
 	}
 
-	wdt = kmalloc(sizeof(struct mpcore_wdt), GFP_KERNEL);
+	wdt = kzalloc(sizeof(struct mpcore_wdt), GFP_KERNEL);
 	if (!wdt) {
 		ret = -ENOMEM;
 		goto err_out;
 	}
-	memset(wdt, 0, sizeof(struct mpcore_wdt));
 
 	wdt->dev = &dev->dev;
 	wdt->irq = platform_get_irq(dev, 0);
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index 1e7a671..0f3fd6c 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -626,12 +626,11 @@
 	maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
 
 	/* allocate memory for our device and initialize it */
-	usb_pcwd = kmalloc (sizeof(struct usb_pcwd_private), GFP_KERNEL);
+	usb_pcwd = kzalloc (sizeof(struct usb_pcwd_private), GFP_KERNEL);
 	if (usb_pcwd == NULL) {
 		printk(KERN_ERR PFX "Out of memory\n");
 		goto error;
 	}
-	memset (usb_pcwd, 0x00, sizeof (*usb_pcwd));
 
 	usb_pcwd_device = usb_pcwd;
 
diff --git a/drivers/char/watchdog/pnx4008_wdt.c b/drivers/char/watchdog/pnx4008_wdt.c
index 5991add..22f8873 100644
--- a/drivers/char/watchdog/pnx4008_wdt.c
+++ b/drivers/char/watchdog/pnx4008_wdt.c
@@ -148,10 +148,6 @@
 pnx4008_wdt_write(struct file *file, const char *data, size_t len,
 		  loff_t * ppos)
 {
-	/*  Can't seek (pwrite) on this device  */
-	if (ppos != &file->f_pos)
-		return -ESPIPE;
-
 	if (len) {
 		if (!nowayout) {
 			size_t i;
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c
index 20fa29c..50430bc 100644
--- a/drivers/char/watchdog/s3c2410_wdt.c
+++ b/drivers/char/watchdog/s3c2410_wdt.c
@@ -92,6 +92,7 @@
 
 static DECLARE_MUTEX(open_lock);
 
+static struct device    *wdt_dev;	/* platform device attached to */
 static struct resource	*wdt_mem;
 static struct resource	*wdt_irq;
 static struct clk	*wdt_clock;
@@ -180,7 +181,7 @@
 		}
 
 		if ((count / divisor) >= 0x10000) {
-			printk(KERN_ERR PFX "timeout %d too big\n", timeout);
+			dev_err(wdt_dev, "timeout %d too big\n", timeout);
 			return -EINVAL;
 		}
 	}
@@ -233,7 +234,7 @@
 	if (allow_close == CLOSE_STATE_ALLOW) {
 		s3c2410wdt_stop();
 	} else {
-		printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n");
+		dev_err(wdt_dev, "Unexpected close, not stopping watchdog\n");
 		s3c2410wdt_keepalive();
 	}
 
@@ -338,7 +339,7 @@
 
 static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
 {
-	printk(KERN_INFO PFX "Watchdog timer expired!\n");
+	dev_info(wdt_dev, "watchdog timer expired (irq)\n");
 
 	s3c2410wdt_keepalive();
 	return IRQ_HANDLED;
@@ -348,31 +349,36 @@
 static int s3c2410wdt_probe(struct platform_device *pdev)
 {
 	struct resource *res;
+	struct device *dev;
+	unsigned int wtcon;
 	int started = 0;
 	int ret;
 	int size;
 
 	DBG("%s: probe=%p\n", __FUNCTION__, pdev);
 
+	dev = &pdev->dev;
+	wdt_dev = &pdev->dev;
+
 	/* get the memory region for the watchdog timer */
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res == NULL) {
-		printk(KERN_INFO PFX "failed to get memory region resouce\n");
+		dev_err(dev, "no memory resource specified\n");
 		return -ENOENT;
 	}
 
 	size = (res->end-res->start)+1;
 	wdt_mem = request_mem_region(res->start, size, pdev->name);
 	if (wdt_mem == NULL) {
-		printk(KERN_INFO PFX "failed to get memory region\n");
+		dev_err(dev, "failed to get memory region\n");
 		ret = -ENOENT;
 		goto err_req;
 	}
 
 	wdt_base = ioremap(res->start, size);
 	if (wdt_base == 0) {
-		printk(KERN_INFO PFX "failed to ioremap() region\n");
+		dev_err(dev, "failed to ioremap() region\n");
 		ret = -EINVAL;
 		goto err_req;
 	}
@@ -381,20 +387,20 @@
 
 	wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (wdt_irq == NULL) {
-		printk(KERN_INFO PFX "failed to get irq resource\n");
+		dev_err(dev, "no irq resource specified\n");
 		ret = -ENOENT;
 		goto err_map;
 	}
 
 	ret = request_irq(wdt_irq->start, s3c2410wdt_irq, 0, pdev->name, pdev);
 	if (ret != 0) {
-		printk(KERN_INFO PFX "failed to install irq (%d)\n", ret);
+		dev_err(dev, "failed to install irq (%d)\n", ret);
 		goto err_map;
 	}
 
 	wdt_clock = clk_get(&pdev->dev, "watchdog");
 	if (IS_ERR(wdt_clock)) {
-		printk(KERN_INFO PFX "failed to find watchdog clock source\n");
+		dev_err(dev, "failed to find watchdog clock source\n");
 		ret = PTR_ERR(wdt_clock);
 		goto err_irq;
 	}
@@ -408,22 +414,22 @@
 		started = s3c2410wdt_set_heartbeat(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
 
 		if (started == 0) {
-			printk(KERN_INFO PFX "tmr_margin value out of range, default %d used\n",
+			dev_info(dev,"tmr_margin value out of range, default %d used\n",
 			       CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
 		} else {
-			printk(KERN_INFO PFX "default timer value is out of range, cannot start\n");
+			dev_info(dev, "default timer value is out of range, cannot start\n");
 		}
 	}
 
 	ret = misc_register(&s3c2410wdt_miscdev);
 	if (ret) {
-		printk (KERN_ERR PFX "cannot register miscdev on minor=%d (%d)\n",
+		dev_err(dev, "cannot register miscdev on minor=%d (%d)\n",
 			WATCHDOG_MINOR, ret);
 		goto err_clk;
 	}
 
 	if (tmr_atboot && started == 0) {
-		printk(KERN_INFO PFX "Starting Watchdog Timer\n");
+		dev_info(dev, "starting watchdog timer\n");
 		s3c2410wdt_start();
 	} else if (!tmr_atboot) {
 		/* if we're not enabling the watchdog, then ensure it is
@@ -433,6 +439,15 @@
 		s3c2410wdt_stop();
 	}
 
+	/* print out a statement of readiness */
+
+	wtcon = readl(wdt_base + S3C2410_WTCON);
+
+	dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n",
+		 (wtcon & S3C2410_WTCON_ENABLE) ?  "" : "in",
+		 (wtcon & S3C2410_WTCON_RSTEN) ? "en" : "dis",
+		 (wtcon & S3C2410_WTCON_INTEN) ? "en" : "dis");
+	
 	return 0;
 
  err_clk:
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index debf1d8..1724c41 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -3,18 +3,18 @@
 #	Copyright (c) 2003 Linux Networx
 #	Licensed and distributed under the GPL
 #
-# $Id: Kconfig,v 1.4.2.7 2005/07/08 22:05:38 dsp_llnl Exp $
-#
 
 menuconfig EDAC
-	tristate "EDAC - error detection and reporting (EXPERIMENTAL)"
+	bool "EDAC - error detection and reporting (EXPERIMENTAL)"
 	depends on HAS_IOMEM
-	depends on X86 && EXPERIMENTAL
+	depends on EXPERIMENTAL
+	depends on X86 || MIPS || PPC
 	help
 	  EDAC is designed to report errors in the core system.
 	  These are low-level errors that are reported in the CPU or
-	  supporting chipset: memory errors, cache errors, PCI errors,
-	  thermal throttling, etc..  If unsure, select 'Y'.
+	  supporting chipset or other subsystems:
+	  memory errors, cache errors, PCI errors, thermal throttling, etc..
+	  If unsure, select 'Y'.
 
 	  If this code is reporting problems on your system, please
 	  see the EDAC project web pages for more information at:
@@ -73,6 +73,14 @@
 	  Support for error detection and correction on the Intel
 	  E7520, E7525, E7320 server chipsets.
 
+config EDAC_I82443BXGX
+	tristate "Intel 82443BX/GX (440BX/GX)"
+	depends on EDAC_MM_EDAC && PCI && X86_32
+	depends on BROKEN
+	help
+	  Support for error detection and correction on the Intel
+	  82443BX/GX memory controllers (440BX/GX chipsets).
+
 config EDAC_I82875P
 	tristate "Intel 82875p (D82875P, E7210)"
 	depends on EDAC_MM_EDAC && PCI && X86_32
@@ -80,6 +88,20 @@
 	  Support for error detection and correction on the Intel
 	  DP82785P and E7210 server chipsets.
 
+config EDAC_I82975X
+	tristate "Intel 82975x (D82975x)"
+	depends on EDAC_MM_EDAC && PCI && X86
+	help
+	  Support for error detection and correction on the Intel
+	  DP82975x server chipsets.
+
+config EDAC_I3000
+	tristate "Intel 3000/3010"
+	depends on EDAC_MM_EDAC && PCI && X86_32
+	help
+	  Support for error detection and correction on the Intel
+	  3000 and 3010 server chipsets.
+
 config EDAC_I82860
 	tristate "Intel 82860"
 	depends on EDAC_MM_EDAC && PCI && X86_32
@@ -94,15 +116,20 @@
 	  Support for error detection and correction on the Radisys
 	  82600 embedded chipset.
 
-choice
-	prompt "Error detecting method"
-	default EDAC_POLL
-
-config EDAC_POLL
-	bool "Poll for errors"
+config EDAC_I5000
+	tristate "Intel Greencreek/Blackford chipset"
+	depends on EDAC_MM_EDAC && X86 && PCI
 	help
-	  Poll the chipset periodically to detect errors.
+	  Support for error detection and correction on the Intel
+	  Greencreek/Blackford chipsets.
 
-endchoice
+config EDAC_PASEMI
+	tristate "PA Semi PWRficient"
+	depends on EDAC_MM_EDAC && PCI
+	depends on PPC
+	help
+	  Support for error detection and correction on PA Semi
+	  PWRficient.
+
 
 endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 93137fd..02c09f0 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -5,14 +5,27 @@
 # This file may be distributed under the terms of the
 # GNU General Public License.
 #
-# $Id: Makefile,v 1.4.2.3 2005/07/08 22:05:38 dsp_llnl Exp $
 
 
-obj-$(CONFIG_EDAC_MM_EDAC)		+= edac_mc.o
+obj-$(CONFIG_EDAC)			:= edac_stub.o
+obj-$(CONFIG_EDAC_MM_EDAC)		+= edac_core.o
+
+edac_core-objs	:= edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o
+edac_core-objs	+= edac_module.o edac_device_sysfs.o
+
+ifdef CONFIG_PCI
+edac_core-objs	+= edac_pci.o edac_pci_sysfs.o
+endif
+
 obj-$(CONFIG_EDAC_AMD76X)		+= amd76x_edac.o
+obj-$(CONFIG_EDAC_I5000)		+= i5000_edac.o
 obj-$(CONFIG_EDAC_E7XXX)		+= e7xxx_edac.o
 obj-$(CONFIG_EDAC_E752X)		+= e752x_edac.o
+obj-$(CONFIG_EDAC_I82443BXGX)		+= i82443bxgx_edac.o
 obj-$(CONFIG_EDAC_I82875P)		+= i82875p_edac.o
+obj-$(CONFIG_EDAC_I82975X)		+= i82975x_edac.o
+obj-$(CONFIG_EDAC_I3000)		+= i3000_edac.o
 obj-$(CONFIG_EDAC_I82860)		+= i82860_edac.o
 obj-$(CONFIG_EDAC_R82600)		+= r82600_edac.o
+obj-$(CONFIG_EDAC_PASEMI)		+= pasemi_edac.o
 
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index f79f6b5..f220754 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -17,9 +17,9 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
-#include "edac_mc.h"
+#include "edac_core.h"
 
-#define AMD76X_REVISION	" Ver: 2.0.1 "  __DATE__
+#define AMD76X_REVISION	" Ver: 2.0.2 "  __DATE__
 #define EDAC_MOD_STR	"amd76x_edac"
 
 #define amd76x_printk(level, fmt, arg...) \
@@ -86,13 +86,13 @@
 
 static const struct amd76x_dev_info amd76x_devs[] = {
 	[AMD761] = {
-		.ctl_name = "AMD761"
-	},
+		.ctl_name = "AMD761"},
 	[AMD762] = {
-		.ctl_name = "AMD762"
-	},
+		.ctl_name = "AMD762"},
 };
 
+static struct edac_pci_ctl_info *amd76x_pci;
+
 /**
  *	amd76x_get_error_info	-	fetch error information
  *	@mci: Memory controller
@@ -102,21 +102,21 @@
  *	on the chip so that further errors will be reported
  */
 static void amd76x_get_error_info(struct mem_ctl_info *mci,
-		struct amd76x_error_info *info)
+				struct amd76x_error_info *info)
 {
 	struct pci_dev *pdev;
 
 	pdev = to_pci_dev(mci->dev);
 	pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS,
-				&info->ecc_mode_status);
+			&info->ecc_mode_status);
 
 	if (info->ecc_mode_status & BIT(8))
 		pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
-				(u32) BIT(8), (u32) BIT(8));
+				 (u32) BIT(8), (u32) BIT(8));
 
 	if (info->ecc_mode_status & BIT(9))
 		pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
-				(u32) BIT(9), (u32) BIT(9));
+				 (u32) BIT(9), (u32) BIT(9));
 }
 
 /**
@@ -130,7 +130,8 @@
  *	then attempt to handle and clean up after the error
  */
 static int amd76x_process_error_info(struct mem_ctl_info *mci,
-		struct amd76x_error_info *info, int handle_errors)
+				struct amd76x_error_info *info,
+				int handle_errors)
 {
 	int error_found;
 	u32 row;
@@ -138,7 +139,7 @@
 	error_found = 0;
 
 	/*
-	 *	Check for an uncorrectable error
+	 *      Check for an uncorrectable error
 	 */
 	if (info->ecc_mode_status & BIT(8)) {
 		error_found = 1;
@@ -146,12 +147,12 @@
 		if (handle_errors) {
 			row = (info->ecc_mode_status >> 4) & 0xf;
 			edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
-				row, mci->ctl_name);
+					row, mci->ctl_name);
 		}
 	}
 
 	/*
-	 *	Check for a correctable error
+	 *      Check for a correctable error
 	 */
 	if (info->ecc_mode_status & BIT(9)) {
 		error_found = 1;
@@ -159,7 +160,7 @@
 		if (handle_errors) {
 			row = info->ecc_mode_status & 0xf;
 			edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
-				0, row, 0, mci->ctl_name);
+					0, row, 0, mci->ctl_name);
 		}
 	}
 
@@ -182,7 +183,7 @@
 }
 
 static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
-		enum edac_type edac_mode)
+			enum edac_type edac_mode)
 {
 	struct csrow_info *csrow;
 	u32 mba, mba_base, mba_mask, dms;
@@ -193,8 +194,7 @@
 
 		/* find the DRAM Chip Select Base address and mask */
 		pci_read_config_dword(pdev,
-				      AMD76X_MEM_BASE_ADDR + (index * 4),
-				      &mba);
+				AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
 
 		if (!(mba & BIT(0)))
 			continue;
@@ -238,7 +238,7 @@
 	debugf0("%s()\n", __func__);
 	pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
 	ems_mode = (ems >> 10) & 0x3;
-	mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
+	mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0);
 
 	if (mci == NULL) {
 		return -ENOMEM;
@@ -249,24 +249,36 @@
 	mci->mtype_cap = MEM_FLAG_RDDR;
 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
 	mci->edac_cap = ems_mode ?
-			(EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
+		(EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
 	mci->mod_name = EDAC_MOD_STR;
 	mci->mod_ver = AMD76X_REVISION;
 	mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
 	mci->edac_check = amd76x_check;
 	mci->ctl_page_to_phys = NULL;
 
 	amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]);
-	amd76x_get_error_info(mci, &discard);  /* clear counters */
+	amd76x_get_error_info(mci, &discard);	/* clear counters */
 
 	/* Here we assume that we will never see multiple instances of this
 	 * type of memory controller.  The ID is therefore hardcoded to 0.
 	 */
-	if (edac_mc_add_mc(mci,0)) {
+	if (edac_mc_add_mc(mci)) {
 		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
 		goto fail;
 	}
 
+	/* allocating generic PCI control info */
+	amd76x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!amd76x_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
 	/* get this far and it's successful */
 	debugf3("%s(): success\n", __func__);
 	return 0;
@@ -278,7 +290,7 @@
 
 /* returns count (>= 0), or negative on error */
 static int __devinit amd76x_init_one(struct pci_dev *pdev,
-		const struct pci_device_id *ent)
+				const struct pci_device_id *ent)
 {
 	debugf0("%s()\n", __func__);
 
@@ -300,6 +312,9 @@
 
 	debugf0("%s()\n", __func__);
 
+	if (amd76x_pci)
+		edac_pci_release_generic_ctl(amd76x_pci);
+
 	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
 		return;
 
@@ -308,16 +323,14 @@
 
 static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
 	{
-		PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		AMD762
-	},
+	 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 AMD762},
 	{
-		PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		AMD761
-	},
+	 PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 AMD761},
 	{
-		0,
-	}	/* 0 terminated list. */
+	 0,
+	 }			/* 0 terminated list. */
 };
 
 MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 8bcc887..3bba224 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -22,13 +22,16 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
-#include "edac_mc.h"
+#include <linux/edac.h>
+#include "edac_core.h"
 
-#define E752X_REVISION	" Ver: 2.0.1 " __DATE__
+#define E752X_REVISION	" Ver: 2.0.2 " __DATE__
 #define EDAC_MOD_STR	"e752x_edac"
 
 static int force_function_unhide;
 
+static struct edac_pci_ctl_info *e752x_pci;
+
 #define e752x_printk(level, fmt, arg...) \
 	edac_printk(level, "e752x", fmt, ##arg)
 
@@ -203,25 +206,22 @@
 	[E7520] = {
 		.err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
 		.ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
-		.ctl_name = "E7520"
-	},
+		.ctl_name = "E7520"},
 	[E7525] = {
 		.err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
 		.ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
-		.ctl_name = "E7525"
-	},
+		.ctl_name = "E7525"},
 	[E7320] = {
 		.err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
 		.ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
-		.ctl_name = "E7320"
-	},
+		.ctl_name = "E7320"},
 };
 
 static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
-		unsigned long page)
+				unsigned long page)
 {
 	u32 remap;
-	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
 
 	debugf3("%s()\n", __func__);
 
@@ -241,13 +241,13 @@
 }
 
 static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
-		u32 sec1_add, u16 sec1_syndrome)
+			u32 sec1_add, u16 sec1_syndrome)
 {
 	u32 page;
 	int row;
 	int channel;
 	int i;
-	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
 
 	debugf3("%s()\n", __func__);
 
@@ -261,7 +261,8 @@
 		e752x_printk(KERN_WARNING,
 			"Test row %d Table %d %d %d %d %d %d %d %d\n", row,
 			pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
-			pvt->map[4], pvt->map[5], pvt->map[6], pvt->map[7]);
+			pvt->map[4], pvt->map[5], pvt->map[6],
+			pvt->map[7]);
 
 		/* test for channel remapping */
 		for (i = 0; i < 8; i++) {
@@ -275,24 +276,22 @@
 			row = i;
 		else
 			e752x_mc_printk(mci, KERN_WARNING,
-				"row %d not found in remap table\n", row);
+					"row %d not found in remap table\n",
+					row);
 	} else
 		row = edac_mc_find_csrow_by_page(mci, page);
 
 	/* 0 = channel A, 1 = channel B */
 	channel = !(error_one & 1);
 
-	if (!pvt->map_type)
-		row = 7 - row;
-
 	/* e752x mc reads 34:6 of the DRAM linear address */
 	edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
 			sec1_syndrome, row, channel, "e752x CE");
 }
 
 static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
-		u32 sec1_add, u16 sec1_syndrome, int *error_found,
-		int handle_error)
+			u32 sec1_add, u16 sec1_syndrome, int *error_found,
+			int handle_error)
 {
 	*error_found = 1;
 
@@ -301,11 +300,11 @@
 }
 
 static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
-		u32 ded_add, u32 scrb_add)
+			u32 ded_add, u32 scrb_add)
 {
 	u32 error_2b, block_page;
 	int row;
-	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
 
 	debugf3("%s()\n", __func__);
 
@@ -316,14 +315,14 @@
 		block_page = error_2b >> (PAGE_SHIFT - 4);
 
 		row = pvt->mc_symmetric ?
-			/* chip select are bits 14 & 13 */
+		/* chip select are bits 14 & 13 */
 			((block_page >> 1) & 3) :
 			edac_mc_find_csrow_by_page(mci, block_page);
 
 		/* e752x mc reads 34:6 of the DRAM linear address */
 		edac_mc_handle_ue(mci, block_page,
-					offset_in_page(error_2b << 4),
-					row, "e752x UE from Read");
+				offset_in_page(error_2b << 4),
+				row, "e752x UE from Read");
 	}
 	if (error_one & 0x0404) {
 		error_2b = scrb_add;
@@ -332,19 +331,20 @@
 		block_page = error_2b >> (PAGE_SHIFT - 4);
 
 		row = pvt->mc_symmetric ?
-			/* chip select are bits 14 & 13 */
+		/* chip select are bits 14 & 13 */
 			((block_page >> 1) & 3) :
 			edac_mc_find_csrow_by_page(mci, block_page);
 
 		/* e752x mc reads 34:6 of the DRAM linear address */
 		edac_mc_handle_ue(mci, block_page,
-					offset_in_page(error_2b << 4),
-					row, "e752x UE from Scruber");
+				offset_in_page(error_2b << 4),
+				row, "e752x UE from Scruber");
 	}
 }
 
 static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
-		u32 ded_add, u32 scrb_add, int *error_found, int handle_error)
+			u32 ded_add, u32 scrb_add, int *error_found,
+			int handle_error)
 {
 	*error_found = 1;
 
@@ -353,7 +353,7 @@
 }
 
 static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
-		int *error_found, int handle_error)
+					 int *error_found, int handle_error)
 {
 	*error_found = 1;
 
@@ -365,24 +365,24 @@
 }
 
 static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
-		u32 retry_add)
+				 u32 retry_add)
 {
 	u32 error_1b, page;
 	int row;
-	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
 
 	error_1b = retry_add;
-	page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
-	row = pvt->mc_symmetric ?
-		((page >> 1) & 3) : /* chip select are bits 14 & 13 */
+	page = error_1b >> (PAGE_SHIFT - 4);	/* convert the addr to 4k page */
+	row = pvt->mc_symmetric ? ((page >> 1) & 3) :	/* chip select are bits 14 & 13 */
 		edac_mc_find_csrow_by_page(mci, page);
 	e752x_mc_printk(mci, KERN_WARNING,
-		"CE page 0x%lx, row %d : Memory read retry\n",
-		(long unsigned int) page, row);
+			"CE page 0x%lx, row %d : Memory read retry\n",
+			(long unsigned int)page, row);
 }
 
 static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
-		u32 retry_add, int *error_found, int handle_error)
+				u32 retry_add, int *error_found,
+				int handle_error)
 {
 	*error_found = 1;
 
@@ -391,7 +391,7 @@
 }
 
 static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
-		int *error_found, int handle_error)
+					int *error_found, int handle_error)
 {
 	*error_found = 1;
 
@@ -420,7 +420,7 @@
 }
 
 static inline void global_error(int fatal, u32 errors, int *error_found,
-		int handle_error)
+				int handle_error)
 {
 	*error_found = 1;
 
@@ -447,7 +447,7 @@
 }
 
 static inline void hub_error(int fatal, u8 errors, int *error_found,
-		int handle_error)
+			int handle_error)
 {
 	*error_found = 1;
 
@@ -505,7 +505,7 @@
 }
 
 static inline void sysbus_error(int fatal, u32 errors, int *error_found,
-		int handle_error)
+				int handle_error)
 {
 	*error_found = 1;
 
@@ -514,7 +514,7 @@
 }
 
 static void e752x_check_hub_interface(struct e752x_error_info *info,
-		int *error_found, int handle_error)
+				int *error_found, int handle_error)
 {
 	u8 stat8;
 
@@ -522,33 +522,32 @@
 
 	stat8 = info->hi_ferr;
 
-	if(stat8 & 0x7f) { /* Error, so process */
-		stat8 &= 0x7f;
-
-		if(stat8 & 0x2b)
-			hub_error(1, stat8 & 0x2b, error_found, handle_error);
-
-		if(stat8 & 0x54)
-			hub_error(0, stat8 & 0x54, error_found, handle_error);
-	}
-
-	//pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
-
-	stat8 = info->hi_nerr;
-
-	if(stat8 & 0x7f) { /* Error, so process */
+	if (stat8 & 0x7f) {	/* Error, so process */
 		stat8 &= 0x7f;
 
 		if (stat8 & 0x2b)
 			hub_error(1, stat8 & 0x2b, error_found, handle_error);
 
-		if(stat8 & 0x54)
+		if (stat8 & 0x54)
+			hub_error(0, stat8 & 0x54, error_found, handle_error);
+	}
+	//pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
+
+	stat8 = info->hi_nerr;
+
+	if (stat8 & 0x7f) {	/* Error, so process */
+		stat8 &= 0x7f;
+
+		if (stat8 & 0x2b)
+			hub_error(1, stat8 & 0x2b, error_found, handle_error);
+
+		if (stat8 & 0x54)
 			hub_error(0, stat8 & 0x54, error_found, handle_error);
 	}
 }
 
 static void e752x_check_sysbus(struct e752x_error_info *info,
-		int *error_found, int handle_error)
+			int *error_found, int handle_error)
 {
 	u32 stat32, error32;
 
@@ -556,47 +555,47 @@
 	stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
 
 	if (stat32 == 0)
-		return;  /* no errors */
+		return;		/* no errors */
 
 	error32 = (stat32 >> 16) & 0x3ff;
 	stat32 = stat32 & 0x3ff;
 
-	if(stat32 & 0x087)
+	if (stat32 & 0x087)
 		sysbus_error(1, stat32 & 0x087, error_found, handle_error);
 
-	if(stat32 & 0x378)
+	if (stat32 & 0x378)
 		sysbus_error(0, stat32 & 0x378, error_found, handle_error);
 
-	if(error32 & 0x087)
+	if (error32 & 0x087)
 		sysbus_error(1, error32 & 0x087, error_found, handle_error);
 
-	if(error32 & 0x378)
+	if (error32 & 0x378)
 		sysbus_error(0, error32 & 0x378, error_found, handle_error);
 }
 
-static void e752x_check_membuf (struct e752x_error_info *info,
-		int *error_found, int handle_error)
+static void e752x_check_membuf(struct e752x_error_info *info,
+			int *error_found, int handle_error)
 {
 	u8 stat8;
 
 	stat8 = info->buf_ferr;
 
-	if (stat8 & 0x0f) { /* Error, so process */
+	if (stat8 & 0x0f) {	/* Error, so process */
 		stat8 &= 0x0f;
 		membuf_error(stat8, error_found, handle_error);
 	}
 
 	stat8 = info->buf_nerr;
 
-	if (stat8 & 0x0f) { /* Error, so process */
+	if (stat8 & 0x0f) {	/* Error, so process */
 		stat8 &= 0x0f;
 		membuf_error(stat8, error_found, handle_error);
 	}
 }
 
-static void e752x_check_dram (struct mem_ctl_info *mci,
-		struct e752x_error_info *info, int *error_found,
-		int handle_error)
+static void e752x_check_dram(struct mem_ctl_info *mci,
+			struct e752x_error_info *info, int *error_found,
+			int handle_error)
 {
 	u16 error_one, error_next;
 
@@ -604,55 +603,52 @@
 	error_next = info->dram_nerr;
 
 	/* decode and report errors */
-	if(error_one & 0x0101)  /* check first error correctable */
+	if (error_one & 0x0101)	/* check first error correctable */
 		process_ce(mci, error_one, info->dram_sec1_add,
-			   info->dram_sec1_syndrome, error_found,
-			   handle_error);
+			info->dram_sec1_syndrome, error_found, handle_error);
 
-	if(error_next & 0x0101)  /* check next error correctable */
+	if (error_next & 0x0101)	/* check next error correctable */
 		process_ce(mci, error_next, info->dram_sec2_add,
-			   info->dram_sec2_syndrome, error_found,
-			   handle_error);
+			info->dram_sec2_syndrome, error_found, handle_error);
 
-	if(error_one & 0x4040)
+	if (error_one & 0x4040)
 		process_ue_no_info_wr(mci, error_found, handle_error);
 
-	if(error_next & 0x4040)
+	if (error_next & 0x4040)
 		process_ue_no_info_wr(mci, error_found, handle_error);
 
-	if(error_one & 0x2020)
+	if (error_one & 0x2020)
 		process_ded_retry(mci, error_one, info->dram_retr_add,
-				  error_found, handle_error);
+				error_found, handle_error);
 
-	if(error_next & 0x2020)
+	if (error_next & 0x2020)
 		process_ded_retry(mci, error_next, info->dram_retr_add,
-				  error_found, handle_error);
+				error_found, handle_error);
 
-	if(error_one & 0x0808)
-		process_threshold_ce(mci, error_one, error_found,
-				     handle_error);
+	if (error_one & 0x0808)
+		process_threshold_ce(mci, error_one, error_found, handle_error);
 
-	if(error_next & 0x0808)
+	if (error_next & 0x0808)
 		process_threshold_ce(mci, error_next, error_found,
-				     handle_error);
+				handle_error);
 
-	if(error_one & 0x0606)
+	if (error_one & 0x0606)
 		process_ue(mci, error_one, info->dram_ded_add,
-			   info->dram_scrb_add, error_found, handle_error);
+			info->dram_scrb_add, error_found, handle_error);
 
-	if(error_next & 0x0606)
+	if (error_next & 0x0606)
 		process_ue(mci, error_next, info->dram_ded_add,
-			   info->dram_scrb_add, error_found, handle_error);
+			info->dram_scrb_add, error_found, handle_error);
 }
 
-static void e752x_get_error_info (struct mem_ctl_info *mci,
-		struct e752x_error_info *info)
+static void e752x_get_error_info(struct mem_ctl_info *mci,
+				 struct e752x_error_info *info)
 {
 	struct pci_dev *dev;
 	struct e752x_pvt *pvt;
 
 	memset(info, 0, sizeof(*info));
-	pvt = (struct e752x_pvt *) mci->pvt_info;
+	pvt = (struct e752x_pvt *)mci->pvt_info;
 	dev = pvt->dev_d0f1;
 	pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
 
@@ -661,8 +657,7 @@
 		pci_read_config_word(dev, E752X_SYSBUS_FERR,
 				&info->sysbus_ferr);
 		pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
-		pci_read_config_word(dev, E752X_DRAM_FERR,
-				&info->dram_ferr);
+		pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
 		pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
 				&info->dram_sec1_add);
 		pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
@@ -688,7 +683,7 @@
 
 		if (info->dram_ferr)
 			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
-					info->dram_ferr, info->dram_ferr);
+					 info->dram_ferr, info->dram_ferr);
 
 		pci_write_config_dword(dev, E752X_FERR_GLOBAL,
 				info->ferr_global);
@@ -701,8 +696,7 @@
 		pci_read_config_word(dev, E752X_SYSBUS_NERR,
 				&info->sysbus_nerr);
 		pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
-		pci_read_config_word(dev, E752X_DRAM_NERR,
-				&info->dram_nerr);
+		pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
 		pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
 				&info->dram_sec2_add);
 		pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
@@ -722,15 +716,16 @@
 
 		if (info->dram_nerr)
 			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
-					info->dram_nerr, info->dram_nerr);
+					 info->dram_nerr, info->dram_nerr);
 
 		pci_write_config_dword(dev, E752X_NERR_GLOBAL,
 				info->nerr_global);
 	}
 }
 
-static int e752x_process_error_info (struct mem_ctl_info *mci,
-		struct e752x_error_info *info, int handle_errors)
+static int e752x_process_error_info(struct mem_ctl_info *mci,
+				struct e752x_error_info *info,
+				int handle_errors)
 {
 	u32 error32, stat32;
 	int error_found;
@@ -776,26 +771,38 @@
 	return (((ddrcsr >> 12) & 3) == 3);
 }
 
+/* Remap csrow index numbers if map_type is "reverse" */
+static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
+{
+	struct e752x_pvt *pvt = mci->pvt_info;
+
+	if (!pvt->map_type)
+		return (7 - index);
+
+	return (index);
+}
+
 static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
-		u16 ddrcsr)
+			u16 ddrcsr)
 {
 	struct csrow_info *csrow;
 	unsigned long last_cumul_size;
 	int index, mem_dev, drc_chan;
-	int drc_drbg;  /* DRB granularity 0=64mb, 1=128mb */
-	int drc_ddim;  /* DRAM Data Integrity Mode 0=none, 2=edac */
+	int drc_drbg;		/* DRB granularity 0=64mb, 1=128mb */
+	int drc_ddim;		/* DRAM Data Integrity Mode 0=none, 2=edac */
 	u8 value;
 	u32 dra, drc, cumul_size;
 
 	dra = 0;
-	for (index=0; index < 4; index++) {
+	for (index = 0; index < 4; index++) {
 		u8 dra_reg;
-		pci_read_config_byte(pdev, E752X_DRA+index, &dra_reg);
+		pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
 		dra |= dra_reg << (index * 8);
 	}
 	pci_read_config_dword(pdev, E752X_DRC, &drc);
 	drc_chan = dual_channel_active(ddrcsr);
-	drc_drbg = drc_chan + 1;  /* 128 in dual mode, 64 in single */
+	drc_drbg = drc_chan + 1;	/* 128 in dual mode, 64 in single */
 	drc_ddim = (drc >> 20) & 0x3;
 
 	/* The dram row boundary (DRB) reg values are boundary address for
@@ -806,7 +813,7 @@
 	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
 		/* mem_dev 0=x8, 1=x4 */
 		mem_dev = (dra >> (index * 4 + 2)) & 0x3;
-		csrow = &mci->csrows[index];
+		csrow = &mci->csrows[remap_csrow_index(mci, index)];
 
 		mem_dev = (mem_dev == 2);
 		pci_read_config_byte(pdev, E752X_DRB + index, &value);
@@ -843,10 +850,10 @@
 }
 
 static void e752x_init_mem_map_table(struct pci_dev *pdev,
-		struct e752x_pvt *pvt)
+				struct e752x_pvt *pvt)
 {
 	int index;
-	u8 value, last, row, stat8;
+	u8 value, last, row;
 
 	last = 0;
 	row = 0;
@@ -858,7 +865,7 @@
 			/* no dimm in the slot, so flag it as empty */
 			pvt->map[index] = 0xff;
 			pvt->map[index + 1] = 0xff;
-		} else {        /* there is a dimm in the slot */
+		} else {	/* there is a dimm in the slot */
 			pvt->map[index] = row;
 			row++;
 			last = value;
@@ -866,31 +873,25 @@
 			 * sided
 			 */
 			pci_read_config_byte(pdev, E752X_DRB + index + 1,
-					     &value);
-			pvt->map[index + 1] = (value == last) ?
-			    0xff :      /* the dimm is single sided,
-					   so flag as empty */
-			    row;        /* this is a double sided dimm
-					   to save the next row # */
+					&value);
+
+			/* a single sided dimm is flagged as empty (0xff);
+			 * a double sided dimm saves the next row #
+			 */
+			pvt->map[index + 1] = (value == last) ? 0xff : row;
 			row++;
 			last = value;
 		}
 	}
-
-	/* set the map type.  1 = normal, 0 = reversed */
-	pci_read_config_byte(pdev, E752X_DRM, &stat8);
-	pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
 }
 
 /* Return 0 on success or 1 on failure. */
 static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
-		struct e752x_pvt *pvt)
+			struct e752x_pvt *pvt)
 {
 	struct pci_dev *dev;
 
 	pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
-					pvt->dev_info->err_dev,
-					pvt->bridge_ck);
+					pvt->dev_info->err_dev, pvt->bridge_ck);
 
 	if (pvt->bridge_ck == NULL)
 		pvt->bridge_ck = pci_scan_single_device(pdev->bus,
@@ -898,13 +899,13 @@
 
 	if (pvt->bridge_ck == NULL) {
 		e752x_printk(KERN_ERR, "error reporting device not found:"
-		       "vendor %x device 0x%x (broken BIOS?)\n",
-		       PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
+			"vendor %x device 0x%x (broken BIOS?)\n",
+			PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
 		return 1;
 	}
 
 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev,
-			     NULL);
+			NULL);
 
 	if (dev == NULL)
 		goto fail;
@@ -942,12 +943,22 @@
 	struct mem_ctl_info *mci;
 	struct e752x_pvt *pvt;
 	u16 ddrcsr;
-	int drc_chan;	/* Number of channels 0=1chan,1=2chan */
+	int drc_chan;		/* Number of channels 0=1chan,1=2chan */
 	struct e752x_error_info discard;
 
 	debugf0("%s(): mci\n", __func__);
 	debugf0("Starting Probe1\n");
 
+	/* make sure error reporting method is sane */
+	switch (edac_op_state) {
+	case EDAC_OPSTATE_POLL:
+	case EDAC_OPSTATE_NMI:
+		break;
+	default:
+		edac_op_state = EDAC_OPSTATE_POLL;
+		break;
+	}
+
 	/* check to see if device 0 function 1 is enabled; if it isn't, we
 	 * assume the BIOS has reserved it for a reason and is expecting
 	 * exclusive access, we take care not to violate that assumption and
@@ -966,7 +977,7 @@
 	/* Dual channel = 1, Single channel = 0 */
 	drc_chan = dual_channel_active(ddrcsr);
 
-	mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1);
+	mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
 
 	if (mci == NULL) {
 		return -ENOMEM;
@@ -975,14 +986,14 @@
 	debugf3("%s(): init mci\n", __func__);
 	mci->mtype_cap = MEM_FLAG_RDDR;
 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
-	    EDAC_FLAG_S4ECD4ED;
+		EDAC_FLAG_S4ECD4ED;
 	/* FIXME - what if different memory types are in different csrows? */
 	mci->mod_name = EDAC_MOD_STR;
 	mci->mod_ver = E752X_REVISION;
 	mci->dev = &pdev->dev;
 
 	debugf3("%s(): init pvt\n", __func__);
-	pvt = (struct e752x_pvt *) mci->pvt_info;
+	pvt = (struct e752x_pvt *)mci->pvt_info;
 	pvt->dev_info = &e752x_devs[dev_idx];
 	pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
 
@@ -993,16 +1004,20 @@
 
 	debugf3("%s(): more mci init\n", __func__);
 	mci->ctl_name = pvt->dev_info->ctl_name;
+	mci->dev_name = pci_name(pdev);
 	mci->edac_check = e752x_check;
 	mci->ctl_page_to_phys = ctl_page_to_phys;
 
-	e752x_init_csrows(mci, pdev, ddrcsr);
-	e752x_init_mem_map_table(pdev, pvt);
-
-	/* set the map type.  1 = normal, 0 = reversed */
+	/* set the map type.  1 = normal, 0 = reversed
+	 * Must be set before e752x_init_csrows in case csrow mapping
+	 * is reversed.
+	 */
 	pci_read_config_byte(pdev, E752X_DRM, &stat8);
 	pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
 
+	e752x_init_csrows(mci, pdev, ddrcsr);
+	e752x_init_mem_map_table(pdev, pvt);
+
 	mci->edac_cap |= EDAC_FLAG_NONE;
 	debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
 
@@ -1014,19 +1029,29 @@
 	pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
 	pvt->remaplimit = ((u32) pci_data) << 14;
 	e752x_printk(KERN_INFO,
-		"tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
-		pvt->remapbase, pvt->remaplimit);
+			"tolm = %x, remapbase = %x, remaplimit = %x\n",
+			pvt->tolm, pvt->remapbase, pvt->remaplimit);
 
 	/* Here we assume that we will never see multiple instances of this
 	 * type of memory controller.  The ID is therefore hardcoded to 0.
 	 */
-	if (edac_mc_add_mc(mci,0)) {
+	if (edac_mc_add_mc(mci)) {
 		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
 		goto fail;
 	}
 
 	e752x_init_error_reporting_regs(pvt);
-	e752x_get_error_info(mci, &discard); /* clear other MCH errors */
+	e752x_get_error_info(mci, &discard);	/* clear other MCH errors */
+
+	/* allocating generic PCI control info */
+	e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!e752x_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n", __func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
 
 	/* get this far and it's successful */
 	debugf3("%s(): success\n", __func__);
@@ -1043,12 +1068,12 @@
 
 /* returns count (>= 0), or negative on error */
 static int __devinit e752x_init_one(struct pci_dev *pdev,
-		const struct pci_device_id *ent)
+				const struct pci_device_id *ent)
 {
 	debugf0("%s()\n", __func__);
 
 	/* wake up and enable device */
-	if(pci_enable_device(pdev) < 0)
+	if (pci_enable_device(pdev) < 0)
 		return -EIO;
 
 	return e752x_probe1(pdev, ent->driver_data);
@@ -1061,10 +1086,13 @@
 
 	debugf0("%s()\n", __func__);
 
+	if (e752x_pci)
+		edac_pci_release_generic_ctl(e752x_pci);
+
 	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
 		return;
 
-	pvt = (struct e752x_pvt *) mci->pvt_info;
+	pvt = (struct e752x_pvt *)mci->pvt_info;
 	pci_dev_put(pvt->dev_d0f0);
 	pci_dev_put(pvt->dev_d0f1);
 	pci_dev_put(pvt->bridge_ck);
@@ -1073,20 +1101,17 @@
 
 static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
 	{
-		PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		E7520
-	},
+	 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 E7520},
 	{
-		PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		E7525
-	},
+	 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 E7525},
 	{
-		PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		E7320
-	},
+	 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 E7320},
 	{
-		0,
-	}	/* 0 terminated list. */
+	 0,
+	 }			/* 0 terminated list. */
 };
 
 MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
@@ -1122,5 +1147,6 @@
 
 module_param(force_function_unhide, int, 0444);
 MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
-" 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access");
-
+		 " 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 310d91b..96ecc49 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -27,9 +27,10 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
-#include "edac_mc.h"
+#include <linux/edac.h>
+#include "edac_core.h"
 
-#define	E7XXX_REVISION " Ver: 2.0.1 " __DATE__
+#define	E7XXX_REVISION " Ver: 2.0.2 " __DATE__
 #define	EDAC_MOD_STR	"e7xxx_edac"
 
 #define e7xxx_printk(level, fmt, arg...) \
@@ -143,23 +144,21 @@
 	u32 dram_uelog_add;
 };
 
+static struct edac_pci_ctl_info *e7xxx_pci;
+
 static const struct e7xxx_dev_info e7xxx_devs[] = {
 	[E7500] = {
 		.err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
-		.ctl_name = "E7500"
-	},
+		.ctl_name = "E7500"},
 	[E7501] = {
 		.err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
-		.ctl_name = "E7501"
-	},
+		.ctl_name = "E7501"},
 	[E7505] = {
 		.err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
-		.ctl_name = "E7505"
-	},
+		.ctl_name = "E7505"},
 	[E7205] = {
 		.err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
-		.ctl_name = "E7205"
-	},
+		.ctl_name = "E7205"},
 };
 
 /* FIXME - is this valid for both SECDED and S4ECD4ED? */
@@ -180,15 +179,15 @@
 }
 
 static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
-		unsigned long page)
+				unsigned long page)
 {
 	u32 remap;
-	struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info;
+	struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info;
 
 	debugf3("%s()\n", __func__);
 
 	if ((page < pvt->tolm) ||
-			((page >= 0x100000) && (page < pvt->remapbase)))
+		((page >= 0x100000) && (page < pvt->remapbase)))
 		return page;
 
 	remap = (page - pvt->tolm) + pvt->remapbase;
@@ -200,8 +199,7 @@
 	return pvt->tolm - 1;
 }
 
-static void process_ce(struct mem_ctl_info *mci,
-		struct e7xxx_error_info *info)
+static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
 {
 	u32 error_1b, page;
 	u16 syndrome;
@@ -212,7 +210,7 @@
 	/* read the error address */
 	error_1b = info->dram_celog_add;
 	/* FIXME - should use PAGE_SHIFT */
-	page = error_1b >> 6;  /* convert the address to 4k page */
+	page = error_1b >> 6;	/* convert the address to 4k page */
 	/* read the syndrome */
 	syndrome = info->dram_celog_syndrome;
 	/* FIXME - check for -1 */
@@ -228,8 +226,7 @@
 	edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
 }
 
-static void process_ue(struct mem_ctl_info *mci,
-		struct e7xxx_error_info *info)
+static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
 {
 	u32 error_2b, block_page;
 	int row;
@@ -238,7 +235,7 @@
 	/* read the error address */
 	error_2b = info->dram_uelog_add;
 	/* FIXME - should use PAGE_SHIFT */
-	block_page = error_2b >> 6;  /* convert to 4k address */
+	block_page = error_2b >> 6;	/* convert to 4k address */
 	row = edac_mc_find_csrow_by_page(mci, block_page);
 	edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
 }
@@ -249,16 +246,14 @@
 	edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
 }
 
-static void e7xxx_get_error_info (struct mem_ctl_info *mci,
-		struct e7xxx_error_info *info)
+static void e7xxx_get_error_info(struct mem_ctl_info *mci,
+				 struct e7xxx_error_info *info)
 {
 	struct e7xxx_pvt *pvt;
 
-	pvt = (struct e7xxx_pvt *) mci->pvt_info;
-	pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR,
-			&info->dram_ferr);
-	pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR,
-			&info->dram_nerr);
+	pvt = (struct e7xxx_pvt *)mci->pvt_info;
+	pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, &info->dram_ferr);
+	pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, &info->dram_nerr);
 
 	if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
 		pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
@@ -279,8 +274,9 @@
 		pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
 }
 
-static int e7xxx_process_error_info (struct mem_ctl_info *mci,
-		struct e7xxx_error_info *info, int handle_errors)
+static int e7xxx_process_error_info(struct mem_ctl_info *mci,
+				struct e7xxx_error_info *info,
+				int handle_errors)
 {
 	int error_found;
 
@@ -341,7 +337,6 @@
 	return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1;
 }
 
-
 /* Return DRB granularity (0=32mb, 1=64mb). */
 static inline int drb_granularity(u32 drc, int dev_idx)
 {
@@ -349,9 +344,8 @@
 	return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1;
 }
 
-
 static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
-		int dev_idx, u32 drc)
+			int dev_idx, u32 drc)
 {
 	unsigned long last_cumul_size;
 	int index;
@@ -419,10 +413,21 @@
 	struct e7xxx_error_info discard;
 
 	debugf0("%s(): mci\n", __func__);
+
+	/* make sure error reporting method is sane */
+	switch (edac_op_state) {
+	case EDAC_OPSTATE_POLL:
+	case EDAC_OPSTATE_NMI:
+		break;
+	default:
+		edac_op_state = EDAC_OPSTATE_POLL;
+		break;
+	}
+
 	pci_read_config_dword(pdev, E7XXX_DRC, &drc);
 
 	drc_chan = dual_channel_active(drc, dev_idx);
-	mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
+	mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0);
 
 	if (mci == NULL)
 		return -ENOMEM;
@@ -430,17 +435,16 @@
 	debugf3("%s(): init mci\n", __func__);
 	mci->mtype_cap = MEM_FLAG_RDDR;
 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
-			EDAC_FLAG_S4ECD4ED;
+		EDAC_FLAG_S4ECD4ED;
 	/* FIXME - what if different memory types are in different csrows? */
 	mci->mod_name = EDAC_MOD_STR;
 	mci->mod_ver = E7XXX_REVISION;
 	mci->dev = &pdev->dev;
 	debugf3("%s(): init pvt\n", __func__);
-	pvt = (struct e7xxx_pvt *) mci->pvt_info;
+	pvt = (struct e7xxx_pvt *)mci->pvt_info;
 	pvt->dev_info = &e7xxx_devs[dev_idx];
 	pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
-					pvt->dev_info->err_dev,
-					pvt->bridge_ck);
+					pvt->dev_info->err_dev, pvt->bridge_ck);
 
 	if (!pvt->bridge_ck) {
 		e7xxx_printk(KERN_ERR, "error reporting device not found:"
@@ -451,6 +455,7 @@
 
 	debugf3("%s(): more mci init\n", __func__);
 	mci->ctl_name = pvt->dev_info->ctl_name;
+	mci->dev_name = pci_name(pdev);
 	mci->edac_check = e7xxx_check;
 	mci->ctl_page_to_phys = ctl_page_to_phys;
 	e7xxx_init_csrows(mci, pdev, dev_idx, drc);
@@ -473,11 +478,22 @@
 	/* Here we assume that we will never see multiple instances of this
 	 * type of memory controller.  The ID is therefore hardcoded to 0.
 	 */
-	if (edac_mc_add_mc(mci,0)) {
+	if (edac_mc_add_mc(mci)) {
 		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
 		goto fail1;
 	}
 
+	/* allocating generic PCI control info */
+	e7xxx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!e7xxx_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
 	/* get this far and it's successful */
 	debugf3("%s(): success\n", __func__);
 	return 0;
@@ -493,7 +509,7 @@
 
 /* returns count (>= 0), or negative on error */
 static int __devinit e7xxx_init_one(struct pci_dev *pdev,
-		const struct pci_device_id *ent)
+				const struct pci_device_id *ent)
 {
 	debugf0("%s()\n", __func__);
 
@@ -509,34 +525,33 @@
 
 	debugf0("%s()\n", __func__);
 
+	if (e7xxx_pci)
+		edac_pci_release_generic_ctl(e7xxx_pci);
+
 	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
 		return;
 
-	pvt = (struct e7xxx_pvt *) mci->pvt_info;
+	pvt = (struct e7xxx_pvt *)mci->pvt_info;
 	pci_dev_put(pvt->bridge_ck);
 	edac_mc_free(mci);
 }
 
 static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
 	{
-		PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		E7205
-	},
+	 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 E7205},
 	{
-		PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		E7500
-	},
+	 PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 E7500},
 	{
-		PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		E7501
-	},
+	 PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 E7501},
 	{
-		PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		E7505
-	},
+	 PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 E7505},
 	{
-		0,
-	}	/* 0 terminated list. */
+	 0,
+	 }			/* 0 terminated list. */
 };
 
 MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
@@ -563,5 +578,7 @@
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
-	"Based on.work by Dan Hollis et al");
+		"Based on.work by Dan Hollis et al");
 MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
new file mode 100644
index 0000000..4e6bad1
--- /dev/null
+++ b/drivers/edac/edac_core.h
@@ -0,0 +1,851 @@
+/*
+ * Defines, structures, APIs for edac_core module
+ *
+ * (C) 2007 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ *	http://www.anime.net/~goemon/linux-ecc/
+ *
+ * NMI handling support added by
+ *     Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
+ *
+ * Refactored for multi-source files:
+ *	Doug Thompson <norsk5@xmission.com>
+ *
+ */
+
+#ifndef _EDAC_CORE_H_
+#define _EDAC_CORE_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/nmi.h>
+#include <linux/rcupdate.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/sysdev.h>
+#include <linux/workqueue.h>
+#include <linux/version.h>
+
+#define EDAC_MC_LABEL_LEN	31
+#define EDAC_DEVICE_NAME_LEN	31
+#define EDAC_ATTRIB_VALUE_LEN	15
+#define MC_PROC_NAME_MAX_LEN	7
+
+#if PAGE_SHIFT < 20
+#define PAGES_TO_MiB( pages )	( ( pages ) >> ( 20 - PAGE_SHIFT ) )
+#else				/* PAGE_SHIFT >= 20 */
+#define PAGES_TO_MiB( pages )	( ( pages ) << ( PAGE_SHIFT - 20 ) )
+#endif
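+
+/* Worked example (illustrative only): with 4 KiB pages (PAGE_SHIFT == 12),
+ * PAGES_TO_MiB(0x8000) == 0x8000 >> (20 - 12) == 128, i.e. 32768 pages of
+ * 4 KiB each make up 128 MiB.
+ */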
+
+#define edac_printk(level, prefix, fmt, arg...) \
+	printk(level "EDAC " prefix ": " fmt, ##arg)
+
+#define edac_mc_printk(mci, level, fmt, arg...) \
+	printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
+
+#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
+	printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
+
+/* edac_device printk */
+#define edac_device_printk(ctl, level, fmt, arg...) \
+	printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg)
+
+/* edac_pci printk */
+#define edac_pci_printk(ctl, level, fmt, arg...) \
+	printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg)
+
+/* prefixes for edac_printk() and edac_mc_printk() */
+#define EDAC_MC "MC"
+#define EDAC_PCI "PCI"
+#define EDAC_DEBUG "DEBUG"
+
+#ifdef CONFIG_EDAC_DEBUG
+extern int edac_debug_level;
+
+#define edac_debug_printk(level, fmt, arg...)                            \
+	do {                                                             \
+		if (level <= edac_debug_level)                           \
+			edac_printk(KERN_EMERG, EDAC_DEBUG, fmt, ##arg); \
+	} while(0)
+
+#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
+#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
+#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
+#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
+#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
+
+#else				/* !CONFIG_EDAC_DEBUG */
+
+#define debugf0( ... )
+#define debugf1( ... )
+#define debugf2( ... )
+#define debugf3( ... )
+#define debugf4( ... )
+
+#endif				/* !CONFIG_EDAC_DEBUG */
+
+#define BIT(x) (1 << (x))
+
+#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
+	PCI_DEVICE_ID_ ## vend ## _ ## dev
+
+#define dev_name(dev) (dev)->dev_name
+
+/* memory devices */
+enum dev_type {
+	DEV_UNKNOWN = 0,
+	DEV_X1,
+	DEV_X2,
+	DEV_X4,
+	DEV_X8,
+	DEV_X16,
+	DEV_X32,		/* Do these parts exist? */
+	DEV_X64			/* Do these parts exist? */
+};
+
+#define DEV_FLAG_UNKNOWN	BIT(DEV_UNKNOWN)
+#define DEV_FLAG_X1		BIT(DEV_X1)
+#define DEV_FLAG_X2		BIT(DEV_X2)
+#define DEV_FLAG_X4		BIT(DEV_X4)
+#define DEV_FLAG_X8		BIT(DEV_X8)
+#define DEV_FLAG_X16		BIT(DEV_X16)
+#define DEV_FLAG_X32		BIT(DEV_X32)
+#define DEV_FLAG_X64		BIT(DEV_X64)
+
+/* memory types */
+enum mem_type {
+	MEM_EMPTY = 0,		/* Empty csrow */
+	MEM_RESERVED,		/* Reserved csrow type */
+	MEM_UNKNOWN,		/* Unknown csrow type */
+	MEM_FPM,		/* Fast page mode */
+	MEM_EDO,		/* Extended data out */
+	MEM_BEDO,		/* Burst Extended data out */
+	MEM_SDR,		/* Single data rate SDRAM */
+	MEM_RDR,		/* Registered single data rate SDRAM */
+	MEM_DDR,		/* Double data rate SDRAM */
+	MEM_RDDR,		/* Registered Double data rate SDRAM */
+	MEM_RMBS,		/* Rambus DRAM */
+	MEM_DDR2,		/* DDR2 RAM */
+	MEM_FB_DDR2,		/* fully buffered DDR2 */
+	MEM_RDDR2,		/* Registered DDR2 RAM */
+};
+
+#define MEM_FLAG_EMPTY		BIT(MEM_EMPTY)
+#define MEM_FLAG_RESERVED	BIT(MEM_RESERVED)
+#define MEM_FLAG_UNKNOWN	BIT(MEM_UNKNOWN)
+#define MEM_FLAG_FPM		BIT(MEM_FPM)
+#define MEM_FLAG_EDO		BIT(MEM_EDO)
+#define MEM_FLAG_BEDO		BIT(MEM_BEDO)
+#define MEM_FLAG_SDR		BIT(MEM_SDR)
+#define MEM_FLAG_RDR		BIT(MEM_RDR)
+#define MEM_FLAG_DDR		BIT(MEM_DDR)
+#define MEM_FLAG_RDDR		BIT(MEM_RDDR)
+#define MEM_FLAG_RMBS		BIT(MEM_RMBS)
+#define MEM_FLAG_DDR2           BIT(MEM_DDR2)
+#define MEM_FLAG_FB_DDR2        BIT(MEM_FB_DDR2)
+#define MEM_FLAG_RDDR2          BIT(MEM_RDDR2)
+
+/* chipset Error Detection and Correction capabilities and mode */
+enum edac_type {
+	EDAC_UNKNOWN = 0,	/* Unknown if ECC is available */
+	EDAC_NONE,		/* Doesn't support ECC */
+	EDAC_RESERVED,		/* Reserved ECC type */
+	EDAC_PARITY,		/* Detects parity errors */
+	EDAC_EC,		/* Error Checking - no correction */
+	EDAC_SECDED,		/* Single bit error correction, Double detection */
+	EDAC_S2ECD2ED,		/* Chipkill x2 devices - do these exist? */
+	EDAC_S4ECD4ED,		/* Chipkill x4 devices */
+	EDAC_S8ECD8ED,		/* Chipkill x8 devices */
+	EDAC_S16ECD16ED,	/* Chipkill x16 devices */
+};
+
+#define EDAC_FLAG_UNKNOWN	BIT(EDAC_UNKNOWN)
+#define EDAC_FLAG_NONE		BIT(EDAC_NONE)
+#define EDAC_FLAG_PARITY	BIT(EDAC_PARITY)
+#define EDAC_FLAG_EC		BIT(EDAC_EC)
+#define EDAC_FLAG_SECDED	BIT(EDAC_SECDED)
+#define EDAC_FLAG_S2ECD2ED	BIT(EDAC_S2ECD2ED)
+#define EDAC_FLAG_S4ECD4ED	BIT(EDAC_S4ECD4ED)
+#define EDAC_FLAG_S8ECD8ED	BIT(EDAC_S8ECD8ED)
+#define EDAC_FLAG_S16ECD16ED	BIT(EDAC_S16ECD16ED)
+
+/* scrubbing capabilities */
+enum scrub_type {
+	SCRUB_UNKNOWN = 0,	/* Unknown if scrubber is available */
+	SCRUB_NONE,		/* No scrubber */
+	SCRUB_SW_PROG,		/* SW progressive (sequential) scrubbing */
+	SCRUB_SW_SRC,		/* Software scrub only errors */
+	SCRUB_SW_PROG_SRC,	/* Progressive software scrub from an error */
+	SCRUB_SW_TUNABLE,	/* Software scrub frequency is tunable */
+	SCRUB_HW_PROG,		/* HW progressive (sequential) scrubbing */
+	SCRUB_HW_SRC,		/* Hardware scrub only errors */
+	SCRUB_HW_PROG_SRC,	/* Progressive hardware scrub from an error */
+	SCRUB_HW_TUNABLE	/* Hardware scrub frequency is tunable */
+};
+
+#define SCRUB_FLAG_SW_PROG	BIT(SCRUB_SW_PROG)
+#define SCRUB_FLAG_SW_SRC	BIT(SCRUB_SW_SRC)
+#define SCRUB_FLAG_SW_PROG_SRC	BIT(SCRUB_SW_PROG_SRC)
+#define SCRUB_FLAG_SW_TUN	BIT(SCRUB_SW_TUNABLE)
+#define SCRUB_FLAG_HW_PROG	BIT(SCRUB_HW_PROG)
+#define SCRUB_FLAG_HW_SRC	BIT(SCRUB_HW_SRC)
+#define SCRUB_FLAG_HW_PROG_SRC	BIT(SCRUB_HW_PROG_SRC)
+#define SCRUB_FLAG_HW_TUN	BIT(SCRUB_HW_TUNABLE)
+
+/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
+
+/* EDAC internal operation states */
+#define	OP_ALLOC		0x100
+#define OP_RUNNING_POLL		0x201
+#define OP_RUNNING_INTERRUPT	0x202
+#define OP_RUNNING_POLL_INTR	0x203
+#define OP_OFFLINE		0x300
+
+/*
+ * There are several things to be aware of that aren't at all obvious:
+ *
+ *
+ * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
+ *
+ * These are some of the many terms that are thrown about that don't always
+ * mean what people think they mean (Inconceivable!).  In the interest of
+ * creating a common ground for discussion, terms and their definitions
+ * will be established.
+ *
+ * Memory devices:	The individual chip on a memory stick.  These devices
+ *			commonly output 4 and 8 bits each.  Grouping several
+ *			of these in parallel provides 64 bits which is common
+ *			for a memory stick.
+ *
+ * Memory Stick:	A printed circuit board that aggregates multiple
+ *			memory devices in parallel.  This is the atomic
+ *			memory component that is purchasable by Joe consumer
+ *			and loaded into a memory socket.
+ *
+ * Socket:		A physical connector on the motherboard that accepts
+ *			a single memory stick.
+ *
+ * Channel:		Set of memory devices on a memory stick that must be
+ *			grouped in parallel with one or more additional
+ *			channels from other memory sticks.  This parallel
+ *			grouping of the output from multiple channels is
+ *			necessary for the smallest granularity of memory access.
+ *			Some memory controllers are capable of single channel -
+ *			which means that memory sticks can be loaded
+ *			individually.  Other memory controllers are only
+ *			capable of dual channel - which means that memory
+ *			sticks must be loaded as pairs (see "socket set").
+ *
+ * Chip-select row:	All of the memory devices that are selected together
+ *			for a single, minimum grain of memory access.
+ *			This selects all of the parallel memory devices across
+ *			all of the parallel channels.  Common chip-select rows
+ *			for single channel are 64 bits, for dual channel 128
+ *			bits.
+ *
+ * Single-Ranked stick:	A Single-ranked stick has 1 chip-select row of memory.
+ *			Motherboards commonly drive two chip-select pins to
+ *			a memory stick. A single-ranked stick will occupy
+ *			only one of those rows. The other will be unused.
+ *
+ * Double-Ranked stick:	A double-ranked stick has two chip-select rows which
+ *			access different sets of memory devices.  The two
+ *			rows cannot be accessed concurrently.
+ *
+ * Double-sided stick:	DEPRECATED TERM, see Double-Ranked stick.
+ *			A double-sided stick has two chip-select rows which
+ *			access different sets of memory devices.  The two
+ *			rows cannot be accessed concurrently.  "Double-sided"
+ *			is irrespective of the memory devices being mounted
+ *			on both sides of the memory stick.
+ *
+ * Socket set:		All of the memory sticks that are required for
+ *			a single memory access or all of the memory sticks
+ *			spanned by a chip-select row.  A single socket set
+ *			has two chip-select rows and if double-sided sticks
+ *			are used these will occupy those chip-select rows.
+ *
+ * Bank:		This term is avoided because it is unclear whether it
+ *			refers to chip-select rows or to socket sets.
+ *
+ * Controller pages:
+ *
+ * Physical pages:
+ *
+ * Virtual pages:
+ *
+ *
+ * STRUCTURE ORGANIZATION AND CHOICES
+ *
+ *
+ *
+ * PS - I enjoyed writing all that about as much as you enjoyed reading it.
+ */
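+
+/* A worked example (illustrative only): a stick built from eight x8 memory
+ * devices outputs 8 * 8 = 64 bits, one channel's worth.  A dual channel
+ * controller groups two such sticks in parallel, so a single chip-select
+ * row spans 2 * 64 = 128 bits.  If both sticks are double-ranked, that
+ * socket set provides two chip-select rows, which cannot be accessed
+ * concurrently.
+ */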
+
+struct channel_info {
+	int chan_idx;		/* channel index */
+	u32 ce_count;		/* Correctable Errors for this CHANNEL */
+	char label[EDAC_MC_LABEL_LEN + 1];	/* DIMM label on motherboard */
+	struct csrow_info *csrow;	/* the parent */
+};
+
+struct csrow_info {
+	unsigned long first_page;	/* first page number in dimm */
+	unsigned long last_page;	/* last page number in dimm */
+	unsigned long page_mask;	/* used for interleaving -
+					 * 0UL for non intlv
+					 */
+	u32 nr_pages;		/* number of pages in csrow */
+	u32 grain;		/* granularity of reported error in bytes */
+	int csrow_idx;		/* the chip-select row */
+	enum dev_type dtype;	/* memory device type */
+	u32 ue_count;		/* Uncorrectable Errors for this csrow */
+	u32 ce_count;		/* Correctable Errors for this csrow */
+	enum mem_type mtype;	/* memory csrow type */
+	enum edac_type edac_mode;	/* EDAC mode for this csrow */
+	struct mem_ctl_info *mci;	/* the parent */
+
+	struct kobject kobj;	/* sysfs kobject for this csrow */
+
+	/* channel information for this csrow */
+	u32 nr_channels;
+	struct channel_info *channels;
+};
+
+/* mcidev_sysfs_attribute structure
+ *	used for driver sysfs attributes and in mem_ctl_info
+ * 	sysfs top level entries
+ */
+struct mcidev_sysfs_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct mem_ctl_info *, char *);
+	ssize_t (*store)(struct mem_ctl_info *, const char *, size_t);
+};
+
+/* MEMORY controller information structure
+ */
+struct mem_ctl_info {
+	struct list_head link;	/* for global list of mem_ctl_info structs */
+
+	struct module *owner;	/* Module owner of this control struct */
+
+	unsigned long mtype_cap;	/* memory types supported by mc */
+	unsigned long edac_ctl_cap;	/* Mem controller EDAC capabilities */
+	unsigned long edac_cap;	/* configuration capabilities - this is
+				 * closely related to edac_ctl_cap.  The
+				 * difference is that the controller may be
+				 * capable of s4ecd4ed which would be listed
+				 * in edac_ctl_cap, but if channels aren't
+				 * capable of s4ecd4ed then the edac_cap would
+				 * not have that capability.
+				 */
+	unsigned long scrub_cap;	/* chipset scrub capabilities */
+	enum scrub_type scrub_mode;	/* current scrub mode */
+
+	/* Translates sdram memory scrub rate given in bytes/sec to the
+	   internal representation and configures whatever else needs
+	   to be configured.
+	 */
+	int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
+
+	/* Gets the current sdram memory scrub rate from the internal
+	   representation and converts it to the closest matching
+	   bandwidth in bytes/sec.
+	 */
+	int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
+
+
+	/* pointer to edac checking routine */
+	void (*edac_check) (struct mem_ctl_info * mci);
+
+	/*
+	 * Remaps memory pages: controller pages to physical pages.
+	 * For most MC's, this will be NULL.
+	 */
+	/* FIXME - why not send the phys page to begin with? */
+	unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
+					   unsigned long page);
+	int mc_idx;
+	int nr_csrows;
+	struct csrow_info *csrows;
+	/*
+	 * FIXME - what about controllers on other busses? - IDs must be
+	 * unique.  dev pointer should be sufficiently unique, but
+	 * BUS:SLOT.FUNC numbers may not be unique.
+	 */
+	struct device *dev;
+	const char *mod_name;
+	const char *mod_ver;
+	const char *ctl_name;
+	const char *dev_name;
+	char proc_name[MC_PROC_NAME_MAX_LEN + 1];
+	void *pvt_info;
+	u32 ue_noinfo_count;	/* Uncorrectable Errors w/o info */
+	u32 ce_noinfo_count;	/* Correctable Errors w/o info */
+	u32 ue_count;		/* Total Uncorrectable Errors for this MC */
+	u32 ce_count;		/* Total Correctable Errors for this MC */
+	unsigned long start_time;	/* mci load start time (in jiffies) */
+
+	/* this stuff is for safe removal of mc devices from global list while
+	 * NMI handlers may be traversing list
+	 */
+	struct rcu_head rcu;
+	struct completion complete;
+
+	/* edac sysfs device control */
+	struct kobject edac_mci_kobj;
+
+	/* Additional top controller level attributes, but specified
+	 * by the low level driver.
+	 *
+	 * Set by the low level driver to provide attributes at the
+	 * controller level, same level as 'ue_count' and 'ce_count' above.
+	 * An array of structures, NULL terminated
+	 *
+	 * If attributes are desired, then set to array of attributes
+	 * If no attributes are desired, leave NULL
+	 */
+	struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;
+
+	/* work struct for this MC */
+	struct delayed_work work;
+
+	/* the internal state of this controller instance */
+	int op_state;
+};
+
+/*
+ * The following are the structures to provide for a generic
+ * or abstract 'edac_device'. This set of structures and the
+ * code that implements the APIs for the same, provide for
+ * registering EDAC type devices which are NOT standard memory.
+ *
+ * CPU caches (L1 and L2)
+ * DMA engines
+ * Core CPU switches
+ * Fabric switch units
+ * PCIe interface controllers
+ * other EDAC/ECC type devices that can be monitored for
+ * errors, etc.
+ *
+ * It allows for a two-level hierarchy. For example:
+ *
+ * cache could be composed of L1, L2 and L3 levels of cache.
+ * Each CPU core would have its own L1 cache, while sharing
+ * L2 and maybe L3 caches.
+ *
+ * View them arranged, via the sysfs presentation:
+ * /sys/devices/system/edac/..
+ *
+ *	mc/		<existing memory device directory>
+ *	cpu/cpu0/..	<L1 and L2 block directory>
+ *		/L1-cache/ce_count
+ *			 /ue_count
+ *		/L2-cache/ce_count
+ *			 /ue_count
+ *	cpu/cpu1/..	<L1 and L2 block directory>
+ *		/L1-cache/ce_count
+ *			 /ue_count
+ *		/L2-cache/ce_count
+ *			 /ue_count
+ *	...
+ *
+ *	the L1 and L2 directories would be "edac_device_block's"
+ */
+
+struct edac_device_counter {
+	u32 ue_count;
+	u32 ce_count;
+};
+
+/* forward reference */
+struct edac_device_ctl_info;
+struct edac_device_block;
+
+/* edac_dev_sysfs_attribute structure
+ *	used for driver sysfs attributes in mem_ctl_info
+ *	for extra controls and attributes:
+ *		like high level error Injection controls
+ */
+struct edac_dev_sysfs_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct edac_device_ctl_info *, char *);
+	ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t);
+};
+
+/* edac_dev_sysfs_block_attribute structure
+ *
+ *	used in leaf 'block' nodes for adding controls/attributes
+ *
+ *	each block in each instance of the containing control structure
+ *	can have an array of the following. The show and store functions
+ *	will be filled in with the show/store function in the
+ *	low level driver.
+ *
+ *	The 'value' field will be the actual value field used for
+ *	counting
+ */
+struct edac_dev_sysfs_block_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct kobject *, struct attribute *, char *);
+	ssize_t (*store)(struct kobject *, struct attribute *,
+			const char *, size_t);
+	struct edac_device_block *block;
+
+	unsigned int value;
+};
+
+/* device block control structure */
+struct edac_device_block {
+	struct edac_device_instance *instance;	/* Up Pointer */
+	char name[EDAC_DEVICE_NAME_LEN + 1];
+
+	struct edac_device_counter counters;	/* basic UE and CE counters */
+
+	int nr_attribs;		/* how many attributes */
+
+	/* this block's attributes, could be NULL */
+	struct edac_dev_sysfs_block_attribute *block_attributes;
+
+	/* edac sysfs device control */
+	struct kobject kobj;
+};
+
+/* device instance control structure */
+struct edac_device_instance {
+	struct edac_device_ctl_info *ctl;	/* Up pointer */
+	char name[EDAC_DEVICE_NAME_LEN + 4];
+
+	struct edac_device_counter counters;	/* instance counters */
+
+	u32 nr_blocks;		/* how many blocks */
+	struct edac_device_block *blocks;	/* block array */
+
+	/* edac sysfs device control */
+	struct kobject kobj;
+};
+
+
+/*
+ * Abstract edac_device control info structure
+ *
+ */
+struct edac_device_ctl_info {
+	/* for global list of edac_device_ctl_info structs */
+	struct list_head link;
+
+	struct module *owner;	/* Module owner of this control struct */
+
+	int dev_idx;
+
+	/* Per instance controls for this edac_device */
+	int log_ue;		/* boolean for logging UEs */
+	int log_ce;		/* boolean for logging CEs */
+	int panic_on_ue;	/* boolean for panic'ing on an UE */
+	unsigned poll_msec;	/* polling interval in milliseconds */
+	unsigned long delay;	/* number of jiffies for poll_msec */
+
+	/* Additional top controller level attributes, but specified
+	 * by the low level driver.
+	 *
+	 * Set by the low level driver to provide attributes at the
+	 * controller level, same level as 'ue_count' and 'ce_count' above.
+	 * An array of structures, NULL terminated
+	 *
+	 * If attributes are desired, then set to array of attributes
+	 * If no attributes are desired, leave NULL
+	 */
+	struct edac_dev_sysfs_attribute *sysfs_attributes;
+
+	/* pointer to main 'edac' class in sysfs */
+	struct sysdev_class *edac_class;
+
+	/* the internal state of this controller instance */
+	int op_state;
+	/* work struct for this instance */
+	struct delayed_work work;
+
+	/* pointer to edac polling checking routine:
+	 *      If NOT NULL: points to polling check routine
+	 *      If NULL: Then assumes INTERRUPT operation, where
+	 *              MC driver will receive events
+	 */
+	void (*edac_check) (struct edac_device_ctl_info * edac_dev);
+
+	struct device *dev;	/* pointer to device structure */
+
+	const char *mod_name;	/* module name */
+	const char *ctl_name;	/* edac controller  name */
+	const char *dev_name;	/* pci/platform/etc... name */
+
+	void *pvt_info;		/* pointer to 'private driver' info */
+
+	unsigned long start_time;	/* edac_device load start time (jiffies) */
+
+	/* these are for safe removal of mc devices from global list while
+	 * NMI handlers may be traversing list
+	 */
+	struct rcu_head rcu;
+	struct completion removal_complete;
+
+	/* sysfs top name under 'edac' directory
+	 * and instance name:
+	 *      cpu/cpu0/...
+	 *      cpu/cpu1/...
+	 *      cpu/cpu2/...
+	 *      ...
+	 */
+	char name[EDAC_DEVICE_NAME_LEN + 1];
+
+	/* Number of instances supported on this control structure
+	 * and the array of those instances
+	 */
+	u32 nr_instances;
+	struct edac_device_instance *instances;
+
+	/* Event counters for this whole EDAC Device */
+	struct edac_device_counter counters;
+
+	/* edac sysfs device control for the 'name'
+	 * device this structure controls
+	 */
+	struct kobject kobj;
+};
+
+/* To get from the instance's wq to the beginning of the ctl structure */
+#define to_edac_mem_ctl_work(w) \
+		container_of(w, struct mem_ctl_info, work)
+
+#define to_edac_device_ctl_work(w) \
+		container_of(w,struct edac_device_ctl_info,work)
+
+/*
+ * The alloc() and free() functions for the 'edac_device' control info
+ * structure. A MC driver will allocate one of these for each edac_device
+ * it is going to control/register with the EDAC CORE.
+ */
+extern struct edac_device_ctl_info *edac_device_alloc_ctl_info(
+		unsigned sizeof_private,
+		char *edac_device_name, unsigned nr_instances,
+		char *edac_block_name, unsigned nr_blocks,
+		unsigned offset_value,
+		struct edac_dev_sysfs_block_attribute *block_attributes,
+		unsigned nr_attribs,
+		int device_index);
+
+/* The offset value can be:
+ *	-1 indicating no offset value
+ *	0 for zero-based block numbers
+ *	1 for 1-based block number
+ *	other for other-based block number
+ */
+#define	BLOCK_OFFSET_VALUE_OFF	((unsigned) -1)
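+
+/* For example (illustrative): nr_blocks = 4 with offset_value = 1 yields
+ * blocks named "<edac_block_name>1" ... "<edac_block_name>4" in sysfs.
+ */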
+
+extern void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info);
+
+#ifdef CONFIG_PCI
+
+struct edac_pci_counter {
+	atomic_t pe_count;
+	atomic_t npe_count;
+};
+
+/*
+ * Abstract edac_pci control info structure
+ *
+ */
+struct edac_pci_ctl_info {
+	/* for global list of edac_pci_ctl_info structs */
+	struct list_head link;
+
+	int pci_idx;
+
+	struct sysdev_class *edac_class;	/* pointer to class */
+
+	/* the internal state of this controller instance */
+	int op_state;
+	/* work struct for this instance */
+	struct delayed_work work;
+
+	/* pointer to edac polling checking routine:
+	 *      If NOT NULL: points to polling check routine
+	 *      If NULL: Then assumes INTERRUPT operation, where
+	 *              MC driver will receive events
+	 */
+	void (*edac_check) (struct edac_pci_ctl_info * edac_dev);
+
+	struct device *dev;	/* pointer to device structure */
+
+	const char *mod_name;	/* module name */
+	const char *ctl_name;	/* edac controller  name */
+	const char *dev_name;	/* pci/platform/etc... name */
+
+	void *pvt_info;		/* pointer to 'private driver' info */
+
+	unsigned long start_time;	/* edac_pci load start time (jiffies) */
+
+	/* these are for safe removal of devices from global list while
+	 * NMI handlers may be traversing list
+	 */
+	struct rcu_head rcu;
+	struct completion complete;
+
+	/* sysfs top name under 'edac' directory
+	 * and instance name:
+	 *      cpu/cpu0/...
+	 *      cpu/cpu1/...
+	 *      cpu/cpu2/...
+	 *      ...
+	 */
+	char name[EDAC_DEVICE_NAME_LEN + 1];
+
+	/* Event counters for this whole EDAC Device */
+	struct edac_pci_counter counters;
+
+	/* edac sysfs device control for the 'name'
+	 * device this structure controls
+	 */
+	struct kobject kobj;
+	struct completion kobj_complete;
+};
+
+#define to_edac_pci_ctl_work(w) \
+		container_of(w, struct edac_pci_ctl_info,work)
+
+/* write all or some bits in a byte-register*/
+static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
+				   u8 mask)
+{
+	if (mask != 0xff) {
+		u8 buf;
+
+		pci_read_config_byte(pdev, offset, &buf);
+		value &= mask;
+		buf &= ~mask;
+		value |= buf;
+	}
+
+	pci_write_config_byte(pdev, offset, value);
+}
+
+/* write all or some bits in a word-register*/
+static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
+				    u16 value, u16 mask)
+{
+	if (mask != 0xffff) {
+		u16 buf;
+
+		pci_read_config_word(pdev, offset, &buf);
+		value &= mask;
+		buf &= ~mask;
+		value |= buf;
+	}
+
+	pci_write_config_word(pdev, offset, value);
+}
+
+/* write all or some bits in a dword-register*/
+static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
+				    u32 value, u32 mask)
+{
+	if (mask != 0xffffffff) {
+		u32 buf;
+
+		pci_read_config_dword(pdev, offset, &buf);
+		value &= mask;
+		buf &= ~mask;
+		value |= buf;
+	}
+
+	pci_write_config_dword(pdev, offset, value);
+}
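+
+/* Typical use (a sketch, mirroring the e752x/e7xxx drivers in this series):
+ * write back only the error bits that were observed, leaving the rest of
+ * the register untouched:
+ *
+ *	pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
+ *			 info->dram_ferr, info->dram_ferr);
+ */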
+
+#endif				/* CONFIG_PCI */
+
+extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
+					  unsigned nr_chans, int edac_index);
+extern int edac_mc_add_mc(struct mem_ctl_info *mci);
+extern void edac_mc_free(struct mem_ctl_info *mci);
+extern struct mem_ctl_info *edac_mc_find(int idx);
+extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
+extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
+				      unsigned long page);
+
+/*
+ * The no info errors are used when error overflows are reported.
+ * There are a limited number of error logging registers that can
+ * be exhausted.  When all registers are exhausted and an additional
+ * error occurs then an error overflow register records that an
+ * error occurred and the type of error, but doesn't have any
+ * further information.  The ce/ue versions make for cleaner
+ * reporting logic and function interface - reduces conditional
+ * statement clutter and extra function arguments.
+ */
+extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
+			      unsigned long page_frame_number,
+			      unsigned long offset_in_page,
+			      unsigned long syndrome, int row, int channel,
+			      const char *msg);
+extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
+				      const char *msg);
+extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
+			      unsigned long page_frame_number,
+			      unsigned long offset_in_page, int row,
+			      const char *msg);
+extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
+				      const char *msg);
+extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow,
+				  unsigned int channel0, unsigned int channel1,
+				  char *msg);
+extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow,
+				  unsigned int channel, char *msg);
+
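+/* Reporting flow, a sketch based on the e7xxx driver in this series: a
+ * fully decoded error uses the full-information call, while a log overflow
+ * that carries no address uses the _no_info variant:
+ *
+ *	row = edac_mc_find_csrow_by_page(mci, page);
+ *	edac_mc_handle_ce(mci, page, 0, syndrome, row, 0, "e7xxx CE");
+ *	...
+ *	edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
+ */
+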
+/*
+ * edac_device APIs
+ */
+extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev);
+extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev);
+extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
+				int inst_nr, int block_nr, const char *msg);
+extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
+				int inst_nr, int block_nr, const char *msg);
+
+/*
+ * edac_pci APIs
+ */
+extern struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
+				const char *edac_pci_name);
+
+extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci);
+
+extern void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
+				unsigned long value);
+
+extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx);
+extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev);
+
+extern struct edac_pci_ctl_info *edac_pci_create_generic_ctl(
+				struct device *dev,
+				const char *mod_name);
+
+extern void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci);
+extern int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci);
+extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
+
+/*
+ * edac misc APIs
+ */
+extern char *edac_op_state_to_string(int op_state);
+
+#endif				/* _EDAC_CORE_H_ */
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
new file mode 100644
index 0000000..f3690a6
--- /dev/null
+++ b/drivers/edac/edac_device.c
@@ -0,0 +1,746 @@
+
+/*
+ * edac_device.c
+ * (C) 2007 www.douglaskthompson.com
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Doug Thompson <norsk5@xmission.com>
+ *
+ * edac_device API implementation
+ * 19 Jan 2007
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/highmem.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+/* lock for the list: 'edac_device_list', manipulation of this list
+ * is protected by the 'device_ctls_mutex' lock
+ */
+static DEFINE_MUTEX(device_ctls_mutex);
+static struct list_head edac_device_list = LIST_HEAD_INIT(edac_device_list);
+
+#ifdef CONFIG_EDAC_DEBUG
+static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
+{
+	debugf3("\tedac_dev = %p dev_idx=%d \n", edac_dev, edac_dev->dev_idx);
+	debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
+	debugf3("\tdev = %p\n", edac_dev->dev);
+	debugf3("\tmod_name:ctl_name = %s:%s\n",
+		edac_dev->mod_name, edac_dev->ctl_name);
+	debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info);
+}
+#endif				/* CONFIG_EDAC_DEBUG */
+
+
+/*
+ * edac_device_alloc_ctl_info()
+ *	Allocate a new edac device control info structure
+ *
+ *	The control structure is allocated as one complete chunk
+ *	from the OS. It is in turn sub-allocated to the
+ *	various objects that compose the structure
+ *
+ *	The structure has an array of 'nr_instances' instances within itself.
+ *	Each instance represents a major component
+ *		Example:  L1 cache and L2 cache are 2 instance components
+ *
+ *	Within each instance is an array of 'nr_blocks' blocks
+ */
+struct edac_device_ctl_info *edac_device_alloc_ctl_info(
+	unsigned sz_private,
+	char *edac_device_name, unsigned nr_instances,
+	char *edac_block_name, unsigned nr_blocks,
+	unsigned offset_value,		/* zero, 1, or other based offset */
+	struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
+	int device_index)
+{
+	struct edac_device_ctl_info *dev_ctl;
+	struct edac_device_instance *dev_inst, *inst;
+	struct edac_device_block *dev_blk, *blk_p, *blk;
+	struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
+	unsigned total_size;
+	unsigned count;
+	unsigned instance, block, attr;
+	void *pvt;
+	int err;
+
+	debugf4("%s() instances=%d blocks=%d\n",
+		__func__, nr_instances, nr_blocks);
+
+	/* Calculate the size of memory we need to allocate AND
+	 * determine the offsets of the various item arrays
+	 * (instance, block, attrib) from the start of an allocated structure.
+	 * We want the alignment of each item (instance, block, attrib)
+	 * to be at least as stringent as what the compiler would
+	 * provide if we could simply hardcode everything into a single struct.
+	 */
+	dev_ctl = (struct edac_device_ctl_info *)NULL;
+
+	/* Calc the 'end' offset past end of ONE ctl_info structure
+	 * which will become the start of the 'instance' array
+	 */
+	dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));
+
+	/* Calc the 'end' offset past the instance array within the ctl_info
+	 * which will become the start of the block array
+	 */
+	dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));
+
+	/* Calc the 'end' offset past the dev_blk array
+	 * which will become the start of the attrib array, if any.
+	 */
+	count = nr_instances * nr_blocks;
+	dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));
+
+	/* Check for case of when an attribute array is specified */
+	if (nr_attrib > 0) {
+		/* calc how many nr_attrib we need */
+		count *= nr_attrib;
+
+		/* Calc the 'end' offset past the attributes array */
+		pvt = edac_align_ptr(&dev_attrib[count], sz_private);
+	} else {
+		/* no attribute array specified */
+		pvt = edac_align_ptr(dev_attrib, sz_private);
+	}
+
+	/* 'pvt' now points to where the private data area is.
+	 * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
+	 * is baselined at ZERO
+	 */
+	total_size = ((unsigned long)pvt) + sz_private;
+
+	/* Allocate the amount of memory for the set of control structures */
+	dev_ctl = kzalloc(total_size, GFP_KERNEL);
+	if (dev_ctl == NULL)
+		return NULL;
+
+	/* Adjust pointers so they point within the actual memory we
+	 * just allocated rather than an imaginary chunk of memory
+	 * located at address 0.
+	 * 'dev_ctl' points to REAL memory, while the others are
+	 * ZERO based and thus need to be adjusted to point within
+	 * the allocated memory.
+	 */
+	dev_inst = (struct edac_device_instance *)
+		(((char *)dev_ctl) + ((unsigned long)dev_inst));
+	dev_blk = (struct edac_device_block *)
+		(((char *)dev_ctl) + ((unsigned long)dev_blk));
+	dev_attrib = (struct edac_dev_sysfs_block_attribute *)
+		(((char *)dev_ctl) + ((unsigned long)dev_attrib));
+	pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;
+
+	/* Begin storing the information into the control info structure */
+	dev_ctl->dev_idx = device_index;
+	dev_ctl->nr_instances = nr_instances;
+	dev_ctl->instances = dev_inst;
+	dev_ctl->pvt_info = pvt;
+
+	/* Name of this edac device */
+	snprintf(dev_ctl->name, sizeof(dev_ctl->name), "%s", edac_device_name);
+
+	debugf4("%s() edac_dev=%p next after end=%p\n",
+		__func__, dev_ctl, pvt + sz_private);
+
+	/* Initialize every Instance */
+	for (instance = 0; instance < nr_instances; instance++) {
+		inst = &dev_inst[instance];
+		inst->ctl = dev_ctl;
+		inst->nr_blocks = nr_blocks;
+		blk_p = &dev_blk[instance * nr_blocks];
+		inst->blocks = blk_p;
+
+		/* name of this instance */
+		snprintf(inst->name, sizeof(inst->name),
+			 "%s%u", edac_device_name, instance);
+
+		/* Initialize every block in each instance */
+		for (block = 0; block < nr_blocks; block++) {
+			blk = &blk_p[block];
+			blk->instance = inst;
+			snprintf(blk->name, sizeof(blk->name),
+				 "%s%d", edac_block_name, block+offset_value);
+
+			debugf4("%s() instance=%d inst_p=%p block=#%d "
+				"block_p=%p name='%s'\n",
+				__func__, instance, inst, block,
+				blk, blk->name);
+
+			/* if there are NO attributes OR no attribute pointer
+			 * then continue on to next block iteration
+			 */
+			if ((nr_attrib == 0) || (attrib_spec == NULL))
+				continue;
+
+			/* setup the attribute array for this block */
+			blk->nr_attribs = nr_attrib;
+			attrib_p = &dev_attrib[(instance * nr_blocks + block)
+						* nr_attrib];
+			blk->block_attributes = attrib_p;
+
+			debugf4("%s() THIS BLOCK_ATTRIB=%p\n",
+				__func__, blk->block_attributes);
+
+			/* Initialize every user specified attribute in this
+			 * block with the data the caller passed in
+			 * Each block gets its own copy of pointers,
+			 * and its unique 'value'
+			 */
+			for (attr = 0; attr < nr_attrib; attr++) {
+				attrib = &attrib_p[attr];
+
+				/* populate the unique per attrib
+				 * with the code pointers and info
+				 */
+				attrib->attr = attrib_spec[attr].attr;
+				attrib->show = attrib_spec[attr].show;
+				attrib->store = attrib_spec[attr].store;
+
+				attrib->block = blk;	/* up link */
+
+				debugf4("%s() alloc-attrib=%p attrib_name='%s' "
+					"attrib-spec=%p spec-name=%s\n",
+					__func__, attrib, attrib->attr.name,
+					&attrib_spec[attr],
+					attrib_spec[attr].attr.name
+					);
+			}
+		}
+	}
+
+	/* Mark this instance as merely ALLOCATED */
+	dev_ctl->op_state = OP_ALLOC;
+
+	/*
+	 * Initialize the 'root' kobj for the edac_device controller
+	 */
+	err = edac_device_register_sysfs_main_kobj(dev_ctl);
+	if (err) {
+		kfree(dev_ctl);
+		return NULL;
+	}
+
+	/* at this point, the root kobj is valid, and in order to
+	 * 'free' the object, then the function:
+	 *	edac_device_unregister_sysfs_main_kobj() must be called
+	 * which will perform kobj unregistration and the actual free
+	 * will occur during the kobject callback operation
+	 */
+
+	return dev_ctl;
+}
+EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
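+
+/* Usage sketch (hypothetical driver, illustrative names): allocate a control
+ * structure for two cache instances, each with one block, no extra block
+ * attributes and zero-based block numbering:
+ *
+ *	edac_dev = edac_device_alloc_ctl_info(sizeof(struct my_pvt),
+ *			"cpu", 2, "cache", 1, 0, NULL, 0, 0);
+ *	if (edac_dev == NULL)
+ *		return -ENOMEM;
+ */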
+
+/*
+ * edac_device_free_ctl_info()
+ *	frees the memory allocated by the edac_device_alloc_ctl_info()
+ *	function
+ */
+void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
+{
+	edac_device_unregister_sysfs_main_kobj(ctl_info);
+}
+EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);
+
+/*
+ * find_edac_device_by_dev
+ *	scans the edac_device list for a specific 'struct device *'
+ *
+ *	lock to be held prior to call:	device_ctls_mutex
+ *
+ *	Return:
+ *		pointer to control structure managing 'dev'
+ *		NULL if not found on list
+ */
+static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
+{
+	struct edac_device_ctl_info *edac_dev;
+	struct list_head *item;
+
+	debugf0("%s()\n", __func__);
+
+	list_for_each(item, &edac_device_list) {
+		edac_dev = list_entry(item, struct edac_device_ctl_info, link);
+
+		if (edac_dev->dev == dev)
+			return edac_dev;
+	}
+
+	return NULL;
+}
+
+/*
+ * add_edac_dev_to_global_list
+ *	Before calling this function, caller must
+ *	assign a unique value to edac_dev->dev_idx.
+ *
+ *	lock to be held prior to call:	device_ctls_mutex
+ *
+ *	Return:
+ *		0 on success
+ *		1 on failure.
+ */
+static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
+{
+	struct list_head *item, *insert_before;
+	struct edac_device_ctl_info *rover;
+
+	insert_before = &edac_device_list;
+
+	/* Determine if already on the list */
+	rover = find_edac_device_by_dev(edac_dev->dev);
+	if (unlikely(rover != NULL))
+		goto fail0;
+
+	/* Insert in ascending order by 'dev_idx', so find position */
+	list_for_each(item, &edac_device_list) {
+		rover = list_entry(item, struct edac_device_ctl_info, link);
+
+		if (rover->dev_idx >= edac_dev->dev_idx) {
+			if (unlikely(rover->dev_idx == edac_dev->dev_idx))
+				goto fail1;
+
+			insert_before = item;
+			break;
+		}
+	}
+
+	list_add_tail_rcu(&edac_dev->link, insert_before);
+	return 0;
+
+fail0:
+	edac_printk(KERN_WARNING, EDAC_MC,
+			"%s (%s) %s %s already assigned %d\n",
+			rover->dev->bus_id, dev_name(rover),
+			rover->mod_name, rover->ctl_name, rover->dev_idx);
+	return 1;
+
+fail1:
+	edac_printk(KERN_WARNING, EDAC_MC,
+			"bug in low-level driver: attempt to assign\n"
+			"    duplicate dev_idx %d in %s()\n", rover->dev_idx,
+			__func__);
+	return 1;
+}
+
+/*
+ * complete_edac_device_list_del
+ *
+ *	callback function when reference count is zero
+ */
+static void complete_edac_device_list_del(struct rcu_head *head)
+{
+	struct edac_device_ctl_info *edac_dev;
+
+	edac_dev = container_of(head, struct edac_device_ctl_info, rcu);
+	INIT_LIST_HEAD(&edac_dev->link);
+	complete(&edac_dev->removal_complete);
+}
+
+/*
+ * del_edac_device_from_global_list
+ *
+ *	remove the entry from the list via RCU, set up a callback,
+ *	then wait for the callback to occur
+ */
+static void del_edac_device_from_global_list(struct edac_device_ctl_info
+						*edac_device)
+{
+	list_del_rcu(&edac_device->link);
+
+	init_completion(&edac_device->removal_complete);
+	call_rcu(&edac_device->rcu, complete_edac_device_list_del);
+	wait_for_completion(&edac_device->removal_complete);
+}
+
+/**
+ * edac_device_find
+ *	Search for an edac_device_ctl_info structure whose index is 'idx'.
+ *
+ * If found, return a pointer to the structure.
+ * Else return NULL.
+ *
+ * Caller must hold device_ctls_mutex.
+ */
+struct edac_device_ctl_info *edac_device_find(int idx)
+{
+	struct list_head *item;
+	struct edac_device_ctl_info *edac_dev;
+
+	/* Iterate over list, looking for exact match of ID */
+	list_for_each(item, &edac_device_list) {
+		edac_dev = list_entry(item, struct edac_device_ctl_info, link);
+
+		if (edac_dev->dev_idx >= idx) {
+			if (edac_dev->dev_idx == idx)
+				return edac_dev;
+
+			/* not on list, so terminate early */
+			break;
+		}
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(edac_device_find);
+
+/*
+ * edac_device_workq_function
+ *	performs the operation scheduled by a workq request
+ *
+ *	this workq is embedded within an edac_device_ctl_info
+ *	structure that needs to be polled for possible error events.
+ *
+ *	This operation acquires the list mutex lock
+ *	(thus preventing insertion or deletion)
+ *	and then calls the device's poll function IFF this device is
+ *	running polled and there is a poll function defined.
+ */
+static void edac_device_workq_function(struct work_struct *work_req)
+{
+	struct delayed_work *d_work = (struct delayed_work *)work_req;
+	struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);
+
+	mutex_lock(&device_ctls_mutex);
+
+	/* Only poll controllers that are running polled and have a check */
+	if ((edac_dev->op_state == OP_RUNNING_POLL) &&
+		(edac_dev->edac_check != NULL)) {
+			edac_dev->edac_check(edac_dev);
+	}
+
+	mutex_unlock(&device_ctls_mutex);
+
+	/* Reschedule the workq for the next time period to start again.
+	 * If the period is 1 second, then adjust to the next
+	 * whole second, to keep timers from firing all over the period
+	 * between integral seconds
+	 */
+	if (edac_dev->poll_msec == 1000)
+		queue_delayed_work(edac_workqueue, &edac_dev->work,
+				round_jiffies(edac_dev->delay));
+	else
+		queue_delayed_work(edac_workqueue, &edac_dev->work,
+				edac_dev->delay);
+}
+
+/*
+ * edac_device_workq_setup
+ *	initialize a workq item for this edac_device instance
+ *	passing in the new delay period in msec
+ */
+void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+				unsigned msec)
+{
+	debugf0("%s()\n", __func__);
+
+	/* take the arg 'msec' and set it into the control structure
+	 * to be used in the time period calculation,
+	 * then calculate the number of jiffies it represents
+	 */
+	edac_dev->poll_msec = msec;
+	edac_dev->delay = msecs_to_jiffies(msec);
+
+	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
+
+	/* optimize here for the 1 second case, which will be the normal
+	 * value; fire ON the 1 second time event. This helps reduce
+	 * timers firing on a sub-second basis, since they are happy
+	 * to fire together exactly on the 1 second boundary
+	 */
+	if (edac_dev->poll_msec == 1000)
+		queue_delayed_work(edac_workqueue, &edac_dev->work,
+				round_jiffies(edac_dev->delay));
+	else
+		queue_delayed_work(edac_workqueue, &edac_dev->work,
+				edac_dev->delay);
+}
+
+/*
+ * edac_device_workq_teardown
+ *	stop the workq processing on this edac_dev
+ */
+void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
+{
+	int status;
+
+	status = cancel_delayed_work(&edac_dev->work);
+	if (status == 0) {
+		/* workq instance might be running, wait for it */
+		flush_workqueue(edac_workqueue);
+	}
+}
+
+/*
+ * edac_device_reset_delay_period
+ *
+ *	stop any outstanding workq request queued up at this time,
+ *	because we will be resetting the sleep time;
+ *	then restart the workq with the new delay
+ */
+void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
+					unsigned long value)
+{
+	/* cancel the current workq request, without the mutex lock */
+	edac_device_workq_teardown(edac_dev);
+
+	/* acquire the mutex before doing the workq setup */
+	mutex_lock(&device_ctls_mutex);
+
+	/* restart the workq request, with new delay value */
+	edac_device_workq_setup(edac_dev, value);
+
+	mutex_unlock(&device_ctls_mutex);
+}
+
+/**
+ * edac_device_add_device: Insert the 'edac_dev' structure into the
+ * edac_device global list and create sysfs entries associated with the
+ * edac_device structure.
+ * @edac_dev: pointer to the edac_device structure to be added to the
+ * global list
+ *
+ * Return:
+ *	0	Success
+ *	!0	Failure
+ */
+int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
+{
+	debugf0("%s()\n", __func__);
+
+#ifdef CONFIG_EDAC_DEBUG
+	if (edac_debug_level >= 3)
+		edac_device_dump_device(edac_dev);
+#endif
+	mutex_lock(&device_ctls_mutex);
+
+	if (add_edac_dev_to_global_list(edac_dev))
+		goto fail0;
+
+	/* set load time so that error rate can be tracked */
+	edac_dev->start_time = jiffies;
+
+	/* create this instance's sysfs entries */
+	if (edac_device_create_sysfs(edac_dev)) {
+		edac_device_printk(edac_dev, KERN_WARNING,
+					"failed to create sysfs device\n");
+		goto fail1;
+	}
+
+	/* If there IS a check routine, then we are running POLLED */
+	if (edac_dev->edac_check != NULL) {
+		/* This instance is NOW RUNNING */
+		edac_dev->op_state = OP_RUNNING_POLL;
+
+		/*
+		 * enable workq processing on this instance,
+		 * default = 1000 msec
+		 */
+		edac_device_workq_setup(edac_dev, 1000);
+	} else {
+		edac_dev->op_state = OP_RUNNING_INTERRUPT;
+	}
+
+	/* Report action taken */
+	edac_device_printk(edac_dev, KERN_INFO,
+				"Giving out device to module '%s' controller "
+				"'%s': DEV '%s' (%s)\n",
+				edac_dev->mod_name,
+				edac_dev->ctl_name,
+				dev_name(edac_dev),
+				edac_op_state_to_string(edac_dev->op_state));
+
+	mutex_unlock(&device_ctls_mutex);
+	return 0;
+
+fail1:
+	/* Some error, so remove the entry from the list */
+	del_edac_device_from_global_list(edac_dev);
+
+fail0:
+	mutex_unlock(&device_ctls_mutex);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(edac_device_add_device);
+
+/**
+ * edac_device_del_device:
+ *	Remove sysfs entries for the specified edac_device structure and
+ *	then remove the edac_device structure from the global list
+ *
+ * @dev:
+ *	Pointer to the 'struct device' representing the edac_device
+ *	structure to remove.
+ *
+ * Return:
+ *	Pointer to removed edac_device structure,
+ *	OR NULL if device not found.
+ */
+struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
+{
+	struct edac_device_ctl_info *edac_dev;
+
+	debugf0("%s()\n", __func__);
+
+	mutex_lock(&device_ctls_mutex);
+
+	/* Find the structure on the list, if not there, then leave */
+	edac_dev = find_edac_device_by_dev(dev);
+	if (edac_dev == NULL) {
+		mutex_unlock(&device_ctls_mutex);
+		return NULL;
+	}
+
+	/* mark this instance as OFFLINE */
+	edac_dev->op_state = OP_OFFLINE;
+
+	/* clear workq processing on this instance */
+	edac_device_workq_teardown(edac_dev);
+
+	/* deregister from global list */
+	del_edac_device_from_global_list(edac_dev);
+
+	mutex_unlock(&device_ctls_mutex);
+
+	/* Tear down the sysfs entries for this instance */
+	edac_device_remove_sysfs(edac_dev);
+
+	edac_printk(KERN_INFO, EDAC_MC,
+		"Removed device %d for %s %s: DEV %s\n",
+		edac_dev->dev_idx,
+		edac_dev->mod_name, edac_dev->ctl_name, dev_name(edac_dev));
+
+	return edac_dev;
+}
+EXPORT_SYMBOL_GPL(edac_device_del_device);
+
+static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
+{
+	return edac_dev->log_ce;
+}
+
+static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
+{
+	return edac_dev->log_ue;
+}
+
+static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
+					*edac_dev)
+{
+	return edac_dev->panic_on_ue;
+}
+
+/*
+ * edac_device_handle_ce
+ *	perform common output and handling of an 'edac_dev' CE event
+ */
+void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
+			int inst_nr, int block_nr, const char *msg)
+{
+	struct edac_device_instance *instance;
+	struct edac_device_block *block = NULL;
+
+	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
+		edac_device_printk(edac_dev, KERN_ERR,
+				"INTERNAL ERROR: 'instance' out of range "
+				"(%d >= %d)\n", inst_nr,
+				edac_dev->nr_instances);
+		return;
+	}
+
+	instance = edac_dev->instances + inst_nr;
+
+	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
+		edac_device_printk(edac_dev, KERN_ERR,
+				"INTERNAL ERROR: instance %d 'block' "
+				"out of range (%d >= %d)\n",
+				inst_nr, block_nr,
+				instance->nr_blocks);
+		return;
+	}
+
+	if (instance->nr_blocks > 0) {
+		block = instance->blocks + block_nr;
+		block->counters.ce_count++;
+	}
+
+	/* Propagate the count up the 'totals' tree */
+	instance->counters.ce_count++;
+	edac_dev->counters.ce_count++;
+
+	if (edac_device_get_log_ce(edac_dev))
+		edac_device_printk(edac_dev, KERN_WARNING,
+				"CE: %s instance: %s block: %s '%s'\n",
+				edac_dev->ctl_name, instance->name,
+				block ? block->name : "N/A", msg);
+}
+EXPORT_SYMBOL_GPL(edac_device_handle_ce);
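+
+/*
+ * Example (hypothetical driver call; the instance/block indices and the
+ * message text below are made up for illustration):
+ *
+ *	edac_device_handle_ce(edac_dev, 0, 0, "single-bit error corrected");
+ *
+ * bumps the CE counters for instance 0, block 0 and, when 'log_ce' is set,
+ * logs the event at KERN_WARNING.
+ */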
+
+/*
+ * edac_device_handle_ue
+ *	perform common output and handling of an 'edac_dev' UE event
+ */
+void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
+			int inst_nr, int block_nr, const char *msg)
+{
+	struct edac_device_instance *instance;
+	struct edac_device_block *block = NULL;
+
+	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
+		edac_device_printk(edac_dev, KERN_ERR,
+				"INTERNAL ERROR: 'instance' out of range "
+				"(%d >= %d)\n", inst_nr,
+				edac_dev->nr_instances);
+		return;
+	}
+
+	instance = edac_dev->instances + inst_nr;
+
+	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
+		edac_device_printk(edac_dev, KERN_ERR,
+				"INTERNAL ERROR: instance %d 'block' "
+				"out of range (%d >= %d)\n",
+				inst_nr, block_nr,
+				instance->nr_blocks);
+		return;
+	}
+
+	if (instance->nr_blocks > 0) {
+		block = instance->blocks + block_nr;
+		block->counters.ue_count++;
+	}
+
+	/* Propagate the count up the 'totals' tree */
+	instance->counters.ue_count++;
+	edac_dev->counters.ue_count++;
+
+	if (edac_device_get_log_ue(edac_dev))
+		edac_device_printk(edac_dev, KERN_EMERG,
+				"UE: %s instance: %s block: %s '%s'\n",
+				edac_dev->ctl_name, instance->name,
+				block ? block->name : "N/A", msg);
+
+	if (edac_device_get_panic_on_ue(edac_dev))
+		panic("EDAC %s: UE instance: %s block %s '%s'\n",
+			edac_dev->ctl_name, instance->name,
+			block ? block->name : "N/A", msg);
+}
+EXPORT_SYMBOL_GPL(edac_device_handle_ue);
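+
+/*
+ * Example (hypothetical driver call; indices and message are illustrative):
+ *
+ *	edac_device_handle_ue(edac_dev, 0, 0, "uncorrectable error");
+ *
+ * bumps the UE counters, logs at KERN_EMERG when 'log_ue' is set, and
+ * panics when 'panic_on_ue' is set.
+ */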
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
new file mode 100644
index 0000000..70b837f
--- /dev/null
+++ b/drivers/edac/edac_device_sysfs.c
@@ -0,0 +1,896 @@
+/*
+ * file for managing the edac_device class of devices for EDAC
+ *
+ * (C) 2007 SoftwareBitMaker (http://www.softwarebitmaker.com)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Doug Thompson <norsk5@xmission.com>
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define EDAC_DEVICE_SYMLINK	"device"
+
+#define to_edacdev(k) container_of(k, struct edac_device_ctl_info, kobj)
+#define to_edacdev_attr(a) container_of(a, struct edacdev_attribute, attr)
+
+
+/*
+ * Set of edac_device_ctl_info attribute store/show functions
+ */
+
+/* 'log_ue' */
+static ssize_t edac_device_ctl_log_ue_show(struct edac_device_ctl_info
+					*ctl_info, char *data)
+{
+	return sprintf(data, "%u\n", ctl_info->log_ue);
+}
+
+static ssize_t edac_device_ctl_log_ue_store(struct edac_device_ctl_info
+					*ctl_info, const char *data,
+					size_t count)
+{
+	/* zero turns the flag off, non-zero turns it on */
+	ctl_info->log_ue = (simple_strtoul(data, NULL, 0) != 0);
+
+	return count;
+}
+
+/* 'log_ce' */
+static ssize_t edac_device_ctl_log_ce_show(struct edac_device_ctl_info
+					*ctl_info, char *data)
+{
+	return sprintf(data, "%u\n", ctl_info->log_ce);
+}
+
+static ssize_t edac_device_ctl_log_ce_store(struct edac_device_ctl_info
+					*ctl_info, const char *data,
+					size_t count)
+{
+	/* zero turns the flag off, non-zero turns it on */
+	ctl_info->log_ce = (simple_strtoul(data, NULL, 0) != 0);
+
+	return count;
+}
+
+/* 'panic_on_ue' */
+static ssize_t edac_device_ctl_panic_on_ue_show(struct edac_device_ctl_info
+						*ctl_info, char *data)
+{
+	return sprintf(data, "%u\n", ctl_info->panic_on_ue);
+}
+
+static ssize_t edac_device_ctl_panic_on_ue_store(struct edac_device_ctl_info
+						 *ctl_info, const char *data,
+						 size_t count)
+{
+	/* zero turns the flag off, non-zero turns it on */
+	ctl_info->panic_on_ue = (simple_strtoul(data, NULL, 0) != 0);
+
+	return count;
+}
+
+/* 'poll_msec' show and store functions */
+static ssize_t edac_device_ctl_poll_msec_show(struct edac_device_ctl_info
+					*ctl_info, char *data)
+{
+	return sprintf(data, "%u\n", ctl_info->poll_msec);
+}
+
+static ssize_t edac_device_ctl_poll_msec_store(struct edac_device_ctl_info
+					*ctl_info, const char *data,
+					size_t count)
+{
+	unsigned long value;
+
+	/* get the value and enforce that it is non-zero; it must be at
+	 * least one millisecond for the delay period between scans.
+	 * Then cancel the last outstanding delay for the work request
+	 * and set a new one.
+	 */
+	value = simple_strtoul(data, NULL, 0);
+	edac_device_reset_delay_period(ctl_info, value);
+
+	return count;
+}
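+
+/*
+ * These controls end up as writable files under the control structure's
+ * sysfs directory; for example (path assumes the default
+ * /sys/devices/system/edac parent and an edac_dev named "test_device_edac"):
+ *
+ *	echo 2000 > /sys/devices/system/edac/test_device_edac/poll_msec
+ *
+ * which reschedules the polling workq with a 2000 msec period via
+ * edac_device_reset_delay_period().
+ */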
+
+/* edac_device_ctl_info specific attribute structure */
+struct ctl_info_attribute {
+	struct attribute attr;
+	ssize_t(*show) (struct edac_device_ctl_info *, char *);
+	ssize_t(*store) (struct edac_device_ctl_info *, const char *, size_t);
+};
+
+#define to_ctl_info(k) container_of(k, struct edac_device_ctl_info, kobj)
+#define to_ctl_info_attr(a) container_of(a, struct ctl_info_attribute, attr)
+
+/* Function to 'show' fields from the edac_dev 'ctl_info' structure */
+static ssize_t edac_dev_ctl_info_show(struct kobject *kobj,
+				struct attribute *attr, char *buffer)
+{
+	struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
+	struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);
+
+	if (ctl_info_attr->show)
+		return ctl_info_attr->show(edac_dev, buffer);
+	return -EIO;
+}
+
+/* Function to 'store' fields into the edac_dev 'ctl_info' structure */
+static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
+				struct attribute *attr,
+				const char *buffer, size_t count)
+{
+	struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
+	struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);
+
+	if (ctl_info_attr->store)
+		return ctl_info_attr->store(edac_dev, buffer, count);
+	return -EIO;
+}
+
+/* edac_dev file operations for a 'ctl_info' */
+static struct sysfs_ops device_ctl_info_ops = {
+	.show = edac_dev_ctl_info_show,
+	.store = edac_dev_ctl_info_store
+};
+
+#define CTL_INFO_ATTR(_name,_mode,_show,_store)        \
+static struct ctl_info_attribute attr_ctl_info_##_name = {      \
+	.attr = {.name = __stringify(_name), .mode = _mode },   \
+	.show   = _show,                                        \
+	.store  = _store,                                       \
+};
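+
+/* For reference, CTL_INFO_ATTR(log_ue, S_IRUGO | S_IWUSR, show, store)
+ * expands to roughly:
+ *
+ *	static struct ctl_info_attribute attr_ctl_info_log_ue = {
+ *		.attr = { .name = "log_ue", .mode = S_IRUGO | S_IWUSR },
+ *		.show = show,
+ *		.store = store,
+ *	};
+ */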
+
+/* Declare the various ctl_info attributes here and their respective ops */
+CTL_INFO_ATTR(log_ue, S_IRUGO | S_IWUSR,
+	edac_device_ctl_log_ue_show, edac_device_ctl_log_ue_store);
+CTL_INFO_ATTR(log_ce, S_IRUGO | S_IWUSR,
+	edac_device_ctl_log_ce_show, edac_device_ctl_log_ce_store);
+CTL_INFO_ATTR(panic_on_ue, S_IRUGO | S_IWUSR,
+	edac_device_ctl_panic_on_ue_show,
+	edac_device_ctl_panic_on_ue_store);
+CTL_INFO_ATTR(poll_msec, S_IRUGO | S_IWUSR,
+	edac_device_ctl_poll_msec_show, edac_device_ctl_poll_msec_store);
+
+/* Base Attributes of the EDAC_DEVICE ECC object */
+static struct ctl_info_attribute *device_ctrl_attr[] = {
+	&attr_ctl_info_panic_on_ue,
+	&attr_ctl_info_log_ue,
+	&attr_ctl_info_log_ce,
+	&attr_ctl_info_poll_msec,
+	NULL,
+};
+
+/*
+ * edac_device_ctrl_master_release
+ *
+ *	called when the reference count for the 'main' kobj
+ *	of an edac_device control struct reaches zero
+ *
+ *	Reference count model:
+ *		One 'main' kobject for each control structure allocated.
+ *		That main kobj is initially set to one AND
+ *		the reference count of the EDAC 'core' module is
+ *		bumped by one, thus adding a 'keep in memory' dependency.
+ *
+ *		Each new internal kobj (in instances and blocks) then
+ *		bumps the 'main' kobject's reference count.
+ *
+ *		When they are released, their release functions decrement
+ *		the 'main' kobj's count.
+ *
+ *		When the main kobj's count reaches zero (0), THIS function
+ *		is called, which then drops the EDAC 'core' module's
+ *		reference count.  When the module's reference count reaches
+ *		zero, the module no longer has a dependency on keeping the
+ *		release function code in memory, and the module can be
+ *		unloaded.
+ *
+ *		This also supports several control objects, each
+ *		with its own 'main' kobj.
+ */
+static void edac_device_ctrl_master_release(struct kobject *kobj)
+{
+	struct edac_device_ctl_info *edac_dev = to_edacdev(kobj);
+
+	debugf4("%s() control index=%d\n", __func__, edac_dev->dev_idx);
+
+	/* decrement the EDAC CORE module ref count */
+	module_put(edac_dev->owner);
+
+	/* free the control struct containing the 'main' kobj
+	 * passed in to this routine
+	 */
+	kfree(edac_dev);
+}
+
+/* ktype for the main (master) kobject */
+static struct kobj_type ktype_device_ctrl = {
+	.release = edac_device_ctrl_master_release,
+	.sysfs_ops = &device_ctl_info_ops,
+	.default_attrs = (struct attribute **)device_ctrl_attr,
+};
+
+/*
+ * edac_device_register_sysfs_main_kobj
+ *
+ *	perform the high level setup for the new edac_device instance
+ *
+ * Return:  0 SUCCESS
+ *         !0 FAILURE
+ */
+int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
+{
+	struct sysdev_class *edac_class;
+	int err;
+
+	debugf1("%s()\n", __func__);
+
+	/* get the /sys/devices/system/edac reference */
+	edac_class = edac_get_edac_class();
+	if (edac_class == NULL) {
+		debugf1("%s() no edac_class error\n", __func__);
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	/* Point to the 'edac_class' this instance 'reports' to */
+	edac_dev->edac_class = edac_class;
+
+	/* Init the device's kobject */
+	memset(&edac_dev->kobj, 0, sizeof(struct kobject));
+	edac_dev->kobj.ktype = &ktype_device_ctrl;
+
+	/* set this new device under the edac_class kobject */
+	edac_dev->kobj.parent = &edac_class->kset.kobj;
+
+	/* generate sysfs "..../edac/<name>"   */
+	debugf4("%s() set name of kobject to: %s\n", __func__, edac_dev->name);
+	err = kobject_set_name(&edac_dev->kobj, "%s", edac_dev->name);
+	if (err)
+		goto err_out;
+
+	/* Record which module 'owns' this control structure
+	 * and bump the ref count of the module
+	 */
+	edac_dev->owner = THIS_MODULE;
+
+	if (!try_module_get(edac_dev->owner)) {
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	/* register */
+	err = kobject_register(&edac_dev->kobj);
+	if (err) {
+		debugf1("%s() Failed to register '.../edac/%s'\n",
+			__func__, edac_dev->name);
+		goto err_kobj_reg;
+	}
+
+	/* At this point, to 'free' the control struct,
+	 * edac_device_unregister_sysfs_main_kobj() must be used
+	 */
+
+	debugf4("%s() Registered '.../edac/%s' kobject\n",
+		__func__, edac_dev->name);
+
+	return 0;
+
+	/* Error exit stack */
+err_kobj_reg:
+	module_put(edac_dev->owner);
+
+err_out:
+	return err;
+}
+
+/*
+ * edac_device_unregister_sysfs_main_kobj:
+ *	unregister the '..../edac/<name>' kobject
+ */
+void edac_device_unregister_sysfs_main_kobj(
+					struct edac_device_ctl_info *edac_dev)
+{
+	debugf0("%s()\n", __func__);
+	debugf4("%s() name of kobject is: %s\n",
+		__func__, kobject_name(&edac_dev->kobj));
+
+	/*
+	 * Unregister the edac device's kobject and
+	 * allow for reference count to reach 0 at which point
+	 * the callback will be called to:
+	 *   a) module_put() this module
+	 *   b) 'kfree' the memory
+	 */
+	kobject_unregister(&edac_dev->kobj);
+}
+
+/* edac_dev -> instance information */
+
+/*
+ * Set of low-level instance attribute show functions
+ */
+static ssize_t instance_ue_count_show(struct edac_device_instance *instance,
+				char *data)
+{
+	return sprintf(data, "%u\n", instance->counters.ue_count);
+}
+
+static ssize_t instance_ce_count_show(struct edac_device_instance *instance,
+				char *data)
+{
+	return sprintf(data, "%u\n", instance->counters.ce_count);
+}
+
+#define to_instance(k) container_of(k, struct edac_device_instance, kobj)
+#define to_instance_attr(a) container_of(a, struct instance_attribute, attr)
+
+/* DEVICE instance kobject release() function */
+static void edac_device_ctrl_instance_release(struct kobject *kobj)
+{
+	struct edac_device_instance *instance;
+
+	debugf1("%s()\n", __func__);
+
+	/* map from this kobj to the main control struct
+	 * and then dec the main kobj count
+	 */
+	instance = to_instance(kobj);
+	kobject_put(&instance->ctl->kobj);
+}
+
+/* instance specific attribute structure */
+struct instance_attribute {
+	struct attribute attr;
+	ssize_t(*show) (struct edac_device_instance *, char *);
+	ssize_t(*store) (struct edac_device_instance *, const char *, size_t);
+};
+
+/* Function to 'show' fields from the edac_dev 'instance' structure */
+static ssize_t edac_dev_instance_show(struct kobject *kobj,
+				struct attribute *attr, char *buffer)
+{
+	struct edac_device_instance *instance = to_instance(kobj);
+	struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+	if (instance_attr->show)
+		return instance_attr->show(instance, buffer);
+	return -EIO;
+}
+
+/* Function to 'store' fields into the edac_dev 'instance' structure */
+static ssize_t edac_dev_instance_store(struct kobject *kobj,
+				struct attribute *attr,
+				const char *buffer, size_t count)
+{
+	struct edac_device_instance *instance = to_instance(kobj);
+	struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+	if (instance_attr->store)
+		return instance_attr->store(instance, buffer, count);
+	return -EIO;
+}
+
+/* edac_dev file operations for an 'instance' */
+static struct sysfs_ops device_instance_ops = {
+	.show = edac_dev_instance_show,
+	.store = edac_dev_instance_store
+};
+
+#define INSTANCE_ATTR(_name,_mode,_show,_store)        \
+static struct instance_attribute attr_instance_##_name = {      \
+	.attr = {.name = __stringify(_name), .mode = _mode },   \
+	.show   = _show,                                        \
+	.store  = _store,                                       \
+};
+
+/*
+ * Define attributes visible for the edac_device instance object
+ *	Each contains a pointer to a show and an optional set
+ *	function pointer that does the low level output/input
+ */
+INSTANCE_ATTR(ce_count, S_IRUGO, instance_ce_count_show, NULL);
+INSTANCE_ATTR(ue_count, S_IRUGO, instance_ue_count_show, NULL);
+
+/* list of edac_dev 'instance' attributes */
+static struct instance_attribute *device_instance_attr[] = {
+	&attr_instance_ce_count,
+	&attr_instance_ue_count,
+	NULL,
+};
+
+/* The 'ktype' for each edac_dev 'instance' */
+static struct kobj_type ktype_instance_ctrl = {
+	.release = edac_device_ctrl_instance_release,
+	.sysfs_ops = &device_instance_ops,
+	.default_attrs = (struct attribute **)device_instance_attr,
+};
+
+/* edac_dev -> instance -> block information */
+
+#define to_block(k) container_of(k, struct edac_device_block, kobj)
+#define to_block_attr(a) \
+	container_of(a, struct edac_dev_sysfs_block_attribute, attr)
+
+/*
+ * Set of low-level block attribute show functions
+ */
+static ssize_t block_ue_count_show(struct kobject *kobj,
+					struct attribute *attr, char *data)
+{
+	struct edac_device_block *block = to_block(kobj);
+
+	return sprintf(data, "%u\n", block->counters.ue_count);
+}
+
+static ssize_t block_ce_count_show(struct kobject *kobj,
+					struct attribute *attr, char *data)
+{
+	struct edac_device_block *block = to_block(kobj);
+
+	return sprintf(data, "%u\n", block->counters.ce_count);
+}
+
+/* DEVICE block kobject release() function */
+static void edac_device_ctrl_block_release(struct kobject *kobj)
+{
+	struct edac_device_block *block;
+
+	debugf1("%s()\n", __func__);
+
+	/* get the container of the kobj */
+	block = to_block(kobj);
+
+	/* map from 'block kobj' to 'block->instance->controller->main_kobj'
+	 * now 'release' the block kobject
+	 */
+	kobject_put(&block->instance->ctl->kobj);
+}
+
+
+/* Function to 'show' fields from the edac_dev 'block' structure */
+static ssize_t edac_dev_block_show(struct kobject *kobj,
+				struct attribute *attr, char *buffer)
+{
+	struct edac_dev_sysfs_block_attribute *block_attr =
+						to_block_attr(attr);
+
+	if (block_attr->show)
+		return block_attr->show(kobj, attr, buffer);
+	return -EIO;
+}
+
+/* Function to 'store' fields into the edac_dev 'block' structure */
+static ssize_t edac_dev_block_store(struct kobject *kobj,
+				struct attribute *attr,
+				const char *buffer, size_t count)
+{
+	struct edac_dev_sysfs_block_attribute *block_attr;
+
+	block_attr = to_block_attr(attr);
+
+	if (block_attr->store)
+		return block_attr->store(kobj, attr, buffer, count);
+	return -EIO;
+}
+
+/* edac_dev file operations for a 'block' */
+static struct sysfs_ops device_block_ops = {
+	.show = edac_dev_block_show,
+	.store = edac_dev_block_store
+};
+
+#define BLOCK_ATTR(_name,_mode,_show,_store)        \
+static struct edac_dev_sysfs_block_attribute attr_block_##_name = {	\
+	.attr = {.name = __stringify(_name), .mode = _mode },   \
+	.show   = _show,                                        \
+	.store  = _store,                                       \
+};
+
+BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL);
+BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL);
+
+/* list of edac_dev 'block' attributes */
+static struct edac_dev_sysfs_block_attribute *device_block_attr[] = {
+	&attr_block_ce_count,
+	&attr_block_ue_count,
+	NULL,
+};
+
+/* The 'ktype' for each edac_dev 'block' */
+static struct kobj_type ktype_block_ctrl = {
+	.release = edac_device_ctrl_block_release,
+	.sysfs_ops = &device_block_ops,
+	.default_attrs = (struct attribute **)device_block_attr,
+};
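+
+/*
+ * Putting the kobjects above together, the resulting sysfs layout looks
+ * roughly like this (directory names depend on what the driver passed in):
+ *
+ *	/sys/devices/system/edac/<dev-name>/
+ *		log_ce, log_ue, panic_on_ue, poll_msec
+ *		device -> symlink to the underlying struct device
+ *		plus any driver-supplied main attributes
+ *		<instance-name>/
+ *			ce_count, ue_count
+ *			<block-name>/
+ *				ce_count, ue_count
+ *				plus any driver-supplied block attributes
+ */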
+
+/* block ctor/dtor  code */
+
+/*
+ * edac_device_create_block
+ */
+static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
+				struct edac_device_instance *instance,
+				struct edac_device_block *block)
+{
+	int i;
+	int err;
+	struct edac_dev_sysfs_block_attribute *sysfs_attrib;
+	struct kobject *main_kobj;
+
+	debugf4("%s() Instance '%s' inst_p=%p  block '%s'  block_p=%p\n",
+		__func__, instance->name, instance, block->name, block);
+	debugf4("%s() block kobj=%p  block kobj->parent=%p\n",
+		__func__, &block->kobj, &block->kobj.parent);
+
+	/* init this block's kobject */
+	memset(&block->kobj, 0, sizeof(struct kobject));
+	block->kobj.parent = &instance->kobj;
+	block->kobj.ktype = &ktype_block_ctrl;
+
+	err = kobject_set_name(&block->kobj, "%s", block->name);
+	if (err)
+		return err;
+
+	/* bump the main kobject's reference count for this controller;
+	 * this block is dependent on the main kobj
+	 */
+	main_kobj = kobject_get(&edac_dev->kobj);
+	if (!main_kobj) {
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	/* Add this block's kobject */
+	err = kobject_register(&block->kobj);
+	if (err) {
+		debugf1("%s() Failed to register block '%s'\n",
+			__func__, block->name);
+		kobject_put(main_kobj);
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	/* If there are driver-level block attributes, then add them
+	 * to the block kobject
+	 */
+	sysfs_attrib = block->block_attributes;
+	if (sysfs_attrib && block->nr_attribs) {
+		for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
+
+			debugf4("%s() creating block attrib='%s' "
+				"attrib->%p to kobj=%p\n",
+				__func__,
+				sysfs_attrib->attr.name,
+				sysfs_attrib, &block->kobj);
+
+			/* Create each block_attribute file */
+			err = sysfs_create_file(&block->kobj,
+				&sysfs_attrib->attr);
+			if (err)
+				goto err_on_attrib;
+		}
+	}
+
+	return 0;
+
+	/* Error unwind stack */
+err_on_attrib:
+	kobject_unregister(&block->kobj);
+
+err_out:
+	return err;
+}
+
+/*
+ * edac_device_delete_block(edac_dev,block);
+ */
+static void edac_device_delete_block(struct edac_device_ctl_info *edac_dev,
+				struct edac_device_block *block)
+{
+	struct edac_dev_sysfs_block_attribute *sysfs_attrib;
+	int i;
+
+	/* if this block has 'attributes' then we need to iterate over the list
+	 * and 'remove' the attributes on this block
+	 */
+	sysfs_attrib = block->block_attributes;
+	if (sysfs_attrib && block->nr_attribs) {
+		for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
+
+			/* remove each block_attrib file */
+			sysfs_remove_file(&block->kobj,
+				(struct attribute *) sysfs_attrib);
+		}
+	}
+
+	/* unregister this block's kobject, SEE:
+	 *	edac_device_ctrl_block_release() callback operation
+	 */
+	kobject_unregister(&block->kobj);
+}
+
+/* instance ctor/dtor code */
+
+/*
+ * edac_device_create_instance
+ *	create just one instance of an edac_device 'instance'
+ */
+static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
+				int idx)
+{
+	int i, j;
+	int err;
+	struct edac_device_instance *instance;
+	struct kobject *main_kobj;
+
+	instance = &edac_dev->instances[idx];
+
+	/* Init the instance's kobject */
+	memset(&instance->kobj, 0, sizeof(struct kobject));
+
+	/* set this new device under the edac_device main kobject */
+	instance->kobj.parent = &edac_dev->kobj;
+	instance->kobj.ktype = &ktype_instance_ctrl;
+	instance->ctl = edac_dev;
+
+	err = kobject_set_name(&instance->kobj, "%s", instance->name);
+	if (err)
+		goto err_out;
+
+	/* bump the main kobject's reference count for this controller;
+	 * this instance is dependent on the main kobj
+	 */
+	main_kobj = kobject_get(&edac_dev->kobj);
+	if (!main_kobj) {
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	/* Formally register this instance's kobject */
+	err = kobject_register(&instance->kobj);
+	if (err != 0) {
+		debugf2("%s() Failed to register instance '%s'\n",
+			__func__, instance->name);
+		kobject_put(main_kobj);
+		goto err_out;
+	}
+
+	debugf4("%s() now register '%d' blocks for instance %d\n",
+		__func__, instance->nr_blocks, idx);
+
+	/* register all blocks of this instance */
+	for (i = 0; i < instance->nr_blocks; i++) {
+		err = edac_device_create_block(edac_dev, instance,
+						&instance->blocks[i]);
+		if (err) {
+			/* If any fail, remove all previous ones */
+			for (j = 0; j < i; j++)
+				edac_device_delete_block(edac_dev,
+							&instance->blocks[j]);
+			goto err_release_instance_kobj;
+		}
+	}
+
+	debugf4("%s() Registered instance %d '%s' kobject\n",
+		__func__, idx, instance->name);
+
+	return 0;
+
+	/* error unwind stack */
+err_release_instance_kobj:
+	kobject_unregister(&instance->kobj);
+
+err_out:
+	return err;
+}
+
+/*
+ * edac_device_delete_instance
+ *	remove an edac_device instance
+ */
+static void edac_device_delete_instance(struct edac_device_ctl_info *edac_dev,
+					int idx)
+{
+	struct edac_device_instance *instance;
+	int i;
+
+	instance = &edac_dev->instances[idx];
+
+	/* unregister all blocks in this instance */
+	for (i = 0; i < instance->nr_blocks; i++)
+		edac_device_delete_block(edac_dev, &instance->blocks[i]);
+
+	/* unregister this instance's kobject, SEE:
+	 *	edac_device_ctrl_instance_release() for callback operation
+	 */
+	kobject_unregister(&instance->kobj);
+}
+
+/*
+ * edac_device_create_instances
+ *	create the first level of 'instances' for this device
+ *	(i.e. 'cache' might have 'cache0', 'cache1', 'cache2', etc.)
+ */
+static int edac_device_create_instances(struct edac_device_ctl_info *edac_dev)
+{
+	int i, j;
+	int err;
+
+	debugf0("%s()\n", __func__);
+
+	/* iterate over creation of the instances */
+	for (i = 0; i < edac_dev->nr_instances; i++) {
+		err = edac_device_create_instance(edac_dev, i);
+		if (err) {
+			/* unwind previous instances on error */
+			for (j = 0; j < i; j++)
+				edac_device_delete_instance(edac_dev, j);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * edac_device_delete_instances(edac_dev);
+ *	unregister all the kobjects of the instances
+ */
+static void edac_device_delete_instances(struct edac_device_ctl_info *edac_dev)
+{
+	int i;
+
+	/* iterate over the instances and delete each one */
+	for (i = 0; i < edac_dev->nr_instances; i++)
+		edac_device_delete_instance(edac_dev, i);
+}
+
+/* edac_dev sysfs ctor/dtor  code */
+
+/*
+ * edac_device_add_main_sysfs_attributes
+ *	add some attributes to this device's main kobject
+ */
+static int edac_device_add_main_sysfs_attributes(
+			struct edac_device_ctl_info *edac_dev)
+{
+	struct edac_dev_sysfs_attribute *sysfs_attrib;
+	int err = 0;
+
+	sysfs_attrib = edac_dev->sysfs_attributes;
+	if (sysfs_attrib) {
+		/* iterate over the array and create an attribute for each
+		 * entry in the list
+		 */
+		while (sysfs_attrib->attr.name != NULL) {
+			err = sysfs_create_file(&edac_dev->kobj,
+				(struct attribute*) sysfs_attrib);
+			if (err)
+				goto err_out;
+
+			sysfs_attrib++;
+		}
+	}
+
+err_out:
+	return err;
+}
+
+/*
+ * edac_device_remove_main_sysfs_attributes
+ *	remove any attributes from this device's main kobject
+ */
+static void edac_device_remove_main_sysfs_attributes(
+			struct edac_device_ctl_info *edac_dev)
+{
+	struct edac_dev_sysfs_attribute *sysfs_attrib;
+
+	/* if there are main attributes defined, remove them. First,
+	 * point to the start of the array and iterate over it,
+	 * removing each listed attribute from this device's main kobject
+	 */
+	sysfs_attrib = edac_dev->sysfs_attributes;
+	if (sysfs_attrib) {
+		while (sysfs_attrib->attr.name != NULL) {
+			sysfs_remove_file(&edac_dev->kobj,
+					(struct attribute *) sysfs_attrib);
+			sysfs_attrib++;
+		}
+	}
+}
+
+/*
+ * edac_device_create_sysfs() Constructor
+ *
+ * accept a created edac_device control structure
+ * and 'export' it to sysfs. The 'main' kobj should already have been
+ * created. 'instance' and 'block' kobjects should be registered
+ * along with any 'block' attributes from the low-level driver. In addition,
+ * the main attributes (if any) are connected to the main kobject of
+ * the control structure.
+ *
+ * Return:
+ *	0	Success
+ *	!0	Failure
+ */
+int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
+{
+	int err;
+	struct kobject *edac_kobj = &edac_dev->kobj;
+
+	debugf0("%s() idx=%d\n", __func__, edac_dev->dev_idx);
+
+	/* go create any main attributes the caller wants */
+	err = edac_device_add_main_sysfs_attributes(edac_dev);
+	if (err) {
+		debugf0("%s() failed to add sysfs attribs\n", __func__);
+		goto err_out;
+	}
+
+	/* create a symlink from the edac device
+	 * to the platform 'device' being used for this
+	 */
+	err = sysfs_create_link(edac_kobj,
+				&edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK);
+	if (err) {
+		debugf0("%s() sysfs_create_link() returned err= %d\n",
+			__func__, err);
+		goto err_remove_main_attribs;
+	}
+
+	/* Create the first level instance directories
+	 * In turn, the nested blocks beneath the instances will
+	 * be registered as well
+	 */
+	err = edac_device_create_instances(edac_dev);
+	if (err) {
+		debugf0("%s() edac_device_create_instances() "
+			"returned err= %d\n", __func__, err);
+		goto err_remove_link;
+	}
+
+
+	debugf4("%s() create-instances done, idx=%d\n",
+		__func__, edac_dev->dev_idx);
+
+	return 0;
+
+	/* Error unwind stack */
+err_remove_link:
+	/* remove the sym link */
+	sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK);
+
+err_remove_main_attribs:
+	edac_device_remove_main_sysfs_attributes(edac_dev);
+
+err_out:
+	return err;
+}
+
+/*
+ * edac_device_remove_sysfs() destructor
+ *
+ * given an edac_device struct, tear down the kobject resources
+ */
+void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev)
+{
+	debugf0("%s()\n", __func__);
+
+	/* remove any main attributes for this device */
+	edac_device_remove_main_sysfs_attributes(edac_dev);
+
+	/* remove the device sym link */
+	sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK);
+
+	/* walk the instance/block kobject tree, deconstructing it */
+	edac_device_delete_instances(edac_dev);
+}
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 7b62230..4471be3 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -27,1200 +27,20 @@
 #include <linux/list.h>
 #include <linux/sysdev.h>
 #include <linux/ctype.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
+#include <linux/edac.h>
 #include <asm/uaccess.h>
 #include <asm/page.h>
 #include <asm/edac.h>
-#include "edac_mc.h"
-
-#define EDAC_MC_VERSION "Ver: 2.0.1 " __DATE__
-
-
-#ifdef CONFIG_EDAC_DEBUG
-/* Values of 0 to 4 will generate output */
-int edac_debug_level = 1;
-EXPORT_SYMBOL_GPL(edac_debug_level);
-#endif
-
-/* EDAC Controls, setable by module parameter, and sysfs */
-static int log_ue = 1;
-static int log_ce = 1;
-static int panic_on_ue;
-static int poll_msec = 1000;
+#include "edac_core.h"
+#include "edac_module.h"
 
 /* lock to memory controller's control array */
-static DECLARE_MUTEX(mem_ctls_mutex);
+static DEFINE_MUTEX(mem_ctls_mutex);
 static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
 
-static struct task_struct *edac_thread;
-
-#ifdef CONFIG_PCI
-static int check_pci_parity = 0;	/* default YES check PCI parity */
-static int panic_on_pci_parity;		/* default no panic on PCI Parity */
-static atomic_t pci_parity_count = ATOMIC_INIT(0);
-
-static struct kobject edac_pci_kobj; /* /sys/devices/system/edac/pci */
-static struct completion edac_pci_kobj_complete;
-#endif	/* CONFIG_PCI */
-
-/*  START sysfs data and methods */
-
-
-static const char *mem_types[] = {
-	[MEM_EMPTY] = "Empty",
-	[MEM_RESERVED] = "Reserved",
-	[MEM_UNKNOWN] = "Unknown",
-	[MEM_FPM] = "FPM",
-	[MEM_EDO] = "EDO",
-	[MEM_BEDO] = "BEDO",
-	[MEM_SDR] = "Unbuffered-SDR",
-	[MEM_RDR] = "Registered-SDR",
-	[MEM_DDR] = "Unbuffered-DDR",
-	[MEM_RDDR] = "Registered-DDR",
-	[MEM_RMBS] = "RMBS"
-};
-
-static const char *dev_types[] = {
-	[DEV_UNKNOWN] = "Unknown",
-	[DEV_X1] = "x1",
-	[DEV_X2] = "x2",
-	[DEV_X4] = "x4",
-	[DEV_X8] = "x8",
-	[DEV_X16] = "x16",
-	[DEV_X32] = "x32",
-	[DEV_X64] = "x64"
-};
-
-static const char *edac_caps[] = {
-	[EDAC_UNKNOWN] = "Unknown",
-	[EDAC_NONE] = "None",
-	[EDAC_RESERVED] = "Reserved",
-	[EDAC_PARITY] = "PARITY",
-	[EDAC_EC] = "EC",
-	[EDAC_SECDED] = "SECDED",
-	[EDAC_S2ECD2ED] = "S2ECD2ED",
-	[EDAC_S4ECD4ED] = "S4ECD4ED",
-	[EDAC_S8ECD8ED] = "S8ECD8ED",
-	[EDAC_S16ECD16ED] = "S16ECD16ED"
-};
-
-/* sysfs object: /sys/devices/system/edac */
-static struct sysdev_class edac_class = {
-	set_kset_name("edac"),
-};
-
-/* sysfs object:
- *	/sys/devices/system/edac/mc
- */
-static struct kobject edac_memctrl_kobj;
-
-/* We use these to wait for the reference counts on edac_memctrl_kobj and
- * edac_pci_kobj to reach 0.
- */
-static struct completion edac_memctrl_kobj_complete;
-
-/*
- * /sys/devices/system/edac/mc;
- *	data structures and methods
- */
-static ssize_t memctrl_int_show(void *ptr, char *buffer)
-{
-	int *value = (int*) ptr;
-	return sprintf(buffer, "%u\n", *value);
-}
-
-static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
-{
-	int *value = (int*) ptr;
-
-	if (isdigit(*buffer))
-		*value = simple_strtoul(buffer, NULL, 0);
-
-	return count;
-}
-
-struct memctrl_dev_attribute {
-	struct attribute attr;
-	void *value;
-	ssize_t (*show)(void *,char *);
-	ssize_t (*store)(void *, const char *, size_t);
-};
-
-/* Set of show/store abstract level functions for memory control object */
-static ssize_t memctrl_dev_show(struct kobject *kobj,
-		struct attribute *attr, char *buffer)
-{
-	struct memctrl_dev_attribute *memctrl_dev;
-	memctrl_dev = (struct memctrl_dev_attribute*)attr;
-
-	if (memctrl_dev->show)
-		return memctrl_dev->show(memctrl_dev->value, buffer);
-
-	return -EIO;
-}
-
-static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
-		const char *buffer, size_t count)
-{
-	struct memctrl_dev_attribute *memctrl_dev;
-	memctrl_dev = (struct memctrl_dev_attribute*)attr;
-
-	if (memctrl_dev->store)
-		return memctrl_dev->store(memctrl_dev->value, buffer, count);
-
-	return -EIO;
-}
-
-static struct sysfs_ops memctrlfs_ops = {
-	.show   = memctrl_dev_show,
-	.store  = memctrl_dev_store
-};
-
-#define MEMCTRL_ATTR(_name,_mode,_show,_store)			\
-struct memctrl_dev_attribute attr_##_name = {			\
-	.attr = {.name = __stringify(_name), .mode = _mode },	\
-	.value  = &_name,					\
-	.show   = _show,					\
-	.store  = _store,					\
-};
-
-#define MEMCTRL_STRING_ATTR(_name,_data,_mode,_show,_store)	\
-struct memctrl_dev_attribute attr_##_name = {			\
-	.attr = {.name = __stringify(_name), .mode = _mode },	\
-	.value  = _data,					\
-	.show   = _show,					\
-	.store  = _store,					\
-};
-
-/* csrow<id> control files */
-MEMCTRL_ATTR(panic_on_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
-MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
-MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
-MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
-
-/* Base Attributes of the memory ECC object */
-static struct memctrl_dev_attribute *memctrl_attr[] = {
-	&attr_panic_on_ue,
-	&attr_log_ue,
-	&attr_log_ce,
-	&attr_poll_msec,
-	NULL,
-};
-
-/* Main MC kobject release() function */
-static void edac_memctrl_master_release(struct kobject *kobj)
-{
-	debugf1("%s()\n", __func__);
-	complete(&edac_memctrl_kobj_complete);
-}
-
-static struct kobj_type ktype_memctrl = {
-	.release = edac_memctrl_master_release,
-	.sysfs_ops = &memctrlfs_ops,
-	.default_attrs = (struct attribute **) memctrl_attr,
-};
-
-/* Initialize the main sysfs entries for edac:
- *   /sys/devices/system/edac
- *
- * and children
- *
- * Return:  0 SUCCESS
- *         !0 FAILURE
- */
-static int edac_sysfs_memctrl_setup(void)
-{
-	int err = 0;
-
-	debugf1("%s()\n", __func__);
-
-	/* create the /sys/devices/system/edac directory */
-	err = sysdev_class_register(&edac_class);
-
-	if (err) {
-		debugf1("%s() error=%d\n", __func__, err);
-		return err;
-	}
-
-	/* Init the MC's kobject */
-	memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj));
-	edac_memctrl_kobj.parent = &edac_class.kset.kobj;
-	edac_memctrl_kobj.ktype = &ktype_memctrl;
-
-	/* generate sysfs "..../edac/mc"   */
-	err = kobject_set_name(&edac_memctrl_kobj,"mc");
-
-	if (err)
-		goto fail;
-
-	/* FIXME: maybe new sysdev_create_subdir() */
-	err = kobject_register(&edac_memctrl_kobj);
-
-	if (err) {
-		debugf1("Failed to register '.../edac/mc'\n");
-		goto fail;
-	}
-
-	debugf1("Registered '.../edac/mc' kobject\n");
-
-	return 0;
-
-fail:
-	sysdev_class_unregister(&edac_class);
-	return err;
-}
-
-/*
- * MC teardown:
- *	the '..../edac/mc' kobject followed by '..../edac' itself
- */
-static void edac_sysfs_memctrl_teardown(void)
-{
-	debugf0("MC: " __FILE__ ": %s()\n", __func__);
-
-	/* Unregister the MC's kobject and wait for reference count to reach
-	 * 0.
-	 */
-	init_completion(&edac_memctrl_kobj_complete);
-	kobject_unregister(&edac_memctrl_kobj);
-	wait_for_completion(&edac_memctrl_kobj_complete);
-
-	/* Unregister the 'edac' object */
-	sysdev_class_unregister(&edac_class);
-}
-
-#ifdef CONFIG_PCI
-static ssize_t edac_pci_int_show(void *ptr, char *buffer)
-{
-	int *value = ptr;
-	return sprintf(buffer,"%d\n",*value);
-}
-
-static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
-{
-	int *value = ptr;
-
-	if (isdigit(*buffer))
-		*value = simple_strtoul(buffer,NULL,0);
-
-	return count;
-}
-
-struct edac_pci_dev_attribute {
-	struct attribute attr;
-	void *value;
-	ssize_t (*show)(void *,char *);
-	ssize_t (*store)(void *, const char *,size_t);
-};
-
-/* Set of show/store abstract level functions for PCI Parity object */
-static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
-		char *buffer)
-{
-	struct edac_pci_dev_attribute *edac_pci_dev;
-	edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
-
-	if (edac_pci_dev->show)
-		return edac_pci_dev->show(edac_pci_dev->value, buffer);
-	return -EIO;
-}
-
-static ssize_t edac_pci_dev_store(struct kobject *kobj,
-		struct attribute *attr, const char *buffer, size_t count)
-{
-	struct edac_pci_dev_attribute *edac_pci_dev;
-	edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
-
-	if (edac_pci_dev->show)
-		return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
-	return -EIO;
-}
-
-static struct sysfs_ops edac_pci_sysfs_ops = {
-	.show   = edac_pci_dev_show,
-	.store  = edac_pci_dev_store
-};
-
-#define EDAC_PCI_ATTR(_name,_mode,_show,_store)			\
-struct edac_pci_dev_attribute edac_pci_attr_##_name = {		\
-	.attr = {.name = __stringify(_name), .mode = _mode },	\
-	.value  = &_name,					\
-	.show   = _show,					\
-	.store  = _store,					\
-};
-
-#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store)	\
-struct edac_pci_dev_attribute edac_pci_attr_##_name = {		\
-	.attr = {.name = __stringify(_name), .mode = _mode },	\
-	.value  = _data,					\
-	.show   = _show,					\
-	.store  = _store,					\
-};
-
-/* PCI Parity control files */
-EDAC_PCI_ATTR(check_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show,
-	edac_pci_int_store);
-EDAC_PCI_ATTR(panic_on_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show,
-	edac_pci_int_store);
-EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
-
-/* Base Attributes of the memory ECC object */
-static struct edac_pci_dev_attribute *edac_pci_attr[] = {
-	&edac_pci_attr_check_pci_parity,
-	&edac_pci_attr_panic_on_pci_parity,
-	&edac_pci_attr_pci_parity_count,
-	NULL,
-};
-
-/* No memory to release */
-static void edac_pci_release(struct kobject *kobj)
-{
-	debugf1("%s()\n", __func__);
-	complete(&edac_pci_kobj_complete);
-}
-
-static struct kobj_type ktype_edac_pci = {
-	.release = edac_pci_release,
-	.sysfs_ops = &edac_pci_sysfs_ops,
-	.default_attrs = (struct attribute **) edac_pci_attr,
-};
-
-/**
- * edac_sysfs_pci_setup()
- *
- */
-static int edac_sysfs_pci_setup(void)
-{
-	int err;
-
-	debugf1("%s()\n", __func__);
-
-	memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj));
-	edac_pci_kobj.parent = &edac_class.kset.kobj;
-	edac_pci_kobj.ktype = &ktype_edac_pci;
-	err = kobject_set_name(&edac_pci_kobj, "pci");
-
-	if (!err) {
-		/* Instanstiate the csrow object */
-		/* FIXME: maybe new sysdev_create_subdir() */
-		err = kobject_register(&edac_pci_kobj);
-
-		if (err)
-			debugf1("Failed to register '.../edac/pci'\n");
-		else
-			debugf1("Registered '.../edac/pci' kobject\n");
-	}
-
-	return err;
-}
-
-static void edac_sysfs_pci_teardown(void)
-{
-	debugf0("%s()\n", __func__);
-	init_completion(&edac_pci_kobj_complete);
-	kobject_unregister(&edac_pci_kobj);
-	wait_for_completion(&edac_pci_kobj_complete);
-}
-
-
-static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
-{
-	int where;
-	u16 status;
-
-	where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
-	pci_read_config_word(dev, where, &status);
-
-	/* If we get back 0xFFFF then we must suspect that the card has been
-	 * pulled but the Linux PCI layer has not yet finished cleaning up.
-	 * We don't want to report on such devices
-	 */
-
-	if (status == 0xFFFF) {
-		u32 sanity;
-
-		pci_read_config_dword(dev, 0, &sanity);
-
-		if (sanity == 0xFFFFFFFF)
-			return 0;
-	}
-
-	status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
-		PCI_STATUS_PARITY;
-
-	if (status)
-		/* reset only the bits we are interested in */
-		pci_write_config_word(dev, where, status);
-
-	return status;
-}
-
-typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
-
-/* Clear any PCI parity errors logged by this device. */
-static void edac_pci_dev_parity_clear(struct pci_dev *dev)
-{
-	u8 header_type;
-
-	get_pci_parity_status(dev, 0);
-
-	/* read the device TYPE, looking for bridges */
-	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-
-	if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
-		get_pci_parity_status(dev, 1);
-}
-
-/*
- *  PCI Parity polling
- *
- */
-static void edac_pci_dev_parity_test(struct pci_dev *dev)
-{
-	u16 status;
-	u8  header_type;
-
-	/* read the STATUS register on this device
-	 */
-	status = get_pci_parity_status(dev, 0);
-
-	debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id );
-
-	/* check the status reg for errors */
-	if (status) {
-		if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
-			edac_printk(KERN_CRIT, EDAC_PCI,
-				"Signaled System Error on %s\n",
-				pci_name(dev));
-
-		if (status & (PCI_STATUS_PARITY)) {
-			edac_printk(KERN_CRIT, EDAC_PCI,
-				"Master Data Parity Error on %s\n",
-				pci_name(dev));
-
-			atomic_inc(&pci_parity_count);
-		}
-
-		if (status & (PCI_STATUS_DETECTED_PARITY)) {
-			edac_printk(KERN_CRIT, EDAC_PCI,
-				"Detected Parity Error on %s\n",
-				pci_name(dev));
-
-			atomic_inc(&pci_parity_count);
-		}
-	}
-
-	/* read the device TYPE, looking for bridges */
-	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-
-	debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id );
-
-	if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
-		/* On bridges, need to examine secondary status register  */
-		status = get_pci_parity_status(dev, 1);
-
-		debugf2("PCI SEC_STATUS= 0x%04x %s\n",
-				status, dev->dev.bus_id );
-
-		/* check the secondary status reg for errors */
-		if (status) {
-			if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
-				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
-					"Signaled System Error on %s\n",
-					pci_name(dev));
-
-			if (status & (PCI_STATUS_PARITY)) {
-				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
-					"Master Data Parity Error on "
-					"%s\n", pci_name(dev));
-
-				atomic_inc(&pci_parity_count);
-			}
-
-			if (status & (PCI_STATUS_DETECTED_PARITY)) {
-				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
-					"Detected Parity Error on %s\n",
-					pci_name(dev));
-
-				atomic_inc(&pci_parity_count);
-			}
-		}
-	}
-}
-
-/*
- * pci_dev parity list iterator
- *	Scan the PCI device list for one iteration, looking for SERRORs
- *	Master Parity ERRORS or Parity ERRORs on primary or secondary devices
- */
-static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
-{
-	struct pci_dev *dev = NULL;
-
-	/* request for kernel access to the next PCI device, if any,
-	 * and while we are looking at it have its reference count
-	 * bumped until we are done with it
-	 */
-	while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-		fn(dev);
-	}
-}
-
-static void do_pci_parity_check(void)
-{
-	unsigned long flags;
-	int before_count;
-
-	debugf3("%s()\n", __func__);
-
-	if (!check_pci_parity)
-		return;
-
-	before_count = atomic_read(&pci_parity_count);
-
-	/* scan all PCI devices looking for a Parity Error on devices and
-	 * bridges
-	 */
-	local_irq_save(flags);
-	edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
-	local_irq_restore(flags);
-
-	/* Only if operator has selected panic on PCI Error */
-	if (panic_on_pci_parity) {
-		/* If the count is different 'after' from 'before' */
-		if (before_count != atomic_read(&pci_parity_count))
-			panic("EDAC: PCI Parity Error");
-	}
-}
-
-static inline void clear_pci_parity_errors(void)
-{
-	/* Clear any PCI bus parity errors that devices initially have logged
-	 * in their registers.
-	 */
-	edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
-}
-
-#else	/* CONFIG_PCI */
-
-/* pre-process these away */
-#define	do_pci_parity_check()
-#define	clear_pci_parity_errors()
-#define	edac_sysfs_pci_teardown()
-#define	edac_sysfs_pci_setup()	(0)
-
-#endif	/* CONFIG_PCI */
-
-/* EDAC sysfs CSROW data structures and methods
- */
-
-/* Set of more default csrow<id> attribute show/store functions */
-static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data, int private)
-{
-	return sprintf(data,"%u\n", csrow->ue_count);
-}
-
-static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data, int private)
-{
-	return sprintf(data,"%u\n", csrow->ce_count);
-}
-
-static ssize_t csrow_size_show(struct csrow_info *csrow, char *data, int private)
-{
-	return sprintf(data,"%u\n", PAGES_TO_MiB(csrow->nr_pages));
-}
-
-static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data, int private)
-{
-	return sprintf(data,"%s\n", mem_types[csrow->mtype]);
-}
-
-static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data, int private)
-{
-	return sprintf(data,"%s\n", dev_types[csrow->dtype]);
-}
-
-static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data, int private)
-{
-	return sprintf(data,"%s\n", edac_caps[csrow->edac_mode]);
-}
-
-/* show/store functions for DIMM Label attributes */
-static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
-		char *data, int channel)
-{
-	return snprintf(data, EDAC_MC_LABEL_LEN,"%s",
-			csrow->channels[channel].label);
-}
-
-static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
-				const char *data,
-				size_t count,
-				int channel)
-{
-	ssize_t max_size = 0;
-
-	max_size = min((ssize_t)count,(ssize_t)EDAC_MC_LABEL_LEN-1);
-	strncpy(csrow->channels[channel].label, data, max_size);
-	csrow->channels[channel].label[max_size] = '\0';
-
-	return max_size;
-}
-
-/* show function for dynamic chX_ce_count attribute */
-static ssize_t channel_ce_count_show(struct csrow_info *csrow,
-				char *data,
-				int channel)
-{
-	return sprintf(data, "%u\n", csrow->channels[channel].ce_count);
-}
-
-/* csrow specific attribute structure */
-struct csrowdev_attribute {
-	struct attribute attr;
-	ssize_t (*show)(struct csrow_info *,char *,int);
-	ssize_t (*store)(struct csrow_info *, const char *,size_t,int);
-	int    private;
-};
-
-#define to_csrow(k) container_of(k, struct csrow_info, kobj)
-#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
-
-/* Set of show/store higher level functions for default csrow attributes */
-static ssize_t csrowdev_show(struct kobject *kobj,
-			struct attribute *attr,
-			char *buffer)
-{
-	struct csrow_info *csrow = to_csrow(kobj);
-	struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
-
-	if (csrowdev_attr->show)
-		return csrowdev_attr->show(csrow,
-					buffer,
-					csrowdev_attr->private);
-	return -EIO;
-}
-
-static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
-		const char *buffer, size_t count)
-{
-	struct csrow_info *csrow = to_csrow(kobj);
-	struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr);
-
-	if (csrowdev_attr->store)
-		return csrowdev_attr->store(csrow,
-					buffer,
-					count,
-					csrowdev_attr->private);
-	return -EIO;
-}
-
-static struct sysfs_ops csrowfs_ops = {
-	.show   = csrowdev_show,
-	.store  = csrowdev_store
-};
-
-#define CSROWDEV_ATTR(_name,_mode,_show,_store,_private)	\
-struct csrowdev_attribute attr_##_name = {			\
-	.attr = {.name = __stringify(_name), .mode = _mode },	\
-	.show   = _show,					\
-	.store  = _store,					\
-	.private = _private,					\
-};
-
-/* default cwrow<id>/attribute files */
-CSROWDEV_ATTR(size_mb,S_IRUGO,csrow_size_show,NULL,0);
-CSROWDEV_ATTR(dev_type,S_IRUGO,csrow_dev_type_show,NULL,0);
-CSROWDEV_ATTR(mem_type,S_IRUGO,csrow_mem_type_show,NULL,0);
-CSROWDEV_ATTR(edac_mode,S_IRUGO,csrow_edac_mode_show,NULL,0);
-CSROWDEV_ATTR(ue_count,S_IRUGO,csrow_ue_count_show,NULL,0);
-CSROWDEV_ATTR(ce_count,S_IRUGO,csrow_ce_count_show,NULL,0);
-
-/* default attributes of the CSROW<id> object */
-static struct csrowdev_attribute *default_csrow_attr[] = {
-	&attr_dev_type,
-	&attr_mem_type,
-	&attr_edac_mode,
-	&attr_size_mb,
-	&attr_ue_count,
-	&attr_ce_count,
-	NULL,
-};
-
-
-/* possible dynamic channel DIMM Label attribute files */
-CSROWDEV_ATTR(ch0_dimm_label,S_IRUGO|S_IWUSR,
-		channel_dimm_label_show,
-		channel_dimm_label_store,
-		0 );
-CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR,
-		channel_dimm_label_show,
-		channel_dimm_label_store,
-		1 );
-CSROWDEV_ATTR(ch2_dimm_label,S_IRUGO|S_IWUSR,
-		channel_dimm_label_show,
-		channel_dimm_label_store,
-		2 );
-CSROWDEV_ATTR(ch3_dimm_label,S_IRUGO|S_IWUSR,
-		channel_dimm_label_show,
-		channel_dimm_label_store,
-		3 );
-CSROWDEV_ATTR(ch4_dimm_label,S_IRUGO|S_IWUSR,
-		channel_dimm_label_show,
-		channel_dimm_label_store,
-		4 );
-CSROWDEV_ATTR(ch5_dimm_label,S_IRUGO|S_IWUSR,
-		channel_dimm_label_show,
-		channel_dimm_label_store,
-		5 );
-
-/* Total possible dynamic DIMM Label attribute file table */
-static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = {
-		&attr_ch0_dimm_label,
-		&attr_ch1_dimm_label,
-		&attr_ch2_dimm_label,
-		&attr_ch3_dimm_label,
-		&attr_ch4_dimm_label,
-		&attr_ch5_dimm_label
-};
-
-/* possible dynamic channel ce_count attribute files */
-CSROWDEV_ATTR(ch0_ce_count,S_IRUGO|S_IWUSR,
-		channel_ce_count_show,
-		NULL,
-		0 );
-CSROWDEV_ATTR(ch1_ce_count,S_IRUGO|S_IWUSR,
-		channel_ce_count_show,
-		NULL,
-		1 );
-CSROWDEV_ATTR(ch2_ce_count,S_IRUGO|S_IWUSR,
-		channel_ce_count_show,
-		NULL,
-		2 );
-CSROWDEV_ATTR(ch3_ce_count,S_IRUGO|S_IWUSR,
-		channel_ce_count_show,
-		NULL,
-		3 );
-CSROWDEV_ATTR(ch4_ce_count,S_IRUGO|S_IWUSR,
-		channel_ce_count_show,
-		NULL,
-		4 );
-CSROWDEV_ATTR(ch5_ce_count,S_IRUGO|S_IWUSR,
-		channel_ce_count_show,
-		NULL,
-		5 );
-
-/* Total possible dynamic ce_count attribute file table */
-static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = {
-		&attr_ch0_ce_count,
-		&attr_ch1_ce_count,
-		&attr_ch2_ce_count,
-		&attr_ch3_ce_count,
-		&attr_ch4_ce_count,
-		&attr_ch5_ce_count
-};
-
-
-#define EDAC_NR_CHANNELS	6
-
-/* Create dynamic CHANNEL files, indexed by 'chan',  under specifed CSROW */
-static int edac_create_channel_files(struct kobject *kobj, int chan)
-{
-	int err=-ENODEV;
-
-	if (chan >= EDAC_NR_CHANNELS)
-		return err;
-
-	/* create the DIMM label attribute file */
-	err = sysfs_create_file(kobj,
-			(struct attribute *) dynamic_csrow_dimm_attr[chan]);
-
-	if (!err) {
-		/* create the CE Count attribute file */
-		err = sysfs_create_file(kobj,
-			(struct attribute *) dynamic_csrow_ce_count_attr[chan]);
-	} else {
-		debugf1("%s()  dimm labels and ce_count files created", __func__);
-	}
-
-	return err;
-}
-
-/* No memory to release for this kobj */
-static void edac_csrow_instance_release(struct kobject *kobj)
-{
-	struct csrow_info *cs;
-
-	cs = container_of(kobj, struct csrow_info, kobj);
-	complete(&cs->kobj_complete);
-}
-
-/* the kobj_type instance for a CSROW */
-static struct kobj_type ktype_csrow = {
-	.release = edac_csrow_instance_release,
-	.sysfs_ops = &csrowfs_ops,
-	.default_attrs = (struct attribute **) default_csrow_attr,
-};
-
-/* Create a CSROW object under specifed edac_mc_device */
-static int edac_create_csrow_object(
-		struct kobject *edac_mci_kobj,
-		struct csrow_info *csrow,
-		int index)
-{
-	int err = 0;
-	int chan;
-
-	memset(&csrow->kobj, 0, sizeof(csrow->kobj));
-
-	/* generate ..../edac/mc/mc<id>/csrow<index>   */
-
-	csrow->kobj.parent = edac_mci_kobj;
-	csrow->kobj.ktype = &ktype_csrow;
-
-	/* name this instance of csrow<id> */
-	err = kobject_set_name(&csrow->kobj,"csrow%d",index);
-	if (err)
-		goto error_exit;
-
-	/* Instanstiate the csrow object */
-	err = kobject_register(&csrow->kobj);
-	if (!err) {
-		/* Create the dyanmic attribute files on this csrow,
-		 * namely, the DIMM labels and the channel ce_count
-		 */
-		for (chan = 0; chan < csrow->nr_channels; chan++) {
-			err = edac_create_channel_files(&csrow->kobj,chan);
-			if (err)
-				break;
-		}
-	}
-
-error_exit:
-	return err;
-}
-
-/* default sysfs methods and data structures for the main MCI kobject */
-
-static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
-		const char *data, size_t count)
-{
-	int row, chan;
-
-	mci->ue_noinfo_count = 0;
-	mci->ce_noinfo_count = 0;
-	mci->ue_count = 0;
-	mci->ce_count = 0;
-
-	for (row = 0; row < mci->nr_csrows; row++) {
-		struct csrow_info *ri = &mci->csrows[row];
-
-		ri->ue_count = 0;
-		ri->ce_count = 0;
-
-		for (chan = 0; chan < ri->nr_channels; chan++)
-			ri->channels[chan].ce_count = 0;
-	}
-
-	mci->start_time = jiffies;
-	return count;
-}
-
-/* memory scrubbing */
-static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
-					const char *data, size_t count)
-{
-	u32 bandwidth = -1;
-
-	if (mci->set_sdram_scrub_rate) {
-
-		memctrl_int_store(&bandwidth, data, count);
-
-		if (!(*mci->set_sdram_scrub_rate)(mci, &bandwidth)) {
-			edac_printk(KERN_DEBUG, EDAC_MC,
-				"Scrub rate set successfully, applied: %d\n",
-				bandwidth);
-		} else {
-			/* FIXME: error codes maybe? */
-			edac_printk(KERN_DEBUG, EDAC_MC,
-				"Scrub rate set FAILED, could not apply: %d\n",
-				bandwidth);
-		}
-	} else {
-		/* FIXME: produce "not implemented" ERROR for user-side. */
-		edac_printk(KERN_WARNING, EDAC_MC,
-			"Memory scrubbing 'set'control is not implemented!\n");
-	}
-	return count;
-}
-
-static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
-{
-	u32 bandwidth = -1;
-
-	if (mci->get_sdram_scrub_rate) {
-		if (!(*mci->get_sdram_scrub_rate)(mci, &bandwidth)) {
-			edac_printk(KERN_DEBUG, EDAC_MC,
-				"Scrub rate successfully, fetched: %d\n",
-				bandwidth);
-		} else {
-			/* FIXME: error codes maybe? */
-			edac_printk(KERN_DEBUG, EDAC_MC,
-				"Scrub rate fetch FAILED, got: %d\n",
-				bandwidth);
-		}
-	} else {
-		/* FIXME: produce "not implemented" ERROR for user-side.  */
-		edac_printk(KERN_WARNING, EDAC_MC,
-			"Memory scrubbing 'get' control is not implemented!\n");
-	}
-	return sprintf(data, "%d\n", bandwidth);
-}
-
-/* default attribute files for the MCI object */
-static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
-{
-	return sprintf(data,"%d\n", mci->ue_count);
-}
-
-static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
-{
-	return sprintf(data,"%d\n", mci->ce_count);
-}
-
-static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
-{
-	return sprintf(data,"%d\n", mci->ce_noinfo_count);
-}
-
-static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
-{
-	return sprintf(data,"%d\n", mci->ue_noinfo_count);
-}
-
-static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
-{
-	return sprintf(data,"%ld\n", (jiffies - mci->start_time) / HZ);
-}
-
-static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
-{
-	return sprintf(data,"%s\n", mci->ctl_name);
-}
-
-static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
-{
-	int total_pages, csrow_idx;
-
-	for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
-			csrow_idx++) {
-		struct csrow_info *csrow = &mci->csrows[csrow_idx];
-
-		if (!csrow->nr_pages)
-			continue;
-
-		total_pages += csrow->nr_pages;
-	}
-
-	return sprintf(data,"%u\n", PAGES_TO_MiB(total_pages));
-}
-
-struct mcidev_attribute {
-	struct attribute attr;
-	ssize_t (*show)(struct mem_ctl_info *,char *);
-	ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
-};
-
-#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
-#define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr)
-
-/* MCI show/store functions for top most object */
-static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
-		char *buffer)
-{
-	struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
-	struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
-
-	if (mcidev_attr->show)
-		return mcidev_attr->show(mem_ctl_info, buffer);
-
-	return -EIO;
-}
-
-static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
-		const char *buffer, size_t count)
-{
-	struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
-	struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
-
-	if (mcidev_attr->store)
-		return mcidev_attr->store(mem_ctl_info, buffer, count);
-
-	return -EIO;
-}
-
-static struct sysfs_ops mci_ops = {
-	.show = mcidev_show,
-	.store = mcidev_store
-};
-
-#define MCIDEV_ATTR(_name,_mode,_show,_store)			\
-struct mcidev_attribute mci_attr_##_name = {			\
-	.attr = {.name = __stringify(_name), .mode = _mode },	\
-	.show   = _show,					\
-	.store  = _store,					\
-};
-
-/* default Control file */
-MCIDEV_ATTR(reset_counters,S_IWUSR,NULL,mci_reset_counters_store);
-
-/* default Attribute files */
-MCIDEV_ATTR(mc_name,S_IRUGO,mci_ctl_name_show,NULL);
-MCIDEV_ATTR(size_mb,S_IRUGO,mci_size_mb_show,NULL);
-MCIDEV_ATTR(seconds_since_reset,S_IRUGO,mci_seconds_show,NULL);
-MCIDEV_ATTR(ue_noinfo_count,S_IRUGO,mci_ue_noinfo_show,NULL);
-MCIDEV_ATTR(ce_noinfo_count,S_IRUGO,mci_ce_noinfo_show,NULL);
-MCIDEV_ATTR(ue_count,S_IRUGO,mci_ue_count_show,NULL);
-MCIDEV_ATTR(ce_count,S_IRUGO,mci_ce_count_show,NULL);
-
-/* memory scrubber attribute file */
-MCIDEV_ATTR(sdram_scrub_rate,S_IRUGO|S_IWUSR,mci_sdram_scrub_rate_show,mci_sdram_scrub_rate_store);
-
-static struct mcidev_attribute *mci_attr[] = {
-	&mci_attr_reset_counters,
-	&mci_attr_mc_name,
-	&mci_attr_size_mb,
-	&mci_attr_seconds_since_reset,
-	&mci_attr_ue_noinfo_count,
-	&mci_attr_ce_noinfo_count,
-	&mci_attr_ue_count,
-	&mci_attr_ce_count,
-	&mci_attr_sdram_scrub_rate,
-	NULL
-};
-
-/*
- * Release of a MC controlling instance
- */
-static void edac_mci_instance_release(struct kobject *kobj)
-{
-	struct mem_ctl_info *mci;
-
-	mci = to_mci(kobj);
-	debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
-	complete(&mci->kobj_complete);
-}
-
-static struct kobj_type ktype_mci = {
-	.release = edac_mci_instance_release,
-	.sysfs_ops = &mci_ops,
-	.default_attrs = (struct attribute **) mci_attr,
-};
-
-
-#define EDAC_DEVICE_SYMLINK	"device"
-
-/*
- * Create a new Memory Controller kobject instance,
- *	mc<id> under the 'mc' directory
- *
- * Return:
- *	0	Success
- *	!0	Failure
- */
-static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
-{
-	int i;
-	int err;
-	struct csrow_info *csrow;
-	struct kobject *edac_mci_kobj=&mci->edac_mci_kobj;
-
-	debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
-	memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj));
-
-	/* set the name of the mc<id> object */
-	err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx);
-	if (err)
-		return err;
-
-	/* link to our parent the '..../edac/mc' object */
-	edac_mci_kobj->parent = &edac_memctrl_kobj;
-	edac_mci_kobj->ktype = &ktype_mci;
-
-	/* register the mc<id> kobject */
-	err = kobject_register(edac_mci_kobj);
-	if (err)
-		return err;
-
-	/* create a symlink for the device */
-	err = sysfs_create_link(edac_mci_kobj, &mci->dev->kobj,
-				EDAC_DEVICE_SYMLINK);
-	if (err)
-		goto fail0;
-
-	/* Make directories for each CSROW object
-	 * under the mc<id> kobject
-	 */
-	for (i = 0; i < mci->nr_csrows; i++) {
-		csrow = &mci->csrows[i];
-
-		/* Only expose populated CSROWs */
-		if (csrow->nr_pages > 0) {
-			err = edac_create_csrow_object(edac_mci_kobj,csrow,i);
-			if (err)
-				goto fail1;
-		}
-	}
-
-	return 0;
-
-	/* CSROW error: backout what has already been registered,  */
-fail1:
-	for ( i--; i >= 0; i--) {
-		if (csrow->nr_pages > 0) {
-			init_completion(&csrow->kobj_complete);
-			kobject_unregister(&mci->csrows[i].kobj);
-			wait_for_completion(&csrow->kobj_complete);
-		}
-	}
-
-fail0:
-	init_completion(&mci->kobj_complete);
-	kobject_unregister(edac_mci_kobj);
-	wait_for_completion(&mci->kobj_complete);
-	return err;
-}
-
-/*
- * remove a Memory Controller instance
- */
-static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
-{
-	int i;
-
-	debugf0("%s()\n", __func__);
-
-	/* remove all csrow kobjects */
-	for (i = 0; i < mci->nr_csrows; i++) {
-		if (mci->csrows[i].nr_pages > 0) {
-			init_completion(&mci->csrows[i].kobj_complete);
-			kobject_unregister(&mci->csrows[i].kobj);
-			wait_for_completion(&mci->csrows[i].kobj_complete);
-		}
-	}
-
-	sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
-	init_completion(&mci->kobj_complete);
-	kobject_unregister(&mci->edac_mci_kobj);
-	wait_for_completion(&mci->kobj_complete);
-}
-
-/* END OF sysfs data and methods */
-
 #ifdef CONFIG_EDAC_DEBUG
 
-void edac_mc_dump_channel(struct channel_info *chan)
+static void edac_mc_dump_channel(struct channel_info *chan)
 {
 	debugf4("\tchannel = %p\n", chan);
 	debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
@@ -1228,25 +48,21 @@
 	debugf4("\tchannel->label = '%s'\n", chan->label);
 	debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
 }
-EXPORT_SYMBOL_GPL(edac_mc_dump_channel);
 
-void edac_mc_dump_csrow(struct csrow_info *csrow)
+static void edac_mc_dump_csrow(struct csrow_info *csrow)
 {
 	debugf4("\tcsrow = %p\n", csrow);
 	debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
-	debugf4("\tcsrow->first_page = 0x%lx\n",
-		csrow->first_page);
+	debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
 	debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
 	debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
 	debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
-	debugf4("\tcsrow->nr_channels = %d\n",
-		csrow->nr_channels);
+	debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
 	debugf4("\tcsrow->channels = %p\n", csrow->channels);
 	debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
 }
-EXPORT_SYMBOL_GPL(edac_mc_dump_csrow);
 
-void edac_mc_dump_mci(struct mem_ctl_info *mci)
+static void edac_mc_dump_mci(struct mem_ctl_info *mci)
 {
 	debugf3("\tmci = %p\n", mci);
 	debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
@@ -1256,13 +72,11 @@
 	debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
 		mci->nr_csrows, mci->csrows);
 	debugf3("\tdev = %p\n", mci->dev);
-	debugf3("\tmod_name:ctl_name = %s:%s\n",
-		mci->mod_name, mci->ctl_name);
+	debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
 	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
 }
-EXPORT_SYMBOL_GPL(edac_mc_dump_mci);
 
-#endif  /* CONFIG_EDAC_DEBUG */
+#endif				/* CONFIG_EDAC_DEBUG */
 
 /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
  * Adjust 'ptr' so that its alignment is at least as stringent as what the
@@ -1271,7 +85,7 @@
  * If 'size' is a constant, the compiler will optimize this whole function
  * down to either a no-op or the addition of a constant to the value of 'ptr'.
  */
-static inline char * align_ptr(void *ptr, unsigned size)
+void *edac_align_ptr(void *ptr, unsigned size)
 {
 	unsigned align, r;
 
@@ -1288,14 +102,14 @@
 	else if (size > sizeof(char))
 		align = sizeof(short);
 	else
-		return (char *) ptr;
+		return (char *)ptr;
 
 	r = size % align;
 
 	if (r == 0)
-		return (char *) ptr;
+		return (char *)ptr;
 
-	return (char *) (((unsigned long) ptr) + align - r);
+	return (void *)(((unsigned long)ptr) + align - r);
 }
 
 /**
@@ -1315,7 +129,7 @@
  *	struct mem_ctl_info pointer
  */
 struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
-		unsigned nr_chans)
+				unsigned nr_chans, int edac_index)
 {
 	struct mem_ctl_info *mci;
 	struct csrow_info *csi, *csrow;
@@ -1323,30 +137,32 @@
 	void *pvt;
 	unsigned size;
 	int row, chn;
+	int err;
 
 	/* Figure out the offsets of the various items from the start of an mc
 	 * structure.  We want the alignment of each item to be at least as
 	 * stringent as what the compiler would provide if we could simply
 	 * hardcode everything into a single struct.
 	 */
-	mci = (struct mem_ctl_info *) 0;
-	csi = (struct csrow_info *)align_ptr(&mci[1], sizeof(*csi));
-	chi = (struct channel_info *)
-			align_ptr(&csi[nr_csrows], sizeof(*chi));
-	pvt = align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
-	size = ((unsigned long) pvt) + sz_pvt;
+	mci = (struct mem_ctl_info *)0;
+	csi = edac_align_ptr(&mci[1], sizeof(*csi));
+	chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
+	pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
+	size = ((unsigned long)pvt) + sz_pvt;
 
-	if ((mci = kmalloc(size, GFP_KERNEL)) == NULL)
+	mci = kzalloc(size, GFP_KERNEL);
+	if (mci == NULL)
 		return NULL;
 
 	/* Adjust pointers so they point within the memory we just allocated
 	 * rather than an imaginary chunk of memory located at address 0.
 	 */
-	csi = (struct csrow_info *) (((char *) mci) + ((unsigned long) csi));
-	chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi));
-	pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL;
+	csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
+	chi = (struct channel_info *)(((char *)mci) + ((unsigned long)chi));
+	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
 
-	memset(mci, 0, size);  /* clear all fields */
+	/* setup index and various internal pointers */
+	mci->mc_idx = edac_index;
 	mci->csrows = csi;
 	mci->pvt_info = pvt;
 	mci->nr_csrows = nr_csrows;
@@ -1366,17 +182,35 @@
 		}
 	}
 
+	mci->op_state = OP_ALLOC;
+
+	/*
+	 * Initialize the 'root' kobj for the edac_mc controller
+	 */
+	err = edac_mc_register_sysfs_main_kobj(mci);
+	if (err) {
+		kfree(mci);
+		return NULL;
+	}
+
+	/* at this point, the root kobj is valid, and in order to
+	 * 'free' the object, then the function:
+	 *      edac_mc_unregister_sysfs_main_kobj() must be called
+	 * which will perform kobj unregistration and the actual free
+	 * will occur during the kobject callback operation
+	 */
 	return mci;
 }
 EXPORT_SYMBOL_GPL(edac_mc_alloc);
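The allocation above uses a small trick: the member layout is first computed against a NULL base, so each intermediate pointer is really an offset, edac_align_ptr() rounds every offset up to the natural alignment of the item it will hold, and after the single kzalloc() the offsets are rebased onto the real buffer. A minimal sketch of the same idea, reusing the edac_align_ptr() helper above but with hypothetical struct hdr/struct item types that are not part of this patch:

	#include <linux/slab.h>

	struct item { int val; };
	struct hdr  { unsigned int n; struct item *items; };

	static struct hdr *hdr_alloc(unsigned int n)
	{
		struct hdr *hdr = NULL;	/* offsets are computed against address 0 */
		struct item *items;
		unsigned long size;

		/* lay the members out against 0 to learn their aligned offsets */
		items = edac_align_ptr(&hdr[1], sizeof(*items));
		size = (unsigned long)&items[n];

		hdr = kzalloc(size, GFP_KERNEL);
		if (hdr == NULL)
			return NULL;

		/* rebase the precomputed offsets into the real buffer */
		hdr->items = (struct item *)((char *)hdr + (unsigned long)items);
		hdr->n = n;
		return hdr;
	}

One kzalloc() then covers the header, the trailing arrays and their alignment padding, so a single kfree() of the base pointer releases everything at the end of the object's life.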
 
 /**
- * edac_mc_free:  Free a previously allocated 'mci' structure
+ * edac_mc_free
+ *	'Free' a previously allocated 'mci' structure
  * @mci: pointer to a struct mem_ctl_info structure
  */
 void edac_mc_free(struct mem_ctl_info *mci)
 {
-	kfree(mci);
+	edac_mc_unregister_sysfs_main_kobj(mci);
 }
 EXPORT_SYMBOL_GPL(edac_mc_free);
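Note that edac_mc_free() above no longer calls kfree() directly; as the comment in edac_mc_alloc() says, the buffer is released from the kobject release callback once edac_mc_unregister_sysfs_main_kobj() drops the last reference. The real release handler lives in edac_mc_sysfs.c and may differ in detail; a sketch of the usual idiom (function and ktype names here are illustrative only):

	static void mci_instance_release(struct kobject *kobj)
	{
		struct mem_ctl_info *mci;

		/* the kobject is embedded in the mci, so the final
		 * kobject_put() ends up freeing the whole allocation */
		mci = container_of(kobj, struct mem_ctl_info, edac_mci_kobj);
		kfree(mci);
	}

	static struct kobj_type mci_instance_ktype = {
		.release	= mci_instance_release,
		/* .sysfs_ops and .default_attrs omitted from this sketch */
	};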
 
@@ -1397,18 +231,136 @@
 	return NULL;
 }
 
+/*
+ * handler for EDAC to check if NMI type handler has asserted interrupt
+ */
+static int edac_mc_assert_error_check_and_clear(void)
+{
+	int old_state;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		return 1;
+
+	old_state = edac_err_assert;
+	edac_err_assert = 0;
+
+	return old_state;
+}
+
+/*
+ * edac_mc_workq_function
+ *	performs the operation scheduled by a workq request
+ */
+static void edac_mc_workq_function(struct work_struct *work_req)
+{
+	struct delayed_work *d_work = (struct delayed_work *)work_req;
+	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
+
+	mutex_lock(&mem_ctls_mutex);
+
+	/* if this control struct has moved to offline state, we are done */
+	if (mci->op_state == OP_OFFLINE) {
+		mutex_unlock(&mem_ctls_mutex);
+		return;
+	}
+
+	/* Only poll controllers that are running polled and have a check */
+	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
+		mci->edac_check(mci);
+
+	/*
+	 * FIXME: temp place holder for PCI checks,
+	 * goes away when we break out PCI
+	 */
+	edac_pci_do_parity_check();
+
+	mutex_unlock(&mem_ctls_mutex);
+
+	/* Reschedule */
+	queue_delayed_work(edac_workqueue, &mci->work,
+			msecs_to_jiffies(edac_mc_get_poll_msec()));
+}
+
+/*
+ * edac_mc_workq_setup
+ *	initialize a workq item for this mci
+ *	passing in the new delay period in msec
+ *
+ *	locking model:
+ *
+ *		called with the mem_ctls_mutex held
+ */
+static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+{
+	debugf0("%s()\n", __func__);
+
+	/* if this instance is not in the POLL state, then simply return */
+	if (mci->op_state != OP_RUNNING_POLL)
+		return;
+
+	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+}
+
+/*
+ * edac_mc_workq_teardown
+ *	stop the workq processing on this mci
+ *
+ *	locking model:
+ *
+ *		called WITHOUT lock held
+ */
+static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
+{
+	int status;
+
+	/* if not running POLL, leave now */
+	if (mci->op_state == OP_RUNNING_POLL) {
+		status = cancel_delayed_work(&mci->work);
+		if (status == 0) {
+			debugf0("%s() not canceled, flush the queue\n",
+				__func__);
+
+			/* workq instance might be running, wait for it */
+			flush_workqueue(edac_workqueue);
+		}
+	}
+}
+
+/*
+ * edac_reset_delay_period
+ */
+static void edac_reset_delay_period(struct mem_ctl_info *mci, unsigned long value)
+{
+	/* cancel the current workq request */
+	edac_mc_workq_teardown(mci);
+
+	/* lock the list of devices for the new setup */
+	mutex_lock(&mem_ctls_mutex);
+
+	/* restart the workq request, with new delay value */
+	edac_mc_workq_setup(mci, value);
+
+	mutex_unlock(&mem_ctls_mutex);
+}
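
The three helpers above (edac_mc_workq_function/_setup/_teardown) implement polling as a self-rescheduling delayed work item on a private workqueue. A stripped-down sketch of the same pattern, with placeholder names and a hypothetical poll period rather than code from this patch:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	static struct workqueue_struct *poll_wq;
	static struct delayed_work poll_work;
	static unsigned int poll_msec = 1000;	/* placeholder period */

	static void poll_fn(struct work_struct *work)
	{
		/* ... inspect the hardware here ... */

		/* re-arm: the work item reschedules itself every poll_msec */
		queue_delayed_work(poll_wq, &poll_work,
				msecs_to_jiffies(poll_msec));
	}

	static int poll_start(void)
	{
		poll_wq = create_singlethread_workqueue("poll_example");
		if (poll_wq == NULL)
			return -ENOMEM;

		INIT_DELAYED_WORK(&poll_work, poll_fn);
		queue_delayed_work(poll_wq, &poll_work,
				msecs_to_jiffies(poll_msec));
		return 0;
	}

	static void poll_stop(void)
	{
		/* mirror edac_mc_workq_teardown(): stop a pending timer,
		 * then wait out an instance that is already running */
		if (!cancel_delayed_work(&poll_work))
			flush_workqueue(poll_wq);
		destroy_workqueue(poll_wq);
	}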
+
 /* Return 0 on success, 1 on failure.
  * Before calling this function, caller must
  * assign a unique value to mci->mc_idx.
+ *
+ *	locking model:
+ *
+ *		called with the mem_ctls_mutex lock held
  */
-static int add_mc_to_global_list (struct mem_ctl_info *mci)
+static int add_mc_to_global_list(struct mem_ctl_info *mci)
 {
 	struct list_head *item, *insert_before;
 	struct mem_ctl_info *p;
 
 	insert_before = &mc_devices;
 
-	if (unlikely((p = find_mci_by_dev(mci->dev)) != NULL))
+	p = find_mci_by_dev(mci->dev);
+	if (unlikely(p != NULL))
 		goto fail0;
 
 	list_for_each(item, &mc_devices) {
@@ -1424,18 +376,19 @@
 	}
 
 	list_add_tail_rcu(&mci->link, insert_before);
+	atomic_inc(&edac_handlers);
 	return 0;
 
 fail0:
 	edac_printk(KERN_WARNING, EDAC_MC,
-		    "%s (%s) %s %s already assigned %d\n", p->dev->bus_id,
-		    dev_name(p->dev), p->mod_name, p->ctl_name, p->mc_idx);
+		"%s (%s) %s %s already assigned %d\n", p->dev->bus_id,
+		dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
 	return 1;
 
 fail1:
 	edac_printk(KERN_WARNING, EDAC_MC,
-		    "bug in low-level driver: attempt to assign\n"
-		    "    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
+		"bug in low-level driver: attempt to assign\n"
+		"    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
 	return 1;
 }
 
@@ -1450,6 +403,7 @@
 
 static void del_mc_from_global_list(struct mem_ctl_info *mci)
 {
+	atomic_dec(&edac_handlers);
 	list_del_rcu(&mci->link);
 	init_completion(&mci->complete);
 	call_rcu(&mci->rcu, complete_mc_list_del);
@@ -1457,6 +411,34 @@
 }
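del_mc_from_global_list() (partially shown above) removes the mci from a list that may still be traversed from NMI context, so the entry cannot be reused until an RCU grace period has passed; the rcu_head/completion pair in struct mem_ctl_info exists for exactly that hand-shake. A generic sketch of the idiom, using a hypothetical struct node rather than the driver's types:

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/completion.h>

	struct node {
		struct list_head link;
		struct rcu_head rcu;
		struct completion gone;
	};

	static void node_del_rcu_cb(struct rcu_head *head)
	{
		struct node *n = container_of(head, struct node, rcu);

		complete(&n->gone);	/* the grace period has elapsed */
	}

	static void node_remove(struct node *n)
	{
		list_del_rcu(&n->link);		/* readers may still hold it */
		init_completion(&n->gone);
		call_rcu(&n->rcu, node_del_rcu_cb);
		wait_for_completion(&n->gone);	/* now safe to free or reuse */
	}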
 
 /**
+ * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
+ *
+ * If found, return a pointer to the structure.
+ * Else return NULL.
+ *
+ * Caller must hold mem_ctls_mutex.
+ */
+struct mem_ctl_info *edac_mc_find(int idx)
+{
+	struct list_head *item;
+	struct mem_ctl_info *mci;
+
+	list_for_each(item, &mc_devices) {
+		mci = list_entry(item, struct mem_ctl_info, link);
+
+		if (mci->mc_idx >= idx) {
+			if (mci->mc_idx == idx)
+				return mci;
+
+			break;
+		}
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(edac_mc_find);
+
+/**
  * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
  *                 create sysfs entries associated with mci structure
  * @mci: pointer to the mci structure to be added to the list
@@ -1468,10 +450,10 @@
  */
 
 /* FIXME - should a warning be printed if no error detection? correction? */
-int edac_mc_add_mc(struct mem_ctl_info *mci, int mc_idx)
+int edac_mc_add_mc(struct mem_ctl_info *mci)
 {
 	debugf0("%s()\n", __func__);
-	mci->mc_idx = mc_idx;
+
 #ifdef CONFIG_EDAC_DEBUG
 	if (edac_debug_level >= 3)
 		edac_mc_dump_mci(mci);
@@ -1484,12 +466,12 @@
 
 			edac_mc_dump_csrow(&mci->csrows[i]);
 			for (j = 0; j < mci->csrows[i].nr_channels; j++)
-				edac_mc_dump_channel(
-					&mci->csrows[i].channels[j]);
+				edac_mc_dump_channel(&mci->csrows[i].
+						channels[j]);
 		}
 	}
 #endif
-	down(&mem_ctls_mutex);
+	mutex_lock(&mem_ctls_mutex);
 
 	if (add_mc_to_global_list(mci))
 		goto fail0;
@@ -1503,18 +485,28 @@
 		goto fail1;
 	}
 
-	/* Report action taken */
-	edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: DEV %s\n",
-		mci->mod_name, mci->ctl_name, dev_name(mci->dev));
+	/* If there IS a check routine, then we are running POLLED */
+	if (mci->edac_check != NULL) {
+		/* This instance is NOW RUNNING */
+		mci->op_state = OP_RUNNING_POLL;
 
-	up(&mem_ctls_mutex);
+		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+	} else {
+		mci->op_state = OP_RUNNING_INTERRUPT;
+	}
+
+	/* Report action taken */
+	edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
+		" DEV %s\n", mci->mod_name, mci->ctl_name, dev_name(mci));
+
+	mutex_unlock(&mem_ctls_mutex);
 	return 0;
 
 fail1:
 	del_mc_from_global_list(mci);
 
 fail0:
-	up(&mem_ctls_mutex);
+	mutex_unlock(&mem_ctls_mutex);
 	return 1;
 }
 EXPORT_SYMBOL_GPL(edac_mc_add_mc);
@@ -1526,29 +518,41 @@
  *
  * Return pointer to removed mci structure, or NULL if device not found.
  */
-struct mem_ctl_info * edac_mc_del_mc(struct device *dev)
+struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
 {
 	struct mem_ctl_info *mci;
 
-	debugf0("MC: %s()\n", __func__);
-	down(&mem_ctls_mutex);
+	debugf0("%s()\n", __func__);
 
-	if ((mci = find_mci_by_dev(dev)) == NULL) {
-		up(&mem_ctls_mutex);
+	mutex_lock(&mem_ctls_mutex);
+
+	/* find the requested mci struct in the global list */
+	mci = find_mci_by_dev(dev);
+	if (mci == NULL) {
+		mutex_unlock(&mem_ctls_mutex);
 		return NULL;
 	}
 
-	edac_remove_sysfs_mci_device(mci);
+	/* marking MCI offline */
+	mci->op_state = OP_OFFLINE;
+
 	del_mc_from_global_list(mci);
-	up(&mem_ctls_mutex);
+	mutex_unlock(&mem_ctls_mutex);
+
+	/* flush workq processes and remove sysfs */
+	edac_mc_workq_teardown(mci);
+	edac_remove_sysfs_mci_device(mci);
+
 	edac_printk(KERN_INFO, EDAC_MC,
 		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
-		mci->mod_name, mci->ctl_name, dev_name(mci->dev));
+		mci->mod_name, mci->ctl_name, dev_name(mci));
+
 	return mci;
 }
 EXPORT_SYMBOL_GPL(edac_mc_del_mc);
 
-void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size)
+static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
+				u32 size)
 {
 	struct page *pg;
 	void *virt_addr;
@@ -1557,7 +561,7 @@
 	debugf3("%s()\n", __func__);
 
 	/* ECC error page was not in our memory. Ignore it. */
-	if(!pfn_valid(page))
+	if (!pfn_valid(page))
 		return;
 
 	/* Find the actual page structure then map it and fix */
@@ -1577,7 +581,6 @@
 	if (PageHighMem(pg))
 		local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(edac_mc_scrub_block);
 
 /* FIXME - should return -1 */
 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
@@ -1611,7 +614,7 @@
 	if (row == -1)
 		edac_mc_printk(mci, KERN_ERR,
 			"could not look up page error address %lx\n",
-			(unsigned long) page);
+			(unsigned long)page);
 
 	return row;
 }
@@ -1620,8 +623,9 @@
 /* FIXME - setable log (warning/emerg) levels */
 /* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
 void edac_mc_handle_ce(struct mem_ctl_info *mci,
-		unsigned long page_frame_number, unsigned long offset_in_page,
-		unsigned long syndrome, int row, int channel, const char *msg)
+		unsigned long page_frame_number,
+		unsigned long offset_in_page, unsigned long syndrome,
+		int row, int channel, const char *msg)
 {
 	unsigned long remapped_page;
 
@@ -1647,7 +651,7 @@
 		return;
 	}
 
-	if (log_ce)
+	if (edac_mc_get_log_ce())
 		/* FIXME - put in DIMM location */
 		edac_mc_printk(mci, KERN_WARNING,
 			"CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
@@ -1671,18 +675,18 @@
 		 * page - which can then be scrubbed.
 		 */
 		remapped_page = mci->ctl_page_to_phys ?
-		    mci->ctl_page_to_phys(mci, page_frame_number) :
-		    page_frame_number;
+			mci->ctl_page_to_phys(mci, page_frame_number) :
+			page_frame_number;
 
 		edac_mc_scrub_block(remapped_page, offset_in_page,
-					mci->csrows[row].grain);
+				mci->csrows[row].grain);
 	}
 }
 EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
 
 void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
 {
-	if (log_ce)
+	if (edac_mc_get_log_ce())
 		edac_mc_printk(mci, KERN_WARNING,
 			"CE - no information available: %s\n", msg);
 
@@ -1692,8 +696,8 @@
 EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
 
 void edac_mc_handle_ue(struct mem_ctl_info *mci,
-		unsigned long page_frame_number, unsigned long offset_in_page,
-		int row, const char *msg)
+		unsigned long page_frame_number,
+		unsigned long offset_in_page, int row, const char *msg)
 {
 	int len = EDAC_MC_LABEL_LEN * 4;
 	char labels[len + 1];
@@ -1714,26 +718,26 @@
 	}
 
 	chars = snprintf(pos, len + 1, "%s",
-			mci->csrows[row].channels[0].label);
+			 mci->csrows[row].channels[0].label);
 	len -= chars;
 	pos += chars;
 
 	for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
-	     chan++) {
+		chan++) {
 		chars = snprintf(pos, len + 1, ":%s",
-				mci->csrows[row].channels[chan].label);
+				 mci->csrows[row].channels[chan].label);
 		len -= chars;
 		pos += chars;
 	}
 
-	if (log_ue)
+	if (edac_mc_get_log_ue())
 		edac_mc_printk(mci, KERN_EMERG,
 			"UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
 			"labels \"%s\": %s\n", page_frame_number,
-			offset_in_page, mci->csrows[row].grain, row, labels,
-			msg);
+			offset_in_page, mci->csrows[row].grain, row,
+			labels, msg);
 
-	if (panic_on_ue)
+	if (edac_mc_get_panic_on_ue())
 		panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
 			"row %d, labels \"%s\": %s\n", mci->mc_idx,
 			page_frame_number, offset_in_page,
@@ -1746,10 +750,10 @@
 
 void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
 {
-	if (panic_on_ue)
+	if (edac_mc_get_panic_on_ue())
 		panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
 
-	if (log_ue)
+	if (edac_mc_get_log_ue())
 		edac_mc_printk(mci, KERN_WARNING,
 			"UE - no information available: %s\n", msg);
 	mci->ue_noinfo_count++;
@@ -1757,16 +761,14 @@
 }
 EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
 
-
 /*************************************************************
  * On Fully Buffered DIMM modules, this help function is
  * called to process UE events
  */
 void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
-				unsigned int csrow,
-				unsigned int channela,
-				unsigned int channelb,
-				char *msg)
+			unsigned int csrow,
+			unsigned int channela,
+			unsigned int channelb, char *msg)
 {
 	int len = EDAC_MC_LABEL_LEN * 4;
 	char labels[len + 1];
@@ -1808,20 +810,21 @@
 	/* Generate the DIMM labels from the specified channels */
 	chars = snprintf(pos, len + 1, "%s",
 			 mci->csrows[csrow].channels[channela].label);
-	len -= chars; pos += chars;
+	len -= chars;
+	pos += chars;
 	chars = snprintf(pos, len + 1, "-%s",
 			 mci->csrows[csrow].channels[channelb].label);
 
-	if (log_ue)
+	if (edac_mc_get_log_ue())
 		edac_mc_printk(mci, KERN_EMERG,
 			"UE row %d, channel-a= %d channel-b= %d "
 			"labels \"%s\": %s\n", csrow, channela, channelb,
 			labels, msg);
 
-	if (panic_on_ue)
+	if (edac_mc_get_panic_on_ue())
 		panic("UE row %d, channel-a= %d channel-b= %d "
-				"labels \"%s\": %s\n", csrow, channela,
-				channelb, labels, msg);
+			"labels \"%s\": %s\n", csrow, channela,
+			channelb, labels, msg);
 }
 EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
 
@@ -1830,9 +833,7 @@
  * called to process CE events
  */
 void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
-			   unsigned int csrow,
-			   unsigned int channel,
-			   char *msg)
+			unsigned int csrow, unsigned int channel, char *msg)
 {
 
 	/* Ensure boundary values */
@@ -1853,13 +854,12 @@
 		return;
 	}
 
-	if (log_ce)
+	if (edac_mc_get_log_ce())
 		/* FIXME - put in DIMM location */
 		edac_mc_printk(mci, KERN_WARNING,
 			"CE row %d, channel %d, label \"%s\": %s\n",
 			csrow, channel,
-			mci->csrows[csrow].channels[channel].label,
-			msg);
+			mci->csrows[csrow].channels[channel].label, msg);
 
 	mci->ce_count++;
 	mci->csrows[csrow].ce_count++;
@@ -1867,17 +867,16 @@
 }
 EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
 
-
 /*
  * Iterate over all MC instances and check for ECC, et al, errors
  */
-static inline void check_mc_devices(void)
+void edac_check_mc_devices(void)
 {
 	struct list_head *item;
 	struct mem_ctl_info *mci;
 
 	debugf3("%s()\n", __func__);
-	down(&mem_ctls_mutex);
+	mutex_lock(&mem_ctls_mutex);
 
 	list_for_each(item, &mc_devices) {
 		mci = list_entry(item, struct mem_ctl_info, link);
@@ -1886,119 +885,5 @@
 			mci->edac_check(mci);
 	}
 
-	up(&mem_ctls_mutex);
+	mutex_unlock(&mem_ctls_mutex);
 }
-
-/*
- * Check MC status every poll_msec.
- * Check PCI status every poll_msec as well.
- *
- * This where the work gets done for edac.
- *
- * SMP safe, doesn't use NMI, and auto-rate-limits.
- */
-static void do_edac_check(void)
-{
-	debugf3("%s()\n", __func__);
-	check_mc_devices();
-	do_pci_parity_check();
-}
-
-static int edac_kernel_thread(void *arg)
-{
-	while (!kthread_should_stop()) {
-		do_edac_check();
-
-		/* goto sleep for the interval */
-		schedule_timeout_interruptible((HZ * poll_msec) / 1000);
-		try_to_freeze();
-	}
-
-	return 0;
-}
-
-/*
- * edac_mc_init
- *      module initialization entry point
- */
-static int __init edac_mc_init(void)
-{
-	edac_printk(KERN_INFO, EDAC_MC, EDAC_MC_VERSION "\n");
-
-	/*
-	 * Harvest and clear any boot/initialization PCI parity errors
-	 *
-	 * FIXME: This only clears errors logged by devices present at time of
-	 * 	module initialization.  We should also do an initial clear
-	 *	of each newly hotplugged device.
-	 */
-	clear_pci_parity_errors();
-
-	/* Create the MC sysfs entries */
-	if (edac_sysfs_memctrl_setup()) {
-		edac_printk(KERN_ERR, EDAC_MC,
-			"Error initializing sysfs code\n");
-		return -ENODEV;
-	}
-
-	/* Create the PCI parity sysfs entries */
-	if (edac_sysfs_pci_setup()) {
-		edac_sysfs_memctrl_teardown();
-		edac_printk(KERN_ERR, EDAC_MC,
-			"EDAC PCI: Error initializing sysfs code\n");
-		return -ENODEV;
-	}
-
-	/* create our kernel thread */
-	edac_thread = kthread_run(edac_kernel_thread, NULL, "kedac");
-
-	if (IS_ERR(edac_thread)) {
-		/* remove the sysfs entries */
-		edac_sysfs_memctrl_teardown();
-		edac_sysfs_pci_teardown();
-		return PTR_ERR(edac_thread);
-	}
-
-	return 0;
-}
-
-/*
- * edac_mc_exit()
- *      module exit/termination functioni
- */
-static void __exit edac_mc_exit(void)
-{
-	debugf0("%s()\n", __func__);
-	kthread_stop(edac_thread);
-
-	/* tear down the sysfs device */
-	edac_sysfs_memctrl_teardown();
-	edac_sysfs_pci_teardown();
-}
-
-module_init(edac_mc_init);
-module_exit(edac_mc_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
-	"Based on work by Dan Hollis et al");
-MODULE_DESCRIPTION("Core library routines for MC reporting");
-
-module_param(panic_on_ue, int, 0644);
-MODULE_PARM_DESC(panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
-#ifdef CONFIG_PCI
-module_param(check_pci_parity, int, 0644);
-MODULE_PARM_DESC(check_pci_parity, "Check for PCI bus parity errors: 0=off 1=on");
-module_param(panic_on_pci_parity, int, 0644);
-MODULE_PARM_DESC(panic_on_pci_parity, "Panic on PCI Bus Parity error: 0=off 1=on");
-#endif
-module_param(log_ue, int, 0644);
-MODULE_PARM_DESC(log_ue, "Log uncorrectable error to console: 0=off 1=on");
-module_param(log_ce, int, 0644);
-MODULE_PARM_DESC(log_ce, "Log correctable error to console: 0=off 1=on");
-module_param(poll_msec, int, 0644);
-MODULE_PARM_DESC(poll_msec, "Polling period in milliseconds");
-#ifdef CONFIG_EDAC_DEBUG
-module_param(edac_debug_level, int, 0644);
-MODULE_PARM_DESC(edac_debug_level, "Debug level");
-#endif
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
deleted file mode 100644
index 713444c..0000000
--- a/drivers/edac/edac_mc.h
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
- * MC kernel module
- * (C) 2003 Linux Networx (http://lnxi.com)
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Written by Thayne Harbaugh
- * Based on work by Dan Hollis <goemon at anime dot net> and others.
- *	http://www.anime.net/~goemon/linux-ecc/
- *
- * NMI handling support added by
- *     Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
- *
- * $Id: edac_mc.h,v 1.4.2.10 2005/10/05 00:43:44 dsp_llnl Exp $
- *
- */
-
-#ifndef _EDAC_MC_H_
-#define _EDAC_MC_H_
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/pci.h>
-#include <linux/time.h>
-#include <linux/nmi.h>
-#include <linux/rcupdate.h>
-#include <linux/completion.h>
-#include <linux/kobject.h>
-#include <linux/platform_device.h>
-
-#define EDAC_MC_LABEL_LEN	31
-#define MC_PROC_NAME_MAX_LEN 7
-
-#if PAGE_SHIFT < 20
-#define PAGES_TO_MiB( pages )	( ( pages ) >> ( 20 - PAGE_SHIFT ) )
-#else				/* PAGE_SHIFT > 20 */
-#define PAGES_TO_MiB( pages )	( ( pages ) << ( PAGE_SHIFT - 20 ) )
-#endif
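The conversion is simply a shift by the difference between the MiB exponent (20) and PAGE_SHIFT. Worked out for the common 4 KiB page size (PAGE_SHIFT = 12), the shift is 8, i.e. 256 pages per MiB:

	PAGES_TO_MiB(256)	/*    256 >> 8 ==   1, i.e.   1 MiB */
	PAGES_TO_MiB(131072)	/* 131072 >> 8 == 512, i.e. 512 MiB */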
-
-#define edac_printk(level, prefix, fmt, arg...) \
-	printk(level "EDAC " prefix ": " fmt, ##arg)
-
-#define edac_mc_printk(mci, level, fmt, arg...) \
-	printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
-
-#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
-	printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
-
-/* prefixes for edac_printk() and edac_mc_printk() */
-#define EDAC_MC "MC"
-#define EDAC_PCI "PCI"
-#define EDAC_DEBUG "DEBUG"
-
-#ifdef CONFIG_EDAC_DEBUG
-extern int edac_debug_level;
-
-#define edac_debug_printk(level, fmt, arg...)                            \
-	do {                                                             \
-		if (level <= edac_debug_level)                           \
-			edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \
-	} while(0)
-
-#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
-#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
-#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
-#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
-#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
-
-#else  /* !CONFIG_EDAC_DEBUG */
-
-#define debugf0( ... )
-#define debugf1( ... )
-#define debugf2( ... )
-#define debugf3( ... )
-#define debugf4( ... )
-
-#endif  /* !CONFIG_EDAC_DEBUG */
-
-#define BIT(x) (1 << (x))
-
-#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
-	PCI_DEVICE_ID_ ## vend ## _ ## dev
-
-#if defined(CONFIG_X86) && defined(CONFIG_PCI)
-#define dev_name(dev) pci_name(to_pci_dev(dev))
-#else
-#define dev_name(dev) to_platform_device(dev)->name
-#endif
-
-/* memory devices */
-enum dev_type {
-	DEV_UNKNOWN = 0,
-	DEV_X1,
-	DEV_X2,
-	DEV_X4,
-	DEV_X8,
-	DEV_X16,
-	DEV_X32,		/* Do these parts exist? */
-	DEV_X64			/* Do these parts exist? */
-};
-
-#define DEV_FLAG_UNKNOWN	BIT(DEV_UNKNOWN)
-#define DEV_FLAG_X1		BIT(DEV_X1)
-#define DEV_FLAG_X2		BIT(DEV_X2)
-#define DEV_FLAG_X4		BIT(DEV_X4)
-#define DEV_FLAG_X8		BIT(DEV_X8)
-#define DEV_FLAG_X16		BIT(DEV_X16)
-#define DEV_FLAG_X32		BIT(DEV_X32)
-#define DEV_FLAG_X64		BIT(DEV_X64)
-
-/* memory types */
-enum mem_type {
-	MEM_EMPTY = 0,		/* Empty csrow */
-	MEM_RESERVED,		/* Reserved csrow type */
-	MEM_UNKNOWN,		/* Unknown csrow type */
-	MEM_FPM,		/* Fast page mode */
-	MEM_EDO,		/* Extended data out */
-	MEM_BEDO,		/* Burst Extended data out */
-	MEM_SDR,		/* Single data rate SDRAM */
-	MEM_RDR,		/* Registered single data rate SDRAM */
-	MEM_DDR,		/* Double data rate SDRAM */
-	MEM_RDDR,		/* Registered Double data rate SDRAM */
-	MEM_RMBS,		/* Rambus DRAM */
-	MEM_DDR2,               /* DDR2 RAM */
-	MEM_FB_DDR2,            /* fully buffered DDR2 */
-};
-
-#define MEM_FLAG_EMPTY		BIT(MEM_EMPTY)
-#define MEM_FLAG_RESERVED	BIT(MEM_RESERVED)
-#define MEM_FLAG_UNKNOWN	BIT(MEM_UNKNOWN)
-#define MEM_FLAG_FPM		BIT(MEM_FPM)
-#define MEM_FLAG_EDO		BIT(MEM_EDO)
-#define MEM_FLAG_BEDO		BIT(MEM_BEDO)
-#define MEM_FLAG_SDR		BIT(MEM_SDR)
-#define MEM_FLAG_RDR		BIT(MEM_RDR)
-#define MEM_FLAG_DDR		BIT(MEM_DDR)
-#define MEM_FLAG_RDDR		BIT(MEM_RDDR)
-#define MEM_FLAG_RMBS		BIT(MEM_RMBS)
-#define MEM_FLAG_DDR2           BIT(MEM_DDR2)
-#define MEM_FLAG_FB_DDR2        BIT(MEM_FB_DDR2)
-
-/* chipset Error Detection and Correction capabilities and mode */
-enum edac_type {
-	EDAC_UNKNOWN = 0,	/* Unknown if ECC is available */
-	EDAC_NONE,		/* Doesnt support ECC */
-	EDAC_RESERVED,		/* Reserved ECC type */
-	EDAC_PARITY,		/* Detects parity errors */
-	EDAC_EC,		/* Error Checking - no correction */
-	EDAC_SECDED,		/* Single bit error correction, Double detection */
-	EDAC_S2ECD2ED,		/* Chipkill x2 devices - do these exist? */
-	EDAC_S4ECD4ED,		/* Chipkill x4 devices */
-	EDAC_S8ECD8ED,		/* Chipkill x8 devices */
-	EDAC_S16ECD16ED,	/* Chipkill x16 devices */
-};
-
-#define EDAC_FLAG_UNKNOWN	BIT(EDAC_UNKNOWN)
-#define EDAC_FLAG_NONE		BIT(EDAC_NONE)
-#define EDAC_FLAG_PARITY	BIT(EDAC_PARITY)
-#define EDAC_FLAG_EC		BIT(EDAC_EC)
-#define EDAC_FLAG_SECDED	BIT(EDAC_SECDED)
-#define EDAC_FLAG_S2ECD2ED	BIT(EDAC_S2ECD2ED)
-#define EDAC_FLAG_S4ECD4ED	BIT(EDAC_S4ECD4ED)
-#define EDAC_FLAG_S8ECD8ED	BIT(EDAC_S8ECD8ED)
-#define EDAC_FLAG_S16ECD16ED	BIT(EDAC_S16ECD16ED)
-
-/* scrubbing capabilities */
-enum scrub_type {
-	SCRUB_UNKNOWN = 0,	/* Unknown if scrubber is available */
-	SCRUB_NONE,		/* No scrubber */
-	SCRUB_SW_PROG,		/* SW progressive (sequential) scrubbing */
-	SCRUB_SW_SRC,		/* Software scrub only errors */
-	SCRUB_SW_PROG_SRC,	/* Progressive software scrub from an error */
-	SCRUB_SW_TUNABLE,	/* Software scrub frequency is tunable */
-	SCRUB_HW_PROG,		/* HW progressive (sequential) scrubbing */
-	SCRUB_HW_SRC,		/* Hardware scrub only errors */
-	SCRUB_HW_PROG_SRC,	/* Progressive hardware scrub from an error */
-	SCRUB_HW_TUNABLE	/* Hardware scrub frequency is tunable */
-};
-
-#define SCRUB_FLAG_SW_PROG	BIT(SCRUB_SW_PROG)
-#define SCRUB_FLAG_SW_SRC	BIT(SCRUB_SW_SRC_CORR)
-#define SCRUB_FLAG_SW_PROG_SRC	BIT(SCRUB_SW_PROG_SRC_CORR)
-#define SCRUB_FLAG_SW_TUN	BIT(SCRUB_SW_SCRUB_TUNABLE)
-#define SCRUB_FLAG_HW_PROG	BIT(SCRUB_HW_PROG)
-#define SCRUB_FLAG_HW_SRC	BIT(SCRUB_HW_SRC_CORR)
-#define SCRUB_FLAG_HW_PROG_SRC	BIT(SCRUB_HW_PROG_SRC_CORR)
-#define SCRUB_FLAG_HW_TUN	BIT(SCRUB_HW_TUNABLE)
-
-/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
-
-/*
- * There are several things to be aware of that aren't at all obvious:
- *
- *
- * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
- *
- * These are some of the many terms that are thrown about that don't always
- * mean what people think they mean (Inconceivable!).  In the interest of
- * creating a common ground for discussion, terms and their definitions
- * will be established.
- *
- * Memory devices:	The individual chip on a memory stick.  These devices
- *			commonly output 4 and 8 bits each.  Grouping several
- *			of these in parallel provides 64 bits which is common
- *			for a memory stick.
- *
- * Memory Stick:	A printed circuit board that agregates multiple
- *			memory devices in parallel.  This is the atomic
- *			memory component that is purchaseable by Joe consumer
- *			and loaded into a memory socket.
- *
- * Socket:		A physical connector on the motherboard that accepts
- *			a single memory stick.
- *
- * Channel:		Set of memory devices on a memory stick that must be
- *			grouped in parallel with one or more additional
- *			channels from other memory sticks.  This parallel
- *			grouping of the output from multiple channels are
- *			necessary for the smallest granularity of memory access.
- *			Some memory controllers are capable of single channel -
- *			which means that memory sticks can be loaded
- *			individually.  Other memory controllers are only
- *			capable of dual channel - which means that memory
- *			sticks must be loaded as pairs (see "socket set").
- *
- * Chip-select row:	All of the memory devices that are selected together.
- *			for a single, minimum grain of memory access.
- *			This selects all of the parallel memory devices across
- *			all of the parallel channels.  Common chip-select rows
- *			for single channel are 64 bits, for dual channel 128
- *			bits.
- *
- * Single-Ranked stick:	A Single-ranked stick has 1 chip-select row of memmory.
- *			Motherboards commonly drive two chip-select pins to
- *			a memory stick. A single-ranked stick, will occupy
- *			only one of those rows. The other will be unused.
- *
- * Double-Ranked stick:	A double-ranked stick has two chip-select rows which
- *			access different sets of memory devices.  The two
- *			rows cannot be accessed concurrently.
- *
- * Double-sided stick:	DEPRECATED TERM, see Double-Ranked stick.
- *			A double-sided stick has two chip-select rows which
- *			access different sets of memory devices.  The two
- *			rows cannot be accessed concurrently.  "Double-sided"
- *			is irrespective of the memory devices being mounted
- *			on both sides of the memory stick.
- *
- * Socket set:		All of the memory sticks that are required for for
- *			a single memory access or all of the memory sticks
- *			spanned by a chip-select row.  A single socket set
- *			has two chip-select rows and if double-sided sticks
- *			are used these will occupy those chip-select rows.
- *
- * Bank:		This term is avoided because it is unclear when
- *			needing to distinguish between chip-select rows and
- *			socket sets.
- *
- * Controller pages:
- *
- * Physical pages:
- *
- * Virtual pages:
- *
- *
- * STRUCTURE ORGANIZATION AND CHOICES
- *
- *
- *
- * PS - I enjoyed writing all that about as much as you enjoyed reading it.
- */
-
-struct channel_info {
-	int chan_idx;		/* channel index */
-	u32 ce_count;		/* Correctable Errors for this CHANNEL */
-	char label[EDAC_MC_LABEL_LEN + 1];  /* DIMM label on motherboard */
-	struct csrow_info *csrow;	/* the parent */
-};
-
-struct csrow_info {
-	unsigned long first_page;	/* first page number in dimm */
-	unsigned long last_page;	/* last page number in dimm */
-	unsigned long page_mask;	/* used for interleaving -
-					 * 0UL for non intlv
-					 */
-	u32 nr_pages;		/* number of pages in csrow */
-	u32 grain;		/* granularity of reported error in bytes */
-	int csrow_idx;		/* the chip-select row */
-	enum dev_type dtype;	/* memory device type */
-	u32 ue_count;		/* Uncorrectable Errors for this csrow */
-	u32 ce_count;		/* Correctable Errors for this csrow */
-	enum mem_type mtype;	/* memory csrow type */
-	enum edac_type edac_mode;	/* EDAC mode for this csrow */
-	struct mem_ctl_info *mci;	/* the parent */
-
-	struct kobject kobj;	/* sysfs kobject for this csrow */
-	struct completion kobj_complete;
-
-	/* FIXME the number of CHANNELs might need to become dynamic */
-	u32 nr_channels;
-	struct channel_info *channels;
-};
-
-struct mem_ctl_info {
-	struct list_head link;  /* for global list of mem_ctl_info structs */
-	unsigned long mtype_cap;	/* memory types supported by mc */
-	unsigned long edac_ctl_cap;	/* Mem controller EDAC capabilities */
-	unsigned long edac_cap;	/* configuration capabilities - this is
-				 * closely related to edac_ctl_cap.  The
-				 * difference is that the controller may be
-				 * capable of s4ecd4ed which would be listed
-				 * in edac_ctl_cap, but if channels aren't
-				 * capable of s4ecd4ed then the edac_cap would
-				 * not have that capability.
-				 */
-	unsigned long scrub_cap;	/* chipset scrub capabilities */
-	enum scrub_type scrub_mode;	/* current scrub mode */
-
-	/* Translates sdram memory scrub rate given in bytes/sec to the
-	   internal representation and configures whatever else needs
-	   to be configured.
-	*/
-	int (*set_sdram_scrub_rate) (struct mem_ctl_info *mci, u32 *bw);
-
-	/* Get the current sdram memory scrub rate from the internal
-	   representation and converts it to the closest matching
-	   bandwith in bytes/sec.
-	*/
-	int (*get_sdram_scrub_rate) (struct mem_ctl_info *mci, u32 *bw);
-
-	/* pointer to edac checking routine */
-	void (*edac_check) (struct mem_ctl_info * mci);
-
-	/*
-	 * Remaps memory pages: controller pages to physical pages.
-	 * For most MC's, this will be NULL.
-	 */
-	/* FIXME - why not send the phys page to begin with? */
-	unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
-					unsigned long page);
-	int mc_idx;
-	int nr_csrows;
-	struct csrow_info *csrows;
-	/*
-	 * FIXME - what about controllers on other busses? - IDs must be
-	 * unique.  dev pointer should be sufficiently unique, but
-	 * BUS:SLOT.FUNC numbers may not be unique.
-	 */
-	struct device *dev;
-	const char *mod_name;
-	const char *mod_ver;
-	const char *ctl_name;
-	char proc_name[MC_PROC_NAME_MAX_LEN + 1];
-	void *pvt_info;
-	u32 ue_noinfo_count;	/* Uncorrectable Errors w/o info */
-	u32 ce_noinfo_count;	/* Correctable Errors w/o info */
-	u32 ue_count;		/* Total Uncorrectable Errors for this MC */
-	u32 ce_count;		/* Total Correctable Errors for this MC */
-	unsigned long start_time;	/* mci load start time (in jiffies) */
-
-	/* this stuff is for safe removal of mc devices from global list while
-	 * NMI handlers may be traversing list
-	 */
-	struct rcu_head rcu;
-	struct completion complete;
-
-	/* edac sysfs device control */
-	struct kobject edac_mci_kobj;
-	struct completion kobj_complete;
-};
-
-#ifdef CONFIG_PCI
-
-/* write all or some bits in a byte-register*/
-static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
-		u8 mask)
-{
-	if (mask != 0xff) {
-		u8 buf;
-
-		pci_read_config_byte(pdev, offset, &buf);
-		value &= mask;
-		buf &= ~mask;
-		value |= buf;
-	}
-
-	pci_write_config_byte(pdev, offset, value);
-}
-
-/* write all or some bits in a word-register*/
-static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
-		u16 value, u16 mask)
-{
-	if (mask != 0xffff) {
-		u16 buf;
-
-		pci_read_config_word(pdev, offset, &buf);
-		value &= mask;
-		buf &= ~mask;
-		value |= buf;
-	}
-
-	pci_write_config_word(pdev, offset, value);
-}
-
-/* write all or some bits in a dword-register*/
-static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
-		u32 value, u32 mask)
-{
-	if (mask != 0xffff) {
-		u32 buf;
-
-		pci_read_config_dword(pdev, offset, &buf);
-		value &= mask;
-		buf &= ~mask;
-		value |= buf;
-	}
-
-	pci_write_config_dword(pdev, offset, value);
-}
-
-#endif /* CONFIG_PCI */
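The pci_write_bits{8,16,32} helpers above perform a masked read-modify-write of a PCI config register: bits selected by the mask are taken from the new value, the remaining bits keep the register's current contents, and a full mask skips the read entirely. Reduced to plain arithmetic, with values chosen purely for illustration:

	u8 old = 0x5a, value = 0x0f, mask = 0x3c;
	u8 out = (value & mask) | (old & ~mask);	/* 0x0c | 0x42 == 0x4e */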
-
-#ifdef CONFIG_EDAC_DEBUG
-void edac_mc_dump_channel(struct channel_info *chan);
-void edac_mc_dump_mci(struct mem_ctl_info *mci);
-void edac_mc_dump_csrow(struct csrow_info *csrow);
-#endif  /* CONFIG_EDAC_DEBUG */
-
-extern int edac_mc_add_mc(struct mem_ctl_info *mci,int mc_idx);
-extern struct mem_ctl_info * edac_mc_del_mc(struct device *dev);
-extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
-					unsigned long page);
-extern void edac_mc_scrub_block(unsigned long page, unsigned long offset,
-		u32 size);
-
-/*
- * The no info errors are used when error overflows are reported.
- * There are a limited number of error logging registers that can
- * be exausted.  When all registers are exhausted and an additional
- * error occurs then an error overflow register records that an
- * error occured and the type of error, but doesn't have any
- * further information.  The ce/ue versions make for cleaner
- * reporting logic and function interface - reduces conditional
- * statement clutter and extra function arguments.
- */
-extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
-		unsigned long page_frame_number, unsigned long offset_in_page,
-		unsigned long syndrome, int row, int channel,
-		const char *msg);
-extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
-		const char *msg);
-extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
-		unsigned long page_frame_number, unsigned long offset_in_page,
-		int row, const char *msg);
-extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
-		const char *msg);
-extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
-		unsigned int csrow,
-		unsigned int channel0,
-		unsigned int channel1,
-		char *msg);
-extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
-		unsigned int csrow,
-		unsigned int channel,
-		char *msg);
-
-/*
- * This kmalloc's and initializes all the structures.
- * Can't be used if all structures don't have the same lifetime.
- */
-extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
-		unsigned nr_chans);
-
-/* Free an mc previously allocated by edac_mc_alloc() */
-extern void edac_mc_free(struct mem_ctl_info *mci);
-
-#endif				/* _EDAC_MC_H_ */
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
new file mode 100644
index 0000000..cd090b0
--- /dev/null
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -0,0 +1,1024 @@
+/*
+ * edac_mc kernel module
+ * (C) 2005-2007 Linux Networx (http://lnxi.com)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/bug.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+
+/* MC EDAC Controls, settable by module parameter and sysfs */
+static int edac_mc_log_ue = 1;
+static int edac_mc_log_ce = 1;
+static int edac_mc_panic_on_ue;
+static int edac_mc_poll_msec = 1000;
+
+/* Getter functions for above */
+int edac_mc_get_log_ue(void)
+{
+	return edac_mc_log_ue;
+}
+
+int edac_mc_get_log_ce(void)
+{
+	return edac_mc_log_ce;
+}
+
+int edac_mc_get_panic_on_ue(void)
+{
+	return edac_mc_panic_on_ue;
+}
+
+/* this is temporary */
+int edac_mc_get_poll_msec(void)
+{
+	return edac_mc_poll_msec;
+}
+
+/* Parameter declarations for above */
+module_param(edac_mc_panic_on_ue, int, 0644);
+MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
+module_param(edac_mc_log_ue, int, 0644);
+MODULE_PARM_DESC(edac_mc_log_ue,
+		 "Log uncorrectable error to console: 0=off 1=on");
+module_param(edac_mc_log_ce, int, 0644);
+MODULE_PARM_DESC(edac_mc_log_ce,
+		 "Log correctable error to console: 0=off 1=on");
+module_param(edac_mc_poll_msec, int, 0644);
+MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
+
+/*
+ * various constants for Memory Controllers
+ */
+static const char *mem_types[] = {
+	[MEM_EMPTY] = "Empty",
+	[MEM_RESERVED] = "Reserved",
+	[MEM_UNKNOWN] = "Unknown",
+	[MEM_FPM] = "FPM",
+	[MEM_EDO] = "EDO",
+	[MEM_BEDO] = "BEDO",
+	[MEM_SDR] = "Unbuffered-SDR",
+	[MEM_RDR] = "Registered-SDR",
+	[MEM_DDR] = "Unbuffered-DDR",
+	[MEM_RDDR] = "Registered-DDR",
+	[MEM_RMBS] = "RMBS",
+	[MEM_DDR2] = "Unbuffered-DDR2",
+	[MEM_FB_DDR2] = "FullyBuffered-DDR2",
+	[MEM_RDDR2] = "Registered-DDR2"
+};
+
+static const char *dev_types[] = {
+	[DEV_UNKNOWN] = "Unknown",
+	[DEV_X1] = "x1",
+	[DEV_X2] = "x2",
+	[DEV_X4] = "x4",
+	[DEV_X8] = "x8",
+	[DEV_X16] = "x16",
+	[DEV_X32] = "x32",
+	[DEV_X64] = "x64"
+};
+
+static const char *edac_caps[] = {
+	[EDAC_UNKNOWN] = "Unknown",
+	[EDAC_NONE] = "None",
+	[EDAC_RESERVED] = "Reserved",
+	[EDAC_PARITY] = "PARITY",
+	[EDAC_EC] = "EC",
+	[EDAC_SECDED] = "SECDED",
+	[EDAC_S2ECD2ED] = "S2ECD2ED",
+	[EDAC_S4ECD4ED] = "S4ECD4ED",
+	[EDAC_S8ECD8ED] = "S8ECD8ED",
+	[EDAC_S16ECD16ED] = "S16ECD16ED"
+};
+
+
+
+/*
+ * /sys/devices/system/edac/mc;
+ *	data structures and methods
+ */
+static ssize_t memctrl_int_show(void *ptr, char *buffer)
+{
+	int *value = (int *)ptr;
+	return sprintf(buffer, "%u\n", *value);
+}
+
+static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
+{
+	int *value = (int *)ptr;
+
+	if (isdigit(*buffer))
+		*value = simple_strtoul(buffer, NULL, 0);
+
+	return count;
+}
+
+
+/* EDAC sysfs CSROW data structures and methods
+ */
+
+/* Set of more default csrow<id> attribute show/store functions */
+static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data,
+				int private)
+{
+	return sprintf(data, "%u\n", csrow->ue_count);
+}
+
+static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
+				int private)
+{
+	return sprintf(data, "%u\n", csrow->ce_count);
+}
+
+static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
+				int private)
+{
+	return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
+}
+
+static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
+				int private)
+{
+	return sprintf(data, "%s\n", mem_types[csrow->mtype]);
+}
+
+static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
+				int private)
+{
+	return sprintf(data, "%s\n", dev_types[csrow->dtype]);
+}
+
+static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
+				int private)
+{
+	return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]);
+}
+
+/* show/store functions for DIMM Label attributes */
+static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
+				char *data, int channel)
+{
+	return snprintf(data, EDAC_MC_LABEL_LEN, "%s",
+			csrow->channels[channel].label);
+}
+
+static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
+					const char *data,
+					size_t count, int channel)
+{
+	ssize_t max_size = 0;
+
+	max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
+	strncpy(csrow->channels[channel].label, data, max_size);
+	csrow->channels[channel].label[max_size] = '\0';
+
+	return max_size;
+}
+
+/* show function for dynamic chX_ce_count attribute */
+static ssize_t channel_ce_count_show(struct csrow_info *csrow,
+				char *data, int channel)
+{
+	return sprintf(data, "%u\n", csrow->channels[channel].ce_count);
+}
+
+/* csrow specific attribute structure */
+struct csrowdev_attribute {
+	struct attribute attr;
+	 ssize_t(*show) (struct csrow_info *, char *, int);
+	 ssize_t(*store) (struct csrow_info *, const char *, size_t, int);
+	int private;
+};
+
+#define to_csrow(k) container_of(k, struct csrow_info, kobj)
+#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
+
+/* Set of show/store higher level functions for default csrow attributes */
+static ssize_t csrowdev_show(struct kobject *kobj,
+			struct attribute *attr, char *buffer)
+{
+	struct csrow_info *csrow = to_csrow(kobj);
+	struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
+
+	if (csrowdev_attr->show)
+		return csrowdev_attr->show(csrow,
+					buffer, csrowdev_attr->private);
+	return -EIO;
+}
+
+static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
+			const char *buffer, size_t count)
+{
+	struct csrow_info *csrow = to_csrow(kobj);
+	struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
+
+	if (csrowdev_attr->store)
+		return csrowdev_attr->store(csrow,
+					buffer,
+					count, csrowdev_attr->private);
+	return -EIO;
+}
+
+static struct sysfs_ops csrowfs_ops = {
+	.show = csrowdev_show,
+	.store = csrowdev_store
+};
+
+#define CSROWDEV_ATTR(_name,_mode,_show,_store,_private)	\
+static struct csrowdev_attribute attr_##_name = {			\
+	.attr = {.name = __stringify(_name), .mode = _mode },	\
+	.show   = _show,					\
+	.store  = _store,					\
+	.private = _private,					\
+};
+
+/* default csrow<id> attribute files */
+CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0);
+CSROWDEV_ATTR(dev_type, S_IRUGO, csrow_dev_type_show, NULL, 0);
+CSROWDEV_ATTR(mem_type, S_IRUGO, csrow_mem_type_show, NULL, 0);
+CSROWDEV_ATTR(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL, 0);
+CSROWDEV_ATTR(ue_count, S_IRUGO, csrow_ue_count_show, NULL, 0);
+CSROWDEV_ATTR(ce_count, S_IRUGO, csrow_ce_count_show, NULL, 0);
+
+/* default attributes of the CSROW<id> object */
+static struct csrowdev_attribute *default_csrow_attr[] = {
+	&attr_dev_type,
+	&attr_mem_type,
+	&attr_edac_mode,
+	&attr_size_mb,
+	&attr_ue_count,
+	&attr_ce_count,
+	NULL,
+};
+
+/* possible dynamic channel DIMM Label attribute files */
+CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR,
+	channel_dimm_label_show, channel_dimm_label_store, 0);
+CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR,
+	channel_dimm_label_show, channel_dimm_label_store, 1);
+CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR,
+	channel_dimm_label_show, channel_dimm_label_store, 2);
+CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR,
+	channel_dimm_label_show, channel_dimm_label_store, 3);
+CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR,
+	channel_dimm_label_show, channel_dimm_label_store, 4);
+CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR,
+	channel_dimm_label_show, channel_dimm_label_store, 5);
+
+/* Total possible dynamic DIMM Label attribute file table */
+static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = {
+	&attr_ch0_dimm_label,
+	&attr_ch1_dimm_label,
+	&attr_ch2_dimm_label,
+	&attr_ch3_dimm_label,
+	&attr_ch4_dimm_label,
+	&attr_ch5_dimm_label
+};
+
+/* possible dynamic channel ce_count attribute files */
+CSROWDEV_ATTR(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0);
+CSROWDEV_ATTR(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1);
+CSROWDEV_ATTR(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2);
+CSROWDEV_ATTR(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3);
+CSROWDEV_ATTR(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4);
+CSROWDEV_ATTR(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5);
+
+/* Total possible dynamic ce_count attribute file table */
+static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = {
+	&attr_ch0_ce_count,
+	&attr_ch1_ce_count,
+	&attr_ch2_ce_count,
+	&attr_ch3_ce_count,
+	&attr_ch4_ce_count,
+	&attr_ch5_ce_count
+};
+
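+/* upper bound on per-csrow channels; matches the six entries in the
+ * dynamic attribute tables above
+ */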
+#define EDAC_NR_CHANNELS	6
+
+/* Create dynamic CHANNEL files, indexed by 'chan', under the specified CSROW */
+static int edac_create_channel_files(struct kobject *kobj, int chan)
+{
+	int err = -ENODEV;
+
+	if (chan >= EDAC_NR_CHANNELS)
+		return err;
+
+	/* create the DIMM label attribute file */
+	err = sysfs_create_file(kobj,
+				(struct attribute *)
+				dynamic_csrow_dimm_attr[chan]);
+
+	if (!err) {
+		/* create the CE Count attribute file */
+		err = sysfs_create_file(kobj,
+					(struct attribute *)
+					dynamic_csrow_ce_count_attr[chan]);
+		if (!err)
+			debugf1("%s()  dimm labels and ce_count files created",
+				__func__);
+	}
+
+	return err;
+}
+
+/* No memory to release for this kobj */
+static void edac_csrow_instance_release(struct kobject *kobj)
+{
+	struct mem_ctl_info *mci;
+	struct csrow_info *cs;
+
+	debugf1("%s()\n", __func__);
+
+	cs = container_of(kobj, struct csrow_info, kobj);
+	mci = cs->mci;
+
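+	/* drop the reference on the parent mci kobject taken in
+	 * edac_create_csrow_object()
+	 */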
+	kobject_put(&mci->edac_mci_kobj);
+}
+
+/* the kobj_type instance for a CSROW */
+static struct kobj_type ktype_csrow = {
+	.release = edac_csrow_instance_release,
+	.sysfs_ops = &csrowfs_ops,
+	.default_attrs = (struct attribute **)default_csrow_attr,
+};
+
+/* Create a CSROW object under the specified edac_mc_device */
+static int edac_create_csrow_object(struct mem_ctl_info *mci,
+					struct csrow_info *csrow, int index)
+{
+	struct kobject *kobj_mci = &mci->edac_mci_kobj;
+	struct kobject *kobj;
+	int chan;
+	int err;
+
+	/* generate ..../edac/mc/mc<id>/csrow<index>   */
+	memset(&csrow->kobj, 0, sizeof(csrow->kobj));
+	csrow->mci = mci;	/* include container up link */
+	csrow->kobj.parent = kobj_mci;
+	csrow->kobj.ktype = &ktype_csrow;
+
+	/* name this instance of csrow<id> */
+	err = kobject_set_name(&csrow->kobj, "csrow%d", index);
+	if (err)
+		goto err_out;
+
+	/* bump the mci instance's kobject's ref count */
+	kobj = kobject_get(&mci->edac_mci_kobj);
+	if (!kobj) {
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	/* Instantiate the csrow object */
+	err = kobject_register(&csrow->kobj);
+	if (err)
+		goto err_release_top_kobj;
+
+	/* From this point on, releasing this csrow kobj requires
+	 * calling kobject_unregister() and letting its teardown
+	 * path perform the release
+	 */
+
+	/* Create the dynamic attribute files on this csrow,
+	 * namely, the DIMM labels and the channel ce_count
+	 */
+	for (chan = 0; chan < csrow->nr_channels; chan++) {
+		err = edac_create_channel_files(&csrow->kobj, chan);
+		if (err) {
+			/* special case the unregister here */
+			kobject_unregister(&csrow->kobj);
+			goto err_out;
+		}
+	}
+
+	return 0;
+
+	/* error unwind stack */
+err_release_top_kobj:
+	kobject_put(&mci->edac_mci_kobj);
+
+err_out:
+	return err;
+}
+
+/* default sysfs methods and data structures for the main MCI kobject */
+
+static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
+					const char *data, size_t count)
+{
+	int row, chan;
+
+	mci->ue_noinfo_count = 0;
+	mci->ce_noinfo_count = 0;
+	mci->ue_count = 0;
+	mci->ce_count = 0;
+
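+	/* also clear the per-csrow UE/CE counters and the
+	 * per-channel CE counters
+	 */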
+	for (row = 0; row < mci->nr_csrows; row++) {
+		struct csrow_info *ri = &mci->csrows[row];
+
+		ri->ue_count = 0;
+		ri->ce_count = 0;
+
+		for (chan = 0; chan < ri->nr_channels; chan++)
+			ri->channels[chan].ce_count = 0;
+	}
+
+	mci->start_time = jiffies;
+	return count;
+}
+
+/* memory scrubbing */
+static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
+					const char *data, size_t count)
+{
+	u32 bandwidth = -1;
+
+	if (mci->set_sdram_scrub_rate) {
+
+		memctrl_int_store(&bandwidth, data, count);
+
+		if (!(*mci->set_sdram_scrub_rate) (mci, &bandwidth)) {
+			edac_printk(KERN_DEBUG, EDAC_MC,
+				"Scrub rate set successfully, applied: %d\n",
+				bandwidth);
+		} else {
+			/* FIXME: error codes maybe? */
+			edac_printk(KERN_DEBUG, EDAC_MC,
+				"Scrub rate set FAILED, could not apply: %d\n",
+				bandwidth);
+		}
+	} else {
+		/* FIXME: produce "not implemented" ERROR for user-side. */
+		edac_printk(KERN_WARNING, EDAC_MC,
+			"Memory scrubbing 'set' control is not implemented!\n");
+	}
+	return count;
+}
+
+static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
+{
+	u32 bandwidth = -1;
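+	/* -1 is reported when the driver provides no
+	 * get_sdram_scrub_rate callback
+	 */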
+
+	if (mci->get_sdram_scrub_rate) {
+		if (!(*mci->get_sdram_scrub_rate) (mci, &bandwidth)) {
+			edac_printk(KERN_DEBUG, EDAC_MC,
+				"Scrub rate fetched successfully: %d\n",
+				bandwidth);
+		} else {
+			/* FIXME: error codes maybe? */
+			edac_printk(KERN_DEBUG, EDAC_MC,
+				"Scrub rate fetch FAILED, got: %d\n",
+				bandwidth);
+		}
+	} else {
+		/* FIXME: produce "not implemented" ERROR for user-side.  */
+		edac_printk(KERN_WARNING, EDAC_MC,
+			"Memory scrubbing 'get' control is not implemented\n");
+	}
+	return sprintf(data, "%d\n", bandwidth);
+}
+
+/* default attribute files for the MCI object */
+static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
+{
+	return sprintf(data, "%d\n", mci->ue_count);
+}
+
+static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
+{
+	return sprintf(data, "%d\n", mci->ce_count);
+}
+
+static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
+{
+	return sprintf(data, "%d\n", mci->ce_noinfo_count);
+}
+
+static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
+{
+	return sprintf(data, "%d\n", mci->ue_noinfo_count);
+}
+
+static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
+{
+	return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
+}
+
+static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
+{
+	return sprintf(data, "%s\n", mci->ctl_name);
+}
+
+static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
+{
+	int total_pages, csrow_idx;
+
+	for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
+		csrow_idx++) {
+		struct csrow_info *csrow = &mci->csrows[csrow_idx];
+
+		if (!csrow->nr_pages)
+			continue;
+
+		total_pages += csrow->nr_pages;
+	}
+
+	return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
+}
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
+#define to_mcidev_attr(a) container_of(a,struct mcidev_sysfs_attribute,attr)
+
+/* MCI show/store functions for top most object */
+static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
+			char *buffer)
+{
+	struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
+	struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+
+	if (mcidev_attr->show)
+		return mcidev_attr->show(mem_ctl_info, buffer);
+
+	return -EIO;
+}
+
+static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
+			const char *buffer, size_t count)
+{
+	struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
+	struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+
+	if (mcidev_attr->store)
+		return mcidev_attr->store(mem_ctl_info, buffer, count);
+
+	return -EIO;
+}
+
+/* Intermediate show/store table */
+static struct sysfs_ops mci_ops = {
+	.show = mcidev_show,
+	.store = mcidev_store
+};
+
+#define MCIDEV_ATTR(_name,_mode,_show,_store)			\
+static struct mcidev_sysfs_attribute mci_attr_##_name = {			\
+	.attr = {.name = __stringify(_name), .mode = _mode },	\
+	.show   = _show,					\
+	.store  = _store,					\
+};
+
+/* default Control file */
+MCIDEV_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
+
+/* default Attribute files */
+MCIDEV_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
+MCIDEV_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
+MCIDEV_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
+MCIDEV_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
+MCIDEV_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
+MCIDEV_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
+MCIDEV_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
+
+/* memory scrubber attribute file */
+MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
+	mci_sdram_scrub_rate_store);
+
+static struct mcidev_sysfs_attribute *mci_attr[] = {
+	&mci_attr_reset_counters,
+	&mci_attr_mc_name,
+	&mci_attr_size_mb,
+	&mci_attr_seconds_since_reset,
+	&mci_attr_ue_noinfo_count,
+	&mci_attr_ce_noinfo_count,
+	&mci_attr_ue_count,
+	&mci_attr_ce_count,
+	&mci_attr_sdram_scrub_rate,
+	NULL
+};
+
+
+/*
+ * Release of a MC controlling instance
+ *
+ *	each MC control instance has the following resources upon entry:
+ *		a) a ref count on the top memctl kobj
+ *		b) a ref count on this module
+ *
+ *	this function must decrement those ref counts and then
+ *	issue a free on the instance's memory
+ */
+static void edac_mci_control_release(struct kobject *kobj)
+{
+	struct mem_ctl_info *mci;
+
+	mci = to_mci(kobj);
+
+	debugf0("%s() mci instance idx=%d releasing\n", __func__, mci->mc_idx);
+
+	/* decrement the module ref count */
+	module_put(mci->owner);
+
+	/* free the mci instance memory here */
+	kfree(mci);
+}
+
+static struct kobj_type ktype_mci = {
+	.release = edac_mci_control_release,
+	.sysfs_ops = &mci_ops,
+	.default_attrs = (struct attribute **)mci_attr,
+};
+
+/* show/store, tables, etc for the MC kset */
+
+
+struct memctrl_dev_attribute {
+	struct attribute attr;
+	void *value;
+	 ssize_t(*show) (void *, char *);
+	 ssize_t(*store) (void *, const char *, size_t);
+};
+
+/* Set of show/store abstract level functions for memory control object */
+static ssize_t memctrl_dev_show(struct kobject *kobj,
+				struct attribute *attr, char *buffer)
+{
+	struct memctrl_dev_attribute *memctrl_dev;
+	memctrl_dev = (struct memctrl_dev_attribute *)attr;
+
+	if (memctrl_dev->show)
+		return memctrl_dev->show(memctrl_dev->value, buffer);
+
+	return -EIO;
+}
+
+static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
+				 const char *buffer, size_t count)
+{
+	struct memctrl_dev_attribute *memctrl_dev;
+	memctrl_dev = (struct memctrl_dev_attribute *)attr;
+
+	if (memctrl_dev->store)
+		return memctrl_dev->store(memctrl_dev->value, buffer, count);
+
+	return -EIO;
+}
+
+static struct sysfs_ops memctrlfs_ops = {
+	.show = memctrl_dev_show,
+	.store = memctrl_dev_store
+};
+
+#define MEMCTRL_ATTR(_name, _mode, _show, _store)			\
+static struct memctrl_dev_attribute attr_##_name = {			\
+	.attr = {.name = __stringify(_name), .mode = _mode },	\
+	.value  = &_name,					\
+	.show   = _show,					\
+	.store  = _store,					\
+};
+
+#define MEMCTRL_STRING_ATTR(_name, _data, _mode, _show, _store)	\
+static struct memctrl_dev_attribute attr_##_name = {			\
+	.attr = {.name = __stringify(_name), .mode = _mode },	\
+	.value  = _data,					\
+	.show   = _show,					\
+	.store  = _store,					\
+};
+
+/* memory controller control files */
+MEMCTRL_ATTR(edac_mc_panic_on_ue,
+	S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
+
+MEMCTRL_ATTR(edac_mc_log_ue,
+	S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
+
+MEMCTRL_ATTR(edac_mc_log_ce,
+	S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
+
+MEMCTRL_ATTR(edac_mc_poll_msec,
+	S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
+
+/* Base Attributes of the memory ECC object */
+static struct memctrl_dev_attribute *memctrl_attr[] = {
+	&attr_edac_mc_panic_on_ue,
+	&attr_edac_mc_log_ue,
+	&attr_edac_mc_log_ce,
+	&attr_edac_mc_poll_msec,
+	NULL,
+};
+
+
+/* the ktype for the mc_kset internal kobj */
+static struct kobj_type ktype_mc_set_attribs = {
+	.sysfs_ops = &memctrlfs_ops,
+	.default_attrs = (struct attribute **)memctrl_attr,
+};
+
+/* EDAC memory controller sysfs kset:
+ *	/sys/devices/system/edac/mc
+ */
+static struct kset mc_kset = {
+	.kobj = {.name = "mc", .ktype = &ktype_mc_set_attribs },
+	.ktype = &ktype_mci,
+};
+
+
+/*
+ * edac_mc_register_sysfs_main_kobj
+ *
+ *	sets up and registers the main kobject for each mci
+ */
+int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
+{
+	struct kobject *kobj_mci;
+	int err;
+
+	debugf1("%s()\n", __func__);
+
+	kobj_mci = &mci->edac_mci_kobj;
+
+	/* Init the mci's kobject */
+	memset(kobj_mci, 0, sizeof(*kobj_mci));
+
+	/* this instance becomes part of the mc_kset */
+	kobj_mci->kset = &mc_kset;
+
+	/* set the name of the mc<id> object */
+	err = kobject_set_name(kobj_mci, "mc%d", mci->mc_idx);
+	if (err)
+		goto fail_out;
+
+	/* Record which module 'owns' this control structure
+	 * and bump the ref count of the module
+	 */
+	mci->owner = THIS_MODULE;
+
+	/* bump ref count on this module */
+	if (!try_module_get(mci->owner)) {
+		err = -ENODEV;
+		goto fail_out;
+	}
+
+	/* register the mc<id> kobject to the mc_kset */
+	err = kobject_register(kobj_mci);
+	if (err) {
+		debugf1("%s() Failed to register '.../edac/mc%d'\n",
+			__func__, mci->mc_idx);
+		goto kobj_reg_fail;
+	}
+
+	/* At this point, to 'free' the control struct,
+	 * edac_mc_unregister_sysfs_main_kobj() must be used
+	 */
+
+	debugf1("%s() Registered '.../edac/mc%d' kobject\n",
+		__func__, mci->mc_idx);
+
+	return 0;
+
+	/* Error exit stack */
+
+kobj_reg_fail:
+	module_put(mci->owner);
+
+fail_out:
+	return err;
+}
+
+/*
+ * edac_mc_unregister_sysfs_main_kobj
+ *
+ *	tears down the main mci kobject from the mc_kset
+ */
+void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
+{
+	/* delete the kobj from the mc_kset */
+	kobject_unregister(&mci->edac_mci_kobj);
+}
+
+#define EDAC_DEVICE_SYMLINK	"device"
+
+/*
+ * edac_create_mci_instance_attributes
+ *	create MC driver specific attributes at the topmost level
+ *	directory of this mci instance.
+ */
+static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci)
+{
+	int err;
+	struct mcidev_sysfs_attribute *sysfs_attrib;
+
+	/* point to the start of the array and iterate over it
+	 * adding each attribute listed to this mci instance's kobject
+	 */
+	sysfs_attrib = mci->mc_driver_sysfs_attributes;
+
+	while (sysfs_attrib && sysfs_attrib->attr.name) {
+		err = sysfs_create_file(&mci->edac_mci_kobj,
+					(struct attribute *) sysfs_attrib);
+		if (err)
+			return err;
+
+		sysfs_attrib++;
+	}
+
+	return 0;
+}
+
+/*
+ * edac_remove_mci_instance_attributes
+ *	remove MC driver specific attributes at the topmost level
+ *	directory of this mci instance.
+ */
+static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci)
+{
+	struct mcidev_sysfs_attribute *sysfs_attrib;
+
+	/* point to the start of the array and iterate over it
+	 * removing each attribute listed from this mci instance's kobject
+	 */
+	sysfs_attrib = mci->mc_driver_sysfs_attributes;
+
+	/* loop if there are attributes and until we hit a NULL entry */
+	while (sysfs_attrib && sysfs_attrib->attr.name) {
+		sysfs_remove_file(&mci->edac_mci_kobj,
+					(struct attribute *) sysfs_attrib);
+		sysfs_attrib++;
+	}
+}
+
+
+/*
+ * Create a new Memory Controller kobject instance,
+ *	mc<id> under the 'mc' directory
+ *
+ * Return:
+ *	0	Success
+ *	!0	Failure
+ */
+int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
+{
+	int i;
+	int err;
+	struct csrow_info *csrow;
+	struct kobject *kobj_mci = &mci->edac_mci_kobj;
+
+	debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
+
+	/* create a symlink for the device */
+	err = sysfs_create_link(kobj_mci, &mci->dev->kobj,
+				EDAC_DEVICE_SYMLINK);
+	if (err) {
+		debugf1("%s() failure to create symlink\n", __func__);
+		goto fail0;
+	}
+
+	/* If the low level driver desires some attributes,
+	 * then create them now for the driver.
+	 */
+	if (mci->mc_driver_sysfs_attributes) {
+		err = edac_create_mci_instance_attributes(mci);
+		if (err) {
+			debugf1("%s() failure to create mci attributes\n",
+				__func__);
+			goto fail0;
+		}
+	}
+
+	/* Make directories for each CSROW object under the mc<id> kobject
+	 */
+	for (i = 0; i < mci->nr_csrows; i++) {
+		csrow = &mci->csrows[i];
+
+		/* Only expose populated CSROWs */
+		if (csrow->nr_pages > 0) {
+			err = edac_create_csrow_object(mci, csrow, i);
+			if (err) {
+				debugf1("%s() failure: create csrow %d obj\n",
+					__func__, i);
+				goto fail1;
+			}
+		}
+	}
+
+	return 0;
+
+	/* CSROW error: back out what has already been registered */
+fail1:
+	for (i--; i >= 0; i--) {
+		if (mci->csrows[i].nr_pages > 0)
+			kobject_unregister(&mci->csrows[i].kobj);
+	}
+
+	/* remove the mci instance's attributes, if any */
+	edac_remove_mci_instance_attributes(mci);
+
+	/* remove the symlink */
+	sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK);
+
+fail0:
+	return err;
+}
+
+/*
+ * remove a Memory Controller instance
+ */
+void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+{
+	int i;
+
+	debugf0("%s()\n", __func__);
+
+	/* remove all csrow kobjects */
+	for (i = 0; i < mci->nr_csrows; i++) {
+		if (mci->csrows[i].nr_pages > 0) {
+			debugf0("%s()  unreg csrow-%d\n", __func__, i);
+			kobject_unregister(&mci->csrows[i].kobj);
+		}
+	}
+
+	debugf0("%s()  remove_link\n", __func__);
+
+	/* remove the symlink */
+	sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
+
+	debugf0("%s()  remove_mci_instance\n", __func__);
+
+	/* remove this mci instance's attributes */
+	edac_remove_mci_instance_attributes(mci);
+
+	debugf0("%s()  unregister this mci kobj\n", __func__);
+
+	/* unregister this instance's kobject */
+	kobject_unregister(&mci->edac_mci_kobj);
+}
+
+/*
+ * edac_sysfs_setup_mc_kset(void)
+ *
+ * Initialize the mc_kset for the 'mc' entry
+ *	This requires creating the top 'mc' directory with a kset
+ *	and its controls/attributes.
+ *
+ *	The 'mci' instances will be grouped as children of this 'mc' kset.
+ *
+ * Return:  0 SUCCESS
+ *         !0 FAILURE error code
+ */
+int edac_sysfs_setup_mc_kset(void)
+{
+	int err = 0;
+	struct sysdev_class *edac_class;
+
+	debugf1("%s()\n", __func__);
+
+	/* get the /sys/devices/system/edac class reference */
+	edac_class = edac_get_edac_class();
+	if (edac_class == NULL) {
+		err = -ENODEV;
+		debugf1("%s() no edac_class error=%d\n", __func__, err);
+		goto fail_out;
+	}
+
+	/* Init the MC's kobject */
+	mc_kset.kobj.parent = &edac_class->kset.kobj;
+
+	/* register the mc_kset */
+	err = kset_register(&mc_kset);
+	if (err) {
+		debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
+		goto fail_out;
+	}
+
+	debugf1("%s() Registered '.../edac/mc' kobject\n", __func__);
+
+	return 0;
+
+	/* error unwind stack */
+fail_out:
+	return err;
+}
+
+/*
+ * edac_sysfs_teardown_mc_kset
+ *
+ *	deconstruct the mc_kset for memory controllers
+ */
+void edac_sysfs_teardown_mc_kset(void)
+{
+	kset_unregister(&mc_kset);
+}
+
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
new file mode 100644
index 0000000..e0c4a40
--- /dev/null
+++ b/drivers/edac/edac_module.c
@@ -0,0 +1,222 @@
+/*
+ * edac_module.c
+ *
+ * (C) 2007 www.softwarebitmaker.com
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Author: Doug Thompson <dougthompson@xmission.com>
+ *
+ */
+#include <linux/edac.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define EDAC_VERSION "Ver: 2.1.0 " __DATE__
+
+#ifdef CONFIG_EDAC_DEBUG
+/* Values of 0 to 4 will generate output */
+int edac_debug_level = 2;
+EXPORT_SYMBOL_GPL(edac_debug_level);
+#endif
+
+/* scope is to module level only */
+struct workqueue_struct *edac_workqueue;
+
+/*
+ * sysfs object: /sys/devices/system/edac
+ *	need to export to other files in this modules
+ */
+static struct sysdev_class edac_class = {
+	set_kset_name("edac"),
+};
+static int edac_class_valid;
+
+/*
+ * edac_op_state_to_string()
+ */
+char *edac_op_state_to_string(int opstate)
+{
+	if (opstate == OP_RUNNING_POLL)
+		return "POLLED";
+	else if (opstate == OP_RUNNING_INTERRUPT)
+		return "INTERRUPT";
+	else if (opstate == OP_RUNNING_POLL_INTR)
+		return "POLL-INTR";
+	else if (opstate == OP_ALLOC)
+		return "ALLOC";
+	else if (opstate == OP_OFFLINE)
+		return "OFFLINE";
+
+	return "UNKNOWN";
+}
+
+/*
+ * edac_get_edac_class()
+ *
+ *	return pointer to the edac class of 'edac'
+ */
+struct sysdev_class *edac_get_edac_class(void)
+{
+	struct sysdev_class *classptr = NULL;
+
+	if (edac_class_valid)
+		classptr = &edac_class;
+
+	return classptr;
+}
+
+/*
+ * edac_register_sysfs_edac_name()
+ *
+ *	register the 'edac' into /sys/devices/system
+ *
+ * return:
+ *	0  success
+ *	!0 error
+ */
+static int edac_register_sysfs_edac_name(void)
+{
+	int err;
+
+	/* create the /sys/devices/system/edac directory */
+	err = sysdev_class_register(&edac_class);
+
+	if (err) {
+		debugf1("%s() error=%d\n", __func__, err);
+		return err;
+	}
+
+	edac_class_valid = 1;
+	return 0;
+}
+
+/*
+ * edac_unregister_sysfs_edac_name()
+ *
+ *	unregister the 'edac' from /sys/devices/system
+ */
+static void edac_unregister_sysfs_edac_name(void)
+{
+	/* only if currently registered, then unregister it */
+	if (edac_class_valid)
+		sysdev_class_unregister(&edac_class);
+
+	edac_class_valid = 0;
+}
+
+/*
+ * edac_workqueue_setup
+ *	initialize the edac work queue for polling operations
+ */
+static int edac_workqueue_setup(void)
+{
+	edac_workqueue = create_singlethread_workqueue("edac-poller");
+	if (edac_workqueue == NULL)
+		return -ENODEV;
+	else
+		return 0;
+}
+
+/*
+ * edac_workqueue_teardown
+ *	tear down the edac workqueue
+ */
+static void edac_workqueue_teardown(void)
+{
+	if (edac_workqueue) {
+		flush_workqueue(edac_workqueue);
+		destroy_workqueue(edac_workqueue);
+		edac_workqueue = NULL;
+	}
+}
+
+/*
+ * edac_init
+ *      module initialization entry point
+ */
+static int __init edac_init(void)
+{
+	int err = 0;
+
+	edac_printk(KERN_INFO, EDAC_MC, EDAC_VERSION "\n");
+
+	/*
+	 * Harvest and clear any boot/initialization PCI parity errors
+	 *
+	 * FIXME: This only clears errors logged by devices present at time of
+	 *      module initialization.  We should also do an initial clear
+	 *      of each newly hotplugged device.
+	 */
+	edac_pci_clear_parity_errors();
+
+	/*
+	 * perform the registration of the /sys/devices/system/edac class object
+	 */
+	if (edac_register_sysfs_edac_name()) {
+		edac_printk(KERN_ERR, EDAC_MC,
+			"Error initializing 'edac' kobject\n");
+		err = -ENODEV;
+		goto error;
+	}
+
+	/*
+	 * now set up the mc_kset under the edac class object
+	 */
+	err = edac_sysfs_setup_mc_kset();
+	if (err)
+		goto sysfs_setup_fail;
+
+	/* Setup/Initialize the workq for this core */
+	err = edac_workqueue_setup();
+	if (err) {
+		edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n");
+		goto workq_fail;
+	}
+
+	return 0;
+
+	/* Error teardown stack */
+workq_fail:
+	edac_sysfs_teardown_mc_kset();
+
+sysfs_setup_fail:
+	edac_unregister_sysfs_edac_name();
+
+error:
+	return err;
+}
+
+/*
+ * edac_exit()
+ *      module exit/termination function
+ */
+static void __exit edac_exit(void)
+{
+	debugf0("%s()\n", __func__);
+
+	/* tear down the various subsystems */
+	edac_workqueue_teardown();
+	edac_sysfs_teardown_mc_kset();
+	edac_unregister_sysfs_edac_name();
+}
+
+/*
+ * Inform the kernel of our entry and exit points
+ */
+module_init(edac_init);
+module_exit(edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Doug Thompson www.softwarebitmaker.com, et al");
+MODULE_DESCRIPTION("Core library routines for EDAC reporting");
+
+/* refer to *_sysfs.c files for parameters that are exported via sysfs */
+
+#ifdef CONFIG_EDAC_DEBUG
+module_param(edac_debug_level, int, 0644);
+MODULE_PARM_DESC(edac_debug_level, "Debug level");
+#endif
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
new file mode 100644
index 0000000..a2134df
--- /dev/null
+++ b/drivers/edac/edac_module.h
@@ -0,0 +1,77 @@
+
+/*
+ * edac_module.h
+ *
+ * For defining functions/data for within the EDAC_CORE module only
+ *
+ * written by Doug Thompson <norsk5@xmission.com>
+ */
+
+#ifndef	__EDAC_MODULE_H__
+#define	__EDAC_MODULE_H__
+
+#include <linux/sysdev.h>
+
+#include "edac_core.h"
+
+/*
+ * INTERNAL EDAC MODULE:
+ * EDAC memory controller sysfs create/remove functions
+ * and setup/teardown functions
+ *
+ * edac_mc objects
+ */
+extern int edac_sysfs_setup_mc_kset(void);
+extern void edac_sysfs_teardown_mc_kset(void);
+extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci);
+extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci);
+extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci);
+extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci);
+extern void edac_check_mc_devices(void);
+extern int edac_get_log_ue(void);
+extern int edac_get_log_ce(void);
+extern int edac_get_panic_on_ue(void);
+extern int edac_mc_get_log_ue(void);
+extern int edac_mc_get_log_ce(void);
+extern int edac_mc_get_panic_on_ue(void);
+extern int edac_get_poll_msec(void);
+extern int edac_mc_get_poll_msec(void);
+
+extern int edac_device_register_sysfs_main_kobj(
+				struct edac_device_ctl_info *edac_dev);
+extern void edac_device_unregister_sysfs_main_kobj(
+				struct edac_device_ctl_info *edac_dev);
+extern int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev);
+extern void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev);
+extern struct sysdev_class *edac_get_edac_class(void);
+
+/* edac core workqueue: single CPU mode */
+extern struct workqueue_struct *edac_workqueue;
+extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+				    unsigned msec);
+extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
+extern void edac_device_reset_delay_period(struct edac_device_ctl_info
+					   *edac_dev, unsigned long value);
+extern void *edac_align_ptr(void *ptr, unsigned size);
+
+/*
+ * EDAC PCI functions
+ */
+#ifdef	CONFIG_PCI
+extern void edac_pci_do_parity_check(void);
+extern void edac_pci_clear_parity_errors(void);
+extern int edac_sysfs_pci_setup(void);
+extern void edac_sysfs_pci_teardown(void);
+extern int edac_pci_get_check_errors(void);
+extern int edac_pci_get_poll_msec(void);
+#else				/* CONFIG_PCI */
+/* pre-process these away */
+#define edac_pci_do_parity_check()
+#define edac_pci_clear_parity_errors()
+#define edac_sysfs_pci_setup()  (0)
+#define edac_sysfs_pci_teardown()
+#define edac_pci_get_check_errors()	(0)
+#define edac_pci_get_poll_msec()	(0)
+#endif				/* CONFIG_PCI */
+
+#endif				/* __EDAC_MODULE_H__ */
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
new file mode 100644
index 0000000..d9cd5e0
--- /dev/null
+++ b/drivers/edac/edac_pci.c
@@ -0,0 +1,433 @@
+/*
+ * EDAC PCI component
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/highmem.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+static DEFINE_MUTEX(edac_pci_ctls_mutex);
+static struct list_head edac_pci_list = LIST_HEAD_INIT(edac_pci_list);
+
+static inline void edac_lock_pci_list(void)
+{
+	mutex_lock(&edac_pci_ctls_mutex);
+}
+
+static inline void edac_unlock_pci_list(void)
+{
+	mutex_unlock(&edac_pci_ctls_mutex);
+}
+
+/*
+ * The alloc() and free() functions for the 'edac_pci' control info
+ * structure. The chip driver will allocate one of these for each
+ * edac_pci it is going to control/register with the EDAC CORE.
+ */
+struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
+						const char *edac_pci_name)
+{
+	struct edac_pci_ctl_info *pci;
+	void *pvt;
+	unsigned int size;
+
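+	/* Compute the layout against a NULL base pointer: 'pvt' becomes the
+	 * aligned offset of the private area and 'size' covers both the
+	 * ctl_info and that area for the single kzalloc() below.
+	 */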
+	pci = (struct edac_pci_ctl_info *)0;
+	pvt = edac_align_ptr(&pci[1], sz_pvt);
+	size = ((unsigned long)pvt) + sz_pvt;
+
+	if ((pci = kzalloc(size, GFP_KERNEL)) == NULL)
+		return NULL;
+
+	pvt = sz_pvt ? ((char *)pci) + ((unsigned long)pvt) : NULL;
+
+	pci->pvt_info = pvt;
+
+	pci->op_state = OP_ALLOC;
+
+	snprintf(pci->name, strlen(edac_pci_name) + 1, "%s", edac_pci_name);
+
+	return pci;
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
+
+/*
+ * edac_pci_free_ctl_info()
+ * 	frees the memory allocated by edac_pci_alloc_ctl_info() function
+ */
+void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci)
+{
+	kfree(pci);
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_free_ctl_info);
+
+/*
+ * find_edac_pci_by_dev()
+ * 	scans the edac_pci list for a specific 'struct device *'
+ */
+static struct edac_pci_ctl_info *find_edac_pci_by_dev(struct device *dev)
+{
+	struct edac_pci_ctl_info *pci;
+	struct list_head *item;
+
+	debugf3("%s()\n", __func__);
+
+	list_for_each(item, &edac_pci_list) {
+		pci = list_entry(item, struct edac_pci_ctl_info, link);
+
+		if (pci->dev == dev)
+			return pci;
+	}
+
+	return NULL;
+}
+
+/*
+ * add_edac_pci_to_global_list
+ * 	Before calling this function, caller must assign a unique value to
+ * 	pci->pci_idx.
+ * 	Return:
+ * 		0 on success
+ * 		1 on failure
+ */
+static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci)
+{
+	struct list_head *item, *insert_before;
+	struct edac_pci_ctl_info *rover;
+
+	insert_before = &edac_pci_list;
+
+	/* Determine if already on the list */
+	if (unlikely((rover = find_edac_pci_by_dev(pci->dev)) != NULL))
+		goto fail0;
+
+	/* Insert in ascending order by 'pci_idx', so find position */
+	list_for_each(item, &edac_pci_list) {
+		rover = list_entry(item, struct edac_pci_ctl_info, link);
+
+		if (rover->pci_idx >= pci->pci_idx) {
+			if (unlikely(rover->pci_idx == pci->pci_idx))
+				goto fail1;
+
+			insert_before = item;
+			break;
+		}
+	}
+
+	list_add_tail_rcu(&pci->link, insert_before);
+	return 0;
+
+fail0:
+	edac_printk(KERN_WARNING, EDAC_PCI,
+		"%s (%s) %s %s already assigned %d\n",
+		rover->dev->bus_id, dev_name(rover),
+		rover->mod_name, rover->ctl_name, rover->pci_idx);
+	return 1;
+
+fail1:
+	edac_printk(KERN_WARNING, EDAC_PCI,
+		"but in low-level driver: attempt to assign\n"
+		"\tduplicate pci_idx %d in %s()\n", rover->pci_idx,
+		__func__);
+	return 1;
+}
+
+/*
+ * complete_edac_pci_list_del
+ */
+static void complete_edac_pci_list_del(struct rcu_head *head)
+{
+	struct edac_pci_ctl_info *pci;
+
+	pci = container_of(head, struct edac_pci_ctl_info, rcu);
+	INIT_LIST_HEAD(&pci->link);
+	complete(&pci->complete);
+}
+
+/*
+ * del_edac_pci_from_global_list
+ */
+static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci)
+{
+	list_del_rcu(&pci->link);
+	init_completion(&pci->complete);
+	call_rcu(&pci->rcu, complete_edac_pci_list_del);
+	wait_for_completion(&pci->complete);
+}
+
+/*
+ * edac_pci_find()
+ * 	Search for an edac_pci_ctl_info structure whose index is 'idx'
+ *
+ * If found, return a pointer to the structure
+ * Else return NULL.
+ *
+ * Caller must hold pci_ctls_mutex.
+ */
+struct edac_pci_ctl_info *edac_pci_find(int idx)
+{
+	struct list_head *item;
+	struct edac_pci_ctl_info *pci;
+
+	/* Iterate over the list, looking for an exact match of the ID */
+	list_for_each(item, &edac_pci_list) {
+		pci = list_entry(item, struct edac_pci_ctl_info, link);
+
+		if (pci->pci_idx >= idx) {
+			if (pci->pci_idx == idx)
+				return pci;
+
+			/* not on list, so terminate early */
+			break;
+		}
+	}
+
+	return NULL;
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_find);
+
+/*
+ * edac_pci_workq_function()
+ * 	performs the operation scheduled by a workq request
+ */
+static void edac_pci_workq_function(struct work_struct *work_req)
+{
+	struct delayed_work *d_work = (struct delayed_work *)work_req;
+	struct edac_pci_ctl_info *pci = to_edac_pci_ctl_work(d_work);
+
+	edac_lock_pci_list();
+
+	if ((pci->op_state == OP_RUNNING_POLL) &&
+		(pci->edac_check != NULL) && (edac_pci_get_check_errors()))
+		pci->edac_check(pci);
+
+	edac_unlock_pci_list();
+
+	/* Reschedule */
+	queue_delayed_work(edac_workqueue, &pci->work,
+			msecs_to_jiffies(edac_pci_get_poll_msec()));
+}
+
+/*
+ * edac_pci_workq_setup()
+ * 	initialize a workq item for this edac_pci instance
+ * 	passing in the new delay period in msec
+ */
+static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
+				 unsigned int msec)
+{
+	debugf0("%s()\n", __func__);
+
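+	/* the polling period comes from the module-wide edac_pci_poll_msec
+	 * value; the msec argument is currently unused
+	 */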
+	INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
+	queue_delayed_work(edac_workqueue, &pci->work,
+			msecs_to_jiffies(edac_pci_get_poll_msec()));
+}
+
+/*
+ * edac_pci_workq_teardown()
+ * 	stop the workq processing on this edac_pci instance
+ */
+static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
+{
+	int status;
+
+	status = cancel_delayed_work(&pci->work);
+	if (status == 0)
+		flush_workqueue(edac_workqueue);
+}
+
+/*
+ * edac_pci_reset_delay_period
+ */
+void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
+				 unsigned long value)
+{
+	edac_lock_pci_list();
+
+	edac_pci_workq_teardown(pci);
+
+	edac_pci_workq_setup(pci, value);
+
+	edac_unlock_pci_list();
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
+
+/*
+ * edac_pci_add_device: Insert the 'edac_dev' structure into the
+ * edac_pci global list and create sysfs entries associated with
+ * edac_pci structure.
+ * @pci: pointer to the edac_device structure to be added to the list
+ * @edac_idx: A unique numeric identifier to be assigned to the
+ * 'edac_pci' structure.
+ *
+ * Return:
+ *      0       Success
+ *      !0      Failure
+ */
+int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
+{
+	debugf0("%s()\n", __func__);
+
+	pci->pci_idx = edac_idx;
+
+	edac_lock_pci_list();
+
+	if (add_edac_pci_to_global_list(pci))
+		goto fail0;
+
+	pci->start_time = jiffies;
+
+	if (edac_pci_create_sysfs(pci)) {
+		edac_pci_printk(pci, KERN_WARNING,
+				"failed to create sysfs pci\n");
+		goto fail1;
+	}
+
+	if (pci->edac_check != NULL) {
+		pci->op_state = OP_RUNNING_POLL;
+
+		edac_pci_workq_setup(pci, 1000);
+	} else {
+		pci->op_state = OP_RUNNING_INTERRUPT;
+	}
+
+	edac_pci_printk(pci, KERN_INFO,
+			"Giving out device to module '%s' controller '%s':"
+			" DEV '%s' (%s)\n",
+			pci->mod_name,
+			pci->ctl_name,
+			dev_name(pci), edac_op_state_to_string(pci->op_state));
+
+	edac_unlock_pci_list();
+	return 0;
+
+fail1:
+	del_edac_pci_from_global_list(pci);
+fail0:
+	edac_unlock_pci_list();
+	return 1;
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_add_device);
+
+/*
+ * edac_pci_del_device()
+ * 	Remove sysfs entries for specified edac_pci structure and
+ * 	then remove edac_pci structure from global list
+ *
+ * @dev:
+ * 	Pointer to 'struct device' representing edac_pci structure
+ * 	to remove
+ *
+ * Return:
+ * 	Pointer to removed edac_pci structure,
+ * 	or NULL if device not found
+ */
+struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
+{
+	struct edac_pci_ctl_info *pci;
+
+	debugf0("%s()\n", __func__);
+
+	edac_lock_pci_list();
+
+	if ((pci = find_edac_pci_by_dev(dev)) == NULL) {
+		edac_unlock_pci_list();
+		return NULL;
+	}
+
+	pci->op_state = OP_OFFLINE;
+
+	edac_pci_workq_teardown(pci);
+
+	edac_pci_remove_sysfs(pci);
+
+	del_edac_pci_from_global_list(pci);
+
+	edac_unlock_pci_list();
+
+	edac_printk(KERN_INFO, EDAC_PCI,
+		"Removed device %d for %s %s: DEV %s\n",
+		pci->pci_idx, pci->mod_name, pci->ctl_name, dev_name(pci));
+
+	return pci;
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_del_device);
+
+void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
+{
+	edac_pci_do_parity_check();
+}
+
+static int edac_pci_idx;
+#define EDAC_PCI_GENCTL_NAME	"EDAC PCI controller"
+
+struct edac_pci_gen_data {
+	int edac_idx;
+};
+
+struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
+						const char *mod_name)
+{
+	struct edac_pci_ctl_info *pci;
+	struct edac_pci_gen_data *pdata;
+
+	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), EDAC_PCI_GENCTL_NAME);
+	if (!pci)
+		return NULL;
+
+	pdata = pci->pvt_info;
+	pci->dev = dev;
+	dev_set_drvdata(pci->dev, pci);
+	pci->dev_name = pci_name(to_pci_dev(dev));
+
+	pci->mod_name = mod_name;
+	pci->ctl_name = EDAC_PCI_GENCTL_NAME;
+	pci->edac_check = edac_pci_generic_check;
+
+	pdata->edac_idx = edac_pci_idx++;
+
+	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
+		debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+		edac_pci_free_ctl_info(pci);
+		return NULL;
+	}
+
+	return pci;
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl);
+
+void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci)
+{
+	edac_pci_del_device(pci->dev);
+	edac_pci_free_ctl_info(pci);
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_release_generic_ctl);
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
new file mode 100644
index 0000000..fac94ca
--- /dev/null
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -0,0 +1,620 @@
+/*
+ * (C) 2005, 2006 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Doug Thompson <norsk5@xmission.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#ifdef CONFIG_PCI
+
+#define EDAC_PCI_SYMLINK	"device"
+
+static int check_pci_errors;	/* default off; check PCI parity when set */
+static int edac_pci_panic_on_pe;	/* default no panic on PCI Parity */
+static int edac_pci_log_pe = 1;	/* log PCI parity errors */
+static int edac_pci_log_npe = 1;	/* log PCI non-parity errors */
+static atomic_t pci_parity_count = ATOMIC_INIT(0);
+static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
+static int edac_pci_poll_msec = 1000;
+
+static struct kobject edac_pci_kobj;	/* /sys/devices/system/edac/pci */
+static struct completion edac_pci_kobj_complete;
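+/* count of edac_pci instances sharing the top-level 'pci' kobject */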
+static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
+
+int edac_pci_get_check_errors(void)
+{
+	return check_pci_errors;
+}
+
+int edac_pci_get_log_pe(void)
+{
+	return edac_pci_log_pe;
+}
+
+int edac_pci_get_log_npe(void)
+{
+	return edac_pci_log_npe;
+}
+
+int edac_pci_get_panic_on_pe(void)
+{
+	return edac_pci_panic_on_pe;
+}
+
+int edac_pci_get_poll_msec(void)
+{
+	return edac_pci_poll_msec;
+}
+
+/**************************** EDAC PCI sysfs instance *******************/
+static ssize_t instance_pe_count_show(struct edac_pci_ctl_info *pci, char *data)
+{
+	return sprintf(data, "%u\n", atomic_read(&pci->counters.pe_count));
+}
+
+static ssize_t instance_npe_count_show(struct edac_pci_ctl_info *pci,
+				char *data)
+{
+	return sprintf(data, "%u\n", atomic_read(&pci->counters.npe_count));
+}
+
+#define to_instance(k) container_of(k, struct edac_pci_ctl_info, kobj)
+#define to_instance_attr(a) container_of(a, struct instance_attribute, attr)
+
+/* DEVICE instance kobject release() function */
+static void edac_pci_instance_release(struct kobject *kobj)
+{
+	struct edac_pci_ctl_info *pci;
+
+	debugf1("%s()\n", __func__);
+
+	pci = to_instance(kobj);
+	complete(&pci->kobj_complete);
+}
+
+/* instance specific attribute structure */
+struct instance_attribute {
+	struct attribute attr;
+	 ssize_t(*show) (struct edac_pci_ctl_info *, char *);
+	 ssize_t(*store) (struct edac_pci_ctl_info *, const char *, size_t);
+};
+
+/* Function to 'show' fields from the edac_pci 'instance' structure */
+static ssize_t edac_pci_instance_show(struct kobject *kobj,
+				struct attribute *attr, char *buffer)
+{
+	struct edac_pci_ctl_info *pci = to_instance(kobj);
+	struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+	if (instance_attr->show)
+		return instance_attr->show(pci, buffer);
+	return -EIO;
+}
+
+/* Function to 'store' fields into the edac_pci 'instance' structure */
+static ssize_t edac_pci_instance_store(struct kobject *kobj,
+				struct attribute *attr,
+				const char *buffer, size_t count)
+{
+	struct edac_pci_ctl_info *pci = to_instance(kobj);
+	struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+	if (instance_attr->store)
+		return instance_attr->store(pci, buffer, count);
+	return -EIO;
+}
+
+static struct sysfs_ops pci_instance_ops = {
+	.show = edac_pci_instance_show,
+	.store = edac_pci_instance_store
+};
+
+#define INSTANCE_ATTR(_name, _mode, _show, _store)	\
+static struct instance_attribute attr_instance_##_name = {	\
+	.attr	= {.name = __stringify(_name), .mode = _mode },	\
+	.show	= _show,					\
+	.store	= _store,					\
+};
+
+INSTANCE_ATTR(pe_count, S_IRUGO, instance_pe_count_show, NULL);
+INSTANCE_ATTR(npe_count, S_IRUGO, instance_npe_count_show, NULL);
+
+/* pci instance attributes */
+static struct instance_attribute *pci_instance_attr[] = {
+	&attr_instance_pe_count,
+	&attr_instance_npe_count,
+	NULL
+};
+
+/* the ktype for pci instance */
+static struct kobj_type ktype_pci_instance = {
+	.release = edac_pci_instance_release,
+	.sysfs_ops = &pci_instance_ops,
+	.default_attrs = (struct attribute **)pci_instance_attr,
+};
+
+static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
+{
+	int err;
+
+	pci->kobj.parent = &edac_pci_kobj;
+	pci->kobj.ktype = &ktype_pci_instance;
+
+	err = kobject_set_name(&pci->kobj, "pci%d", idx);
+	if (err)
+		return err;
+
+	err = kobject_register(&pci->kobj);
+	if (err != 0) {
+		debugf2("%s() failed to register instance pci%d\n",
+			__func__, idx);
+		return err;
+	}
+
+	debugf1("%s() Register instance 'pci%d' kobject\n", __func__, idx);
+
+	return 0;
+}
+
+static void
+edac_pci_delete_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
+{
+	init_completion(&pci->kobj_complete);
+	kobject_unregister(&pci->kobj);
+	wait_for_completion(&pci->kobj_complete);
+}
+
+/***************************** EDAC PCI sysfs root **********************/
+#define to_edacpci(k) container_of(k, struct edac_pci_ctl_info, kobj)
+#define to_edacpci_attr(a) container_of(a, struct edac_pci_attr, attr)
+
+static ssize_t edac_pci_int_show(void *ptr, char *buffer)
+{
+	int *value = ptr;
+	return sprintf(buffer, "%d\n", *value);
+}
+
+static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
+{
+	int *value = ptr;
+
+	if (isdigit(*buffer))
+		*value = simple_strtoul(buffer, NULL, 0);
+
+	return count;
+}
+
+struct edac_pci_dev_attribute {
+	struct attribute attr;
+	void *value;
+	 ssize_t(*show) (void *, char *);
+	 ssize_t(*store) (void *, const char *, size_t);
+};
+
+/* Set of show/store abstract level functions for PCI Parity object */
+static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
+				 char *buffer)
+{
+	struct edac_pci_dev_attribute *edac_pci_dev;
+	edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
+
+	if (edac_pci_dev->show)
+		return edac_pci_dev->show(edac_pci_dev->value, buffer);
+	return -EIO;
+}
+
+static ssize_t edac_pci_dev_store(struct kobject *kobj,
+				struct attribute *attr, const char *buffer,
+				size_t count)
+{
+	struct edac_pci_dev_attribute *edac_pci_dev;
+	edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
+
+	if (edac_pci_dev->store)
+		return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
+	return -EIO;
+}
+
+static struct sysfs_ops edac_pci_sysfs_ops = {
+	.show = edac_pci_dev_show,
+	.store = edac_pci_dev_store
+};
+
+#define EDAC_PCI_ATTR(_name,_mode,_show,_store)			\
+static struct edac_pci_dev_attribute edac_pci_attr_##_name = {		\
+	.attr = {.name = __stringify(_name), .mode = _mode },	\
+	.value  = &_name,					\
+	.show   = _show,					\
+	.store  = _store,					\
+};
+
+#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store)	\
+static struct edac_pci_dev_attribute edac_pci_attr_##_name = {		\
+	.attr = {.name = __stringify(_name), .mode = _mode },	\
+	.value  = _data,					\
+	.show   = _show,					\
+	.store  = _store,					\
+};
+
+/* PCI Parity control files */
+EDAC_PCI_ATTR(check_pci_errors, S_IRUGO | S_IWUSR, edac_pci_int_show,
+	edac_pci_int_store);
+EDAC_PCI_ATTR(edac_pci_log_pe, S_IRUGO | S_IWUSR, edac_pci_int_show,
+	edac_pci_int_store);
+EDAC_PCI_ATTR(edac_pci_log_npe, S_IRUGO | S_IWUSR, edac_pci_int_show,
+	edac_pci_int_store);
+EDAC_PCI_ATTR(edac_pci_panic_on_pe, S_IRUGO | S_IWUSR, edac_pci_int_show,
+	edac_pci_int_store);
+EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
+EDAC_PCI_ATTR(pci_nonparity_count, S_IRUGO, edac_pci_int_show, NULL);
+
+/* Base Attributes of the EDAC PCI object */
+static struct edac_pci_dev_attribute *edac_pci_attr[] = {
+	&edac_pci_attr_check_pci_errors,
+	&edac_pci_attr_edac_pci_log_pe,
+	&edac_pci_attr_edac_pci_log_npe,
+	&edac_pci_attr_edac_pci_panic_on_pe,
+	&edac_pci_attr_pci_parity_count,
+	&edac_pci_attr_pci_nonparity_count,
+	NULL,
+};
+
+/* No memory to release */
+static void edac_pci_release(struct kobject *kobj)
+{
+	struct edac_pci_ctl_info *pci;
+
+	pci = to_edacpci(kobj);
+
+	debugf1("%s()\n", __func__);
+	complete(&pci->kobj_complete);
+}
+
+static struct kobj_type ktype_edac_pci = {
+	.release = edac_pci_release,
+	.sysfs_ops = &edac_pci_sysfs_ops,
+	.default_attrs = (struct attribute **)edac_pci_attr,
+};
+
+/**
+ * edac_sysfs_pci_setup()
+ *
+ *	setup the sysfs for EDAC PCI attributes
+ *	assumes edac_class has already been initialized
+ */
+int edac_pci_register_main_kobj(void)
+{
+	int err;
+	struct sysdev_class *edac_class;
+
+	debugf1("%s()\n", __func__);
+
+	edac_class = edac_get_edac_class();
+	if (edac_class == NULL) {
+		debugf1("%s() no edac_class\n", __func__);
+		return -ENODEV;
+	}
+
+	edac_pci_kobj.ktype = &ktype_edac_pci;
+
+	edac_pci_kobj.parent = &edac_class->kset.kobj;
+
+	err = kobject_set_name(&edac_pci_kobj, "pci");
+	if (err)
+		return err;
+
+	/* Instantiate the pci object */
+	/* FIXME: maybe new sysdev_create_subdir() */
+	err = kobject_register(&edac_pci_kobj);
+
+	if (err) {
+		debugf1("Failed to register '.../edac/pci'\n");
+		return err;
+	}
+
+	debugf1("Registered '.../edac/pci' kobject\n");
+
+	return 0;
+}
+
+/*
+ * edac_pci_unregister_main_kobj()
+ *
+ *	perform the sysfs teardown for the PCI attributes
+ */
+void edac_pci_unregister_main_kobj(void)
+{
+	debugf0("%s()\n", __func__);
+	init_completion(&edac_pci_kobj_complete);
+	kobject_unregister(&edac_pci_kobj);
+	wait_for_completion(&edac_pci_kobj_complete);
+}
+
+int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
+{
+	int err;
+	struct kobject *edac_kobj = &pci->kobj;
+
+	if (atomic_inc_return(&edac_pci_sysfs_refcount) == 1) {
+		err = edac_pci_register_main_kobj();
+		if (err) {
+			atomic_dec(&edac_pci_sysfs_refcount);
+			return err;
+		}
+	}
+
+	err = edac_pci_create_instance_kobj(pci, pci->pci_idx);
+	if (err) {
+		if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0)
+			edac_pci_unregister_main_kobj();
+		return err;
+	}
+
+	debugf0("%s() idx=%d\n", __func__, pci->pci_idx);
+
+	err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK);
+	if (err) {
+		debugf0("%s() sysfs_create_link() returned err= %d\n",
+			__func__, err);
+		return err;
+	}
+
+	return 0;
+}
+
+void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
+{
+	debugf0("%s()\n", __func__);
+
+	edac_pci_delete_instance_kobj(pci, pci->pci_idx);
+
+	sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK);
+
+	if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0)
+		edac_pci_unregister_main_kobj();
+}
+
+/************************ PCI error handling *************************/
+static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
+{
+	int where;
+	u16 status;
+
+	where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
+	pci_read_config_word(dev, where, &status);
+
+	/* If we get back 0xFFFF then we must suspect that the card has been
+	 * pulled but the Linux PCI layer has not yet finished cleaning up.
+	 * We don't want to report on such devices
+	 */
+
+	if (status == 0xFFFF) {
+		u32 sanity;
+
+		pci_read_config_dword(dev, 0, &sanity);
+
+		if (sanity == 0xFFFFFFFF)
+			return 0;
+	}
+
+	status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
+		PCI_STATUS_PARITY;
+
+	if (status)
+		/* reset only the bits we are interested in */
+		pci_write_config_word(dev, where, status);
+
+	return status;
+}
+
+typedef void (*pci_parity_check_fn_t) (struct pci_dev * dev);
+
+/* Clear any PCI parity errors logged by this device. */
+static void edac_pci_dev_parity_clear(struct pci_dev *dev)
+{
+	u8 header_type;
+
+	get_pci_parity_status(dev, 0);
+
+	/* read the device TYPE, looking for bridges */
+	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+	if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
+		get_pci_parity_status(dev, 1);
+}
+
+/*
+ *  PCI Parity polling
+ *
+ */
+static void edac_pci_dev_parity_test(struct pci_dev *dev)
+{
+	u16 status;
+	u8 header_type;
+
+	/* read the STATUS register on this device
+	 */
+	status = get_pci_parity_status(dev, 0);
+
+	debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id);
+
+	/* check the status reg for errors */
+	if (status) {
+		if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
+			edac_printk(KERN_CRIT, EDAC_PCI,
+				"Signaled System Error on %s\n",
+				pci_name(dev));
+			atomic_inc(&pci_nonparity_count);
+		}
+
+		if (status & (PCI_STATUS_PARITY)) {
+			edac_printk(KERN_CRIT, EDAC_PCI,
+				"Master Data Parity Error on %s\n",
+				pci_name(dev));
+
+			atomic_inc(&pci_parity_count);
+		}
+
+		if (status & (PCI_STATUS_DETECTED_PARITY)) {
+			edac_printk(KERN_CRIT, EDAC_PCI,
+				"Detected Parity Error on %s\n",
+				pci_name(dev));
+
+			atomic_inc(&pci_parity_count);
+		}
+	}
+
+	/* read the device TYPE, looking for bridges */
+	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+	debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id);
+
+	if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+		/* On bridges, need to examine secondary status register  */
+		status = get_pci_parity_status(dev, 1);
+
+		debugf2("PCI SEC_STATUS= 0x%04x %s\n", status, dev->dev.bus_id);
+
+		/* check the secondary status reg for errors */
+		if (status) {
+			if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
+				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+					"Signaled System Error on %s\n",
+					pci_name(dev));
+				atomic_inc(&pci_nonparity_count);
+			}
+
+			if (status & (PCI_STATUS_PARITY)) {
+				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+					"Master Data Parity Error on "
+					"%s\n", pci_name(dev));
+
+				atomic_inc(&pci_parity_count);
+			}
+
+			if (status & (PCI_STATUS_DETECTED_PARITY)) {
+				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+					"Detected Parity Error on %s\n",
+					pci_name(dev));
+
+				atomic_inc(&pci_parity_count);
+			}
+		}
+	}
+}
+
+/*
+ * pci_dev parity list iterator
+ *	Scan the PCI device list for one iteration, looking for SERRORs,
+ *	Master Parity ERRORs or Parity ERRORs on primary or secondary devices
+ */
+static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
+{
+	struct pci_dev *dev = NULL;
+
+	/* request for kernel access to the next PCI device, if any,
+	 * and while we are looking at it have its reference count
+	 * bumped until we are done with it
+	 */
+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+		fn(dev);
+	}
+}
+
+/*
+ * edac_pci_do_parity_check
+ *
+ *	performs the actual PCI parity check operation
+ */
+void edac_pci_do_parity_check(void)
+{
+	unsigned long flags;
+	int before_count;
+
+	debugf3("%s()\n", __func__);
+
+	if (!check_pci_errors)
+		return;
+
+	before_count = atomic_read(&pci_parity_count);
+
+	/* scan all PCI devices looking for a Parity Error on devices and
+	 * bridges
+	 */
+	local_irq_save(flags);
+	edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
+	local_irq_restore(flags);
+
+	/* Only if operator has selected panic on PCI Error */
+	if (edac_pci_get_panic_on_pe()) {
+		/* If the count is different 'after' from 'before' */
+		if (before_count != atomic_read(&pci_parity_count))
+			panic("EDAC: PCI Parity Error");
+	}
+}
+
+void edac_pci_clear_parity_errors(void)
+{
+	/* Clear any PCI bus parity errors that devices initially have logged
+	 * in their registers.
+	 */
+	edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
+}
+
+void edac_pci_handle_pe(struct edac_pci_ctl_info *pci, const char *msg)
+{
+	/* global PE counter incremented by edac_pci_do_parity_check() */
+	atomic_inc(&pci->counters.pe_count);
+
+	if (edac_pci_get_log_pe())
+		edac_pci_printk(pci, KERN_WARNING,
+				"Parity Error ctl: %s %d: %s\n",
+				pci->ctl_name, pci->pci_idx, msg);
+
+	/*
+	 * poke all PCI devices and see which one is the troublemaker
+	 * panic() is called if set
+	 */
+	edac_pci_do_parity_check();
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_handle_pe);
+
+void edac_pci_handle_npe(struct edac_pci_ctl_info *pci, const char *msg)
+{
+	/* global NPE counter incremented by edac_pci_do_parity_check() */
+	atomic_inc(&pci->counters.npe_count);
+
+	if (edac_pci_get_log_npe())
+		edac_pci_printk(pci, KERN_WARNING,
+				"Non-Parity Error ctl: %s %d: %s\n",
+				pci->ctl_name, pci->pci_idx, msg);
+
+	/*
+	 * poke all PCI devices and see which one is the troublemaker;
+	 * panic() is called from there if panic-on-PE is set
+	 */
+	edac_pci_do_parity_check();
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_handle_npe);
+
+/*
+ * Define the PCI parameter to the module
+ */
+module_param(check_pci_errors, int, 0644);
+MODULE_PARM_DESC(check_pci_errors,
+		 "Check for PCI bus parity errors: 0=off 1=on");
+module_param(edac_pci_panic_on_pe, int, 0644);
+MODULE_PARM_DESC(edac_pci_panic_on_pe,
+		 "Panic on PCI Bus Parity error: 0=off 1=on");
+
+#endif				/* CONFIG_PCI */
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c
new file mode 100644
index 0000000..20b428a
--- /dev/null
+++ b/drivers/edac/edac_stub.c
@@ -0,0 +1,46 @@
+/*
+ * common EDAC components that must be in kernel
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#include <linux/module.h>
+#include <linux/edac.h>
+#include <asm/atomic.h>
+#include <asm/edac.h>
+
+int edac_op_state = EDAC_OPSTATE_INVAL;
+EXPORT_SYMBOL_GPL(edac_op_state);
+
+atomic_t edac_handlers = ATOMIC_INIT(0);
+EXPORT_SYMBOL_GPL(edac_handlers);
+
+int edac_err_assert = 0;
+EXPORT_SYMBOL_GPL(edac_err_assert);
+
+/*
+ * called to determine if there is an EDAC driver interested in
+ * knowing an event (such as NMI) occurred
+ */
+int edac_handler_set(void)
+{
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		return 0;
+
+	return atomic_read(&edac_handlers);
+}
+EXPORT_SYMBOL_GPL(edac_handler_set);
+
+/*
+ * handler for NMI type of interrupts to assert error
+ */
+void edac_atomic_assert_error(void)
+{
+	edac_err_assert++;
+}
+EXPORT_SYMBOL_GPL(edac_atomic_assert_error);
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
new file mode 100644
index 0000000..0ecfdc4
--- /dev/null
+++ b/drivers/edac/i3000_edac.c
@@ -0,0 +1,506 @@
+/*
+ * Intel 3000/3010 Memory Controller kernel module
+ * Copyright (C) 2007 Akamai Technologies, Inc.
+ * Shamelessly copied from:
+ * 	Intel D82875P Memory Controller kernel module
+ * 	(C) 2003 Linux Networx (http://lnxi.com)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include "edac_core.h"
+
+#define I3000_REVISION		"1.1"
+
+#define EDAC_MOD_STR		"i3000_edac"
+
+#define I3000_RANKS		8
+#define I3000_RANKS_PER_CHANNEL	4
+#define I3000_CHANNELS		2
+
+/* Intel 3000 register addresses - device 0 function 0 - DRAM Controller */
+
+#define I3000_MCHBAR		0x44	/* MCH Memory Mapped Register BAR */
+#define I3000_MCHBAR_MASK	0xffffc000
+#define I3000_MMR_WINDOW_SIZE	16384
+
+#define I3000_EDEAP		0x70	/* Extended DRAM Error Address Pointer (8b)
+					 *
+					 * 7:1   reserved
+					 * 0     bit 32 of address
+					 */
+#define I3000_DEAP		0x58	/* DRAM Error Address Pointer (32b)
+					 *
+					 * 31:7  address
+					 * 6:1   reserved
+					 * 0     Error channel 0/1
+					 */
+#define I3000_DEAP_GRAIN	(1 << 7)
+#define I3000_DEAP_PFN(edeap, deap)	((((edeap) & 1) << (32 - PAGE_SHIFT)) | \
+					((deap) >> PAGE_SHIFT))
+#define I3000_DEAP_OFFSET(deap)		((deap) & ~(I3000_DEAP_GRAIN-1) & ~PAGE_MASK)
+#define I3000_DEAP_CHANNEL(deap)	((deap) & 1)
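+/*
+ * Illustrative decode (not from the datasheet): with 4 KiB pages
+ * (PAGE_SHIFT == 12), EDEAP = 0x01 and DEAP = 0x12345680 yield
+ * pfn = (1 << 20) | 0x12345 = 0x112345, offset = 0x680, channel = 0.
+ */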
+
+#define I3000_DERRSYN		0x5c	/* DRAM Error Syndrome (8b)
+					 *
+					 *  7:0  DRAM ECC Syndrome
+					 */
+
+#define I3000_ERRSTS		0xc8	/* Error Status Register (16b)
+					 *
+					 * 15:12 reserved
+					 * 11    MCH Thermal Sensor Event for SMI/SCI/SERR
+					 * 10    reserved
+					 *  9    LOCK to non-DRAM Memory Flag (LCKF)
+					 *  8    Received Refresh Timeout Flag (RRTOF)
+					 *  7:2  reserved
+					 *  1    Multiple-bit DRAM ECC Error Flag (DMERR)
+					 *  0    Single-bit DRAM ECC Error Flag (DSERR)
+					 */
+#define I3000_ERRSTS_BITS	0x0b03	/* bits which indicate errors */
+#define I3000_ERRSTS_UE		0x0002
+#define I3000_ERRSTS_CE		0x0001
+
+#define I3000_ERRCMD		0xca	/* Error Command (16b)
+					 *
+					 * 15:12 reserved
+					 * 11    SERR on MCH Thermal Sensor Event (TSESERR)
+					 * 10    reserved
+					 *  9    SERR on LOCK to non-DRAM Memory (LCKERR)
+					 *  8    SERR on DRAM Refresh Timeout (DRTOERR)
+					 *  7:2  reserved
+					 *  1    SERR Multiple-Bit DRAM ECC Error (DMERR)
+					 *  0    SERR on Single-Bit ECC Error (DSERR)
+					 */
+
+/* Intel  MMIO register space - device 0 function 0 - MMR space */
+
+#define I3000_DRB_SHIFT 25	/* 32MiB grain */
+
+#define I3000_C0DRB		0x100	/* Channel 0 DRAM Rank Boundary (8b x 4)
+					 *
+					 * 7:0   Channel 0 DRAM Rank Boundary Address
+					 */
+#define I3000_C1DRB		0x180	/* Channel 1 DRAM Rank Boundary (8b x 4)
+					 *
+					 * 7:0   Channel 1 DRAM Rank Boundary Address
+					 */
+
+#define I3000_C0DRA		0x108	/* Channel 0 DRAM Rank Attribute (8b x 2)
+					 *
+					 * 7     reserved
+					 * 6:4   DRAM odd Rank Attribute
+					 * 3     reserved
+					 * 2:0   DRAM even Rank Attribute
+					 *
+					 * Each attribute defines the page
+					 * size of the corresponding rank:
+					 *     000: unpopulated
+					 *     001: reserved
+					 *     010: 4 KB
+					 *     011: 8 KB
+					 *     100: 16 KB
+					 *     Others: reserved
+					 */
+#define I3000_C1DRA		0x188	/* Channel 1 DRAM Rank Attribute (8b x 2) */
+#define ODD_RANK_ATTRIB(dra) (((dra) & 0x70) >> 4)
+#define EVEN_RANK_ATTRIB(dra) ((dra) & 0x07)
+
+#define I3000_C0DRC0		0x120	/* DRAM Controller Mode 0 (32b)
+					 *
+					 * 31:30 reserved
+					 * 29    Initialization Complete (IC)
+					 * 28:11 reserved
+					 * 10:8  Refresh Mode Select (RMS)
+					 * 7     reserved
+					 * 6:4   Mode Select (SMS)
+					 * 3:2   reserved
+					 * 1:0   DRAM Type (DT)
+					 */
+
+#define I3000_C0DRC1		0x124	/* DRAM Controller Mode 1 (32b)
+					 *
+					 * 31    Enhanced Addressing Enable (ENHADE)
+					 * 30:0  reserved
+					 */
+
+enum i3000p_chips {
+	I3000 = 0,
+};
+
+struct i3000_dev_info {
+	const char *ctl_name;
+};
+
+struct i3000_error_info {
+	u16 errsts;
+	u8 derrsyn;
+	u8 edeap;
+	u32 deap;
+	u16 errsts2;
+};
+
+static const struct i3000_dev_info i3000_devs[] = {
+	[I3000] = {
+		.ctl_name = "i3000"},
+};
+
+static struct pci_dev *mci_pdev;
+static int i3000_registered = 1;
+static struct edac_pci_ctl_info *i3000_pci;
+
+static void i3000_get_error_info(struct mem_ctl_info *mci,
+				 struct i3000_error_info *info)
+{
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(mci->dev);
+
+	/*
+	 * This is a mess because there is no atomic way to read all the
+	 * registers at once, and a CE can be overwritten by a UE between
+	 * the two reads.
+	 */
+	pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts);
+	if (!(info->errsts & I3000_ERRSTS_BITS))
+		return;
+	pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
+	pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
+	pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
+	pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts2);
+
+	/*
+	 * If the error is the same for both reads then the first set
+	 * of reads is valid.  If there is a change then there is a CE
+	 * with no info and the second set of reads is valid and
+	 * should be UE info.
+	 */
+	if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
+		pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
+		pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
+		pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
+	}
+
+	/* Clear any error bits.
+	 * (Yes, we really clear bits by writing 1 to them.)
+	 */
+	pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
+			 I3000_ERRSTS_BITS);
+}
+
+static int i3000_process_error_info(struct mem_ctl_info *mci,
+				struct i3000_error_info *info,
+				int handle_errors)
+{
+	int row, multi_chan;
+	int pfn, offset, channel;
+
+	multi_chan = mci->csrows[0].nr_channels - 1;
+
+	if (!(info->errsts & I3000_ERRSTS_BITS))
+		return 0;
+
+	if (!handle_errors)
+		return 1;
+
+	if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
+		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+		info->errsts = info->errsts2;
+	}
+
+	pfn = I3000_DEAP_PFN(info->edeap, info->deap);
+	offset = I3000_DEAP_OFFSET(info->deap);
+	channel = I3000_DEAP_CHANNEL(info->deap);
+
+	row = edac_mc_find_csrow_by_page(mci, pfn);
+
+	if (info->errsts & I3000_ERRSTS_UE)
+		edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
+	else
+		edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
+				multi_chan ? channel : 0, "i3000 CE");
+
+	return 1;
+}
+
+static void i3000_check(struct mem_ctl_info *mci)
+{
+	struct i3000_error_info info;
+
+	debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+	i3000_get_error_info(mci, &info);
+	i3000_process_error_info(mci, &info, 1);
+}
+
+static int i3000_is_interleaved(const unsigned char *c0dra,
+				const unsigned char *c1dra,
+				const unsigned char *c0drb,
+				const unsigned char *c1drb)
+{
+	int i;
+
+	/* If the channels aren't populated identically then
+	 * we're not interleaved.
+	 */
+	for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++)
+		if (ODD_RANK_ATTRIB(c0dra[i]) != ODD_RANK_ATTRIB(c1dra[i]) ||
+			EVEN_RANK_ATTRIB(c0dra[i]) !=
+						EVEN_RANK_ATTRIB(c1dra[i]))
+			return 0;
+
+	/* If the rank boundaries for the two channels are different
+	 * then we're not interleaved.
+	 */
+	for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++)
+		if (c0drb[i] != c1drb[i])
+			return 0;
+
+	return 1;
+}
+
+static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	int rc;
+	int i;
+	struct mem_ctl_info *mci = NULL;
+	unsigned long last_cumul_size;
+	int interleaved, nr_channels;
+	unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
+	unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
+	unsigned char *c0drb = drb, *c1drb = &drb[I3000_RANKS_PER_CHANNEL];
+	unsigned long mchbar;
+	void *window;
+
+	debugf0("MC: %s()\n", __func__);
+
+	pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *)&mchbar);
+	mchbar &= I3000_MCHBAR_MASK;
+	window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE);
+	if (!window) {
+		printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
+			mchbar);
+		return -ENODEV;
+	}
+
+	c0dra[0] = readb(window + I3000_C0DRA + 0);	/* ranks 0,1 */
+	c0dra[1] = readb(window + I3000_C0DRA + 1);	/* ranks 2,3 */
+	c1dra[0] = readb(window + I3000_C1DRA + 0);	/* ranks 0,1 */
+	c1dra[1] = readb(window + I3000_C1DRA + 1);	/* ranks 2,3 */
+
+	for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++) {
+		c0drb[i] = readb(window + I3000_C0DRB + i);
+		c1drb[i] = readb(window + I3000_C1DRB + i);
+	}
+
+	iounmap(window);
+
+	/* Figure out how many channels we have.
+	 *
+	 * If we have what the datasheet calls "asymmetric channels"
+	 * (essentially the same as what was called "virtual single
+	 * channel mode" in the i82875) then it's a single channel as
+	 * far as EDAC is concerned.
+	 */
+	interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
+	nr_channels = interleaved ? 2 : 1;
+	mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0);
+	if (!mci)
+		return -ENOMEM;
+
+	debugf3("MC: %s(): init mci\n", __func__);
+
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_DDR2;
+
+	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+	mci->edac_cap = EDAC_FLAG_SECDED;
+
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = I3000_REVISION;
+	mci->ctl_name = i3000_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
+	mci->edac_check = i3000_check;
+	mci->ctl_page_to_phys = NULL;
+
+	/*
+	 * The dram rank boundary (DRB) reg values are boundary addresses
+	 * for each DRAM rank with a granularity of 32MB.  DRB regs are
+	 * cumulative; the last one will contain the total memory
+	 * contained in all ranks.
+	 *
+	 * If we're in interleaved mode then we're only walking through
+	 * the ranks of controller 0, so we double all the values we see.
+	 */
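+	/*
+	 * Worked example (illustrative only): a DRB value of 0x10 marks a
+	 * cumulative boundary of 16 * 32 MiB = 512 MiB; with 4 KiB pages
+	 * that is 0x10 << (25 - 12) = 0x20000 pages, doubled to 0x40000
+	 * pages (1 GiB) when the channels are interleaved.
+	 */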
+	for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) {
+		u8 value;
+		u32 cumul_size;
+		struct csrow_info *csrow = &mci->csrows[i];
+
+		value = drb[i];
+		cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
+		if (interleaved)
+			cumul_size <<= 1;
+		debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
+			__func__, i, cumul_size);
+		if (cumul_size == last_cumul_size) {
+			csrow->mtype = MEM_EMPTY;
+			continue;
+		}
+
+		csrow->first_page = last_cumul_size;
+		csrow->last_page = cumul_size - 1;
+		csrow->nr_pages = cumul_size - last_cumul_size;
+		last_cumul_size = cumul_size;
+		csrow->grain = I3000_DEAP_GRAIN;
+		csrow->mtype = MEM_DDR2;
+		csrow->dtype = DEV_UNKNOWN;
+		csrow->edac_mode = EDAC_UNKNOWN;
+	}
+
+	/* Clear any error bits.
+	 * (Yes, we really clear bits by writing 1 to them.)
+	 */
+	pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
+			 I3000_ERRSTS_BITS);
+
+	rc = -ENODEV;
+	if (edac_mc_add_mc(mci)) {
+		debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail;
+	}
+
+	/* allocating generic PCI control info */
+	i3000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!i3000_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
+	/* get this far and it's successful */
+	debugf3("MC: %s(): success\n", __func__);
+	return 0;
+
+fail:
+	if (mci)
+		edac_mc_free(mci);
+
+	return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i3000_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	int rc;
+
+	debugf0("MC: %s()\n", __func__);
+
+	if (pci_enable_device(pdev) < 0)
+		return -EIO;
+
+	rc = i3000_probe1(pdev, ent->driver_data);
+	if (mci_pdev == NULL)
+		mci_pdev = pci_dev_get(pdev);
+
+	return rc;
+}
+
+static void __devexit i3000_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+
+	debugf0("%s()\n", __func__);
+
+	if (i3000_pci)
+		edac_pci_release_generic_ctl(i3000_pci);
+
+	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+		return;
+
+	edac_mc_free(mci);
+}
+
+static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
+	{
+	 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 I3000},
+	{0,}			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i3000_pci_tbl);
+
+static struct pci_driver i3000_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = i3000_init_one,
+	.remove = __devexit_p(i3000_remove_one),
+	.id_table = i3000_pci_tbl,
+};
+
+static int __init i3000_init(void)
+{
+	int pci_rc;
+
+	debugf3("MC: %s()\n", __func__);
+	pci_rc = pci_register_driver(&i3000_driver);
+	if (pci_rc < 0)
+		goto fail0;
+
+	if (mci_pdev == NULL) {
+		i3000_registered = 0;
+		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+					PCI_DEVICE_ID_INTEL_3000_HB, NULL);
+		if (!mci_pdev) {
+			debugf0("i3000 pci_get_device fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+
+		pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl);
+		if (pci_rc < 0) {
+			debugf0("i3000 init fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+	}
+
+	return 0;
+
+fail1:
+	pci_unregister_driver(&i3000_driver);
+
+fail0:
+	if (mci_pdev)
+		pci_dev_put(mci_pdev);
+
+	return pci_rc;
+}
+
+static void __exit i3000_exit(void)
+{
+	debugf3("MC: %s()\n", __func__);
+
+	pci_unregister_driver(&i3000_driver);
+	if (!i3000_registered) {
+		i3000_remove_one(mci_pdev);
+		pci_dev_put(mci_pdev);
+	}
+}
+
+module_init(i3000_init);
+module_exit(i3000_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akamai Technologies Arthur Ulfeldt/Jason Uhlenkott");
+MODULE_DESCRIPTION("MC support for Intel 3000 memory hub controllers");
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
new file mode 100644
index 0000000..96f7e63
--- /dev/null
+++ b/drivers/edac/i5000_edac.c
@@ -0,0 +1,1505 @@
+/*
+ * Intel 5000(P/V/X) class Memory Controllers kernel module
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Douglas Thompson Linux Networx (http://lnxi.com)
+ *	norsk5@xmission.com
+ *
+ * This module is based on the following document:
+ *
+ * Intel 5000X Chipset Memory Controller Hub (MCH) - Datasheet
+ * 	http://developer.intel.com/design/chipsets/datashts/313070.htm
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include <asm/mmzone.h>
+
+#include "edac_core.h"
+
+/*
+ * Alter this version for the I5000 module when modifications are made
+ */
+#define I5000_REVISION    " Ver: 2.0.12 " __DATE__
+#define EDAC_MOD_STR      "i5000_edac"
+
+#define i5000_printk(level, fmt, arg...) \
+        edac_printk(level, "i5000", fmt, ##arg)
+
+#define i5000_mc_printk(mci, level, fmt, arg...) \
+        edac_mc_chipset_printk(mci, level, "i5000", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_FBD_0
+#define PCI_DEVICE_ID_INTEL_FBD_0	0x25F5
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_FBD_1
+#define PCI_DEVICE_ID_INTEL_FBD_1	0x25F6
+#endif
+
+/* Device 16,
+ * Function 0: System Address
+ * Function 1: Memory Branch Map, Control, Errors Register
+ * Function 2: FSB Error Registers
+ *
+ * All 3 functions of Device 16 (0,1,2) share the SAME DID
+ */
+#define	PCI_DEVICE_ID_INTEL_I5000_DEV16	0x25F0
+
+/* OFFSETS for Function 0 */
+
+/* OFFSETS for Function 1 */
+#define		AMBASE			0x48
+#define		MAXCH			0x56
+#define		MAXDIMMPERCH		0x57
+#define		TOLM			0x6C
+#define		REDMEMB			0x7C
+#define			RED_ECC_LOCATOR(x)	((x) & 0x3FFFF)
+#define			REC_ECC_LOCATOR_EVEN(x)	((x) & 0x001FF)
+#define			REC_ECC_LOCATOR_ODD(x)	((x) & 0x3FE00)
+#define		MIR0			0x80
+#define		MIR1			0x84
+#define		MIR2			0x88
+#define		AMIR0			0x8C
+#define		AMIR1			0x90
+#define		AMIR2			0x94
+
+#define		FERR_FAT_FBD		0x98
+#define		NERR_FAT_FBD		0x9C
+#define			EXTRACT_FBDCHAN_INDX(x)	(((x)>>28) & 0x3)
+#define			FERR_FAT_FBDCHAN 0x30000000
+#define			FERR_FAT_M3ERR	0x00000004
+#define			FERR_FAT_M2ERR	0x00000002
+#define			FERR_FAT_M1ERR	0x00000001
+#define			FERR_FAT_MASK	(FERR_FAT_M1ERR | \
+						FERR_FAT_M2ERR | \
+						FERR_FAT_M3ERR)
+
+#define		FERR_NF_FBD		0xA0
+
+/* Thermal and SPD or BFD errors */
+#define			FERR_NF_M28ERR	0x01000000
+#define			FERR_NF_M27ERR	0x00800000
+#define			FERR_NF_M26ERR	0x00400000
+#define			FERR_NF_M25ERR	0x00200000
+#define			FERR_NF_M24ERR	0x00100000
+#define			FERR_NF_M23ERR	0x00080000
+#define			FERR_NF_M22ERR	0x00040000
+#define			FERR_NF_M21ERR	0x00020000
+
+/* Correctable errors */
+#define			FERR_NF_M20ERR	0x00010000
+#define			FERR_NF_M19ERR	0x00008000
+#define			FERR_NF_M18ERR	0x00004000
+#define			FERR_NF_M17ERR	0x00002000
+
+/* Non-Retry or redundant Retry errors */
+#define			FERR_NF_M16ERR	0x00001000
+#define			FERR_NF_M15ERR	0x00000800
+#define			FERR_NF_M14ERR	0x00000400
+#define			FERR_NF_M13ERR	0x00000200
+
+/* Uncorrectable errors */
+#define			FERR_NF_M12ERR	0x00000100
+#define			FERR_NF_M11ERR	0x00000080
+#define			FERR_NF_M10ERR	0x00000040
+#define			FERR_NF_M9ERR	0x00000020
+#define			FERR_NF_M8ERR	0x00000010
+#define			FERR_NF_M7ERR	0x00000008
+#define			FERR_NF_M6ERR	0x00000004
+#define			FERR_NF_M5ERR	0x00000002
+#define			FERR_NF_M4ERR	0x00000001
+
+#define			FERR_NF_UNCORRECTABLE	(FERR_NF_M12ERR | \
+							FERR_NF_M11ERR | \
+							FERR_NF_M10ERR | \
+							FERR_NF_M8ERR | \
+							FERR_NF_M7ERR | \
+							FERR_NF_M6ERR | \
+							FERR_NF_M5ERR | \
+							FERR_NF_M4ERR)
+#define			FERR_NF_CORRECTABLE	(FERR_NF_M20ERR | \
+							FERR_NF_M19ERR | \
+							FERR_NF_M18ERR | \
+							FERR_NF_M17ERR)
+#define			FERR_NF_DIMM_SPARE	(FERR_NF_M27ERR | \
+							FERR_NF_M28ERR)
+#define			FERR_NF_THERMAL		(FERR_NF_M26ERR | \
+							FERR_NF_M25ERR | \
+							FERR_NF_M24ERR | \
+							FERR_NF_M23ERR)
+#define			FERR_NF_SPD_PROTOCOL	(FERR_NF_M22ERR)
+#define			FERR_NF_NORTH_CRC	(FERR_NF_M21ERR)
+#define			FERR_NF_NON_RETRY	(FERR_NF_M13ERR | \
+							FERR_NF_M14ERR | \
+							FERR_NF_M15ERR)
+
+#define		NERR_NF_FBD		0xA4
+#define			FERR_NF_MASK		(FERR_NF_UNCORRECTABLE | \
+							FERR_NF_CORRECTABLE | \
+							FERR_NF_DIMM_SPARE | \
+							FERR_NF_THERMAL | \
+							FERR_NF_SPD_PROTOCOL | \
+							FERR_NF_NORTH_CRC | \
+							FERR_NF_NON_RETRY)
+
+#define		EMASK_FBD		0xA8
+#define			EMASK_FBD_M28ERR	0x08000000
+#define			EMASK_FBD_M27ERR	0x04000000
+#define			EMASK_FBD_M26ERR	0x02000000
+#define			EMASK_FBD_M25ERR	0x01000000
+#define			EMASK_FBD_M24ERR	0x00800000
+#define			EMASK_FBD_M23ERR	0x00400000
+#define			EMASK_FBD_M22ERR	0x00200000
+#define			EMASK_FBD_M21ERR	0x00100000
+#define			EMASK_FBD_M20ERR	0x00080000
+#define			EMASK_FBD_M19ERR	0x00040000
+#define			EMASK_FBD_M18ERR	0x00020000
+#define			EMASK_FBD_M17ERR	0x00010000
+
+#define			EMASK_FBD_M15ERR	0x00004000
+#define			EMASK_FBD_M14ERR	0x00002000
+#define			EMASK_FBD_M13ERR	0x00001000
+#define			EMASK_FBD_M12ERR	0x00000800
+#define			EMASK_FBD_M11ERR	0x00000400
+#define			EMASK_FBD_M10ERR	0x00000200
+#define			EMASK_FBD_M9ERR		0x00000100
+#define			EMASK_FBD_M8ERR		0x00000080
+#define			EMASK_FBD_M7ERR		0x00000040
+#define			EMASK_FBD_M6ERR		0x00000020
+#define			EMASK_FBD_M5ERR		0x00000010
+#define			EMASK_FBD_M4ERR		0x00000008
+#define			EMASK_FBD_M3ERR		0x00000004
+#define			EMASK_FBD_M2ERR		0x00000002
+#define			EMASK_FBD_M1ERR		0x00000001
+
+#define			ENABLE_EMASK_FBD_FATAL_ERRORS	(EMASK_FBD_M1ERR | \
+							EMASK_FBD_M2ERR | \
+							EMASK_FBD_M3ERR)
+
+#define 		ENABLE_EMASK_FBD_UNCORRECTABLE	(EMASK_FBD_M4ERR | \
+							EMASK_FBD_M5ERR | \
+							EMASK_FBD_M6ERR | \
+							EMASK_FBD_M7ERR | \
+							EMASK_FBD_M8ERR | \
+							EMASK_FBD_M9ERR | \
+							EMASK_FBD_M10ERR | \
+							EMASK_FBD_M11ERR | \
+							EMASK_FBD_M12ERR)
+#define 		ENABLE_EMASK_FBD_CORRECTABLE	(EMASK_FBD_M17ERR | \
+							EMASK_FBD_M18ERR | \
+							EMASK_FBD_M19ERR | \
+							EMASK_FBD_M20ERR)
+#define			ENABLE_EMASK_FBD_DIMM_SPARE	(EMASK_FBD_M27ERR | \
+							EMASK_FBD_M28ERR)
+#define			ENABLE_EMASK_FBD_THERMALS	(EMASK_FBD_M26ERR | \
+							EMASK_FBD_M25ERR | \
+							EMASK_FBD_M24ERR | \
+							EMASK_FBD_M23ERR)
+#define			ENABLE_EMASK_FBD_SPD_PROTOCOL	(EMASK_FBD_M22ERR)
+#define			ENABLE_EMASK_FBD_NORTH_CRC	(EMASK_FBD_M21ERR)
+#define			ENABLE_EMASK_FBD_NON_RETRY	(EMASK_FBD_M15ERR | \
+							EMASK_FBD_M14ERR | \
+							EMASK_FBD_M13ERR)
+
+#define		ENABLE_EMASK_ALL	(ENABLE_EMASK_FBD_NON_RETRY | \
+					ENABLE_EMASK_FBD_NORTH_CRC | \
+					ENABLE_EMASK_FBD_SPD_PROTOCOL | \
+					ENABLE_EMASK_FBD_THERMALS | \
+					ENABLE_EMASK_FBD_DIMM_SPARE | \
+					ENABLE_EMASK_FBD_FATAL_ERRORS | \
+					ENABLE_EMASK_FBD_CORRECTABLE | \
+					ENABLE_EMASK_FBD_UNCORRECTABLE)
+
+#define		ERR0_FBD		0xAC
+#define		ERR1_FBD		0xB0
+#define		ERR2_FBD		0xB4
+#define		MCERR_FBD		0xB8
+#define		NRECMEMA		0xBE
+#define			NREC_BANK(x)		(((x)>>12) & 0x7)
+#define			NREC_RDWR(x)		(((x)>>11) & 1)
+#define			NREC_RANK(x)		(((x)>>8) & 0x7)
+#define		NRECMEMB		0xC0
+#define			NREC_CAS(x)		(((x)>>16) & 0xFFFFFF)
+#define			NREC_RAS(x)		((x) & 0x7FFF)
+#define		NRECFGLOG		0xC4
+#define		NREEECFBDA		0xC8
+#define		NREEECFBDB		0xCC
+#define		NREEECFBDC		0xD0
+#define		NREEECFBDD		0xD4
+#define		NREEECFBDE		0xD8
+#define		REDMEMA			0xDC
+#define		RECMEMA			0xE2
+#define			REC_BANK(x)		(((x)>>12) & 0x7)
+#define			REC_RDWR(x)		(((x)>>11) & 1)
+#define			REC_RANK(x)		(((x)>>8) & 0x7)
+#define		RECMEMB			0xE4
+#define			REC_CAS(x)		(((x)>>16) & 0xFFFFFF)
+#define			REC_RAS(x)		((x) & 0x7FFF)
+#define		RECFGLOG		0xE8
+#define		RECFBDA			0xEC
+#define		RECFBDB			0xF0
+#define		RECFBDC			0xF4
+#define		RECFBDD			0xF8
+#define		RECFBDE			0xFC
+
+/* OFFSETS for Function 2 */
+
+/*
+ * Device 21,
+ * Function 0: Memory Map Branch 0
+ *
+ * Device 22,
+ * Function 0: Memory Map Branch 1
+ */
+#define PCI_DEVICE_ID_I5000_BRANCH_0	0x25F5
+#define PCI_DEVICE_ID_I5000_BRANCH_1	0x25F6
+
+#define AMB_PRESENT_0	0x64
+#define AMB_PRESENT_1	0x66
+#define MTR0		0x80
+#define MTR1		0x84
+#define MTR2		0x88
+#define MTR3		0x8C
+
+#define NUM_MTRS		4
+#define CHANNELS_PER_BRANCH	(2)
+
+/* Defines to extract the various fields from the
+ *	MTRx - Memory Technology Registers
+ */
+#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (0x1 << 8))
+#define MTR_DRAM_WIDTH(mtr)		((((mtr) >> 6) & 0x1) ? 8 : 4)
+#define MTR_DRAM_BANKS(mtr)		((((mtr) >> 5) & 0x1) ? 8 : 4)
+#define MTR_DRAM_BANKS_ADDR_BITS(mtr)	((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
+#define MTR_DIMM_RANK(mtr)		(((mtr) >> 4) & 0x1)
+#define MTR_DIMM_RANK_ADDR_BITS(mtr)	(MTR_DIMM_RANK(mtr) ? 2 : 1)
+#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
+#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
+#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
+#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)
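+/*
+ * Illustrative decode (not a documented example): an MTR value of 0x125
+ * means DIMMs present (bit 8), x4 DRAM width, 8 banks, single rank,
+ * 14 row address bits and 11 column address bits.
+ */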
+
+#ifdef CONFIG_EDAC_DEBUG
+static char *numrow_toString[] = {
+	"8,192 - 13 rows",
+	"16,384 - 14 rows",
+	"32,768 - 15 rows",
+	"reserved"
+};
+
+static char *numcol_toString[] = {
+	"1,024 - 10 columns",
+	"2,048 - 11 columns",
+	"4,096 - 12 columns",
+	"reserved"
+};
+#endif
+
+/* Enumeration of supported devices */
+enum i5000_chips {
+	I5000P = 0,
+	I5000V = 1,		/* future */
+	I5000X = 2		/* future */
+};
+
+/* Device name and register DID (Device ID) */
+struct i5000_dev_info {
+	const char *ctl_name;	/* name for this device */
+	u16 fsb_mapping_errors;	/* DID for the branchmap,control */
+};
+
+/* Table of devices attributes supported by this driver */
+static const struct i5000_dev_info i5000_devs[] = {
+	[I5000P] = {
+		.ctl_name = "I5000",
+		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I5000_DEV16,
+	},
+};
+
+struct i5000_dimm_info {
+	int megabytes;		/* size, 0 means not present  */
+	int dual_rank;
+};
+
+#define	MAX_CHANNELS	6	/* max possible channels */
+#define MAX_CSROWS	(8*2)	/* max possible csrows per channel */
+
+/* driver private data structure */
+struct i5000_pvt {
+	struct pci_dev *system_address;	/* 16.0 */
+	struct pci_dev *branchmap_werrors;	/* 16.1 */
+	struct pci_dev *fsb_error_regs;	/* 16.2 */
+	struct pci_dev *branch_0;	/* 21.0 */
+	struct pci_dev *branch_1;	/* 22.0 */
+
+	u16 tolm;		/* top of low memory */
+	u64 ambase;		/* AMB BAR */
+
+	u16 mir0, mir1, mir2;
+
+	u16 b0_mtr[NUM_MTRS];	/* Memory Technology Reg */
+	u16 b0_ambpresent0;	/* Branch 0, Channel 0 */
+	u16 b0_ambpresent1;	/* Branch 0, Channel 1 */
+
+	u16 b1_mtr[NUM_MTRS];	/* Memory Technology Reg */
+	u16 b1_ambpresent0;	/* Branch 1, Channel 0 */
+	u16 b1_ambpresent1;	/* Branch 1, Channel 1 */
+
+	/* DIMM information matrix, allocating architecture maximums */
+	struct i5000_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
+
+	/* Actual values for this controller */
+	int maxch;		/* Max channels */
+	int maxdimmperch;	/* Max DIMMs per channel */
+};
+
+/* I5000 MCH error information retrieved from Hardware */
+struct i5000_error_info {
+
+	/* These registers are always read from the MC */
+	u32 ferr_fat_fbd;	/* First Errors Fatal */
+	u32 nerr_fat_fbd;	/* Next Errors Fatal */
+	u32 ferr_nf_fbd;	/* First Errors Non-Fatal */
+	u32 nerr_nf_fbd;	/* Next Errors Non-Fatal */
+
+	/* These registers are read ONLY if there was a Recoverable Error */
+	u32 redmemb;		/* Recoverable Mem Data Error log B */
+	u16 recmema;		/* Recoverable Mem Error log A */
+	u32 recmemb;		/* Recoverable Mem Error log B */
+
+	/* These registers are read ONLY if there was a
+	 * Non-Recoverable Error */
+	u16 nrecmema;		/* Non-Recoverable Mem log A */
+	u16 nrecmemb;		/* Non-Recoverable Mem log B */
+
+};
+
+static struct edac_pci_ctl_info *i5000_pci;
+
+/*
+ *	i5000_get_error_info	Retrieve the hardware error information from
+ *				the hardware and cache it in the 'info'
+ *				structure
+ */
+static void i5000_get_error_info(struct mem_ctl_info *mci,
+				 struct i5000_error_info *info)
+{
+	struct i5000_pvt *pvt;
+	u32 value;
+
+	pvt = mci->pvt_info;
+
+	/* read in the 1st FATAL error register */
+	pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);
+
+	/* Mask only the bits that the doc says are valid
+	 */
+	value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);
+
+	/* If there is an error, then read in the */
+	/* NEXT FATAL error register and the Memory Error Log Register A */
+	if (value & FERR_FAT_MASK) {
+		info->ferr_fat_fbd = value;
+
+		/* harvest the various error data we need */
+		pci_read_config_dword(pvt->branchmap_werrors,
+				NERR_FAT_FBD, &info->nerr_fat_fbd);
+		pci_read_config_word(pvt->branchmap_werrors,
+				NRECMEMA, &info->nrecmema);
+		pci_read_config_word(pvt->branchmap_werrors,
+				NRECMEMB, &info->nrecmemb);
+
+		/* Clear the error bits, by writing them back */
+		pci_write_config_dword(pvt->branchmap_werrors,
+				FERR_FAT_FBD, value);
+	} else {
+		info->ferr_fat_fbd = 0;
+		info->nerr_fat_fbd = 0;
+		info->nrecmema = 0;
+		info->nrecmemb = 0;
+	}
+
+	/* read in the 1st NON-FATAL error register */
+	pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);
+
+	/* If there is an error, then read in the NEXT NON-FATAL error
+	 * register and the recoverable error log registers as well */
+	if (value & FERR_NF_MASK) {
+		info->ferr_nf_fbd = value;
+
+		/* harvest the various error data we need */
+		pci_read_config_dword(pvt->branchmap_werrors,
+				NERR_NF_FBD, &info->nerr_nf_fbd);
+		pci_read_config_word(pvt->branchmap_werrors,
+				RECMEMA, &info->recmema);
+		pci_read_config_dword(pvt->branchmap_werrors,
+				RECMEMB, &info->recmemb);
+		pci_read_config_dword(pvt->branchmap_werrors,
+				REDMEMB, &info->redmemb);
+
+		/* Clear the error bits, by writing them back */
+		pci_write_config_dword(pvt->branchmap_werrors,
+				FERR_NF_FBD, value);
+	} else {
+		info->ferr_nf_fbd = 0;
+		info->nerr_nf_fbd = 0;
+		info->recmema = 0;
+		info->recmemb = 0;
+		info->redmemb = 0;
+	}
+}
+
+/*
+ * i5000_process_fatal_error_info(struct mem_ctl_info *mci,
+ * 					struct i5000_error_info *info,
+ * 					int handle_errors);
+ *
+ *	handle the Intel FATAL errors, if any
+ */
+static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
+					struct i5000_error_info *info,
+					int handle_errors)
+{
+	char msg[EDAC_MC_LABEL_LEN + 1 + 90];
+	u32 allErrors;
+	int branch;
+	int channel;
+	int bank;
+	int rank;
+	int rdwr;
+	int ras, cas;
+
+	/* mask off the Error bits that are possible */
+	allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK);
+	if (!allErrors)
+		return;		/* if no error, return now */
+
+	/* ONLY ONE of the possible error bits will be set, as per the docs */
+	i5000_mc_printk(mci, KERN_ERR,
+			"FATAL ERRORS Found!!! 1st FATAL Err Reg= 0x%x\n",
+			allErrors);
+
+	branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
+	channel = branch;
+
+	/* Use the NON-Recoverable macros to extract data */
+	bank = NREC_BANK(info->nrecmema);
+	rank = NREC_RANK(info->nrecmema);
+	rdwr = NREC_RDWR(info->nrecmema);
+	ras = NREC_RAS(info->nrecmemb);
+	cas = NREC_CAS(info->nrecmemb);
+
+	debugf0("\t\tCSROW= %d  Channels= %d,%d  (Branch= %d "
+		"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+		rank, channel, channel + 1, branch >> 1, bank,
+		rdwr ? "Write" : "Read", ras, cas);
+
+	/* Only 1 bit will be on */
+	if (allErrors & FERR_FAT_M1ERR) {
+		i5000_mc_printk(mci, KERN_ERR,
+				"Alert on non-redundant retry or fast "
+				"reset timeout\n");
+
+	} else if (allErrors & FERR_FAT_M2ERR) {
+		i5000_mc_printk(mci, KERN_ERR,
+				"Northbound CRC error on non-redundant "
+				"retry\n");
+
+	} else if (allErrors & FERR_FAT_M3ERR) {
+		i5000_mc_printk(mci, KERN_ERR,
+				">Tmid Thermal event with intelligent "
+				"throttling disabled\n");
+	}
+
+	/* Form out message */
+	snprintf(msg, sizeof(msg),
+		 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d "
+		 "FATAL Err=0x%x)",
+		 branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
+		 allErrors);
+
+	/* Call the helper to output message */
+	edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+}
+
+/*
+ * i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
+ * 				struct i5000_error_info *info,
+ * 				int handle_errors);
+ *
+ *	handle the Intel NON-FATAL errors, if any
+ */
+static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
+					struct i5000_error_info *info,
+					int handle_errors)
+{
+	char msg[EDAC_MC_LABEL_LEN + 1 + 90];
+	u32 allErrors;
+	u32 ue_errors;
+	u32 ce_errors;
+	u32 misc_errors;
+	int branch;
+	int channel;
+	int bank;
+	int rank;
+	int rdwr;
+	int ras, cas;
+
+	/* mask off the Error bits that are possible */
+	allErrors = (info->ferr_nf_fbd & FERR_NF_MASK);
+	if (!allErrors)
+		return;		/* if no error, return now */
+
+	/* ONLY ONE of the possible error bits will be set, as per the docs */
+	i5000_mc_printk(mci, KERN_WARNING,
+			"NON-FATAL ERRORS Found!!! 1st NON-FATAL Err "
+			"Reg= 0x%x\n", allErrors);
+
+	ue_errors = allErrors & FERR_NF_UNCORRECTABLE;
+	if (ue_errors) {
+		debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
+
+		branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
+		channel = branch;
+		bank = NREC_BANK(info->nrecmema);
+		rank = NREC_RANK(info->nrecmema);
+		rdwr = NREC_RDWR(info->nrecmema);
+		ras = NREC_RAS(info->nrecmemb);
+		cas = NREC_CAS(info->nrecmemb);
+
+		debugf0
+			("\t\tCSROW= %d  Channels= %d,%d  (Branch= %d "
+			"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+			rank, channel, channel + 1, branch >> 1, bank,
+			rdwr ? "Write" : "Read", ras, cas);
+
+		/* Form out message */
+		snprintf(msg, sizeof(msg),
+			 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
+			 "CAS=%d, UE Err=0x%x)",
+			 branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
+			 ue_errors);
+
+		/* Call the helper to output message */
+		edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+	}
+
+	/* Check correctable errors */
+	ce_errors = allErrors & FERR_NF_CORRECTABLE;
+	if (ce_errors) {
+		debugf0("\tCorrected bits= 0x%x\n", ce_errors);
+
+		branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
+
+		channel = 0;
+		if (REC_ECC_LOCATOR_ODD(info->redmemb))
+			channel = 1;
+
+		/* Convert channel to be based from zero, instead of
+		 * from branch base of 0 */
+		channel += branch;
+
+		bank = REC_BANK(info->recmema);
+		rank = REC_RANK(info->recmema);
+		rdwr = REC_RDWR(info->recmema);
+		ras = REC_RAS(info->recmemb);
+		cas = REC_CAS(info->recmemb);
+
+		debugf0("\t\tCSROW= %d Channel= %d  (Branch %d "
+			"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+			rank, channel, branch >> 1, bank,
+			rdwr ? "Write" : "Read", ras, cas);
+
+		/* Form out message */
+		snprintf(msg, sizeof(msg),
+			 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
+			 "CAS=%d, CE Err=0x%x)", branch >> 1, bank,
+			 rdwr ? "Write" : "Read", ras, cas, ce_errors);
+
+		/* Call the helper to output message */
+		edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+	}
+
+	/* See if any of the thermal errors have fired */
+	misc_errors = allErrors & FERR_NF_THERMAL;
+	if (misc_errors) {
+		i5000_printk(KERN_WARNING, "\tTHERMAL Error, bits= 0x%x\n",
+			misc_errors);
+	}
+
+	/* See if any of the non-retry errors have fired */
+	misc_errors = allErrors & FERR_NF_NON_RETRY;
+	if (misc_errors) {
+		i5000_printk(KERN_WARNING, "\tNON-Retry  Errors, bits= 0x%x\n",
+			misc_errors);
+	}
+
+	/* See if any of the northbound CRC errors have fired */
+	misc_errors = allErrors & FERR_NF_NORTH_CRC;
+	if (misc_errors) {
+		i5000_printk(KERN_WARNING,
+			"\tNORTHBOUND CRC  Error, bits= 0x%x\n",
+			misc_errors);
+	}
+
+	/* See if any of the SPD protocol errors have fired */
+	misc_errors = allErrors & FERR_NF_SPD_PROTOCOL;
+	if (misc_errors) {
+		i5000_printk(KERN_WARNING,
+			"\tSPD Protocol  Error, bits= 0x%x\n",
+			misc_errors);
+	}
+
+	/* See if any of the DIMM-spare errors have fired */
+	misc_errors = allErrors & FERR_NF_DIMM_SPARE;
+	if (misc_errors) {
+		i5000_printk(KERN_WARNING, "\tDIMM-Spare  Error, bits= 0x%x\n",
+			misc_errors);
+	}
+}
+
+/*
+ *	i5000_process_error_info	Process the error info that is
+ *	in the 'info' structure, previously retrieved from hardware
+ */
+static void i5000_process_error_info(struct mem_ctl_info *mci,
+				struct i5000_error_info *info,
+				int handle_errors)
+{
+	/* First handle any fatal errors that occurred */
+	i5000_process_fatal_error_info(mci, info, handle_errors);
+
+	/* now handle any non-fatal errors that occurred */
+	i5000_process_nonfatal_error_info(mci, info, handle_errors);
+}
+
+/*
+ *	i5000_clear_error	Retrieve any error from the hardware
+ *				but do NOT process that error.
+ *				Used for 'clearing' out of previous errors
+ *				Called by the Core module.
+ */
+static void i5000_clear_error(struct mem_ctl_info *mci)
+{
+	struct i5000_error_info info;
+
+	i5000_get_error_info(mci, &info);
+}
+
+/*
+ *	i5000_check_error	Retrieve and process errors reported by the
+ *				hardware. Called by the Core module.
+ */
+static void i5000_check_error(struct mem_ctl_info *mci)
+{
+	struct i5000_error_info info;
+	debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+	i5000_get_error_info(mci, &info);
+	i5000_process_error_info(mci, &info, 1);
+}
+
+/*
+ *	i5000_get_devices	Find and perform 'get' operation on the MCH's
+ *			device/functions we want to reference for this driver
+ *
+ *			Need to 'get' device 16 func 1 and func 2
+ */
+static int i5000_get_devices(struct mem_ctl_info *mci, int dev_idx)
+{
+	//const struct i5000_dev_info *i5000_dev = &i5000_devs[dev_idx];
+	struct i5000_pvt *pvt;
+	struct pci_dev *pdev;
+
+	pvt = mci->pvt_info;
+
+	/* Attempt to 'get' the MCH register we want */
+	pdev = NULL;
+	while (1) {
+		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
+
+		/* End of list, leave */
+		if (pdev == NULL) {
+			i5000_printk(KERN_ERR,
+				"'system address,Process Bus' "
+				"device not found:"
+				"vendor 0x%x device 0x%x FUNC 1 "
+				"(broken BIOS?)\n",
+				PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_INTEL_I5000_DEV16);
+
+			return 1;
+		}
+
+		/* Scan for device 16 func 1 */
+		if (PCI_FUNC(pdev->devfn) == 1)
+			break;
+	}
+
+	pvt->branchmap_werrors = pdev;
+
+	/* Attempt to 'get' the MCH register we want */
+	pdev = NULL;
+	while (1) {
+		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
+
+		if (pdev == NULL) {
+			i5000_printk(KERN_ERR,
+				"MC: 'branchmap,control,errors' "
+				"device not found:"
+				"vendor 0x%x device 0x%x Func 2 "
+				"(broken BIOS?)\n",
+				PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_INTEL_I5000_DEV16);
+
+			pci_dev_put(pvt->branchmap_werrors);
+			return 1;
+		}
+
+		/* Scan for device 16 func 2 */
+		if (PCI_FUNC(pdev->devfn) == 2)
+			break;
+	}
+
+	pvt->fsb_error_regs = pdev;
+
+	debugf1("System Address, processor bus- PCI Bus ID: %s  %x:%x\n",
+		pci_name(pvt->system_address),
+		pvt->system_address->vendor, pvt->system_address->device);
+	debugf1("Branchmap, control and errors - PCI Bus ID: %s  %x:%x\n",
+		pci_name(pvt->branchmap_werrors),
+		pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
+	debugf1("FSB Error Regs - PCI Bus ID: %s  %x:%x\n",
+		pci_name(pvt->fsb_error_regs),
+		pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
+
+	pdev = NULL;
+	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+			PCI_DEVICE_ID_I5000_BRANCH_0, pdev);
+
+	if (pdev == NULL) {
+		i5000_printk(KERN_ERR,
+			"MC: 'BRANCH 0' device not found:"
+			"vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
+			PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_I5000_BRANCH_0);
+
+		pci_dev_put(pvt->branchmap_werrors);
+		pci_dev_put(pvt->fsb_error_regs);
+		return 1;
+	}
+
+	pvt->branch_0 = pdev;
+
+	/* If this device claims to have more than 2 channels then
+	 * fetch Branch 1's information
+	 */
+	if (pvt->maxch >= CHANNELS_PER_BRANCH) {
+		pdev = NULL;
+		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_I5000_BRANCH_1, pdev);
+
+		if (pdev == NULL) {
+			i5000_printk(KERN_ERR,
+				"MC: 'BRANCH 1' device not found:"
+				"vendor 0x%x device 0x%x Func 0 "
+				"(broken BIOS?)\n",
+				PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_I5000_BRANCH_1);
+
+			pci_dev_put(pvt->branchmap_werrors);
+			pci_dev_put(pvt->fsb_error_regs);
+			pci_dev_put(pvt->branch_0);
+			return 1;
+		}
+
+		pvt->branch_1 = pdev;
+	}
+
+	return 0;
+}
+
+/*
+ *	i5000_put_devices	'put' all the devices that we have
+ *				reserved via 'get'
+ */
+static void i5000_put_devices(struct mem_ctl_info *mci)
+{
+	struct i5000_pvt *pvt;
+
+	pvt = mci->pvt_info;
+
+	pci_dev_put(pvt->branchmap_werrors);	/* FUNC 1 */
+	pci_dev_put(pvt->fsb_error_regs);	/* FUNC 2 */
+	pci_dev_put(pvt->branch_0);	/* DEV 21 */
+
+	/* Only if more than 2 channels do we release the second branch */
+	if (pvt->maxch >= CHANNELS_PER_BRANCH)
+		pci_dev_put(pvt->branch_1);	/* DEV 22 */
+}
+
+/*
+ *	determine_amb_present_reg
+ *
+ *		the AMB-present information is spread across four registers;
+ *		determining which one to use requires knowing
+ *		which channel is in question
+ *
+ *	2 branches, each with 2 channels
+ *		b0_ambpresent0 for channel '0'
+ *		b0_ambpresent1 for channel '1'
+ *		b1_ambpresent0 for channel '2'
+ *		b1_ambpresent1 for channel '3'
+ */
+static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel)
+{
+	int amb_present;
+
+	if (channel < CHANNELS_PER_BRANCH) {
+		if (channel & 0x1)
+			amb_present = pvt->b0_ambpresent1;
+		else
+			amb_present = pvt->b0_ambpresent0;
+	} else {
+		if (channel & 0x1)
+			amb_present = pvt->b1_ambpresent1;
+		else
+			amb_present = pvt->b1_ambpresent0;
+	}
+
+	return amb_present;
+}
+
+/*
+ * determine_mtr(pvt, csrow, channel)
+ *
+ *	return the proper MTR register as determined by the csrow and channel desired
+ */
+static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel)
+{
+	int mtr;
+
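+	/* e.g. csrow 5 on channel 2 selects b1_mtr[2] (illustrative note) */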
+	if (channel < CHANNELS_PER_BRANCH)
+		mtr = pvt->b0_mtr[csrow >> 1];
+	else
+		mtr = pvt->b1_mtr[csrow >> 1];
+
+	return mtr;
+}
+
+/*
+ *	decode_mtr	Dump (via debug messages) the fields of a single MTR
+ */
+static void decode_mtr(int slot_row, u16 mtr)
+{
+	int ans;
+
+	ans = MTR_DIMMS_PRESENT(mtr);
+
+	debugf2("\tMTR%d=0x%x:  DIMMs are %s\n", slot_row, mtr,
+		ans ? "Present" : "NOT Present");
+	if (!ans)
+		return;
+
+	debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
+	debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
+	debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
+	debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
+	debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
+}
+
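+/*
+ * handle_channel	fill in the dimm_info entry for one csrow/channel
+ *
+ * Size math, as an illustrative example: with 8 banks (3 bits), 14 row
+ * bits and 11 column bits, addrBits = 3 + 14 + 11 + 6 (64 data bits per
+ * address) - 3 (bits to bytes) - 20 (bytes to MiB) = 11, i.e. 2048 MB.
+ */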
+static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
+			struct i5000_dimm_info *dinfo)
+{
+	int mtr;
+	int amb_present_reg;
+	int addrBits;
+
+	mtr = determine_mtr(pvt, csrow, channel);
+	if (MTR_DIMMS_PRESENT(mtr)) {
+		amb_present_reg = determine_amb_present_reg(pvt, channel);
+
+		/* Determine if there is a DIMM present in this DIMM slot */
+		if (amb_present_reg & (1 << (csrow >> 1))) {
+			dinfo->dual_rank = MTR_DIMM_RANK(mtr);
+
+			if (!((dinfo->dual_rank == 0) &&
+				((csrow & 0x1) == 0x1))) {
+				/* Start with the number of bits for a Bank
+				 * on the DRAM */
+				addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
+				/* Add the number of ROW bits */
+				addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
+				/* add the number of COLUMN bits */
+				addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
+
+				addrBits += 6;	/* 64 bits of data per address */
+				addrBits -= 20;	/* divide by 2^20 for MiB */
+				addrBits -= 3;	/* 8 bits per byte */
+
+				dinfo->megabytes = 1 << addrBits;
+			}
+		}
+	}
+}
+
+/*
+ *	calculate_dimm_size
+ *
+ *	also will output a DIMM matrix map, if debug is enabled, for viewing
+ *	how the DIMMs are populated
+ */
+static void calculate_dimm_size(struct i5000_pvt *pvt)
+{
+	struct i5000_dimm_info *dinfo;
+	int csrow, max_csrows;
+	char *p, *mem_buffer;
+	int space, n;
+	int channel;
+
+	/* ================= Generate some debug output ================= */
+	space = PAGE_SIZE;
+	mem_buffer = p = kmalloc(space, GFP_KERNEL);
+	if (p == NULL) {
+		i5000_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
+			__FILE__, __func__);
+		return;
+	}
+
+	n = snprintf(p, space, "\n");
+	p += n;
+	space -= n;
+
+	/* Scan all the actual CSROWS (which is # of DIMMS * 2)
+	 * and calculate the information for each DIMM
+	 * Start with the highest csrow first, to display it first
+	 * and work toward the 0th csrow
+	 */
+	max_csrows = pvt->maxdimmperch * 2;
+	for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+
+		/* on an odd csrow, first output a 'boundary' marker,
+		 * then reset the message buffer  */
+		if (csrow & 0x1) {
+			n = snprintf(p, space, "---------------------------"
+				"--------------------------------");
+			p += n;
+			space -= n;
+			debugf2("%s\n", mem_buffer);
+			p = mem_buffer;
+			space = PAGE_SIZE;
+		}
+		n = snprintf(p, space, "csrow %2d    ", csrow);
+		p += n;
+		space -= n;
+
+		for (channel = 0; channel < pvt->maxch; channel++) {
+			dinfo = &pvt->dimm_info[csrow][channel];
+			handle_channel(pvt, csrow, channel, dinfo);
+			n = snprintf(p, space, "%4d MB   | ", dinfo->megabytes);
+			p += n;
+			space -= n;
+		}
+		n = snprintf(p, space, "\n");
+		p += n;
+		space -= n;
+	}
+
+	/* Output the last bottom 'boundary' marker */
+	n = snprintf(p, space, "---------------------------"
+		"--------------------------------\n");
+	p += n;
+	space -= n;
+
+	/* now output the 'channel' labels */
+	n = snprintf(p, space, "            ");
+	p += n;
+	space -= n;
+	for (channel = 0; channel < pvt->maxch; channel++) {
+		n = snprintf(p, space, "channel %d | ", channel);
+		p += n;
+		space -= n;
+	}
+	n = snprintf(p, space, "\n");
+	p += n;
+	space -= n;
+
+	/* output the last message and free buffer */
+	debugf2("%s\n", mem_buffer);
+	kfree(mem_buffer);
+}
+
+/*
+ *	i5000_get_mc_regs	read in the necessary registers and
+ *				cache locally
+ *
+ *			Fills in the private data members
+ */
+static void i5000_get_mc_regs(struct mem_ctl_info *mci)
+{
+	struct i5000_pvt *pvt;
+	u32 actual_tolm;
+	u16 limit;
+	int slot_row;
+	int maxch;
+	int maxdimmperch;
+	int way0, way1;
+
+	pvt = mci->pvt_info;
+
+	pci_read_config_dword(pvt->system_address, AMBASE,
+			(u32 *)&pvt->ambase);
+	pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
+			((u32 *)&pvt->ambase) + 1);
+
+	maxdimmperch = pvt->maxdimmperch;
+	maxch = pvt->maxch;
+
+	debugf2("AMBASE= 0x%lx  MAXCH= %d  MAX-DIMM-Per-CH= %d\n",
+		(long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
+
+	/* Get the Branch Map regs */
+	pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
+	pvt->tolm >>= 12;
+	debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
+		pvt->tolm);
+
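+	/*
+	 * Illustrative note: the shifted TOLM value counts 256 MiB regions,
+	 * so a value of 0x8 corresponds to actual_tolm = 0x8 << 28 = 2 GiB.
+	 */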
+	actual_tolm = pvt->tolm << 28;
+	debugf2("Actual TOLM byte addr=%u (0x%x)\n", actual_tolm, actual_tolm);
+
+	pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
+	pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
+	pci_read_config_word(pvt->branchmap_werrors, MIR2, &pvt->mir2);
+
+	/* Decode the MIR[0-2] regs */
+	limit = (pvt->mir0 >> 4) & 0x0FFF;
+	way0 = pvt->mir0 & 0x1;
+	way1 = pvt->mir0 & 0x2;
+	debugf2("MIR0: limit= 0x%x  WAY1= %u  WAY0= %x\n", limit, way1, way0);
+	limit = (pvt->mir1 >> 4) & 0x0FFF;
+	way0 = pvt->mir1 & 0x1;
+	way1 = pvt->mir1 & 0x2;
+	debugf2("MIR1: limit= 0x%x  WAY1= %u  WAY0= %x\n", limit, way1, way0);
+	limit = (pvt->mir2 >> 4) & 0x0FFF;
+	way0 = pvt->mir2 & 0x1;
+	way1 = pvt->mir2 & 0x2;
+	debugf2("MIR2: limit= 0x%x  WAY1= %u  WAY0= %x\n", limit, way1, way0);
+
+	/* Get the MTR[0-3] regs */
+	for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
+		int where = MTR0 + (slot_row * sizeof(u32));
+
+		pci_read_config_word(pvt->branch_0, where,
+				&pvt->b0_mtr[slot_row]);
+
+		debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
+			pvt->b0_mtr[slot_row]);
+
+		if (pvt->maxch >= CHANNELS_PER_BRANCH) {
+			pci_read_config_word(pvt->branch_1, where,
+					&pvt->b1_mtr[slot_row]);
+			debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row,
+				where, pvt->b1_mtr[slot_row]);
+		} else {
+			pvt->b1_mtr[slot_row] = 0;
+		}
+	}
+
+	/* Read and dump branch 0's MTRs */
+	debugf2("\nMemory Technology Registers:\n");
+	debugf2("   Branch 0:\n");
+	for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
+		decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
+	}
+	pci_read_config_word(pvt->branch_0, AMB_PRESENT_0,
+			&pvt->b0_ambpresent0);
+	debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
+	pci_read_config_word(pvt->branch_0, AMB_PRESENT_1,
+			&pvt->b0_ambpresent1);
+	debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
+
+	/* Only if we have 2 branches (4 channels) */
+	if (pvt->maxch < CHANNELS_PER_BRANCH) {
+		pvt->b1_ambpresent0 = 0;
+		pvt->b1_ambpresent1 = 0;
+	} else {
+		/* Read and dump  branch 1's MTRs */
+		debugf2("   Branch 1:\n");
+		for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
+			decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
+		}
+		pci_read_config_word(pvt->branch_1, AMB_PRESENT_0,
+				&pvt->b1_ambpresent0);
+		debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
+			pvt->b1_ambpresent0);
+		pci_read_config_word(pvt->branch_1, AMB_PRESENT_1,
+				&pvt->b1_ambpresent1);
+		debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
+			pvt->b1_ambpresent1);
+	}
+
+	/* Go and determine the size of each DIMM and place in an
+	 * orderly matrix */
+	calculate_dimm_size(pvt);
+}
+
+/*
+ *	i5000_init_csrows	Initialize the 'csrows' table within
+ *				the mci control	structure with the
+ *				addressing of memory.
+ *
+ *	return:
+ *		0	success
+ *		1	no actual memory found on this MC
+ */
+static int i5000_init_csrows(struct mem_ctl_info *mci)
+{
+	struct i5000_pvt *pvt;
+	struct csrow_info *p_csrow;
+	int empty, channel_count;
+	int max_csrows;
+	int mtr;
+	int csrow_megs;
+	int channel;
+	int csrow;
+
+	pvt = mci->pvt_info;
+
+	channel_count = pvt->maxch;
+	max_csrows = pvt->maxdimmperch * 2;
+
+	empty = 1;		/* Assume NO memory */
+
+	for (csrow = 0; csrow < max_csrows; csrow++) {
+		p_csrow = &mci->csrows[csrow];
+
+		p_csrow->csrow_idx = csrow;
+
+		/* use branch 0 for the basis */
+		mtr = pvt->b0_mtr[csrow >> 1];
+
+		/* if no DIMMS on this row, continue */
+		if (!MTR_DIMMS_PRESENT(mtr))
+			continue;
+
+		/* FAKE OUT VALUES, FIXME */
+		p_csrow->first_page = 0 + csrow * 20;
+		p_csrow->last_page = 9 + csrow * 20;
+		p_csrow->page_mask = 0xFFF;
+
+		p_csrow->grain = 8;
+
+		csrow_megs = 0;
+		for (channel = 0; channel < pvt->maxch; channel++) {
+			csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
+		}
+
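+		/* Convert MiB to pages: 1 MiB is 2^8 pages, assuming a
+		 * 4 KiB PAGE_SIZE */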
+		p_csrow->nr_pages = csrow_megs << 8;
+
+		/* Assume DDR2 for now */
+		p_csrow->mtype = MEM_FB_DDR2;
+
+		/* ask what device type on this row */
+		if (MTR_DRAM_WIDTH(mtr) == 8)
+			p_csrow->dtype = DEV_X8;
+		else
+			p_csrow->dtype = DEV_X4;
+
+		p_csrow->edac_mode = EDAC_S8ECD8ED;
+
+		empty = 0;
+	}
+
+	return empty;
+}
+
+/*
+ *	i5000_enable_error_reporting
+ *			Turn on the memory reporting features of the hardware
+ */
+static void i5000_enable_error_reporting(struct mem_ctl_info *mci)
+{
+	struct i5000_pvt *pvt;
+	u32 fbd_error_mask;
+
+	pvt = mci->pvt_info;
+
+	/* Read the FBD Error Mask Register */
+	pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
+			&fbd_error_mask);
+
+	/* Enable with a '0' */
+	fbd_error_mask &= ~(ENABLE_EMASK_ALL);
+
+	pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
+			fbd_error_mask);
+}
+
+/*
+ * i5000_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels)
+ *
+ *	ask the device how many channels are present and how many CSROWS
+ *	 as well
+ */
+static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
+					int *num_dimms_per_channel,
+					int *num_channels)
+{
+	u8 value;
+
+	/* Need to retrieve just how many channels and dimms per channel are
+	 * supported on this memory controller
+	 */
+	pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
+	*num_dimms_per_channel = (int)value * 2;
+
+	pci_read_config_byte(pdev, MAXCH, &value);
+	*num_channels = (int)value;
+}
+
+/*
+ *	i5000_probe1	Probe for ONE instance of device to see if it is
+ *			present.
+ *	return:
+ *		0 for FOUND a device
+ *		< 0 for error code
+ */
+static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	struct mem_ctl_info *mci;
+	struct i5000_pvt *pvt;
+	int num_channels;
+	int num_dimms_per_channel;
+	int num_csrows;
+
+	debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
+		__func__,
+		pdev->bus->number,
+		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+	/* We only are looking for func 0 of the set */
+	if (PCI_FUNC(pdev->devfn) != 0)
+		return -ENODEV;
+
+	/* make sure error reporting method is sane */
+	switch (edac_op_state) {
+	case EDAC_OPSTATE_POLL:
+	case EDAC_OPSTATE_NMI:
+		break;
+	default:
+		edac_op_state = EDAC_OPSTATE_POLL;
+		break;
+	}
+
+	/* Ask the devices for the number of CSROWS and CHANNELS so
+	 * that we can calculate the memory resources, etc
+	 *
+	 * The chipset will report what it can handle, which will be greater
+	 * than or equal to what the motherboard manufacturer will implement.
+	 *
+	 * As we don't have a motherboard identification routine to determine
+	 * the actual number of slots/dimms per channel, we thus utilize the
+	 * resource as specified by the chipset.  Thus, we might have more
+	 * DIMMs per channel than are actually on the mobo, but this allows
+	 * the driver to support up to the chipset max, without some fancy
+	 * mobo determination.
+	 */
+	i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
+					&num_channels);
+	num_csrows = num_dimms_per_channel * 2;
+
+	debugf0("MC: %s(): Number of - Channels= %d  DIMMS= %d  CSROWS= %d\n",
+		__func__, num_channels, num_dimms_per_channel, num_csrows);
+
+	/* allocate a new MC control structure */
+	mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
+
+	if (mci == NULL)
+		return -ENOMEM;
+
+	debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+	mci->dev = &pdev->dev;	/* record ptr  to the generic device */
+
+	pvt = mci->pvt_info;
+	pvt->system_address = pdev;	/* Record this device in our private */
+	pvt->maxch = num_channels;
+	pvt->maxdimmperch = num_dimms_per_channel;
+
+	/* 'get' the pci devices we want to reserve for our use */
+	if (i5000_get_devices(mci, dev_idx))
+		goto fail0;
+
+	/* Time to get serious */
+	i5000_get_mc_regs(mci);	/* retrieve the hardware registers */
+
+	mci->mc_idx = 0;
+	mci->mtype_cap = MEM_FLAG_FB_DDR2;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE;
+	mci->edac_cap = EDAC_FLAG_NONE;
+	mci->mod_name = "i5000_edac.c";
+	mci->mod_ver = I5000_REVISION;
+	mci->ctl_name = i5000_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
+	mci->ctl_page_to_phys = NULL;
+
+	/* Set the function pointer to an actual operation function */
+	mci->edac_check = i5000_check_error;
+
+	/* initialize the MC control structure 'csrows' table
+	 * with the mapping and control information */
+	if (i5000_init_csrows(mci)) {
+		debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
+			"    because i5000_init_csrows() returned nonzero "
+			"value\n");
+		mci->edac_cap = EDAC_FLAG_NONE;	/* no csrows found */
+	} else {
+		debugf1("MC: Enable error reporting now\n");
+		i5000_enable_error_reporting(mci);
+	}
+
+	/* add this new MC control structure to EDAC's list of MCs */
+	if (edac_mc_add_mc(mci)) {
+		debugf0("MC: " __FILE__
+			": %s(): failed edac_mc_add_mc()\n", __func__);
+		/* FIXME: perhaps some code should go here that disables error
+		 * reporting if we just enabled it
+		 */
+		goto fail1;
+	}
+
+	i5000_clear_error(mci);
+
+	/* allocating generic PCI control info */
+	i5000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!i5000_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
+	return 0;
+
+	/* Error exit unwinding stack */
+fail1:
+
+	i5000_put_devices(mci);
+
+fail0:
+	edac_mc_free(mci);
+	return -ENODEV;
+}
+
+/*
+ *	i5000_init_one	constructor for one instance of device
+ *
+ * 	returns:
+ *		negative on error
+ *		count (>= 0)
+ */
+static int __devinit i5000_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *id)
+{
+	int rc;
+
+	debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+	/* wake up device */
+	rc = pci_enable_device(pdev);
+	if (rc == -EIO)
+		return rc;
+
+	/* now probe and enable the device */
+	return i5000_probe1(pdev, id->driver_data);
+}
+
+/*
+ *	i5000_remove_one	destructor for one instance of device
+ *
+ */
+static void __devexit i5000_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+
+	debugf0(__FILE__ ": %s()\n", __func__);
+
+	if (i5000_pci)
+		edac_pci_release_generic_ctl(i5000_pci);
+
+	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+		return;
+
+	/* retrieve references to resources, and free those resources */
+	i5000_put_devices(mci);
+
+	edac_mc_free(mci);
+}
+
+/*
+ *	pci_device_id	table for which devices we are looking for
+ *
+ *	The "E500P" device is the first device supported.
+ */
+static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
+	 .driver_data = I5000P},
+
+	{0,}			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i5000_pci_tbl);
+
+/*
+ *	i5000_driver	pci_driver structure for this module
+ *
+ */
+static struct pci_driver i5000_driver = {
+	.name = __stringify(KBUILD_BASENAME),
+	.probe = i5000_init_one,
+	.remove = __devexit_p(i5000_remove_one),
+	.id_table = i5000_pci_tbl,
+};
+
+/*
+ *	i5000_init		Module entry function
+ *			Try to initialize this module for its devices
+ */
+static int __init i5000_init(void)
+{
+	int pci_rc;
+
+	debugf2("MC: " __FILE__ ": %s()\n", __func__);
+
+	pci_rc = pci_register_driver(&i5000_driver);
+
+	return (pci_rc < 0) ? pci_rc : 0;
+}
+
+/*
+ *	i5000_exit()	Module exit function
+ *			Unregister the driver
+ */
+static void __exit i5000_exit(void)
+{
+	debugf2("MC: " __FILE__ ": %s()\n", __func__);
+	pci_unregister_driver(&i5000_driver);
+}
+
+module_init(i5000_init);
+module_exit(i5000_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR
+    ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
+MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - "
+		I5000_REVISION);
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
new file mode 100644
index 0000000..83bfe37
--- /dev/null
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -0,0 +1,402 @@
+/*
+ * Intel 82443BX/GX (440BX/GX chipset) Memory Controller EDAC kernel
+ * module (C) 2006 Tim Small
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License.
+ *
+ * Written by Tim Small <tim@buttersideup.com>, based on work by Linux
+ * Networx, Thayne Harbaugh, Dan Hollis <goemon at anime dot net> and
+ * others.
+ *
+ * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
+ *
+ * Written with reference to 82443BX Host Bridge Datasheet:
+ * http://www.intel.com/design/chipsets/440/documentation.htm
+ * references to this document given in [].
+ *
+ * This module doesn't support the 440LX, but it may be possible to
+ * make it do so (the 440LX's register definitions are different, but
+ * not completely so - I haven't studied them in enough detail to know
+ * how easy this would be).
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_core.h"
+
+#define I82443_REVISION	"0.1"
+
+#define EDAC_MOD_STR    "i82443bxgx_edac"
+
+/* The 82443BX supports SDRAM, or EDO (EDO for mobile only), "Memory
+ * Size: 8 MB to 512 MB (1GB with Registered DIMMs) with eight memory
+ * rows" "The 82443BX supports multiple-bit error detection and
+ * single-bit error correction when ECC mode is enabled and
+ * single/multi-bit error detection when correction is disabled.
+ * During writes to the DRAM, the 82443BX generates ECC for the data
+ * on a QWord basis. Partial QWord writes require a read-modify-write
+ * cycle when ECC is enabled."
+*/
+
+/* "Additionally, the 82443BX ensures that the data is corrected in
+ * main memory so that accumulation of errors is prevented. Another
+ * error within the same QWord would result in a double-bit error
+ * which is unrecoverable. This is known as hardware scrubbing since
+ * it requires no software intervention to correct the data in memory."
+ */
+
+/* [Also see page 100 (section 4.3), "DRAM Interface"]
+ * [Also see page 112 (section 4.6.1.4), ECC]
+ */
+
+#define I82443BXGX_NR_CSROWS 8
+#define I82443BXGX_NR_CHANS  1
+#define I82443BXGX_NR_DIMMS  4
+
+/* 82443 PCI Device 0 */
+#define I82443BXGX_NBXCFG 0x50	/* 32bit register starting at this PCI
+				 * config space offset */
+#define I82443BXGX_NBXCFG_OFFSET_NON_ECCROW 24	/* Array of bits, zero if
+						 * row is non-ECC */
+#define I82443BXGX_NBXCFG_OFFSET_DRAM_FREQ 12	/* 2 bits,00=100MHz,10=66 MHz */
+
+#define I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY 7	/* 2 bits:       */
+#define I82443BXGX_NBXCFG_INTEGRITY_NONE   0x0	/* 00 = Non-ECC */
+#define I82443BXGX_NBXCFG_INTEGRITY_EC     0x1	/* 01 = EC (only) */
+#define I82443BXGX_NBXCFG_INTEGRITY_ECC    0x2	/* 10 = ECC */
+#define I82443BXGX_NBXCFG_INTEGRITY_SCRUB  0x3	/* 11 = ECC + HW Scrub */
+
+#define I82443BXGX_NBXCFG_OFFSET_ECC_DIAG_ENABLE  6
+
+/* 82443 PCI Device 0 */
+#define I82443BXGX_EAP   0x80	/* 32bit register starting at this PCI
+				 * config space offset, Error Address
+				 * Pointer Register */
+#define I82443BXGX_EAP_OFFSET_EAP  12	/* High 20 bits of error address */
+#define I82443BXGX_EAP_OFFSET_MBE  BIT(1)	/* Err at EAP was multi-bit (W1TC) */
+#define I82443BXGX_EAP_OFFSET_SBE  BIT(0)	/* Err at EAP was single-bit (W1TC) */
+
+#define I82443BXGX_ERRCMD  0x90	/* 8bit register starting at this PCI
+				 * config space offset. */
+#define I82443BXGX_ERRCMD_OFFSET_SERR_ON_MBE BIT(1)	/* 1 = enable */
+#define I82443BXGX_ERRCMD_OFFSET_SERR_ON_SBE BIT(0)	/* 1 = enable */
+
+#define I82443BXGX_ERRSTS  0x91	/* 16bit register starting at this PCI
+				 * config space offset. */
+#define I82443BXGX_ERRSTS_OFFSET_MBFRE 5	/* 3 bits - first err row multibit */
+#define I82443BXGX_ERRSTS_OFFSET_MEF   BIT(4)	/* 1 = MBE occurred */
+#define I82443BXGX_ERRSTS_OFFSET_SBFRE 1	/* 3 bits - first err row singlebit */
+#define I82443BXGX_ERRSTS_OFFSET_SEF   BIT(0)	/* 1 = SBE occurred */
+
+#define I82443BXGX_DRAMC 0x57	/* 8bit register starting at this PCI
+				 * config space offset. */
+#define I82443BXGX_DRAMC_OFFSET_DT 3	/* 2 bits, DRAM Type */
+#define I82443BXGX_DRAMC_DRAM_IS_EDO 0	/* 00 = EDO */
+#define I82443BXGX_DRAMC_DRAM_IS_SDRAM 1	/* 01 = SDRAM */
+#define I82443BXGX_DRAMC_DRAM_IS_RSDRAM 2	/* 10 = Registered SDRAM */
+
+#define I82443BXGX_DRB 0x60	/* 8x 8bit registers starting at this PCI
+				 * config space offset. */
+
+/* FIXME - don't poll when ECC disabled? */
+
+struct i82443bxgx_edacmc_error_info {
+	u32 eap;
+};
+
+static struct edac_pci_ctl_info *i82443bxgx_pci;
+
+static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci,
+				struct i82443bxgx_edacmc_error_info
+				*info)
+{
+	struct pci_dev *pdev;
+	pdev = to_pci_dev(mci->dev);
+	pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap);
+	if (info->eap & I82443BXGX_EAP_OFFSET_SBE)
+		/* Clear error to allow next error to be reported [p.61] */
+		pci_write_bits32(pdev, I82443BXGX_EAP,
+				 I82443BXGX_EAP_OFFSET_SBE,
+				 I82443BXGX_EAP_OFFSET_SBE);
+
+	if (info->eap & I82443BXGX_EAP_OFFSET_MBE)
+		/* Clear error to allow next error to be reported [p.61] */
+		pci_write_bits32(pdev, I82443BXGX_EAP,
+				 I82443BXGX_EAP_OFFSET_MBE,
+				 I82443BXGX_EAP_OFFSET_MBE);
+}
+
+static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
+						struct
+						i82443bxgx_edacmc_error_info
+						*info, int handle_errors)
+{
+	int error_found = 0;
+	u32 eapaddr, page, pageoffset;
+
+	/* bits 30:12 hold the 4kb block in which the error occurred
+	 * [p.61] */
+	eapaddr = (info->eap & 0xfffff000);
+	page = eapaddr >> PAGE_SHIFT;
+	pageoffset = eapaddr - (page << PAGE_SHIFT);
+
+	if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
+		error_found = 1;
+		if (handle_errors)
+			edac_mc_handle_ce(mci, page, pageoffset,
+				/* 440BX/GX don't make syndrome information
+				 * available */
+				0, edac_mc_find_csrow_by_page(mci, page), 0,
+				mci->ctl_name);
+	}
+
+	if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
+		error_found = 1;
+		if (handle_errors)
+			edac_mc_handle_ue(mci, page, pageoffset,
+					edac_mc_find_csrow_by_page(mci, page),
+					mci->ctl_name);
+	}
+
+	return error_found;
+}
+
+static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
+{
+	struct i82443bxgx_edacmc_error_info info;
+
+	debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+	i82443bxgx_edacmc_get_error_info(mci, &info);
+	i82443bxgx_edacmc_process_error_info(mci, &info, 1);
+}
+
+static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
+				struct pci_dev *pdev,
+				enum edac_type edac_mode,
+				enum mem_type mtype)
+{
+	struct csrow_info *csrow;
+	int index;
+	u8 drbar, dramc;
+	u32 row_base, row_high_limit, row_high_limit_last;
+
+	pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
+	row_high_limit_last = 0;
+	for (index = 0; index < mci->nr_csrows; index++) {
+		csrow = &mci->csrows[index];
+		pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
+		debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n",
+			mci->mc_idx, __func__, index, drbar);
+		row_high_limit = ((u32) drbar << 23);
+		/* find the DRAM Chip Select Base address and mask */
+		debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
+			"Boundry Address=%#0x, Last = %#0x \n",
+			mci->mc_idx, __func__, index, row_high_limit,
+			row_high_limit_last);
+
+		/* 440GX goes to 2GB, represented with a DRB of 0. */
+		if (row_high_limit_last && !row_high_limit)
+			row_high_limit = 1UL << 31;
+
+		/* This row is empty [p.49] */
+		if (row_high_limit == row_high_limit_last)
+			continue;
+		row_base = row_high_limit_last;
+		csrow->first_page = row_base >> PAGE_SHIFT;
+		csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
+		csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+		/* EAP reports in 4kilobyte granularity [61] */
+		csrow->grain = 1 << 12;
+		csrow->mtype = mtype;
+		/* I don't think 440BX can tell you device type? FIXME? */
+		csrow->dtype = DEV_UNKNOWN;
+		/* Mode is global to all rows on 440BX */
+		csrow->edac_mode = edac_mode;
+		row_high_limit_last = row_high_limit;
+	}
+}
+
+static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	struct mem_ctl_info *mci;
+	u8 dramc;
+	u32 nbxcfg, ecc_mode;
+	enum mem_type mtype;
+	enum edac_type edac_mode;
+
+	debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+	/* Something is really hosed if PCI config space reads from
+	 * the MC aren't working.
+	 */
+	if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
+		return -EIO;
+
+	mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0);
+
+	if (mci == NULL)
+		return -ENOMEM;
+
+	debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+	pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
+	switch ((dramc >> I82443BXGX_DRAMC_OFFSET_DT) & (BIT(0) | BIT(1))) {
+	case I82443BXGX_DRAMC_DRAM_IS_EDO:
+		mtype = MEM_EDO;
+		break;
+	case I82443BXGX_DRAMC_DRAM_IS_SDRAM:
+		mtype = MEM_SDR;
+		break;
+	case I82443BXGX_DRAMC_DRAM_IS_RSDRAM:
+		mtype = MEM_RDR;
+		break;
+	default:
+		debugf0("Unknown/reserved DRAM type value "
+			"in DRAMC register!\n");
+		mtype = MEM_UNKNOWN;
+	}
+
+	if ((mtype == MEM_SDR) || (mtype == MEM_RDR))
+		mci->edac_cap = mci->edac_ctl_cap;
+	else
+		mci->edac_cap = EDAC_FLAG_NONE;
+
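+	/* NBXCFG bits 8:7 hold the DRAM integrity mode (none / EC / ECC /
+	 * ECC + hardware scrub); use it to derive the EDAC scrub mode here
+	 * and the per-row edac_mode below.
+	 */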
+	mci->scrub_cap = SCRUB_FLAG_HW_SRC;
+	pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg);
+	ecc_mode = ((nbxcfg >> I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY) &
+		(BIT(0) | BIT(1)));
+
+	mci->scrub_mode = (ecc_mode == I82443BXGX_NBXCFG_INTEGRITY_SCRUB)
+		? SCRUB_HW_SRC : SCRUB_NONE;
+
+	switch (ecc_mode) {
+	case I82443BXGX_NBXCFG_INTEGRITY_NONE:
+		edac_mode = EDAC_NONE;
+		break;
+	case I82443BXGX_NBXCFG_INTEGRITY_EC:
+		edac_mode = EDAC_EC;
+		break;
+	case I82443BXGX_NBXCFG_INTEGRITY_ECC:
+	case I82443BXGX_NBXCFG_INTEGRITY_SCRUB:
+		edac_mode = EDAC_SECDED;
+		break;
+	default:
+		debugf0("%s(): Unknown/reserved ECC state "
+			"in NBXCFG register!\n", __func__);
+		edac_mode = EDAC_UNKNOWN;
+		break;
+	}
+
+	i82443bxgx_init_csrows(mci, pdev, edac_mode, mtype);
+
+	/* Many BIOSes don't clear error flags on boot, so do this
+	 * here, or we get "phantom" errors occuring at module-load
+	 * time. */
+	pci_write_bits32(pdev, I82443BXGX_EAP,
+			(I82443BXGX_EAP_OFFSET_SBE |
+				I82443BXGX_EAP_OFFSET_MBE),
+			(I82443BXGX_EAP_OFFSET_SBE |
+				I82443BXGX_EAP_OFFSET_MBE));
+
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = I82443_REVISION;
+	mci->ctl_name = "I82443BXGX";
+	mci->dev_name = pci_name(pdev);
+	mci->edac_check = i82443bxgx_edacmc_check;
+	mci->ctl_page_to_phys = NULL;
+
+	if (edac_mc_add_mc(mci)) {
+		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail;
+	}
+
+	/* allocating generic PCI control info */
+	i82443bxgx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!i82443bxgx_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
+	debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+	return 0;
+
+fail:
+	edac_mc_free(mci);
+	return -ENODEV;
+}
+
+EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_probe1);
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
+						const struct pci_device_id *ent)
+{
+	debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+	/* don't need to call pci_device_enable() */
+	return i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
+}
+
+static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+
+	debugf0(__FILE__ ": %s()\n", __func__);
+
+	if (i82443bxgx_pci)
+		edac_pci_release_generic_ctl(i82443bxgx_pci);
+
+	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+		return;
+
+	edac_mc_free(mci);
+}
+
+EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
+
+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2)},
+	{0,}			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82443bxgx_pci_tbl);
+
+static struct pci_driver i82443bxgx_edacmc_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = i82443bxgx_edacmc_init_one,
+	.remove = __devexit_p(i82443bxgx_edacmc_remove_one),
+	.id_table = i82443bxgx_pci_tbl,
+};
+
+static int __init i82443bxgx_edacmc_init(void)
+{
+	return pci_register_driver(&i82443bxgx_edacmc_driver);
+}
+
+static void __exit i82443bxgx_edacmc_exit(void)
+{
+	pci_unregister_driver(&i82443bxgx_edacmc_driver);
+}
+
+module_init(i82443bxgx_edacmc_init);
+module_exit(i82443bxgx_edacmc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD");
+MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers");
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index e4bb298..f5ecd2c 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -14,9 +14,9 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
-#include "edac_mc.h"
+#include "edac_core.h"
 
-#define  I82860_REVISION " Ver: 2.0.1 " __DATE__
+#define  I82860_REVISION " Ver: 2.0.2 " __DATE__
 #define EDAC_MOD_STR	"i82860_edac"
 
 #define i82860_printk(level, fmt, arg...) \
@@ -54,16 +54,16 @@
 
 static const struct i82860_dev_info i82860_devs[] = {
 	[I82860] = {
-		.ctl_name = "i82860"
-	},
+		.ctl_name = "i82860"},
 };
 
-static struct pci_dev *mci_pdev = NULL;	/* init dev: in case that AGP code
+static struct pci_dev *mci_pdev;	/* init dev: in case that AGP code
 					 * has already registered driver
 					 */
+static struct edac_pci_ctl_info *i82860_pci;
 
 static void i82860_get_error_info(struct mem_ctl_info *mci,
-		struct i82860_error_info *info)
+				struct i82860_error_info *info)
 {
 	struct pci_dev *pdev;
 
@@ -91,13 +91,13 @@
 
 	if ((info->errsts ^ info->errsts2) & 0x0003) {
 		pci_read_config_dword(pdev, I82860_EAP, &info->eap);
-		pci_read_config_word(pdev, I82860_DERRCTL_STS,
-				&info->derrsyn);
+		pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn);
 	}
 }
 
 static int i82860_process_error_info(struct mem_ctl_info *mci,
-		struct i82860_error_info *info, int handle_errors)
+				struct i82860_error_info *info,
+				int handle_errors)
 {
 	int row;
 
@@ -136,7 +136,7 @@
 static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
 {
 	unsigned long last_cumul_size;
-	u16 mchcfg_ddim;  /* DRAM Data Integrity Mode 0=none, 2=edac */
+	u16 mchcfg_ddim;	/* DRAM Data Integrity Mode 0=none, 2=edac */
 	u16 value;
 	u32 cumul_size;
 	struct csrow_info *csrow;
@@ -155,7 +155,7 @@
 		csrow = &mci->csrows[index];
 		pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
 		cumul_size = (value & I82860_GBA_MASK) <<
-		    (I82860_GBA_SHIFT - PAGE_SHIFT);
+			(I82860_GBA_SHIFT - PAGE_SHIFT);
 		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
 			cumul_size);
 
@@ -186,7 +186,7 @@
 	   the channel and the GRA registers map to physical devices so we are
 	   going to make 1 channel for group.
 	 */
-	mci = edac_mc_alloc(0, 16, 1);
+	mci = edac_mc_alloc(0, 16, 1, 0);
 
 	if (!mci)
 		return -ENOMEM;
@@ -200,19 +200,31 @@
 	mci->mod_name = EDAC_MOD_STR;
 	mci->mod_ver = I82860_REVISION;
 	mci->ctl_name = i82860_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
 	mci->edac_check = i82860_check;
 	mci->ctl_page_to_phys = NULL;
 	i82860_init_csrows(mci, pdev);
-	i82860_get_error_info(mci, &discard);  /* clear counters */
+	i82860_get_error_info(mci, &discard);	/* clear counters */
 
 	/* Here we assume that we will never see multiple instances of this
 	 * type of memory controller.  The ID is therefore hardcoded to 0.
 	 */
-	if (edac_mc_add_mc(mci,0)) {
+	if (edac_mc_add_mc(mci)) {
 		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
 		goto fail;
 	}
 
+	/* allocating generic PCI control info */
+	i82860_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!i82860_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
 	/* get this far and it's successful */
 	debugf3("%s(): success\n", __func__);
 
@@ -225,7 +237,7 @@
 
 /* returns count (>= 0), or negative on error */
 static int __devinit i82860_init_one(struct pci_dev *pdev,
-		const struct pci_device_id *ent)
+				const struct pci_device_id *ent)
 {
 	int rc;
 
@@ -249,6 +261,9 @@
 
 	debugf0("%s()\n", __func__);
 
+	if (i82860_pci)
+		edac_pci_release_generic_ctl(i82860_pci);
+
 	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
 		return;
 
@@ -257,12 +272,11 @@
 
 static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
 	{
-		PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		I82860
-	},
+	 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 I82860},
 	{
-		0,
-	}	/* 0 terminated list. */
+	 0,
+	 }			/* 0 terminated list. */
 };
 
 MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
@@ -329,5 +343,5 @@
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
-	"Ben Woodard <woodard@redhat.com>");
+		"Ben Woodard <woodard@redhat.com>");
 MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 2800b3e..031abad 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -18,9 +18,9 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
-#include "edac_mc.h"
+#include "edac_core.h"
 
-#define I82875P_REVISION	" Ver: 2.0.1 " __DATE__
+#define I82875P_REVISION	" Ver: 2.0.2 " __DATE__
 #define EDAC_MOD_STR		"i82875p_edac"
 
 #define i82875p_printk(level, fmt, arg...) \
@@ -174,18 +174,19 @@
 
 static const struct i82875p_dev_info i82875p_devs[] = {
 	[I82875P] = {
-		.ctl_name = "i82875p"
-	},
+		.ctl_name = "i82875p"},
 };
 
-static struct pci_dev *mci_pdev = NULL;	/* init dev: in case that AGP code has
+static struct pci_dev *mci_pdev;	/* init dev: in case that AGP code has
 					 * already registered driver
 					 */
 
 static int i82875p_registered = 1;
 
+static struct edac_pci_ctl_info *i82875p_pci;
+
 static void i82875p_get_error_info(struct mem_ctl_info *mci,
-		struct i82875p_error_info *info)
+				struct i82875p_error_info *info)
 {
 	struct pci_dev *pdev;
 
@@ -197,38 +198,39 @@
 	 * overwritten by UE.
 	 */
 	pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts);
+
+	if (!(info->errsts & 0x0081))
+		return;
+
 	pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
 	pci_read_config_byte(pdev, I82875P_DES, &info->des);
 	pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
 	pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2);
 
-	pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081);
-
 	/*
 	 * If the error is the same for both reads then the first set of
 	 * reads is valid.  If there is a change then there is a CE with
 	 * no info, the second set of reads is valid, and it should be
 	 * the UE info.
 	 */
-	if (!(info->errsts2 & 0x0081))
-		return;
-
 	if ((info->errsts ^ info->errsts2) & 0x0081) {
 		pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
 		pci_read_config_byte(pdev, I82875P_DES, &info->des);
-		pci_read_config_byte(pdev, I82875P_DERRSYN,
-				&info->derrsyn);
+		pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
 	}
+
+	pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081);
 }
 
 static int i82875p_process_error_info(struct mem_ctl_info *mci,
-		struct i82875p_error_info *info, int handle_errors)
+				struct i82875p_error_info *info,
+				int handle_errors)
 {
 	int row, multi_chan;
 
 	multi_chan = mci->csrows[0].nr_channels - 1;
 
-	if (!(info->errsts2 & 0x0081))
+	if (!(info->errsts & 0x0081))
 		return 0;
 
 	if (!handle_errors)
@@ -263,10 +265,12 @@
 
 /* Return 0 on success or 1 on failure. */
 static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
-		struct pci_dev **ovrfl_pdev, void __iomem **ovrfl_window)
+				struct pci_dev **ovrfl_pdev,
+				void __iomem **ovrfl_window)
 {
 	struct pci_dev *dev;
 	void __iomem *window;
+	int err;
 
 	*ovrfl_pdev = NULL;
 	*ovrfl_window = NULL;
@@ -284,14 +288,19 @@
 		if (dev == NULL)
 			return 1;
 
-        	pci_bus_add_device(dev);
+		err = pci_bus_add_device(dev);
+		if (err) {
+			i82875p_printk(KERN_ERR,
+				"%s(): pci_bus_add_device() Failed\n",
+				__func__);
+		}
 	}
 
 	*ovrfl_pdev = dev;
 
 	if (pci_enable_device(dev)) {
 		i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow "
-			       "device\n", __func__);
+			"device\n", __func__);
 		return 1;
 	}
 
@@ -307,7 +316,7 @@
 
 	if (window == NULL) {
 		i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
-			       __func__);
+			__func__);
 		goto fail1;
 	}
 
@@ -325,21 +334,20 @@
 	return 1;
 }
 
-
 /* Return 1 if dual channel mode is active.  Else return 0. */
 static inline int dual_channel_active(u32 drc)
 {
 	return (drc >> 21) & 0x1;
 }
 
-
 static void i82875p_init_csrows(struct mem_ctl_info *mci,
-		struct pci_dev *pdev, void __iomem *ovrfl_window, u32 drc)
+				struct pci_dev *pdev,
+				void __iomem * ovrfl_window, u32 drc)
 {
 	struct csrow_info *csrow;
 	unsigned long last_cumul_size;
 	u8 value;
-	u32 drc_ddim;  /* DRAM Data Integrity Mode 0=none,2=edac */
+	u32 drc_ddim;		/* DRAM Data Integrity Mode 0=none,2=edac */
 	u32 cumul_size;
 	int index;
 
@@ -392,7 +400,7 @@
 	drc = readl(ovrfl_window + I82875P_DRC);
 	nr_chans = dual_channel_active(drc) + 1;
 	mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
-				nr_chans);
+			nr_chans, 0);
 
 	if (!mci) {
 		rc = -ENOMEM;
@@ -407,23 +415,35 @@
 	mci->mod_name = EDAC_MOD_STR;
 	mci->mod_ver = I82875P_REVISION;
 	mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
 	mci->edac_check = i82875p_check;
 	mci->ctl_page_to_phys = NULL;
 	debugf3("%s(): init pvt\n", __func__);
-	pvt = (struct i82875p_pvt *) mci->pvt_info;
+	pvt = (struct i82875p_pvt *)mci->pvt_info;
 	pvt->ovrfl_pdev = ovrfl_pdev;
 	pvt->ovrfl_window = ovrfl_window;
 	i82875p_init_csrows(mci, pdev, ovrfl_window, drc);
-	i82875p_get_error_info(mci, &discard);  /* clear counters */
+	i82875p_get_error_info(mci, &discard);	/* clear counters */
 
 	/* Here we assume that we will never see multiple instances of this
 	 * type of memory controller.  The ID is therefore hardcoded to 0.
 	 */
-	if (edac_mc_add_mc(mci,0)) {
+	if (edac_mc_add_mc(mci)) {
 		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
 		goto fail1;
 	}
 
+	/* allocating generic PCI control info */
+	i82875p_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!i82875p_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
 	/* get this far and it's successful */
 	debugf3("%s(): success\n", __func__);
 	return 0;
@@ -442,7 +462,7 @@
 
 /* returns count (>= 0), or negative on error */
 static int __devinit i82875p_init_one(struct pci_dev *pdev,
-		const struct pci_device_id *ent)
+				const struct pci_device_id *ent)
 {
 	int rc;
 
@@ -467,10 +487,13 @@
 
 	debugf0("%s()\n", __func__);
 
+	if (i82875p_pci)
+		edac_pci_release_generic_ctl(i82875p_pci);
+
 	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
 		return;
 
-	pvt = (struct i82875p_pvt *) mci->pvt_info;
+	pvt = (struct i82875p_pvt *)mci->pvt_info;
 
 	if (pvt->ovrfl_window)
 		iounmap(pvt->ovrfl_window);
@@ -488,12 +511,11 @@
 
 static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
 	{
-		PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		I82875P
-	},
+	 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 I82875P},
 	{
-		0,
-	}	/* 0 terminated list. */
+	 0,
+	 }			/* 0 terminated list. */
 };
 
 MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
@@ -517,7 +539,7 @@
 
 	if (mci_pdev == NULL) {
 		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
-				PCI_DEVICE_ID_INTEL_82875_0, NULL);
+					PCI_DEVICE_ID_INTEL_82875_0, NULL);
 
 		if (!mci_pdev) {
 			debugf0("875p pci_get_device fail\n");
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
new file mode 100644
index 0000000..0ee8884
--- /dev/null
+++ b/drivers/edac/i82975x_edac.c
@@ -0,0 +1,666 @@
+/*
+ * Intel 82975X Memory Controller kernel module
+ * (C) 2007 aCarLab (India) Pvt. Ltd. (http://acarlab.com)
+ * (C) 2007 jetzbroadband (http://jetzbroadband.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Arvind R.
+ *   Copied from i82875p_edac.c source:
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+
+#include "edac_core.h"
+
+#define I82975X_REVISION	" Ver: 1.0.0 " __DATE__
+#define EDAC_MOD_STR		"i82975x_edac"
+
+#define i82975x_printk(level, fmt, arg...) \
+	edac_printk(level, "i82975x", fmt, ##arg)
+
+#define i82975x_mc_printk(mci, level, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "i82975x", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_82975_0
+#define PCI_DEVICE_ID_INTEL_82975_0	0x277c
+#endif				/* PCI_DEVICE_ID_INTEL_82975_0 */
+
+#define I82975X_NR_CSROWS(nr_chans)		(8/(nr_chans))
+
+/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
+#define I82975X_EAP		0x58	/* Dram Error Address Pointer (32b)
+					 *
+					 * 31:7  128 byte cache-line address
+					 * 6:1   reserved
+					 * 0     0: CH0; 1: CH1
+					 */
+
+#define I82975X_DERRSYN		0x5c	/* Dram Error SYNdrome (8b)
+					 *
+					 *  7:0  DRAM ECC Syndrome
+					 */
+
+#define I82975X_DES		0x5d	/* Dram ERRor DeSTination (8b)
+					 * 0h:    Processor Memory Reads
+					 * 1h:7h  reserved
+					 * More - See Page 65 of Intel DocSheet.
+					 */
+
+#define I82975X_ERRSTS		0xc8	/* Error Status Register (16b)
+					 *
+					 * 15:12 reserved
+					 * 11    Thermal Sensor Event
+					 * 10    reserved
+					 *  9    non-DRAM lock error (ndlock)
+					 *  8    Refresh Timeout
+					 *  7:2  reserved
+					 *  1    ECC UE (multibit DRAM error)
+					 *  0    ECC CE (singlebit DRAM error)
+					 */
+
+/* Error Reporting is supported by 3 mechanisms:
+  1. DMI SERR generation  ( ERRCMD )
+  2. SMI DMI  generation  ( SMICMD )
+  3. SCI DMI  generation  ( SCICMD )
+NOTE: Only ONE of the three must be enabled
+*/
+#define I82975X_ERRCMD		0xca	/* Error Command (16b)
+					 *
+					 * 15:12 reserved
+					 * 11    Thermal Sensor Event
+					 * 10    reserved
+					 *  9    non-DRAM lock error (ndlock)
+					 *  8    Refresh Timeout
+					 *  7:2  reserved
+					 *  1    ECC UE (multibit DRAM error)
+					 *  0    ECC CE (singlebit DRAM error)
+					 */
+
+#define I82975X_SMICMD		0xcc	/* Error Command (16b)
+					 *
+					 * 15:2  reserved
+					 *  1    ECC UE (multibit DRAM error)
+					 *  0    ECC CE (singlebit DRAM error)
+					 */
+
+#define I82975X_SCICMD		0xce	/* Error Command (16b)
+					 *
+					 * 15:2  reserved
+					 *  1    ECC UE (multibit DRAM error)
+					 *  0    ECC CE (singlebit DRAM error)
+					 */
+
+#define I82975X_XEAP	0xfc	/* Extended Dram Error Address Pointer (8b)
+					 *
+					 * 7:1   reserved
+					 * 0     Bit32 of the Dram Error Address
+					 */
+
+#define I82975X_MCHBAR		0x44	/*
+					 *
+					 * 31:14 Base Addr of 16K memory-mapped
+					 *	configuration space
+					 * 13:1  reserved
+					 *  0    mem-mapped config space enable
+					 */
+
+/* NOTE: Following addresses have to indexed using MCHBAR offset (44h, 32b) */
+/* Intel 82975x memory mapped register space */
+
+#define I82975X_DRB_SHIFT 25	/* fixed 32MiB grain */
+
+#define I82975X_DRB		0x100	/* DRAM Row Boundary (8b x 8)
+					 *
+					 * 7   set to 1 in highest DRB of
+					 *	channel if 4GB in ch.
+					 * 6:2 upper boundary of rank in
+					 *	32MB grains
+					 * 1:0 set to 0
+					 */
+#define I82975X_DRB_CH0R0		0x100
+#define I82975X_DRB_CH0R1		0x101
+#define I82975X_DRB_CH0R2		0x102
+#define I82975X_DRB_CH0R3		0x103
+#define I82975X_DRB_CH1R0		0x180
+#define I82975X_DRB_CH1R1		0x181
+#define I82975X_DRB_CH1R2		0x182
+#define I82975X_DRB_CH1R3		0x183
+
+
+#define I82975X_DRA		0x108	/* DRAM Row Attribute (4b x 8)
+					 *  defines the PAGE SIZE to be used
+					 *	for the rank
+					 *  7    reserved
+					 *  6:4  row attr of odd rank, i.e. 1
+					 *  3    reserved
+					 *  2:0  row attr of even rank, i.e. 0
+					 *
+					 * 000 = unpopulated
+					 * 001 = reserved
+					 * 010 = 4KiB
+					 * 011 = 8KiB
+					 * 100 = 16KiB
+					 * others = reserved
+					 */
+#define I82975X_DRA_CH0R01		0x108
+#define I82975X_DRA_CH0R23		0x109
+#define I82975X_DRA_CH1R01		0x188
+#define I82975X_DRA_CH1R23		0x189
+
+
+#define I82975X_BNKARC	0x10e /* Type of device in each rank - Bank Arch (16b)
+					 *
+					 * 15:8  reserved
+					 * 7:6  Rank 3 architecture
+					 * 5:4  Rank 2 architecture
+					 * 3:2  Rank 1 architecture
+					 * 1:0  Rank 0 architecture
+					 *
+					 * 00 => x16 devices; i.e 4 banks
+					 * 01 => x8  devices; i.e 8 banks
+					 */
+#define I82975X_C0BNKARC	0x10e
+#define I82975X_C1BNKARC	0x18e
+
+
+
+#define I82975X_DRC		0x120 /* DRAM Controller Mode0 (32b)
+					 *
+					 * 31:30 reserved
+					 * 29    init complete
+					 * 28:11 reserved, according to Intel
+					 *    22:21 number of channels
+					 *		00=1 01=2 in 82875
+					 *		seems to be ECC mode
+					 *		bits in 82975 in Asus
+					 *		P5W
+					 *	 19:18 Data Integ Mode
+					 *		00=none 01=ECC in 82875
+					 * 10:8  refresh mode
+					 *  7    reserved
+					 *  6:4  mode select
+					 *  3:2  reserved
+					 *  1:0  DRAM type 10=Second Revision
+					 *		DDR2 SDRAM
+					 *         00, 01, 11 reserved
+					 */
+#define I82975X_DRC_CH0M0		0x120
+#define I82975X_DRC_CH1M0		0x1A0
+
+
+#define I82975X_DRC_M1	0x124 /* DRAM Controller Mode1 (32b)
+					 * 31	0=Standard Address Map
+					 *	1=Enhanced Address Map
+					 * 30:0	reserved
+					 */
+
+#define I82975X_DRC_CH0M1		0x124
+#define I82975X_DRC_CH1M1		0x1A4
+
+enum i82975x_chips {
+	I82975X = 0,
+};
+
+struct i82975x_pvt {
+	void __iomem *mch_window;
+};
+
+struct i82975x_dev_info {
+	const char *ctl_name;
+};
+
+struct i82975x_error_info {
+	u16 errsts;
+	u32 eap;
+	u8 des;
+	u8 derrsyn;
+	u16 errsts2;
+	u8 chan;		/* the channel is bit 0 of EAP */
+	u8 xeap;		/* extended eap bit */
+};
+
+static const struct i82975x_dev_info i82975x_devs[] = {
+	[I82975X] = {
+		.ctl_name = "i82975x"
+	},
+};
+
+static struct pci_dev *mci_pdev;	/* init dev: in case that AGP code has
+					 * already registered driver
+					 */
+
+static int i82975x_registered = 1;
+
+static void i82975x_get_error_info(struct mem_ctl_info *mci,
+		struct i82975x_error_info *info)
+{
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(mci->dev);
+
+	/*
+	 * This is a mess because there is no atomic way to read all the
+	 * registers at once and the registers can transition from CE being
+	 * overwritten by UE.
+	 */
+	pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts);
+	pci_read_config_dword(pdev, I82975X_EAP, &info->eap);
+	pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap);
+	pci_read_config_byte(pdev, I82975X_DES, &info->des);
+	pci_read_config_byte(pdev, I82975X_DERRSYN, &info->derrsyn);
+	pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts2);
+
+	pci_write_bits16(pdev, I82975X_ERRSTS, 0x0003, 0x0003);
+
+	/*
+	 * If the error is the same for both reads then the first set of
+	 * reads is valid.  If there is a change then there is a CE with
+	 * no info, the second set of reads is valid, and it should be
+	 * the UE info.
+	 */
+	if (!(info->errsts2 & 0x0003))
+		return;
+
+	if ((info->errsts ^ info->errsts2) & 0x0003) {
+		pci_read_config_dword(pdev, I82975X_EAP, &info->eap);
+		pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap);
+		pci_read_config_byte(pdev, I82975X_DES, &info->des);
+		pci_read_config_byte(pdev, I82975X_DERRSYN,
+				&info->derrsyn);
+	}
+}
+
+static int i82975x_process_error_info(struct mem_ctl_info *mci,
+		struct i82975x_error_info *info, int handle_errors)
+{
+	int row, multi_chan, chan;
+
+	multi_chan = mci->csrows[0].nr_channels - 1;
+
+	if (!(info->errsts2 & 0x0003))
+		return 0;
+
+	if (!handle_errors)
+		return 1;
+
+	if ((info->errsts ^ info->errsts2) & 0x0003) {
+		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+		info->errsts = info->errsts2;
+	}
+
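+	/* Decode the error location: EAP bit 0 is the channel, XEAP
+	 * supplies bit 32 of the error address, and the address is then
+	 * reduced to a page number for the csrow lookup.
+	 */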
+	chan = info->eap & 1;
+	info->eap >>= 1;
+	if (info->xeap)
+		info->eap |= 0x80000000;
+	info->eap >>= PAGE_SHIFT;
+	row = edac_mc_find_csrow_by_page(mci, info->eap);
+
+	if (info->errsts & 0x0002)
+		edac_mc_handle_ue(mci, info->eap, 0, row, "i82975x UE");
+	else
+		edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+				multi_chan ? chan : 0,
+				"i82975x CE");
+
+	return 1;
+}
+
+static void i82975x_check(struct mem_ctl_info *mci)
+{
+	struct i82975x_error_info info;
+
+	debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+	i82975x_get_error_info(mci, &info);
+	i82975x_process_error_info(mci, &info, 1);
+}
+
+/* Return 1 if dual channel mode is active.  Else return 0. */
+static int dual_channel_active(void __iomem *mch_window)
+{
+	/*
+	 * We treat interleaved-symmetric configuration as dual-channel - EAP's
+	 * bit-0 giving the channel of the error location.
+	 *
+	 * All other configurations are treated as single channel - the EAP's
+	 * bit-0 will resolve ok in symmetric area of mixed
+	 * (symmetric/asymmetric) configurations
+	 */
+	u8	drb[4][2];
+	int	row;
+	int    dualch;
+
+	for (dualch = 1, row = 0; dualch && (row < 4); row++) {
+		drb[row][0] = readb(mch_window + I82975X_DRB + row);
+		drb[row][1] = readb(mch_window + I82975X_DRB + row + 0x80);
+		dualch = dualch && (drb[row][0] == drb[row][1]);
+	}
+	return dualch;
+}
+
+static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
+{
+	/*
+	 * ASUS P5W DH either does not program this register or programs
+	 * it wrong!
+	 * ECC is possible on i82975x ONLY with DEV_X8 which should mean 'val'
+	 * for each rank should be 01b - the LSB of the word should be 0x55;
+	 * but it reads 0!
+	 */
+	return DEV_X8;
+}
+
+static void i82975x_init_csrows(struct mem_ctl_info *mci,
+		struct pci_dev *pdev, void __iomem *mch_window)
+{
+	struct csrow_info *csrow;
+	unsigned long last_cumul_size;
+	u8 value;
+	u32 cumul_size;
+	int index;
+
+	last_cumul_size = 0;
+
+	/*
+	 * 82875 comment:
+	 * The dram row boundary (DRB) reg values are boundary address
+	 * for each DRAM row with a granularity of 32 or 64MB (single/dual
+	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
+	 * contain the total memory contained in all eight rows.
+	 *
+	 * FIXME:
+	 *  EDAC currently works for Dual-channel Interleaved configuration.
+	 *  Other configurations, which the chip supports, need fixing/testing.
+	 *
+	 */
+
+	for (index = 0; index < mci->nr_csrows; index++) {
+		csrow = &mci->csrows[index];
+
+		value = readb(mch_window + I82975X_DRB + index +
+					((index >= 4) ? 0x80 : 0));
+		cumul_size = value;
+		cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT);
+		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+			cumul_size);
+		if (cumul_size == last_cumul_size)
+			continue;	/* not populated */
+
+		csrow->first_page = last_cumul_size;
+		csrow->last_page = cumul_size - 1;
+		csrow->nr_pages = cumul_size - last_cumul_size;
+		last_cumul_size = cumul_size;
+		csrow->grain = 1 << 7;	/* I82975X_EAP has 128B resolution */
+		csrow->mtype = MEM_DDR; /* i82975x supports only DDR2 */
+		csrow->dtype = i82975x_dram_type(mch_window, index);
+		csrow->edac_mode = EDAC_SECDED; /* only supported */
+	}
+}
+
+/* #define  i82975x_DEBUG_IOMEM */
+
+#ifdef i82975x_DEBUG_IOMEM
+static void i82975x_print_dram_timings(void __iomem *mch_window)
+{
+	/*
+	 * The register meanings are from Intel specs;
+	 * (shows 13-5-5-5 for 800-DDR2)
+	 * Asus P5W Bios reports 15-5-4-4
+	 * What's your religion?
+	 */
+	static const int caslats[4] = { 5, 4, 3, 6 };
+	u32	dtreg[2];
+
+	dtreg[0] = readl(mch_window + 0x114);
+	dtreg[1] = readl(mch_window + 0x194);
+	i82975x_printk(KERN_INFO, "DRAM Timings :     Ch0    Ch1\n"
+		"                RAS Active Min = %d     %d\n"
+		"                CAS latency    =  %d      %d\n"
+		"                RAS to CAS     =  %d      %d\n"
+		"                RAS precharge  =  %d      %d\n",
+		(dtreg[0] >> 19 ) & 0x0f,
+			(dtreg[1] >> 19) & 0x0f,
+		caslats[(dtreg[0] >> 8) & 0x03],
+			caslats[(dtreg[1] >> 8) & 0x03],
+		((dtreg[0] >> 4) & 0x07) + 2,
+			((dtreg[1] >> 4) & 0x07) + 2,
+		(dtreg[0] & 0x07) + 2,
+			(dtreg[1] & 0x07) + 2
+	);
+
+}
+#endif
+
+static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	int rc = -ENODEV;
+	struct mem_ctl_info *mci;
+	struct i82975x_pvt *pvt;
+	void __iomem *mch_window;
+	u32 mchbar;
+	u32 drc[2];
+	struct i82975x_error_info discard;
+	int	chans;
+#ifdef i82975x_DEBUG_IOMEM
+	u8 c0drb[4];
+	u8 c1drb[4];
+#endif
+
+	debugf0("%s()\n", __func__);
+
+	pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar);
+	if (!(mchbar & 1)) {
+		debugf3("%s(): failed, MCHBAR disabled!\n", __func__);
+		goto fail0;
+	}
+	mchbar &= 0xffffc000;	/* bits 31:14 used for 16K window */
+	mch_window = ioremap_nocache(mchbar, 0x1000);
+
+#ifdef i82975x_DEBUG_IOMEM
+	i82975x_printk(KERN_INFO, "MCHBAR real = %0x, remapped = %p\n",
+					mchbar, mch_window);
+
+	c0drb[0] = readb(mch_window + I82975X_DRB_CH0R0);
+	c0drb[1] = readb(mch_window + I82975X_DRB_CH0R1);
+	c0drb[2] = readb(mch_window + I82975X_DRB_CH0R2);
+	c0drb[3] = readb(mch_window + I82975X_DRB_CH0R3);
+	c1drb[0] = readb(mch_window + I82975X_DRB_CH1R0);
+	c1drb[1] = readb(mch_window + I82975X_DRB_CH1R1);
+	c1drb[2] = readb(mch_window + I82975X_DRB_CH1R2);
+	c1drb[3] = readb(mch_window + I82975X_DRB_CH1R3);
+	i82975x_printk(KERN_INFO, "DRBCH0R0 = 0x%02x\n", c0drb[0]);
+	i82975x_printk(KERN_INFO, "DRBCH0R1 = 0x%02x\n", c0drb[1]);
+	i82975x_printk(KERN_INFO, "DRBCH0R2 = 0x%02x\n", c0drb[2]);
+	i82975x_printk(KERN_INFO, "DRBCH0R3 = 0x%02x\n", c0drb[3]);
+	i82975x_printk(KERN_INFO, "DRBCH1R0 = 0x%02x\n", c1drb[0]);
+	i82975x_printk(KERN_INFO, "DRBCH1R1 = 0x%02x\n", c1drb[1]);
+	i82975x_printk(KERN_INFO, "DRBCH1R2 = 0x%02x\n", c1drb[2]);
+	i82975x_printk(KERN_INFO, "DRBCH1R3 = 0x%02x\n", c1drb[3]);
+#endif
+
+	drc[0] = readl(mch_window + I82975X_DRC_CH0M0);
+	drc[1] = readl(mch_window + I82975X_DRC_CH1M0);
+#ifdef i82975x_DEBUG_IOMEM
+	i82975x_printk(KERN_INFO, "DRC_CH0 = %0x, %s\n", drc[0],
+			((drc[0] >> 21) & 3) == 1 ?
+				"ECC enabled" : "ECC disabled");
+	i82975x_printk(KERN_INFO, "DRC_CH1 = %0x, %s\n", drc[1],
+			((drc[1] >> 21) & 3) == 1 ?
+				"ECC enabled" : "ECC disabled");
+
+	i82975x_printk(KERN_INFO, "C0 BNKARC = %0x\n",
+		readw(mch_window + I82975X_C0BNKARC));
+	i82975x_printk(KERN_INFO, "C1 BNKARC = %0x\n",
+		readw(mch_window + I82975X_C1BNKARC));
+	i82975x_print_dram_timings(mch_window);
+	goto fail1;
+#endif
+	if (!(((drc[0] >> 21) & 3) == 1 || ((drc[1] >> 21) & 3) == 1)) {
+		i82975x_printk(KERN_INFO, "ECC disabled on both channels.\n");
+		goto fail1;
+	}
+
+	chans = dual_channel_active(mch_window) + 1;
+
+	/* assuming only one controller, index thus is 0 */
+	mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans),
+					chans, 0);
+	if (!mci) {
+		rc = -ENOMEM;
+		goto fail1;
+	}
+
+	debugf3("%s(): init mci\n", __func__);
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_DDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+	mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = I82975X_REVISION;
+	mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
+	mci->edac_check = i82975x_check;
+	mci->ctl_page_to_phys = NULL;
+	debugf3("%s(): init pvt\n", __func__);
+	pvt = (struct i82975x_pvt *) mci->pvt_info;
+	pvt->mch_window = mch_window;
+	i82975x_init_csrows(mci, pdev, mch_window);
+	i82975x_get_error_info(mci, &discard);  /* clear counters */
+
+	/* finalize this instance of memory controller with edac core */
+	if (edac_mc_add_mc(mci)) {
+		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+		goto fail2;
+	}
+
+	/* get this far and it's successful */
+	debugf3("%s(): success\n", __func__);
+	return 0;
+
+fail2:
+	edac_mc_free(mci);
+
+fail1:
+	iounmap(mch_window);
+fail0:
+	return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82975x_init_one(struct pci_dev *pdev,
+		const struct pci_device_id *ent)
+{
+	int rc;
+
+	debugf0("%s()\n", __func__);
+
+	if (pci_enable_device(pdev) < 0)
+		return -EIO;
+
+	rc = i82975x_probe1(pdev, ent->driver_data);
+
+	if (mci_pdev == NULL)
+		mci_pdev = pci_dev_get(pdev);
+
+	return rc;
+}
+
+static void __devexit i82975x_remove_one(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci;
+	struct i82975x_pvt *pvt;
+
+	debugf0("%s()\n", __func__);
+
+	mci = edac_mc_del_mc(&pdev->dev);
+	if (mci == NULL)
+		return;
+
+	pvt = mci->pvt_info;
+	if (pvt->mch_window)
+		iounmap(pvt->mch_window);
+
+	edac_mc_free(mci);
+}
+
+static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
+	{
+		PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		I82975X
+	},
+	{
+		0,
+	}	/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82975x_pci_tbl);
+
+static struct pci_driver i82975x_driver = {
+	.name = EDAC_MOD_STR,
+	.probe = i82975x_init_one,
+	.remove = __devexit_p(i82975x_remove_one),
+	.id_table = i82975x_pci_tbl,
+};
+
+static int __init i82975x_init(void)
+{
+	int pci_rc;
+
+	debugf3("%s()\n", __func__);
+
+	pci_rc = pci_register_driver(&i82975x_driver);
+	if (pci_rc < 0)
+		goto fail0;
+
+	if (mci_pdev == NULL) {
+		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_INTEL_82975_0, NULL);
+
+		if (!mci_pdev) {
+			debugf0("i82975x pci_get_device fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+
+		pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl);
+
+		if (pci_rc < 0) {
+			debugf0("i82975x init fail\n");
+			pci_rc = -ENODEV;
+			goto fail1;
+		}
+	}
+
+	return 0;
+
+fail1:
+	pci_unregister_driver(&i82975x_driver);
+
+fail0:
+	if (mci_pdev != NULL)
+		pci_dev_put(mci_pdev);
+
+	return pci_rc;
+}
+
+static void __exit i82975x_exit(void)
+{
+	debugf3("%s()\n", __func__);
+
+	pci_unregister_driver(&i82975x_driver);
+
+	if (!i82975x_registered) {
+		i82975x_remove_one(mci_pdev);
+		pci_dev_put(mci_pdev);
+	}
+}
+
+module_init(i82975x_init);
+module_exit(i82975x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>");
+MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
new file mode 100644
index 0000000..e66cdd4
--- /dev/null
+++ b/drivers/edac/pasemi_edac.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Author: Egor Martovetsky <egor@pasemi.com>
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient onchip memory controllers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include "edac_core.h"
+
+#define MODULE_NAME "pasemi_edac"
+
+#define MCCFG_MCEN				0x300
+#define   MCCFG_MCEN_MMC_EN			0x00000001
+#define MCCFG_ERRCOR				0x388
+#define   MCCFG_ERRCOR_RNK_FAIL_DET_EN		0x00000100
+#define   MCCFG_ERRCOR_ECC_GEN_EN		0x00000010
+#define   MCCFG_ERRCOR_ECC_CRR_EN		0x00000001
+#define MCCFG_SCRUB				0x384
+#define   MCCFG_SCRUB_RGLR_SCRB_EN		0x00000001
+#define MCDEBUG_ERRCTL1				0x728
+#define   MCDEBUG_ERRCTL1_RFL_LOG_EN		0x00080000
+#define   MCDEBUG_ERRCTL1_MBE_LOG_EN		0x00040000
+#define   MCDEBUG_ERRCTL1_SBE_LOG_EN		0x00020000
+#define MCDEBUG_ERRSTA				0x730
+#define   MCDEBUG_ERRSTA_RFL_STATUS		0x00000004
+#define   MCDEBUG_ERRSTA_MBE_STATUS		0x00000002
+#define   MCDEBUG_ERRSTA_SBE_STATUS		0x00000001
+#define MCDEBUG_ERRCNT1				0x734
+#define   MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO	0x00000080
+#define MCDEBUG_ERRLOG1A			0x738
+#define   MCDEBUG_ERRLOG1A_MERR_TYPE_M		0x30000000
+#define   MCDEBUG_ERRLOG1A_MERR_TYPE_NONE	0x00000000
+#define   MCDEBUG_ERRLOG1A_MERR_TYPE_SBE	0x10000000
+#define   MCDEBUG_ERRLOG1A_MERR_TYPE_MBE	0x20000000
+#define   MCDEBUG_ERRLOG1A_MERR_TYPE_RFL	0x30000000
+#define   MCDEBUG_ERRLOG1A_MERR_BA_M		0x00700000
+#define   MCDEBUG_ERRLOG1A_MERR_BA_S		20
+#define   MCDEBUG_ERRLOG1A_MERR_CS_M		0x00070000
+#define   MCDEBUG_ERRLOG1A_MERR_CS_S		16
+#define   MCDEBUG_ERRLOG1A_SYNDROME_M		0x0000ffff
+#define MCDRAM_RANKCFG				0x114
+#define   MCDRAM_RANKCFG_EN			0x00000001
+#define   MCDRAM_RANKCFG_TYPE_SIZE_M		0x000001c0
+#define   MCDRAM_RANKCFG_TYPE_SIZE_S		6
+
+#define PASEMI_EDAC_NR_CSROWS			8
+#define PASEMI_EDAC_NR_CHANS			1
+#define PASEMI_EDAC_ERROR_GRAIN			64
+
+static int last_page_in_mmc;
+static int system_mmc_id;
+
+
+static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci)
+{
+	struct pci_dev *pdev = to_pci_dev(mci->dev);
+	u32 tmp;
+
+	pci_read_config_dword(pdev, MCDEBUG_ERRSTA,
+			      &tmp);
+
+	tmp &= (MCDEBUG_ERRSTA_RFL_STATUS | MCDEBUG_ERRSTA_MBE_STATUS
+		| MCDEBUG_ERRSTA_SBE_STATUS);
+
+	if (tmp) {
+		if (tmp & MCDEBUG_ERRSTA_SBE_STATUS)
+			pci_write_config_dword(pdev, MCDEBUG_ERRCNT1,
+					       MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO);
+		pci_write_config_dword(pdev, MCDEBUG_ERRSTA, tmp);
+	}
+
+	return tmp;
+}
+
+static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
+{
+	struct pci_dev *pdev = to_pci_dev(mci->dev);
+	u32 errlog1a;
+	u32 cs;
+
+	if (!errsta)
+		return;
+
+	pci_read_config_dword(pdev, MCDEBUG_ERRLOG1A, &errlog1a);
+
+	cs = (errlog1a & MCDEBUG_ERRLOG1A_MERR_CS_M) >>
+		MCDEBUG_ERRLOG1A_MERR_CS_S;
+
+	/* uncorrectable/multi-bit errors */
+	if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
+		      MCDEBUG_ERRSTA_RFL_STATUS)) {
+		edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0,
+				  cs, mci->ctl_name);
+	}
+
+	/* correctable/single-bit errors */
+	if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) {
+		edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0,
+				  0, cs, 0, mci->ctl_name);
+	}
+}
+
+static void pasemi_edac_check(struct mem_ctl_info *mci)
+{
+	u32 errsta;
+
+	errsta = pasemi_edac_get_error_info(mci);
+	if (errsta)
+		pasemi_edac_process_error_info(mci, errsta);
+}
+
+static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
+				   struct pci_dev *pdev,
+				   enum edac_type edac_mode)
+{
+	struct csrow_info *csrow;
+	u32 rankcfg;
+	int index;
+
+	for (index = 0; index < mci->nr_csrows; index++) {
+		csrow = &mci->csrows[index];
+
+		pci_read_config_dword(pdev,
+				      MCDRAM_RANKCFG + (index * 12),
+				      &rankcfg);
+
+		if (!(rankcfg & MCDRAM_RANKCFG_EN))
+			continue;
+
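+		/* Translate the rank type/size field into the rank size
+		 * (128MB .. 2GB), expressed in pages.
+		 */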
+		switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
+			MCDRAM_RANKCFG_TYPE_SIZE_S) {
+		case 0:
+			csrow->nr_pages = 128 << (20 - PAGE_SHIFT);
+			break;
+		case 1:
+			csrow->nr_pages = 256 << (20 - PAGE_SHIFT);
+			break;
+		case 2:
+		case 3:
+			csrow->nr_pages = 512 << (20 - PAGE_SHIFT);
+			break;
+		case 4:
+			csrow->nr_pages = 1024 << (20 - PAGE_SHIFT);
+			break;
+		case 5:
+			csrow->nr_pages = 2048 << (20 - PAGE_SHIFT);
+			break;
+		default:
+			edac_mc_printk(mci, KERN_ERR,
+				"Unrecognized Rank Config. rankcfg=%u\n",
+				rankcfg);
+			return -EINVAL;
+		}
+
+		csrow->first_page = last_page_in_mmc;
+		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+		last_page_in_mmc += csrow->nr_pages;
+		csrow->page_mask = 0;
+		csrow->grain = PASEMI_EDAC_ERROR_GRAIN;
+		csrow->mtype = MEM_DDR;
+		csrow->dtype = DEV_UNKNOWN;
+		csrow->edac_mode = edac_mode;
+	}
+	return 0;
+}
+
+static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
+		const struct pci_device_id *ent)
+{
+	struct mem_ctl_info *mci = NULL;
+	u32 errctl1, errcor, scrub, mcen;
+
+	pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
+	if (!(mcen & MCCFG_MCEN_MMC_EN))
+		return -ENODEV;
+
+	/*
+	 * We should think about enabling other error detection later on
+	 */
+
+	pci_read_config_dword(pdev, MCDEBUG_ERRCTL1, &errctl1);
+	errctl1 |= MCDEBUG_ERRCTL1_SBE_LOG_EN |
+		MCDEBUG_ERRCTL1_MBE_LOG_EN |
+		MCDEBUG_ERRCTL1_RFL_LOG_EN;
+	pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
+
+	mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS,
+				system_mmc_id++);
+
+	if (mci == NULL)
+		return -ENOMEM;
+
+	pci_read_config_dword(pdev, MCCFG_ERRCOR, &errcor);
+	errcor |= MCCFG_ERRCOR_RNK_FAIL_DET_EN |
+		MCCFG_ERRCOR_ECC_GEN_EN |
+		MCCFG_ERRCOR_ECC_CRR_EN;
+
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+	mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ?
+		((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ?
+		 (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_EC) :
+		EDAC_FLAG_NONE;
+	mci->mod_name = MODULE_NAME;
+	mci->dev_name = pci_name(pdev);
+	mci->ctl_name = "pasemi,1682m-mc";
+	mci->edac_check = pasemi_edac_check;
+	mci->ctl_page_to_phys = NULL;
+	pci_read_config_dword(pdev, MCCFG_SCRUB, &scrub);
+	mci->scrub_cap = SCRUB_FLAG_HW_PROG | SCRUB_FLAG_HW_SRC;
+	mci->scrub_mode =
+		((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ? SCRUB_FLAG_HW_SRC : 0) |
+		((scrub & MCCFG_SCRUB_RGLR_SCRB_EN) ? SCRUB_FLAG_HW_PROG : 0);
+
+	if (pasemi_edac_init_csrows(mci, pdev,
+				    (mci->edac_cap & EDAC_FLAG_SECDED) ?
+				    EDAC_SECDED :
+				    ((mci->edac_cap & EDAC_FLAG_EC) ?
+				     EDAC_EC : EDAC_NONE)))
+		goto fail;
+
+	/*
+	 * Clear status
+	 */
+	pasemi_edac_get_error_info(mci);
+
+	if (edac_mc_add_mc(mci))
+		goto fail;
+
+	/* get this far and it's successful */
+	return 0;
+
+fail:
+	edac_mc_free(mci);
+	return -ENODEV;
+}
+
+static void __devexit pasemi_edac_remove(struct pci_dev *pdev)
+{
+	struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
+
+	if (!mci)
+		return;
+
+	edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id pasemi_edac_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa00a) },
+	{ }			/* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, pasemi_edac_pci_tbl);
+
+static struct pci_driver pasemi_edac_driver = {
+	.name = MODULE_NAME,
+	.probe = pasemi_edac_probe,
+	.remove = __devexit_p(pasemi_edac_remove),
+	.id_table = pasemi_edac_pci_tbl,
+};
+
+static int __init pasemi_edac_init(void)
+{
+	return pci_register_driver(&pasemi_edac_driver);
+}
+
+static void __exit pasemi_edac_exit(void)
+{
+	pci_unregister_driver(&pasemi_edac_driver);
+}
+
+module_init(pasemi_edac_init);
+module_exit(pasemi_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("MC support for PA Semi PA6T-1682M memory controller");
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index a49cf0a..e25f712 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -11,7 +11,7 @@
  *
  * Written with reference to 82600 High Integration Dual PCI System
  * Controller Data Book:
- * http://www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
+ * www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
  * references to this document given in []
  */
 
@@ -20,9 +20,9 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
-#include "edac_mc.h"
+#include "edac_core.h"
 
-#define R82600_REVISION	" Ver: 2.0.1 " __DATE__
+#define R82600_REVISION	" Ver: 2.0.2 " __DATE__
 #define EDAC_MOD_STR	"r82600_edac"
 
 #define r82600_printk(level, fmt, arg...) \
@@ -131,10 +131,12 @@
 	u32 eapr;
 };
 
-static unsigned int disable_hardware_scrub = 0;
+static unsigned int disable_hardware_scrub;
 
-static void r82600_get_error_info (struct mem_ctl_info *mci,
-		struct r82600_error_info *info)
+static struct edac_pci_ctl_info *r82600_pci;
+
+static void r82600_get_error_info(struct mem_ctl_info *mci,
+				struct r82600_error_info *info)
 {
 	struct pci_dev *pdev;
 
@@ -144,18 +146,19 @@
 	if (info->eapr & BIT(0))
 		/* Clear error to allow next error to be reported [p.62] */
 		pci_write_bits32(pdev, R82600_EAP,
-				((u32) BIT(0) & (u32) BIT(1)),
-				((u32) BIT(0) & (u32) BIT(1)));
+				 ((u32) BIT(0) & (u32) BIT(1)),
+				 ((u32) BIT(0) & (u32) BIT(1)));
 
 	if (info->eapr & BIT(1))
 		/* Clear error to allow next error to be reported [p.62] */
 		pci_write_bits32(pdev, R82600_EAP,
-				((u32) BIT(0) & (u32) BIT(1)),
-				((u32) BIT(0) & (u32) BIT(1)));
+				 ((u32) BIT(0) & (u32) BIT(1)),
+				 ((u32) BIT(0) & (u32) BIT(1)));
 }
 
-static int r82600_process_error_info (struct mem_ctl_info *mci,
-		struct r82600_error_info *info, int handle_errors)
+static int r82600_process_error_info(struct mem_ctl_info *mci,
+				struct r82600_error_info *info,
+				int handle_errors)
 {
 	int error_found;
 	u32 eapaddr, page;
@@ -172,25 +175,24 @@
 	 * granularity (upper 19 bits only)     */
 	page = eapaddr >> PAGE_SHIFT;
 
-	if (info->eapr & BIT(0)) {  /* CE? */
+	if (info->eapr & BIT(0)) {	/* CE? */
 		error_found = 1;
 
 		if (handle_errors)
-			edac_mc_handle_ce(mci, page, 0,  /* not avail */
+			edac_mc_handle_ce(mci, page, 0,	/* not avail */
 					syndrome,
 					edac_mc_find_csrow_by_page(mci, page),
-					0,  /* channel */
-					mci->ctl_name);
+					0, mci->ctl_name);
 	}
 
-	if (info->eapr & BIT(1)) {  /* UE? */
+	if (info->eapr & BIT(1)) {	/* UE? */
 		error_found = 1;
 
 		if (handle_errors)
 			/* 82600 doesn't give enough info */
 			edac_mc_handle_ue(mci, page, 0,
-				edac_mc_find_csrow_by_page(mci, page),
-				mci->ctl_name);
+					edac_mc_find_csrow_by_page(mci, page),
+					mci->ctl_name);
 	}
 
 	return error_found;
@@ -211,11 +213,11 @@
 }
 
 static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
-		u8 dramcr)
+			u8 dramcr)
 {
 	struct csrow_info *csrow;
 	int index;
-	u8 drbar;  /* SDRAM Row Boundry Address Register */
+	u8 drbar;		/* SDRAM Row Boundary Address Register */
 	u32 row_high_limit, row_high_limit_last;
 	u32 reg_sdram, ecc_on, row_base;
 
@@ -276,7 +278,7 @@
 	debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
 		sdram_refresh_rate);
 	debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
-	mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
+	mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0);
 
 	if (mci == NULL)
 		return -ENOMEM;
@@ -305,15 +307,16 @@
 	mci->mod_name = EDAC_MOD_STR;
 	mci->mod_ver = R82600_REVISION;
 	mci->ctl_name = "R82600";
+	mci->dev_name = pci_name(pdev);
 	mci->edac_check = r82600_check;
 	mci->ctl_page_to_phys = NULL;
 	r82600_init_csrows(mci, pdev, dramcr);
-	r82600_get_error_info(mci, &discard);  /* clear counters */
+	r82600_get_error_info(mci, &discard);	/* clear counters */
 
 	/* Here we assume that we will never see multiple instances of this
 	 * type of memory controller.  The ID is therefore hardcoded to 0.
 	 */
-	if (edac_mc_add_mc(mci,0)) {
+	if (edac_mc_add_mc(mci)) {
 		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
 		goto fail;
 	}
@@ -326,6 +329,17 @@
 		pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31));
 	}
 
+	/* allocating generic PCI control info */
+	r82600_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+	if (!r82600_pci) {
+		printk(KERN_WARNING
+			"%s(): Unable to create PCI control\n",
+			__func__);
+		printk(KERN_WARNING
+			"%s(): PCI error report via EDAC not setup\n",
+			__func__);
+	}
+
 	debugf3("%s(): success\n", __func__);
 	return 0;
 
@@ -336,7 +350,7 @@
 
 /* returns count (>= 0), or negative on error */
 static int __devinit r82600_init_one(struct pci_dev *pdev,
-		const struct pci_device_id *ent)
+				const struct pci_device_id *ent)
 {
 	debugf0("%s()\n", __func__);
 
@@ -350,6 +364,9 @@
 
 	debugf0("%s()\n", __func__);
 
+	if (r82600_pci)
+		edac_pci_release_generic_ctl(r82600_pci);
+
 	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
 		return;
 
@@ -358,11 +375,11 @@
 
 static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
 	{
-		PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
-	},
+	 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
+	 },
 	{
-		0,
-	}	/* 0 terminated list. */
+	 0,
+	 }			/* 0 terminated list. */
 };
 
 MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
@@ -389,7 +406,7 @@
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
-	"on behalf of EADS Astrium");
+		"on behalf of EADS Astrium");
 MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
 
 module_param(disable_hardware_scrub, bool, 0644);
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 41476ab..db70375 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -224,6 +224,7 @@
 	u32 val, old;
 
 	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
+	flush_writes(ohci);
 	msleep(2);
 	val = reg_read(ohci, OHCI1394_PhyControl);
 	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
@@ -586,7 +587,7 @@
 			break;
 
 		fw_notify("context_stop: still active (0x%08x)\n", reg);
-		msleep(1);
+		mdelay(1);
 	}
 }
 
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 7c53be0..fc98447 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -840,7 +840,6 @@
 		container_of(base_orb, struct sbp2_command_orb, base);
 	struct fw_unit *unit = orb->unit;
 	struct fw_device *device = fw_device(unit->device.parent);
-	struct scatterlist *sg;
 	int result;
 
 	if (status != NULL) {
@@ -876,11 +875,10 @@
 	dma_unmap_single(device->card->device, orb->base.request_bus,
 			 sizeof(orb->request), DMA_TO_DEVICE);
 
-	if (orb->cmd->use_sg > 0) {
-		sg = (struct scatterlist *)orb->cmd->request_buffer;
-		dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
+	if (scsi_sg_count(orb->cmd) > 0)
+		dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
+			     scsi_sg_count(orb->cmd),
 			     orb->cmd->sc_data_direction);
-	}
 
 	if (orb->page_table_bus != 0)
 		dma_unmap_single(device->card->device, orb->page_table_bus,
@@ -901,8 +899,8 @@
 	int sg_len, l, i, j, count;
 	dma_addr_t sg_addr;
 
-	sg = (struct scatterlist *)orb->cmd->request_buffer;
-	count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg,
+	sg = scsi_sglist(orb->cmd);
+	count = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
 			   orb->cmd->sc_data_direction);
 	if (count == 0)
 		goto fail;
@@ -971,7 +969,7 @@
 	return 0;
 
  fail_page_table:
-	dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
+	dma_unmap_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
 		     orb->cmd->sc_data_direction);
  fail:
 	return -ENOMEM;
@@ -1031,7 +1029,7 @@
 		orb->request.misc |=
 			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
 
-	if (cmd->use_sg && sbp2_command_orb_map_scatterlist(orb) < 0)
+	if (scsi_sg_count(cmd) && sbp2_command_orb_map_scatterlist(orb) < 0)
 		goto fail_mapping;
 
 	fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 80d0121..3ce8e2f 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -605,8 +605,10 @@
 	 * check is sufficient to ensure we don't send response to
 	 * broadcast packets or posted writes.
 	 */
-	if (request->ack != ACK_PENDING)
+	if (request->ack != ACK_PENDING) {
+		kfree(request);
 		return;
+	}
 
 	if (rcode == RCODE_COMPLETE)
 		fw_fill_response(&request->response, request->request_header,
@@ -628,11 +630,6 @@
 	unsigned long flags;
 	int tcode, destination, source;
 
-	if (p->payload_length > 2048) {
-		/* FIXME: send error response. */
-		return;
-	}
-
 	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
 		return;
 
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 5abed19..5ceaccd 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -123,6 +123,10 @@
 					  size_t length,
 					  void *callback_data);
 
+/*
+ * Important note:  The callback must guarantee that either fw_send_response()
+ * or kfree() is called on the @request.
+ */
 typedef void (*fw_address_callback_t)(struct fw_card *card,
 				      struct fw_request *request,
 				      int tcode, int destination, int source,
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 13eea47..dbdca6f 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -29,17 +29,34 @@
 	default n
 
 config SENSORS_ABITUGURU
-	tristate "Abit uGuru"
+	tristate "Abit uGuru (rev 1 & 2)"
 	depends on EXPERIMENTAL
 	help
-	  If you say yes here you get support for the Abit uGuru chips
-	  sensor part. The voltage and frequency control parts of the Abit
-	  uGuru are not supported. The Abit uGuru chip can be found on Abit
-	  uGuru featuring motherboards (most modern Abit motherboards).
+	  If you say yes here you get support for the sensor part of the first
+	  and second revision of the Abit uGuru chip. The voltage and frequency
+	  control parts of the Abit uGuru are not supported. The Abit uGuru
+	  chip can be found on Abit uGuru featuring motherboards (most modern
+	  Abit motherboards from before the end of 2005). For more info and a
+	  list of which motherboards have which revision, see
+	  Documentation/hwmon/abituguru
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called abituguru.
 
+config SENSORS_ABITUGURU3
+	tristate "Abit uGuru (rev 3)"
+	depends on HWMON && EXPERIMENTAL
+	help
+	  If you say yes here you get support for the sensor part of the
+	  third revision of the Abit uGuru chip. Only reading the sensors
+	  and their settings is supported. The third revision of the Abit
+	  uGuru chip can be found on recent Abit motherboards (since the end
+	  of 2005). For more info and a list of which motherboards have which
+	  revision, see Documentation/hwmon/abituguru3
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called abituguru3.
+
 config SENSORS_AD7418
 	tristate "Analog Devices AD7416, AD7417 and AD7418"
 	depends on I2C && EXPERIMENTAL
@@ -250,12 +267,10 @@
 
 config SENSORS_IT87
 	tristate "ITE IT87xx and compatibles"
-	depends on I2C
-	select I2C_ISA
 	select HWMON_VID
 	help
 	  If you say yes here you get support for ITE IT8705F, IT8712F,
-	  IT8716F and IT8718F sensor chips, and the SiS960 clone.
+	  IT8716F, IT8718F and IT8726F sensor chips, and the SiS960 clone.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called it87.
@@ -365,8 +380,8 @@
 	depends on I2C
 	help
 	  If you say yes here you get support for National Semiconductor LM90,
-	  LM86, LM89 and LM99, Analog Devices ADM1032 and Maxim MAX6657 and
-	  MAX6658 sensor chips.
+	  LM86, LM89 and LM99, Analog Devices ADM1032 and Maxim MAX6657,
+	  MAX6658, MAX6659, MAX6680 and MAX6681 sensor chips.
 
 	  The Analog Devices ADT7461 sensor chip is also supported, but only
 	  if found in ADM1032 compatibility mode.
@@ -384,6 +399,17 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called lm92.
 
+config SENSORS_LM93
+	tristate "National Semiconductor LM93 and compatibles"
+	depends on HWMON && I2C
+	select HWMON_VID
+	help
+	  If you say yes here you get support for National Semiconductor LM93
+	  sensor chips.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called lm93.
+
 config SENSORS_MAX1619
 	tristate "Maxim MAX1619 sensor chip"
 	depends on I2C
@@ -405,8 +431,6 @@
 
 config SENSORS_PC87360
 	tristate "National Semiconductor PC87360 family"
-	depends on I2C && EXPERIMENTAL
-	select I2C_ISA
 	select HWMON_VID
 	help
 	  If you say yes here you get access to the hardware monitoring
@@ -433,8 +457,7 @@
 
 config SENSORS_SIS5595
 	tristate "Silicon Integrated Systems Corp. SiS5595"
-	depends on I2C && PCI && EXPERIMENTAL
-	select I2C_ISA
+	depends on PCI
 	help
 	  If you say yes here you get support for the integrated sensors in
 	  SiS5595 South Bridges.
@@ -442,6 +465,18 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called sis5595.
 
+config SENSORS_DME1737
+	tristate "SMSC DME1737 and compatibles"
+	depends on I2C && EXPERIMENTAL
+	select HWMON_VID
+	help
+	  If you say yes here you get support for the hardware monitoring
+	  and fan control features of the SMSC DME1737 (and compatibles
+	  like the Asus A8000) Super-I/O chip.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called dme1737.
+
 config SENSORS_SMSC47M1
 	tristate "SMSC LPC47M10x and compatibles"
 	help
@@ -487,8 +522,7 @@
 
 config SENSORS_VIA686A
 	tristate "VIA686A"
-	depends on I2C && PCI
-	select I2C_ISA
+	depends on PCI
 	help
 	  If you say yes here you get support for the integrated sensors in
 	  Via 686A/B South Bridges.
@@ -509,9 +543,8 @@
 
 config SENSORS_VT8231
 	tristate "VIA VT8231"
-	depends on I2C && PCI && EXPERIMENTAL
+	depends on PCI
 	select HWMON_VID
-	select I2C_ISA
 	help
 	  If you say yes here then you get support for the integrated sensors
 	  in the VIA VT8231 device.
@@ -584,17 +617,16 @@
 	  will be called w83627hf.
 
 config SENSORS_W83627EHF
-	tristate "Winbond W83627EHF"
-	depends on I2C && EXPERIMENTAL
-	select I2C_ISA
+	tristate "Winbond W83627EHF/DHG"
+	select HWMON_VID
 	help
-	  If you say yes here you get preliminary support for the hardware
+	  If you say yes here you get support for the hardware
 	  monitoring functionality of the Winbond W83627EHF Super-I/O chip.
-	  Only fan and temperature inputs are supported at the moment, while
-	  the chip does much more than that.
 
 	  This driver also supports the W83627EHG, which is the lead-free
-	  version of the W83627EHF.
+	  version of the W83627EHF, and the W83627DHG, which is a similar
+	  chip suited for specific Intel processors that use PECI, such as
+	  the Core 2 Duo.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called w83627ehf.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index cfaf338..59f81fa 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -14,6 +14,7 @@
 obj-$(CONFIG_SENSORS_W83791D)	+= w83791d.o
 
 obj-$(CONFIG_SENSORS_ABITUGURU)	+= abituguru.o
+obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
 obj-$(CONFIG_SENSORS_AD7418)	+= ad7418.o
 obj-$(CONFIG_SENSORS_ADM1021)	+= adm1021.o
 obj-$(CONFIG_SENSORS_ADM1025)	+= adm1025.o
@@ -25,6 +26,7 @@
 obj-$(CONFIG_SENSORS_AMS)	+= ams/
 obj-$(CONFIG_SENSORS_ATXP1)	+= atxp1.o
 obj-$(CONFIG_SENSORS_CORETEMP)	+= coretemp.o
+obj-$(CONFIG_SENSORS_DME1737)	+= dme1737.o
 obj-$(CONFIG_SENSORS_DS1621)	+= ds1621.o
 obj-$(CONFIG_SENSORS_F71805F)	+= f71805f.o
 obj-$(CONFIG_SENSORS_FSCHER)	+= fscher.o
@@ -45,6 +47,7 @@
 obj-$(CONFIG_SENSORS_LM87)	+= lm87.o
 obj-$(CONFIG_SENSORS_LM90)	+= lm90.o
 obj-$(CONFIG_SENSORS_LM92)	+= lm92.o
+obj-$(CONFIG_SENSORS_LM93)	+= lm93.o
 obj-$(CONFIG_SENSORS_MAX1619)	+= max1619.o
 obj-$(CONFIG_SENSORS_MAX6650)	+= max6650.o
 obj-$(CONFIG_SENSORS_PC87360)	+= pc87360.o
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index bede4d9..d575ee9 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -16,9 +16,9 @@
     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 /*
-    This driver supports the sensor part of the custom Abit uGuru chip found
-    on Abit uGuru motherboards. Note: because of lack of specs the CPU / RAM /
-    etc voltage & frequency control is not supported!
+    This driver supports the sensor part of the first and second revision of
+    the custom Abit uGuru chip found on Abit uGuru motherboards. Note: because
+    of lack of specs, the CPU/RAM voltage & frequency control is not supported!
 */
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -31,6 +31,7 @@
 #include <linux/platform_device.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
+#include <linux/dmi.h>
 #include <asm/io.h>
 
 /* Banks */
@@ -418,7 +419,7 @@
 abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
 				   u8 sensor_addr)
 {
-	u8 val, buf[3];
+	u8 val, test_flag, buf[3];
 	int i, ret = -ENODEV; /* error is the most commonly used retval :| */
 
 	/* If overridden by the user, return the user-selected type */
@@ -436,7 +437,7 @@
 		return -ENODEV;
 
 	/* Test val is sane / usable for sensor type detection. */
-	if ((val < 10u) || (val > 240u)) {
+	if ((val < 10u) || (val > 250u)) {
 		printk(KERN_WARNING ABIT_UGURU_NAME
 			": bank1-sensor: %d reading (%d) too close to limits, "
 			"unable to determine sensor type, skipping sensor\n",
@@ -449,10 +450,20 @@
 
 	ABIT_UGURU_DEBUG(2, "testing bank1 sensor %d\n", (int)sensor_addr);
 	/* Volt sensor test, enable volt low alarm, set min value ridiculously
-	   high. If its a volt sensor this should always give us an alarm. */
-	buf[0] = ABIT_UGURU_VOLT_LOW_ALARM_ENABLE;
-	buf[1] = 245;
-	buf[2] = 250;
+	   high, or vice versa if the reading is very high. If it's a volt
+	   sensor, this should always give us an alarm. */
+	if (val <= 240u) {
+		buf[0] = ABIT_UGURU_VOLT_LOW_ALARM_ENABLE;
+		buf[1] = 245;
+		buf[2] = 250;
+		test_flag = ABIT_UGURU_VOLT_LOW_ALARM_FLAG;
+	} else {
+		buf[0] = ABIT_UGURU_VOLT_HIGH_ALARM_ENABLE;
+		buf[1] = 5;
+		buf[2] = 10;
+		test_flag = ABIT_UGURU_VOLT_HIGH_ALARM_FLAG;
+	}
+
 	if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr,
 			buf, 3) != 3)
 		goto abituguru_detect_bank1_sensor_type_exit;
@@ -469,13 +480,13 @@
 				sensor_addr, buf, 3,
 				ABIT_UGURU_MAX_RETRIES) != 3)
 			goto abituguru_detect_bank1_sensor_type_exit;
-		if (buf[0] & ABIT_UGURU_VOLT_LOW_ALARM_FLAG) {
+		if (buf[0] & test_flag) {
 			ABIT_UGURU_DEBUG(2, "  found volt sensor\n");
 			ret = ABIT_UGURU_IN_SENSOR;
 			goto abituguru_detect_bank1_sensor_type_exit;
 		} else
 			ABIT_UGURU_DEBUG(2, "  alarm raised during volt "
-				"sensor test, but volt low flag not set\n");
+				"sensor test, but volt range flag not set\n");
 	} else
 		ABIT_UGURU_DEBUG(2, "  alarm not raised during volt sensor "
 			"test\n");
@@ -1287,6 +1298,7 @@
 	for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++)
 		device_remove_file(&pdev->dev,
 			&abituguru_sysfs_attr[i].dev_attr);
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 	return res;
 }
@@ -1296,13 +1308,13 @@
 	int i;
 	struct abituguru_data *data = platform_get_drvdata(pdev);
 
-	platform_set_drvdata(pdev, NULL);
 	hwmon_device_unregister(data->class_dev);
 	for (i = 0; data->sysfs_attr[i].dev_attr.attr.name; i++)
 		device_remove_file(&pdev->dev, &data->sysfs_attr[i].dev_attr);
 	for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++)
 		device_remove_file(&pdev->dev,
 			&abituguru_sysfs_attr[i].dev_attr);
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 
 	return 0;
@@ -1436,6 +1448,15 @@
 	int address, err;
 	struct resource res = { .flags = IORESOURCE_IO };
 
+#ifdef CONFIG_DMI
+	char *board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
+
+	/* safety check, refuse to load on non Abit motherboards */
+	if (!force && (!board_vendor ||
+			strcmp(board_vendor, "http://www.abit.com.tw/")))
+		return -ENODEV;
+#endif
+
 	address = abituguru_detect();
 	if (address < 0)
 		return address;
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
new file mode 100644
index 0000000..a003d10
--- /dev/null
+++ b/drivers/hwmon/abituguru3.c
@@ -0,0 +1,1140 @@
+/*
+    abituguru3.c Copyright (c) 2006 Hans de Goede <j.w.r.degoede@hhs.nl>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+/*
+    This driver supports the sensor part of revision 3 of the custom Abit uGuru
+    chip found on newer Abit uGuru motherboards. Note: because of a lack of
+    specs, only reading the sensors and their settings is supported.
+*/
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <asm/io.h>
+
+/* uGuru3 bank addresses */
+#define ABIT_UGURU3_SETTINGS_BANK		0x01
+#define ABIT_UGURU3_SENSORS_BANK		0x08
+#define ABIT_UGURU3_MISC_BANK			0x09
+#define ABIT_UGURU3_ALARMS_START		0x1E
+#define ABIT_UGURU3_SETTINGS_START		0x24
+#define ABIT_UGURU3_VALUES_START		0x80
+#define ABIT_UGURU3_BOARD_ID			0x0A
+/* uGuru3 sensor bank flags */			     /* Alarm if: */
+#define ABIT_UGURU3_TEMP_HIGH_ALARM_ENABLE	0x01 /*  temp over warn */
+#define ABIT_UGURU3_VOLT_HIGH_ALARM_ENABLE	0x02 /*  volt over max */
+#define ABIT_UGURU3_VOLT_LOW_ALARM_ENABLE	0x04 /*  volt under min */
+#define ABIT_UGURU3_TEMP_HIGH_ALARM_FLAG	0x10 /* temp is over warn */
+#define ABIT_UGURU3_VOLT_HIGH_ALARM_FLAG	0x20 /* volt is over max */
+#define ABIT_UGURU3_VOLT_LOW_ALARM_FLAG		0x40 /* volt is under min */
+#define ABIT_UGURU3_FAN_LOW_ALARM_ENABLE	0x01 /*   fan under min */
+#define ABIT_UGURU3_BEEP_ENABLE			0x08 /* beep if alarm */
+#define ABIT_UGURU3_SHUTDOWN_ENABLE		0x80 /* shutdown if alarm */
+/* sensor types */
+#define ABIT_UGURU3_IN_SENSOR			0
+#define ABIT_UGURU3_TEMP_SENSOR			1
+#define ABIT_UGURU3_FAN_SENSOR			2
+
+/* Timeouts / Retries: if these turn out to need a lot of fiddling we could
+   convert them to params. They were determined by trial and error. I assume
+   they are CPU-speed independent, since the ISA bus and not the CPU should
+   be the bottleneck. */
+#define ABIT_UGURU3_WAIT_TIMEOUT		250
+/* Normally the 0xAC at the end of synchronize() is reported after the
+   first read, but sometimes not and we need to poll */
+#define ABIT_UGURU3_SYNCHRONIZE_TIMEOUT		5
+/* utility macros */
+#define ABIT_UGURU3_NAME			"abituguru3"
+#define ABIT_UGURU3_DEBUG(format, arg...)	\
+	if (verbose)				\
+		printk(KERN_DEBUG ABIT_UGURU3_NAME ": "	format , ## arg)
+
+/* Macros to help calculate the sysfs_names array length */
+#define ABIT_UGURU3_MAX_NO_SENSORS 26
+/* sum of strlen +1 of: in??_input\0, in??_{min,max}\0, in??_{min,max}_alarm\0,
+   in??_{min,max}_alarm_enable\0, in??_beep\0, in??_shutdown\0, in??_label\0 */
+#define ABIT_UGURU3_IN_NAMES_LENGTH (11 + 2 * 9 + 2 * 15 + 2 * 22 + 10 + 14 + 11)
+/* sum of strlen +1 of: temp??_input\0, temp??_max\0, temp??_crit\0,
+   temp??_alarm\0, temp??_alarm_enable\0, temp??_beep\0, temp??_shutdown\0,
+   temp??_label\0 */
+#define ABIT_UGURU3_TEMP_NAMES_LENGTH (13 + 11 + 12 + 13 + 20 + 12 + 16 + 13)
+/* sum of strlen +1 of: fan??_input\0, fan??_min\0, fan??_alarm\0,
+   fan??_alarm_enable\0, fan??_beep\0, fan??_shutdown\0, fan??_label\0 */
+#define ABIT_UGURU3_FAN_NAMES_LENGTH (12 + 10 + 12 + 19 + 11 + 15 + 12)
+/* Worst case scenario 16 in sensors (longest names_length) and the rest
+   temp sensors (second longest names_length). */
+#define ABIT_UGURU3_SYSFS_NAMES_LENGTH (16 * ABIT_UGURU3_IN_NAMES_LENGTH + \
+	(ABIT_UGURU3_MAX_NO_SENSORS - 16) * ABIT_UGURU3_TEMP_NAMES_LENGTH)
+
+/* All the macros below are named identically to the openguru2 program
+   reverse engineered by Louis Kruger, hence the names might not be 100%
+   logical. I could come up with better names, but I prefer keeping the names
+   identical so that this driver can be compared with his work more easily. */
+/* Two i/o-ports are used by uGuru */
+#define ABIT_UGURU3_BASE			0x00E0
+#define ABIT_UGURU3_CMD				0x00
+#define ABIT_UGURU3_DATA			0x04
+#define ABIT_UGURU3_REGION_LENGTH		5
+/* The wait_xxx functions return this on success and the last contents
+   of the DATA register (0-255) on failure. */
+#define ABIT_UGURU3_SUCCESS			-1
+/* uGuru status flags */
+#define ABIT_UGURU3_STATUS_READY_FOR_READ	0x01
+#define ABIT_UGURU3_STATUS_BUSY			0x02
+
+
+/* Structures */
+struct abituguru3_sensor_info {
+	const char *name;
+	int port;
+	int type;
+	int multiplier;
+	int divisor;
+	int offset;
+};
+
+struct abituguru3_motherboard_info {
+	u16 id;
+	const char *name;
+	/* + 1 -> end of sensors indicated by a sensor with name == NULL */
+	struct abituguru3_sensor_info sensors[ABIT_UGURU3_MAX_NO_SENSORS + 1];
+};
+
+/* For the Abit uGuru, we need to keep some data in memory.
+   The structure is dynamically allocated at the same time as a new
+   abituguru3 device is allocated. */
+struct abituguru3_data {
+	struct class_device *class_dev; /* hwmon registered device */
+	struct mutex update_lock;	/* protect access to data and uGuru */
+	unsigned short addr;		/* uguru base address */
+	char valid;			/* !=0 if following fields are valid */
+	unsigned long last_updated;	/* In jiffies */
+
+	/* For convenience the sysfs attrs and their names are generated
+	   automatically. We have max 10 entries per sensor (for in sensors) */
+	struct sensor_device_attribute_2 sysfs_attr[ABIT_UGURU3_MAX_NO_SENSORS
+		* 10];
+
+	/* Buffer to store the dynamically generated sysfs names */
+	char sysfs_names[ABIT_UGURU3_SYSFS_NAMES_LENGTH];
+
+	/* Pointer to the sensors info for the detected motherboard */
+	const struct abituguru3_sensor_info *sensors;
+
+	/* The abituguru3 supports up to 48 sensors, and thus has register
+	   sets for 48 sensors; for convenience / simplicity of the code
+	   we always read and store all registers for all 48 sensors */
+
+	/* Alarms for all 48 sensors (1 bit per sensor) */
+	u8 alarms[48/8];
+
+	/* Value of all 48 sensors */
+	u8 value[48];
+
+	/* Settings of all 48 sensors; note that in and temp sensors (the
+	   first 32 sensors) have 3 bytes of settings, while fans only have
+	   2 bytes; for convenience we use 3 bytes for all sensors */
+	u8 settings[48][3];
+};
+
+
+/* Constants */
+static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
+	{ 0x000C, "unknown", {
+		{ "CPU Core",		 0, 0, 10, 1, 0 },
+		{ "DDR",		 1, 0, 10, 1, 0 },
+		{ "DDR VTT",		 2, 0, 10, 1, 0 },
+		{ "CPU VTT 1.2V",	 3, 0, 10, 1, 0 },
+		{ "MCH & PCIE 1.5V",	 4, 0, 10, 1, 0 },
+		{ "MCH 2.5V",		 5, 0, 20, 1, 0 },
+		{ "ICH 1.05V",		 6, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	 7, 0, 60, 1, 0 },
+		{ "ATX +12V (4-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "+3.3V",		10, 0, 20, 1, 0 },
+		{ "5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "System ",		25, 1, 1, 1, 0 },
+		{ "PWM",		26, 1, 1, 1, 0 },
+		{ "CPU Fan",		32, 2, 60, 1, 0 },
+		{ "NB Fan",		33, 2, 60, 1, 0 },
+		{ "SYS FAN",		34, 2, 60, 1, 0 },
+		{ "AUX1 Fan",		35, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x000D, "Abit AW8", {
+		{ "CPU Core",		 0, 0, 10, 1, 0 },
+		{ "DDR",		 1, 0, 10, 1, 0 },
+		{ "DDR VTT",		 2, 0, 10, 1, 0 },
+		{ "CPU VTT 1.2V",	 3, 0, 10, 1, 0 },
+		{ "MCH & PCIE 1.5V",	 4, 0, 10, 1, 0 },
+		{ "MCH 2.5V",		 5, 0, 20, 1, 0 },
+		{ "ICH 1.05V",		 6, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	 7, 0, 60, 1, 0 },
+		{ "ATX +12V (4-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "+3.3V",		10, 0, 20, 1, 0 },
+		{ "5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "System ",		25, 1, 1, 1, 0 },
+		{ "PWM1",		26, 1, 1, 1, 0 },
+		{ "PWM2",		27, 1, 1, 1, 0 },
+		{ "PWM3",		28, 1, 1, 1, 0 },
+		{ "PWM4",		29, 1, 1, 1, 0 },
+		{ "CPU Fan",		32, 2, 60, 1, 0 },
+		{ "NB Fan",		33, 2, 60, 1, 0 },
+		{ "SYS Fan",		34, 2, 60, 1, 0 },
+		{ "AUX1 Fan",		35, 2, 60, 1, 0 },
+		{ "AUX2 Fan",		36, 2, 60, 1, 0 },
+		{ "AUX3 Fan",		37, 2, 60, 1, 0 },
+		{ "AUX4 Fan",		38, 2, 60, 1, 0 },
+		{ "AUX5 Fan",		39, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x000E, "AL-8", {
+		{ "CPU Core",		 0, 0, 10, 1, 0 },
+		{ "DDR",		 1, 0, 10, 1, 0 },
+		{ "DDR VTT",		 2, 0, 10, 1, 0 },
+		{ "CPU VTT 1.2V",	 3, 0, 10, 1, 0 },
+		{ "MCH & PCIE 1.5V",	 4, 0, 10, 1, 0 },
+		{ "MCH 2.5V",		 5, 0, 20, 1, 0 },
+		{ "ICH 1.05V",		 6, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	 7, 0, 60, 1, 0 },
+		{ "ATX +12V (4-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "+3.3V",		10, 0, 20, 1, 0 },
+		{ "5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "System ",		25, 1, 1, 1, 0 },
+		{ "PWM",		26, 1, 1, 1, 0 },
+		{ "CPU Fan",		32, 2, 60, 1, 0 },
+		{ "NB Fan",		33, 2, 60, 1, 0 },
+		{ "SYS Fan",		34, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x000F, "unknown", {
+		{ "CPU Core",		 0, 0, 10, 1, 0 },
+		{ "DDR",		 1, 0, 10, 1, 0 },
+		{ "DDR VTT",		 2, 0, 10, 1, 0 },
+		{ "CPU VTT 1.2V",	 3, 0, 10, 1, 0 },
+		{ "MCH & PCIE 1.5V",	 4, 0, 10, 1, 0 },
+		{ "MCH 2.5V",		 5, 0, 20, 1, 0 },
+		{ "ICH 1.05V",		 6, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	 7, 0, 60, 1, 0 },
+		{ "ATX +12V (4-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "+3.3V",		10, 0, 20, 1, 0 },
+		{ "5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "System ",		25, 1, 1, 1, 0 },
+		{ "PWM",		26, 1, 1, 1, 0 },
+		{ "CPU Fan",		32, 2, 60, 1, 0 },
+		{ "NB Fan",		33, 2, 60, 1, 0 },
+		{ "SYS Fan",		34, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x0010, "Abit NI8 SLI GR", {
+		{ "CPU Core",		 0, 0, 10, 1, 0 },
+		{ "DDR",		 1, 0, 10, 1, 0 },
+		{ "DDR VTT",		 2, 0, 10, 1, 0 },
+		{ "CPU VTT 1.2V",	 3, 0, 10, 1, 0 },
+		{ "NB 1.4V",		 4, 0, 10, 1, 0 },
+		{ "SB 1.5V",		 6, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	 7, 0, 60, 1, 0 },
+		{ "ATX +12V (4-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "+3.3V",		10, 0, 20, 1, 0 },
+		{ "5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "SYS",		25, 1, 1, 1, 0 },
+		{ "PWM",		26, 1, 1, 1, 0 },
+		{ "CPU Fan",		32, 2, 60, 1, 0 },
+		{ "NB Fan",		33, 2, 60, 1, 0 },
+		{ "SYS Fan",		34, 2, 60, 1, 0 },
+		{ "AUX1 Fan",		35, 2, 60, 1, 0 },
+		{ "OTES1 Fan",		36, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x0011, "Abit AT8 32X", {
+		{ "CPU Core",		 0, 0, 10, 1, 0 },
+		{ "DDR",		 1, 0, 20, 1, 0 },
+		{ "DDR VTT",		 2, 0, 10, 1, 0 },
+		{ "CPU VDDA 2.5V",	 6, 0, 20, 1, 0 },
+		{ "NB 1.8V",		 4, 0, 10, 1, 0 },
+		{ "NB 1.8V Dual",	 5, 0, 10, 1, 0 },
+		{ "HTV 1.2",		 3, 0, 10, 1, 0 },
+		{ "PCIE 1.2V",		12, 0, 10, 1, 0 },
+		{ "NB 1.2V",		13, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	 7, 0, 60, 1, 0 },
+		{ "ATX +12V (4-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "+3.3V",		10, 0, 20, 1, 0 },
+		{ "5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "NB",			25, 1, 1, 1, 0 },
+		{ "System",		26, 1, 1, 1, 0 },
+		{ "PWM",		27, 1, 1, 1, 0 },
+		{ "CPU Fan",		32, 2, 60, 1, 0 },
+		{ "NB Fan",		33, 2, 60, 1, 0 },
+		{ "SYS Fan",		34, 2, 60, 1, 0 },
+		{ "AUX1 Fan",		35, 2, 60, 1, 0 },
+		{ "AUX2 Fan",		36, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x0012, "Abit AN8 32X", {
+		{ "CPU Core",		 0, 0, 10, 1, 0 },
+		{ "DDR",		 1, 0, 20, 1, 0 },
+		{ "DDR VTT",		 2, 0, 10, 1, 0 },
+		{ "HyperTransport",	 3, 0, 10, 1, 0 },
+		{ "CPU VDDA 2.5V",	 5, 0, 20, 1, 0 },
+		{ "NB",			 4, 0, 10, 1, 0 },
+		{ "SB",			 6, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	 7, 0, 60, 1, 0 },
+		{ "ATX +12V (4-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "+3.3V",		10, 0, 20, 1, 0 },
+		{ "5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "SYS",		25, 1, 1, 1, 0 },
+		{ "PWM",		26, 1, 1, 1, 0 },
+		{ "CPU Fan",		32, 2, 60, 1, 0 },
+		{ "NB Fan",		33, 2, 60, 1, 0 },
+		{ "SYS Fan",		34, 2, 60, 1, 0 },
+		{ "AUX1 Fan",		36, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x0013, "unknown", {
+		{ "CPU Core",		 0, 0, 10, 1, 0 },
+		{ "DDR",		 1, 0, 10, 1, 0 },
+		{ "DDR VTT",		 2, 0, 10, 1, 0 },
+		{ "CPU VTT 1.2V",	 3, 0, 10, 1, 0 },
+		{ "MCH & PCIE 1.5V",	 4, 0, 10, 1, 0 },
+		{ "MCH 2.5V",		 5, 0, 20, 1, 0 },
+		{ "ICH 1.05V",		 6, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	 7, 0, 60, 1, 0 },
+		{ "ATX +12V (4-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "+3.3V",		10, 0, 20, 1, 0 },
+		{ "5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "System ",		25, 1, 1, 1, 0 },
+		{ "PWM1",		26, 1, 1, 1, 0 },
+		{ "PWM2",		27, 1, 1, 1, 0 },
+		{ "PWM3",		28, 1, 1, 1, 0 },
+		{ "PWM4",		29, 1, 1, 1, 0 },
+		{ "CPU Fan",		32, 2, 60, 1, 0 },
+		{ "NB Fan",		33, 2, 60, 1, 0 },
+		{ "SYS Fan",		34, 2, 60, 1, 0 },
+		{ "AUX1 Fan",		35, 2, 60, 1, 0 },
+		{ "AUX2 Fan",		36, 2, 60, 1, 0 },
+		{ "AUX3 Fan",		37, 2, 60, 1, 0 },
+		{ "AUX4 Fan",		38, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x0014, "Abit AB9 Pro", {
+		{ "CPU Core",		 0, 0, 10, 1, 0 },
+		{ "DDR",		 1, 0, 10, 1, 0 },
+		{ "DDR VTT",		 2, 0, 10, 1, 0 },
+		{ "CPU VTT 1.2V",	 3, 0, 10, 1, 0 },
+		{ "MCH & PCIE 1.5V",	 4, 0, 10, 1, 0 },
+		{ "MCH 2.5V",		 5, 0, 20, 1, 0 },
+		{ "ICH 1.05V",		 6, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	 7, 0, 60, 1, 0 },
+		{ "ATX +12V (4-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "+3.3V",		10, 0, 20, 1, 0 },
+		{ "5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "System ",		25, 1, 1, 1, 0 },
+		{ "PWM",		26, 1, 1, 1, 0 },
+		{ "CPU Fan",		32, 2, 60, 1, 0 },
+		{ "NB Fan",		33, 2, 60, 1, 0 },
+		{ "SYS Fan",		34, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x0015, "unknown", {
+		{ "CPU Core",		 0, 0, 10, 1, 0 },
+		{ "DDR",		 1, 0, 20, 1, 0 },
+		{ "DDR VTT",		 2, 0, 10, 1, 0 },
+		{ "HyperTransport",	 3, 0, 10, 1, 0 },
+		{ "CPU VDDA 2.5V",	 5, 0, 20, 1, 0 },
+		{ "NB",			 4, 0, 10, 1, 0 },
+		{ "SB",			 6, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	 7, 0, 60, 1, 0 },
+		{ "ATX +12V (4-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "+3.3V",		10, 0, 20, 1, 0 },
+		{ "5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "SYS",		25, 1, 1, 1, 0 },
+		{ "PWM",		26, 1, 1, 1, 0 },
+		{ "CPU Fan",		32, 2, 60, 1, 0 },
+		{ "NB Fan",		33, 2, 60, 1, 0 },
+		{ "SYS Fan",		34, 2, 60, 1, 0 },
+		{ "AUX1 Fan",		33, 2, 60, 1, 0 },
+		{ "AUX2 Fan",		35, 2, 60, 1, 0 },
+		{ "AUX3 Fan",		36, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x0016, "AW9D-MAX", {
+		{ "CPU Core",		 0, 0, 10, 1, 0 },
+		{ "DDR2",		 1, 0, 20, 1, 0 },
+		{ "DDR2 VTT",		 2, 0, 10, 1, 0 },
+		{ "CPU VTT 1.2V",	 3, 0, 10, 1, 0 },
+		{ "MCH & PCIE 1.5V",	 4, 0, 10, 1, 0 },
+		{ "MCH 2.5V",		 5, 0, 20, 1, 0 },
+		{ "ICH 1.05V",		 6, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	 7, 0, 60, 1, 0 },
+		{ "ATX +12V (4-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "+3.3V",		10, 0, 20, 1, 0 },
+		{ "5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "System ",		25, 1, 1, 1, 0 },
+		{ "PWM1",		26, 1, 1, 1, 0 },
+		{ "PWM2",		27, 1, 1, 1, 0 },
+		{ "PWM3",		28, 1, 1, 1, 0 },
+		{ "PWM4",		29, 1, 1, 1, 0 },
+		{ "CPU Fan",		32, 2, 60, 1, 0 },
+		{ "NB Fan",		33, 2, 60, 1, 0 },
+		{ "SYS Fan",		34, 2, 60, 1, 0 },
+		{ "AUX1 Fan",		35, 2, 60, 1, 0 },
+		{ "AUX2 Fan",		36, 2, 60, 1, 0 },
+		{ "AUX3 Fan",		37, 2, 60, 1, 0 },
+		{ "OTES1 Fan",		38, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x0017, "unknown", {
+		{ "CPU Core",		 0, 0, 10, 1, 0 },
+		{ "DDR2",		 1, 0, 20, 1, 0 },
+		{ "DDR2 VTT",		 2, 0, 10, 1, 0 },
+		{ "HyperTransport",	 3, 0, 10, 1, 0 },
+		{ "CPU VDDA 2.5V",	 6, 0, 20, 1, 0 },
+		{ "NB 1.8V",		 4, 0, 10, 1, 0 },
+		{ "NB 1.2V ",		13, 0, 10, 1, 0 },
+		{ "SB 1.2V",		 5, 0, 10, 1, 0 },
+		{ "PCIE 1.2V",		12, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	 7, 0, 60, 1, 0 },
+		{ "ATX +12V (4-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "ATX +3.3V",		10, 0, 20, 1, 0 },
+		{ "ATX 5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "System ",		26, 1, 1, 1, 0 },
+		{ "PWM",		27, 1, 1, 1, 0 },
+		{ "CPU FAN",		32, 2, 60, 1, 0 },
+		{ "SYS FAN",		34, 2, 60, 1, 0 },
+		{ "AUX1 FAN",		35, 2, 60, 1, 0 },
+		{ "AUX2 FAN",		36, 2, 60, 1, 0 },
+		{ "AUX3 FAN",		37, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x0018, "unknown", {
+		{ "CPU Core",		 0, 0, 10, 1, 0 },
+		{ "DDR2",		 1, 0, 20, 1, 0 },
+		{ "DDR2 VTT",		 2, 0, 10, 1, 0 },
+		{ "CPU VTT",		 3, 0, 10, 1, 0 },
+		{ "MCH 1.25V",		 4, 0, 10, 1, 0 },
+		{ "ICHIO 1.5V",		 5, 0, 10, 1, 0 },
+		{ "ICH 1.05V",		 6, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	 7, 0, 60, 1, 0 },
+		{ "ATX +12V (4-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "+3.3V",		10, 0, 20, 1, 0 },
+		{ "5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "System ",		25, 1, 1, 1, 0 },
+		{ "PWM Phase1",		26, 1, 1, 1, 0 },
+		{ "PWM Phase2",		27, 1, 1, 1, 0 },
+		{ "PWM Phase3",		28, 1, 1, 1, 0 },
+		{ "PWM Phase4",		29, 1, 1, 1, 0 },
+		{ "PWM Phase5",		30, 1, 1, 1, 0 },
+		{ "CPU Fan",		32, 2, 60, 1, 0 },
+		{ "SYS Fan",		34, 2, 60, 1, 0 },
+		{ "AUX1 Fan",		33, 2, 60, 1, 0 },
+		{ "AUX2 Fan",		35, 2, 60, 1, 0 },
+		{ "AUX3 Fan",		36, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x0019, "unknown", {
+		{ "CPU Core",		 7, 0, 10, 1, 0 },
+		{ "DDR2",		13, 0, 20, 1, 0 },
+		{ "DDR2 VTT",		14, 0, 10, 1, 0 },
+		{ "CPU VTT",		 3, 0, 20, 1, 0 },
+		{ "NB 1.2V ",		 4, 0, 10, 1, 0 },
+		{ "SB 1.5V",		 6, 0, 10, 1, 0 },
+		{ "HyperTransport",	 5, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	12, 0, 60, 1, 0 },
+		{ "ATX +12V (4-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "ATX +3.3V",		10, 0, 20, 1, 0 },
+		{ "ATX 5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "System ",		25, 1, 1, 1, 0 },
+		{ "PWM Phase1",		26, 1, 1, 1, 0 },
+		{ "PWM Phase2",		27, 1, 1, 1, 0 },
+		{ "PWM Phase3",		28, 1, 1, 1, 0 },
+		{ "PWM Phase4",		29, 1, 1, 1, 0 },
+		{ "PWM Phase5",		30, 1, 1, 1, 0 },
+		{ "CPU FAN",		32, 2, 60, 1, 0 },
+		{ "SYS FAN",		34, 2, 60, 1, 0 },
+		{ "AUX1 FAN",		33, 2, 60, 1, 0 },
+		{ "AUX2 FAN",		35, 2, 60, 1, 0 },
+		{ "AUX3 FAN",		36, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x001A, "unknown", {
+		{ "CPU Core",		 0, 0, 10, 1, 0 },
+		{ "DDR2",		 1, 0, 20, 1, 0 },
+		{ "DDR2 VTT",		 2, 0, 10, 1, 0 },
+		{ "CPU VTT 1.2V",	 3, 0, 10, 1, 0 },
+		{ "MCH 1.25V",		 4, 0, 10, 1, 0 },
+		{ "ICHIO 1.5V",		 5, 0, 10, 1, 0 },
+		{ "ICH 1.05V",		 6, 0, 10, 1, 0 },
+		{ "ATX +12V (24-Pin)",	 7, 0, 60, 1, 0 },
+		{ "ATX +12V (8-pin)",	 8, 0, 60, 1, 0 },
+		{ "ATX +5V",		 9, 0, 30, 1, 0 },
+		{ "+3.3V",		10, 0, 20, 1, 0 },
+		{ "5VSB",		11, 0, 30, 1, 0 },
+		{ "CPU",		24, 1, 1, 1, 0 },
+		{ "System ",		25, 1, 1, 1, 0 },
+		{ "PWM ",		26, 1, 1, 1, 0 },
+		{ "PWM Phase2",		27, 1, 1, 1, 0 },
+		{ "PWM Phase3",		28, 1, 1, 1, 0 },
+		{ "PWM Phase4",		29, 1, 1, 1, 0 },
+		{ "PWM Phase5",		30, 1, 1, 1, 0 },
+		{ "CPU Fan",		32, 2, 60, 1, 0 },
+		{ "SYS Fan",		34, 2, 60, 1, 0 },
+		{ "AUX1 Fan",		33, 2, 60, 1, 0 },
+		{ "AUX2 Fan",		35, 2, 60, 1, 0 },
+		{ "AUX3 Fan",		36, 2, 60, 1, 0 },
+		{ NULL, 0, 0, 0, 0, 0 } }
+	},
+	{ 0x0000, NULL, { { NULL, 0, 0, 0, 0, 0 } } }
+};
+
+
+/* Insmod parameters */
+static int force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Set to one to force detection.");
+/* Default verbose is 1, since this driver is still in the testing phase */
+static int verbose = 1;
+module_param(verbose, bool, 0644);
+MODULE_PARM_DESC(verbose, "Enable/disable verbose error reporting");
+
+
+/* wait while the uguru is busy (usually after a write) */
+static int abituguru3_wait_while_busy(struct abituguru3_data *data)
+{
+	u8 x;
+	int timeout = ABIT_UGURU3_WAIT_TIMEOUT;
+
+	while ((x = inb_p(data->addr + ABIT_UGURU3_DATA)) &
+			ABIT_UGURU3_STATUS_BUSY) {
+		timeout--;
+		if (timeout == 0)
+			return x;
+		/* sleep a bit before our last try, to give the uGuru3 one
+		   last chance to respond. */
+		if (timeout == 1)
+			msleep(1);
+	}
+	return ABIT_UGURU3_SUCCESS;
+}
+
+/* wait till uguru is ready to be read */
+static int abituguru3_wait_for_read(struct abituguru3_data *data)
+{
+	u8 x;
+	int timeout = ABIT_UGURU3_WAIT_TIMEOUT;
+
+	while (!((x = inb_p(data->addr + ABIT_UGURU3_DATA)) &
+			ABIT_UGURU3_STATUS_READY_FOR_READ)) {
+		timeout--;
+		if (timeout == 0)
+			return x;
+		/* sleep a bit before our last try, to give the uGuru3 one
+		   last chance to respond. */
+		if (timeout == 1)
+			msleep(1);
+	}
+	return ABIT_UGURU3_SUCCESS;
+}
+
+/* This synchronizes us with the uGuru3's protocol state machine; this
+   must be done before each command. */
+static int abituguru3_synchronize(struct abituguru3_data *data)
+{
+	int x, timeout = ABIT_UGURU3_SYNCHRONIZE_TIMEOUT;
+
+	if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+		ABIT_UGURU3_DEBUG("synchronize timeout during initial busy "
+			"wait, status: 0x%02x\n", x);
+		return -EIO;
+	}
+
+	outb(0x20, data->addr + ABIT_UGURU3_DATA);
+	if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+		ABIT_UGURU3_DEBUG("synchronize timeout after sending 0x20, "
+			"status: 0x%02x\n", x);
+		return -EIO;
+	}
+
+	outb(0x10, data->addr + ABIT_UGURU3_CMD);
+	if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+		ABIT_UGURU3_DEBUG("synchronize timeout after sending 0x10, "
+			"status: 0x%02x\n", x);
+		return -EIO;
+	}
+
+	outb(0x00, data->addr + ABIT_UGURU3_CMD);
+	if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+		ABIT_UGURU3_DEBUG("synchronize timeout after sending 0x00, "
+			"status: 0x%02x\n", x);
+		return -EIO;
+	}
+
+	if ((x = abituguru3_wait_for_read(data)) != ABIT_UGURU3_SUCCESS) {
+		ABIT_UGURU3_DEBUG("synchronize timeout waiting for read, "
+			"status: 0x%02x\n", x);
+		return -EIO;
+	}
+
+	while ((x = inb(data->addr + ABIT_UGURU3_CMD)) != 0xAC) {
+		timeout--;
+		if (timeout == 0) {
+			ABIT_UGURU3_DEBUG("synchronize timeout cmd does not "
+				"hold 0xAC after synchronize, cmd: 0x%02x\n",
+				x);
+			return -EIO;
+		}
+		msleep(1);
+	}
+	return 0;
+}
+
+/* Read count bytes from the given offset in the given bank and store the
+   result in buf */
+static int abituguru3_read(struct abituguru3_data *data, u8 bank, u8 offset,
+	u8 count, u8 *buf)
+{
+	int i, x;
+
+	if ((x = abituguru3_synchronize(data)))
+		return x;
+
+	outb(0x1A, data->addr + ABIT_UGURU3_DATA);
+	if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+		ABIT_UGURU3_DEBUG("read from 0x%02x:0x%02x timed out after "
+			"sending 0x1A, status: 0x%02x\n", (unsigned int)bank,
+			(unsigned int)offset, x);
+		return -EIO;
+	}
+
+	outb(bank, data->addr + ABIT_UGURU3_CMD);
+	if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+		ABIT_UGURU3_DEBUG("read from 0x%02x:0x%02x timed out after "
+			"sending the bank, status: 0x%02x\n",
+			(unsigned int)bank, (unsigned int)offset, x);
+		return -EIO;
+	}
+
+	outb(offset, data->addr + ABIT_UGURU3_CMD);
+	if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+		ABIT_UGURU3_DEBUG("read from 0x%02x:0x%02x timed out after "
+			"sending the offset, status: 0x%02x\n",
+			(unsigned int)bank, (unsigned int)offset, x);
+		return -EIO;
+	}
+
+	outb(count, data->addr + ABIT_UGURU3_CMD);
+	if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+		ABIT_UGURU3_DEBUG("read from 0x%02x:0x%02x timed out after "
+			"sending the count, status: 0x%02x\n",
+			(unsigned int)bank, (unsigned int)offset, x);
+		return -EIO;
+	}
+
+	for (i = 0; i < count; i++) {
+		if ((x = abituguru3_wait_for_read(data)) !=
+				ABIT_UGURU3_SUCCESS) {
+			ABIT_UGURU3_DEBUG("timeout reading byte %d from "
+				"0x%02x:0x%02x, status: 0x%02x\n", i,
+				(unsigned int)bank, (unsigned int)offset, x);
+			break;
+		}
+		buf[i] = inb(data->addr + ABIT_UGURU3_CMD);
+	}
+	return i;
+}
+
+/* Sensor settings are stored 1 byte per offset with the bytes
+   placed at consecutive offsets. */
+static int abituguru3_read_increment_offset(struct abituguru3_data *data,
+	u8 bank, u8 offset, u8 count, u8 *buf, int offset_count)
+{
+	int i, x;
+
+	for (i = 0; i < offset_count; i++)
+		if ((x = abituguru3_read(data, bank, offset + i, count,
+				buf + i * count)) != count)
+			return i * count + ((i && (x < 0)) ? 0 : x);
+
+	return i * count;
+}
+
+/* Following are the sysfs callback functions. These functions expect:
+   sensor_device_attribute_2->index:   index into the data->sensors array
+   sensor_device_attribute_2->nr:      register offset, bitmask or NA. */
+static struct abituguru3_data *abituguru3_update_device(struct device *dev);
+
+static ssize_t show_value(struct device *dev,
+	struct device_attribute *devattr, char *buf)
+{
+	int value;
+	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+	struct abituguru3_data *data = abituguru3_update_device(dev);
+	const struct abituguru3_sensor_info *sensor;
+
+	if (!data)
+		return -EIO;
+
+	sensor = &data->sensors[attr->index];
+
+	/* are we reading a setting, or is this a normal read? */
+	if (attr->nr)
+		value = data->settings[sensor->port][attr->nr];
+	else
+		value = data->value[sensor->port];
+
+	/* convert the value */
+	value = (value * sensor->multiplier) / sensor->divisor +
+		sensor->offset;
+
+	/* alternatively we could update the sensors settings struct for this,
+	   but then its contents would differ from the windows sw ini files */
+	if (sensor->type == ABIT_UGURU3_TEMP_SENSOR)
+		value *= 1000;
+
+	return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t show_alarm(struct device *dev,
+	struct device_attribute *devattr, char *buf)
+{
+	int port;
+	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+	struct abituguru3_data *data = abituguru3_update_device(dev);
+
+	if (!data)
+		return -EIO;
+
+	port = data->sensors[attr->index].port;
+
+	/* See if the alarm bit for this sensor is set and, if a bitmask is
+	   given in attr->nr, also check if the alarm matches the type of alarm
+	   we're looking for (for volt it can be either low or high). The type
+	   is stored in a few read-only bits in the settings of the sensor. */
+	if ((data->alarms[port / 8] & (0x01 << (port % 8))) &&
+			(!attr->nr || (data->settings[port][0] & attr->nr)))
+		return sprintf(buf, "1\n");
+	else
+		return sprintf(buf, "0\n");
+}
+
+static ssize_t show_mask(struct device *dev,
+	struct device_attribute *devattr, char *buf)
+{
+	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+	struct abituguru3_data *data = dev_get_drvdata(dev);
+
+	if (data->settings[data->sensors[attr->index].port][0] & attr->nr)
+		return sprintf(buf, "1\n");
+	else
+		return sprintf(buf, "0\n");
+}
+
+static ssize_t show_label(struct device *dev,
+	struct device_attribute *devattr, char *buf)
+{
+	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+	struct abituguru3_data *data = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%s\n", data->sensors[attr->index].name);
+}
+
+static ssize_t show_name(struct device *dev,
+	struct device_attribute *devattr, char *buf)
+{
+	return sprintf(buf, "%s\n", ABIT_UGURU3_NAME);
+}
+
+/* Sysfs attr templates; the real entries are generated automatically. */
+static const
+struct sensor_device_attribute_2 abituguru3_sysfs_templ[3][10] = { {
+	SENSOR_ATTR_2(in%d_input, 0444, show_value, NULL, 0, 0),
+	SENSOR_ATTR_2(in%d_min, 0444, show_value, NULL, 1, 0),
+	SENSOR_ATTR_2(in%d_max, 0444, show_value, NULL, 2, 0),
+	SENSOR_ATTR_2(in%d_min_alarm, 0444, show_alarm, NULL,
+		ABIT_UGURU3_VOLT_LOW_ALARM_FLAG, 0),
+	SENSOR_ATTR_2(in%d_max_alarm, 0444, show_alarm, NULL,
+		ABIT_UGURU3_VOLT_HIGH_ALARM_FLAG, 0),
+	SENSOR_ATTR_2(in%d_beep, 0444, show_mask, NULL,
+		ABIT_UGURU3_BEEP_ENABLE, 0),
+	SENSOR_ATTR_2(in%d_shutdown, 0444, show_mask, NULL,
+		ABIT_UGURU3_SHUTDOWN_ENABLE, 0),
+	SENSOR_ATTR_2(in%d_min_alarm_enable, 0444, show_mask, NULL,
+		ABIT_UGURU3_VOLT_LOW_ALARM_ENABLE, 0),
+	SENSOR_ATTR_2(in%d_max_alarm_enable, 0444, show_mask, NULL,
+		ABIT_UGURU3_VOLT_HIGH_ALARM_ENABLE, 0),
+	SENSOR_ATTR_2(in%d_label, 0444, show_label, NULL, 0, 0)
+	}, {
+	SENSOR_ATTR_2(temp%d_input, 0444, show_value, NULL, 0, 0),
+	SENSOR_ATTR_2(temp%d_max, 0444, show_value, NULL, 1, 0),
+	SENSOR_ATTR_2(temp%d_crit, 0444, show_value, NULL, 2, 0),
+	SENSOR_ATTR_2(temp%d_alarm, 0444, show_alarm, NULL, 0, 0),
+	SENSOR_ATTR_2(temp%d_beep, 0444, show_mask, NULL,
+		ABIT_UGURU3_BEEP_ENABLE, 0),
+	SENSOR_ATTR_2(temp%d_shutdown, 0444, show_mask, NULL,
+		ABIT_UGURU3_SHUTDOWN_ENABLE, 0),
+	SENSOR_ATTR_2(temp%d_alarm_enable, 0444, show_mask, NULL,
+		ABIT_UGURU3_TEMP_HIGH_ALARM_ENABLE, 0),
+	SENSOR_ATTR_2(temp%d_label, 0444, show_label, NULL, 0, 0)
+	}, {
+	SENSOR_ATTR_2(fan%d_input, 0444, show_value, NULL, 0, 0),
+	SENSOR_ATTR_2(fan%d_min, 0444, show_value, NULL, 1, 0),
+	SENSOR_ATTR_2(fan%d_alarm, 0444, show_alarm, NULL, 0, 0),
+	SENSOR_ATTR_2(fan%d_beep, 0444, show_mask, NULL,
+		ABIT_UGURU3_BEEP_ENABLE, 0),
+	SENSOR_ATTR_2(fan%d_shutdown, 0444, show_mask, NULL,
+		ABIT_UGURU3_SHUTDOWN_ENABLE, 0),
+	SENSOR_ATTR_2(fan%d_alarm_enable, 0444, show_mask, NULL,
+		ABIT_UGURU3_FAN_LOW_ALARM_ENABLE, 0),
+	SENSOR_ATTR_2(fan%d_label, 0444, show_label, NULL, 0, 0)
+} };
+
+static struct sensor_device_attribute_2 abituguru3_sysfs_attr[] = {
+	SENSOR_ATTR_2(name, 0444, show_name, NULL, 0, 0),
+};
+
+static int __devinit abituguru3_probe(struct platform_device *pdev)
+{
+	const int no_sysfs_attr[3] = { 10, 8, 7 };
+	int sensor_index[3] = { 0, 1, 1 };
+	struct abituguru3_data *data;
+	int i, j, type, used, sysfs_names_free, sysfs_attr_i, res = -ENODEV;
+	char *sysfs_filename;
+	u8 buf[2];
+	u16 id;
+
+	if (!(data = kzalloc(sizeof(struct abituguru3_data), GFP_KERNEL)))
+		return -ENOMEM;
+
+	data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
+	mutex_init(&data->update_lock);
+	platform_set_drvdata(pdev, data);
+
+	/* Read the motherboard ID */
+	if ((i = abituguru3_read(data, ABIT_UGURU3_MISC_BANK,
+			ABIT_UGURU3_BOARD_ID, 2, buf)) != 2) {
+		goto abituguru3_probe_error;
+	}
+
+	/* Completely read the uGuru to see if one really is there */
+	if (!abituguru3_update_device(&pdev->dev))
+		goto abituguru3_probe_error;
+
+	/* lookup the ID in our motherboard table */
+	id = ((u16)buf[0] << 8) | (u16)buf[1];
+	for (i = 0; abituguru3_motherboards[i].id; i++)
+		if (abituguru3_motherboards[i].id == id)
+			break;
+	if (!abituguru3_motherboards[i].id) {
+		printk(KERN_ERR ABIT_UGURU3_NAME ": error: unknown motherboard "
+			"ID: %04X. Please report this to the abituguru3 "
+			"maintainer (see MAINTAINERS)\n", (unsigned int)id);
+		goto abituguru3_probe_error;
+	}
+	data->sensors = abituguru3_motherboards[i].sensors;
+	printk(KERN_INFO ABIT_UGURU3_NAME ": found Abit uGuru3, motherboard "
+		"ID: %04X (%s)\n", (unsigned int)id,
+		abituguru3_motherboards[i].name);
+
+	/* Fill the sysfs attr array */
+	sysfs_attr_i = 0;
+	sysfs_filename = data->sysfs_names;
+	sysfs_names_free = ABIT_UGURU3_SYSFS_NAMES_LENGTH;
+	for (i = 0; data->sensors[i].name; i++) {
+		/* Fail-safe check; this should never happen! */
+		if (i >= ABIT_UGURU3_MAX_NO_SENSORS) {
+			printk(KERN_ERR ABIT_UGURU3_NAME
+				": Fatal error: motherboard has more sensors "
+				"than ABIT_UGURU3_MAX_NO_SENSORS. This should "
+				"never happen, please report to the abituguru3 "
+				"maintainer (see MAINTAINERS)\n");
+			res = -ENAMETOOLONG;
+			goto abituguru3_probe_error;
+		}
+		type = data->sensors[i].type;
+		for (j = 0; j < no_sysfs_attr[type]; j++) {
+			used = snprintf(sysfs_filename, sysfs_names_free,
+				abituguru3_sysfs_templ[type][j].dev_attr.attr.
+				name, sensor_index[type]) + 1;
+			data->sysfs_attr[sysfs_attr_i] =
+				abituguru3_sysfs_templ[type][j];
+			data->sysfs_attr[sysfs_attr_i].dev_attr.attr.name =
+				sysfs_filename;
+			data->sysfs_attr[sysfs_attr_i].index = i;
+			sysfs_filename += used;
+			sysfs_names_free -= used;
+			sysfs_attr_i++;
+		}
+		sensor_index[type]++;
+	}
+	/* Fail-safe check; this should never happen! */
+	if (sysfs_names_free < 0) {
+		printk(KERN_ERR ABIT_UGURU3_NAME
+			": Fatal error: ran out of space for sysfs attr names. "
+			"This should never happen, please report to the "
+			"abituguru3 maintainer (see MAINTAINERS)\n");
+		res = -ENAMETOOLONG;
+		goto abituguru3_probe_error;
+	}
+
+	/* Register sysfs hooks */
+	for (i = 0; i < sysfs_attr_i; i++)
+		if (device_create_file(&pdev->dev,
+				&data->sysfs_attr[i].dev_attr))
+			goto abituguru3_probe_error;
+	for (i = 0; i < ARRAY_SIZE(abituguru3_sysfs_attr); i++)
+		if (device_create_file(&pdev->dev,
+				&abituguru3_sysfs_attr[i].dev_attr))
+			goto abituguru3_probe_error;
+
+	data->class_dev = hwmon_device_register(&pdev->dev);
+	if (IS_ERR(data->class_dev)) {
+		res = PTR_ERR(data->class_dev);
+		goto abituguru3_probe_error;
+	}
+
+	return 0; /* success */
+
+abituguru3_probe_error:
+	for (i = 0; data->sysfs_attr[i].dev_attr.attr.name; i++)
+		device_remove_file(&pdev->dev, &data->sysfs_attr[i].dev_attr);
+	for (i = 0; i < ARRAY_SIZE(abituguru3_sysfs_attr); i++)
+		device_remove_file(&pdev->dev,
+			&abituguru3_sysfs_attr[i].dev_attr);
+	kfree(data);
+	return res;
+}
+
+static int __devexit abituguru3_remove(struct platform_device *pdev)
+{
+	int i;
+	struct abituguru3_data *data = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	hwmon_device_unregister(data->class_dev);
+	for (i = 0; data->sysfs_attr[i].dev_attr.attr.name; i++)
+		device_remove_file(&pdev->dev, &data->sysfs_attr[i].dev_attr);
+	for (i = 0; i < ARRAY_SIZE(abituguru3_sysfs_attr); i++)
+		device_remove_file(&pdev->dev,
+			&abituguru3_sysfs_attr[i].dev_attr);
+	kfree(data);
+
+	return 0;
+}
+
+static struct abituguru3_data *abituguru3_update_device(struct device *dev)
+{
+	int i;
+	struct abituguru3_data *data = dev_get_drvdata(dev);
+
+	mutex_lock(&data->update_lock);
+	if (!data->valid || time_after(jiffies, data->last_updated + HZ)) {
+		/* Clear data->valid while updating */
+		data->valid = 0;
+		/* Read alarms */
+		if (abituguru3_read_increment_offset(data,
+				ABIT_UGURU3_SETTINGS_BANK,
+				ABIT_UGURU3_ALARMS_START,
+				1, data->alarms, 48/8) != (48/8))
+			goto LEAVE_UPDATE;
+		/* Read in and temp sensors (3 byte settings / sensor) */
+		for (i = 0; i < 32; i++) {
+			if (abituguru3_read(data, ABIT_UGURU3_SENSORS_BANK,
+					ABIT_UGURU3_VALUES_START + i,
+					1, &data->value[i]) != 1)
+				goto LEAVE_UPDATE;
+			if (abituguru3_read_increment_offset(data,
+					ABIT_UGURU3_SETTINGS_BANK,
+					ABIT_UGURU3_SETTINGS_START + i * 3,
+					1,
+					data->settings[i], 3) != 3)
+				goto LEAVE_UPDATE;
+		}
+		/* Read fan sensors (2 byte settings / sensor) */
+		for (i = 0; i < 16; i++) {
+			if (abituguru3_read(data, ABIT_UGURU3_SENSORS_BANK,
+					ABIT_UGURU3_VALUES_START + 32 + i,
+					1, &data->value[32 + i]) != 1)
+				goto LEAVE_UPDATE;
+			if (abituguru3_read_increment_offset(data,
+					ABIT_UGURU3_SETTINGS_BANK,
+					ABIT_UGURU3_SETTINGS_START + 32 * 3 +
+						i * 2, 1,
+					data->settings[32 + i], 2) != 2)
+				goto LEAVE_UPDATE;
+		}
+		data->last_updated = jiffies;
+		data->valid = 1;
+	}
+LEAVE_UPDATE:
+	mutex_unlock(&data->update_lock);
+	if (data->valid)
+		return data;
+	else
+		return NULL;
+}
+
+#ifdef CONFIG_PM
+static int abituguru3_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct abituguru3_data *data = platform_get_drvdata(pdev);
+	/* make sure all communications with the uguru3 are done and no new
+	   ones are started */
+	mutex_lock(&data->update_lock);
+	return 0;
+}
+
+static int abituguru3_resume(struct platform_device *pdev)
+{
+	struct abituguru3_data *data = platform_get_drvdata(pdev);
+	mutex_unlock(&data->update_lock);
+	return 0;
+}
+#else
+#define abituguru3_suspend	NULL
+#define abituguru3_resume	NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver abituguru3_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= ABIT_UGURU3_NAME,
+	},
+	.probe	= abituguru3_probe,
+	.remove	= __devexit_p(abituguru3_remove),
+	.suspend = abituguru3_suspend,
+	.resume = abituguru3_resume
+};
+
+static int __init abituguru3_detect(void)
+{
+	/* See if there is an uguru3 there. An idle uGuru3 will hold 0x00 or
+	   0x08 at DATA and 0xAC at CMD. Sometimes the uGuru3 will hold 0x05
+	   at CMD instead, for reasons unknown, so we test for 0x05 too. */
+	u8 data_val = inb_p(ABIT_UGURU3_BASE + ABIT_UGURU3_DATA);
+	u8 cmd_val = inb_p(ABIT_UGURU3_BASE + ABIT_UGURU3_CMD);
+	if (((data_val == 0x00) || (data_val == 0x08)) &&
+			((cmd_val == 0xAC) || (cmd_val == 0x05)))
+		return ABIT_UGURU3_BASE;
+
+	ABIT_UGURU3_DEBUG("no Abit uGuru3 found, data = 0x%02X, cmd = "
+		"0x%02X\n", (unsigned int)data_val, (unsigned int)cmd_val);
+
+	if (force) {
+		printk(KERN_INFO ABIT_UGURU3_NAME ": Assuming Abit uGuru3 is "
+				"present because of \"force\" parameter\n");
+		return ABIT_UGURU3_BASE;
+	}
+
+	/* No uGuru3 found */
+	return -ENODEV;
+}
+
+static struct platform_device *abituguru3_pdev;
+
+static int __init abituguru3_init(void)
+{
+	int address, err;
+	struct resource res = { .flags = IORESOURCE_IO };
+
+	address = abituguru3_detect();
+	if (address < 0)
+		return address;
+
+	err = platform_driver_register(&abituguru3_driver);
+	if (err)
+		goto exit;
+
+	abituguru3_pdev = platform_device_alloc(ABIT_UGURU3_NAME, address);
+	if (!abituguru3_pdev) {
+		printk(KERN_ERR ABIT_UGURU3_NAME
+			": Device allocation failed\n");
+		err = -ENOMEM;
+		goto exit_driver_unregister;
+	}
+
+	res.start = address;
+	res.end = address + ABIT_UGURU3_REGION_LENGTH - 1;
+	res.name = ABIT_UGURU3_NAME;
+
+	err = platform_device_add_resources(abituguru3_pdev, &res, 1);
+	if (err) {
+		printk(KERN_ERR ABIT_UGURU3_NAME
+			": Device resource addition failed (%d)\n", err);
+		goto exit_device_put;
+	}
+
+	err = platform_device_add(abituguru3_pdev);
+	if (err) {
+		printk(KERN_ERR ABIT_UGURU3_NAME
+			": Device addition failed (%d)\n", err);
+		goto exit_device_put;
+	}
+
+	return 0;
+
+exit_device_put:
+	platform_device_put(abituguru3_pdev);
+exit_driver_unregister:
+	platform_driver_unregister(&abituguru3_driver);
+exit:
+	return err;
+}
+
+static void __exit abituguru3_exit(void)
+{
+	platform_device_unregister(abituguru3_pdev);
+	platform_driver_unregister(&abituguru3_driver);
+}
+
+MODULE_AUTHOR("Hans de Goede <j.w.r.degoede@hhs.nl>");
+MODULE_DESCRIPTION("Abit uGuru3 Sensor device");
+MODULE_LICENSE("GPL");
+
+module_init(abituguru3_init);
+module_exit(abituguru3_exit);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 6d54c8c..7c17952 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -318,7 +318,7 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void coretemp_device_remove(unsigned int cpu)
+static void coretemp_device_remove(unsigned int cpu)
 {
 	struct pdev_entry *p, *n;
 	mutex_lock(&pdev_list_mutex);
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
new file mode 100644
index 0000000..be3aaa5
--- /dev/null
+++ b/drivers/hwmon/dme1737.c
@@ -0,0 +1,2080 @@
+/*
+ * dme1737.c - driver for the SMSC DME1737 and Asus A8000 Super-I/O chips'
+ *             integrated hardware monitoring features.
+ * Copyright (c) 2007 Juerg Haefliger <juergh@gmail.com>
+ *
+ * This driver is based on the LM85 driver. The hardware monitoring
+ * capabilities of the DME1737 are very similar to the LM85 with some
+ * additional features. Even though the DME1737 is a Super-I/O chip, the
+ * hardware monitoring registers are only accessible via SMBus.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon-vid.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <asm/io.h>
+
+/* Module load parameters */
+static int force_start;
+module_param(force_start, bool, 0);
+MODULE_PARM_DESC(force_start, "Force the chip to start monitoring inputs");
+
+/* Addresses to scan */
+static unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END};
+
+/* Insmod parameters */
+I2C_CLIENT_INSMOD_1(dme1737);
+
+/* ---------------------------------------------------------------------
+ * Registers
+ *
+ * The sensors are defined as follows:
+ *
+ * Voltages                          Temperatures
+ * --------                          ------------
+ * in0   +5VTR (+5V stdby)           temp1   Remote diode 1
+ * in1   Vccp  (proc core)           temp2   Internal temp
+ * in2   VCC   (internal +3.3V)      temp3   Remote diode 2
+ * in3   +5V
+ * in4   +12V
+ * in5   VTR   (+3.3V stby)
+ * in6   Vbat
+ *
+ * --------------------------------------------------------------------- */
+
+/* Voltages (in) numbered 0-6 (ix) */
+#define	DME1737_REG_IN(ix)		((ix) < 5 ? 0x20 + (ix) \
+						  : 0x94 + (ix))
+#define	DME1737_REG_IN_MIN(ix)		((ix) < 5 ? 0x44 + (ix) * 2 \
+						  : 0x91 + (ix) * 2)
+#define	DME1737_REG_IN_MAX(ix)		((ix) < 5 ? 0x45 + (ix) * 2 \
+						  : 0x92 + (ix) * 2)
+
+/* Temperatures (temp) numbered 0-2 (ix) */
+#define DME1737_REG_TEMP(ix)		(0x25 + (ix))
+#define DME1737_REG_TEMP_MIN(ix)	(0x4e + (ix) * 2)
+#define DME1737_REG_TEMP_MAX(ix)	(0x4f + (ix) * 2)
+#define DME1737_REG_TEMP_OFFSET(ix)	((ix) == 0 ? 0x1f \
+						   : 0x1c + (ix))
+
+/* Voltage and temperature LSBs
+ * The LSBs (4 bits each) are stored in 5 registers with the following layouts:
+ *    IN_TEMP_LSB(0) = [in5, in6]
+ *    IN_TEMP_LSB(1) = [temp3, temp1]
+ *    IN_TEMP_LSB(2) = [in4, temp2]
+ *    IN_TEMP_LSB(3) = [in3, in0]
+ *    IN_TEMP_LSB(4) = [in2, in1] */
+#define DME1737_REG_IN_TEMP_LSB(ix)	(0x84 + (ix))
+static const u8 DME1737_REG_IN_LSB[] = {3, 4, 4, 3, 2, 0, 0};
+static const u8 DME1737_REG_IN_LSB_SHL[] = {4, 4, 0, 0, 0, 0, 4};
+static const u8 DME1737_REG_TEMP_LSB[] = {1, 2, 1};
+static const u8 DME1737_REG_TEMP_LSB_SHL[] = {4, 4, 0};
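+
+/* Worked example (for illustration only): in0 shares IN_TEMP_LSB(3) with
+ * in3. DME1737_REG_IN_LSB[0] = 3 selects that register and
+ * DME1737_REG_IN_LSB_SHL[0] = 4 moves the low nibble into bits [7:4], so the
+ * update code below effectively does: data->in[0] |= (lsb[3] << 4) & 0xf0; */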
+
+/* Fans numbered 0-5 (ix) */
+#define DME1737_REG_FAN(ix)		((ix) < 4 ? 0x28 + (ix) * 2 \
+						  : 0xa1 + (ix) * 2)
+#define DME1737_REG_FAN_MIN(ix)		((ix) < 4 ? 0x54 + (ix) * 2 \
+						  : 0xa5 + (ix) * 2)
+#define DME1737_REG_FAN_OPT(ix)		((ix) < 4 ? 0x90 + (ix) \
+						  : 0xb2 + (ix))
+#define DME1737_REG_FAN_MAX(ix)		(0xb4 + (ix)) /* only for fan[4-5] */
+
+/* PWMs numbered 0-2, 4-5 (ix) */
+#define DME1737_REG_PWM(ix)		((ix) < 3 ? 0x30 + (ix) \
+						  : 0xa1 + (ix))
+#define DME1737_REG_PWM_CONFIG(ix)	(0x5c + (ix)) /* only for pwm[0-2] */
+#define DME1737_REG_PWM_MIN(ix)		(0x64 + (ix)) /* only for pwm[0-2] */
+#define DME1737_REG_PWM_FREQ(ix)	((ix) < 3 ? 0x5f + (ix) \
+						  : 0xa3 + (ix))
+/* The layout of the ramp rate registers is different from the other pwm
+ * registers. The bits for the 3 PWMs are stored in 2 registers:
+ *    PWM_RR(0) = [OFF3, OFF2,  OFF1,  RES,   RR1E, RR1-2, RR1-1, RR1-0]
+ *    PWM_RR(1) = [RR2E, RR2-2, RR2-1, RR2-0, RR3E, RR3-2, RR3-1, RR3-0] */
+#define DME1737_REG_PWM_RR(ix)		(0x62 + (ix)) /* only for pwm[0-2] */
+
+/* Thermal zones 0-2 */
+#define DME1737_REG_ZONE_LOW(ix)	(0x67 + (ix))
+#define DME1737_REG_ZONE_ABS(ix)	(0x6a + (ix))
+/* The layout of the hysteresis registers is different from the other zone
+ * registers. The bits for the 3 zones are stored in 2 registers:
+ *    ZONE_HYST(0) = [H1-3,  H1-2,  H1-1, H1-0, H2-3, H2-2, H2-1, H2-0]
+ *    ZONE_HYST(1) = [H3-3,  H3-2,  H3-1, H3-0, RES,  RES,  RES,  RES] */
+#define DME1737_REG_ZONE_HYST(ix)	(0x6d + (ix))
+
+/* Alarm registers and bit mapping
+ * The 3 8-bit alarm registers are concatenated into a single 32-bit
+ * alarm value [0, ALARM3, ALARM2, ALARM1]. */
+#define DME1737_REG_ALARM1		0x41
+#define DME1737_REG_ALARM2		0x42
+#define DME1737_REG_ALARM3		0x83
+static const u8 DME1737_BIT_ALARM_IN[] = {0, 1, 2, 3, 8, 16, 17};
+static const u8 DME1737_BIT_ALARM_TEMP[] = {4, 5, 6};
+static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23};
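+
+/* For illustration: DME1737_BIT_ALARM_FAN[4] = 22, i.e. the fan5 alarm sits
+ * in bit 6 of ALARM3 (0x83), which is only read when bit 7 of ALARM1
+ * indicates that the upper alarm registers are non-zero (see
+ * dme1737_update_device() below). */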
+
+/* Miscellaneous registers */
+#define DME1737_REG_COMPANY		0x3e
+#define DME1737_REG_VERSTEP		0x3f
+#define DME1737_REG_CONFIG		0x40
+#define DME1737_REG_CONFIG2		0x7f
+#define DME1737_REG_VID			0x43
+#define DME1737_REG_TACH_PWM		0x81
+
+/* ---------------------------------------------------------------------
+ * Misc defines
+ * --------------------------------------------------------------------- */
+
+/* Chip identification */
+#define DME1737_COMPANY_SMSC	0x5c
+#define DME1737_VERSTEP		0x88
+#define DME1737_VERSTEP_MASK	0xf8
+
+/* ---------------------------------------------------------------------
+ * Data structures and manipulation thereof
+ * --------------------------------------------------------------------- */
+
+struct dme1737_data {
+	struct i2c_client client;
+	struct class_device *class_dev;
+
+	struct mutex update_lock;
+	int valid;			/* !=0 if following fields are valid */
+	unsigned long last_update;	/* in jiffies */
+	unsigned long last_vbat;	/* in jiffies */
+
+	u8 vid;
+	u8 pwm_rr_en;
+	u8 has_pwm;
+	u8 has_fan;
+
+	/* Register values */
+	u16 in[7];
+	u8  in_min[7];
+	u8  in_max[7];
+	s16 temp[3];
+	s8  temp_min[3];
+	s8  temp_max[3];
+	s8  temp_offset[3];
+	u8  config;
+	u8  config2;
+	u8  vrm;
+	u16 fan[6];
+	u16 fan_min[6];
+	u8  fan_max[2];
+	u8  fan_opt[6];
+	u8  pwm[6];
+	u8  pwm_min[3];
+	u8  pwm_config[3];
+	u8  pwm_acz[3];
+	u8  pwm_freq[6];
+	u8  pwm_rr[2];
+	u8  zone_low[3];
+	u8  zone_abs[3];
+	u8  zone_hyst[2];
+	u32 alarms;
+};
+
+/* Nominal voltage values */
+static const int IN_NOMINAL[] = {5000, 2250, 3300, 5000, 12000, 3300, 3300};
+
+/* Voltage input
+ * Voltage inputs have 16 bits resolution, limit values have 8 bits
+ * resolution. */
+static inline int IN_FROM_REG(int reg, int ix, int res)
+{
+	return (reg * IN_NOMINAL[ix] + (3 << (res - 3))) / (3 << (res - 2));
+}
+
+static inline int IN_TO_REG(int val, int ix)
+{
+	return SENSORS_LIMIT((val * 192 + IN_NOMINAL[ix] / 2) /
+			     IN_NOMINAL[ix], 0, 255);
+}
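+
+/* Worked example: for in4 (+12V, nominal 12000) a 16-bit reading of 0xc000
+ * (= 3 << 14) yields IN_FROM_REG(0xc000, 4, 16) = 12000 mV, and
+ * IN_TO_REG(12000, 4) gives the 8-bit limit value 192 (= 3 << 6). In both
+ * resolutions, 3/4 of full scale corresponds to the nominal voltage. */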
+
+/* Temperature input
+ * The register values represent temperatures in 2's complement notation from
+ * -127 degrees C to +127 degrees C. Temp inputs have 16 bits resolution, limit
+ * values have 8 bits resolution. */
+static inline int TEMP_FROM_REG(int reg, int res)
+{
+	return (reg * 1000) >> (res - 8);
+}
+
+static inline int TEMP_TO_REG(int val)
+{
+	return SENSORS_LIMIT((val < 0 ? val - 500 : val + 500) / 1000,
+			     -128, 127);
+}
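+
+/* Worked example: a 16-bit reading of 0x2e80 gives TEMP_FROM_REG(0x2e80, 16)
+ * = (0x2e80 * 1000) >> 8 = 46500, i.e. 46.5 degrees C, and TEMP_TO_REG(46500)
+ * rounds back to the 8-bit limit value 47 (0x2f). */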
+
+/* Temperature range */
+static const int TEMP_RANGE[] = {2000, 2500, 3333, 4000, 5000, 6666, 8000,
+				 10000, 13333, 16000, 20000, 26666, 32000,
+				 40000, 53333, 80000};
+
+static inline int TEMP_RANGE_FROM_REG(int reg)
+{
+	return TEMP_RANGE[(reg >> 4) & 0x0f];
+}
+
+static int TEMP_RANGE_TO_REG(int val, int reg)
+{
+	int i;
+
+	for (i = 15; i > 0; i--) {
+		if (val > (TEMP_RANGE[i] + TEMP_RANGE[i - 1] + 1) / 2) {
+			break;
+		}
+	}
+
+	return (reg & 0x0f) | (i << 4);
+}
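+
+/* Worked example: TEMP_RANGE_FROM_REG(0x40) takes the upper nibble (4) and
+ * returns 5000 (a 5 degree C range); TEMP_RANGE_TO_REG(5000, 0x40) picks the
+ * nearest table entry and returns 0x40 again, leaving the lower nibble
+ * untouched. */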
+
+/* Temperature hysteresis
+ * Register layout:
+ *    reg[0] = [H1-3, H1-2, H1-1, H1-0, H2-3, H2-2, H2-1, H2-0]
+ *    reg[1] = [H3-3, H3-2, H3-1, H3-0, xxxx, xxxx, xxxx, xxxx] */
+static inline int TEMP_HYST_FROM_REG(int reg, int ix)
+{
+	return (((ix == 1) ? reg : reg >> 4) & 0x0f) * 1000;
+}
+
+static inline int TEMP_HYST_TO_REG(int val, int ix, int reg)
+{
+	int hyst = SENSORS_LIMIT((val + 500) / 1000, 0, 15);
+
+	return (ix == 1) ? (reg & 0xf0) | hyst : (reg & 0x0f) | (hyst << 4);
+}
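+
+/* Worked example: zone1 and zone2 share ZONE_HYST(0); with reg = 0x42,
+ * TEMP_HYST_FROM_REG(0x42, 0) returns 4000 (zone1, upper nibble) and
+ * TEMP_HYST_FROM_REG(0x42, 1) returns 2000 (zone2, lower nibble). */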
+
+/* Fan input RPM */
+static inline int FAN_FROM_REG(int reg, int tpc)
+{
+	return (reg == 0 || reg == 0xffff) ? 0 :
+		(tpc == 0) ? 90000 * 60 / reg : tpc * reg;
+}
+
+static inline int FAN_TO_REG(int val, int tpc)
+{
+	return SENSORS_LIMIT((tpc == 0) ? 90000 * 60 / val : val / tpc,
+			     0, 0xffff);
+}
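+
+/* Worked example: in legacy mode (tpc == 0) the register holds
+ * 90000 * 60 / rpm, so a fan spinning at 3000 RPM is stored as
+ * FAN_TO_REG(3000, 0) = 1800 and FAN_FROM_REG(1800, 0) returns 3000 again.
+ * A raw value of 0 or 0xffff is reported as 0 RPM. */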
+
+/* Fan TPC (tach pulse count)
+ * Converts a register value to a TPC multiplier or returns 0 if the tachometer
+ * is configured in legacy (non-tpc) mode */
+static inline int FAN_TPC_FROM_REG(int reg)
+{
+	return (reg & 0x20) ? 0 : 60 >> (reg & 0x03);
+}
+
+/* Fan type
+ * The type of a fan is expressed as the number of pulses per revolution that
+ * it emits */
+static inline int FAN_TYPE_FROM_REG(int reg)
+{
+	int edge = (reg >> 1) & 0x03;
+
+	return (edge > 0) ? 1 << (edge - 1) : 0;
+}
+
+static inline int FAN_TYPE_TO_REG(int val, int reg)
+{
+	int edge = (val == 4) ? 3 : val;
+
+	return (reg & 0xf9) | (edge << 1);
+}
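+
+/* Worked example: a 2 pulses-per-revolution fan is encoded with edge bits
+ * [2:1] = 10, so FAN_TYPE_FROM_REG() returns 1 << (2 - 1) = 2, and
+ * FAN_TYPE_TO_REG(2, reg) writes that encoding back while preserving the
+ * other bits of the register. */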
+
+/* Fan max RPM */
+static const int FAN_MAX[] = {0x54, 0x38, 0x2a, 0x21, 0x1c, 0x18, 0x15, 0x12,
+			      0x11, 0x0f, 0x0e};
+
+static int FAN_MAX_FROM_REG(int reg)
+{
+	int i;
+
+	for (i = 10; i > 0; i--) {
+		if (reg == FAN_MAX[i]) {
+			break;
+		}
+	}
+
+	return 1000 + i * 500;
+}
+
+static int FAN_MAX_TO_REG(int val)
+{
+	int i;
+
+	for (i = 10; i > 0; i--) {
+		if (val > (1000 + (i - 1) * 500)) {
+			break;
+		}
+	}
+
+	return FAN_MAX[i];
+}
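+
+/* Worked example: FAN_MAX[2] = 0x2a corresponds to a full-scale speed of
+ * 1000 + 2 * 500 = 2000 RPM, so FAN_MAX_FROM_REG(0x2a) returns 2000 and
+ * FAN_MAX_TO_REG(2000) returns 0x2a again. */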
+
+/* PWM enable
+ * Register to enable mapping:
+ * 000:  2  fan on zone 1 auto
+ * 001:  2  fan on zone 2 auto
+ * 010:  2  fan on zone 3 auto
+ * 011:  0  fan full on
+ * 100: -1  fan disabled
+ * 101:  2  fan on hottest of zones 2,3 auto
+ * 110:  2  fan on hottest of zones 1,2,3 auto
+ * 111:  1  fan in manual mode */
+static inline int PWM_EN_FROM_REG(int reg)
+{
+	static const int en[] = {2, 2, 2, 0, -1, 2, 2, 1};
+
+	return en[(reg >> 5) & 0x07];
+}
+
+static inline int PWM_EN_TO_REG(int val, int reg)
+{
+	int en = (val == 1) ? 7 : 3;
+
+	return (reg & 0x1f) | ((en & 0x07) << 5);
+}
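+
+/* Worked example: a pwm_config register value of 0x62 has bits [7:5] = 011,
+ * which PWM_EN_FROM_REG() maps to 0 (fan full on); switching to manual mode
+ * with PWM_EN_TO_REG(1, 0x62) sets those bits to 111 and returns 0xe2. */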
+
+/* PWM auto channels zone
+ * Register to auto channels zone mapping (ACZ is a bitfield with bit x
+ * corresponding to zone x+1):
+ * 000: 001  fan on zone 1 auto
+ * 001: 010  fan on zone 2 auto
+ * 010: 100  fan on zone 3 auto
+ * 011: 000  fan full on
+ * 100: 000  fan disabled
+ * 101: 110  fan on hottest of zones 2,3 auto
+ * 110: 111  fan on hottest of zones 1,2,3 auto
+ * 111: 000  fan in manual mode */
+static inline int PWM_ACZ_FROM_REG(int reg)
+{
+	static const int acz[] = {1, 2, 4, 0, 0, 6, 7, 0};
+
+	return acz[(reg >> 5) & 0x07];
+}
+
+static inline int PWM_ACZ_TO_REG(int val, int reg)
+{
+	int acz = (val == 4) ? 2 : val - 1;
+
+	return (reg & 0x1f) | ((acz & 0x07) << 5);
+}
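+
+/* Worked example: selecting the hottest-of-all-zones mode with
+ * PWM_ACZ_TO_REG(7, reg) encodes 7 - 1 = 6 (binary 110) into bits [7:5], and
+ * PWM_ACZ_FROM_REG() maps that register setting back to the bitfield 7. */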
+
+/* PWM frequency */
+static const int PWM_FREQ[] = {11, 15, 22, 29, 35, 44, 59, 88,
+			       15000, 20000, 30000, 25000, 0, 0, 0, 0};
+
+static inline int PWM_FREQ_FROM_REG(int reg)
+{
+	return PWM_FREQ[reg & 0x0f];
+}
+
+static int PWM_FREQ_TO_REG(int val, int reg)
+{
+	int i;
+
+	/* the first two cases are special - stupid chip design! */
+	if (val > 27500) {
+		i = 10;
+	} else if (val > 22500) {
+		i = 11;
+	} else {
+		for (i = 9; i > 0; i--) {
+			if (val > (PWM_FREQ[i] + PWM_FREQ[i - 1] + 1) / 2) {
+				break;
+			}
+		}
+	}
+
+	return (reg & 0xf0) | i;
+}
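+
+/* Worked example: the table is not monotonic at the top (30 kHz sits at
+ * index 10, 25 kHz at index 11), hence the two special cases above: a
+ * requested frequency of 29000 Hz maps to index 10, 25000 Hz to index 11. */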
+
+/* PWM ramp rate
+ * Register layout:
+ *    reg[0] = [OFF3,  OFF2,  OFF1,  RES,   RR1-E, RR1-2, RR1-1, RR1-0]
+ *    reg[1] = [RR2-E, RR2-2, RR2-1, RR2-0, RR3-E, RR3-2, RR3-1, RR3-0] */
+static const u8 PWM_RR[] = {206, 104, 69, 41, 26, 18, 10, 5};
+
+static inline int PWM_RR_FROM_REG(int reg, int ix)
+{
+	int rr = (ix == 1) ? reg >> 4 : reg;
+
+	return (rr & 0x08) ? PWM_RR[rr & 0x07] : 0;
+}
+
+static int PWM_RR_TO_REG(int val, int ix, int reg)
+{
+	int i;
+
+	for (i = 0; i < 7; i++) {
+		if (val > (PWM_RR[i] + PWM_RR[i + 1] + 1) / 2) {
+			break;
+		}
+	}
+
+	return (ix == 1) ? (reg & 0x8f) | (i << 4) : (reg & 0xf8) | i;
+}
+
+/* PWM ramp rate enable */
+static inline int PWM_RR_EN_FROM_REG(int reg, int ix)
+{
+	return PWM_RR_FROM_REG(reg, ix) ? 1 : 0;
+}
+
+static inline int PWM_RR_EN_TO_REG(int val, int ix, int reg)
+{
+	int en = (ix == 1) ? 0x80 : 0x08;
+
+	return val ? reg | en : reg & ~en;
+}
+
+/* PWM min/off
+ * The PWM min/off bits are part of the PWM ramp rate register 0 (see above for
+ * the register layout). */
+static inline int PWM_OFF_FROM_REG(int reg, int ix)
+{
+	return (reg >> (ix + 5)) & 0x01;
+}
+
+static inline int PWM_OFF_TO_REG(int val, int ix, int reg)
+{
+	return (reg & ~(1 << (ix + 5))) | ((val & 0x01) << (ix + 5));
+}
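+
+/* Worked example: PWM_OFF_FROM_REG(reg, 0) tests bit 5 (OFF1) of PWM_RR(0),
+ * so with reg = 0x20 it returns 1 for pwm1 and 0 for pwm2 and pwm3. */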
+
+/* ---------------------------------------------------------------------
+ * Device I/O access
+ * --------------------------------------------------------------------- */
+
+static u8 dme1737_read(struct i2c_client *client, u8 reg)
+{
+	s32 val = i2c_smbus_read_byte_data(client, reg);
+
+	if (val < 0) {
+		dev_warn(&client->dev, "Read from register 0x%02x failed! "
+			 "Please report to the driver maintainer.\n", reg);
+	}
+
+	return val;
+}
+
+static s32 dme1737_write(struct i2c_client *client, u8 reg, u8 value)
+{
+	s32 res = i2c_smbus_write_byte_data(client, reg, value);
+
+	if (res < 0) {
+		dev_warn(&client->dev, "Write to register 0x%02x failed! "
+			 "Please report to the driver maintainer.\n", reg);
+	}
+
+	return res;
+}
+
+static struct dme1737_data *dme1737_update_device(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct dme1737_data *data = i2c_get_clientdata(client);
+	int ix;
+	u8 lsb[5];
+
+	mutex_lock(&data->update_lock);
+
+	/* Enable a Vbat monitoring cycle every 10 mins */
+	if (time_after(jiffies, data->last_vbat + 600 * HZ) || !data->valid) {
+		dme1737_write(client, DME1737_REG_CONFIG, dme1737_read(client,
+						DME1737_REG_CONFIG) | 0x10);
+		data->last_vbat = jiffies;
+	}
+
+	/* Sample register contents every 1 sec */
+	if (time_after(jiffies, data->last_update + HZ) || !data->valid) {
+		data->vid = dme1737_read(client, DME1737_REG_VID) & 0x3f;
+
+		/* In (voltage) registers */
+		for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) {
+			/* Voltage inputs are stored as 16 bit values even
+			 * though they have only 12 bits resolution. This is
+			 * to make it consistent with the temp inputs. */
+			data->in[ix] = dme1737_read(client,
+					DME1737_REG_IN(ix)) << 8;
+			data->in_min[ix] = dme1737_read(client,
+					DME1737_REG_IN_MIN(ix));
+			data->in_max[ix] = dme1737_read(client,
+					DME1737_REG_IN_MAX(ix));
+		}
+
+		/* Temp registers */
+		for (ix = 0; ix < ARRAY_SIZE(data->temp); ix++) {
+			/* Temp inputs are stored as 16 bit values even
+			 * though they have only 12 bits resolution. This is
+			 * to take advantage of implicit conversions between
+			 * register values (2's complement) and temp values
+			 * (signed decimal). */
+			data->temp[ix] = dme1737_read(client,
+					DME1737_REG_TEMP(ix)) << 8;
+			data->temp_min[ix] = dme1737_read(client,
+					DME1737_REG_TEMP_MIN(ix));
+			data->temp_max[ix] = dme1737_read(client,
+					DME1737_REG_TEMP_MAX(ix));
+			data->temp_offset[ix] = dme1737_read(client,
+					DME1737_REG_TEMP_OFFSET(ix));
+		}
+
+		/* In and temp LSB registers
+		 * The LSBs are latched when the MSBs are read, so the order in
+		 * which the registers are read (MSB first, then LSB) is
+		 * important! */
+		for (ix = 0; ix < ARRAY_SIZE(lsb); ix++) {
+			lsb[ix] = dme1737_read(client,
+					DME1737_REG_IN_TEMP_LSB(ix));
+		}
+		for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) {
+			data->in[ix] |= (lsb[DME1737_REG_IN_LSB[ix]] <<
+					DME1737_REG_IN_LSB_SHL[ix]) & 0xf0;
+		}
+		for (ix = 0; ix < ARRAY_SIZE(data->temp); ix++) {
+			data->temp[ix] |= (lsb[DME1737_REG_TEMP_LSB[ix]] <<
+					DME1737_REG_TEMP_LSB_SHL[ix]) & 0xf0;
+		}
+
+		/* Fan registers */
+		for (ix = 0; ix < ARRAY_SIZE(data->fan); ix++) {
+			/* Skip reading registers if optional fans are not
+			 * present */
+			if (!(data->has_fan & (1 << ix))) {
+				continue;
+			}
+			data->fan[ix] = dme1737_read(client,
+					DME1737_REG_FAN(ix));
+			data->fan[ix] |= dme1737_read(client,
+					DME1737_REG_FAN(ix) + 1) << 8;
+			data->fan_min[ix] = dme1737_read(client,
+					DME1737_REG_FAN_MIN(ix));
+			data->fan_min[ix] |= dme1737_read(client,
+					DME1737_REG_FAN_MIN(ix) + 1) << 8;
+			data->fan_opt[ix] = dme1737_read(client,
+					DME1737_REG_FAN_OPT(ix));
+			/* fan_max exists only for fan[5-6] */
+			if (ix > 3) {
+				data->fan_max[ix - 4] = dme1737_read(client,
+					DME1737_REG_FAN_MAX(ix));
+			}
+		}
+
+		/* PWM registers */
+		for (ix = 0; ix < ARRAY_SIZE(data->pwm); ix++) {
+			/* Skip reading registers if optional PWMs are not
+			 * present */
+			if (!(data->has_pwm & (1 << ix))) {
+				continue;
+			}
+			data->pwm[ix] = dme1737_read(client,
+					DME1737_REG_PWM(ix));
+			data->pwm_freq[ix] = dme1737_read(client,
+					DME1737_REG_PWM_FREQ(ix));
+			/* pwm_config and pwm_min exist only for pwm[1-3] */
+			if (ix < 3) {
+				data->pwm_config[ix] = dme1737_read(client,
+						DME1737_REG_PWM_CONFIG(ix));
+				data->pwm_min[ix] = dme1737_read(client,
+						DME1737_REG_PWM_MIN(ix));
+			}
+		}
+		for (ix = 0; ix < ARRAY_SIZE(data->pwm_rr); ix++) {
+			data->pwm_rr[ix] = dme1737_read(client,
+						DME1737_REG_PWM_RR(ix));
+		}
+
+		/* Thermal zone registers */
+		for (ix = 0; ix < ARRAY_SIZE(data->zone_low); ix++) {
+			data->zone_low[ix] = dme1737_read(client,
+					DME1737_REG_ZONE_LOW(ix));
+			data->zone_abs[ix] = dme1737_read(client,
+					DME1737_REG_ZONE_ABS(ix));
+		}
+		for (ix = 0; ix < ARRAY_SIZE(data->zone_hyst); ix++) {
+			data->zone_hyst[ix] = dme1737_read(client,
+						DME1737_REG_ZONE_HYST(ix));
+		}
+
+		/* Alarm registers */
+		data->alarms = dme1737_read(client,
+						DME1737_REG_ALARM1);
+		/* Bit 7 tells us if the other alarm registers are non-zero and
+		 * therefore also need to be read */
+		if (data->alarms & 0x80) {
+			data->alarms |= dme1737_read(client,
+						DME1737_REG_ALARM2) << 8;
+			data->alarms |= dme1737_read(client,
+						DME1737_REG_ALARM3) << 16;
+		}
+
+		data->last_update = jiffies;
+		data->valid = 1;
+	}
+
+	mutex_unlock(&data->update_lock);
+
+	return data;
+}
+
+/* ---------------------------------------------------------------------
+ * Voltage sysfs attributes
+ * ix = [0-6]
+ * --------------------------------------------------------------------- */
+
+#define SYS_IN_INPUT	0
+#define SYS_IN_MIN	1
+#define SYS_IN_MAX	2
+#define SYS_IN_ALARM	3
+
+static ssize_t show_in(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct dme1737_data *data = dme1737_update_device(dev);
+	struct sensor_device_attribute_2
+		*sensor_attr_2 = to_sensor_dev_attr_2(attr);
+	int ix = sensor_attr_2->index;
+	int fn = sensor_attr_2->nr;
+	int res;
+
+	switch (fn) {
+	case SYS_IN_INPUT:
+		res = IN_FROM_REG(data->in[ix], ix, 16);
+		break;
+	case SYS_IN_MIN:
+		res = IN_FROM_REG(data->in_min[ix], ix, 8);
+		break;
+	case SYS_IN_MAX:
+		res = IN_FROM_REG(data->in_max[ix], ix, 8);
+		break;
+	case SYS_IN_ALARM:
+		res = (data->alarms >> DME1737_BIT_ALARM_IN[ix]) & 0x01;
+		break;
+	default:
+		res = 0;
+		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+	}
+
+	return sprintf(buf, "%d\n", res);
+}
+
+static ssize_t set_in(struct device *dev, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct dme1737_data *data = i2c_get_clientdata(client);
+	struct sensor_device_attribute_2
+		*sensor_attr_2 = to_sensor_dev_attr_2(attr);
+	int ix = sensor_attr_2->index;
+	int fn = sensor_attr_2->nr;
+	long val = simple_strtol(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	switch (fn) {
+	case SYS_IN_MIN:
+		data->in_min[ix] = IN_TO_REG(val, ix);
+		dme1737_write(client, DME1737_REG_IN_MIN(ix),
+			      data->in_min[ix]);
+		break;
+	case SYS_IN_MAX:
+		data->in_max[ix] = IN_TO_REG(val, ix);
+		dme1737_write(client, DME1737_REG_IN_MAX(ix),
+			      data->in_max[ix]);
+		break;
+	default:
+		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+	}
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+/* ---------------------------------------------------------------------
+ * Temperature sysfs attributes
+ * ix = [0-2]
+ * --------------------------------------------------------------------- */
+
+#define SYS_TEMP_INPUT			0
+#define SYS_TEMP_MIN			1
+#define SYS_TEMP_MAX			2
+#define SYS_TEMP_OFFSET			3
+#define SYS_TEMP_ALARM			4
+#define SYS_TEMP_FAULT			5
+
+static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct dme1737_data *data = dme1737_update_device(dev);
+	struct sensor_device_attribute_2
+		*sensor_attr_2 = to_sensor_dev_attr_2(attr);
+	int ix = sensor_attr_2->index;
+	int fn = sensor_attr_2->nr;
+	int res;
+
+	switch (fn) {
+	case SYS_TEMP_INPUT:
+		res = TEMP_FROM_REG(data->temp[ix], 16);
+		break;
+	case SYS_TEMP_MIN:
+		res = TEMP_FROM_REG(data->temp_min[ix], 8);
+		break;
+	case SYS_TEMP_MAX:
+		res = TEMP_FROM_REG(data->temp_max[ix], 8);
+		break;
+	case SYS_TEMP_OFFSET:
+		res = TEMP_FROM_REG(data->temp_offset[ix], 8);
+		break;
+	case SYS_TEMP_ALARM:
+		res = (data->alarms >> DME1737_BIT_ALARM_TEMP[ix]) & 0x01;
+		break;
+	case SYS_TEMP_FAULT:
+		res = (data->temp[ix] == 0x0800);
+		break;
+	default:
+		res = 0;
+		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+	}
+
+	return sprintf(buf, "%d\n", res);
+}
+
+static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct dme1737_data *data = i2c_get_clientdata(client);
+	struct sensor_device_attribute_2
+		*sensor_attr_2 = to_sensor_dev_attr_2(attr);
+	int ix = sensor_attr_2->index;
+	int fn = sensor_attr_2->nr;
+	long val = simple_strtol(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	switch (fn) {
+	case SYS_TEMP_MIN:
+		data->temp_min[ix] = TEMP_TO_REG(val);
+		dme1737_write(client, DME1737_REG_TEMP_MIN(ix),
+			      data->temp_min[ix]);
+		break;
+	case SYS_TEMP_MAX:
+		data->temp_max[ix] = TEMP_TO_REG(val);
+		dme1737_write(client, DME1737_REG_TEMP_MAX(ix),
+			      data->temp_max[ix]);
+		break;
+	case SYS_TEMP_OFFSET:
+		data->temp_offset[ix] = TEMP_TO_REG(val);
+		dme1737_write(client, DME1737_REG_TEMP_OFFSET(ix),
+			      data->temp_offset[ix]);
+		break;
+	default:
+		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+	}
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+/* ---------------------------------------------------------------------
+ * Zone sysfs attributes
+ * ix = [0-2]
+ * --------------------------------------------------------------------- */
+
+#define SYS_ZONE_AUTO_CHANNELS_TEMP	0
+#define SYS_ZONE_AUTO_POINT1_TEMP_HYST	1
+#define SYS_ZONE_AUTO_POINT1_TEMP	2
+#define SYS_ZONE_AUTO_POINT2_TEMP	3
+#define SYS_ZONE_AUTO_POINT3_TEMP	4
+
+static ssize_t show_zone(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct dme1737_data *data = dme1737_update_device(dev);
+	struct sensor_device_attribute_2
+		*sensor_attr_2 = to_sensor_dev_attr_2(attr);
+	int ix = sensor_attr_2->index;
+	int fn = sensor_attr_2->nr;
+	int res;
+
+	switch (fn) {
+	case SYS_ZONE_AUTO_CHANNELS_TEMP:
+		/* check config2 for non-standard temp-to-zone mapping */
+		if ((ix == 1) && (data->config2 & 0x02)) {
+			res = 4;
+		} else {
+			res = 1 << ix;
+		}
+		break;
+	case SYS_ZONE_AUTO_POINT1_TEMP_HYST:
+		res = TEMP_FROM_REG(data->zone_low[ix], 8) -
+		      TEMP_HYST_FROM_REG(data->zone_hyst[ix == 2], ix);
+		break;
+	case SYS_ZONE_AUTO_POINT1_TEMP:
+		res = TEMP_FROM_REG(data->zone_low[ix], 8);
+		break;
+	case SYS_ZONE_AUTO_POINT2_TEMP:
+		/* pwm_freq holds the temp range bits in the upper nibble */
+		res = TEMP_FROM_REG(data->zone_low[ix], 8) +
+		      TEMP_RANGE_FROM_REG(data->pwm_freq[ix]);
+		break;
+	case SYS_ZONE_AUTO_POINT3_TEMP:
+		res = TEMP_FROM_REG(data->zone_abs[ix], 8);
+		break;
+	default:
+		res = 0;
+		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+	}
+
+	return sprintf(buf, "%d\n", res);
+}
+
+static ssize_t set_zone(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct dme1737_data *data = i2c_get_clientdata(client);
+	struct sensor_device_attribute_2
+		*sensor_attr_2 = to_sensor_dev_attr_2(attr);
+	int ix = sensor_attr_2->index;
+	int fn = sensor_attr_2->nr;
+	long val = simple_strtol(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	switch (fn) {
+	case SYS_ZONE_AUTO_POINT1_TEMP_HYST:
+		/* Refresh the cache */
+		data->zone_low[ix] = dme1737_read(client,
+						  DME1737_REG_ZONE_LOW(ix));
+		/* Modify the temp hyst value */
+		data->zone_hyst[ix == 2] = TEMP_HYST_TO_REG(
+					TEMP_FROM_REG(data->zone_low[ix], 8) -
+					val, ix, dme1737_read(client,
+					DME1737_REG_ZONE_HYST(ix == 2)));
+		dme1737_write(client, DME1737_REG_ZONE_HYST(ix == 2),
+			      data->zone_hyst[ix == 2]);
+		break;
+	case SYS_ZONE_AUTO_POINT1_TEMP:
+		data->zone_low[ix] = TEMP_TO_REG(val);
+		dme1737_write(client, DME1737_REG_ZONE_LOW(ix),
+			      data->zone_low[ix]);
+		break;
+	case SYS_ZONE_AUTO_POINT2_TEMP:
+		/* Refresh the cache */
+		data->zone_low[ix] = dme1737_read(client,
+						  DME1737_REG_ZONE_LOW(ix));
+		/* Modify the temp range value (which is stored in the upper
+		 * nibble of the pwm_freq register) */
+		data->pwm_freq[ix] = TEMP_RANGE_TO_REG(val -
+					TEMP_FROM_REG(data->zone_low[ix], 8),
+					dme1737_read(client,
+					DME1737_REG_PWM_FREQ(ix)));
+		dme1737_write(client, DME1737_REG_PWM_FREQ(ix),
+			      data->pwm_freq[ix]);
+		break;
+	case SYS_ZONE_AUTO_POINT3_TEMP:
+		data->zone_abs[ix] = TEMP_TO_REG(val);
+		dme1737_write(client, DME1737_REG_ZONE_ABS(ix),
+			      data->zone_abs[ix]);
+		break;
+	default:
+		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+	}
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+/* ---------------------------------------------------------------------
+ * Fan sysfs attributes
+ * ix = [0-5]
+ * --------------------------------------------------------------------- */
+
+#define SYS_FAN_INPUT	0
+#define SYS_FAN_MIN	1
+#define SYS_FAN_MAX	2
+#define SYS_FAN_ALARM	3
+#define SYS_FAN_TYPE	4
+
+static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct dme1737_data *data = dme1737_update_device(dev);
+	struct sensor_device_attribute_2
+		*sensor_attr_2 = to_sensor_dev_attr_2(attr);
+	int ix = sensor_attr_2->index;
+	int fn = sensor_attr_2->nr;
+	int res;
+
+	switch (fn) {
+	case SYS_FAN_INPUT:
+		res = FAN_FROM_REG(data->fan[ix],
+				   ix < 4 ? 0 :
+				   FAN_TPC_FROM_REG(data->fan_opt[ix]));
+		break;
+	case SYS_FAN_MIN:
+		res = FAN_FROM_REG(data->fan_min[ix],
+				   ix < 4 ? 0 :
+				   FAN_TPC_FROM_REG(data->fan_opt[ix]));
+		break;
+	case SYS_FAN_MAX:
+		/* only valid for fan[5-6] */
+		res = FAN_MAX_FROM_REG(data->fan_max[ix - 4]);
+		break;
+	case SYS_FAN_ALARM:
+		res = (data->alarms >> DME1737_BIT_ALARM_FAN[ix]) & 0x01;
+		break;
+	case SYS_FAN_TYPE:
+		/* only valid for fan[1-4] */
+		res = FAN_TYPE_FROM_REG(data->fan_opt[ix]);
+		break;
+	default:
+		res = 0;
+		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+	}
+
+	return sprintf(buf, "%d\n", res);
+}
+
+static ssize_t set_fan(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct dme1737_data *data = i2c_get_clientdata(client);
+	struct sensor_device_attribute_2
+		*sensor_attr_2 = to_sensor_dev_attr_2(attr);
+	int ix = sensor_attr_2->index;
+	int fn = sensor_attr_2->nr;
+	long val = simple_strtol(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	switch (fn) {
+	case SYS_FAN_MIN:
+		if (ix < 4) {
+			data->fan_min[ix] = FAN_TO_REG(val, 0);
+		} else {
+			/* Refresh the cache */
+			data->fan_opt[ix] = dme1737_read(client,
+						DME1737_REG_FAN_OPT(ix));
+			/* Modify the fan min value */
+			data->fan_min[ix] = FAN_TO_REG(val,
+					FAN_TPC_FROM_REG(data->fan_opt[ix]));
+		}
+		dme1737_write(client, DME1737_REG_FAN_MIN(ix),
+			      data->fan_min[ix] & 0xff);
+		dme1737_write(client, DME1737_REG_FAN_MIN(ix) + 1,
+			      data->fan_min[ix] >> 8);
+		break;
+	case SYS_FAN_MAX:
+		/* Only valid for fan[5-6] */
+		data->fan_max[ix - 4] = FAN_MAX_TO_REG(val);
+		dme1737_write(client, DME1737_REG_FAN_MAX(ix),
+			      data->fan_max[ix - 4]);
+		break;
+	case SYS_FAN_TYPE:
+		/* Only valid for fan[1-4] */
+		if (!(val == 1 || val == 2 || val == 4)) {
+			count = -EINVAL;
+			dev_warn(&client->dev, "Fan type value %ld not "
+				 "supported. Choose one of 1, 2, or 4.\n",
+				 val);
+			goto exit;
+		}
+		data->fan_opt[ix] = FAN_TYPE_TO_REG(val, dme1737_read(client,
+					DME1737_REG_FAN_OPT(ix)));
+		dme1737_write(client, DME1737_REG_FAN_OPT(ix),
+			      data->fan_opt[ix]);
+		break;
+	default:
+		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+	}
+exit:
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+/* ---------------------------------------------------------------------
+ * PWM sysfs attributes
+ * ix = [0-2, 4-5]
+ * --------------------------------------------------------------------- */
+
+#define SYS_PWM				0
+#define SYS_PWM_FREQ			1
+#define SYS_PWM_ENABLE			2
+#define SYS_PWM_RAMP_RATE		3
+#define SYS_PWM_AUTO_CHANNELS_ZONE	4
+#define SYS_PWM_AUTO_PWM_MIN		5
+#define SYS_PWM_AUTO_POINT1_PWM		6
+#define SYS_PWM_AUTO_POINT2_PWM		7
+
+static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct dme1737_data *data = dme1737_update_device(dev);
+	struct sensor_device_attribute_2
+		*sensor_attr_2 = to_sensor_dev_attr_2(attr);
+	int ix = sensor_attr_2->index;
+	int fn = sensor_attr_2->nr;
+	int res;
+
+	switch (fn) {
+	case SYS_PWM:
+		if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 0) {
+			res = 255;
+		} else {
+			res = data->pwm[ix];
+		}
+		break;
+	case SYS_PWM_FREQ:
+		res = PWM_FREQ_FROM_REG(data->pwm_freq[ix]);
+		break;
+	case SYS_PWM_ENABLE:
+		if (ix > 3) {
+			res = 1; /* pwm[5-6] hard-wired to manual mode */
+		} else {
+			res = PWM_EN_FROM_REG(data->pwm_config[ix]);
+		}
+		break;
+	case SYS_PWM_RAMP_RATE:
+		/* Only valid for pwm[1-3] */
+		res = PWM_RR_FROM_REG(data->pwm_rr[ix > 0], ix);
+		break;
+	case SYS_PWM_AUTO_CHANNELS_ZONE:
+		/* Only valid for pwm[1-3] */
+		if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 2) {
+			res = PWM_ACZ_FROM_REG(data->pwm_config[ix]);
+		} else {
+			res = data->pwm_acz[ix];
+		}
+		break;
+	case SYS_PWM_AUTO_PWM_MIN:
+		/* Only valid for pwm[1-3] */
+		if (PWM_OFF_FROM_REG(data->pwm_rr[0], ix)) {
+			res = data->pwm_min[ix];
+		} else {
+			res = 0;
+		}
+		break;
+	case SYS_PWM_AUTO_POINT1_PWM:
+		/* Only valid for pwm[1-3] */
+		res = data->pwm_min[ix];
+		break;
+	case SYS_PWM_AUTO_POINT2_PWM:
+		/* Only valid for pwm[1-3] */
+		res = 255; /* hard-wired */
+		break;
+	default:
+		res = 0;
+		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+	}
+
+	return sprintf(buf, "%d\n", res);
+}
+
+static struct attribute *dme1737_attr_pwm[];
+static void dme1737_chmod_file(struct i2c_client*, struct attribute*, mode_t);
+
+static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct dme1737_data *data = i2c_get_clientdata(client);
+	struct sensor_device_attribute_2
+		*sensor_attr_2 = to_sensor_dev_attr_2(attr);
+	int ix = sensor_attr_2->index;
+	int fn = sensor_attr_2->nr;
+	long val = simple_strtol(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	switch (fn) {
+	case SYS_PWM:
+		data->pwm[ix] = SENSORS_LIMIT(val, 0, 255);
+		dme1737_write(client, DME1737_REG_PWM(ix), data->pwm[ix]);
+		break;
+	case SYS_PWM_FREQ:
+		data->pwm_freq[ix] = PWM_FREQ_TO_REG(val, dme1737_read(client,
+						DME1737_REG_PWM_FREQ(ix)));
+		dme1737_write(client, DME1737_REG_PWM_FREQ(ix),
+			      data->pwm_freq[ix]);
+		break;
+	case SYS_PWM_ENABLE:
+		/* Only valid for pwm[1-3] */
+		if (val < 0 || val > 2) {
+			count = -EINVAL;
+			dev_warn(&client->dev, "PWM enable %ld not "
+				 "supported. Choose one of 0, 1, or 2.\n",
+				 val);
+			goto exit;
+		}
+		/* Refresh the cache */
+		data->pwm_config[ix] = dme1737_read(client,
+						DME1737_REG_PWM_CONFIG(ix));
+		if (val == PWM_EN_FROM_REG(data->pwm_config[ix])) {
+			/* Bail out if no change */
+			goto exit;
+		}
+		/* Do some housekeeping if we are currently in auto mode */
+		if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 2) {
+			/* Save the current zone channel assignment */
+			data->pwm_acz[ix] = PWM_ACZ_FROM_REG(
+							data->pwm_config[ix]);
+			/* Save the current ramp rate state and disable it */
+			data->pwm_rr[ix > 0] = dme1737_read(client,
+						DME1737_REG_PWM_RR(ix > 0));
+			data->pwm_rr_en &= ~(1 << ix);
+			if (PWM_RR_EN_FROM_REG(data->pwm_rr[ix > 0], ix)) {
+				data->pwm_rr_en |= (1 << ix);
+				data->pwm_rr[ix > 0] = PWM_RR_EN_TO_REG(0, ix,
+							data->pwm_rr[ix > 0]);
+				dme1737_write(client,
+					      DME1737_REG_PWM_RR(ix > 0),
+					      data->pwm_rr[ix > 0]);
+			}
+		}
+		/* Set the new PWM mode */
+		switch (val) {
+		case 0:
+			/* Change permissions of pwm[ix] to read-only */
+			dme1737_chmod_file(client, dme1737_attr_pwm[ix],
+					   S_IRUGO);
+			/* Turn fan fully on */
+			data->pwm_config[ix] = PWM_EN_TO_REG(0,
+							data->pwm_config[ix]);
+			dme1737_write(client, DME1737_REG_PWM_CONFIG(ix),
+				      data->pwm_config[ix]);
+			break;
+		case 1:
+			/* Turn on manual mode */
+			data->pwm_config[ix] = PWM_EN_TO_REG(1,
+							data->pwm_config[ix]);
+			dme1737_write(client, DME1737_REG_PWM_CONFIG(ix),
+				      data->pwm_config[ix]);
+			/* Change permissions of pwm[ix] to read-writeable */
+			dme1737_chmod_file(client, dme1737_attr_pwm[ix],
+					   S_IRUGO | S_IWUSR);
+			break;
+		case 2:
+			/* Change permissions of pwm[ix] to read-only */
+			dme1737_chmod_file(client, dme1737_attr_pwm[ix],
+					   S_IRUGO);
+			/* Turn on auto mode using the saved zone channel
+			 * assignment */
+			data->pwm_config[ix] = PWM_ACZ_TO_REG(
+							data->pwm_acz[ix],
+							data->pwm_config[ix]);
+			dme1737_write(client, DME1737_REG_PWM_CONFIG(ix),
+				      data->pwm_config[ix]);
+			/* Enable PWM ramp rate if previously enabled */
+			if (data->pwm_rr_en & (1 << ix)) {
+				data->pwm_rr[ix > 0] = PWM_RR_EN_TO_REG(1, ix,
+						dme1737_read(client,
+						DME1737_REG_PWM_RR(ix > 0)));
+				dme1737_write(client,
+					      DME1737_REG_PWM_RR(ix > 0),
+					      data->pwm_rr[ix > 0]);
+			}
+			break;
+		}
+		break;
+	case SYS_PWM_RAMP_RATE:
+		/* Only valid for pwm[1-3] */
+		/* Refresh the cache */
+		data->pwm_config[ix] = dme1737_read(client,
+						DME1737_REG_PWM_CONFIG(ix));
+		data->pwm_rr[ix > 0] = dme1737_read(client,
+						DME1737_REG_PWM_RR(ix > 0));
+		/* Set the ramp rate value */
+		if (val > 0) {
+			data->pwm_rr[ix > 0] = PWM_RR_TO_REG(val, ix,
+							data->pwm_rr[ix > 0]);
+		}
+		/* Enable/disable the feature only if the associated PWM
+		 * output is in automatic mode. */
+		if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 2) {
+			data->pwm_rr[ix > 0] = PWM_RR_EN_TO_REG(val > 0, ix,
+							data->pwm_rr[ix > 0]);
+		}
+		dme1737_write(client, DME1737_REG_PWM_RR(ix > 0),
+			      data->pwm_rr[ix > 0]);
+		break;
+	case SYS_PWM_AUTO_CHANNELS_ZONE:
+		/* Only valid for pwm[1-3] */
+		if (!(val == 1 || val == 2 || val == 4 ||
+		      val == 6 || val == 7)) {
+			count = -EINVAL;
+			dev_warn(&client->dev, "PWM auto channels zone %ld "
+				 "not supported. Choose one of 1, 2, 4, 6, "
+				 "or 7.\n", val);
+			goto exit;
+		}
+		/* Refresh the cache */
+		data->pwm_config[ix] = dme1737_read(client,
+						DME1737_REG_PWM_CONFIG(ix));
+		if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 2) {
+			/* PWM is already in auto mode so update the temp
+			 * channel assignment */
+			data->pwm_config[ix] = PWM_ACZ_TO_REG(val,
+						data->pwm_config[ix]);
+			dme1737_write(client, DME1737_REG_PWM_CONFIG(ix),
+				      data->pwm_config[ix]);
+		} else {
+			/* PWM is not in auto mode so we save the temp
+			 * channel assignment for later use */
+			data->pwm_acz[ix] = val;
+		}
+		break;
+	case SYS_PWM_AUTO_PWM_MIN:
+		/* Only valid for pwm[1-3] */
+		/* Refresh the cache */
+		data->pwm_min[ix] = dme1737_read(client,
+						DME1737_REG_PWM_MIN(ix));
+		/* There are only 2 values supported for the auto_pwm_min
+		 * value: 0 or auto_point1_pwm. So if the temperature drops
+		 * below the auto_point1_temp_hyst value, the fan either turns
+		 * off or runs at auto_point1_pwm duty-cycle. */
+		if (val > ((data->pwm_min[ix] + 1) / 2)) {
+			data->pwm_rr[0] = PWM_OFF_TO_REG(1, ix,
+						dme1737_read(client,
+						DME1737_REG_PWM_RR(0)));
+
+		} else {
+			data->pwm_rr[0] = PWM_OFF_TO_REG(0, ix,
+						dme1737_read(client,
+						DME1737_REG_PWM_RR(0)));
+
+		}
+		dme1737_write(client, DME1737_REG_PWM_RR(0),
+			      data->pwm_rr[0]);
+		break;
+	case SYS_PWM_AUTO_POINT1_PWM:
+		/* Only valid for pwm[1-3] */
+		data->pwm_min[ix] = SENSORS_LIMIT(val, 0, 255);
+		dme1737_write(client, DME1737_REG_PWM_MIN(ix),
+			      data->pwm_min[ix]);
+		break;
+	default:
+		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+	}
+exit:
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+/* ---------------------------------------------------------------------
+ * Miscellaneous sysfs attributes
+ * --------------------------------------------------------------------- */
+
+static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct dme1737_data *data = i2c_get_clientdata(client);
+
+	return sprintf(buf, "%d\n", data->vrm);
+}
+
+static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct dme1737_data *data = i2c_get_clientdata(client);
+	long val = simple_strtol(buf, NULL, 10);
+
+	data->vrm = val;
+	return count;
+}
+
+static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct dme1737_data *data = dme1737_update_device(dev);
+
+	return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
+}
+
+/* ---------------------------------------------------------------------
+ * Sysfs device attribute defines and structs
+ * --------------------------------------------------------------------- */
+
+/* Voltages 0-6 */
+
+#define SENSOR_DEVICE_ATTR_IN(ix) \
+static SENSOR_DEVICE_ATTR_2(in##ix##_input, S_IRUGO, \
+        show_in, NULL, SYS_IN_INPUT, ix); \
+static SENSOR_DEVICE_ATTR_2(in##ix##_min, S_IRUGO | S_IWUSR, \
+        show_in, set_in, SYS_IN_MIN, ix); \
+static SENSOR_DEVICE_ATTR_2(in##ix##_max, S_IRUGO | S_IWUSR, \
+        show_in, set_in, SYS_IN_MAX, ix); \
+static SENSOR_DEVICE_ATTR_2(in##ix##_alarm, S_IRUGO, \
+        show_in, NULL, SYS_IN_ALARM, ix)
+
+SENSOR_DEVICE_ATTR_IN(0);
+SENSOR_DEVICE_ATTR_IN(1);
+SENSOR_DEVICE_ATTR_IN(2);
+SENSOR_DEVICE_ATTR_IN(3);
+SENSOR_DEVICE_ATTR_IN(4);
+SENSOR_DEVICE_ATTR_IN(5);
+SENSOR_DEVICE_ATTR_IN(6);
+
+/* Temperatures 1-3 */
+
+#define SENSOR_DEVICE_ATTR_TEMP(ix) \
+static SENSOR_DEVICE_ATTR_2(temp##ix##_input, S_IRUGO, \
+        show_temp, NULL, SYS_TEMP_INPUT, ix-1); \
+static SENSOR_DEVICE_ATTR_2(temp##ix##_min, S_IRUGO | S_IWUSR, \
+        show_temp, set_temp, SYS_TEMP_MIN, ix-1); \
+static SENSOR_DEVICE_ATTR_2(temp##ix##_max, S_IRUGO | S_IWUSR, \
+        show_temp, set_temp, SYS_TEMP_MAX, ix-1); \
+static SENSOR_DEVICE_ATTR_2(temp##ix##_offset, S_IRUGO, \
+        show_temp, set_temp, SYS_TEMP_OFFSET, ix-1); \
+static SENSOR_DEVICE_ATTR_2(temp##ix##_alarm, S_IRUGO, \
+        show_temp, NULL, SYS_TEMP_ALARM, ix-1); \
+static SENSOR_DEVICE_ATTR_2(temp##ix##_fault, S_IRUGO, \
+        show_temp, NULL, SYS_TEMP_FAULT, ix-1)
+
+SENSOR_DEVICE_ATTR_TEMP(1);
+SENSOR_DEVICE_ATTR_TEMP(2);
+SENSOR_DEVICE_ATTR_TEMP(3);
+
+/* Zones 1-3 */
+
+#define SENSOR_DEVICE_ATTR_ZONE(ix) \
+static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_channels_temp, S_IRUGO, \
+        show_zone, NULL, SYS_ZONE_AUTO_CHANNELS_TEMP, ix-1); \
+static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point1_temp_hyst, S_IRUGO, \
+        show_zone, set_zone, SYS_ZONE_AUTO_POINT1_TEMP_HYST, ix-1); \
+static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point1_temp, S_IRUGO, \
+        show_zone, set_zone, SYS_ZONE_AUTO_POINT1_TEMP, ix-1); \
+static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point2_temp, S_IRUGO, \
+        show_zone, set_zone, SYS_ZONE_AUTO_POINT2_TEMP, ix-1); \
+static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point3_temp, S_IRUGO, \
+        show_zone, set_zone, SYS_ZONE_AUTO_POINT3_TEMP, ix-1)
+
+SENSOR_DEVICE_ATTR_ZONE(1);
+SENSOR_DEVICE_ATTR_ZONE(2);
+SENSOR_DEVICE_ATTR_ZONE(3);
+
+/* Fans 1-4 */
+
+#define SENSOR_DEVICE_ATTR_FAN_1TO4(ix) \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_input, S_IRUGO, \
+        show_fan, NULL, SYS_FAN_INPUT, ix-1); \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_min, S_IRUGO | S_IWUSR, \
+        show_fan, set_fan, SYS_FAN_MIN, ix-1); \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_alarm, S_IRUGO, \
+        show_fan, NULL, SYS_FAN_ALARM, ix-1); \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_type, S_IRUGO | S_IWUSR, \
+        show_fan, set_fan, SYS_FAN_TYPE, ix-1)
+
+SENSOR_DEVICE_ATTR_FAN_1TO4(1);
+SENSOR_DEVICE_ATTR_FAN_1TO4(2);
+SENSOR_DEVICE_ATTR_FAN_1TO4(3);
+SENSOR_DEVICE_ATTR_FAN_1TO4(4);
+
+/* Fans 5-6 */
+
+#define SENSOR_DEVICE_ATTR_FAN_5TO6(ix) \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_input, S_IRUGO, \
+        show_fan, NULL, SYS_FAN_INPUT, ix-1); \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_min, S_IRUGO | S_IWUSR, \
+        show_fan, set_fan, SYS_FAN_MIN, ix-1); \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_alarm, S_IRUGO, \
+        show_fan, NULL, SYS_FAN_ALARM, ix-1); \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_max, S_IRUGO | S_IWUSR, \
+        show_fan, set_fan, SYS_FAN_MAX, ix-1)
+
+SENSOR_DEVICE_ATTR_FAN_5TO6(5);
+SENSOR_DEVICE_ATTR_FAN_5TO6(6);
+
+/* PWMs 1-3 */
+
+#define SENSOR_DEVICE_ATTR_PWM_1TO3(ix) \
+static SENSOR_DEVICE_ATTR_2(pwm##ix, S_IRUGO, \
+        show_pwm, set_pwm, SYS_PWM, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_freq, S_IRUGO, \
+        show_pwm, set_pwm, SYS_PWM_FREQ, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_enable, S_IRUGO, \
+        show_pwm, set_pwm, SYS_PWM_ENABLE, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_ramp_rate, S_IRUGO, \
+        show_pwm, set_pwm, SYS_PWM_RAMP_RATE, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_auto_channels_zone, S_IRUGO, \
+        show_pwm, set_pwm, SYS_PWM_AUTO_CHANNELS_ZONE, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_auto_pwm_min, S_IRUGO, \
+        show_pwm, set_pwm, SYS_PWM_AUTO_PWM_MIN, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_auto_point1_pwm, S_IRUGO, \
+        show_pwm, set_pwm, SYS_PWM_AUTO_POINT1_PWM, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_auto_point2_pwm, S_IRUGO, \
+        show_pwm, NULL, SYS_PWM_AUTO_POINT2_PWM, ix-1)
+
+SENSOR_DEVICE_ATTR_PWM_1TO3(1);
+SENSOR_DEVICE_ATTR_PWM_1TO3(2);
+SENSOR_DEVICE_ATTR_PWM_1TO3(3);
+
+/* PWMs 5-6 */
+
+#define SENSOR_DEVICE_ATTR_PWM_5TO6(ix) \
+static SENSOR_DEVICE_ATTR_2(pwm##ix, S_IRUGO | S_IWUSR, \
+        show_pwm, set_pwm, SYS_PWM, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_freq, S_IRUGO | S_IWUSR, \
+        show_pwm, set_pwm, SYS_PWM_FREQ, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_enable, S_IRUGO, \
+        show_pwm, NULL, SYS_PWM_ENABLE, ix-1)
+
+SENSOR_DEVICE_ATTR_PWM_5TO6(5);
+SENSOR_DEVICE_ATTR_PWM_5TO6(6);
+
+/* Misc */
+
+static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
+static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+
+#define SENSOR_DEV_ATTR_IN(ix) \
+&sensor_dev_attr_in##ix##_input.dev_attr.attr, \
+&sensor_dev_attr_in##ix##_min.dev_attr.attr, \
+&sensor_dev_attr_in##ix##_max.dev_attr.attr, \
+&sensor_dev_attr_in##ix##_alarm.dev_attr.attr
+
+/* These attributes are read-writeable only if the chip is *not* locked */
+#define SENSOR_DEV_ATTR_TEMP_LOCK(ix) \
+&sensor_dev_attr_temp##ix##_offset.dev_attr.attr
+
+#define SENSOR_DEV_ATTR_TEMP(ix) \
+SENSOR_DEV_ATTR_TEMP_LOCK(ix), \
+&sensor_dev_attr_temp##ix##_input.dev_attr.attr, \
+&sensor_dev_attr_temp##ix##_min.dev_attr.attr, \
+&sensor_dev_attr_temp##ix##_max.dev_attr.attr, \
+&sensor_dev_attr_temp##ix##_alarm.dev_attr.attr, \
+&sensor_dev_attr_temp##ix##_fault.dev_attr.attr
+
+/* These attributes are read-writeable only if the chip is *not* locked */
+#define SENSOR_DEV_ATTR_ZONE_LOCK(ix) \
+&sensor_dev_attr_zone##ix##_auto_point1_temp_hyst.dev_attr.attr, \
+&sensor_dev_attr_zone##ix##_auto_point1_temp.dev_attr.attr, \
+&sensor_dev_attr_zone##ix##_auto_point2_temp.dev_attr.attr, \
+&sensor_dev_attr_zone##ix##_auto_point3_temp.dev_attr.attr
+
+#define SENSOR_DEV_ATTR_ZONE(ix) \
+SENSOR_DEV_ATTR_ZONE_LOCK(ix), \
+&sensor_dev_attr_zone##ix##_auto_channels_temp.dev_attr.attr
+
+#define SENSOR_DEV_ATTR_FAN_1TO4(ix) \
+&sensor_dev_attr_fan##ix##_input.dev_attr.attr, \
+&sensor_dev_attr_fan##ix##_min.dev_attr.attr, \
+&sensor_dev_attr_fan##ix##_alarm.dev_attr.attr, \
+&sensor_dev_attr_fan##ix##_type.dev_attr.attr
+
+#define SENSOR_DEV_ATTR_FAN_5TO6(ix) \
+&sensor_dev_attr_fan##ix##_input.dev_attr.attr, \
+&sensor_dev_attr_fan##ix##_min.dev_attr.attr, \
+&sensor_dev_attr_fan##ix##_alarm.dev_attr.attr, \
+&sensor_dev_attr_fan##ix##_max.dev_attr.attr
+
+/* These attributes are read-writeable only if the chip is *not* locked */
+#define SENSOR_DEV_ATTR_PWM_1TO3_LOCK(ix) \
+&sensor_dev_attr_pwm##ix##_freq.dev_attr.attr, \
+&sensor_dev_attr_pwm##ix##_enable.dev_attr.attr, \
+&sensor_dev_attr_pwm##ix##_ramp_rate.dev_attr.attr, \
+&sensor_dev_attr_pwm##ix##_auto_channels_zone.dev_attr.attr, \
+&sensor_dev_attr_pwm##ix##_auto_pwm_min.dev_attr.attr, \
+&sensor_dev_attr_pwm##ix##_auto_point1_pwm.dev_attr.attr
+
+#define SENSOR_DEV_ATTR_PWM_1TO3(ix) \
+SENSOR_DEV_ATTR_PWM_1TO3_LOCK(ix), \
+&sensor_dev_attr_pwm##ix.dev_attr.attr, \
+&sensor_dev_attr_pwm##ix##_auto_point2_pwm.dev_attr.attr
+
+/* These attributes are read-writeable only if the chip is *not* locked */
+#define SENSOR_DEV_ATTR_PWM_5TO6_LOCK(ix) \
+&sensor_dev_attr_pwm##ix.dev_attr.attr, \
+&sensor_dev_attr_pwm##ix##_freq.dev_attr.attr
+
+#define SENSOR_DEV_ATTR_PWM_5TO6(ix) \
+SENSOR_DEV_ATTR_PWM_5TO6_LOCK(ix), \
+&sensor_dev_attr_pwm##ix##_enable.dev_attr.attr
+
+/* This struct holds all the attributes that are always present and need to be
+ * created unconditionally. The attributes that need modification of their
+ * permissions are created read-only and write permissions are added or removed
+ * on the fly when required */
+static struct attribute *dme1737_attr[] = {
+        /* Voltages */
+        SENSOR_DEV_ATTR_IN(0),
+        SENSOR_DEV_ATTR_IN(1),
+        SENSOR_DEV_ATTR_IN(2),
+        SENSOR_DEV_ATTR_IN(3),
+        SENSOR_DEV_ATTR_IN(4),
+        SENSOR_DEV_ATTR_IN(5),
+        SENSOR_DEV_ATTR_IN(6),
+        /* Temperatures */
+        SENSOR_DEV_ATTR_TEMP(1),
+        SENSOR_DEV_ATTR_TEMP(2),
+        SENSOR_DEV_ATTR_TEMP(3),
+        /* Zones */
+        SENSOR_DEV_ATTR_ZONE(1),
+        SENSOR_DEV_ATTR_ZONE(2),
+        SENSOR_DEV_ATTR_ZONE(3),
+        /* Misc */
+        &dev_attr_vrm.attr,
+        &dev_attr_cpu0_vid.attr,
+	NULL
+};
+
+static const struct attribute_group dme1737_group = {
+        .attrs = dme1737_attr,
+};
+
+/* The following structs hold the PWM attributes, some of which are optional.
+ * Their creation depends on the chip configuration which is determined during
+ * module load. */
+static struct attribute *dme1737_attr_pwm1[] = {
+        SENSOR_DEV_ATTR_PWM_1TO3(1),
+	NULL
+};
+static struct attribute *dme1737_attr_pwm2[] = {
+        SENSOR_DEV_ATTR_PWM_1TO3(2),
+	NULL
+};
+static struct attribute *dme1737_attr_pwm3[] = {
+        SENSOR_DEV_ATTR_PWM_1TO3(3),
+	NULL
+};
+static struct attribute *dme1737_attr_pwm5[] = {
+        SENSOR_DEV_ATTR_PWM_5TO6(5),
+	NULL
+};
+static struct attribute *dme1737_attr_pwm6[] = {
+        SENSOR_DEV_ATTR_PWM_5TO6(6),
+	NULL
+};
+
+static const struct attribute_group dme1737_pwm_group[] = {
+	{ .attrs = dme1737_attr_pwm1 },
+	{ .attrs = dme1737_attr_pwm2 },
+	{ .attrs = dme1737_attr_pwm3 },
+	{ .attrs = NULL },
+	{ .attrs = dme1737_attr_pwm5 },
+	{ .attrs = dme1737_attr_pwm6 },
+};
+
+/* The following structs hold the fan attributes, some of which are optional.
+ * Their creation depends on the chip configuration which is determined during
+ * module load. */
+static struct attribute *dme1737_attr_fan1[] = {
+        SENSOR_DEV_ATTR_FAN_1TO4(1),
+	NULL
+};
+static struct attribute *dme1737_attr_fan2[] = {
+        SENSOR_DEV_ATTR_FAN_1TO4(2),
+	NULL
+};
+static struct attribute *dme1737_attr_fan3[] = {
+        SENSOR_DEV_ATTR_FAN_1TO4(3),
+	NULL
+};
+static struct attribute *dme1737_attr_fan4[] = {
+        SENSOR_DEV_ATTR_FAN_1TO4(4),
+	NULL
+};
+static struct attribute *dme1737_attr_fan5[] = {
+        SENSOR_DEV_ATTR_FAN_5TO6(5),
+	NULL
+};
+static struct attribute *dme1737_attr_fan6[] = {
+        SENSOR_DEV_ATTR_FAN_5TO6(6),
+	NULL
+};
+
+static const struct attribute_group dme1737_fan_group[] = {
+	{ .attrs = dme1737_attr_fan1 },
+	{ .attrs = dme1737_attr_fan2 },
+	{ .attrs = dme1737_attr_fan3 },
+	{ .attrs = dme1737_attr_fan4 },
+	{ .attrs = dme1737_attr_fan5 },
+	{ .attrs = dme1737_attr_fan6 },
+};
+
+/* The permissions of all of the following attributes are changed to read-
+ * writeable if the chip is *not* locked. Otherwise they stay read-only. */
+static struct attribute *dme1737_attr_lock[] = {
+	/* Temperatures */
+	SENSOR_DEV_ATTR_TEMP_LOCK(1),
+	SENSOR_DEV_ATTR_TEMP_LOCK(2),
+	SENSOR_DEV_ATTR_TEMP_LOCK(3),
+	/* Zones */
+	SENSOR_DEV_ATTR_ZONE_LOCK(1),
+	SENSOR_DEV_ATTR_ZONE_LOCK(2),
+	SENSOR_DEV_ATTR_ZONE_LOCK(3),
+	NULL
+};
+
+static const struct attribute_group dme1737_lock_group = {
+	.attrs = dme1737_attr_lock,
+};
+
+/* The permissions of the following PWM attributes are changed to read-
+ * writeable if the chip is *not* locked and the respective PWM is available.
+ * Otherwise they stay read-only. */
+static struct attribute *dme1737_attr_pwm1_lock[] = {
+        SENSOR_DEV_ATTR_PWM_1TO3_LOCK(1),
+	NULL
+};
+static struct attribute *dme1737_attr_pwm2_lock[] = {
+        SENSOR_DEV_ATTR_PWM_1TO3_LOCK(2),
+	NULL
+};
+static struct attribute *dme1737_attr_pwm3_lock[] = {
+        SENSOR_DEV_ATTR_PWM_1TO3_LOCK(3),
+	NULL
+};
+static struct attribute *dme1737_attr_pwm5_lock[] = {
+        SENSOR_DEV_ATTR_PWM_5TO6_LOCK(5),
+	NULL
+};
+static struct attribute *dme1737_attr_pwm6_lock[] = {
+        SENSOR_DEV_ATTR_PWM_5TO6_LOCK(6),
+	NULL
+};
+
+static const struct attribute_group dme1737_pwm_lock_group[] = {
+	{ .attrs = dme1737_attr_pwm1_lock },
+	{ .attrs = dme1737_attr_pwm2_lock },
+	{ .attrs = dme1737_attr_pwm3_lock },
+	{ .attrs = NULL },
+	{ .attrs = dme1737_attr_pwm5_lock },
+	{ .attrs = dme1737_attr_pwm6_lock },
+};
+
+/* Pwm[1-3] are read-writeable if the associated pwm is in manual mode and the
+ * chip is not locked. Otherwise they are read-only. */
+static struct attribute *dme1737_attr_pwm[] = {
+	&sensor_dev_attr_pwm1.dev_attr.attr,
+	&sensor_dev_attr_pwm2.dev_attr.attr,
+	&sensor_dev_attr_pwm3.dev_attr.attr,
+};
+
+/* ---------------------------------------------------------------------
+ * Super-IO functions
+ * --------------------------------------------------------------------- */
+
+static inline int dme1737_sio_inb(int sio_cip, int reg)
+{
+	outb(reg, sio_cip);
+	return inb(sio_cip + 1);
+}
+
+static inline void dme1737_sio_outb(int sio_cip, int reg, int val)
+{
+	outb(reg, sio_cip);
+	outb(val, sio_cip + 1);
+}
+
+static int dme1737_sio_get_features(int sio_cip, struct i2c_client *client)
+{
+	struct dme1737_data *data = i2c_get_clientdata(client);
+	int err = 0, reg;
+	u16 addr;
+
+	/* Enter configuration mode */
+	outb(0x55, sio_cip);
+
+	/* Check device ID
+	 * The DME1737 can return either 0x78 or 0x77 as its device ID. */
+	reg = dme1737_sio_inb(sio_cip, 0x20);
+	if (!(reg == 0x77 || reg == 0x78)) {
+		err = -ENODEV;
+		goto exit;
+	}
+
+	/* Select logical device A (runtime registers) */
+	dme1737_sio_outb(sio_cip, 0x07, 0x0a);
+
+	/* Get the base address of the runtime registers */
+	if (!(addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
+		      dme1737_sio_inb(sio_cip, 0x61))) {
+		err = -ENODEV;
+		goto exit;
+	}
+
+	/* Read the runtime registers to determine which optional features
+	 * are enabled and available. Bits [3:2] of registers 0x43-0x46 are set
+	 * to '10' if the respective feature is enabled. */
+	if ((inb(addr + 0x43) & 0x0c) == 0x08) { /* fan6 */
+		data->has_fan |= (1 << 5);
+	}
+	if ((inb(addr + 0x44) & 0x0c) == 0x08) { /* pwm6 */
+		data->has_pwm |= (1 << 5);
+	}
+	if ((inb(addr + 0x45) & 0x0c) == 0x08) { /* fan5 */
+		data->has_fan |= (1 << 4);
+	}
+	if ((inb(addr + 0x46) & 0x0c) == 0x08) { /* pwm5 */
+		data->has_pwm |= (1 << 4);
+	}
+
+exit:
+	/* Exit configuration mode */
+	outb(0xaa, sio_cip);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------
+ * Device detection, registration and initialization
+ * --------------------------------------------------------------------- */
+
+static struct i2c_driver dme1737_driver;
+
+static void dme1737_chmod_file(struct i2c_client *client,
+			       struct attribute *attr, mode_t mode)
+{
+	if (sysfs_chmod_file(&client->dev.kobj, attr, mode)) {
+		dev_warn(&client->dev, "Failed to change permissions of %s.\n",
+			 attr->name);
+	}
+}
+
+static void dme1737_chmod_group(struct i2c_client *client,
+				const struct attribute_group *group,
+				mode_t mode)
+{
+	struct attribute **attr;
+
+	for (attr = group->attrs; *attr; attr++) {
+		dme1737_chmod_file(client, *attr, mode);
+	}
+}
+
+static int dme1737_init_client(struct i2c_client *client)
+{
+	struct dme1737_data *data = i2c_get_clientdata(client);
+	int ix;
+	u8 reg;
+
+	data->config = dme1737_read(client, DME1737_REG_CONFIG);
+	/* Inform if part is not monitoring/started */
+	if (!(data->config & 0x01)) {
+		if (!force_start) {
+			dev_err(&client->dev, "Device is not monitoring. "
+				"Use the force_start load parameter to "
+				"override.\n");
+			return -EFAULT;
+		}
+
+		/* Force monitoring */
+		data->config |= 0x01;
+		dme1737_write(client, DME1737_REG_CONFIG, data->config);
+	}
+	/* Inform if part is not ready */
+	if (!(data->config & 0x04)) {
+		dev_err(&client->dev, "Device is not ready.\n");
+		return -EFAULT;
+	}
+
+	data->config2 = dme1737_read(client, DME1737_REG_CONFIG2);
+	/* Check if optional fan3 input is enabled */
+	if (data->config2 & 0x04) {
+		data->has_fan |= (1 << 2);
+	}
+
+	/* Fan4 and pwm3 are only available if the client's I2C address
+	 * is the default 0x2e. Otherwise the I/Os associated with these
+	 * functions are used for addr enable/select. */
+	if (client->addr == 0x2e) {
+		data->has_fan |= (1 << 3);
+		data->has_pwm |= (1 << 2);
+	}
+
+	/* Determine if the optional fan[5-6] and/or pwm[5-6] are enabled.
+	 * For this, we need to query the runtime registers through the
+	 * Super-IO LPC interface. Try both config ports 0x2e and 0x4e. */
+	if (dme1737_sio_get_features(0x2e, client) &&
+	    dme1737_sio_get_features(0x4e, client)) {
+		dev_warn(&client->dev, "Failed to query Super-IO for optional "
+			 "features.\n");
+	}
+
+	/* Fan1, fan2, pwm1, and pwm2 are always present */
+	data->has_fan |= 0x03;
+	data->has_pwm |= 0x03;
+
+	dev_info(&client->dev, "Optional features: pwm3=%s, pwm5=%s, pwm6=%s, "
+		 "fan3=%s, fan4=%s, fan5=%s, fan6=%s.\n",
+		 (data->has_pwm & (1 << 2)) ? "yes" : "no",
+		 (data->has_pwm & (1 << 4)) ? "yes" : "no",
+		 (data->has_pwm & (1 << 5)) ? "yes" : "no",
+		 (data->has_fan & (1 << 2)) ? "yes" : "no",
+		 (data->has_fan & (1 << 3)) ? "yes" : "no",
+		 (data->has_fan & (1 << 4)) ? "yes" : "no",
+		 (data->has_fan & (1 << 5)) ? "yes" : "no");
+
+	reg = dme1737_read(client, DME1737_REG_TACH_PWM);
+	/* Inform if fan-to-pwm mapping differs from the default */
+	if (reg != 0xa4) {
+		dev_warn(&client->dev, "Non-standard fan to pwm mapping: "
+			 "fan1->pwm%d, fan2->pwm%d, fan3->pwm%d, "
+			 "fan4->pwm%d. Please report to the driver "
+			 "maintainer.\n",
+			 (reg & 0x03) + 1, ((reg >> 2) & 0x03) + 1,
+			 ((reg >> 4) & 0x03) + 1, ((reg >> 6) & 0x03) + 1);
+	}
+
+	/* Switch pwm[1-3] to manual mode if they are currently disabled and
+	 * set the duty-cycles to 0% (which is identical to the PWMs being
+	 * disabled). */
+	if (!(data->config & 0x02)) {
+		for (ix = 0; ix < 3; ix++) {
+			data->pwm_config[ix] = dme1737_read(client,
+						DME1737_REG_PWM_CONFIG(ix));
+			if ((data->has_pwm & (1 << ix)) &&
+			    (PWM_EN_FROM_REG(data->pwm_config[ix]) == -1)) {
+				dev_info(&client->dev, "Switching pwm%d to "
+					 "manual mode.\n", ix + 1);
+				data->pwm_config[ix] = PWM_EN_TO_REG(1,
+							data->pwm_config[ix]);
+				dme1737_write(client, DME1737_REG_PWM(ix), 0);
+				dme1737_write(client,
+					      DME1737_REG_PWM_CONFIG(ix),
+					      data->pwm_config[ix]);
+			}
+		}
+	}
+
+	/* Initialize the default PWM auto channels zone (acz) assignments */
+	data->pwm_acz[0] = 1;	/* pwm1 -> zone1 */
+	data->pwm_acz[1] = 2;	/* pwm2 -> zone2 */
+	data->pwm_acz[2] = 4;	/* pwm3 -> zone3 */
+
+	/* Set VRM */
+	data->vrm = vid_which_vrm();
+
+	return 0;
+}
+
+static int dme1737_detect(struct i2c_adapter *adapter, int address,
+			  int kind)
+{
+	u8 company, verstep = 0;
+	struct i2c_client *client;
+	struct dme1737_data *data;
+	int ix, err = 0;
+	const char *name;
+
+	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+		goto exit;
+	}
+
+	if (!(data = kzalloc(sizeof(struct dme1737_data), GFP_KERNEL))) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	client = &data->client;
+	i2c_set_clientdata(client, data);
+	client->addr = address;
+	client->adapter = adapter;
+	client->driver = &dme1737_driver;
+
+	/* A negative kind means that the driver was loaded with no force
+	 * parameter (default), so we must identify the chip. */
+	if (kind < 0) {
+		company = dme1737_read(client, DME1737_REG_COMPANY);
+		verstep = dme1737_read(client, DME1737_REG_VERSTEP);
+
+		if (!((company == DME1737_COMPANY_SMSC) &&
+		      ((verstep & DME1737_VERSTEP_MASK) == DME1737_VERSTEP))) {
+			err = -ENODEV;
+			goto exit_kfree;
+		}
+	}
+
+	kind = dme1737;
+	name = "dme1737";
+
+	/* Fill in the remaining client fields and put it into the global
+	 * list */
+	strlcpy(client->name, name, I2C_NAME_SIZE);
+	mutex_init(&data->update_lock);
+
+	/* Tell the I2C layer a new client has arrived */
+	if ((err = i2c_attach_client(client))) {
+		goto exit_kfree;
+	}
+
+	/* Initialize the DME1737 chip */
+	if ((err = dme1737_init_client(client))) {
+		goto exit_detach;
+	}
+
+	/* Create standard sysfs attributes */
+	if ((err = sysfs_create_group(&client->dev.kobj, &dme1737_group))) {
+		goto exit_detach;
+	}
+
+	/* Create fan sysfs attributes */
+	for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
+		if (data->has_fan & (1 << ix)) {
+			if ((err = sysfs_create_group(&client->dev.kobj,
+						&dme1737_fan_group[ix]))) {
+				goto exit_remove;
+			}
+		}
+	}
+
+	/* Create PWM sysfs attributes */
+	for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
+		if (data->has_pwm & (1 << ix)) {
+			if ((err = sysfs_create_group(&client->dev.kobj,
+						&dme1737_pwm_group[ix]))) {
+				goto exit_remove;
+			}
+		}
+	}
+
+	/* Inform if the device is locked. Otherwise change the permissions of
+	 * selected attributes from read-only to read-writeable. */
+	if (data->config & 0x02) {
+		dev_info(&client->dev, "Device is locked. Some attributes "
+			 "will be read-only.\n");
+	} else {
+		/* Change permissions of standard attributes */
+		dme1737_chmod_group(client, &dme1737_lock_group,
+				    S_IRUGO | S_IWUSR);
+
+		/* Change permissions of PWM attributes */
+		for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_lock_group); ix++) {
+			if (data->has_pwm & (1 << ix)) {
+				dme1737_chmod_group(client,
+						&dme1737_pwm_lock_group[ix],
+						S_IRUGO | S_IWUSR);
+			}
+		}
+
+		/* Change permissions of pwm[1-3] if in manual mode */
+		for (ix = 0; ix < 3; ix++) {
+			if ((data->has_pwm & (1 << ix)) &&
+			    (PWM_EN_FROM_REG(data->pwm_config[ix]) == 1)) {
+				dme1737_chmod_file(client,
+						   dme1737_attr_pwm[ix],
+						   S_IRUGO | S_IWUSR);
+			}
+		}
+	}
+
+	/* Register device */
+	data->class_dev = hwmon_device_register(&client->dev);
+	if (IS_ERR(data->class_dev)) {
+		err = PTR_ERR(data->class_dev);
+		goto exit_remove;
+	}
+
+	dev_info(&adapter->dev, "Found a DME1737 chip at 0x%02x "
+		 "(rev 0x%02x)\n", client->addr, verstep);
+
+	return 0;
+
+exit_remove:
+	for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
+		if (data->has_fan & (1 << ix)) {
+			sysfs_remove_group(&client->dev.kobj,
+					   &dme1737_fan_group[ix]);
+		}
+	}
+	for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
+		if (data->has_pwm & (1 << ix)) {
+			sysfs_remove_group(&client->dev.kobj,
+					   &dme1737_pwm_group[ix]);
+		}
+	}
+	sysfs_remove_group(&client->dev.kobj, &dme1737_group);
+exit_detach:
+	i2c_detach_client(client);
+exit_kfree:
+	kfree(data);
+exit:
+	return err;
+}
+
+static int dme1737_attach_adapter(struct i2c_adapter *adapter)
+{
+	if (!(adapter->class & I2C_CLASS_HWMON)) {
+		return 0;
+	}
+
+	return i2c_probe(adapter, &addr_data, dme1737_detect);
+}
+
+static int dme1737_detach_client(struct i2c_client *client)
+{
+	struct dme1737_data *data = i2c_get_clientdata(client);
+	int ix, err;
+
+	hwmon_device_unregister(data->class_dev);
+
+	for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
+		if (data->has_fan & (1 << ix)) {
+			sysfs_remove_group(&client->dev.kobj,
+					   &dme1737_fan_group[ix]);
+		}
+	}
+	for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
+		if (data->has_pwm & (1 << ix)) {
+			sysfs_remove_group(&client->dev.kobj,
+					   &dme1737_pwm_group[ix]);
+		}
+	}
+	sysfs_remove_group(&client->dev.kobj, &dme1737_group);
+
+	if ((err = i2c_detach_client(client))) {
+		return err;
+	}
+
+	kfree(data);
+	return 0;
+}
+
+static struct i2c_driver dme1737_driver = {
+	.driver = {
+		.name = "dme1737",
+	},
+	.attach_adapter	= dme1737_attach_adapter,
+	.detach_client = dme1737_detach_client,
+};
+
+static int __init dme1737_init(void)
+{
+	return i2c_add_driver(&dme1737_driver);
+}
+
+static void __exit dme1737_exit(void)
+{
+	i2c_del_driver(&dme1737_driver);
+}
+
+MODULE_AUTHOR("Juerg Haefliger <juergh@gmail.com>");
+MODULE_DESCRIPTION("DME1737 sensors");
+MODULE_LICENSE("GPL");
+
+module_init(dme1737_init);
+module_exit(dme1737_exit);
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index d5ac422..1212d6b 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -27,6 +27,7 @@
 #include <linux/jiffies.h>
 #include <linux/i2c.h>
 #include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
 #include <linux/err.h>
 #include <linux/mutex.h>
 #include <linux/sysfs.h>
@@ -52,9 +53,11 @@
 #define DS1621_REG_CONFIG_DONE		0x80
 
 /* The DS1621 registers */
-#define DS1621_REG_TEMP			0xAA /* word, RO */
-#define DS1621_REG_TEMP_MIN		0xA2 /* word, RW */
-#define DS1621_REG_TEMP_MAX		0xA1 /* word, RW */
+static const u8 DS1621_REG_TEMP[3] = {
+	0xAA,		/* input, word, RO */
+	0xA2,		/* min, word, RW */
+	0xA1,		/* max, word, RW */
+};
 #define DS1621_REG_CONF			0xAC /* byte, RW */
 #define DS1621_COM_START		0xEE /* no data */
 #define DS1621_COM_STOP			0x22 /* no data */
@@ -63,10 +66,7 @@
 #define DS1621_ALARM_TEMP_HIGH		0x40
 #define DS1621_ALARM_TEMP_LOW		0x20
 
-/* Conversions. Rounding and limit checking is only done on the TO_REG
-   variants. Note that you should be a bit careful with which arguments
-   these macros are called: arguments may be evaluated more than once.
-   Fixing this is just not worth it. */
+/* Conversions */
 #define ALARMS_FROM_REG(val) ((val) & \
                               (DS1621_ALARM_TEMP_HIGH | DS1621_ALARM_TEMP_LOW))
 
@@ -78,7 +78,7 @@
 	char valid;			/* !=0 if following fields are valid */
 	unsigned long last_updated;	/* In jiffies */
 
-	u16 temp, temp_min, temp_max;	/* Register values, word */
+	u16 temp[3];			/* Register values, word */
 	u8 conf;			/* Register encoding, combined */
 };
 
@@ -101,7 +101,7 @@
 
 /* All registers are word-sized, except for the configuration register.
    DS1621 uses a high-byte first convention, which is exactly opposite to
-   the usual practice. */
+   the SMBus standard. */
 static int ds1621_read_value(struct i2c_client *client, u8 reg)
 {
 	if (reg == DS1621_REG_CONF)
@@ -110,9 +110,6 @@
 		return swab16(i2c_smbus_read_word_data(client, reg));
 }
 
-/* All registers are word-sized, except for the configuration register.
-   DS1621 uses a high-byte first convention, which is exactly opposite to
-   the usual practice. */
 static int ds1621_write_value(struct i2c_client *client, u8 reg, u16 value)
 {
 	if (reg == DS1621_REG_CONF)
@@ -139,50 +136,61 @@
 	i2c_smbus_write_byte(client, DS1621_COM_START);
 }
 
-#define show(value)							\
-static ssize_t show_##value(struct device *dev, struct device_attribute *attr, char *buf)		\
-{									\
-	struct ds1621_data *data = ds1621_update_client(dev);		\
-	return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->value));	\
+static ssize_t show_temp(struct device *dev, struct device_attribute *da,
+			 char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	struct ds1621_data *data = ds1621_update_client(dev);
+	return sprintf(buf, "%d\n",
+		       LM75_TEMP_FROM_REG(data->temp[attr->index]));
 }
 
-show(temp);
-show(temp_min);
-show(temp_max);
+static ssize_t set_temp(struct device *dev, struct device_attribute *da,
+			const char *buf, size_t count)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct ds1621_data *data = ds1621_update_client(dev);
+	u16 val = LM75_TEMP_TO_REG(simple_strtoul(buf, NULL, 10));
 
-#define set_temp(suffix, value, reg)					\
-static ssize_t set_temp_##suffix(struct device *dev, struct device_attribute *attr, const char *buf,	\
-				 size_t count)				\
-{									\
-	struct i2c_client *client = to_i2c_client(dev);			\
-	struct ds1621_data *data = ds1621_update_client(dev);		\
-	u16 val = LM75_TEMP_TO_REG(simple_strtoul(buf, NULL, 10));	\
-									\
-	mutex_lock(&data->update_lock);					\
-	data->value = val;						\
-	ds1621_write_value(client, reg, data->value);			\
-	mutex_unlock(&data->update_lock);				\
-	return count;							\
+	mutex_lock(&data->update_lock);
+	data->temp[attr->index] = val;
+	ds1621_write_value(client, DS1621_REG_TEMP[attr->index],
+			   data->temp[attr->index]);
+	mutex_unlock(&data->update_lock);
+	return count;
 }
 
-set_temp(min, temp_min, DS1621_REG_TEMP_MIN);
-set_temp(max, temp_max, DS1621_REG_TEMP_MAX);
-
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_alarms(struct device *dev, struct device_attribute *da,
+			   char *buf)
 {
 	struct ds1621_data *data = ds1621_update_client(dev);
 	return sprintf(buf, "%d\n", ALARMS_FROM_REG(data->conf));
 }
 
+static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
+			  char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	struct ds1621_data *data = ds1621_update_client(dev);
+	return sprintf(buf, "%d\n", !!(data->conf & attr->index));
+}
+
 static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
-static DEVICE_ATTR(temp1_input, S_IRUGO , show_temp, NULL);
-static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO , show_temp_min, set_temp_min);
-static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max, set_temp_max);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp, set_temp, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp, set_temp, 2);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL,
+		DS1621_ALARM_TEMP_LOW);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL,
+		DS1621_ALARM_TEMP_HIGH);
 
 static struct attribute *ds1621_attributes[] = {
-	&dev_attr_temp1_input.attr,
-	&dev_attr_temp1_min.attr,
-	&dev_attr_temp1_max.attr,
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_min.dev_attr.attr,
+	&sensor_dev_attr_temp1_max.dev_attr.attr,
+	&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+	&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
 	&dev_attr_alarms.attr,
 	NULL
 };
@@ -204,9 +212,9 @@
 			 int kind)
 {
 	int conf, temp;
-	struct i2c_client *new_client;
+	struct i2c_client *client;
 	struct ds1621_data *data;
-	int err = 0;
+	int i, err = 0;
 
 	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA 
 				     | I2C_FUNC_SMBUS_WORD_DATA 
@@ -221,55 +229,44 @@
 		goto exit;
 	}
 	
-	new_client = &data->client;
-	i2c_set_clientdata(new_client, data);
-	new_client->addr = address;
-	new_client->adapter = adapter;
-	new_client->driver = &ds1621_driver;
-	new_client->flags = 0;
-
+	client = &data->client;
+	i2c_set_clientdata(client, data);
+	client->addr = address;
+	client->adapter = adapter;
+	client->driver = &ds1621_driver;
 
 	/* Now, we do the remaining detection. It is lousy. */
 	if (kind < 0) {
 		/* The NVB bit should be low if no EEPROM write has been 
 		   requested during the latest 10ms, which is highly 
 		   improbable in our case. */
-		conf = ds1621_read_value(new_client, DS1621_REG_CONF);
+		conf = ds1621_read_value(client, DS1621_REG_CONF);
 		if (conf & DS1621_REG_CONFIG_NVB)
 			goto exit_free;
 		/* The 7 lowest bits of a temperature should always be 0. */
-		temp = ds1621_read_value(new_client, DS1621_REG_TEMP);
-		if (temp & 0x007f)
-			goto exit_free;
-		temp = ds1621_read_value(new_client, DS1621_REG_TEMP_MIN);
-		if (temp & 0x007f)
-			goto exit_free;
-		temp = ds1621_read_value(new_client, DS1621_REG_TEMP_MAX);
-		if (temp & 0x007f)
-			goto exit_free;
+		for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
+			temp = ds1621_read_value(client, DS1621_REG_TEMP[i]);
+			if (temp & 0x007f)
+				goto exit_free;
+		}
 	}
 
-	/* Determine the chip type - only one kind supported! */
-	if (kind <= 0)
-		kind = ds1621;
-
 	/* Fill in remaining client fields and put it into the global list */
-	strlcpy(new_client->name, "ds1621", I2C_NAME_SIZE);
-	data->valid = 0;
+	strlcpy(client->name, "ds1621", I2C_NAME_SIZE);
 	mutex_init(&data->update_lock);
 
 	/* Tell the I2C layer a new client has arrived */
-	if ((err = i2c_attach_client(new_client)))
+	if ((err = i2c_attach_client(client)))
 		goto exit_free;
 
 	/* Initialize the DS1621 chip */
-	ds1621_init_client(new_client);
+	ds1621_init_client(client);
 
 	/* Register sysfs hooks */
-	if ((err = sysfs_create_group(&new_client->dev.kobj, &ds1621_group)))
+	if ((err = sysfs_create_group(&client->dev.kobj, &ds1621_group)))
 		goto exit_detach;
 
-	data->class_dev = hwmon_device_register(&new_client->dev);
+	data->class_dev = hwmon_device_register(&client->dev);
 	if (IS_ERR(data->class_dev)) {
 		err = PTR_ERR(data->class_dev);
 		goto exit_remove_files;
@@ -278,9 +275,9 @@
 	return 0;
 
       exit_remove_files:
-	sysfs_remove_group(&new_client->dev.kobj, &ds1621_group);
+	sysfs_remove_group(&client->dev.kobj, &ds1621_group);
       exit_detach:
-	i2c_detach_client(new_client);
+	i2c_detach_client(client);
       exit_free:
 	kfree(data);
       exit:
@@ -314,23 +311,21 @@
 
 	if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
 	    || !data->valid) {
+		int i;
 
 		dev_dbg(&client->dev, "Starting ds1621 update\n");
 
 		data->conf = ds1621_read_value(client, DS1621_REG_CONF);
 
-		data->temp = ds1621_read_value(client, DS1621_REG_TEMP);
-		
-		data->temp_min = ds1621_read_value(client,
-		                                    DS1621_REG_TEMP_MIN);
-		data->temp_max = ds1621_read_value(client,
-						    DS1621_REG_TEMP_MAX);
+		for (i = 0; i < ARRAY_SIZE(data->temp); i++)
+			data->temp[i] = ds1621_read_value(client,
+							  DS1621_REG_TEMP[i]);
 
 		/* reset alarms if necessary */
 		new_conf = data->conf;
-		if (data->temp > data->temp_min)
+		if (data->temp[0] > data->temp[1])	/* input > min */
 			new_conf &= ~DS1621_ALARM_TEMP_LOW;
-		if (data->temp < data->temp_max)
+		if (data->temp[0] < data->temp[2])	/* input < max */
 			new_conf &= ~DS1621_ALARM_TEMP_HIGH;
 		if (data->conf != new_conf)
 			ds1621_write_value(client, DS1621_REG_CONF,
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index cdbe309..6f60715 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -127,6 +127,13 @@
 #define F71805F_REG_TEMP_HIGH(nr)	(0x54 + 2 * (nr))
 #define F71805F_REG_TEMP_HYST(nr)	(0x55 + 2 * (nr))
 #define F71805F_REG_TEMP_MODE		0x01
+/* pwm/fan pwmnr from 0 to 2, auto point apnr from 0 to 2 */
+/* map Fintek numbers to our numbers as follows: 9->0, 5->1, 1->2 */
+#define F71805F_REG_PWM_AUTO_POINT_TEMP(pwmnr, apnr) \
+					(0xA0 + 0x10 * (pwmnr) + (2 - (apnr)))
+#define F71805F_REG_PWM_AUTO_POINT_FAN(pwmnr, apnr) \
+					(0xA4 + 0x10 * (pwmnr) + \
+						2 * (2 - (apnr)))
 
 #define F71805F_REG_START		0x00
 /* status nr from 0 to 2 */
@@ -144,6 +151,11 @@
  * Data structures and manipulation thereof
  */
 
+struct f71805f_auto_point {
+	u8 temp[3];
+	u16 fan[3];
+};
+
 struct f71805f_data {
 	unsigned short addr;
 	const char *name;
@@ -170,6 +182,7 @@
 	u8 temp_hyst[3];
 	u8 temp_mode;
 	unsigned long alarms;
+	struct f71805f_auto_point auto_points[3];
 };
 
 struct f71805f_sio_data {
@@ -312,7 +325,7 @@
 static struct f71805f_data *f71805f_update_device(struct device *dev)
 {
 	struct f71805f_data *data = dev_get_drvdata(dev);
-	int nr;
+	int nr, apnr;
 
 	mutex_lock(&data->update_lock);
 
@@ -342,6 +355,18 @@
 					      F71805F_REG_TEMP_HYST(nr));
 		}
 		data->temp_mode = f71805f_read8(data, F71805F_REG_TEMP_MODE);
+		for (nr = 0; nr < 3; nr++) {
+			for (apnr = 0; apnr < 3; apnr++) {
+				data->auto_points[nr].temp[apnr] =
+					f71805f_read8(data,
+					F71805F_REG_PWM_AUTO_POINT_TEMP(nr,
+									apnr));
+				data->auto_points[nr].fan[apnr] =
+					f71805f_read16(data,
+					F71805F_REG_PWM_AUTO_POINT_FAN(nr,
+								       apnr));
+			}
+		}
 
 		data->last_limits = jiffies;
 	}
@@ -705,6 +730,70 @@
 	return count;
 }
 
+static ssize_t show_pwm_auto_point_temp(struct device *dev,
+					struct device_attribute *devattr,
+					char *buf)
+{
+	struct f71805f_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+	int pwmnr = attr->nr;
+	int apnr = attr->index;
+
+	return sprintf(buf, "%ld\n",
+		       temp_from_reg(data->auto_points[pwmnr].temp[apnr]));
+}
+
+static ssize_t set_pwm_auto_point_temp(struct device *dev,
+				       struct device_attribute *devattr,
+				       const char *buf, size_t count)
+{
+	struct f71805f_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+	int pwmnr = attr->nr;
+	int apnr = attr->index;
+	long val = simple_strtol(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	data->auto_points[pwmnr].temp[apnr] = temp_to_reg(val);
+	f71805f_write8(data, F71805F_REG_PWM_AUTO_POINT_TEMP(pwmnr, apnr),
+		       data->auto_points[pwmnr].temp[apnr]);
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+static ssize_t show_pwm_auto_point_fan(struct device *dev,
+				       struct device_attribute *devattr,
+				       char *buf)
+{
+	struct f71805f_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+	int pwmnr = attr->nr;
+	int apnr = attr->index;
+
+	return sprintf(buf, "%ld\n",
+		       fan_from_reg(data->auto_points[pwmnr].fan[apnr]));
+}
+
+static ssize_t set_pwm_auto_point_fan(struct device *dev,
+				      struct device_attribute *devattr,
+				      const char *buf, size_t count)
+{
+	struct f71805f_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+	int pwmnr = attr->nr;
+	int apnr = attr->index;
+	unsigned long val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	data->auto_points[pwmnr].fan[apnr] = fan_to_reg(val);
+	f71805f_write16(data, F71805F_REG_PWM_AUTO_POINT_FAN(pwmnr, apnr),
+		        data->auto_points[pwmnr].fan[apnr]);
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
 static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
 			 char *buf)
 {
@@ -932,6 +1021,63 @@
 			  show_pwm_freq, set_pwm_freq, 2);
 static SENSOR_DEVICE_ATTR(pwm3_mode, S_IRUGO, show_pwm_mode, NULL, 2);
 
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_temp, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+			    0, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_fan, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+			    0, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_temp, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+			    0, 1);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_fan, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+			    0, 1);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_temp, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+			    0, 2);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_fan, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+			    0, 2);
+
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_temp, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+			    1, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_fan, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+			    1, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_temp, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+			    1, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_fan, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+			    1, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_temp, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+			    1, 2);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_fan, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+			    1, 2);
+
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_temp, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+			    2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_fan, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+			    2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_temp, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+			    2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_fan, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+			    2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_temp, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+			    2, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_fan, S_IRUGO | S_IWUSR,
+			    show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+			    2, 2);
+
 static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
 static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
 static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
@@ -1014,6 +1160,25 @@
 	&sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
 	&sensor_dev_attr_temp3_type.dev_attr.attr,
 
+	&sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point1_fan.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point2_fan.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point3_fan.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point1_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point1_fan.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point2_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point2_fan.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point3_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point3_fan.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point1_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point1_fan.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point2_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point2_fan.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point3_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point3_fan.dev_attr.attr,
+
 	&sensor_dev_attr_in0_alarm.dev_attr.attr,
 	&sensor_dev_attr_in1_alarm.dev_attr.attr,
 	&sensor_dev_attr_in2_alarm.dev_attr.attr,
@@ -1242,12 +1407,12 @@
 	struct resource *res;
 	int i;
 
-	platform_set_drvdata(pdev, NULL);
 	hwmon_device_unregister(data->class_dev);
 	sysfs_remove_group(&pdev->dev.kobj, &f71805f_group);
 	for (i = 0; i < 4; i++)
 		sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_optin[i]);
 	sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_pwm_freq);
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 
 	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1290,15 +1455,12 @@
 		goto exit_device_put;
 	}
 
-	pdev->dev.platform_data = kmalloc(sizeof(struct f71805f_sio_data),
-					  GFP_KERNEL);
-	if (!pdev->dev.platform_data) {
-		err = -ENOMEM;
+	err = platform_device_add_data(pdev, sio_data,
+				       sizeof(struct f71805f_sio_data));
+	if (err) {
 		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
 		goto exit_device_put;
 	}
-	memcpy(pdev->dev.platform_data, sio_data,
-	       sizeof(struct f71805f_sio_data));
 
 	err = platform_device_add(pdev);
 	if (err) {
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 62afc63..eff6036 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -6,6 +6,7 @@
               IT8712F  Super I/O chip w/LPC interface
               IT8716F  Super I/O chip w/LPC interface
               IT8718F  Super I/O chip w/LPC interface
+              IT8726F  Super I/O chip w/LPC interface
               Sis950   A clone of the IT8705F
 
     Copyright (C) 2001 Chris Gauthron <chrisg@0-in.com> 
@@ -30,8 +31,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/i2c-isa.h>
+#include <linux/platform_device.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/hwmon-vid.h>
@@ -40,10 +40,12 @@
 #include <linux/sysfs.h>
 #include <asm/io.h>
 
+#define DRVNAME "it87"
 
-static unsigned short isa_address;
 enum chips { it87, it8712, it8716, it8718 };
 
+static struct platform_device *pdev;
+
 #define	REG	0x2e	/* The register to read/write */
 #define	DEV	0x07	/* Register: Logical device select */
 #define	VAL	0x2f	/* The value to read/write */
@@ -97,6 +99,7 @@
 #define IT8705F_DEVID 0x8705
 #define IT8716F_DEVID 0x8716
 #define IT8718F_DEVID 0x8718
+#define IT8726F_DEVID 0x8726
 #define IT87_ACT_REG  0x30
 #define IT87_BASE_REG 0x60
 
@@ -110,10 +113,6 @@
 /* Not all BIOSes properly configure the PWM registers */
 static int fix_pwm_polarity;
 
-/* Values read from Super-I/O config space */
-static u16 chip_type;
-static u8 vid_value;
-
 /* Many IT87 constants specified below */
 
 /* Length of ISA address segment */
@@ -214,13 +213,20 @@
 };
 
 
+struct it87_sio_data {
+	enum chips type;
+	/* Values read from Super-I/O config space */
+	u8 vid_value;
+};
+
 /* For each registered chip, we need to keep some data in memory.
    The structure is dynamically allocated. */
 struct it87_data {
-	struct i2c_client client;
 	struct class_device *class_dev;
 	enum chips type;
 
+	unsigned short addr;
+	const char *name;
 	struct mutex update_lock;
 	char valid;		/* !=0 if following fields are valid */
 	unsigned long last_updated;	/* In jiffies */
@@ -245,26 +251,25 @@
 };
 
 
-static int it87_detect(struct i2c_adapter *adapter);
-static int it87_detach_client(struct i2c_client *client);
+static int it87_probe(struct platform_device *pdev);
+static int it87_remove(struct platform_device *pdev);
 
-static int it87_read_value(struct i2c_client *client, u8 reg);
-static void it87_write_value(struct i2c_client *client, u8 reg, u8 value);
+static int it87_read_value(struct it87_data *data, u8 reg);
+static void it87_write_value(struct it87_data *data, u8 reg, u8 value);
 static struct it87_data *it87_update_device(struct device *dev);
-static int it87_check_pwm(struct i2c_client *client);
-static void it87_init_client(struct i2c_client *client, struct it87_data *data);
+static int it87_check_pwm(struct device *dev);
+static void it87_init_device(struct platform_device *pdev);
 
 
-static struct i2c_driver it87_isa_driver = {
+static struct platform_driver it87_driver = {
 	.driver = {
 		.owner	= THIS_MODULE,
-		.name	= "it87-isa",
+		.name	= DRVNAME,
 	},
-	.attach_adapter	= it87_detect,
-	.detach_client	= it87_detach_client,
+	.probe	= it87_probe,
+	.remove	= __devexit_p(it87_remove),
 };
 
-
 static ssize_t show_in(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
@@ -301,13 +306,12 @@
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 
-	struct i2c_client *client = to_i2c_client(dev);
-	struct it87_data *data = i2c_get_clientdata(client);
+	struct it87_data *data = dev_get_drvdata(dev);
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->in_min[nr] = IN_TO_REG(val);
-	it87_write_value(client, IT87_REG_VIN_MIN(nr), 
+	it87_write_value(data, IT87_REG_VIN_MIN(nr),
 			data->in_min[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
@@ -318,13 +322,12 @@
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 
-	struct i2c_client *client = to_i2c_client(dev);
-	struct it87_data *data = i2c_get_clientdata(client);
+	struct it87_data *data = dev_get_drvdata(dev);
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->in_max[nr] = IN_TO_REG(val);
-	it87_write_value(client, IT87_REG_VIN_MAX(nr), 
+	it87_write_value(data, IT87_REG_VIN_MAX(nr),
 			data->in_max[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
@@ -392,13 +395,12 @@
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 
-	struct i2c_client *client = to_i2c_client(dev);
-	struct it87_data *data = i2c_get_clientdata(client);
+	struct it87_data *data = dev_get_drvdata(dev);
 	int val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->temp_high[nr] = TEMP_TO_REG(val);
-	it87_write_value(client, IT87_REG_TEMP_HIGH(nr), data->temp_high[nr]);
+	it87_write_value(data, IT87_REG_TEMP_HIGH(nr), data->temp_high[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -408,13 +410,12 @@
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 
-	struct i2c_client *client = to_i2c_client(dev);
-	struct it87_data *data = i2c_get_clientdata(client);
+	struct it87_data *data = dev_get_drvdata(dev);
 	int val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->temp_low[nr] = TEMP_TO_REG(val);
-	it87_write_value(client, IT87_REG_TEMP_LOW(nr), data->temp_low[nr]);
+	it87_write_value(data, IT87_REG_TEMP_LOW(nr), data->temp_low[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -451,8 +452,7 @@
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 
-	struct i2c_client *client = to_i2c_client(dev);
-	struct it87_data *data = i2c_get_clientdata(client);
+	struct it87_data *data = dev_get_drvdata(dev);
 	int val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
@@ -468,7 +468,7 @@
 		mutex_unlock(&data->update_lock);
 		return -EINVAL;
 	}
-	it87_write_value(client, IT87_REG_TEMP_ENABLE, data->sensor);
+	it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -542,13 +542,12 @@
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 
-	struct i2c_client *client = to_i2c_client(dev);
-	struct it87_data *data = i2c_get_clientdata(client);
+	struct it87_data *data = dev_get_drvdata(dev);
 	int val = simple_strtol(buf, NULL, 10);
 	u8 reg;
 
 	mutex_lock(&data->update_lock);
-	reg = it87_read_value(client, IT87_REG_FAN_DIV);
+	reg = it87_read_value(data, IT87_REG_FAN_DIV);
 	switch (nr) {
 	case 0: data->fan_div[nr] = reg & 0x07; break;
 	case 1: data->fan_div[nr] = (reg >> 3) & 0x07; break;
@@ -556,7 +555,7 @@
 	}
 
 	data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
-	it87_write_value(client, IT87_REG_FAN_MIN(nr), data->fan_min[nr]);
+	it87_write_value(data, IT87_REG_FAN_MIN(nr), data->fan_min[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -566,14 +565,13 @@
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 
-	struct i2c_client *client = to_i2c_client(dev);
-	struct it87_data *data = i2c_get_clientdata(client);
+	struct it87_data *data = dev_get_drvdata(dev);
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 	int min;
 	u8 old;
 
 	mutex_lock(&data->update_lock);
-	old = it87_read_value(client, IT87_REG_FAN_DIV);
+	old = it87_read_value(data, IT87_REG_FAN_DIV);
 
 	/* Save fan min limit */
 	min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));
@@ -594,11 +592,11 @@
 	val |= (data->fan_div[1] & 0x07) << 3;
 	if (data->fan_div[2] == 3)
 		val |= 0x1 << 6;
-	it87_write_value(client, IT87_REG_FAN_DIV, val);
+	it87_write_value(data, IT87_REG_FAN_DIV, val);
 
 	/* Restore fan min limit */
 	data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
-	it87_write_value(client, IT87_REG_FAN_MIN(nr), data->fan_min[nr]);
+	it87_write_value(data, IT87_REG_FAN_MIN(nr), data->fan_min[nr]);
 
 	mutex_unlock(&data->update_lock);
 	return count;
@@ -609,8 +607,7 @@
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 
-	struct i2c_client *client = to_i2c_client(dev);
-	struct it87_data *data = i2c_get_clientdata(client);
+	struct it87_data *data = dev_get_drvdata(dev);
 	int val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
@@ -618,17 +615,17 @@
 	if (val == 0) {
 		int tmp;
 		/* make sure the fan is on when in on/off mode */
-		tmp = it87_read_value(client, IT87_REG_FAN_CTL);
-		it87_write_value(client, IT87_REG_FAN_CTL, tmp | (1 << nr));
+		tmp = it87_read_value(data, IT87_REG_FAN_CTL);
+		it87_write_value(data, IT87_REG_FAN_CTL, tmp | (1 << nr));
 		/* set on/off mode */
 		data->fan_main_ctrl &= ~(1 << nr);
-		it87_write_value(client, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
+		it87_write_value(data, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
 	} else if (val == 1) {
 		/* set SmartGuardian mode */
 		data->fan_main_ctrl |= (1 << nr);
-		it87_write_value(client, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
+		it87_write_value(data, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
 		/* set saved pwm value, clear FAN_CTLX PWM mode bit */
-		it87_write_value(client, IT87_REG_PWM(nr), PWM_TO_REG(data->manual_pwm_ctl[nr]));
+		it87_write_value(data, IT87_REG_PWM(nr), PWM_TO_REG(data->manual_pwm_ctl[nr]));
 	} else {
 		mutex_unlock(&data->update_lock);
 		return -EINVAL;
@@ -643,8 +640,7 @@
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 
-	struct i2c_client *client = to_i2c_client(dev);
-	struct it87_data *data = i2c_get_clientdata(client);
+	struct it87_data *data = dev_get_drvdata(dev);
 	int val = simple_strtol(buf, NULL, 10);
 
 	if (val < 0 || val > 255)
@@ -653,15 +649,14 @@
 	mutex_lock(&data->update_lock);
 	data->manual_pwm_ctl[nr] = val;
 	if (data->fan_main_ctrl & (1 << nr))
-		it87_write_value(client, IT87_REG_PWM(nr), PWM_TO_REG(data->manual_pwm_ctl[nr]));
+		it87_write_value(data, IT87_REG_PWM(nr), PWM_TO_REG(data->manual_pwm_ctl[nr]));
 	mutex_unlock(&data->update_lock);
 	return count;
 }
 static ssize_t set_pwm_freq(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct it87_data *data = i2c_get_clientdata(client);
+	struct it87_data *data = dev_get_drvdata(dev);
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 	int i;
 
@@ -672,9 +667,9 @@
 	}
 
 	mutex_lock(&data->update_lock);
-	data->fan_ctl = it87_read_value(client, IT87_REG_FAN_CTL) & 0x8f;
+	data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL) & 0x8f;
 	data->fan_ctl |= i << 4;
-	it87_write_value(client, IT87_REG_FAN_CTL, data->fan_ctl);
+	it87_write_value(data, IT87_REG_FAN_CTL, data->fan_ctl);
 	mutex_unlock(&data->update_lock);
 
 	return count;
@@ -729,15 +724,14 @@
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
-	struct i2c_client *client = to_i2c_client(dev);
-	struct it87_data *data = i2c_get_clientdata(client);
+	struct it87_data *data = dev_get_drvdata(dev);
 	int val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->fan_min[nr] = FAN16_TO_REG(val);
-	it87_write_value(client, IT87_REG_FAN_MIN(nr),
+	it87_write_value(data, IT87_REG_FAN_MIN(nr),
 			 data->fan_min[nr] & 0xff);
-	it87_write_value(client, IT87_REG_FANX_MIN(nr),
+	it87_write_value(data, IT87_REG_FANX_MIN(nr),
 			 data->fan_min[nr] >> 8);
 	mutex_unlock(&data->update_lock);
 	return count;
@@ -775,8 +769,7 @@
 static ssize_t
 store_vrm_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct it87_data *data = i2c_get_clientdata(client);
+	struct it87_data *data = dev_get_drvdata(dev);
 	u32 val;
 
 	val = simple_strtoul(buf, NULL, 10);
@@ -794,6 +787,14 @@
 }
 static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
 
+static ssize_t show_name(struct device *dev, struct device_attribute
+			 *devattr, char *buf)
+{
+	struct it87_data *data = dev_get_drvdata(dev);
+	return sprintf(buf, "%s\n", data->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
 static struct attribute *it87_attributes[] = {
 	&sensor_dev_attr_in0_input.dev_attr.attr,
 	&sensor_dev_attr_in1_input.dev_attr.attr,
@@ -835,6 +836,7 @@
 	&sensor_dev_attr_temp3_type.dev_attr.attr,
 
 	&dev_attr_alarms.attr,
+	&dev_attr_name.attr,
 	NULL
 };
 
@@ -877,17 +879,36 @@
 };
 
 /* SuperIO detection - will change isa_address if a chip is found */
-static int __init it87_find(unsigned short *address)
+static int __init it87_find(unsigned short *address,
+	struct it87_sio_data *sio_data)
 {
 	int err = -ENODEV;
+	u16 chip_type;
 
 	superio_enter();
 	chip_type = superio_inw(DEVID);
-	if (chip_type != IT8712F_DEVID
-	 && chip_type != IT8716F_DEVID
-	 && chip_type != IT8718F_DEVID
-	 && chip_type != IT8705F_DEVID)
-	 	goto exit;
+
+	switch (chip_type) {
+	case IT8705F_DEVID:
+		sio_data->type = it87;
+		break;
+	case IT8712F_DEVID:
+		sio_data->type = it8712;
+		break;
+	case IT8716F_DEVID:
+	case IT8726F_DEVID:
+		sio_data->type = it8716;
+		break;
+	case IT8718F_DEVID:
+		sio_data->type = it8718;
+		break;
+	case 0xffff:	/* No device at all */
+		goto exit;
+	default:
+		pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%x)\n",
+			 chip_type);
+		goto exit;
+	}
 
 	superio_select(PME);
 	if (!(superio_inb(IT87_ACT_REG) & 0x01)) {
@@ -911,7 +932,7 @@
 
 		superio_select(GPIO);
 		if (chip_type == it8718)
-			vid_value = superio_inb(IT87_SIO_VID_REG);
+			sio_data->vid_value = superio_inb(IT87_SIO_VID_REG);
 
 		reg = superio_inb(IT87_SIO_PINX2_REG);
 		if (reg & (1 << 0))
@@ -925,18 +946,26 @@
 	return err;
 }
 
-/* This function is called by i2c_probe */
-static int it87_detect(struct i2c_adapter *adapter)
+static int __devinit it87_probe(struct platform_device *pdev)
 {
-	struct i2c_client *new_client;
 	struct it87_data *data;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct it87_sio_data *sio_data = dev->platform_data;
 	int err = 0;
-	const char *name;
 	int enable_pwm_interface;
+	static const char *names[] = {
+		"it87",
+		"it8712",
+		"it8716",
+		"it8718",
+	};
 
-	/* Reserve the ISA region */
-	if (!request_region(isa_address, IT87_EXTENT,
-			    it87_isa_driver.driver.name)){
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!request_region(res->start, IT87_EXTENT, DRVNAME)) {
+		dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
+			(unsigned long)res->start,
+			(unsigned long)(res->start + IT87_EXTENT - 1));
 		err = -EBUSY;
 		goto ERROR0;
 	}
@@ -946,129 +975,104 @@
 		goto ERROR1;
 	}
 
-	new_client = &data->client;
-	i2c_set_clientdata(new_client, data);
-	new_client->addr = isa_address;
-	new_client->adapter = adapter;
-	new_client->driver = &it87_isa_driver;
+	data->addr = res->start;
+	data->type = sio_data->type;
+	data->name = names[sio_data->type];
 
 	/* Now, we do the remaining detection. */
-	if ((it87_read_value(new_client, IT87_REG_CONFIG) & 0x80)
-	 || it87_read_value(new_client, IT87_REG_CHIPID) != 0x90) {
+	if ((it87_read_value(data, IT87_REG_CONFIG) & 0x80)
+	 || it87_read_value(data, IT87_REG_CHIPID) != 0x90) {
 		err = -ENODEV;
 		goto ERROR2;
 	}
 
-	/* Determine the chip type. */
-	switch (chip_type) {
-	case IT8712F_DEVID:
-		data->type = it8712;
-		name = "it8712";
-		break;
-	case IT8716F_DEVID:
-		data->type = it8716;
-		name = "it8716";
-		break;
-	case IT8718F_DEVID:
-		data->type = it8718;
-		name = "it8718";
-		break;
-	default:
-		data->type = it87;
-		name = "it87";
-	}
+	platform_set_drvdata(pdev, data);
 
-	/* Fill in the remaining client fields and put it into the global list */
-	strlcpy(new_client->name, name, I2C_NAME_SIZE);
 	mutex_init(&data->update_lock);
 
-	/* Tell the I2C layer a new client has arrived */
-	if ((err = i2c_attach_client(new_client)))
-		goto ERROR2;
-
 	/* Check PWM configuration */
-	enable_pwm_interface = it87_check_pwm(new_client);
+	enable_pwm_interface = it87_check_pwm(dev);
 
 	/* Initialize the IT87 chip */
-	it87_init_client(new_client, data);
+	it87_init_device(pdev);
 
 	/* Register sysfs hooks */
-	if ((err = sysfs_create_group(&new_client->dev.kobj, &it87_group)))
-		goto ERROR3;
+	if ((err = sysfs_create_group(&dev->kobj, &it87_group)))
+		goto ERROR2;
 
 	/* Do not create fan files for disabled fans */
 	if (data->type == it8716 || data->type == it8718) {
 		/* 16-bit tachometers */
 		if (data->has_fan & (1 << 0)) {
-			if ((err = device_create_file(&new_client->dev,
+			if ((err = device_create_file(dev,
 			     &sensor_dev_attr_fan1_input16.dev_attr))
-			 || (err = device_create_file(&new_client->dev,
+			 || (err = device_create_file(dev,
 			     &sensor_dev_attr_fan1_min16.dev_attr)))
 				goto ERROR4;
 		}
 		if (data->has_fan & (1 << 1)) {
-			if ((err = device_create_file(&new_client->dev,
+			if ((err = device_create_file(dev,
 			     &sensor_dev_attr_fan2_input16.dev_attr))
-			 || (err = device_create_file(&new_client->dev,
+			 || (err = device_create_file(dev,
 			     &sensor_dev_attr_fan2_min16.dev_attr)))
 				goto ERROR4;
 		}
 		if (data->has_fan & (1 << 2)) {
-			if ((err = device_create_file(&new_client->dev,
+			if ((err = device_create_file(dev,
 			     &sensor_dev_attr_fan3_input16.dev_attr))
-			 || (err = device_create_file(&new_client->dev,
+			 || (err = device_create_file(dev,
 			     &sensor_dev_attr_fan3_min16.dev_attr)))
 				goto ERROR4;
 		}
 	} else {
 		/* 8-bit tachometers with clock divider */
 		if (data->has_fan & (1 << 0)) {
-			if ((err = device_create_file(&new_client->dev,
+			if ((err = device_create_file(dev,
 			     &sensor_dev_attr_fan1_input.dev_attr))
-			 || (err = device_create_file(&new_client->dev,
+			 || (err = device_create_file(dev,
 			     &sensor_dev_attr_fan1_min.dev_attr))
-			 || (err = device_create_file(&new_client->dev,
+			 || (err = device_create_file(dev,
 			     &sensor_dev_attr_fan1_div.dev_attr)))
 				goto ERROR4;
 		}
 		if (data->has_fan & (1 << 1)) {
-			if ((err = device_create_file(&new_client->dev,
+			if ((err = device_create_file(dev,
 			     &sensor_dev_attr_fan2_input.dev_attr))
-			 || (err = device_create_file(&new_client->dev,
+			 || (err = device_create_file(dev,
 			     &sensor_dev_attr_fan2_min.dev_attr))
-			 || (err = device_create_file(&new_client->dev,
+			 || (err = device_create_file(dev,
 			     &sensor_dev_attr_fan2_div.dev_attr)))
 				goto ERROR4;
 		}
 		if (data->has_fan & (1 << 2)) {
-			if ((err = device_create_file(&new_client->dev,
+			if ((err = device_create_file(dev,
 			     &sensor_dev_attr_fan3_input.dev_attr))
-			 || (err = device_create_file(&new_client->dev,
+			 || (err = device_create_file(dev,
 			     &sensor_dev_attr_fan3_min.dev_attr))
-			 || (err = device_create_file(&new_client->dev,
+			 || (err = device_create_file(dev,
 			     &sensor_dev_attr_fan3_div.dev_attr)))
 				goto ERROR4;
 		}
 	}
 
 	if (enable_pwm_interface) {
-		if ((err = device_create_file(&new_client->dev,
+		if ((err = device_create_file(dev,
 		     &sensor_dev_attr_pwm1_enable.dev_attr))
-		 || (err = device_create_file(&new_client->dev,
+		 || (err = device_create_file(dev,
 		     &sensor_dev_attr_pwm2_enable.dev_attr))
-		 || (err = device_create_file(&new_client->dev,
+		 || (err = device_create_file(dev,
 		     &sensor_dev_attr_pwm3_enable.dev_attr))
-		 || (err = device_create_file(&new_client->dev,
+		 || (err = device_create_file(dev,
 		     &sensor_dev_attr_pwm1.dev_attr))
-		 || (err = device_create_file(&new_client->dev,
+		 || (err = device_create_file(dev,
 		     &sensor_dev_attr_pwm2.dev_attr))
-		 || (err = device_create_file(&new_client->dev,
+		 || (err = device_create_file(dev,
 		     &sensor_dev_attr_pwm3.dev_attr))
-		 || (err = device_create_file(&new_client->dev,
+		 || (err = device_create_file(dev,
 		     &dev_attr_pwm1_freq))
-		 || (err = device_create_file(&new_client->dev,
+		 || (err = device_create_file(dev,
 		     &dev_attr_pwm2_freq))
-		 || (err = device_create_file(&new_client->dev,
+		 || (err = device_create_file(dev,
 		     &dev_attr_pwm3_freq)))
 			goto ERROR4;
 	}
@@ -1077,15 +1081,15 @@
 	 || data->type == it8718) {
 		data->vrm = vid_which_vrm();
 		/* VID reading from Super-I/O config space if available */
-		data->vid = vid_value;
-		if ((err = device_create_file(&new_client->dev,
+		data->vid = sio_data->vid_value;
+		if ((err = device_create_file(dev,
 		     &dev_attr_vrm))
-		 || (err = device_create_file(&new_client->dev,
+		 || (err = device_create_file(dev,
 		     &dev_attr_cpu0_vid)))
 			goto ERROR4;
 	}
 
-	data->class_dev = hwmon_device_register(&new_client->dev);
+	data->class_dev = hwmon_device_register(dev);
 	if (IS_ERR(data->class_dev)) {
 		err = PTR_ERR(data->class_dev);
 		goto ERROR4;
@@ -1094,31 +1098,27 @@
 	return 0;
 
 ERROR4:
-	sysfs_remove_group(&new_client->dev.kobj, &it87_group);
-	sysfs_remove_group(&new_client->dev.kobj, &it87_group_opt);
-ERROR3:
-	i2c_detach_client(new_client);
+	sysfs_remove_group(&dev->kobj, &it87_group);
+	sysfs_remove_group(&dev->kobj, &it87_group_opt);
 ERROR2:
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 ERROR1:
-	release_region(isa_address, IT87_EXTENT);
+	release_region(res->start, IT87_EXTENT);
 ERROR0:
 	return err;
 }
 
-static int it87_detach_client(struct i2c_client *client)
+static int __devexit it87_remove(struct platform_device *pdev)
 {
-	struct it87_data *data = i2c_get_clientdata(client);
-	int err;
+	struct it87_data *data = platform_get_drvdata(pdev);
 
 	hwmon_device_unregister(data->class_dev);
-	sysfs_remove_group(&client->dev.kobj, &it87_group);
-	sysfs_remove_group(&client->dev.kobj, &it87_group_opt);
+	sysfs_remove_group(&pdev->dev.kobj, &it87_group);
+	sysfs_remove_group(&pdev->dev.kobj, &it87_group_opt);
 
-	if ((err = i2c_detach_client(client)))
-		return err;
-
-	release_region(client->addr, IT87_EXTENT);
+	release_region(data->addr, IT87_EXTENT);
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 
 	return 0;
@@ -1127,28 +1127,29 @@
 /* Must be called with data->update_lock held, except during initialization.
    We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
    would slow down the IT87 access and should not be necessary. */
-static int it87_read_value(struct i2c_client *client, u8 reg)
+static int it87_read_value(struct it87_data *data, u8 reg)
 {
-	outb_p(reg, client->addr + IT87_ADDR_REG_OFFSET);
-	return inb_p(client->addr + IT87_DATA_REG_OFFSET);
+	outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
+	return inb_p(data->addr + IT87_DATA_REG_OFFSET);
 }
 
 /* Must be called with data->update_lock held, except during initialization.
    We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
    would slow down the IT87 access and should not be necessary. */
-static void it87_write_value(struct i2c_client *client, u8 reg, u8 value)
+static void it87_write_value(struct it87_data *data, u8 reg, u8 value)
 {
-	outb_p(reg, client->addr + IT87_ADDR_REG_OFFSET);
-	outb_p(value, client->addr + IT87_DATA_REG_OFFSET);
+	outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
+	outb_p(value, data->addr + IT87_DATA_REG_OFFSET);
 }
 
 /* Return 1 if and only if the PWM interface is safe to use */
-static int it87_check_pwm(struct i2c_client *client)
+static int __devinit it87_check_pwm(struct device *dev)
 {
+	struct it87_data *data = dev_get_drvdata(dev);
 	/* Some BIOSes fail to correctly configure the IT87 fans. All fans off
 	 * and polarity set to active low is sign that this is the case so we
 	 * disable pwm control to protect the user. */
-	int tmp = it87_read_value(client, IT87_REG_FAN_CTL);
+	int tmp = it87_read_value(data, IT87_REG_FAN_CTL);
 	if ((tmp & 0x87) == 0) {
 		if (fix_pwm_polarity) {
 			/* The user asks us to attempt a chip reconfiguration.
@@ -1158,7 +1159,7 @@
 			u8 pwm[3];
 
 			for (i = 0; i < 3; i++)
-				pwm[i] = it87_read_value(client,
+				pwm[i] = it87_read_value(data,
 							 IT87_REG_PWM(i));
 
 			/* If any fan is in automatic pwm mode, the polarity
@@ -1166,26 +1167,26 @@
 			 * better don't change anything (but still disable the
 			 * PWM interface). */
 			if (!((pwm[0] | pwm[1] | pwm[2]) & 0x80)) {
-				dev_info(&client->dev, "Reconfiguring PWM to "
+				dev_info(dev, "Reconfiguring PWM to "
 					 "active high polarity\n");
-				it87_write_value(client, IT87_REG_FAN_CTL,
+				it87_write_value(data, IT87_REG_FAN_CTL,
 						 tmp | 0x87);
 				for (i = 0; i < 3; i++)
-					it87_write_value(client,
+					it87_write_value(data,
 							 IT87_REG_PWM(i),
 							 0x7f & ~pwm[i]);
 				return 1;
 			}
 
-			dev_info(&client->dev, "PWM configuration is "
+			dev_info(dev, "PWM configuration is "
 				 "too broken to be fixed\n");
 		}
 
-		dev_info(&client->dev, "Detected broken BIOS "
+		dev_info(dev, "Detected broken BIOS "
 			 "defaults, disabling PWM interface\n");
 		return 0;
 	} else if (fix_pwm_polarity) {
-		dev_info(&client->dev, "PWM configuration looks "
+		dev_info(dev, "PWM configuration looks "
 			 "sane, won't touch\n");
 	}
 
@@ -1193,8 +1194,9 @@
 }
 
 /* Called when we have found a new IT87. */
-static void it87_init_client(struct i2c_client *client, struct it87_data *data)
+static void __devinit it87_init_device(struct platform_device *pdev)
 {
+	struct it87_data *data = platform_get_drvdata(pdev);
 	int tmp, i;
 
 	/* initialize to sane defaults:
@@ -1214,48 +1216,48 @@
 	 * means -1 degree C, which surprisingly doesn't trigger an alarm,
 	 * but is still confusing, so change to 127 degrees C. */
 	for (i = 0; i < 8; i++) {
-		tmp = it87_read_value(client, IT87_REG_VIN_MIN(i));
+		tmp = it87_read_value(data, IT87_REG_VIN_MIN(i));
 		if (tmp == 0xff)
-			it87_write_value(client, IT87_REG_VIN_MIN(i), 0);
+			it87_write_value(data, IT87_REG_VIN_MIN(i), 0);
 	}
 	for (i = 0; i < 3; i++) {
-		tmp = it87_read_value(client, IT87_REG_TEMP_HIGH(i));
+		tmp = it87_read_value(data, IT87_REG_TEMP_HIGH(i));
 		if (tmp == 0xff)
-			it87_write_value(client, IT87_REG_TEMP_HIGH(i), 127);
+			it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127);
 	}
 
 	/* Check if temperature channnels are reset manually or by some reason */
-	tmp = it87_read_value(client, IT87_REG_TEMP_ENABLE);
+	tmp = it87_read_value(data, IT87_REG_TEMP_ENABLE);
 	if ((tmp & 0x3f) == 0) {
 		/* Temp1,Temp3=thermistor; Temp2=thermal diode */
 		tmp = (tmp & 0xc0) | 0x2a;
-		it87_write_value(client, IT87_REG_TEMP_ENABLE, tmp);
+		it87_write_value(data, IT87_REG_TEMP_ENABLE, tmp);
 	}
 	data->sensor = tmp;
 
 	/* Check if voltage monitors are reset manually or by some reason */
-	tmp = it87_read_value(client, IT87_REG_VIN_ENABLE);
+	tmp = it87_read_value(data, IT87_REG_VIN_ENABLE);
 	if ((tmp & 0xff) == 0) {
 		/* Enable all voltage monitors */
-		it87_write_value(client, IT87_REG_VIN_ENABLE, 0xff);
+		it87_write_value(data, IT87_REG_VIN_ENABLE, 0xff);
 	}
 
 	/* Check if tachometers are reset manually or by some reason */
-	data->fan_main_ctrl = it87_read_value(client, IT87_REG_FAN_MAIN_CTRL);
+	data->fan_main_ctrl = it87_read_value(data, IT87_REG_FAN_MAIN_CTRL);
 	if ((data->fan_main_ctrl & 0x70) == 0) {
 		/* Enable all fan tachometers */
 		data->fan_main_ctrl |= 0x70;
-		it87_write_value(client, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
+		it87_write_value(data, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
 	}
 	data->has_fan = (data->fan_main_ctrl >> 4) & 0x07;
 
 	/* Set tachometers to 16-bit mode if needed */
 	if (data->type == it8716 || data->type == it8718) {
-		tmp = it87_read_value(client, IT87_REG_FAN_16BIT);
+		tmp = it87_read_value(data, IT87_REG_FAN_16BIT);
 		if (~tmp & 0x07 & data->has_fan) {
-			dev_dbg(&client->dev,
+			dev_dbg(&pdev->dev,
 				"Setting fan1-3 to 16-bit mode\n");
-			it87_write_value(client, IT87_REG_FAN_16BIT,
+			it87_write_value(data, IT87_REG_FAN_16BIT,
 					 tmp | 0x07);
 		}
 	}
@@ -1265,7 +1267,7 @@
 	for (i = 0; i < 3; i++) {
 		if (data->fan_main_ctrl & (1 << i)) {
 			/* pwm mode */
-			tmp = it87_read_value(client, IT87_REG_PWM(i));
+			tmp = it87_read_value(data, IT87_REG_PWM(i));
 			if (tmp & 0x80) {
 				/* automatic pwm - not yet implemented, but
 				 * leave the settings made by the BIOS alone
@@ -1279,15 +1281,14 @@
  	}
 
 	/* Start monitoring */
-	it87_write_value(client, IT87_REG_CONFIG,
-			 (it87_read_value(client, IT87_REG_CONFIG) & 0x36)
+	it87_write_value(data, IT87_REG_CONFIG,
+			 (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
 			 | (update_vbat ? 0x41 : 0x01));
 }
 
 static struct it87_data *it87_update_device(struct device *dev)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct it87_data *data = i2c_get_clientdata(client);
+	struct it87_data *data = dev_get_drvdata(dev);
 	int i;
 
 	mutex_lock(&data->update_lock);
@@ -1298,20 +1299,20 @@
 		if (update_vbat) {
 			/* Cleared after each update, so reenable.  Value
 		 	  returned by this read will be previous value */	
-			it87_write_value(client, IT87_REG_CONFIG,
-			   it87_read_value(client, IT87_REG_CONFIG) | 0x40);
+			it87_write_value(data, IT87_REG_CONFIG,
+			   it87_read_value(data, IT87_REG_CONFIG) | 0x40);
 		}
 		for (i = 0; i <= 7; i++) {
 			data->in[i] =
-			    it87_read_value(client, IT87_REG_VIN(i));
+			    it87_read_value(data, IT87_REG_VIN(i));
 			data->in_min[i] =
-			    it87_read_value(client, IT87_REG_VIN_MIN(i));
+			    it87_read_value(data, IT87_REG_VIN_MIN(i));
 			data->in_max[i] =
-			    it87_read_value(client, IT87_REG_VIN_MAX(i));
+			    it87_read_value(data, IT87_REG_VIN_MAX(i));
 		}
 		/* in8 (battery) has no limit registers */
 		data->in[8] =
-		    it87_read_value(client, IT87_REG_VIN(8));
+		    it87_read_value(data, IT87_REG_VIN(8));
 
 		for (i = 0; i < 3; i++) {
 			/* Skip disabled fans */
@@ -1319,46 +1320,47 @@
 				continue;
 
 			data->fan_min[i] =
-			    it87_read_value(client, IT87_REG_FAN_MIN(i));
-			data->fan[i] = it87_read_value(client,
+			    it87_read_value(data, IT87_REG_FAN_MIN(i));
+			data->fan[i] = it87_read_value(data,
 				       IT87_REG_FAN(i));
 			/* Add high byte if in 16-bit mode */
 			if (data->type == it8716 || data->type == it8718) {
-				data->fan[i] |= it87_read_value(client,
+				data->fan[i] |= it87_read_value(data,
 						IT87_REG_FANX(i)) << 8;
-				data->fan_min[i] |= it87_read_value(client,
+				data->fan_min[i] |= it87_read_value(data,
 						IT87_REG_FANX_MIN(i)) << 8;
 			}
 		}
 		for (i = 0; i < 3; i++) {
 			data->temp[i] =
-			    it87_read_value(client, IT87_REG_TEMP(i));
+			    it87_read_value(data, IT87_REG_TEMP(i));
 			data->temp_high[i] =
-			    it87_read_value(client, IT87_REG_TEMP_HIGH(i));
+			    it87_read_value(data, IT87_REG_TEMP_HIGH(i));
 			data->temp_low[i] =
-			    it87_read_value(client, IT87_REG_TEMP_LOW(i));
+			    it87_read_value(data, IT87_REG_TEMP_LOW(i));
 		}
 
 		/* Newer chips don't have clock dividers */
 		if ((data->has_fan & 0x07) && data->type != it8716
 		 && data->type != it8718) {
-			i = it87_read_value(client, IT87_REG_FAN_DIV);
+			i = it87_read_value(data, IT87_REG_FAN_DIV);
 			data->fan_div[0] = i & 0x07;
 			data->fan_div[1] = (i >> 3) & 0x07;
 			data->fan_div[2] = (i & 0x40) ? 3 : 1;
 		}
 
 		data->alarms =
-			it87_read_value(client, IT87_REG_ALARM1) |
-			(it87_read_value(client, IT87_REG_ALARM2) << 8) |
-			(it87_read_value(client, IT87_REG_ALARM3) << 16);
-		data->fan_main_ctrl = it87_read_value(client, IT87_REG_FAN_MAIN_CTRL);
-		data->fan_ctl = it87_read_value(client, IT87_REG_FAN_CTL);
+			it87_read_value(data, IT87_REG_ALARM1) |
+			(it87_read_value(data, IT87_REG_ALARM2) << 8) |
+			(it87_read_value(data, IT87_REG_ALARM3) << 16);
+		data->fan_main_ctrl = it87_read_value(data,
+				IT87_REG_FAN_MAIN_CTRL);
+		data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL);
 
-		data->sensor = it87_read_value(client, IT87_REG_TEMP_ENABLE);
+		data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
 		/* The 8705 does not have VID capability */
 		if (data->type == it8712 || data->type == it8716) {
-			data->vid = it87_read_value(client, IT87_REG_VID);
+			data->vid = it87_read_value(data, IT87_REG_VID);
 			/* The older IT8712F revisions had only 5 VID pins,
 			   but we assume it is always safe to read 6 bits. */
 			data->vid &= 0x3f;
@@ -1372,24 +1374,85 @@
 	return data;
 }
 
+static int __init it87_device_add(unsigned short address,
+				  const struct it87_sio_data *sio_data)
+{
+	struct resource res = {
+		.start	= address,
+		.end	= address + IT87_EXTENT - 1,
+		.name	= DRVNAME,
+		.flags	= IORESOURCE_IO,
+	};
+	int err;
+
+	pdev = platform_device_alloc(DRVNAME, address);
+	if (!pdev) {
+		err = -ENOMEM;
+		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		goto exit;
+	}
+
+	err = platform_device_add_resources(pdev, &res, 1);
+	if (err) {
+		printk(KERN_ERR DRVNAME ": Device resource addition failed "
+		       "(%d)\n", err);
+		goto exit_device_put;
+	}
+
+	err = platform_device_add_data(pdev, sio_data,
+				       sizeof(struct it87_sio_data));
+	if (err) {
+		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+		goto exit_device_put;
+	}
+
+	err = platform_device_add(pdev);
+	if (err) {
+		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
+		       err);
+		goto exit_device_put;
+	}
+
+	return 0;
+
+exit_device_put:
+	platform_device_put(pdev);
+exit:
+	return err;
+}
+
 static int __init sm_it87_init(void)
 {
-	int res;
+	int err;
+	unsigned short isa_address = 0;
+	struct it87_sio_data sio_data;
 
-	if ((res = it87_find(&isa_address)))
-		return res;
-	return i2c_isa_add_driver(&it87_isa_driver);
+	err = it87_find(&isa_address, &sio_data);
+	if (err)
+		return err;
+	err = platform_driver_register(&it87_driver);
+	if (err)
+		return err;
+
+	err = it87_device_add(isa_address, &sio_data);
+	if (err) {
+		platform_driver_unregister(&it87_driver);
+		return err;
+	}
+
+	return 0;
 }
 
 static void __exit sm_it87_exit(void)
 {
-	i2c_isa_del_driver(&it87_isa_driver);
+	platform_device_unregister(pdev);
+	platform_driver_unregister(&it87_driver);
 }
 
 
 MODULE_AUTHOR("Chris Gauthron <chrisg@0-in.com>, "
 	      "Jean Delvare <khali@linux-fr.org>");
-MODULE_DESCRIPTION("IT8705F/8712F/8716F/8718F, SiS950 driver");
+MODULE_DESCRIPTION("IT8705F/8712F/8716F/8718F/8726F, SiS950 driver");
 module_param(update_vbat, bool, 0);
 MODULE_PARM_DESC(update_vbat, "Update vbat if set else return powerup value");
 module_param(fix_pwm_polarity, bool, 0);
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index d69f3cf..2162d69 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -364,7 +364,7 @@
 /* Individual alarm files */
 static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_input_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
 static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
 static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
 static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
@@ -383,7 +383,7 @@
 	&dev_attr_temp2_crit_hyst.attr,
 
 	&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
-	&sensor_dev_attr_temp2_input_fault.dev_attr.attr,
+	&sensor_dev_attr_temp2_fault.dev_attr.attr,
 	&sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
 	&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
 	&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 7eaae38..275d392 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -96,6 +96,10 @@
 	struct lm70 *p_lm70;
 	int status;
 
+	/* signaling is SPI_MODE_0 on a 3-wire link (shared SI/SO) */
+	if ((spi->mode & (SPI_CPOL|SPI_CPHA)) || !(spi->mode & SPI_3WIRE))
+		return -EINVAL;
+
 	p_lm70 = kzalloc(sizeof *p_lm70, GFP_KERNEL);
 	if (!p_lm70)
 		return -ENOMEM;
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index feb87b4..654c0f7 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -223,14 +223,14 @@
 /* Individual alarm files */
 static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2);
 static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 4);
 static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
 static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 8);
 static SENSOR_DEVICE_ATTR(temp4_crit_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp4_input_fault, S_IRUGO, show_alarm, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_alarm, NULL, 10);
 static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(temp2_input_fault, S_IRUGO, show_alarm, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 13);
 static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 15);
 /* Raw alarm file for compatibility */
 static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
@@ -245,7 +245,7 @@
 
 	&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
 	&sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
-	&sensor_dev_attr_temp3_input_fault.dev_attr.attr,
+	&sensor_dev_attr_temp3_fault.dev_attr.attr,
 	&sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
 	&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
 	&dev_attr_alarms.attr,
@@ -266,9 +266,9 @@
 
 	&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
 	&sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
-	&sensor_dev_attr_temp4_input_fault.dev_attr.attr,
+	&sensor_dev_attr_temp4_fault.dev_attr.attr,
 	&sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
-	&sensor_dev_attr_temp2_input_fault.dev_attr.attr,
+	&sensor_dev_attr_temp2_fault.dev_attr.attr,
 	&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
 	NULL
 };
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 6882ce7..48833ff 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -43,6 +43,13 @@
  * variants. The extra address and features of the MAX6659 are not
  * supported by this driver.
  *
+ * This driver also supports the MAX6680 and MAX6681, two other sensor
+ * chips made by Maxim. These are quite similar to the other Maxim
+ * chips. Complete datasheet can be obtained at:
+ *   http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3370
+ * The MAX6680 and MAX6681 only differ in the pinout so they can be
+ * treated identically.
+ *
  * This driver also supports the ADT7461 chip from Analog Devices but
 * only in its "compatibility mode". If an ADT7461 chip is found but
  * is configured in non-compatible mode (where its temperature
@@ -84,20 +91,25 @@
 /*
  * Addresses to scan
  * Address is fully defined internally and cannot be changed except for
- * MAX6659.
+ * MAX6659, MAX6680 and MAX6681.
  * LM86, LM89, LM90, LM99, ADM1032, ADM1032-1, ADT7461, MAX6657 and MAX6658
  * have address 0x4c.
  * ADM1032-2, ADT7461-2, LM89-1, and LM99-1 have address 0x4d.
  * MAX6659 can have address 0x4c, 0x4d or 0x4e (unsupported).
+ * MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
+ * 0x4c, 0x4d or 0x4e.
  */
 
-static unsigned short normal_i2c[] = { 0x4c, 0x4d, I2C_CLIENT_END };
+static unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a,
+				       0x29, 0x2a, 0x2b,
+				       0x4c, 0x4d, 0x4e,
+				       I2C_CLIENT_END };
 
 /*
  * Insmod parameters
  */
 
-I2C_CLIENT_INSMOD_6(lm90, adm1032, lm99, lm86, max6657, adt7461);
+I2C_CLIENT_INSMOD_7(lm90, adm1032, lm99, lm86, max6657, adt7461, max6680);
 
 /*
  * The LM90 registers
@@ -359,7 +371,7 @@
 /* Individual alarm files */
 static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_input_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
 static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
 static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
 static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 5);
@@ -381,7 +393,7 @@
 
 	&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
 	&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
-	&sensor_dev_attr_temp2_input_fault.dev_attr.attr,
+	&sensor_dev_attr_temp2_fault.dev_attr.attr,
 	&sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
 	&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
 	&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
@@ -429,7 +441,7 @@
  */
 
 /* The ADM1032 supports PEC but not on write byte transactions, so we need
-   to explicitely ask for a transaction without PEC. */
+   to explicitly ask for a transaction without PEC. */
 static inline s32 adm1032_write_byte(struct i2c_client *client, u8 value)
 {
 	return i2c_smbus_xfer(client->adapter, client->addr,
@@ -525,7 +537,8 @@
 		 		  &reg_convrate) < 0)
 			goto exit_free;
 		
-		if (man_id == 0x01) { /* National Semiconductor */
+		if ((address == 0x4C || address == 0x4D)
+		 && man_id == 0x01) { /* National Semiconductor */
 			u8 reg_config2;
 
 			if (lm90_read_reg(new_client, LM90_REG_R_CONFIG2,
@@ -548,7 +561,8 @@
 				}
 			}
 		} else
-		if (man_id == 0x41) { /* Analog Devices */
+		if ((address == 0x4C || address == 0x4D)
+		 && man_id == 0x41) { /* Analog Devices */
 			if ((chip_id & 0xF0) == 0x40 /* ADM1032 */
 			 && (reg_config1 & 0x3F) == 0x00
 			 && reg_convrate <= 0x0A) {
@@ -562,18 +576,30 @@
 		} else
 		if (man_id == 0x4D) { /* Maxim */
 			/*
-			 * The Maxim variants do NOT have a chip_id register.
-			 * Reading from that address will return the last read
-			 * value, which in our case is those of the man_id
-			 * register. Likewise, the config1 register seems to
-			 * lack a low nibble, so the value will be those of the
-			 * previous read, so in our case those of the man_id
-			 * register.
+			 * The MAX6657, MAX6658 and MAX6659 do NOT have a
+			 * chip_id register. Reading from that address will
+			 * return the last read value, which in our case is
+			 * those of the man_id register. Likewise, the config1
+			 * register seems to lack a low nibble, so the value
+			 * will be those of the previous read, so in our case
+			 * those of the man_id register.
 			 */
 			if (chip_id == man_id
+			 && (address == 0x4C || address == 0x4D)
 			 && (reg_config1 & 0x1F) == (man_id & 0x0F)
 			 && reg_convrate <= 0x09) {
 			 	kind = max6657;
+			} else
+			/* The chip_id register of the MAX6680 and MAX6681
+			 * holds the revision of the chip.
+			 * The lowest bit of the config1 register is unused
+			 * and should return zero when read, so should the
+			 * second to last bit of config1 (software reset).
+			 */
+			if (chip_id == 0x01
+			 && (reg_config1 & 0x03) == 0x00
+			 && reg_convrate <= 0x07) {
+			 	kind = max6680;
 			}
 		}
 
@@ -599,6 +625,8 @@
 		name = "lm86";
 	} else if (kind == max6657) {
 		name = "max6657";
+	} else if (kind == max6680) {
+		name = "max6680";
 	} else if (kind == adt7461) {
 		name = "adt7461";
 	}
@@ -646,7 +674,8 @@
 
 static void lm90_init_client(struct i2c_client *client)
 {
-	u8 config;
+	u8 config, config_orig;
+	struct lm90_data *data = i2c_get_clientdata(client);
 
 	/*
 	 * Start the conversions.
@@ -657,9 +686,20 @@
 		dev_warn(&client->dev, "Initialization failed!\n");
 		return;
 	}
-	if (config & 0x40)
-		i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
-					  config & 0xBF); /* run */
+	config_orig = config;
+
+	/*
+	 * Put MAX6680/MAX6681 into extended resolution (bit 0x10,
+	 * 0.125 degree resolution) and range (0x08, extend range
+	 * to -64 degree) mode for the remote temperature sensor.
+	 */
+	if (data->kind == max6680) {
+		config |= 0x18;
+	}
+
+	config &= 0xBF;	/* run */
+	if (config != config_orig) /* Only write if changed */
+		i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
 }
 
 static int lm90_detach_client(struct i2c_client *client)
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
new file mode 100644
index 0000000..23edf4f
--- /dev/null
+++ b/drivers/hwmon/lm93.c
@@ -0,0 +1,2655 @@
+/*
+    lm93.c - Part of lm_sensors, Linux kernel modules for hardware monitoring
+
+    Author/Maintainer: Mark M. Hoffman <mhoffman@lightlink.com>
+	Copyright (c) 2004 Utilitek Systems, Inc.
+
+    derived in part from lm78.c:
+	Copyright (c) 1998, 1999  Frodo Looijaard <frodol@dds.nl>
+
+    derived in part from lm85.c:
+	Copyright (c) 2002, 2003 Philip Pokorny <ppokorny@penguincomputing.com>
+	Copyright (c) 2003       Margit Schubert-While <margitsw@t-online.de>
+
+    derived in part from w83l785ts.c:
+	Copyright (c) 2003-2004 Jean Delvare <khali@linux-fr.org>
+
+    Ported to Linux 2.6 by Eric J. Bowersox <ericb@aspsys.com>
+	Copyright (c) 2005 Aspen Systems, Inc.
+
+    Adapted to 2.6.20 by Carsten Emde <cbe@osadl.org>
+        Copyright (c) 2006 Carsten Emde, Open Source Automation Development Lab
+
+    Modified for mainline integration by Hans J. Koch <hjk@linutronix.de>
+        Copyright (c) 2007 Hans J. Koch, Linutronix GmbH
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon-vid.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+/* LM93 REGISTER ADDRESSES */
+
+/* miscellaneous */
+#define LM93_REG_MFR_ID			0x3e
+#define LM93_REG_VER			0x3f
+#define LM93_REG_STATUS_CONTROL		0xe2
+#define LM93_REG_CONFIG			0xe3
+#define LM93_REG_SLEEP_CONTROL		0xe4
+
+/* alarm values start here */
+#define LM93_REG_HOST_ERROR_1		0x48
+
+/* voltage inputs: in1-in16 (nr => 0-15) */
+#define LM93_REG_IN(nr)			(0x56 + (nr))
+#define LM93_REG_IN_MIN(nr)		(0x90 + (nr) * 2)
+#define LM93_REG_IN_MAX(nr)		(0x91 + (nr) * 2)
+
+/* temperature inputs: temp1-temp4 (nr => 0-3) */
+#define LM93_REG_TEMP(nr)		(0x50 + (nr))
+#define LM93_REG_TEMP_MIN(nr)		(0x78 + (nr) * 2)
+#define LM93_REG_TEMP_MAX(nr)		(0x79 + (nr) * 2)
+
+/* temp[1-4]_auto_boost (nr => 0-3) */
+#define LM93_REG_BOOST(nr)		(0x80 + (nr))
+
+/* #PROCHOT inputs: prochot1-prochot2 (nr => 0-1) */
+#define LM93_REG_PROCHOT_CUR(nr)	(0x67 + (nr) * 2)
+#define LM93_REG_PROCHOT_AVG(nr)	(0x68 + (nr) * 2)
+#define LM93_REG_PROCHOT_MAX(nr)	(0xb0 + (nr))
+
+/* fan tach inputs: fan1-fan4 (nr => 0-3) */
+#define LM93_REG_FAN(nr)		(0x6e + (nr) * 2)
+#define LM93_REG_FAN_MIN(nr)		(0xb4 + (nr) * 2)
+
+/* pwm outputs: pwm1-pwm2 (nr => 0-1, reg => 0-3) */
+#define LM93_REG_PWM_CTL(nr,reg)	(0xc8 + (reg) + (nr) * 4)
+#define LM93_PWM_CTL1	0x0
+#define LM93_PWM_CTL2	0x1
+#define LM93_PWM_CTL3	0x2
+#define LM93_PWM_CTL4	0x3
+
+/* GPIO input state */
+#define LM93_REG_GPI			0x6b
+
+/* vid inputs: vid1-vid2 (nr => 0-1) */
+#define LM93_REG_VID(nr)		(0x6c + (nr))
+
+/* vccp1 & vccp2: VID relative inputs (nr => 0-1) */
+#define LM93_REG_VCCP_LIMIT_OFF(nr)	(0xb2 + (nr))
+
+/* temp[1-4]_auto_boost_hyst */
+#define LM93_REG_BOOST_HYST_12		0xc0
+#define LM93_REG_BOOST_HYST_34		0xc1
+#define LM93_REG_BOOST_HYST(nr)		(0xc0 + (nr)/2)
+
+/* temp[1-4]_auto_pwm_[min|hyst] */
+#define LM93_REG_PWM_MIN_HYST_12	0xc3
+#define LM93_REG_PWM_MIN_HYST_34	0xc4
+#define LM93_REG_PWM_MIN_HYST(nr)	(0xc3 + (nr)/2)
+
+/* prochot_override & prochot_interval */
+#define LM93_REG_PROCHOT_OVERRIDE	0xc6
+#define LM93_REG_PROCHOT_INTERVAL	0xc7
+
+/* temp[1-4]_auto_base (nr => 0-3) */
+#define LM93_REG_TEMP_BASE(nr)		(0xd0 + (nr))
+
+/* temp[1-4]_auto_offsets (step => 0-11) */
+#define LM93_REG_TEMP_OFFSET(step)	(0xd4 + (step))
+
+/* #PROCHOT & #VRDHOT PWM ramp control */
+#define LM93_REG_PWM_RAMP_CTL		0xbf
+
+/* miscellaneous */
+#define LM93_REG_SFC1		0xbc
+#define LM93_REG_SFC2		0xbd
+#define LM93_REG_GPI_VID_CTL	0xbe
+#define LM93_REG_SF_TACH_TO_PWM	0xe0
+
+/* error masks */
+#define LM93_REG_GPI_ERR_MASK	0xec
+#define LM93_REG_MISC_ERR_MASK	0xed
+
+/* LM93 REGISTER VALUES */
+#define LM93_MFR_ID		0x73
+#define LM93_MFR_ID_PROTOTYPE	0x72
+
+/* SMBus capabilities */
+#define LM93_SMBUS_FUNC_FULL (I2C_FUNC_SMBUS_BYTE_DATA | \
+		I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA)
+#define LM93_SMBUS_FUNC_MIN  (I2C_FUNC_SMBUS_BYTE_DATA | \
+		I2C_FUNC_SMBUS_WORD_DATA)
+
+/* Addresses to scan */
+static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
+
+/* Insmod parameters */
+I2C_CLIENT_INSMOD_1(lm93);
+
+static int disable_block;
+module_param(disable_block, bool, 0);
+MODULE_PARM_DESC(disable_block,
+	"Set to non-zero to disable SMBus block data transactions.");
+
+static int init;
+module_param(init, bool, 0);
+MODULE_PARM_DESC(init, "Set to non-zero to force chip initialization.");
+
+static int vccp_limit_type[2] = {0,0};
+module_param_array(vccp_limit_type, int, NULL, 0);
+MODULE_PARM_DESC(vccp_limit_type, "Configures in7 and in8 limit modes.");
+
+static int vid_agtl;
+module_param(vid_agtl, int, 0);
+MODULE_PARM_DESC(vid_agtl, "Configures VID pin input thresholds.");
+
+/* Driver data */
+static struct i2c_driver lm93_driver;
+
+/* LM93 BLOCK READ COMMANDS */
+static const struct { u8 cmd; u8 len; } lm93_block_read_cmds[12] = {
+	{ 0xf2,  8 },
+	{ 0xf3,  8 },
+	{ 0xf4,  6 },
+	{ 0xf5, 16 },
+	{ 0xf6,  4 },
+	{ 0xf7,  8 },
+	{ 0xf8, 12 },
+	{ 0xf9, 32 },
+	{ 0xfa,  8 },
+	{ 0xfb,  8 },
+	{ 0xfc, 16 },
+	{ 0xfd,  9 },
+};
+
+/* ALARMS: SYSCTL format described further below
+   REG: 64 bits in 8 registers, as immediately below */
+struct block1_t {
+	u8 host_status_1;
+	u8 host_status_2;
+	u8 host_status_3;
+	u8 host_status_4;
+	u8 p1_prochot_status;
+	u8 p2_prochot_status;
+	u8 gpi_status;
+	u8 fan_status;
+};
+
+/*
+ * Client-specific data
+ */
+struct lm93_data {
+	struct i2c_client client;
+	struct class_device *class_dev;
+
+	struct mutex update_lock;
+	unsigned long last_updated;	/* In jiffies */
+
+	/* client update function */
+	void (*update)(struct lm93_data *, struct i2c_client *);
+
+	char valid; /* !=0 if following fields are valid */
+
+	/* register values, arranged by block read groups */
+	struct block1_t block1;
+
+	/* temp1 - temp4: unfiltered readings
+	   temp1 - temp2: filtered readings */
+	u8 block2[6];
+
+	/* vin1 - vin16: readings */
+	u8 block3[16];
+
+	/* prochot1 - prochot2: readings */
+	struct {
+		u8 cur;
+		u8 avg;
+	} block4[2];
+
+	/* fan counts 1-4 => 14-bits, LE, *left* justified */
+	u16 block5[4];
+
+	/* block6 has a lot of data we don't need */
+	struct {
+		u8 min;
+		u8 max;
+	} temp_lim[3];
+
+	/* vin1 - vin16: low and high limits */
+	struct {
+		u8 min;
+		u8 max;
+	} block7[16];
+
+	/* fan count limits 1-4 => same format as block5 */
+	u16 block8[4];
+
+	/* pwm control registers (2 pwms, 4 regs) */
+	u8 block9[2][4];
+
+	/* auto/pwm base temp and offset temp registers */
+	struct {
+		u8 base[4];
+		u8 offset[12];
+	} block10;
+
+	/* master config register */
+	u8 config;
+
+	/* VID1 & VID2 => register format, 6-bits, right justified */
+	u8 vid[2];
+
+	/* prochot1 - prochot2: limits */
+	u8 prochot_max[2];
+
+	/* vccp1 & vccp2 (in7 & in8): VID relative limits (register format) */
+	u8 vccp_limits[2];
+
+	/* GPIO input state (register format, i.e. inverted) */
+	u8 gpi;
+
+	/* #PROCHOT override (register format) */
+	u8 prochot_override;
+
+	/* #PROCHOT intervals (register format) */
+	u8 prochot_interval;
+
+	/* Fan Boost Temperatures (register format) */
+	u8 boost[4];
+
+	/* Fan Boost Hysteresis (register format) */
+	u8 boost_hyst[2];
+
+	/* Temperature Zone Min. PWM & Hysteresis (register format) */
+	u8 auto_pwm_min_hyst[2];
+
+	/* #PROCHOT & #VRDHOT PWM Ramp Control */
+	u8 pwm_ramp_ctl;
+
+	/* miscellaneous setup regs */
+	u8 sfc1;
+	u8 sfc2;
+	u8 sf_tach_to_pwm;
+
+	/* The two PWM CTL2  registers can read something other than what was
+	   last written for the OVR_DC field (duty cycle override).  So, we
+	   save the user-commanded value here. */
+	u8 pwm_override[2];
+};
+
+/* VID:	mV
+   REG: 6-bits, right justified, *always* using Intel VRM/VRD 10 */
+static int LM93_VID_FROM_REG(u8 reg)
+{
+	return vid_from_reg((reg & 0x3f), 100);
+}
+
+/* min, max, and nominal register values, per channel (u8) */
+static const u8 lm93_vin_reg_min[16] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xae,
+};
+static const u8 lm93_vin_reg_max[16] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1,
+};
+/* Values from the datasheet. They're here for documentation only.
+static const u8 lm93_vin_reg_nom[16] = {
+	0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0,
+	0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0x40, 0xc0,
+};
+*/
+
+/* min, max, and nominal voltage readings, per channel (mV)*/
+static const unsigned long lm93_vin_val_min[16] = {
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 3000,
+};
+
+static const unsigned long lm93_vin_val_max[16] = {
+	1236, 1236, 1236, 1600, 2000, 2000, 1600, 1600,
+	4400, 6500, 3333, 2625, 1312, 1312, 1236, 3600,
+};
+/* Values from the datasheet. They're here for documentation only.
+static const unsigned long lm93_vin_val_nom[16] = {
+	 927,  927,  927, 1200, 1500, 1500, 1200, 1200,
+	3300, 5000, 2500, 1969,  984,  984,  309, 3300,
+};
+*/
+
+static unsigned LM93_IN_FROM_REG(int nr, u8 reg)
+{
+	const long uV_max = lm93_vin_val_max[nr] * 1000;
+	const long uV_min = lm93_vin_val_min[nr] * 1000;
+
+	const long slope = (uV_max - uV_min) /
+		(lm93_vin_reg_max[nr] - lm93_vin_reg_min[nr]);
+	const long intercept = uV_min - slope * lm93_vin_reg_min[nr];
+
+	return (slope * reg + intercept + 500) / 1000;
+}
+
+/* IN: mV, limits determined by channel nr
+   REG: scaling determined by channel nr */
+static u8 LM93_IN_TO_REG(int nr, unsigned val)
+{
+	/* range limit */
+	const long mV = SENSORS_LIMIT(val,
+		lm93_vin_val_min[nr], lm93_vin_val_max[nr]);
+
+	/* try not to lose too much precision here */
+	const long uV = mV * 1000;
+	const long uV_max = lm93_vin_val_max[nr] * 1000;
+	const long uV_min = lm93_vin_val_min[nr] * 1000;
+
+	/* convert */
+	const long slope = (uV_max - uV_min) /
+		(lm93_vin_reg_max[nr] - lm93_vin_reg_min[nr]);
+	const long intercept = uV_min - slope * lm93_vin_reg_min[nr];
+
+	u8 result = ((uV - intercept + (slope/2)) / slope);
+	result = SENSORS_LIMIT(result,
+			lm93_vin_reg_min[nr], lm93_vin_reg_max[nr]);
+	return result;
+}
+
+/* vid in mV, upper == 0 indicates low limit, otherwise upper limit */
+static unsigned LM93_IN_REL_FROM_REG(u8 reg, int upper, int vid)
+{
+	const long uV_offset = upper ? (((reg >> 4 & 0x0f) + 1) * 12500) :
+				(((reg >> 0 & 0x0f) + 1) * -25000);
+	const long uV_vid = vid * 1000;
+	return (uV_vid + uV_offset + 5000) / 10000;
+}
+
+#define LM93_IN_MIN_FROM_REG(reg,vid)	LM93_IN_REL_FROM_REG(reg,0,vid)
+#define LM93_IN_MAX_FROM_REG(reg,vid)	LM93_IN_REL_FROM_REG(reg,1,vid)
+
+/* vid in mV , upper == 0 indicates low limit, otherwise upper limit
+   upper also determines which nibble of the register is returned
+   (the other nibble will be 0x0) */
+static u8 LM93_IN_REL_TO_REG(unsigned val, int upper, int vid)
+{
+	long uV_offset = vid * 1000 - val * 10000;
+	if (upper) {
+		uV_offset = SENSORS_LIMIT(uV_offset, 12500, 200000);
+		return (u8)((uV_offset /  12500 - 1) << 4);
+	} else {
+		uV_offset = SENSORS_LIMIT(uV_offset, -400000, -25000);
+		return (u8)((uV_offset / -25000 - 1) << 0);
+	}
+}
+
+/* TEMP: 1/1000 degrees C (-128C to +127C)
+   REG: 1C/bit, two's complement */
+static int LM93_TEMP_FROM_REG(u8 reg)
+{
+	return (s8)reg * 1000;
+}
+
+#define LM93_TEMP_MIN (-128000)
+#define LM93_TEMP_MAX ( 127000)
+
+/* TEMP: 1/1000 degrees C (-128C to +127C)
+   REG: 1C/bit, two's complement */
+static u8 LM93_TEMP_TO_REG(int temp)
+{
+	int ntemp = SENSORS_LIMIT(temp, LM93_TEMP_MIN, LM93_TEMP_MAX);
+	ntemp += (ntemp<0 ? -500 : 500);
+	return (u8)(ntemp / 1000);
+}
+
+/* Determine 4-bit temperature offset resolution */
+static int LM93_TEMP_OFFSET_MODE_FROM_REG(u8 sfc2, int nr)
+{
+	/* mode: 0 => 1C/bit, nonzero => 0.5C/bit */
+	return sfc2 & (nr < 2 ? 0x10 : 0x20);
+}
+
+/* This function is common to all 4-bit temperature offsets
+   reg is 4 bits right justified
+   mode 0 => 1C/bit, mode !0 => 0.5C/bit */
+static int LM93_TEMP_OFFSET_FROM_REG(u8 reg, int mode)
+{
+	return (reg & 0x0f) * (mode ? 5 : 10);
+}
+
+#define LM93_TEMP_OFFSET_MIN  (  0)
+#define LM93_TEMP_OFFSET_MAX0 (150)
+#define LM93_TEMP_OFFSET_MAX1 ( 75)
+
+/* This function is common to all 4-bit temperature offsets
+   returns 4 bits right justified
+   mode 0 => 1C/bit, mode !0 => 0.5C/bit */
+static u8 LM93_TEMP_OFFSET_TO_REG(int off, int mode)
+{
+	int factor = mode ? 5 : 10;
+
+	off = SENSORS_LIMIT(off, LM93_TEMP_OFFSET_MIN,
+		mode ? LM93_TEMP_OFFSET_MAX1 : LM93_TEMP_OFFSET_MAX0);
+	return (u8)((off + factor/2) / factor);
+}
+
+/* 0 <= nr <= 3 */
+static int LM93_TEMP_AUTO_OFFSET_FROM_REG(u8 reg, int nr, int mode)
+{
+	/* temp1-temp2 (nr=0,1) use lower nibble */
+	if (nr < 2)
+		return LM93_TEMP_OFFSET_FROM_REG(reg & 0x0f, mode);
+
+	/* temp3-temp4 (nr=2,3) use upper nibble */
+	else
+		return LM93_TEMP_OFFSET_FROM_REG(reg >> 4 & 0x0f, mode);
+}
+
+/* TEMP: 1/10 degrees C (0C to +15C (mode 0) or +7.5C (mode non-zero))
+   REG: 1.0C/bit (mode 0) or 0.5C/bit (mode non-zero)
+   0 <= nr <= 3 */
+static u8 LM93_TEMP_AUTO_OFFSET_TO_REG(u8 old, int off, int nr, int mode)
+{
+	u8 new = LM93_TEMP_OFFSET_TO_REG(off, mode);
+
+	/* temp1-temp2 (nr=0,1) use lower nibble */
+	if (nr < 2)
+		return (old & 0xf0) | (new & 0x0f);
+
+	/* temp3-temp4 (nr=2,3) use upper nibble */
+	else
+		return (new << 4 & 0xf0) | (old & 0x0f);
+}
+
+static int LM93_AUTO_BOOST_HYST_FROM_REGS(struct lm93_data *data, int nr,
+		int mode)
+{
+	u8 reg;
+
+	switch (nr) {
+	case 0:
+		reg = data->boost_hyst[0] & 0x0f;
+		break;
+	case 1:
+		reg = data->boost_hyst[0] >> 4 & 0x0f;
+		break;
+	case 2:
+		reg = data->boost_hyst[1] & 0x0f;
+		break;
+	case 3:
+	default:
+		reg = data->boost_hyst[1] >> 4 & 0x0f;
+		break;
+	}
+
+	return LM93_TEMP_FROM_REG(data->boost[nr]) -
+			LM93_TEMP_OFFSET_FROM_REG(reg, mode);
+}
+
+static u8 LM93_AUTO_BOOST_HYST_TO_REG(struct lm93_data *data, long hyst,
+		int nr, int mode)
+{
+	u8 reg = LM93_TEMP_OFFSET_TO_REG(
+			(LM93_TEMP_FROM_REG(data->boost[nr]) - hyst), mode);
+
+	switch (nr) {
+	case 0:
+		reg = (data->boost_hyst[0] & 0xf0) | (reg & 0x0f);
+		break;
+	case 1:
+		reg = (reg << 4 & 0xf0) | (data->boost_hyst[0] & 0x0f);
+		break;
+	case 2:
+		reg = (data->boost_hyst[1] & 0xf0) | (reg & 0x0f);
+		break;
+	case 3:
+	default:
+		reg = (reg << 4 & 0xf0) | (data->boost_hyst[1] & 0x0f);
+		break;
+	}
+
+	return reg;
+}
+
+/* PWM: 0-255 per sensors documentation
+   REG: 0-13 as mapped below... right justified */
+typedef enum { LM93_PWM_MAP_HI_FREQ, LM93_PWM_MAP_LO_FREQ } pwm_freq_t;
+static int lm93_pwm_map[2][16] = {
+	{
+		0x00, /*   0.00% */ 0x40, /*  25.00% */
+		0x50, /*  31.25% */ 0x60, /*  37.50% */
+		0x70, /*  43.75% */ 0x80, /*  50.00% */
+		0x90, /*  56.25% */ 0xa0, /*  62.50% */
+		0xb0, /*  68.75% */ 0xc0, /*  75.00% */
+		0xd0, /*  81.25% */ 0xe0, /*  87.50% */
+		0xf0, /*  93.75% */ 0xff, /* 100.00% */
+		0xff, 0xff, /* 14, 15 are reserved and should never occur */
+	},
+	{
+		0x00, /*   0.00% */ 0x40, /*  25.00% */
+		0x49, /*  28.57% */ 0x52, /*  32.14% */
+		0x5b, /*  35.71% */ 0x64, /*  39.29% */
+		0x6d, /*  42.86% */ 0x76, /*  46.43% */
+		0x80, /*  50.00% */ 0x89, /*  53.57% */
+		0x92, /*  57.14% */ 0xb6, /*  71.43% */
+		0xdb, /*  85.71% */ 0xff, /* 100.00% */
+		0xff, 0xff, /* 14, 15 are reserved and should never occur */
+	},
+};
+
+static int LM93_PWM_FROM_REG(u8 reg, pwm_freq_t freq)
+{
+	return lm93_pwm_map[freq][reg & 0x0f];
+}
+
+/* round up to nearest match */
+static u8 LM93_PWM_TO_REG(int pwm, pwm_freq_t freq)
+{
+	int i;
+	for (i = 0; i < 13; i++)
+		if (pwm <= lm93_pwm_map[freq][i])
+			break;
+
+	/* can fall through with i==13 */
+	return (u8)i;
+}
+
+static int LM93_FAN_FROM_REG(u16 regs)
+{
+	const u16 count = le16_to_cpu(regs) >> 2;
+	return count == 0 ? -1 : count == 0x3fff ? 0 : 1350000 / count;
+}
+
+/*
+ * RPM: (82.5 to 1350000)
+ * REG: 14-bits, LE, *left* justified
+ */
+static u16 LM93_FAN_TO_REG(long rpm)
+{
+	u16 count, regs;
+
+	if (rpm == 0) {
+		count = 0x3fff;
+	} else {
+		rpm = SENSORS_LIMIT(rpm, 1, 1000000);
+		count = SENSORS_LIMIT((1350000 + rpm) / rpm, 1, 0x3ffe);
+	}
+
+	regs = count << 2;
+	return cpu_to_le16(regs);
+}
+
+/* PWM FREQ: HZ
+   REG: 0-7 as mapped below */
+static int lm93_pwm_freq_map[8] = {
+	22500, 96, 84, 72, 60, 48, 36, 12
+};
+
+static int LM93_PWM_FREQ_FROM_REG(u8 reg)
+{
+	return lm93_pwm_freq_map[reg & 0x07];
+}
+
+/* round up to nearest match */
+static u8 LM93_PWM_FREQ_TO_REG(int freq)
+{
+	int i;
+	for (i = 7; i > 0; i--)
+		if (freq <= lm93_pwm_freq_map[i])
+			break;
+
+	/* can fall through with i==0 */
+	return (u8)i;
+}
+
+/* TIME: 1/100 seconds
+ * REG: 0-7 as mapped below */
+static int lm93_spinup_time_map[8] = {
+	0, 10, 25, 40, 70, 100, 200, 400,
+};
+
+static int LM93_SPINUP_TIME_FROM_REG(u8 reg)
+{
+	return lm93_spinup_time_map[reg >> 5 & 0x07];
+}
+
+/* round up to nearest match */
+static u8 LM93_SPINUP_TIME_TO_REG(int time)
+{
+	int i;
+	for (i = 0; i < 7; i++)
+		if (time <= lm93_spinup_time_map[i])
+			break;
+
+	/* can fall through with i==7 */
+	return (u8)i;
+}
+
+#define LM93_RAMP_MIN 0
+#define LM93_RAMP_MAX 75
+
+static int LM93_RAMP_FROM_REG(u8 reg)
+{
+	return (reg & 0x0f) * 5;
+}
+
+/* RAMP: 1/100 seconds
+   REG: 50mS/bit 4-bits right justified */
+static u8 LM93_RAMP_TO_REG(int ramp)
+{
+	ramp = SENSORS_LIMIT(ramp, LM93_RAMP_MIN, LM93_RAMP_MAX);
+	return (u8)((ramp + 2) / 5);
+}
+
+/* PROCHOT: 0-255, 0 => 0%, 255 => > 96.6%
+ * REG: (same) */
+static u8 LM93_PROCHOT_TO_REG(long prochot)
+{
+	prochot = SENSORS_LIMIT(prochot, 0, 255);
+	return (u8)prochot;
+}
+
+/* PROCHOT-INTERVAL: 73 - 37200 (1/100 seconds)
+ * REG: 0-9 as mapped below */
+static int lm93_interval_map[10] = {
+	73, 146, 290, 580, 1170, 2330, 4660, 9320, 18600, 37200,
+};
+
+static int LM93_INTERVAL_FROM_REG(u8 reg)
+{
+	return lm93_interval_map[reg & 0x0f];
+}
+
+/* round up to nearest match */
+static u8 LM93_INTERVAL_TO_REG(long interval)
+{
+	int i;
+	for (i = 0; i < 9; i++)
+		if (interval <= lm93_interval_map[i])
+			break;
+
+	/* can fall through with i==9 */
+	return (u8)i;
+}
+
+/* GPIO: 0-255, GPIO0 is LSB
+ * REG: inverted */
+static unsigned LM93_GPI_FROM_REG(u8 reg)
+{
+	return ~reg & 0xff;
+}
+
+/* alarm bitmask definitions
+   The LM93 has nearly 64 bits of error status... I've pared that down to
+   what I think is a useful subset in order to fit it into 32 bits.
+
+   Especially note that the #VRD_HOT alarms are missing because we provide
+   that information as values in another sysfs file.
+
+   If libsensors is extended to support 64 bit values, this could be revisited.
+*/
+#define LM93_ALARM_IN1		0x00000001
+#define LM93_ALARM_IN2		0x00000002
+#define LM93_ALARM_IN3		0x00000004
+#define LM93_ALARM_IN4		0x00000008
+#define LM93_ALARM_IN5		0x00000010
+#define LM93_ALARM_IN6		0x00000020
+#define LM93_ALARM_IN7		0x00000040
+#define LM93_ALARM_IN8		0x00000080
+#define LM93_ALARM_IN9		0x00000100
+#define LM93_ALARM_IN10		0x00000200
+#define LM93_ALARM_IN11		0x00000400
+#define LM93_ALARM_IN12		0x00000800
+#define LM93_ALARM_IN13		0x00001000
+#define LM93_ALARM_IN14		0x00002000
+#define LM93_ALARM_IN15		0x00004000
+#define LM93_ALARM_IN16		0x00008000
+#define LM93_ALARM_FAN1		0x00010000
+#define LM93_ALARM_FAN2		0x00020000
+#define LM93_ALARM_FAN3		0x00040000
+#define LM93_ALARM_FAN4		0x00080000
+#define LM93_ALARM_PH1_ERR	0x00100000
+#define LM93_ALARM_PH2_ERR	0x00200000
+#define LM93_ALARM_SCSI1_ERR	0x00400000
+#define LM93_ALARM_SCSI2_ERR	0x00800000
+#define LM93_ALARM_DVDDP1_ERR	0x01000000
+#define LM93_ALARM_DVDDP2_ERR	0x02000000
+#define LM93_ALARM_D1_ERR	0x04000000
+#define LM93_ALARM_D2_ERR	0x08000000
+#define LM93_ALARM_TEMP1	0x10000000
+#define LM93_ALARM_TEMP2	0x20000000
+#define LM93_ALARM_TEMP3	0x40000000
+
+static unsigned LM93_ALARMS_FROM_REG(struct block1_t b1)
+{
+	unsigned result;
+	result  = b1.host_status_2 & 0x3f;
+
+	if (vccp_limit_type[0])
+		result |= (b1.host_status_4 & 0x10) << 2;
+	else
+		result |= b1.host_status_2 & 0x40;
+
+	if (vccp_limit_type[1])
+		result |= (b1.host_status_4 & 0x20) << 2;
+	else
+		result |= b1.host_status_2 & 0x80;
+
+	result |= b1.host_status_3 << 8;
+	result |= (b1.fan_status & 0x0f) << 16;
+	result |= (b1.p1_prochot_status & 0x80) << 13;
+	result |= (b1.p2_prochot_status & 0x80) << 14;
+	result |= (b1.host_status_4 & 0xfc) << 20;
+	result |= (b1.host_status_1 & 0x07) << 28;
+	return result;
+}
+
+#define MAX_RETRIES 5
+
+static u8 lm93_read_byte(struct i2c_client *client, u8 reg)
+{
+	int value, i;
+
+	/* retry in case of read errors */
+	for (i=1; i<=MAX_RETRIES; i++) {
+		if ((value = i2c_smbus_read_byte_data(client, reg)) >= 0) {
+			return value;
+		} else {
+			dev_warn(&client->dev,"lm93: read byte data failed, "
+				"address 0x%02x.\n", reg);
+			mdelay(i + 3);
+		}
+
+	}
+
+	/* <TODO> what to return in case of error? */
+	dev_err(&client->dev,"lm93: All read byte retries failed!!\n");
+	return 0;
+}
+
+static int lm93_write_byte(struct i2c_client *client, u8 reg, u8 value)
+{
+	int result;
+
+	/* <TODO> how to handle write errors? */
+	result = i2c_smbus_write_byte_data(client, reg, value);
+
+	if (result < 0)
+		dev_warn(&client->dev,"lm93: write byte data failed, "
+			 "0x%02x at address 0x%02x.\n", value, reg);
+
+	return result;
+}
+
+static u16 lm93_read_word(struct i2c_client *client, u8 reg)
+{
+	int value, i;
+
+	/* retry in case of read errors */
+	for (i=1; i<=MAX_RETRIES; i++) {
+		if ((value = i2c_smbus_read_word_data(client, reg)) >= 0) {
+			return value;
+		} else {
+			dev_warn(&client->dev,"lm93: read word data failed, "
+				 "address 0x%02x.\n", reg);
+			mdelay(i + 3);
+		}
+
+	}
+
+	/* <TODO> what to return in case of error? */
+	dev_err(&client->dev,"lm93: All read word retries failed!!\n");
+	return 0;
+}
+
+static int lm93_write_word(struct i2c_client *client, u8 reg, u16 value)
+{
+	int result;
+
+	/* <TODO> how to handle write errors? */
+	result = i2c_smbus_write_word_data(client, reg, value);
+
+	if (result < 0)
+		dev_warn(&client->dev,"lm93: write word data failed, "
+			 "0x%04x at address 0x%02x.\n", value, reg);
+
+	return result;
+}
+
+static u8 lm93_block_buffer[I2C_SMBUS_BLOCK_MAX];
+
+/*
+	read block data into values, retry if not expected length
+	fbn => index to lm93_block_read_cmds table
+		(Fixed Block Number - section 14.5.2 of LM93 datasheet)
+*/
+static void lm93_read_block(struct i2c_client *client, u8 fbn, u8 *values)
+{
+	int i, result=0;
+
+	for (i = 1; i <= MAX_RETRIES; i++) {
+		result = i2c_smbus_read_block_data(client,
+			lm93_block_read_cmds[fbn].cmd, lm93_block_buffer);
+
+		if (result == lm93_block_read_cmds[fbn].len) {
+			break;
+		} else {
+			dev_warn(&client->dev,"lm93: block read data failed, "
+				 "command 0x%02x.\n",
+				 lm93_block_read_cmds[fbn].cmd);
+			mdelay(i + 3);
+		}
+	}
+
+	if (result == lm93_block_read_cmds[fbn].len) {
+		memcpy(values,lm93_block_buffer,lm93_block_read_cmds[fbn].len);
+	} else {
+		/* <TODO> what to do in case of error? */
+	}
+}
+
+static struct lm93_data *lm93_update_device(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	const unsigned long interval = HZ + (HZ / 2);
+
+	mutex_lock(&data->update_lock);
+
+	if (time_after(jiffies, data->last_updated + interval) ||
+		!data->valid) {
+
+		data->update(data, client);
+		data->last_updated = jiffies;
+		data->valid = 1;
+	}
+
+	mutex_unlock(&data->update_lock);
+	return data;
+}
+
+/* update routine for data that has no corresponding SMBus block command */
+static void lm93_update_client_common(struct lm93_data *data,
+				      struct i2c_client *client)
+{
+	int i;
+	u8 *ptr;
+
+	/* temp1 - temp4: limits */
+	for (i = 0; i < 4; i++) {
+		data->temp_lim[i].min =
+			lm93_read_byte(client, LM93_REG_TEMP_MIN(i));
+		data->temp_lim[i].max =
+			lm93_read_byte(client, LM93_REG_TEMP_MAX(i));
+	}
+
+	/* config register */
+	data->config = lm93_read_byte(client, LM93_REG_CONFIG);
+
+	/* vid1 - vid2: values */
+	for (i = 0; i < 2; i++)
+		data->vid[i] = lm93_read_byte(client, LM93_REG_VID(i));
+
+	/* prochot1 - prochot2: limits */
+	for (i = 0; i < 2; i++)
+		data->prochot_max[i] = lm93_read_byte(client,
+				LM93_REG_PROCHOT_MAX(i));
+
+	/* vccp1 - vccp2: VID relative limits */
+	for (i = 0; i < 2; i++)
+		data->vccp_limits[i] = lm93_read_byte(client,
+				LM93_REG_VCCP_LIMIT_OFF(i));
+
+	/* GPIO input state */
+	data->gpi = lm93_read_byte(client, LM93_REG_GPI);
+
+	/* #PROCHOT override state */
+	data->prochot_override = lm93_read_byte(client,
+			LM93_REG_PROCHOT_OVERRIDE);
+
+	/* #PROCHOT intervals */
+	data->prochot_interval = lm93_read_byte(client,
+			LM93_REG_PROCHOT_INTERVAL);
+
+	/* Fan Boost Temperature registers */
+	for (i = 0; i < 4; i++)
+		data->boost[i] = lm93_read_byte(client, LM93_REG_BOOST(i));
+
+	/* Fan Boost Temperature Hyst. registers */
+	data->boost_hyst[0] = lm93_read_byte(client, LM93_REG_BOOST_HYST_12);
+	data->boost_hyst[1] = lm93_read_byte(client, LM93_REG_BOOST_HYST_34);
+
+	/* Temperature Zone Min. PWM & Hysteresis registers */
+	data->auto_pwm_min_hyst[0] =
+			lm93_read_byte(client, LM93_REG_PWM_MIN_HYST_12);
+	data->auto_pwm_min_hyst[1] =
+			lm93_read_byte(client, LM93_REG_PWM_MIN_HYST_34);
+
+	/* #PROCHOT & #VRDHOT PWM Ramp Control register */
+	data->pwm_ramp_ctl = lm93_read_byte(client, LM93_REG_PWM_RAMP_CTL);
+
+	/* misc setup registers */
+	data->sfc1 = lm93_read_byte(client, LM93_REG_SFC1);
+	data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
+	data->sf_tach_to_pwm = lm93_read_byte(client,
+			LM93_REG_SF_TACH_TO_PWM);
+
+	/* write back alarm values to clear */
+	for (i = 0, ptr = (u8 *)(&data->block1); i < 8; i++)
+		lm93_write_byte(client, LM93_REG_HOST_ERROR_1 + i, *(ptr + i));
+}
+
+/* update routine which uses SMBus block data commands */
+static void lm93_update_client_full(struct lm93_data *data,
+				    struct i2c_client *client)
+{
+	dev_dbg(&client->dev,"starting device update (block data enabled)\n");
+
+	/* in1 - in16: values & limits */
+	lm93_read_block(client, 3, (u8 *)(data->block3));
+	lm93_read_block(client, 7, (u8 *)(data->block7));
+
+	/* temp1 - temp4: values */
+	lm93_read_block(client, 2, (u8 *)(data->block2));
+
+	/* prochot1 - prochot2: values */
+	lm93_read_block(client, 4, (u8 *)(data->block4));
+
+	/* fan1 - fan4: values & limits */
+	lm93_read_block(client, 5, (u8 *)(data->block5));
+	lm93_read_block(client, 8, (u8 *)(data->block8));
+
+	/* pwm control registers */
+	lm93_read_block(client, 9, (u8 *)(data->block9));
+
+	/* alarm values */
+	lm93_read_block(client, 1, (u8 *)(&data->block1));
+
+	/* auto/pwm registers */
+	lm93_read_block(client, 10, (u8 *)(&data->block10));
+
+	lm93_update_client_common(data, client);
+}
+
+/* update routine which uses SMBus byte/word data commands only */
+static void lm93_update_client_min(struct lm93_data *data,
+				   struct i2c_client *client)
+{
+	int i,j;
+	u8 *ptr;
+
+	dev_dbg(&client->dev,"starting device update (block data disabled)\n");
+
+	/* in1 - in16: values & limits */
+	for (i = 0; i < 16; i++) {
+		data->block3[i] =
+			lm93_read_byte(client, LM93_REG_IN(i));
+		data->block7[i].min =
+			lm93_read_byte(client, LM93_REG_IN_MIN(i));
+		data->block7[i].max =
+			lm93_read_byte(client, LM93_REG_IN_MAX(i));
+	}
+
+	/* temp1 - temp4: values */
+	for (i = 0; i < 4; i++) {
+		data->block2[i] =
+			lm93_read_byte(client, LM93_REG_TEMP(i));
+	}
+
+	/* prochot1 - prochot2: values */
+	for (i = 0; i < 2; i++) {
+		data->block4[i].cur =
+			lm93_read_byte(client, LM93_REG_PROCHOT_CUR(i));
+		data->block4[i].avg =
+			lm93_read_byte(client, LM93_REG_PROCHOT_AVG(i));
+	}
+
+	/* fan1 - fan4: values & limits */
+	for (i = 0; i < 4; i++) {
+		data->block5[i] =
+			lm93_read_word(client, LM93_REG_FAN(i));
+		data->block8[i] =
+			lm93_read_word(client, LM93_REG_FAN_MIN(i));
+	}
+
+	/* pwm control registers */
+	for (i = 0; i < 2; i++) {
+		for (j = 0; j < 4; j++) {
+			data->block9[i][j] =
+				lm93_read_byte(client, LM93_REG_PWM_CTL(i,j));
+		}
+	}
+
+	/* alarm values */
+	for (i = 0, ptr = (u8 *)(&data->block1); i < 8; i++) {
+		*(ptr + i) =
+			lm93_read_byte(client, LM93_REG_HOST_ERROR_1 + i);
+	}
+
+	/* auto/pwm (base temp) registers */
+	for (i = 0; i < 4; i++) {
+		data->block10.base[i] =
+			lm93_read_byte(client, LM93_REG_TEMP_BASE(i));
+	}
+
+	/* auto/pwm (offset temp) registers */
+	for (i = 0; i < 12; i++) {
+		data->block10.offset[i] =
+			lm93_read_byte(client, LM93_REG_TEMP_OFFSET(i));
+	}
+
+	lm93_update_client_common(data, client);
+}
+
+/* following are the sysfs callback functions */
+static ssize_t show_in(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf, "%d\n", LM93_IN_FROM_REG(nr, data->block3[nr]));
+}
+
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in, NULL, 1);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in, NULL, 2);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in, NULL, 3);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_in, NULL, 4);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_in, NULL, 5);
+static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_in, NULL, 6);
+static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_in, NULL, 7);
+static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_in, NULL, 8);
+static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_in, NULL, 9);
+static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, show_in, NULL, 10);
+static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, show_in, NULL, 11);
+static SENSOR_DEVICE_ATTR(in13_input, S_IRUGO, show_in, NULL, 12);
+static SENSOR_DEVICE_ATTR(in14_input, S_IRUGO, show_in, NULL, 13);
+static SENSOR_DEVICE_ATTR(in15_input, S_IRUGO, show_in, NULL, 14);
+static SENSOR_DEVICE_ATTR(in16_input, S_IRUGO, show_in, NULL, 15);
+
+static ssize_t show_in_min(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	int vccp = nr - 6;
+	long rc, vid;
+
+	if ((nr==6 || nr==7) && (vccp_limit_type[vccp])) {
+		vid = LM93_VID_FROM_REG(data->vid[vccp]);
+		rc = LM93_IN_MIN_FROM_REG(data->vccp_limits[vccp], vid);
+	}
+	else {
+		rc = LM93_IN_FROM_REG(nr, data->block7[nr].min);
+	}
+	return sprintf(buf, "%ld\n", rc);
+}
+
+static ssize_t store_in_min(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+	int vccp = nr - 6;
+	long vid;
+
+	mutex_lock(&data->update_lock);
+	if ((nr==6 || nr==7) && (vccp_limit_type[vccp])) {
+		vid = LM93_VID_FROM_REG(data->vid[vccp]);
+		data->vccp_limits[vccp] = (data->vccp_limits[vccp] & 0xf0) |
+				LM93_IN_REL_TO_REG(val, 0, vid);
+		lm93_write_byte(client, LM93_REG_VCCP_LIMIT_OFF(vccp),
+				data->vccp_limits[vccp]);
+	}
+	else {
+		data->block7[nr].min = LM93_IN_TO_REG(nr,val);
+		lm93_write_byte(client, LM93_REG_IN_MIN(nr),
+				data->block7[nr].min);
+	}
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 0);
+static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 1);
+static SENSOR_DEVICE_ATTR(in3_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 2);
+static SENSOR_DEVICE_ATTR(in4_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 3);
+static SENSOR_DEVICE_ATTR(in5_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 4);
+static SENSOR_DEVICE_ATTR(in6_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 5);
+static SENSOR_DEVICE_ATTR(in7_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 6);
+static SENSOR_DEVICE_ATTR(in8_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 7);
+static SENSOR_DEVICE_ATTR(in9_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 8);
+static SENSOR_DEVICE_ATTR(in10_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 9);
+static SENSOR_DEVICE_ATTR(in11_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 10);
+static SENSOR_DEVICE_ATTR(in12_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 11);
+static SENSOR_DEVICE_ATTR(in13_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 12);
+static SENSOR_DEVICE_ATTR(in14_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 13);
+static SENSOR_DEVICE_ATTR(in15_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 14);
+static SENSOR_DEVICE_ATTR(in16_min, S_IWUSR | S_IRUGO,
+			  show_in_min, store_in_min, 15);
+
+static ssize_t show_in_max(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	int vccp = nr - 6;
+	long rc, vid;
+
+	if ((nr==6 || nr==7) && (vccp_limit_type[vccp])) {
+		vid = LM93_VID_FROM_REG(data->vid[vccp]);
+		rc = LM93_IN_MAX_FROM_REG(data->vccp_limits[vccp],vid);
+	}
+	else {
+		rc = LM93_IN_FROM_REG(nr, data->block7[nr].max);
+	}
+	return sprintf(buf, "%ld\n", rc);
+}
+
+static ssize_t store_in_max(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+	int vccp = nr - 6;
+	long vid;
+
+	mutex_lock(&data->update_lock);
+	if ((nr==6 || nr==7) && (vccp_limit_type[vccp])) {
+		vid = LM93_VID_FROM_REG(data->vid[vccp]);
+		data->vccp_limits[vccp] = (data->vccp_limits[vccp] & 0x0f) |
+				LM93_IN_REL_TO_REG(val, 1, vid);
+		lm93_write_byte(client, LM93_REG_VCCP_LIMIT_OFF(vccp),
+				data->vccp_limits[vccp]);
+	}
+	else {
+		data->block7[nr].max = LM93_IN_TO_REG(nr,val);
+		lm93_write_byte(client, LM93_REG_IN_MAX(nr),
+				data->block7[nr].max);
+	}
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 0);
+static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 1);
+static SENSOR_DEVICE_ATTR(in3_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 2);
+static SENSOR_DEVICE_ATTR(in4_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 3);
+static SENSOR_DEVICE_ATTR(in5_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 4);
+static SENSOR_DEVICE_ATTR(in6_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 5);
+static SENSOR_DEVICE_ATTR(in7_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 6);
+static SENSOR_DEVICE_ATTR(in8_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 7);
+static SENSOR_DEVICE_ATTR(in9_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 8);
+static SENSOR_DEVICE_ATTR(in10_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 9);
+static SENSOR_DEVICE_ATTR(in11_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 10);
+static SENSOR_DEVICE_ATTR(in12_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 11);
+static SENSOR_DEVICE_ATTR(in13_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 12);
+static SENSOR_DEVICE_ATTR(in14_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 13);
+static SENSOR_DEVICE_ATTR(in15_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 14);
+static SENSOR_DEVICE_ATTR(in16_max, S_IWUSR | S_IRUGO,
+			  show_in_max, store_in_max, 15);
+
+static ssize_t show_temp(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->block2[nr]));
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
+
+static ssize_t show_temp_min(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->temp_lim[nr].min));
+}
+
+static ssize_t store_temp_min(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	data->temp_lim[nr].min = LM93_TEMP_TO_REG(val);
+	lm93_write_byte(client, LM93_REG_TEMP_MIN(nr), data->temp_lim[nr].min);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
+			  show_temp_min, store_temp_min, 0);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO,
+			  show_temp_min, store_temp_min, 1);
+static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO,
+			  show_temp_min, store_temp_min, 2);
+
+static ssize_t show_temp_max(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->temp_lim[nr].max));
+}
+
+static ssize_t store_temp_max(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	data->temp_lim[nr].max = LM93_TEMP_TO_REG(val);
+	lm93_write_byte(client, LM93_REG_TEMP_MAX(nr), data->temp_lim[nr].max);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
+			  show_temp_max, store_temp_max, 0);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO,
+			  show_temp_max, store_temp_max, 1);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO,
+			  show_temp_max, store_temp_max, 2);
+
+static ssize_t show_temp_auto_base(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->block10.base[nr]));
+}
+
+static ssize_t store_temp_auto_base(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	data->block10.base[nr] = LM93_TEMP_TO_REG(val);
+	lm93_write_byte(client, LM93_REG_TEMP_BASE(nr), data->block10.base[nr]);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_auto_base, S_IWUSR | S_IRUGO,
+			  show_temp_auto_base, store_temp_auto_base, 0);
+static SENSOR_DEVICE_ATTR(temp2_auto_base, S_IWUSR | S_IRUGO,
+			  show_temp_auto_base, store_temp_auto_base, 1);
+static SENSOR_DEVICE_ATTR(temp3_auto_base, S_IWUSR | S_IRUGO,
+			  show_temp_auto_base, store_temp_auto_base, 2);
+
+static ssize_t show_temp_auto_boost(struct device *dev,
+				    struct device_attribute *attr,char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->boost[nr]));
+}
+
+static ssize_t store_temp_auto_boost(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	data->boost[nr] = LM93_TEMP_TO_REG(val);
+	lm93_write_byte(client, LM93_REG_BOOST(nr), data->boost[nr]);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_auto_boost, S_IWUSR | S_IRUGO,
+			  show_temp_auto_boost, store_temp_auto_boost, 0);
+static SENSOR_DEVICE_ATTR(temp2_auto_boost, S_IWUSR | S_IRUGO,
+			  show_temp_auto_boost, store_temp_auto_boost, 1);
+static SENSOR_DEVICE_ATTR(temp3_auto_boost, S_IWUSR | S_IRUGO,
+			  show_temp_auto_boost, store_temp_auto_boost, 2);
+
+static ssize_t show_temp_auto_boost_hyst(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	int mode = LM93_TEMP_OFFSET_MODE_FROM_REG(data->sfc2, nr);
+	return sprintf(buf,"%d\n",
+		       LM93_AUTO_BOOST_HYST_FROM_REGS(data, nr, mode));
+}
+
+static ssize_t store_temp_auto_boost_hyst(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	/* force 0.5C/bit mode */
+	data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
+	data->sfc2 |= ((nr < 2) ? 0x10 : 0x20);
+	lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
+	data->boost_hyst[nr/2] = LM93_AUTO_BOOST_HYST_TO_REG(data, val, nr, 1);
+	lm93_write_byte(client, LM93_REG_BOOST_HYST(nr),
+			data->boost_hyst[nr/2]);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_auto_boost_hyst, S_IWUSR | S_IRUGO,
+			  show_temp_auto_boost_hyst,
+			  store_temp_auto_boost_hyst, 0);
+static SENSOR_DEVICE_ATTR(temp2_auto_boost_hyst, S_IWUSR | S_IRUGO,
+			  show_temp_auto_boost_hyst,
+			  store_temp_auto_boost_hyst, 1);
+static SENSOR_DEVICE_ATTR(temp3_auto_boost_hyst, S_IWUSR | S_IRUGO,
+			  show_temp_auto_boost_hyst,
+			  store_temp_auto_boost_hyst, 2);
+
+static ssize_t show_temp_auto_offset(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr);
+	int nr = s_attr->index;
+	int ofs = s_attr->nr;
+	struct lm93_data *data = lm93_update_device(dev);
+	int mode = LM93_TEMP_OFFSET_MODE_FROM_REG(data->sfc2, nr);
+	return sprintf(buf,"%d\n",
+	       LM93_TEMP_AUTO_OFFSET_FROM_REG(data->block10.offset[ofs],
+					      nr,mode));
+}
+
+static ssize_t store_temp_auto_offset(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr);
+	int nr = s_attr->index;
+	int ofs = s_attr->nr;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	/* force 0.5C/bit mode */
+	data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
+	data->sfc2 |= ((nr < 2) ? 0x10 : 0x20);
+	lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
+	data->block10.offset[ofs] = LM93_TEMP_AUTO_OFFSET_TO_REG(
+			data->block10.offset[ofs], val, nr, 1);
+	lm93_write_byte(client, LM93_REG_TEMP_OFFSET(ofs),
+			data->block10.offset[ofs]);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset1, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 0, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset2, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 1, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset3, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 2, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset4, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 3, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset5, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 4, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset6, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 5, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset7, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 6, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset8, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 7, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset9, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 8, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset10, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 9, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset11, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 10, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset12, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 11, 0);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset1, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 0, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset2, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 1, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset3, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 2, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset4, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 3, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset5, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 4, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset6, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 5, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset7, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 6, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset8, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 7, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset9, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 8, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset10, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 9, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset11, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 10, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset12, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 11, 1);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset1, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 0, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset2, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 1, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset3, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 2, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset4, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 3, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset5, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 4, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset6, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 5, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset7, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 6, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset8, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 7, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset9, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 8, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset10, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 9, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset11, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 10, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset12, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset, store_temp_auto_offset, 11, 2);
+
+static ssize_t show_temp_auto_pwm_min(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	u8 reg, ctl4;
+	struct lm93_data *data = lm93_update_device(dev);
+	reg = data->auto_pwm_min_hyst[nr/2] >> 4 & 0x0f;
+	ctl4 = data->block9[nr][LM93_PWM_CTL4];
+	return sprintf(buf,"%d\n",LM93_PWM_FROM_REG(reg, (ctl4 & 0x07) ?
+				LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ));
+}
+
+static ssize_t store_temp_auto_pwm_min(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+	u8 reg, ctl4;
+
+	mutex_lock(&data->update_lock);
+	reg = lm93_read_byte(client, LM93_REG_PWM_MIN_HYST(nr));
+	ctl4 = lm93_read_byte(client, LM93_REG_PWM_CTL(nr,LM93_PWM_CTL4));
+	reg = (reg & 0x0f) |
+		LM93_PWM_TO_REG(val, (ctl4 & 0x07) ?
+				LM93_PWM_MAP_LO_FREQ :
+				LM93_PWM_MAP_HI_FREQ) << 4;
+	data->auto_pwm_min_hyst[nr/2] = reg;
+	lm93_write_byte(client, LM93_REG_PWM_MIN_HYST(nr), reg);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_auto_pwm_min, S_IWUSR | S_IRUGO,
+			  show_temp_auto_pwm_min,
+			  store_temp_auto_pwm_min, 0);
+static SENSOR_DEVICE_ATTR(temp2_auto_pwm_min, S_IWUSR | S_IRUGO,
+			  show_temp_auto_pwm_min,
+			  store_temp_auto_pwm_min, 1);
+static SENSOR_DEVICE_ATTR(temp3_auto_pwm_min, S_IWUSR | S_IRUGO,
+			  show_temp_auto_pwm_min,
+			  store_temp_auto_pwm_min, 2);
+
+static ssize_t show_temp_auto_offset_hyst(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	int mode = LM93_TEMP_OFFSET_MODE_FROM_REG(data->sfc2, nr);
+	return sprintf(buf,"%d\n",LM93_TEMP_OFFSET_FROM_REG(
+					data->auto_pwm_min_hyst[nr/2], mode));
+}
+
+static ssize_t store_temp_auto_offset_hyst(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+	u8 reg;
+
+	mutex_lock(&data->update_lock);
+	/* force 0.5C/bit mode */
+	data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
+	data->sfc2 |= ((nr < 2) ? 0x10 : 0x20);
+	lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
+	reg = data->auto_pwm_min_hyst[nr/2];
+	reg = (reg & 0xf0) | (LM93_TEMP_OFFSET_TO_REG(val, 1) & 0x0f);
+	data->auto_pwm_min_hyst[nr/2] = reg;
+	lm93_write_byte(client, LM93_REG_PWM_MIN_HYST(nr), reg);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_auto_offset_hyst, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset_hyst,
+			  store_temp_auto_offset_hyst, 0);
+static SENSOR_DEVICE_ATTR(temp2_auto_offset_hyst, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset_hyst,
+			  store_temp_auto_offset_hyst, 1);
+static SENSOR_DEVICE_ATTR(temp3_auto_offset_hyst, S_IWUSR | S_IRUGO,
+			  show_temp_auto_offset_hyst,
+			  store_temp_auto_offset_hyst, 2);
+
+static ssize_t show_fan_input(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct sensor_device_attribute *s_attr = to_sensor_dev_attr(attr);
+	int nr = s_attr->index;
+	struct lm93_data *data = lm93_update_device(dev);
+
+	return sprintf(buf,"%d\n",LM93_FAN_FROM_REG(data->block5[nr]));
+}
+
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan_input, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan_input, NULL, 3);
+
+static ssize_t show_fan_min(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+
+	return sprintf(buf,"%d\n",LM93_FAN_FROM_REG(data->block8[nr]));
+}
+
+static ssize_t store_fan_min(struct device *dev, struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	data->block8[nr] = LM93_FAN_TO_REG(val);
+	lm93_write_word(client,LM93_REG_FAN_MIN(nr),data->block8[nr]);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO,
+			  show_fan_min, store_fan_min, 0);
+static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO,
+			  show_fan_min, store_fan_min, 1);
+static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO,
+			  show_fan_min, store_fan_min, 2);
+static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO,
+			  show_fan_min, store_fan_min, 3);
+
+/* some tedious bit-twiddling here to deal with the register format:
+
+	data->sf_tach_to_pwm: (tach to pwm mapping bits)
+
+		bit |  7  |  6  |  5  |  4  |  3  |  2  |  1  |  0
+		     T4:P2 T4:P1 T3:P2 T3:P1 T2:P2 T2:P1 T1:P2 T1:P1
+
+	data->sfc2: (enable bits)
+
+		bit |  3  |  2  |  1  |  0
+		       T4    T3    T2    T1
+*/
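+
+/* illustrative reading of the table above (not additional driver logic):
+   e.g. for fan3 (nr == 2) the tach-to-pwm mapping lives in bits 5:4 of
+   sf_tach_to_pwm, i.e. (sf_tach_to_pwm >> (2 * 2)) & 0x03, and the
+   matching enable bit is bit 2 of sfc2, i.e. (sfc2 >> 2) & 0x01 */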
+
+static ssize_t show_fan_smart_tach(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	long rc = 0;
+	int mapping;
+
+	/* extract the relevant mapping */
+	mapping = (data->sf_tach_to_pwm >> (nr * 2)) & 0x03;
+
+	/* if there's a mapping and it's enabled */
+	if (mapping && ((data->sfc2 >> nr) & 0x01))
+		rc = mapping;
+	return sprintf(buf,"%ld\n",rc);
+}
+
+/* helper function - must grab data->update_lock before calling
+   fan is 0-3, indicating fan1-fan4 */
+static void lm93_write_fan_smart_tach(struct i2c_client *client,
+	struct lm93_data *data, int fan, long value)
+{
+	/* insert the new mapping and write it out */
+	data->sf_tach_to_pwm = lm93_read_byte(client, LM93_REG_SF_TACH_TO_PWM);
+	data->sf_tach_to_pwm &= ~(0x3 << fan * 2);
+	data->sf_tach_to_pwm |= value << fan * 2;
+	lm93_write_byte(client, LM93_REG_SF_TACH_TO_PWM, data->sf_tach_to_pwm);
+
+	/* insert the enable bit and write it out */
+	data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
+	if (value)
+		data->sfc2 |= 1 << fan;
+	else
+		data->sfc2 &= ~(1 << fan);
+	lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
+}
+
+static ssize_t store_fan_smart_tach(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	/* sanity test, ignore the write otherwise */
+	if (0 <= val && val <= 2) {
+		/* can't enable if pwm freq is 22.5KHz */
+		if (val) {
+			u8 ctl4 = lm93_read_byte(client,
+				LM93_REG_PWM_CTL(val-1,LM93_PWM_CTL4));
+			if ((ctl4 & 0x07) == 0)
+				val = 0;
+		}
+		lm93_write_fan_smart_tach(client, data, nr, val);
+	}
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(fan1_smart_tach, S_IWUSR | S_IRUGO,
+			  show_fan_smart_tach, store_fan_smart_tach, 0);
+static SENSOR_DEVICE_ATTR(fan2_smart_tach, S_IWUSR | S_IRUGO,
+			  show_fan_smart_tach, store_fan_smart_tach, 1);
+static SENSOR_DEVICE_ATTR(fan3_smart_tach, S_IWUSR | S_IRUGO,
+			  show_fan_smart_tach, store_fan_smart_tach, 2);
+static SENSOR_DEVICE_ATTR(fan4_smart_tach, S_IWUSR | S_IRUGO,
+			  show_fan_smart_tach, store_fan_smart_tach, 3);
+
+static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	u8 ctl2, ctl4;
+	long rc;
+
+	ctl2 = data->block9[nr][LM93_PWM_CTL2];
+	ctl4 = data->block9[nr][LM93_PWM_CTL4];
+	if (ctl2 & 0x01) /* show user commanded value if enabled */
+		rc = data->pwm_override[nr];
+	else /* show present h/w value if manual pwm disabled */
+		rc = LM93_PWM_FROM_REG(ctl2 >> 4, (ctl4 & 0x07) ?
+			LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ);
+	return sprintf(buf,"%ld\n",rc);
+}
+
+static ssize_t store_pwm(struct device *dev, struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+	u8 ctl2, ctl4;
+
+	mutex_lock(&data->update_lock);
+	ctl2 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL2));
+	ctl4 = lm93_read_byte(client, LM93_REG_PWM_CTL(nr,LM93_PWM_CTL4));
+	ctl2 = (ctl2 & 0x0f) | LM93_PWM_TO_REG(val,(ctl4 & 0x07) ?
+			LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ) << 4;
+	/* save user commanded value */
+	data->pwm_override[nr] = LM93_PWM_FROM_REG(ctl2 >> 4,
+			(ctl4 & 0x07) ?  LM93_PWM_MAP_LO_FREQ :
+			LM93_PWM_MAP_HI_FREQ);
+	lm93_write_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL2),ctl2);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0);
+static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1);
+
+static ssize_t show_pwm_enable(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	u8 ctl2;
+	long rc;
+
+	ctl2 = data->block9[nr][LM93_PWM_CTL2];
+	if (ctl2 & 0x01) /* manual override enabled ? */
+		rc = ((ctl2 & 0xF0) == 0xF0) ? 0 : 1;
+	else
+		rc = 2;
+	return sprintf(buf,"%ld\n",rc);
+}
+
+static ssize_t store_pwm_enable(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+	u8 ctl2;
+
+	mutex_lock(&data->update_lock);
+	ctl2 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL2));
+
+	switch (val) {
+	case 0:
+		ctl2 |= 0xF1; /* enable manual override, set PWM to max */
+		break;
+	case 1: ctl2 |= 0x01; /* enable manual override */
+		break;
+	case 2: ctl2 &= ~0x01; /* disable manual override */
+		break;
+	default:
+		mutex_unlock(&data->update_lock);
+		return -EINVAL;
+	}
+
+	lm93_write_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL2),ctl2);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
+				show_pwm_enable, store_pwm_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
+				show_pwm_enable, store_pwm_enable, 1);
+
+static ssize_t show_pwm_freq(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	u8 ctl4;
+
+	ctl4 = data->block9[nr][LM93_PWM_CTL4];
+	return sprintf(buf,"%d\n",LM93_PWM_FREQ_FROM_REG(ctl4));
+}
+
+/* helper function - must grab data->update_lock before calling
+   pwm is 0-1, indicating pwm1-pwm2
+   this disables smart tach for all tach channels bound to the given pwm */
+static void lm93_disable_fan_smart_tach(struct i2c_client *client,
+	struct lm93_data *data, int pwm)
+{
+	int mapping = lm93_read_byte(client, LM93_REG_SF_TACH_TO_PWM);
+	int mask;
+
+	/* collapse the mapping into a mask of enable bits */
+	mapping = (mapping >> pwm) & 0x55;
+	mask = mapping & 0x01;
+	mask |= (mapping & 0x04) >> 1;
+	mask |= (mapping & 0x10) >> 2;
+	mask |= (mapping & 0x40) >> 3;
+
+	/* disable smart tach according to the mask */
+	data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
+	data->sfc2 &= ~mask;
+	lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
+}
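+
+/* worked example of the collapse above (illustrative only): for pwm == 1
+   (pwm2), (mapping >> 1) & 0x55 leaves the T1:P2..T4:P2 bits at positions
+   0/2/4/6; packing them down gives a mask whose bits 0..3 correspond to
+   the T1..T4 smart tach enable bits in sfc2, which are then cleared */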
+
+static ssize_t store_pwm_freq(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+	u8 ctl4;
+
+	mutex_lock(&data->update_lock);
+	ctl4 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL4));
+	ctl4 = (ctl4 & 0xf8) | LM93_PWM_FREQ_TO_REG(val);
+	data->block9[nr][LM93_PWM_CTL4] = ctl4;
+	/* ctl4 == 0 -> 22.5KHz -> disable smart tach */
+	if (!ctl4)
+		lm93_disable_fan_smart_tach(client, data, nr);
+	lm93_write_byte(client,	LM93_REG_PWM_CTL(nr,LM93_PWM_CTL4), ctl4);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_freq, S_IWUSR | S_IRUGO,
+			  show_pwm_freq, store_pwm_freq, 0);
+static SENSOR_DEVICE_ATTR(pwm2_freq, S_IWUSR | S_IRUGO,
+			  show_pwm_freq, store_pwm_freq, 1);
+
+static ssize_t show_pwm_auto_channels(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",data->block9[nr][LM93_PWM_CTL1]);
+}
+
+static ssize_t store_pwm_auto_channels(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	data->block9[nr][LM93_PWM_CTL1] = SENSORS_LIMIT(val, 0, 255);
+	lm93_write_byte(client,	LM93_REG_PWM_CTL(nr,LM93_PWM_CTL1),
+				data->block9[nr][LM93_PWM_CTL1]);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_auto_channels, S_IWUSR | S_IRUGO,
+			  show_pwm_auto_channels, store_pwm_auto_channels, 0);
+static SENSOR_DEVICE_ATTR(pwm2_auto_channels, S_IWUSR | S_IRUGO,
+			  show_pwm_auto_channels, store_pwm_auto_channels, 1);
+
+static ssize_t show_pwm_auto_spinup_min(struct device *dev,
+				struct device_attribute *attr,char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	u8 ctl3, ctl4;
+
+	ctl3 = data->block9[nr][LM93_PWM_CTL3];
+	ctl4 = data->block9[nr][LM93_PWM_CTL4];
+	return sprintf(buf,"%d\n",
+		       LM93_PWM_FROM_REG(ctl3 & 0x0f, (ctl4 & 0x07) ?
+			LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ));
+}
+
+static ssize_t store_pwm_auto_spinup_min(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+	u8 ctl3, ctl4;
+
+	mutex_lock(&data->update_lock);
+	ctl3 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3));
+	ctl4 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL4));
+	ctl3 = (ctl3 & 0xf0) | LM93_PWM_TO_REG(val, (ctl4 & 0x07) ?
+			LM93_PWM_MAP_LO_FREQ :
+			LM93_PWM_MAP_HI_FREQ);
+	data->block9[nr][LM93_PWM_CTL3] = ctl3;
+	lm93_write_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3), ctl3);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_auto_spinup_min, S_IWUSR | S_IRUGO,
+			  show_pwm_auto_spinup_min,
+			  store_pwm_auto_spinup_min, 0);
+static SENSOR_DEVICE_ATTR(pwm2_auto_spinup_min, S_IWUSR | S_IRUGO,
+			  show_pwm_auto_spinup_min,
+			  store_pwm_auto_spinup_min, 1);
+
+static ssize_t show_pwm_auto_spinup_time(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",LM93_SPINUP_TIME_FROM_REG(
+				data->block9[nr][LM93_PWM_CTL3]));
+}
+
+static ssize_t store_pwm_auto_spinup_time(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+	u8 ctl3;
+
+	mutex_lock(&data->update_lock);
+	ctl3 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3));
+	ctl3 = (ctl3 & 0x1f) | (LM93_SPINUP_TIME_TO_REG(val) << 5 & 0xe0);
+	data->block9[nr][LM93_PWM_CTL3] = ctl3;
+	lm93_write_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3), ctl3);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_auto_spinup_time, S_IWUSR | S_IRUGO,
+			  show_pwm_auto_spinup_time,
+			  store_pwm_auto_spinup_time, 0);
+static SENSOR_DEVICE_ATTR(pwm2_auto_spinup_time, S_IWUSR | S_IRUGO,
+			  show_pwm_auto_spinup_time,
+			  store_pwm_auto_spinup_time, 1);
+
+static ssize_t show_pwm_auto_prochot_ramp(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",
+		       LM93_RAMP_FROM_REG(data->pwm_ramp_ctl >> 4 & 0x0f));
+}
+
+static ssize_t store_pwm_auto_prochot_ramp(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+	u8 ramp;
+
+	mutex_lock(&data->update_lock);
+	ramp = lm93_read_byte(client, LM93_REG_PWM_RAMP_CTL);
+	ramp = (ramp & 0x0f) | (LM93_RAMP_TO_REG(val) << 4 & 0xf0);
+	lm93_write_byte(client, LM93_REG_PWM_RAMP_CTL, ramp);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static DEVICE_ATTR(pwm_auto_prochot_ramp, S_IRUGO | S_IWUSR,
+			show_pwm_auto_prochot_ramp,
+			store_pwm_auto_prochot_ramp);
+
+static ssize_t show_pwm_auto_vrdhot_ramp(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",
+		       LM93_RAMP_FROM_REG(data->pwm_ramp_ctl & 0x0f));
+}
+
+static ssize_t store_pwm_auto_vrdhot_ramp(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+	u8 ramp;
+
+	mutex_lock(&data->update_lock);
+	ramp = lm93_read_byte(client, LM93_REG_PWM_RAMP_CTL);
+	ramp = (ramp & 0xf0) | (LM93_RAMP_TO_REG(val) & 0x0f);
+	lm93_write_byte(client, LM93_REG_PWM_RAMP_CTL, ramp);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static DEVICE_ATTR(pwm_auto_vrdhot_ramp, S_IRUGO | S_IWUSR,
+			show_pwm_auto_vrdhot_ramp,
+			store_pwm_auto_vrdhot_ramp);
+
+static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",LM93_VID_FROM_REG(data->vid[nr]));
+}
+
+static SENSOR_DEVICE_ATTR(vid1, S_IRUGO, show_vid, NULL, 0);
+static SENSOR_DEVICE_ATTR(vid2, S_IRUGO, show_vid, NULL, 1);
+
+static ssize_t show_prochot(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",data->block4[nr].cur);
+}
+
+static SENSOR_DEVICE_ATTR(prochot1, S_IRUGO, show_prochot, NULL, 0);
+static SENSOR_DEVICE_ATTR(prochot2, S_IRUGO, show_prochot, NULL, 1);
+
+static ssize_t show_prochot_avg(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",data->block4[nr].avg);
+}
+
+static SENSOR_DEVICE_ATTR(prochot1_avg, S_IRUGO, show_prochot_avg, NULL, 0);
+static SENSOR_DEVICE_ATTR(prochot2_avg, S_IRUGO, show_prochot_avg, NULL, 1);
+
+static ssize_t show_prochot_max(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",data->prochot_max[nr]);
+}
+
+static ssize_t store_prochot_max(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	data->prochot_max[nr] = LM93_PROCHOT_TO_REG(val);
+	lm93_write_byte(client, LM93_REG_PROCHOT_MAX(nr),
+			data->prochot_max[nr]);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(prochot1_max, S_IWUSR | S_IRUGO,
+			  show_prochot_max, store_prochot_max, 0);
+static SENSOR_DEVICE_ATTR(prochot2_max, S_IWUSR | S_IRUGO,
+			  show_prochot_max, store_prochot_max, 1);
+
+static const u8 prochot_override_mask[] = { 0x80, 0x40 };
+
+static ssize_t show_prochot_override(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",
+		(data->prochot_override & prochot_override_mask[nr]) ? 1 : 0);
+}
+
+static ssize_t store_prochot_override(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	if (val)
+		data->prochot_override |= prochot_override_mask[nr];
+	else
+		data->prochot_override &= (~prochot_override_mask[nr]);
+	lm93_write_byte(client, LM93_REG_PROCHOT_OVERRIDE,
+			data->prochot_override);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(prochot1_override, S_IWUSR | S_IRUGO,
+			  show_prochot_override, store_prochot_override, 0);
+static SENSOR_DEVICE_ATTR(prochot2_override, S_IWUSR | S_IRUGO,
+			  show_prochot_override, store_prochot_override, 1);
+
+static ssize_t show_prochot_interval(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	u8 tmp;
+	if (nr==1)
+		tmp = (data->prochot_interval & 0xf0) >> 4;
+	else
+		tmp = data->prochot_interval & 0x0f;
+	return sprintf(buf,"%d\n",LM93_INTERVAL_FROM_REG(tmp));
+}
+
+static ssize_t store_prochot_interval(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+	u8 tmp;
+
+	mutex_lock(&data->update_lock);
+	tmp = lm93_read_byte(client, LM93_REG_PROCHOT_INTERVAL);
+	if (nr==1)
+		tmp = (tmp & 0x0f) | (LM93_INTERVAL_TO_REG(val) << 4);
+	else
+		tmp = (tmp & 0xf0) | LM93_INTERVAL_TO_REG(val);
+	data->prochot_interval = tmp;
+	lm93_write_byte(client, LM93_REG_PROCHOT_INTERVAL, tmp);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(prochot1_interval, S_IWUSR | S_IRUGO,
+			  show_prochot_interval, store_prochot_interval, 0);
+static SENSOR_DEVICE_ATTR(prochot2_interval, S_IWUSR | S_IRUGO,
+			  show_prochot_interval, store_prochot_interval, 1);
+
+static ssize_t show_prochot_override_duty_cycle(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",data->prochot_override & 0x0f);
+}
+
+static ssize_t store_prochot_override_duty_cycle(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	data->prochot_override = (data->prochot_override & 0xf0) |
+					SENSORS_LIMIT(val, 0, 15);
+	lm93_write_byte(client, LM93_REG_PROCHOT_OVERRIDE,
+			data->prochot_override);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static DEVICE_ATTR(prochot_override_duty_cycle, S_IRUGO | S_IWUSR,
+			show_prochot_override_duty_cycle,
+			store_prochot_override_duty_cycle);
+
+static ssize_t show_prochot_short(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",(data->config & 0x10) ? 1 : 0);
+}
+
+static ssize_t store_prochot_short(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm93_data *data = i2c_get_clientdata(client);
+	u32 val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	if (val)
+		data->config |= 0x10;
+	else
+		data->config &= ~0x10;
+	lm93_write_byte(client, LM93_REG_CONFIG, data->config);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static DEVICE_ATTR(prochot_short, S_IRUGO | S_IWUSR,
+		   show_prochot_short, store_prochot_short);
+
+static ssize_t show_vrdhot(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	int nr = (to_sensor_dev_attr(attr))->index;
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",
+		       data->block1.host_status_1 & (1 << (nr+4)) ? 1 : 0);
+}
+
+static SENSOR_DEVICE_ATTR(vrdhot1, S_IRUGO, show_vrdhot, NULL, 0);
+static SENSOR_DEVICE_ATTR(vrdhot2, S_IRUGO, show_vrdhot, NULL, 1);
+
+static ssize_t show_gpio(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",LM93_GPI_FROM_REG(data->gpi));
+}
+
+static DEVICE_ATTR(gpio, S_IRUGO, show_gpio, NULL);
+
+static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	struct lm93_data *data = lm93_update_device(dev);
+	return sprintf(buf,"%d\n",LM93_ALARMS_FROM_REG(data->block1));
+}
+
+static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+
+static struct attribute *lm93_attrs[] = {
+	&sensor_dev_attr_in1_input.dev_attr.attr,
+	&sensor_dev_attr_in2_input.dev_attr.attr,
+	&sensor_dev_attr_in3_input.dev_attr.attr,
+	&sensor_dev_attr_in4_input.dev_attr.attr,
+	&sensor_dev_attr_in5_input.dev_attr.attr,
+	&sensor_dev_attr_in6_input.dev_attr.attr,
+	&sensor_dev_attr_in7_input.dev_attr.attr,
+	&sensor_dev_attr_in8_input.dev_attr.attr,
+	&sensor_dev_attr_in9_input.dev_attr.attr,
+	&sensor_dev_attr_in10_input.dev_attr.attr,
+	&sensor_dev_attr_in11_input.dev_attr.attr,
+	&sensor_dev_attr_in12_input.dev_attr.attr,
+	&sensor_dev_attr_in13_input.dev_attr.attr,
+	&sensor_dev_attr_in14_input.dev_attr.attr,
+	&sensor_dev_attr_in15_input.dev_attr.attr,
+	&sensor_dev_attr_in16_input.dev_attr.attr,
+	&sensor_dev_attr_in1_min.dev_attr.attr,
+	&sensor_dev_attr_in2_min.dev_attr.attr,
+	&sensor_dev_attr_in3_min.dev_attr.attr,
+	&sensor_dev_attr_in4_min.dev_attr.attr,
+	&sensor_dev_attr_in5_min.dev_attr.attr,
+	&sensor_dev_attr_in6_min.dev_attr.attr,
+	&sensor_dev_attr_in7_min.dev_attr.attr,
+	&sensor_dev_attr_in8_min.dev_attr.attr,
+	&sensor_dev_attr_in9_min.dev_attr.attr,
+	&sensor_dev_attr_in10_min.dev_attr.attr,
+	&sensor_dev_attr_in11_min.dev_attr.attr,
+	&sensor_dev_attr_in12_min.dev_attr.attr,
+	&sensor_dev_attr_in13_min.dev_attr.attr,
+	&sensor_dev_attr_in14_min.dev_attr.attr,
+	&sensor_dev_attr_in15_min.dev_attr.attr,
+	&sensor_dev_attr_in16_min.dev_attr.attr,
+	&sensor_dev_attr_in1_max.dev_attr.attr,
+	&sensor_dev_attr_in2_max.dev_attr.attr,
+	&sensor_dev_attr_in3_max.dev_attr.attr,
+	&sensor_dev_attr_in4_max.dev_attr.attr,
+	&sensor_dev_attr_in5_max.dev_attr.attr,
+	&sensor_dev_attr_in6_max.dev_attr.attr,
+	&sensor_dev_attr_in7_max.dev_attr.attr,
+	&sensor_dev_attr_in8_max.dev_attr.attr,
+	&sensor_dev_attr_in9_max.dev_attr.attr,
+	&sensor_dev_attr_in10_max.dev_attr.attr,
+	&sensor_dev_attr_in11_max.dev_attr.attr,
+	&sensor_dev_attr_in12_max.dev_attr.attr,
+	&sensor_dev_attr_in13_max.dev_attr.attr,
+	&sensor_dev_attr_in14_max.dev_attr.attr,
+	&sensor_dev_attr_in15_max.dev_attr.attr,
+	&sensor_dev_attr_in16_max.dev_attr.attr,
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp2_input.dev_attr.attr,
+	&sensor_dev_attr_temp3_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_min.dev_attr.attr,
+	&sensor_dev_attr_temp2_min.dev_attr.attr,
+	&sensor_dev_attr_temp3_min.dev_attr.attr,
+	&sensor_dev_attr_temp1_max.dev_attr.attr,
+	&sensor_dev_attr_temp2_max.dev_attr.attr,
+	&sensor_dev_attr_temp3_max.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_base.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_base.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_base.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_boost.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_boost.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_boost.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_boost_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_boost_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_boost_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_offset1.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_offset2.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_offset3.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_offset4.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_offset5.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_offset6.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_offset7.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_offset8.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_offset9.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_offset10.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_offset11.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_offset12.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_offset1.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_offset2.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_offset3.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_offset4.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_offset5.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_offset6.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_offset7.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_offset8.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_offset9.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_offset10.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_offset11.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_offset12.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_offset1.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_offset2.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_offset3.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_offset4.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_offset5.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_offset6.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_offset7.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_offset8.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_offset9.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_offset10.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_offset11.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_offset12.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_pwm_min.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_pwm_min.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_pwm_min.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_offset_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_offset_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_offset_hyst.dev_attr.attr,
+	&sensor_dev_attr_fan1_input.dev_attr.attr,
+	&sensor_dev_attr_fan2_input.dev_attr.attr,
+	&sensor_dev_attr_fan3_input.dev_attr.attr,
+	&sensor_dev_attr_fan4_input.dev_attr.attr,
+	&sensor_dev_attr_fan1_min.dev_attr.attr,
+	&sensor_dev_attr_fan2_min.dev_attr.attr,
+	&sensor_dev_attr_fan3_min.dev_attr.attr,
+	&sensor_dev_attr_fan4_min.dev_attr.attr,
+	&sensor_dev_attr_fan1_smart_tach.dev_attr.attr,
+	&sensor_dev_attr_fan2_smart_tach.dev_attr.attr,
+	&sensor_dev_attr_fan3_smart_tach.dev_attr.attr,
+	&sensor_dev_attr_fan4_smart_tach.dev_attr.attr,
+	&sensor_dev_attr_pwm1.dev_attr.attr,
+	&sensor_dev_attr_pwm2.dev_attr.attr,
+	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm2_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm1_freq.dev_attr.attr,
+	&sensor_dev_attr_pwm2_freq.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_channels.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_channels.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_spinup_min.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_spinup_min.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_spinup_time.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_spinup_time.dev_attr.attr,
+	&dev_attr_pwm_auto_prochot_ramp.attr,
+	&dev_attr_pwm_auto_vrdhot_ramp.attr,
+	&sensor_dev_attr_vid1.dev_attr.attr,
+	&sensor_dev_attr_vid2.dev_attr.attr,
+	&sensor_dev_attr_prochot1.dev_attr.attr,
+	&sensor_dev_attr_prochot2.dev_attr.attr,
+	&sensor_dev_attr_prochot1_avg.dev_attr.attr,
+	&sensor_dev_attr_prochot2_avg.dev_attr.attr,
+	&sensor_dev_attr_prochot1_max.dev_attr.attr,
+	&sensor_dev_attr_prochot2_max.dev_attr.attr,
+	&sensor_dev_attr_prochot1_override.dev_attr.attr,
+	&sensor_dev_attr_prochot2_override.dev_attr.attr,
+	&sensor_dev_attr_prochot1_interval.dev_attr.attr,
+	&sensor_dev_attr_prochot2_interval.dev_attr.attr,
+	&dev_attr_prochot_override_duty_cycle.attr,
+	&dev_attr_prochot_short.attr,
+	&sensor_dev_attr_vrdhot1.dev_attr.attr,
+	&sensor_dev_attr_vrdhot2.dev_attr.attr,
+	&dev_attr_gpio.attr,
+	&dev_attr_alarms.attr,
+	NULL
+};
+
+static struct attribute_group lm93_attr_grp = {
+	.attrs = lm93_attrs,
+};
+
+static void lm93_init_client(struct i2c_client *client)
+{
+	int i;
+	u8 reg;
+
+	/* configure VID pin input thresholds */
+	reg = lm93_read_byte(client, LM93_REG_GPI_VID_CTL);
+	lm93_write_byte(client, LM93_REG_GPI_VID_CTL,
+			reg | (vid_agtl ? 0x03 : 0x00));
+
+	if (init) {
+		/* enable #ALERT pin */
+		reg = lm93_read_byte(client, LM93_REG_CONFIG);
+		lm93_write_byte(client, LM93_REG_CONFIG, reg | 0x08);
+
+		/* enable ASF mode for BMC status registers */
+		reg = lm93_read_byte(client, LM93_REG_STATUS_CONTROL);
+		lm93_write_byte(client, LM93_REG_STATUS_CONTROL, reg | 0x02);
+
+		/* set sleep state to S0 */
+		lm93_write_byte(client, LM93_REG_SLEEP_CONTROL, 0);
+
+		/* unmask #VRDHOT and dynamic VCCP (if nec) error events */
+		reg = lm93_read_byte(client, LM93_REG_MISC_ERR_MASK);
+		reg &= ~0x03;
+		reg &= ~(vccp_limit_type[0] ? 0x10 : 0);
+		reg &= ~(vccp_limit_type[1] ? 0x20 : 0);
+		lm93_write_byte(client, LM93_REG_MISC_ERR_MASK, reg);
+	}
+
+	/* start monitoring */
+	reg = lm93_read_byte(client, LM93_REG_CONFIG);
+	lm93_write_byte(client, LM93_REG_CONFIG, reg | 0x01);
+
+	/* spin until ready */
+	for (i=0; i<20; i++) {
+		msleep(10);
+		if ((lm93_read_byte(client, LM93_REG_CONFIG) & 0x80) == 0x80)
+			return;
+	}
+
+	dev_warn(&client->dev,"timed out waiting for sensor "
+		 "chip to signal ready!\n");
+}
+
+static int lm93_detect(struct i2c_adapter *adapter, int address, int kind)
+{
+	struct lm93_data *data;
+	struct i2c_client *client;
+
+	int err = -ENODEV, func;
+	void (*update)(struct lm93_data *, struct i2c_client *);
+
+	/* choose update routine based on bus capabilities */
+	func = i2c_get_functionality(adapter);
+	if (((LM93_SMBUS_FUNC_FULL & func) == LM93_SMBUS_FUNC_FULL) &&
+			!disable_block) {
+		dev_dbg(&adapter->dev,"using SMBus block data transactions\n");
+		update = lm93_update_client_full;
+	} else if ((LM93_SMBUS_FUNC_MIN & func) == LM93_SMBUS_FUNC_MIN) {
+		dev_dbg(&adapter->dev,"disabled SMBus block data "
+			"transactions\n");
+		update = lm93_update_client_min;
+	} else {
+		dev_dbg(&adapter->dev,"detect failed, "
+			"smbus byte and/or word data not supported!\n");
+		goto err_out;
+	}
+
+	/* OK. For now, we presume we have a valid client. We now create the
+	   client structure, even though we cannot fill it completely yet.
+	   But it allows us to access lm93_{read,write}_byte. */
+
+	if (!(data = kzalloc(sizeof(struct lm93_data), GFP_KERNEL))) {
+		dev_dbg(&adapter->dev,"out of memory!\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	client = &data->client;
+	i2c_set_clientdata(client, data);
+	client->addr = address;
+	client->adapter = adapter;
+	client->driver = &lm93_driver;
+
+	/* detection */
+	if (kind < 0) {
+		int mfr = lm93_read_byte(client, LM93_REG_MFR_ID);
+
+		if (mfr != 0x01) {
+			dev_dbg(&adapter->dev,"detect failed, "
+				"bad manufacturer id 0x%02x!\n", mfr);
+			goto err_free;
+		}
+	}
+
+	if (kind <= 0) {
+		int ver = lm93_read_byte(client, LM93_REG_VER);
+
+		if ((ver == LM93_MFR_ID) || (ver == LM93_MFR_ID_PROTOTYPE)) {
+			kind = lm93;
+		} else {
+			dev_dbg(&adapter->dev,"detect failed, "
+				"bad version id 0x%02x!\n", ver);
+			if (kind == 0)
+				dev_dbg(&adapter->dev,
+					"(ignored 'force' parameter)\n");
+			goto err_free;
+		}
+	}
+
+	/* fill in remaining client fields */
+	strlcpy(client->name, "lm93", I2C_NAME_SIZE);
+	dev_dbg(&adapter->dev,"loading %s at %d,0x%02x\n",
+		client->name, i2c_adapter_id(client->adapter),
+		client->addr);
+
+	/* housekeeping */
+	data->valid = 0;
+	data->update = update;
+	mutex_init(&data->update_lock);
+
+	/* tell the I2C layer a new client has arrived */
+	if ((err = i2c_attach_client(client)))
+		goto err_free;
+
+	/* initialize the chip */
+	lm93_init_client(client);
+
+	err = sysfs_create_group(&client->dev.kobj, &lm93_attr_grp);
+	if (err)
+		goto err_detach;
+
+	/* Register hwmon driver class */
+	data->class_dev = hwmon_device_register(&client->dev);
+	if (!IS_ERR(data->class_dev))
+		return 0;
+
+	err = PTR_ERR(data->class_dev);
+	dev_err(&client->dev, "error registering hwmon device.\n");
+	sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp);
+err_detach:
+	i2c_detach_client(client);
+err_free:
+	kfree(data);
+err_out:
+	return err;
+}
+
+/* This function is called when:
+     * lm93_driver is inserted (when this module is loaded), for each
+       available adapter
+     * when a new adapter is inserted (and lm93_driver is still present) */
+static int lm93_attach_adapter(struct i2c_adapter *adapter)
+{
+	return i2c_probe(adapter, &addr_data, lm93_detect);
+}
+
+static int lm93_detach_client(struct i2c_client *client)
+{
+	struct lm93_data *data = i2c_get_clientdata(client);
+	int err = 0;
+
+	hwmon_device_unregister(data->class_dev);
+	sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp);
+
+	err = i2c_detach_client(client);
+	if (!err)
+		kfree(data);
+	return err;
+}
+
+static struct i2c_driver lm93_driver = {
+	.driver = {
+		.name	= "lm93",
+	},
+	.attach_adapter	= lm93_attach_adapter,
+	.detach_client	= lm93_detach_client,
+};
+
+static int __init lm93_init(void)
+{
+	return i2c_add_driver(&lm93_driver);
+}
+
+static void __exit lm93_exit(void)
+{
+	i2c_del_driver(&lm93_driver);
+}
+
+MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>, "
+		"Hans J. Koch <hjk@linutronix.de");
+MODULE_DESCRIPTION("LM93 driver");
+MODULE_LICENSE("GPL");
+
+module_init(lm93_init);
+module_exit(lm93_exit);
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index c8a21be..cb72526c 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1,7 +1,7 @@
 /*
  *  pc87360.c - Part of lm_sensors, Linux kernel modules
  *              for hardware monitoring
- *  Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
+ *  Copyright (C) 2004, 2007 Jean Delvare <khali@linux-fr.org>
  *
  *  Copied from smsc47m1.c:
  *  Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
@@ -37,8 +37,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/i2c-isa.h>
+#include <linux/platform_device.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/hwmon-vid.h>
@@ -47,12 +46,10 @@
 #include <asm/io.h>
 
 static u8 devid;
-static unsigned short address;
+static struct platform_device *pdev;
 static unsigned short extra_isa[3];
 static u8 confreg[4];
 
-enum chips { any_chip, pc87360, pc87363, pc87364, pc87365, pc87366 };
-
 static int init = 1;
 module_param(init, int, 0);
 MODULE_PARM_DESC(init,
@@ -178,11 +175,11 @@
 					 ((val) + 500) / 1000)
 
 /*
- * Client data (each client gets its own)
+ * Device data
  */
 
 struct pc87360_data {
-	struct i2c_client client;
+	const char *name;
 	struct class_device *class_dev;
 	struct mutex lock;
 	struct mutex update_lock;
@@ -222,27 +219,28 @@
  * Functions declaration
  */
 
-static int pc87360_detect(struct i2c_adapter *adapter);
-static int pc87360_detach_client(struct i2c_client *client);
+static int pc87360_probe(struct platform_device *pdev);
+static int pc87360_remove(struct platform_device *pdev);
 
 static int pc87360_read_value(struct pc87360_data *data, u8 ldi, u8 bank,
 			      u8 reg);
 static void pc87360_write_value(struct pc87360_data *data, u8 ldi, u8 bank,
 				u8 reg, u8 value);
-static void pc87360_init_client(struct i2c_client *client, int use_thermistors);
+static void pc87360_init_device(struct platform_device *pdev,
+				int use_thermistors);
 static struct pc87360_data *pc87360_update_device(struct device *dev);
 
 /*
- * Driver data (common to all clients)
+ * Driver data
  */
 
-static struct i2c_driver pc87360_driver = {
+static struct platform_driver pc87360_driver = {
 	.driver = {
 		.owner	= THIS_MODULE,
 		.name	= "pc87360",
 	},
-	.attach_adapter	= pc87360_detect,
-	.detach_client	= pc87360_detach_client,
+	.probe		= pc87360_probe,
+	.remove		= __devexit_p(pc87360_remove),
 };
 
 /*
@@ -281,8 +279,7 @@
 	size_t count)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	struct i2c_client *client = to_i2c_client(dev);
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = dev_get_drvdata(dev);
 	long fan_min = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
@@ -347,8 +344,7 @@
 	size_t count)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	struct i2c_client *client = to_i2c_client(dev);
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = dev_get_drvdata(dev);
 	long val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
@@ -410,8 +406,7 @@
 	size_t count)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	struct i2c_client *client = to_i2c_client(dev);
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = dev_get_drvdata(dev);
 	long val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
@@ -425,8 +420,7 @@
 	size_t count)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	struct i2c_client *client = to_i2c_client(dev);
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = dev_get_drvdata(dev);
 	long val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
@@ -511,8 +505,7 @@
 }
 static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = dev_get_drvdata(dev);
 	data->vrm = simple_strtoul(buf, NULL, 10);
 	return count;
 }
@@ -584,8 +577,7 @@
 	size_t count)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	struct i2c_client *client = to_i2c_client(dev);
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = dev_get_drvdata(dev);
 	long val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
@@ -599,8 +591,7 @@
 	size_t count)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	struct i2c_client *client = to_i2c_client(dev);
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = dev_get_drvdata(dev);
 	long val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
@@ -614,8 +605,7 @@
 	size_t count)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	struct i2c_client *client = to_i2c_client(dev);
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = dev_get_drvdata(dev);
 	long val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
@@ -715,8 +705,7 @@
 	size_t count)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	struct i2c_client *client = to_i2c_client(dev);
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = dev_get_drvdata(dev);
 	long val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
@@ -730,8 +719,7 @@
 	size_t count)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	struct i2c_client *client = to_i2c_client(dev);
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = dev_get_drvdata(dev);
 	long val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
@@ -745,8 +733,7 @@
 	size_t count)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	struct i2c_client *client = to_i2c_client(dev);
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = dev_get_drvdata(dev);
 	long val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
@@ -818,6 +805,14 @@
 	.attrs = pc8736x_temp_attr_array,
 };
 
+static ssize_t show_name(struct device *dev, struct device_attribute
+			 *devattr, char *buf)
+{
+	struct pc87360_data *data = dev_get_drvdata(dev);
+	return sprintf(buf, "%s\n", data->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
 /*
  * Device detection, registration and update
  */
@@ -912,28 +907,18 @@
 	return 0;
 }
 
-static int pc87360_detect(struct i2c_adapter *adapter)
+static int __devinit pc87360_probe(struct platform_device *pdev)
 {
 	int i;
-	struct i2c_client *client;
 	struct pc87360_data *data;
 	int err = 0;
 	const char *name = "pc87360";
 	int use_thermistors = 0;
-	struct device *dev;
+	struct device *dev = &pdev->dev;
 
 	if (!(data = kzalloc(sizeof(struct pc87360_data), GFP_KERNEL)))
 		return -ENOMEM;
 
-	client = &data->client;
-	dev = &client->dev;
-	i2c_set_clientdata(client, data);
-	client->addr = address;
-	mutex_init(&data->lock);
-	client->adapter = adapter;
-	client->driver = &pc87360_driver;
-	client->flags = 0;
-
 	data->fannr = 2;
 	data->innr = 0;
 	data->tempnr = 0;
@@ -960,15 +945,17 @@
 		break;
 	}
 
-	strlcpy(client->name, name, sizeof(client->name));
+	data->name = name;
 	data->valid = 0;
+	mutex_init(&data->lock);
 	mutex_init(&data->update_lock);
+	platform_set_drvdata(pdev, data);
 
 	for (i = 0; i < 3; i++) {
 		if (((data->address[i] = extra_isa[i]))
 		 && !request_region(extra_isa[i], PC87360_EXTENT,
 		 		    pc87360_driver.driver.name)) {
-			dev_err(&client->dev, "Region 0x%x-0x%x already "
+			dev_err(dev, "Region 0x%x-0x%x already "
 				"in use!\n", extra_isa[i],
 				extra_isa[i]+PC87360_EXTENT-1);
 			for (i--; i >= 0; i--)
@@ -982,9 +969,6 @@
 	if (data->fannr)
 		data->fan_conf = confreg[0] | (confreg[1] << 8);
 
-	if ((err = i2c_attach_client(client)))
-		goto ERROR2;
-
 	/* Use the correct reference voltage
 	   Unless both the VLM and the TMS logical devices agree to
 	   use an external Vref, the internal one is used. */
@@ -996,7 +980,7 @@
 						PC87365_REG_TEMP_CONFIG);
 		}
 		data->in_vref = (i&0x02) ? 3025 : 2966;
-		dev_dbg(&client->dev, "Using %s reference voltage\n",
+		dev_dbg(dev, "Using %s reference voltage\n",
 			(i&0x02) ? "external" : "internal");
 
 		data->vid_conf = confreg[3];
@@ -1015,18 +999,18 @@
 		if (devid == 0xe9 && data->address[1]) /* PC87366 */
 			use_thermistors = confreg[2] & 0x40;
 
-		pc87360_init_client(client, use_thermistors);
+		pc87360_init_device(pdev, use_thermistors);
 	}
 
 	/* Register all-or-nothing sysfs groups */
 
 	if (data->innr &&
-	    (err = sysfs_create_group(&client->dev.kobj,
+	    (err = sysfs_create_group(&dev->kobj,
 				      &pc8736x_vin_group)))
 		goto ERROR3;
 
 	if (data->innr == 14 &&
-	    (err = sysfs_create_group(&client->dev.kobj,
+	    (err = sysfs_create_group(&dev->kobj,
 				      &pc8736x_therm_group)))
 		goto ERROR3;
 
@@ -1067,7 +1051,10 @@
 			goto ERROR3;
 	}
 
-	data->class_dev = hwmon_device_register(&client->dev);
+	if ((err = device_create_file(dev, &dev_attr_name)))
+		goto ERROR3;
+
+	data->class_dev = hwmon_device_register(dev);
 	if (IS_ERR(data->class_dev)) {
 		err = PTR_ERR(data->class_dev);
 		goto ERROR3;
@@ -1075,14 +1062,12 @@
 	return 0;
 
 ERROR3:
+	device_remove_file(dev, &dev_attr_name);
 	/* can still remove groups whose members were added individually */
-	sysfs_remove_group(&client->dev.kobj, &pc8736x_temp_group);
-	sysfs_remove_group(&client->dev.kobj, &pc8736x_fan_group);
-	sysfs_remove_group(&client->dev.kobj, &pc8736x_therm_group);
-	sysfs_remove_group(&client->dev.kobj, &pc8736x_vin_group);
-
-	i2c_detach_client(client);
-ERROR2:
+	sysfs_remove_group(&dev->kobj, &pc8736x_temp_group);
+	sysfs_remove_group(&dev->kobj, &pc8736x_fan_group);
+	sysfs_remove_group(&dev->kobj, &pc8736x_therm_group);
+	sysfs_remove_group(&dev->kobj, &pc8736x_vin_group);
 	for (i = 0; i < 3; i++) {
 		if (data->address[i]) {
 			release_region(data->address[i], PC87360_EXTENT);
@@ -1093,20 +1078,18 @@
 	return err;
 }
 
-static int pc87360_detach_client(struct i2c_client *client)
+static int __devexit pc87360_remove(struct platform_device *pdev)
 {
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = platform_get_drvdata(pdev);
 	int i;
 
 	hwmon_device_unregister(data->class_dev);
 
-	sysfs_remove_group(&client->dev.kobj, &pc8736x_temp_group);
-	sysfs_remove_group(&client->dev.kobj, &pc8736x_fan_group);
-	sysfs_remove_group(&client->dev.kobj, &pc8736x_therm_group);
-	sysfs_remove_group(&client->dev.kobj, &pc8736x_vin_group);
-
-	if ((i = i2c_detach_client(client)))
-		return i;
+	device_remove_file(&pdev->dev, &dev_attr_name);
+	sysfs_remove_group(&pdev->dev.kobj, &pc8736x_temp_group);
+	sysfs_remove_group(&pdev->dev.kobj, &pc8736x_fan_group);
+	sysfs_remove_group(&pdev->dev.kobj, &pc8736x_therm_group);
+	sysfs_remove_group(&pdev->dev.kobj, &pc8736x_vin_group);
 
 	for (i = 0; i < 3; i++) {
 		if (data->address[i]) {
@@ -1144,9 +1127,10 @@
 	mutex_unlock(&(data->lock));
 }
 
-static void pc87360_init_client(struct i2c_client *client, int use_thermistors)
+static void pc87360_init_device(struct platform_device *pdev,
+				int use_thermistors)
 {
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = platform_get_drvdata(pdev);
 	int i, nr;
 	const u8 init_in[14] = { 2, 2, 2, 2, 2, 2, 2, 1, 1, 3, 1, 2, 2, 2 };
 	const u8 init_temp[3] = { 2, 2, 1 };
@@ -1155,7 +1139,7 @@
 	if (init >= 2 && data->innr) {
 		reg = pc87360_read_value(data, LD_IN, NO_BANK,
 					 PC87365_REG_IN_CONVRATE);
-		dev_info(&client->dev, "VLM conversion set to "
+		dev_info(&pdev->dev, "VLM conversion set to "
 			 "1s period, 160us delay\n");
 		pc87360_write_value(data, LD_IN, NO_BANK,
 				    PC87365_REG_IN_CONVRATE,
@@ -1169,7 +1153,7 @@
 			reg = pc87360_read_value(data, LD_IN, i,
 						 PC87365_REG_IN_STATUS);
 			if (!(reg & 0x01)) {
-				dev_dbg(&client->dev, "Forcibly "
+				dev_dbg(&pdev->dev, "Forcibly "
 					"enabling in%d\n", i);
 				pc87360_write_value(data, LD_IN, i,
 						    PC87365_REG_IN_STATUS,
@@ -1193,7 +1177,7 @@
 			reg = pc87360_read_value(data, LD_TEMP, i,
 						 PC87365_REG_TEMP_STATUS);
 			if (!(reg & 0x01)) {
-				dev_dbg(&client->dev, "Forcibly "
+				dev_dbg(&pdev->dev, "Forcibly "
 					"enabling temp%d\n", i+1);
 				pc87360_write_value(data, LD_TEMP, i,
 						    PC87365_REG_TEMP_STATUS,
@@ -1210,7 +1194,7 @@
 				reg = pc87360_read_value(data, LD_TEMP,
 				      (i-11)/2, PC87365_REG_TEMP_STATUS);
 				if (reg & 0x01) {
-					dev_dbg(&client->dev, "Skipping "
+					dev_dbg(&pdev->dev, "Skipping "
 						"temp%d, pin already in use "
 						"by temp%d\n", i-7, (i-11)/2);
 					continue;
@@ -1220,7 +1204,7 @@
 				reg = pc87360_read_value(data, LD_IN, i,
 							 PC87365_REG_IN_STATUS);
 				if (!(reg & 0x01)) {
-					dev_dbg(&client->dev, "Forcibly "
+					dev_dbg(&pdev->dev, "Forcibly "
 						"enabling temp%d\n", i-7);
 					pc87360_write_value(data, LD_IN, i,
 						PC87365_REG_TEMP_STATUS,
@@ -1234,7 +1218,7 @@
 		reg = pc87360_read_value(data, LD_IN, NO_BANK,
 					 PC87365_REG_IN_CONFIG);
 		if (reg & 0x01) {
-			dev_dbg(&client->dev, "Forcibly "
+			dev_dbg(&pdev->dev, "Forcibly "
 				"enabling monitoring (VLM)\n");
 			pc87360_write_value(data, LD_IN, NO_BANK,
 					    PC87365_REG_IN_CONFIG,
@@ -1246,7 +1230,7 @@
 		reg = pc87360_read_value(data, LD_TEMP, NO_BANK,
 					 PC87365_REG_TEMP_CONFIG);
 		if (reg & 0x01) {
-			dev_dbg(&client->dev, "Forcibly enabling "
+			dev_dbg(&pdev->dev, "Forcibly enabling "
 				"monitoring (TMS)\n");
 			pc87360_write_value(data, LD_TEMP, NO_BANK,
 					    PC87365_REG_TEMP_CONFIG,
@@ -1268,9 +1252,9 @@
 	}
 }
 
-static void pc87360_autodiv(struct i2c_client *client, int nr)
+static void pc87360_autodiv(struct device *dev, int nr)
 {
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = dev_get_drvdata(dev);
 	u8 old_min = data->fan_min[nr];
 
 	/* Increase clock divider if needed and possible */
@@ -1280,7 +1264,7 @@
 			data->fan_status[nr] += 0x20;
 			data->fan_min[nr] >>= 1;
 			data->fan[nr] >>= 1;
-			dev_dbg(&client->dev, "Increasing "
+			dev_dbg(dev, "Increasing "
 				"clock divider to %d for fan %d\n",
 				FAN_DIV_FROM_REG(data->fan_status[nr]), nr+1);
 		}
@@ -1292,7 +1276,7 @@
 			data->fan_status[nr] -= 0x20;
 			data->fan_min[nr] <<= 1;
 			data->fan[nr] <<= 1;
-			dev_dbg(&client->dev, "Decreasing "
+			dev_dbg(dev, "Decreasing "
 				"clock divider to %d for fan %d\n",
 				FAN_DIV_FROM_REG(data->fan_status[nr]),
 				nr+1);
@@ -1309,14 +1293,13 @@
 
 static struct pc87360_data *pc87360_update_device(struct device *dev)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct pc87360_data *data = i2c_get_clientdata(client);
+	struct pc87360_data *data = dev_get_drvdata(dev);
 	u8 i;
 
 	mutex_lock(&data->update_lock);
 
 	if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
-		dev_dbg(&client->dev, "Data update\n");
+		dev_dbg(dev, "Data update\n");
 
 		/* Fans */
 		for (i = 0; i < data->fannr; i++) {
@@ -1330,7 +1313,7 @@
 						   LD_FAN, NO_BANK,
 						   PC87360_REG_FAN_MIN(i));
 				/* Change clock divider if needed */
-				pc87360_autodiv(client, i);
+				pc87360_autodiv(dev, i);
 				/* Clear bits and write new divider */
 				pc87360_write_value(data, LD_FAN, NO_BANK,
 						    PC87360_REG_FAN_STATUS(i),
@@ -1418,9 +1401,53 @@
 	return data;
 }
 
+static int __init pc87360_device_add(unsigned short address)
+{
+	struct resource res = {
+		.name	= "pc87360",
+		.flags	= IORESOURCE_IO,
+	};
+	int err, i;
+
+	pdev = platform_device_alloc("pc87360", address);
+	if (!pdev) {
+		err = -ENOMEM;
+		printk(KERN_ERR "pc87360: Device allocation failed\n");
+		goto exit;
+	}
+
+	for (i = 0; i < 3; i++) {
+		if (!extra_isa[i])
+			continue;
+		res.start = extra_isa[i];
+		res.end = extra_isa[i] + PC87360_EXTENT - 1;
+		err = platform_device_add_resources(pdev, &res, 1);
+		if (err) {
+			printk(KERN_ERR "pc87360: Device resource[%d] "
+			       "addition failed (%d)\n", i, err);
+			goto exit_device_put;
+		}
+	}
+
+	err = platform_device_add(pdev);
+	if (err) {
+		printk(KERN_ERR "pc87360: Device addition failed (%d)\n",
+		       err);
+		goto exit_device_put;
+	}
+
+	return 0;
+
+exit_device_put:
+	platform_device_put(pdev);
+exit:
+	return err;
+}
+
 static int __init pc87360_init(void)
 {
-	int i;
+	int err, i;
+	unsigned short address = 0;
 
 	if (pc87360_find(0x2e, &devid, extra_isa)
 	 && pc87360_find(0x4e, &devid, extra_isa)) {
@@ -1443,12 +1470,27 @@
 		return -ENODEV;
 	}
 
-	return i2c_isa_add_driver(&pc87360_driver);
+	err = platform_driver_register(&pc87360_driver);
+	if (err)
+		goto exit;
+
+	/* Sets global pdev as a side effect */
+	err = pc87360_device_add(address);
+	if (err)
+		goto exit_driver;
+
+	return 0;
+
+ exit_driver:
+	platform_driver_unregister(&pc87360_driver);
+ exit:
+	return err;
 }
 
 static void __exit pc87360_exit(void)
 {
-	i2c_isa_del_driver(&pc87360_driver);
+	platform_device_unregister(pdev);
+	platform_driver_unregister(&pc87360_driver);
 }
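
The pc87360_device_add() helper above follows the standard three-step registration sequence for a platform device backing a legacy ISA-mapped chip: allocate, attach I/O resources, then add. A minimal sketch of that sequence, using an illustrative "example" driver name and an assumed 8-byte I/O extent:

	#include <linux/platform_device.h>
	#include <linux/ioport.h>

	static struct platform_device *pdev;	/* module-global, as in the drivers above */

	static int __init example_device_add(unsigned short address)
	{
		struct resource res = {
			.start	= address,
			.end	= address + 8 - 1,	/* illustrative I/O extent */
			.name	= "example",
			.flags	= IORESOURCE_IO,
		};
		int err;

		pdev = platform_device_alloc("example", address);
		if (!pdev)
			return -ENOMEM;

		/* Resources must be attached before the device is added */
		err = platform_device_add_resources(pdev, &res, 1);
		if (err)
			goto exit_put;

		/* Makes the device visible; the driver's probe() may run from here on */
		err = platform_device_add(pdev);
		if (err)
			goto exit_put;
		return 0;

	exit_put:
		platform_device_put(pdev);	/* drops the reference taken by _alloc() */
		return err;
	}

The module exit path pairs this with platform_device_unregister(pdev), as pc87360_exit() does above.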
 
 
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 29354fa..2915bc4 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -484,7 +484,6 @@
 	struct resource *res;
 	int i;
 
-	platform_set_drvdata(pdev, NULL);
 	hwmon_device_unregister(data->class_dev);
 	device_remove_file(&pdev->dev, &dev_attr_name);
 	for (i = 0; i < 8; i++) {
@@ -492,6 +491,7 @@
 			continue;
 		sysfs_remove_group(&pdev->dev.kobj, &pc87427_group_fan[i]);
 	}
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 
 	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
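
The two pc87427 hunks above only reorder the tear-down: platform_set_drvdata(pdev, NULL) now runs after the last user of data (the sysfs group removal) and immediately before kfree(), so driver data stays valid while attributes are still registered. The resulting remove() ordering, sketched with placeholder names:

	static int __devexit example_remove(struct platform_device *pdev)
	{
		struct example_data *data = platform_get_drvdata(pdev);
		struct resource *res;

		hwmon_device_unregister(data->class_dev);	/* no new sysfs readers */
		sysfs_remove_group(&pdev->dev.kobj, &example_group);

		platform_set_drvdata(pdev, NULL);	/* nothing dereferences 'data' past here */
		kfree(data);

		res = platform_get_resource(pdev, IORESOURCE_IO, 0);
		release_region(res->start, EXAMPLE_EXTENT);
		return 0;
	}

The same reordering is applied to smsc47m1 further down.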
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 3f40026..83321b2 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -54,9 +54,9 @@
 #include <linux/slab.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
-#include <linux/i2c.h>
-#include <linux/i2c-isa.h>
+#include <linux/platform_device.h>
 #include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
@@ -72,17 +72,13 @@
 MODULE_PARM_DESC(force_addr,
 		 "Initialize the base address of the sensors");
 
-/* Device address
-   Note that we can't determine the ISA address until we have initialized
-   our module */
-static unsigned short address;
+static struct platform_device *pdev;
 
 /* Many SIS5595 constants specified below */
 
 /* Length of ISA address segment */
 #define SIS5595_EXTENT 8
 /* PCI Config Registers */
-#define SIS5595_REVISION_REG 0x08
 #define SIS5595_BASE_REG 0x68
 #define SIS5595_PIN_REG 0x7A
 #define SIS5595_ENABLE_REG 0x7B
@@ -165,7 +161,8 @@
 /* For each registered chip, we need to keep some data in memory.
    The structure is dynamically allocated. */
 struct sis5595_data {
-	struct i2c_client client;
+	unsigned short addr;
+	const char *name;
 	struct class_device *class_dev;
 	struct mutex lock;
 
@@ -189,102 +186,88 @@
 
 static struct pci_dev *s_bridge;	/* pointer to the (only) sis5595 */
 
-static int sis5595_detect(struct i2c_adapter *adapter);
-static int sis5595_detach_client(struct i2c_client *client);
+static int sis5595_probe(struct platform_device *pdev);
+static int sis5595_remove(struct platform_device *pdev);
 
-static int sis5595_read_value(struct i2c_client *client, u8 reg);
-static int sis5595_write_value(struct i2c_client *client, u8 reg, u8 value);
+static int sis5595_read_value(struct sis5595_data *data, u8 reg);
+static void sis5595_write_value(struct sis5595_data *data, u8 reg, u8 value);
 static struct sis5595_data *sis5595_update_device(struct device *dev);
-static void sis5595_init_client(struct i2c_client *client);
+static void sis5595_init_device(struct sis5595_data *data);
 
-static struct i2c_driver sis5595_driver = {
+static struct platform_driver sis5595_driver = {
 	.driver = {
 		.owner	= THIS_MODULE,
 		.name	= "sis5595",
 	},
-	.attach_adapter	= sis5595_detect,
-	.detach_client	= sis5595_detach_client,
+	.probe		= sis5595_probe,
+	.remove		= __devexit_p(sis5595_remove),
 };
 
 /* 4 Voltages */
-static ssize_t show_in(struct device *dev, char *buf, int nr)
+static ssize_t show_in(struct device *dev, struct device_attribute *da,
+		       char *buf)
 {
 	struct sis5595_data *data = sis5595_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf, "%d\n", IN_FROM_REG(data->in[nr]));
 }
 
-static ssize_t show_in_min(struct device *dev, char *buf, int nr)
+static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
+			   char *buf)
 {
 	struct sis5595_data *data = sis5595_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[nr]));
 }
 
-static ssize_t show_in_max(struct device *dev, char *buf, int nr)
+static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
+			   char *buf)
 {
 	struct sis5595_data *data = sis5595_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[nr]));
 }
 
-static ssize_t set_in_min(struct device *dev, const char *buf,
-	       size_t count, int nr)
+static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
+			  const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct sis5595_data *data = i2c_get_clientdata(client);
+	struct sis5595_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->in_min[nr] = IN_TO_REG(val);
-	sis5595_write_value(client, SIS5595_REG_IN_MIN(nr), data->in_min[nr]);
+	sis5595_write_value(data, SIS5595_REG_IN_MIN(nr), data->in_min[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
 
-static ssize_t set_in_max(struct device *dev, const char *buf,
-	       size_t count, int nr)
+static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
+			  const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct sis5595_data *data = i2c_get_clientdata(client);
+	struct sis5595_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->in_max[nr] = IN_TO_REG(val);
-	sis5595_write_value(client, SIS5595_REG_IN_MAX(nr), data->in_max[nr]);
+	sis5595_write_value(data, SIS5595_REG_IN_MAX(nr), data->in_max[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
 
 #define show_in_offset(offset)					\
-static ssize_t							\
-	show_in##offset (struct device *dev, struct device_attribute *attr, char *buf)		\
-{								\
-	return show_in(dev, buf, offset);			\
-}								\
-static DEVICE_ATTR(in##offset##_input, S_IRUGO, 		\
-		show_in##offset, NULL);				\
-static ssize_t							\
-	show_in##offset##_min (struct device *dev, struct device_attribute *attr, char *buf)	\
-{								\
-	return show_in_min(dev, buf, offset);			\
-}								\
-static ssize_t							\
-	show_in##offset##_max (struct device *dev, struct device_attribute *attr, char *buf)	\
-{								\
-	return show_in_max(dev, buf, offset);			\
-}								\
-static ssize_t set_in##offset##_min (struct device *dev, struct device_attribute *attr,	\
-		const char *buf, size_t count)			\
-{								\
-	return set_in_min(dev, buf, count, offset);		\
-}								\
-static ssize_t set_in##offset##_max (struct device *dev, struct device_attribute *attr,	\
-		const char *buf, size_t count)			\
-{								\
-	return set_in_max(dev, buf, count, offset);		\
-}								\
-static DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR,		\
-		show_in##offset##_min, set_in##offset##_min);	\
-static DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR,		\
-		show_in##offset##_max, set_in##offset##_max);
+static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO,		\
+		show_in, NULL, offset);				\
+static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR,	\
+		show_in_min, set_in_min, offset);		\
+static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR,	\
+		show_in_max, set_in_max, offset);
 
 show_in_offset(0);
 show_in_offset(1);
@@ -307,13 +290,12 @@
 
 static ssize_t set_temp_over(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct sis5595_data *data = i2c_get_clientdata(client);
+	struct sis5595_data *data = dev_get_drvdata(dev);
 	long val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->temp_over = TEMP_TO_REG(val);
-	sis5595_write_value(client, SIS5595_REG_TEMP_OVER, data->temp_over);
+	sis5595_write_value(data, SIS5595_REG_TEMP_OVER, data->temp_over);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -326,13 +308,12 @@
 
 static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct sis5595_data *data = i2c_get_clientdata(client);
+	struct sis5595_data *data = dev_get_drvdata(dev);
 	long val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->temp_hyst = TEMP_TO_REG(val);
-	sis5595_write_value(client, SIS5595_REG_TEMP_HYST, data->temp_hyst);
+	sis5595_write_value(data, SIS5595_REG_TEMP_HYST, data->temp_hyst);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -344,37 +325,47 @@
 		show_temp_hyst, set_temp_hyst);
 
 /* 2 Fans */
-static ssize_t show_fan(struct device *dev, char *buf, int nr)
+static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+			char *buf)
 {
 	struct sis5595_data *data = sis5595_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
 		DIV_FROM_REG(data->fan_div[nr])) );
 }
 
-static ssize_t show_fan_min(struct device *dev, char *buf, int nr)
+static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+			    char *buf)
 {
 	struct sis5595_data *data = sis5595_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf,"%d\n", FAN_FROM_REG(data->fan_min[nr],
 		DIV_FROM_REG(data->fan_div[nr])) );
 }
 
-static ssize_t set_fan_min(struct device *dev, const char *buf,
-		size_t count, int nr)
+static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
+			   const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct sis5595_data *data = i2c_get_clientdata(client);
+	struct sis5595_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
-	sis5595_write_value(client, SIS5595_REG_FAN_MIN(nr), data->fan_min[nr]);
+	sis5595_write_value(data, SIS5595_REG_FAN_MIN(nr), data->fan_min[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
 
-static ssize_t show_fan_div(struct device *dev, char *buf, int nr)
+static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
+			    char *buf)
 {
 	struct sis5595_data *data = sis5595_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]) );
 }
 
@@ -382,11 +373,12 @@
    determined in part by the fan divisor.  This follows the principle of
    least surprise; the user doesn't expect the fan minimum to change just
    because the divisor changed. */
-static ssize_t set_fan_div(struct device *dev, const char *buf,
-	size_t count, int nr)
+static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
+			   const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct sis5595_data *data = i2c_get_clientdata(client);
+	struct sis5595_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	unsigned long min;
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 	int reg;
@@ -394,7 +386,7 @@
 	mutex_lock(&data->update_lock);
 	min = FAN_FROM_REG(data->fan_min[nr],
 			DIV_FROM_REG(data->fan_div[nr]));
-	reg = sis5595_read_value(client, SIS5595_REG_FANDIV);
+	reg = sis5595_read_value(data, SIS5595_REG_FANDIV);
 
 	switch (val) {
 	case 1: data->fan_div[nr] = 0; break;
@@ -402,7 +394,7 @@
 	case 4: data->fan_div[nr] = 2; break;
 	case 8: data->fan_div[nr] = 3; break;
 	default:
-		dev_err(&client->dev, "fan_div value %ld not "
+		dev_err(dev, "fan_div value %ld not "
 			"supported. Choose one of 1, 2, 4 or 8!\n", val);
 		mutex_unlock(&data->update_lock);
 		return -EINVAL;
@@ -416,55 +408,25 @@
 		reg = (reg & 0x3f) | (data->fan_div[nr] << 6);
 		break;
 	}
-	sis5595_write_value(client, SIS5595_REG_FANDIV, reg);
+	sis5595_write_value(data, SIS5595_REG_FANDIV, reg);
 	data->fan_min[nr] =
 		FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
-	sis5595_write_value(client, SIS5595_REG_FAN_MIN(nr), data->fan_min[nr]);
+	sis5595_write_value(data, SIS5595_REG_FAN_MIN(nr), data->fan_min[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
 
 #define show_fan_offset(offset)						\
-static ssize_t show_fan_##offset (struct device *dev, struct device_attribute *attr, char *buf)	\
-{									\
-	return show_fan(dev, buf, offset - 1);			\
-}									\
-static ssize_t show_fan_##offset##_min (struct device *dev, struct device_attribute *attr, char *buf)	\
-{									\
-	return show_fan_min(dev, buf, offset - 1);			\
-}									\
-static ssize_t show_fan_##offset##_div (struct device *dev, struct device_attribute *attr, char *buf)	\
-{									\
-	return show_fan_div(dev, buf, offset - 1);			\
-}									\
-static ssize_t set_fan_##offset##_min (struct device *dev, struct device_attribute *attr,		\
-		const char *buf, size_t count)				\
-{									\
-	return set_fan_min(dev, buf, count, offset - 1);		\
-}									\
-static DEVICE_ATTR(fan##offset##_input, S_IRUGO, show_fan_##offset, NULL);\
-static DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR,		\
-		show_fan_##offset##_min, set_fan_##offset##_min);
+static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO,			\
+		show_fan, NULL, offset - 1);				\
+static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR,		\
+		show_fan_min, set_fan_min, offset - 1);			\
+static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR,		\
+		show_fan_div, set_fan_div, offset - 1);
 
 show_fan_offset(1);
 show_fan_offset(2);
 
-static ssize_t set_fan_1_div(struct device *dev, struct device_attribute *attr, const char *buf,
-		size_t count)
-{
-	return set_fan_div(dev, buf, count, 0) ;
-}
-
-static ssize_t set_fan_2_div(struct device *dev, struct device_attribute *attr, const char *buf,
-		size_t count)
-{
-	return set_fan_div(dev, buf, count, 1) ;
-}
-static DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR,
-		show_fan_1_div, set_fan_1_div);
-static DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
-		show_fan_2_div, set_fan_2_div);
-
 /* Alarms */
 static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -473,28 +435,37 @@
 }
 static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
 
-static struct attribute *sis5595_attributes[] = {
-	&dev_attr_in0_input.attr,
-	&dev_attr_in0_min.attr,
-	&dev_attr_in0_max.attr,
-	&dev_attr_in1_input.attr,
-	&dev_attr_in1_min.attr,
-	&dev_attr_in1_max.attr,
-	&dev_attr_in2_input.attr,
-	&dev_attr_in2_min.attr,
-	&dev_attr_in2_max.attr,
-	&dev_attr_in3_input.attr,
-	&dev_attr_in3_min.attr,
-	&dev_attr_in3_max.attr,
+static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct sis5595_data *data = dev_get_drvdata(dev);
+	return sprintf(buf, "%s\n", data->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 
-	&dev_attr_fan1_input.attr,
-	&dev_attr_fan1_min.attr,
-	&dev_attr_fan1_div.attr,
-	&dev_attr_fan2_input.attr,
-	&dev_attr_fan2_min.attr,
-	&dev_attr_fan2_div.attr,
+static struct attribute *sis5595_attributes[] = {
+	&sensor_dev_attr_in0_input.dev_attr.attr,
+	&sensor_dev_attr_in0_min.dev_attr.attr,
+	&sensor_dev_attr_in0_max.dev_attr.attr,
+	&sensor_dev_attr_in1_input.dev_attr.attr,
+	&sensor_dev_attr_in1_min.dev_attr.attr,
+	&sensor_dev_attr_in1_max.dev_attr.attr,
+	&sensor_dev_attr_in2_input.dev_attr.attr,
+	&sensor_dev_attr_in2_min.dev_attr.attr,
+	&sensor_dev_attr_in2_max.dev_attr.attr,
+	&sensor_dev_attr_in3_input.dev_attr.attr,
+	&sensor_dev_attr_in3_min.dev_attr.attr,
+	&sensor_dev_attr_in3_max.dev_attr.attr,
+
+	&sensor_dev_attr_fan1_input.dev_attr.attr,
+	&sensor_dev_attr_fan1_min.dev_attr.attr,
+	&sensor_dev_attr_fan1_div.dev_attr.attr,
+	&sensor_dev_attr_fan2_input.dev_attr.attr,
+	&sensor_dev_attr_fan2_min.dev_attr.attr,
+	&sensor_dev_attr_fan2_div.dev_attr.attr,
 
 	&dev_attr_alarms.attr,
+	&dev_attr_name.attr,
 	NULL
 };
 
@@ -503,9 +474,9 @@
 };
 
 static struct attribute *sis5595_attributes_opt[] = {
-	&dev_attr_in4_input.attr,
-	&dev_attr_in4_min.attr,
-	&dev_attr_in4_max.attr,
+	&sensor_dev_attr_in4_input.dev_attr.attr,
+	&sensor_dev_attr_in4_min.dev_attr.attr,
+	&sensor_dev_attr_in4_max.dev_attr.attr,
 
 	&dev_attr_temp1_input.attr,
 	&dev_attr_temp1_max.attr,
@@ -518,68 +489,35 @@
 };
  
 /* This is called when the module is loaded */
-static int sis5595_detect(struct i2c_adapter *adapter)
+static int __devinit sis5595_probe(struct platform_device *pdev)
 {
 	int err = 0;
 	int i;
-	struct i2c_client *new_client;
 	struct sis5595_data *data;
+	struct resource *res;
 	char val;
-	u16 a;
 
-	if (force_addr)
-		address = force_addr & ~(SIS5595_EXTENT - 1);
 	/* Reserve the ISA region */
-	if (!request_region(address, SIS5595_EXTENT,
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!request_region(res->start, SIS5595_EXTENT,
 			    sis5595_driver.driver.name)) {
 		err = -EBUSY;
 		goto exit;
 	}
-	if (force_addr) {
-		dev_warn(&adapter->dev, "forcing ISA address 0x%04X\n", address);
-		if (PCIBIOS_SUCCESSFUL !=
-		    pci_write_config_word(s_bridge, SIS5595_BASE_REG, address))
-			goto exit_release;
-		if (PCIBIOS_SUCCESSFUL !=
-		    pci_read_config_word(s_bridge, SIS5595_BASE_REG, &a))
-			goto exit_release;
-		if ((a & ~(SIS5595_EXTENT - 1)) != address)
-			/* doesn't work for some chips? */
-			goto exit_release;
-	}
-
-	if (PCIBIOS_SUCCESSFUL !=
-	    pci_read_config_byte(s_bridge, SIS5595_ENABLE_REG, &val)) {
-		goto exit_release;
-	}
-	if ((val & 0x80) == 0) {
-		if (PCIBIOS_SUCCESSFUL !=
-		    pci_write_config_byte(s_bridge, SIS5595_ENABLE_REG,
-					  val | 0x80))
-			goto exit_release;
-		if (PCIBIOS_SUCCESSFUL !=
-		    pci_read_config_byte(s_bridge, SIS5595_ENABLE_REG, &val))
-			goto exit_release;
-		if ((val & 0x80) == 0) 
-			/* doesn't work for some chips! */
-			goto exit_release;
-	}
 
 	if (!(data = kzalloc(sizeof(struct sis5595_data), GFP_KERNEL))) {
 		err = -ENOMEM;
 		goto exit_release;
 	}
 
-	new_client = &data->client;
-	new_client->addr = address;
 	mutex_init(&data->lock);
-	i2c_set_clientdata(new_client, data);
-	new_client->adapter = adapter;
-	new_client->driver = &sis5595_driver;
-	new_client->flags = 0;
+	mutex_init(&data->update_lock);
+	data->addr = res->start;
+	data->name = "sis5595";
+	platform_set_drvdata(pdev, data);
 
 	/* Check revision and pin registers to determine whether 4 or 5 voltages */
-	pci_read_config_byte(s_bridge, SIS5595_REVISION_REG, &(data->revision));
+	pci_read_config_byte(s_bridge, PCI_REVISION_ID, &data->revision);
 	/* 4 voltages, 1 temp */
 	data->maxins = 3;
 	if (data->revision >= REV2MIN) {
@@ -589,47 +527,37 @@
 			data->maxins = 4;
 	}
 	
-	/* Fill in the remaining client fields and put it into the global list */
-	strlcpy(new_client->name, "sis5595", I2C_NAME_SIZE);
-
-	data->valid = 0;
-	mutex_init(&data->update_lock);
-
-	/* Tell the I2C layer a new client has arrived */
-	if ((err = i2c_attach_client(new_client)))
-		goto exit_free;
-	
 	/* Initialize the SIS5595 chip */
-	sis5595_init_client(new_client);
+	sis5595_init_device(data);
 
 	/* A few vars need to be filled upon startup */
 	for (i = 0; i < 2; i++) {
-		data->fan_min[i] = sis5595_read_value(new_client,
+		data->fan_min[i] = sis5595_read_value(data,
 					SIS5595_REG_FAN_MIN(i));
 	}
 
 	/* Register sysfs hooks */
-	if ((err = sysfs_create_group(&new_client->dev.kobj, &sis5595_group)))
-		goto exit_detach;
+	if ((err = sysfs_create_group(&pdev->dev.kobj, &sis5595_group)))
+		goto exit_free;
 	if (data->maxins == 4) {
-		if ((err = device_create_file(&new_client->dev,
-					      &dev_attr_in4_input))
-		 || (err = device_create_file(&new_client->dev,
-					      &dev_attr_in4_min))
-		 || (err = device_create_file(&new_client->dev,
-					      &dev_attr_in4_max)))
+		if ((err = device_create_file(&pdev->dev,
+					&sensor_dev_attr_in4_input.dev_attr))
+		 || (err = device_create_file(&pdev->dev,
+					&sensor_dev_attr_in4_min.dev_attr))
+		 || (err = device_create_file(&pdev->dev,
+					&sensor_dev_attr_in4_max.dev_attr)))
 			goto exit_remove_files;
 	} else {
-		if ((err = device_create_file(&new_client->dev,
+		if ((err = device_create_file(&pdev->dev,
 					      &dev_attr_temp1_input))
-		 || (err = device_create_file(&new_client->dev,
+		 || (err = device_create_file(&pdev->dev,
 					      &dev_attr_temp1_max))
-		 || (err = device_create_file(&new_client->dev,
+		 || (err = device_create_file(&pdev->dev,
 					      &dev_attr_temp1_max_hyst)))
 			goto exit_remove_files;
 	}
 
-	data->class_dev = hwmon_device_register(&new_client->dev);
+	data->class_dev = hwmon_device_register(&pdev->dev);
 	if (IS_ERR(data->class_dev)) {
 		err = PTR_ERR(data->class_dev);
 		goto exit_remove_files;
@@ -638,32 +566,26 @@
 	return 0;
 
 exit_remove_files:
-	sysfs_remove_group(&new_client->dev.kobj, &sis5595_group);
-	sysfs_remove_group(&new_client->dev.kobj, &sis5595_group_opt);
-exit_detach:
-	i2c_detach_client(new_client);
+	sysfs_remove_group(&pdev->dev.kobj, &sis5595_group);
+	sysfs_remove_group(&pdev->dev.kobj, &sis5595_group_opt);
 exit_free:
 	kfree(data);
 exit_release:
-	release_region(address, SIS5595_EXTENT);
+	release_region(res->start, SIS5595_EXTENT);
 exit:
 	return err;
 }
 
-static int sis5595_detach_client(struct i2c_client *client)
+static int __devexit sis5595_remove(struct platform_device *pdev)
 {
-	struct sis5595_data *data = i2c_get_clientdata(client);
-	int err;
+	struct sis5595_data *data = platform_get_drvdata(pdev);
 
 	hwmon_device_unregister(data->class_dev);
-	sysfs_remove_group(&client->dev.kobj, &sis5595_group);
-	sysfs_remove_group(&client->dev.kobj, &sis5595_group_opt);
+	sysfs_remove_group(&pdev->dev.kobj, &sis5595_group);
+	sysfs_remove_group(&pdev->dev.kobj, &sis5595_group_opt);
 
-	if ((err = i2c_detach_client(client)))
-		return err;
-
-	release_region(client->addr, SIS5595_EXTENT);
-
+	release_region(data->addr, SIS5595_EXTENT);
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 
 	return 0;
@@ -671,41 +593,37 @@
 
 
 /* ISA access must be locked explicitly. */
-static int sis5595_read_value(struct i2c_client *client, u8 reg)
+static int sis5595_read_value(struct sis5595_data *data, u8 reg)
 {
 	int res;
 
-	struct sis5595_data *data = i2c_get_clientdata(client);
 	mutex_lock(&data->lock);
-	outb_p(reg, client->addr + SIS5595_ADDR_REG_OFFSET);
-	res = inb_p(client->addr + SIS5595_DATA_REG_OFFSET);
+	outb_p(reg, data->addr + SIS5595_ADDR_REG_OFFSET);
+	res = inb_p(data->addr + SIS5595_DATA_REG_OFFSET);
 	mutex_unlock(&data->lock);
 	return res;
 }
 
-static int sis5595_write_value(struct i2c_client *client, u8 reg, u8 value)
+static void sis5595_write_value(struct sis5595_data *data, u8 reg, u8 value)
 {
-	struct sis5595_data *data = i2c_get_clientdata(client);
 	mutex_lock(&data->lock);
-	outb_p(reg, client->addr + SIS5595_ADDR_REG_OFFSET);
-	outb_p(value, client->addr + SIS5595_DATA_REG_OFFSET);
+	outb_p(reg, data->addr + SIS5595_ADDR_REG_OFFSET);
+	outb_p(value, data->addr + SIS5595_DATA_REG_OFFSET);
 	mutex_unlock(&data->lock);
-	return 0;
 }
 
 /* Called when we have found a new SIS5595. */
-static void sis5595_init_client(struct i2c_client *client)
+static void __devinit sis5595_init_device(struct sis5595_data *data)
 {
-	u8 config = sis5595_read_value(client, SIS5595_REG_CONFIG);
+	u8 config = sis5595_read_value(data, SIS5595_REG_CONFIG);
 	if (!(config & 0x01))
-		sis5595_write_value(client, SIS5595_REG_CONFIG,
+		sis5595_write_value(data, SIS5595_REG_CONFIG,
 				(config & 0xf7) | 0x01);
 }
 
 static struct sis5595_data *sis5595_update_device(struct device *dev)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct sis5595_data *data = i2c_get_clientdata(client);
+	struct sis5595_data *data = dev_get_drvdata(dev);
 	int i;
 
 	mutex_lock(&data->update_lock);
@@ -715,35 +633,35 @@
 
 		for (i = 0; i <= data->maxins; i++) {
 			data->in[i] =
-			    sis5595_read_value(client, SIS5595_REG_IN(i));
+			    sis5595_read_value(data, SIS5595_REG_IN(i));
 			data->in_min[i] =
-			    sis5595_read_value(client,
+			    sis5595_read_value(data,
 					       SIS5595_REG_IN_MIN(i));
 			data->in_max[i] =
-			    sis5595_read_value(client,
+			    sis5595_read_value(data,
 					       SIS5595_REG_IN_MAX(i));
 		}
 		for (i = 0; i < 2; i++) {
 			data->fan[i] =
-			    sis5595_read_value(client, SIS5595_REG_FAN(i));
+			    sis5595_read_value(data, SIS5595_REG_FAN(i));
 			data->fan_min[i] =
-			    sis5595_read_value(client,
+			    sis5595_read_value(data,
 					       SIS5595_REG_FAN_MIN(i));
 		}
 		if (data->maxins == 3) {
 			data->temp =
-			    sis5595_read_value(client, SIS5595_REG_TEMP);
+			    sis5595_read_value(data, SIS5595_REG_TEMP);
 			data->temp_over =
-			    sis5595_read_value(client, SIS5595_REG_TEMP_OVER);
+			    sis5595_read_value(data, SIS5595_REG_TEMP_OVER);
 			data->temp_hyst =
-			    sis5595_read_value(client, SIS5595_REG_TEMP_HYST);
+			    sis5595_read_value(data, SIS5595_REG_TEMP_HYST);
 		}
-		i = sis5595_read_value(client, SIS5595_REG_FANDIV);
+		i = sis5595_read_value(data, SIS5595_REG_FANDIV);
 		data->fan_div[0] = (i >> 4) & 0x03;
 		data->fan_div[1] = i >> 6;
 		data->alarms =
-		    sis5595_read_value(client, SIS5595_REG_ALARM1) |
-		    (sis5595_read_value(client, SIS5595_REG_ALARM2) << 8);
+		    sis5595_read_value(data, SIS5595_REG_ALARM1) |
+		    (sis5595_read_value(data, SIS5595_REG_ALARM2) << 8);
 		data->last_updated = jiffies;
 		data->valid = 1;
 	}
@@ -774,10 +692,50 @@
 	PCI_DEVICE_ID_SI_5598,
 	0 };
 
+static int __devinit sis5595_device_add(unsigned short address)
+{
+	struct resource res = {
+		.start	= address,
+		.end	= address + SIS5595_EXTENT - 1,
+		.name	= "sis5595",
+		.flags	= IORESOURCE_IO,
+	};
+	int err;
+
+	pdev = platform_device_alloc("sis5595", address);
+	if (!pdev) {
+		err = -ENOMEM;
+		printk(KERN_ERR "sis5595: Device allocation failed\n");
+		goto exit;
+	}
+
+	err = platform_device_add_resources(pdev, &res, 1);
+	if (err) {
+		printk(KERN_ERR "sis5595: Device resource addition failed "
+		       "(%d)\n", err);
+		goto exit_device_put;
+	}
+
+	err = platform_device_add(pdev);
+	if (err) {
+		printk(KERN_ERR "sis5595: Device addition failed (%d)\n",
+		       err);
+		goto exit_device_put;
+	}
+
+	return 0;
+
+exit_device_put:
+	platform_device_put(pdev);
+exit:
+	return err;
+}
+
 static int __devinit sis5595_pci_probe(struct pci_dev *dev,
 				       const struct pci_device_id *id)
 {
-	u16 val;
+	u16 address;
+	u8 enable;
 	int *i;
 
 	for (i = blacklist; *i != 0; i++) {
@@ -790,27 +748,68 @@
 		}
 	}
 	
+	force_addr &= ~(SIS5595_EXTENT - 1);
+	if (force_addr) {
+		dev_warn(&dev->dev, "Forcing ISA address 0x%x\n", force_addr);
+		pci_write_config_word(dev, SIS5595_BASE_REG, force_addr);
+	}
+
 	if (PCIBIOS_SUCCESSFUL !=
-	    pci_read_config_word(dev, SIS5595_BASE_REG, &val))
+	    pci_read_config_word(dev, SIS5595_BASE_REG, &address)) {
+		dev_err(&dev->dev, "Failed to read ISA address\n");
 		return -ENODEV;
+	}
 	
-	address = val & ~(SIS5595_EXTENT - 1);
-	if (address == 0 && force_addr == 0) {
+	address &= ~(SIS5595_EXTENT - 1);
+	if (!address) {
 		dev_err(&dev->dev, "Base address not set - upgrade BIOS or use force_addr=0xaddr\n");
 		return -ENODEV;
 	}
+	if (force_addr && address != force_addr) {
+		/* doesn't work for some chips? */
+		dev_err(&dev->dev, "Failed to force ISA address\n");
+		return -ENODEV;
+	}
+
+	if (PCIBIOS_SUCCESSFUL !=
+	    pci_read_config_byte(dev, SIS5595_ENABLE_REG, &enable)) {
+		dev_err(&dev->dev, "Failed to read enable register\n");
+		return -ENODEV;
+	}
+	if (!(enable & 0x80)) {
+		if ((PCIBIOS_SUCCESSFUL !=
+		     pci_write_config_byte(dev, SIS5595_ENABLE_REG,
+					   enable | 0x80))
+		 || (PCIBIOS_SUCCESSFUL !=
+		     pci_read_config_byte(dev, SIS5595_ENABLE_REG, &enable))
+		 || (!(enable & 0x80))) {
+			/* doesn't work for some chips! */
+			dev_err(&dev->dev, "Failed to enable HWM device\n");
+			return -ENODEV;
+		}
+	}
+
+	if (platform_driver_register(&sis5595_driver)) {
+		dev_dbg(&dev->dev, "Failed to register sis5595 driver\n");
+		goto exit;
+	}
 
 	s_bridge = pci_dev_get(dev);
-	if (i2c_isa_add_driver(&sis5595_driver)) {
-		pci_dev_put(s_bridge);
-		s_bridge = NULL;
-	}
+	/* Sets global pdev as a side effect */
+	if (sis5595_device_add(address))
+		goto exit_unregister;
 
 	/* Always return failure here.  This is to allow other drivers to bind
 	 * to this pci device.  We don't really want to have control over the
 	 * pci device, we only wanted to read a few register values from it.
 	 */
 	return -ENODEV;
+
+exit_unregister:
+	pci_dev_put(dev);
+	platform_driver_unregister(&sis5595_driver);
+exit:
+	return -ENODEV;
 }
 
 static struct pci_driver sis5595_pci_driver = {
@@ -828,7 +827,8 @@
 {
 	pci_unregister_driver(&sis5595_pci_driver);
 	if (s_bridge != NULL) {
-		i2c_isa_del_driver(&sis5595_driver);
+		platform_device_unregister(pdev);
+		platform_driver_unregister(&sis5595_driver);
 		pci_dev_put(s_bridge);
 		s_bridge = NULL;
 	}
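
The bulk of the sis5595 sysfs rework replaces the per-offset wrapper functions (generated by the old show_in_offset()/show_fan_offset() macros) with the <linux/hwmon-sysfs.h> helpers: SENSOR_DEVICE_ATTR() bundles a struct device_attribute with an integer index, and to_sensor_dev_attr() recovers that index inside one shared callback. A minimal sketch of the pattern; everything except the two helpers is illustrative:

	#include <linux/kernel.h>
	#include <linux/device.h>
	#include <linux/hwmon-sysfs.h>

	static ssize_t show_in(struct device *dev, struct device_attribute *da,
			       char *buf)
	{
		struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
		int nr = attr->index;	/* channel number passed to the macro below */

		/* ... read channel 'nr' from the device here ... */
		return sprintf(buf, "%d\n", nr);
	}

	/* Declares sensor_dev_attr_in0_input with .index = 0 */
	static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in, NULL, 0);

The attribute is then listed in an attribute group as &sensor_dev_attr_in0_input.dev_attr.attr, which is exactly the form used in the sis5595_attributes[] table above.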
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index 943abbd..45266b3 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -174,6 +174,8 @@
    REG: count of 90kHz pulses / revolution */
 static int fan_from_reg(u16 reg)
 {
+	if (reg == 0 || reg == 0xffff)
+		return 0;
 	return 90000 * 60 / reg;
 }
 
@@ -333,7 +335,7 @@
 	superio_enter();
 	id = superio_inb(SUPERIO_REG_DEVID);
 
-	if ((id != 0x6f) && (id != 0x81)) {
+	if ((id != 0x6f) && (id != 0x81) && (id != 0x85)) {
 		superio_exit();
 		return -ENODEV;
 	}
@@ -346,7 +348,8 @@
 
 	printk(KERN_INFO DRVNAME ": found SMSC %s "
 		"(base address 0x%04x, revision %u)\n",
-		id == 0x81 ? "SCH5307-NS" : "LPC47B397-NC", *addr, rev);
+		id == 0x81 ? "SCH5307-NS" : id == 0x85 ? "SCH5317" :
+	       "LPC47B397-NC", *addr, rev);
 
 	superio_exit();
 	return 0;
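
In fan_from_reg() above, the register counts 90 kHz clock pulses per fan revolution, so the conversion is RPM = 90000 * 60 / reg: a count of 5400 corresponds to 1000 RPM, and 2700 to 2000 RPM. The added guard returns 0 both for a zero count, which would otherwise divide by zero, and for an all-ones count, either of which means no usable fan reading is available.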
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 1e21c8c..1de2f2b 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -597,6 +597,7 @@
 error_remove_files:
 	sysfs_remove_group(&dev->kobj, &smsc47m1_group);
 error_free:
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 error_release:
 	release_region(res->start, SMSC_EXTENT);
@@ -608,12 +609,12 @@
 	struct smsc47m1_data *data = platform_get_drvdata(pdev);
 	struct resource *res;
 
-	platform_set_drvdata(pdev, NULL);
 	hwmon_device_unregister(data->class_dev);
 	sysfs_remove_group(&pdev->dev.kobj, &smsc47m1_group);
 
 	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
 	release_region(res->start, SMSC_EXTENT);
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 
 	return 0;
@@ -693,15 +694,12 @@
 		goto exit_device_put;
 	}
 
-	pdev->dev.platform_data = kmalloc(sizeof(struct smsc47m1_sio_data),
-					  GFP_KERNEL);
-	if (!pdev->dev.platform_data) {
-		err = -ENOMEM;
+	err = platform_device_add_data(pdev, sio_data,
+				       sizeof(struct smsc47m1_sio_data));
+	if (err) {
 		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
 		goto exit_device_put;
 	}
-	memcpy(pdev->dev.platform_data, sio_data,
-	       sizeof(struct smsc47m1_sio_data));
 
 	err = platform_device_add(pdev);
 	if (err) {
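
The last smsc47m1 hunk above replaces an open-coded kmalloc() + memcpy() of pdev->dev.platform_data with platform_device_add_data(), which copies the structure for the caller and lets the device core free it when the platform device is released. A minimal sketch, with an illustrative payload structure:

	#include <linux/types.h>
	#include <linux/platform_device.h>

	struct example_sio_data {
		u8 type;
		u8 activate;
	};

	static int example_set_pdata(struct platform_device *pdev,
				     const struct example_sio_data *sio_data)
	{
		/* Copies *sio_data; no separate kfree() is needed on the error path */
		return platform_device_add_data(pdev, sio_data,
						sizeof(struct example_sio_data));
	}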
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index a012f39..d3a3ba0 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -31,6 +31,7 @@
 #include <linux/hwmon-vid.h>
 #include <linux/err.h>
 #include <linux/sysfs.h>
+#include <linux/mutex.h>
 
 /* Addresses to scan */
 static unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
@@ -97,7 +98,7 @@
 struct smsc47m192_data {
 	struct i2c_client client;
 	struct class_device *class_dev;
-	struct semaphore update_lock;
+	struct mutex update_lock;
 	char valid;		/* !=0 if following fields are valid */
 	unsigned long last_updated;	/* In jiffies */
 
@@ -164,11 +165,11 @@
 	struct smsc47m192_data *data = i2c_get_clientdata(client);
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 
-	down(&data->update_lock);
+	mutex_lock(&data->update_lock);
 	data->in_min[nr] = IN_TO_REG(val, nr);
 	i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MIN(nr),
 							data->in_min[nr]);
-	up(&data->update_lock);
+	mutex_unlock(&data->update_lock);
 	return count;
 }
 
@@ -181,11 +182,11 @@
 	struct smsc47m192_data *data = i2c_get_clientdata(client);
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 
-	down(&data->update_lock);
+	mutex_lock(&data->update_lock);
 	data->in_max[nr] = IN_TO_REG(val, nr);
 	i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MAX(nr),
 							data->in_max[nr]);
-	up(&data->update_lock);
+	mutex_unlock(&data->update_lock);
 	return count;
 }
 
@@ -243,11 +244,11 @@
 	struct smsc47m192_data *data = i2c_get_clientdata(client);
 	long val = simple_strtol(buf, NULL, 10);
 
-	down(&data->update_lock);
+	mutex_lock(&data->update_lock);
 	data->temp_min[nr] = TEMP_TO_REG(val);
 	i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MIN[nr],
 						data->temp_min[nr]);
-	up(&data->update_lock);
+	mutex_unlock(&data->update_lock);
 	return count;
 }
 
@@ -260,11 +261,11 @@
 	struct smsc47m192_data *data = i2c_get_clientdata(client);
 	long val = simple_strtol(buf, NULL, 10);
 
-	down(&data->update_lock);
+	mutex_lock(&data->update_lock);
 	data->temp_max[nr] = TEMP_TO_REG(val);
 	i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MAX[nr],
 						data->temp_max[nr]);
-	up(&data->update_lock);
+	mutex_unlock(&data->update_lock);
 	return count;
 }
 
@@ -287,7 +288,7 @@
 	u8 sfr = i2c_smbus_read_byte_data(client, SMSC47M192_REG_SFR);
 	long val = simple_strtol(buf, NULL, 10);
 
-	down(&data->update_lock);
+	mutex_lock(&data->update_lock);
 	data->temp_offset[nr] = TEMP_TO_REG(val);
 	if (nr>1)
 		i2c_smbus_write_byte_data(client,
@@ -303,7 +304,7 @@
 	} else if ((sfr & 0x10) == (nr==0 ? 0x10 : 0))
 		i2c_smbus_write_byte_data(client,
 					SMSC47M192_REG_TEMP_OFFSET(nr), 0);
-	up(&data->update_lock);
+	mutex_unlock(&data->update_lock);
 	return count;
 }
 
@@ -360,8 +361,8 @@
 static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0x0010);
 static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 0x0020);
 static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0x0040);
-static SENSOR_DEVICE_ATTR(temp2_input_fault, S_IRUGO, show_alarm, NULL, 0x4000);
-static SENSOR_DEVICE_ATTR(temp3_input_fault, S_IRUGO, show_alarm, NULL, 0x8000);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 0x4000);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 0x8000);
 static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0x0001);
 static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0x0002);
 static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 0x0004);
@@ -411,13 +412,13 @@
 	&sensor_dev_attr_temp2_min.dev_attr.attr,
 	&sensor_dev_attr_temp2_offset.dev_attr.attr,
 	&sensor_dev_attr_temp2_alarm.dev_attr.attr,
-	&sensor_dev_attr_temp2_input_fault.dev_attr.attr,
+	&sensor_dev_attr_temp2_fault.dev_attr.attr,
 	&sensor_dev_attr_temp3_input.dev_attr.attr,
 	&sensor_dev_attr_temp3_max.dev_attr.attr,
 	&sensor_dev_attr_temp3_min.dev_attr.attr,
 	&sensor_dev_attr_temp3_offset.dev_attr.attr,
 	&sensor_dev_attr_temp3_alarm.dev_attr.attr,
-	&sensor_dev_attr_temp3_input_fault.dev_attr.attr,
+	&sensor_dev_attr_temp3_fault.dev_attr.attr,
 
 	&dev_attr_cpu0_vid.attr,
 	&dev_attr_vrm.attr,
@@ -531,7 +532,7 @@
 	/* Fill in the remaining client fields and put into the global list */
 	strlcpy(client->name, "smsc47m192", I2C_NAME_SIZE);
 	data->vrm = vid_which_vrm();
-	init_MUTEX(&data->update_lock);
+	mutex_init(&data->update_lock);
 
 	/* Tell the I2C layer a new client has arrived */
 	if ((err = i2c_attach_client(client)))
@@ -594,7 +595,7 @@
 	struct smsc47m192_data *data = i2c_get_clientdata(client);
 	int i, config;
 
-	down(&data->update_lock);
+	mutex_lock(&data->update_lock);
 
 	if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
 	 || !data->valid) {
@@ -645,7 +646,7 @@
 		data->valid = 1;
 	}
 
-	up(&data->update_lock);
+	mutex_unlock(&data->update_lock);
 
 	return data;
 }
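
The smsc47m192 locking changes above are a mechanical conversion from the legacy semaphore API to <linux/mutex.h>: struct semaphore becomes struct mutex, init_MUTEX() becomes mutex_init(), and the down()/up() pairs around register updates become mutex_lock()/mutex_unlock(). The resulting pattern, sketched with illustrative names:

	#include <linux/mutex.h>

	struct example_data {
		struct mutex update_lock;	/* was: struct semaphore */
		int cached_limit;
	};

	static void example_set_limit(struct example_data *data, int val)
	{
		mutex_lock(&data->update_lock);		/* was: down(&data->update_lock) */
		data->cached_limit = val;
		/* ... write the new limit to the chip here ... */
		mutex_unlock(&data->update_lock);	/* was: up(&data->update_lock) */
	}

mutex_init(&data->update_lock) replaces init_MUTEX() in the detect path, as shown above.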
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 9a440c8..24a6851 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -34,9 +34,9 @@
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/i2c-isa.h>
+#include <linux/platform_device.h>
 #include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
@@ -51,10 +51,7 @@
 MODULE_PARM_DESC(force_addr,
 		 "Initialize the base address of the sensors");
 
-/* Device address
-   Note that we can't determine the ISA address until we have initialized
-   our module */
-static unsigned short address;
+static struct platform_device *pdev;
 
 /*
    The Via 686a southbridge has a LM78-like chip integrated on the same IC.
@@ -295,7 +292,8 @@
 /* For each registered chip, we need to keep some data in memory.
    The structure is dynamically allocated. */
 struct via686a_data {
-	struct i2c_client client;
+	unsigned short addr;
+	const char *name;
 	struct class_device *class_dev;
 	struct mutex update_lock;
 	char valid;		/* !=0 if following fields are valid */
@@ -315,98 +313,85 @@
 
 static struct pci_dev *s_bridge;	/* pointer to the (only) via686a */
 
-static int via686a_detect(struct i2c_adapter *adapter);
-static int via686a_detach_client(struct i2c_client *client);
+static int via686a_probe(struct platform_device *pdev);
+static int via686a_remove(struct platform_device *pdev);
 
-static inline int via686a_read_value(struct i2c_client *client, u8 reg)
+static inline int via686a_read_value(struct via686a_data *data, u8 reg)
 {
-	return (inb_p(client->addr + reg));
+	return inb_p(data->addr + reg);
 }
 
-static inline void via686a_write_value(struct i2c_client *client, u8 reg,
+static inline void via686a_write_value(struct via686a_data *data, u8 reg,
 				       u8 value)
 {
-	outb_p(value, client->addr + reg);
+	outb_p(value, data->addr + reg);
 }
 
 static struct via686a_data *via686a_update_device(struct device *dev);
-static void via686a_init_client(struct i2c_client *client);
+static void via686a_init_device(struct via686a_data *data);
 
 /* following are the sysfs callback functions */
 
 /* 7 voltage sensors */
-static ssize_t show_in(struct device *dev, char *buf, int nr) {
+static ssize_t show_in(struct device *dev, struct device_attribute *da,
+		char *buf) {
 	struct via686a_data *data = via686a_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf, "%ld\n", IN_FROM_REG(data->in[nr], nr));
 }
 
-static ssize_t show_in_min(struct device *dev, char *buf, int nr) {
+static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
+		char *buf) {
 	struct via686a_data *data = via686a_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf, "%ld\n", IN_FROM_REG(data->in_min[nr], nr));
 }
 
-static ssize_t show_in_max(struct device *dev, char *buf, int nr) {
+static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
+		char *buf) {
 	struct via686a_data *data = via686a_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf, "%ld\n", IN_FROM_REG(data->in_max[nr], nr));
 }
 
-static ssize_t set_in_min(struct device *dev, const char *buf,
-		size_t count, int nr) {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct via686a_data *data = i2c_get_clientdata(client);
+static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
+		const char *buf, size_t count) {
+	struct via686a_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->in_min[nr] = IN_TO_REG(val, nr);
-	via686a_write_value(client, VIA686A_REG_IN_MIN(nr),
+	via686a_write_value(data, VIA686A_REG_IN_MIN(nr),
 			data->in_min[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
-static ssize_t set_in_max(struct device *dev, const char *buf,
-		size_t count, int nr) {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct via686a_data *data = i2c_get_clientdata(client);
+static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
+		const char *buf, size_t count) {
+	struct via686a_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->in_max[nr] = IN_TO_REG(val, nr);
-	via686a_write_value(client, VIA686A_REG_IN_MAX(nr),
+	via686a_write_value(data, VIA686A_REG_IN_MAX(nr),
 			data->in_max[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
 #define show_in_offset(offset)					\
-static ssize_t 							\
-	show_in##offset (struct device *dev, struct device_attribute *attr, char *buf)		\
-{								\
-	return show_in(dev, buf, offset);			\
-}								\
-static ssize_t 							\
-	show_in##offset##_min (struct device *dev, struct device_attribute *attr, char *buf)	\
-{								\
-	return show_in_min(dev, buf, offset);		\
-}								\
-static ssize_t 							\
-	show_in##offset##_max (struct device *dev, struct device_attribute *attr, char *buf)	\
-{								\
-	return show_in_max(dev, buf, offset);		\
-}								\
-static ssize_t set_in##offset##_min (struct device *dev, struct device_attribute *attr, 	\
-		const char *buf, size_t count) 			\
-{								\
-	return set_in_min(dev, buf, count, offset);		\
-}								\
-static ssize_t set_in##offset##_max (struct device *dev, struct device_attribute *attr,	\
-			const char *buf, size_t count)		\
-{								\
-	return set_in_max(dev, buf, count, offset);		\
-}								\
-static DEVICE_ATTR(in##offset##_input, S_IRUGO, show_in##offset, NULL);\
-static DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, 	\
-		show_in##offset##_min, set_in##offset##_min);	\
-static DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, 	\
-		show_in##offset##_max, set_in##offset##_max);
+static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO,		\
+		show_in, NULL, offset);				\
+static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR,	\
+		show_in_min, set_in_min, offset);		\
+static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR,	\
+		show_in_max, set_in_max, offset);
 
 show_in_offset(0);
 show_in_offset(1);
@@ -415,150 +400,128 @@
 show_in_offset(4);
 
 /* 3 temperatures */
-static ssize_t show_temp(struct device *dev, char *buf, int nr) {
+static ssize_t show_temp(struct device *dev, struct device_attribute *da,
+		char *buf) {
 	struct via686a_data *data = via686a_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf, "%ld\n", TEMP_FROM_REG10(data->temp[nr]));
 }
-static ssize_t show_temp_over(struct device *dev, char *buf, int nr) {
+static ssize_t show_temp_over(struct device *dev, struct device_attribute *da,
+		char *buf) {
 	struct via686a_data *data = via686a_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp_over[nr]));
 }
-static ssize_t show_temp_hyst(struct device *dev, char *buf, int nr) {
+static ssize_t show_temp_hyst(struct device *dev, struct device_attribute *da,
+		char *buf) {
 	struct via686a_data *data = via686a_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp_hyst[nr]));
 }
-static ssize_t set_temp_over(struct device *dev, const char *buf,
-		size_t count, int nr) {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct via686a_data *data = i2c_get_clientdata(client);
+static ssize_t set_temp_over(struct device *dev, struct device_attribute *da,
+		const char *buf, size_t count) {
+	struct via686a_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	int val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->temp_over[nr] = TEMP_TO_REG(val);
-	via686a_write_value(client, VIA686A_REG_TEMP_OVER[nr],
+	via686a_write_value(data, VIA686A_REG_TEMP_OVER[nr],
 			    data->temp_over[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
-static ssize_t set_temp_hyst(struct device *dev, const char *buf,
-		size_t count, int nr) {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct via686a_data *data = i2c_get_clientdata(client);
+static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *da,
+		const char *buf, size_t count) {
+	struct via686a_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	int val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->temp_hyst[nr] = TEMP_TO_REG(val);
-	via686a_write_value(client, VIA686A_REG_TEMP_HYST[nr],
+	via686a_write_value(data, VIA686A_REG_TEMP_HYST[nr],
 			    data->temp_hyst[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
 #define show_temp_offset(offset)					\
-static ssize_t show_temp_##offset (struct device *dev, struct device_attribute *attr, char *buf)	\
-{									\
-	return show_temp(dev, buf, offset - 1);				\
-}									\
-static ssize_t								\
-show_temp_##offset##_over (struct device *dev, struct device_attribute *attr, char *buf)		\
-{									\
-	return show_temp_over(dev, buf, offset - 1);			\
-}									\
-static ssize_t								\
-show_temp_##offset##_hyst (struct device *dev, struct device_attribute *attr, char *buf)		\
-{									\
-	return show_temp_hyst(dev, buf, offset - 1);			\
-}									\
-static ssize_t set_temp_##offset##_over (struct device *dev, struct device_attribute *attr, 		\
-		const char *buf, size_t count) 				\
-{									\
-	return set_temp_over(dev, buf, count, offset - 1);		\
-}									\
-static ssize_t set_temp_##offset##_hyst (struct device *dev, struct device_attribute *attr, 		\
-		const char *buf, size_t count) 				\
-{									\
-	return set_temp_hyst(dev, buf, count, offset - 1);		\
-}									\
-static DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp_##offset, NULL);\
-static DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, 		\
-		show_temp_##offset##_over, set_temp_##offset##_over);	\
-static DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, 		\
-		show_temp_##offset##_hyst, set_temp_##offset##_hyst);
+static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO,		\
+		show_temp, NULL, offset - 1);				\
+static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR,	\
+		show_temp_over, set_temp_over, offset - 1);		\
+static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR,	\
+		show_temp_hyst, set_temp_hyst, offset - 1);
 
 show_temp_offset(1);
 show_temp_offset(2);
 show_temp_offset(3);
 
 /* 2 Fans */
-static ssize_t show_fan(struct device *dev, char *buf, int nr) {
+static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+		char *buf) {
 	struct via686a_data *data = via686a_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
 				DIV_FROM_REG(data->fan_div[nr])) );
 }
-static ssize_t show_fan_min(struct device *dev, char *buf, int nr) {
+static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+		char *buf) {
 	struct via686a_data *data = via686a_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf, "%d\n",
 		FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])) );
 }
-static ssize_t show_fan_div(struct device *dev, char *buf, int nr) {
+static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
+		char *buf) {
 	struct via686a_data *data = via686a_update_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]) );
 }
-static ssize_t set_fan_min(struct device *dev, const char *buf,
-		size_t count, int nr) {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct via686a_data *data = i2c_get_clientdata(client);
+static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
+		const char *buf, size_t count) {
+	struct via686a_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	int val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
-	via686a_write_value(client, VIA686A_REG_FAN_MIN(nr+1), data->fan_min[nr]);
+	via686a_write_value(data, VIA686A_REG_FAN_MIN(nr+1), data->fan_min[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
-static ssize_t set_fan_div(struct device *dev, const char *buf,
-		size_t count, int nr) {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct via686a_data *data = i2c_get_clientdata(client);
+static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
+		const char *buf, size_t count) {
+	struct via686a_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int nr = attr->index;
 	int val = simple_strtol(buf, NULL, 10);
 	int old;
 
 	mutex_lock(&data->update_lock);
-	old = via686a_read_value(client, VIA686A_REG_FANDIV);
+	old = via686a_read_value(data, VIA686A_REG_FANDIV);
 	data->fan_div[nr] = DIV_TO_REG(val);
 	old = (old & 0x0f) | (data->fan_div[1] << 6) | (data->fan_div[0] << 4);
-	via686a_write_value(client, VIA686A_REG_FANDIV, old);
+	via686a_write_value(data, VIA686A_REG_FANDIV, old);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
 
 #define show_fan_offset(offset)						\
-static ssize_t show_fan_##offset (struct device *dev, struct device_attribute *attr, char *buf)	\
-{									\
-	return show_fan(dev, buf, offset - 1);				\
-}									\
-static ssize_t show_fan_##offset##_min (struct device *dev, struct device_attribute *attr, char *buf)	\
-{									\
-	return show_fan_min(dev, buf, offset - 1);			\
-}									\
-static ssize_t show_fan_##offset##_div (struct device *dev, struct device_attribute *attr, char *buf)	\
-{									\
-	return show_fan_div(dev, buf, offset - 1);			\
-}									\
-static ssize_t set_fan_##offset##_min (struct device *dev, struct device_attribute *attr, 		\
-	const char *buf, size_t count) 					\
-{									\
-	return set_fan_min(dev, buf, count, offset - 1);		\
-}									\
-static ssize_t set_fan_##offset##_div (struct device *dev, struct device_attribute *attr, 		\
-		const char *buf, size_t count) 				\
-{									\
-	return set_fan_div(dev, buf, count, offset - 1);		\
-}									\
-static DEVICE_ATTR(fan##offset##_input, S_IRUGO, show_fan_##offset, NULL);\
-static DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, 		\
-		show_fan_##offset##_min, set_fan_##offset##_min);	\
-static DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, 		\
-		show_fan_##offset##_div, set_fan_##offset##_div);
+static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO,			\
+		show_fan, NULL, offset - 1);				\
+static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR,		\
+		show_fan_min, set_fan_min, offset - 1);			\
+static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR,		\
+		show_fan_div, set_fan_div, offset - 1);
 
 show_fan_offset(1);
 show_fan_offset(2);
@@ -570,41 +533,50 @@
 }
 static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
 
+static ssize_t show_name(struct device *dev, struct device_attribute
+			 *devattr, char *buf)
+{
+	struct via686a_data *data = dev_get_drvdata(dev);
+	return sprintf(buf, "%s\n", data->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
 static struct attribute *via686a_attributes[] = {
-	&dev_attr_in0_input.attr,
-	&dev_attr_in1_input.attr,
-	&dev_attr_in2_input.attr,
-	&dev_attr_in3_input.attr,
-	&dev_attr_in4_input.attr,
-	&dev_attr_in0_min.attr,
-	&dev_attr_in1_min.attr,
-	&dev_attr_in2_min.attr,
-	&dev_attr_in3_min.attr,
-	&dev_attr_in4_min.attr,
-	&dev_attr_in0_max.attr,
-	&dev_attr_in1_max.attr,
-	&dev_attr_in2_max.attr,
-	&dev_attr_in3_max.attr,
-	&dev_attr_in4_max.attr,
+	&sensor_dev_attr_in0_input.dev_attr.attr,
+	&sensor_dev_attr_in1_input.dev_attr.attr,
+	&sensor_dev_attr_in2_input.dev_attr.attr,
+	&sensor_dev_attr_in3_input.dev_attr.attr,
+	&sensor_dev_attr_in4_input.dev_attr.attr,
+	&sensor_dev_attr_in0_min.dev_attr.attr,
+	&sensor_dev_attr_in1_min.dev_attr.attr,
+	&sensor_dev_attr_in2_min.dev_attr.attr,
+	&sensor_dev_attr_in3_min.dev_attr.attr,
+	&sensor_dev_attr_in4_min.dev_attr.attr,
+	&sensor_dev_attr_in0_max.dev_attr.attr,
+	&sensor_dev_attr_in1_max.dev_attr.attr,
+	&sensor_dev_attr_in2_max.dev_attr.attr,
+	&sensor_dev_attr_in3_max.dev_attr.attr,
+	&sensor_dev_attr_in4_max.dev_attr.attr,
 
-	&dev_attr_temp1_input.attr,
-	&dev_attr_temp2_input.attr,
-	&dev_attr_temp3_input.attr,
-	&dev_attr_temp1_max.attr,
-	&dev_attr_temp2_max.attr,
-	&dev_attr_temp3_max.attr,
-	&dev_attr_temp1_max_hyst.attr,
-	&dev_attr_temp2_max_hyst.attr,
-	&dev_attr_temp3_max_hyst.attr,
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp2_input.dev_attr.attr,
+	&sensor_dev_attr_temp3_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_max.dev_attr.attr,
+	&sensor_dev_attr_temp2_max.dev_attr.attr,
+	&sensor_dev_attr_temp3_max.dev_attr.attr,
+	&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
 
-	&dev_attr_fan1_input.attr,
-	&dev_attr_fan2_input.attr,
-	&dev_attr_fan1_min.attr,
-	&dev_attr_fan2_min.attr,
-	&dev_attr_fan1_div.attr,
-	&dev_attr_fan2_div.attr,
+	&sensor_dev_attr_fan1_input.dev_attr.attr,
+	&sensor_dev_attr_fan2_input.dev_attr.attr,
+	&sensor_dev_attr_fan1_min.dev_attr.attr,
+	&sensor_dev_attr_fan2_min.dev_attr.attr,
+	&sensor_dev_attr_fan1_div.dev_attr.attr,
+	&sensor_dev_attr_fan2_div.dev_attr.attr,
 
 	&dev_attr_alarms.attr,
+	&dev_attr_name.attr,
 	NULL
 };
 
@@ -612,58 +584,29 @@
 	.attrs = via686a_attributes,
 };
 
-/* The driver. I choose to use type i2c_driver, as at is identical to both
-   smbus_driver and isa_driver, and clients could be of either kind */
-static struct i2c_driver via686a_driver = {
+static struct platform_driver via686a_driver = {
 	.driver = {
 		.owner	= THIS_MODULE,
 		.name	= "via686a",
 	},
-	.attach_adapter	= via686a_detect,
-	.detach_client	= via686a_detach_client,
+	.probe		= via686a_probe,
+	.remove		= __devexit_p(via686a_remove),
 };
 
 
 /* This is called when the module is loaded */
-static int via686a_detect(struct i2c_adapter *adapter)
+static int __devinit via686a_probe(struct platform_device *pdev)
 {
-	struct i2c_client *new_client;
 	struct via686a_data *data;
-	int err = 0;
-	const char client_name[] = "via686a";
-	u16 val;
-
-	/* 8231 requires multiple of 256, we enforce that on 686 as well */
-	if (force_addr) {
-		address = force_addr & 0xFF00;
-		dev_warn(&adapter->dev, "forcing ISA address 0x%04X\n",
-			 address);
-		if (PCIBIOS_SUCCESSFUL !=
-		    pci_write_config_word(s_bridge, VIA686A_BASE_REG, address))
-			return -ENODEV;
-	}
-	if (PCIBIOS_SUCCESSFUL !=
-	    pci_read_config_word(s_bridge, VIA686A_ENABLE_REG, &val))
-		return -ENODEV;
-	if (!(val & 0x0001)) {
-		if (force_addr) {
-			dev_info(&adapter->dev, "enabling sensors\n");
-			if (PCIBIOS_SUCCESSFUL !=
-			    pci_write_config_word(s_bridge, VIA686A_ENABLE_REG,
-						  val | 0x0001))
-				return -ENODEV;
-		} else {
-			dev_warn(&adapter->dev, "sensors disabled - enable "
-				 "with force_addr=0x%x\n", address);
-			return -ENODEV;
-		}
-	}
+	struct resource *res;
+	int err;
 
 	/* Reserve the ISA region */
-	if (!request_region(address, VIA686A_EXTENT,
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!request_region(res->start, VIA686A_EXTENT,
 			    via686a_driver.driver.name)) {
-		dev_err(&adapter->dev, "region 0x%x already in use!\n",
-			address);
+		dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n",
+			(unsigned long)res->start, (unsigned long)res->end);
 		return -ENODEV;
 	}
 
@@ -672,30 +615,19 @@
 		goto exit_release;
 	}
 
-	new_client = &data->client;
-	i2c_set_clientdata(new_client, data);
-	new_client->addr = address;
-	new_client->adapter = adapter;
-	new_client->driver = &via686a_driver;
-	new_client->flags = 0;
-
-	/* Fill in the remaining client fields and put into the global list */
-	strlcpy(new_client->name, client_name, I2C_NAME_SIZE);
-
-	data->valid = 0;
+	platform_set_drvdata(pdev, data);
+	data->addr = res->start;
+	data->name = "via686a";
 	mutex_init(&data->update_lock);
-	/* Tell the I2C layer a new client has arrived */
-	if ((err = i2c_attach_client(new_client)))
-		goto exit_free;
 
 	/* Initialize the VIA686A chip */
-	via686a_init_client(new_client);
+	via686a_init_device(data);
 
 	/* Register sysfs hooks */
-	if ((err = sysfs_create_group(&new_client->dev.kobj, &via686a_group)))
-		goto exit_detach;
+	if ((err = sysfs_create_group(&pdev->dev.kobj, &via686a_group)))
+		goto exit_free;
 
-	data->class_dev = hwmon_device_register(&new_client->dev);
+	data->class_dev = hwmon_device_register(&pdev->dev);
 	if (IS_ERR(data->class_dev)) {
 		err = PTR_ERR(data->class_dev);
 		goto exit_remove_files;
@@ -704,51 +636,46 @@
 	return 0;
 
 exit_remove_files:
-	sysfs_remove_group(&new_client->dev.kobj, &via686a_group);
-exit_detach:
-	i2c_detach_client(new_client);
+	sysfs_remove_group(&pdev->dev.kobj, &via686a_group);
 exit_free:
 	kfree(data);
 exit_release:
-	release_region(address, VIA686A_EXTENT);
+	release_region(res->start, VIA686A_EXTENT);
 	return err;
 }
 
-static int via686a_detach_client(struct i2c_client *client)
+static int __devexit via686a_remove(struct platform_device *pdev)
 {
-	struct via686a_data *data = i2c_get_clientdata(client);
-	int err;
+	struct via686a_data *data = platform_get_drvdata(pdev);
 
 	hwmon_device_unregister(data->class_dev);
-	sysfs_remove_group(&client->dev.kobj, &via686a_group);
+	sysfs_remove_group(&pdev->dev.kobj, &via686a_group);
 
-	if ((err = i2c_detach_client(client)))
-		return err;
-
-	release_region(client->addr, VIA686A_EXTENT);
+	release_region(data->addr, VIA686A_EXTENT);
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 
 	return 0;
 }
 
-static void via686a_init_client(struct i2c_client *client)
+static void __devinit via686a_init_device(struct via686a_data *data)
 {
 	u8 reg;
 
 	/* Start monitoring */
-	reg = via686a_read_value(client, VIA686A_REG_CONFIG);
-	via686a_write_value(client, VIA686A_REG_CONFIG, (reg|0x01)&0x7F);
+	reg = via686a_read_value(data, VIA686A_REG_CONFIG);
+	via686a_write_value(data, VIA686A_REG_CONFIG, (reg | 0x01) & 0x7F);
 
 	/* Configure temp interrupt mode for continuous-interrupt operation */
-	via686a_write_value(client, VIA686A_REG_TEMP_MODE,
-			    via686a_read_value(client, VIA686A_REG_TEMP_MODE) &
-			    !(VIA686A_TEMP_MODE_MASK | VIA686A_TEMP_MODE_CONTINUOUS));
+	reg = via686a_read_value(data, VIA686A_REG_TEMP_MODE);
+	via686a_write_value(data, VIA686A_REG_TEMP_MODE,
+			    (reg & ~VIA686A_TEMP_MODE_MASK)
+			    | VIA686A_TEMP_MODE_CONTINUOUS);
 }
 
 static struct via686a_data *via686a_update_device(struct device *dev)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct via686a_data *data = i2c_get_clientdata(client);
+	struct via686a_data *data = dev_get_drvdata(dev);
 	int i;
 
 	mutex_lock(&data->update_lock);
@@ -757,27 +684,27 @@
 	    || !data->valid) {
 		for (i = 0; i <= 4; i++) {
 			data->in[i] =
-			    via686a_read_value(client, VIA686A_REG_IN(i));
-			data->in_min[i] = via686a_read_value(client,
+			    via686a_read_value(data, VIA686A_REG_IN(i));
+			data->in_min[i] = via686a_read_value(data,
 							     VIA686A_REG_IN_MIN
 							     (i));
 			data->in_max[i] =
-			    via686a_read_value(client, VIA686A_REG_IN_MAX(i));
+			    via686a_read_value(data, VIA686A_REG_IN_MAX(i));
 		}
 		for (i = 1; i <= 2; i++) {
 			data->fan[i - 1] =
-			    via686a_read_value(client, VIA686A_REG_FAN(i));
-			data->fan_min[i - 1] = via686a_read_value(client,
+			    via686a_read_value(data, VIA686A_REG_FAN(i));
+			data->fan_min[i - 1] = via686a_read_value(data,
 						     VIA686A_REG_FAN_MIN(i));
 		}
 		for (i = 0; i <= 2; i++) {
-			data->temp[i] = via686a_read_value(client,
+			data->temp[i] = via686a_read_value(data,
 						 VIA686A_REG_TEMP[i]) << 2;
 			data->temp_over[i] =
-			    via686a_read_value(client,
+			    via686a_read_value(data,
 					       VIA686A_REG_TEMP_OVER[i]);
 			data->temp_hyst[i] =
-			    via686a_read_value(client,
+			    via686a_read_value(data,
 					       VIA686A_REG_TEMP_HYST[i]);
 		}
 		/* add in lower 2 bits
@@ -785,23 +712,23 @@
 		   temp2 uses bits 5-4 of VIA686A_REG_TEMP_LOW23
 		   temp3 uses bits 7-6 of VIA686A_REG_TEMP_LOW23
 		 */
-		data->temp[0] |= (via686a_read_value(client,
+		data->temp[0] |= (via686a_read_value(data,
 						     VIA686A_REG_TEMP_LOW1)
 				  & 0xc0) >> 6;
 		data->temp[1] |=
-		    (via686a_read_value(client, VIA686A_REG_TEMP_LOW23) &
+		    (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
 		     0x30) >> 4;
 		data->temp[2] |=
-		    (via686a_read_value(client, VIA686A_REG_TEMP_LOW23) &
+		    (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
 		     0xc0) >> 6;
 
-		i = via686a_read_value(client, VIA686A_REG_FANDIV);
+		i = via686a_read_value(data, VIA686A_REG_FANDIV);
 		data->fan_div[0] = (i >> 4) & 0x03;
 		data->fan_div[1] = i >> 6;
 		data->alarms =
-		    via686a_read_value(client,
+		    via686a_read_value(data,
 				       VIA686A_REG_ALARM1) |
-		    (via686a_read_value(client, VIA686A_REG_ALARM2) << 8);
+		    (via686a_read_value(data, VIA686A_REG_ALARM2) << 8);
 		data->last_updated = jiffies;
 		data->valid = 1;
 	}
@@ -818,32 +745,102 @@
 
 MODULE_DEVICE_TABLE(pci, via686a_pci_ids);
 
+static int __devinit via686a_device_add(unsigned short address)
+{
+	struct resource res = {
+		.start	= address,
+		.end	= address + VIA686A_EXTENT - 1,
+		.name	= "via686a",
+		.flags	= IORESOURCE_IO,
+	};
+	int err;
+
+	pdev = platform_device_alloc("via686a", address);
+	if (!pdev) {
+		err = -ENOMEM;
+		printk(KERN_ERR "via686a: Device allocation failed\n");
+		goto exit;
+	}
+
+	err = platform_device_add_resources(pdev, &res, 1);
+	if (err) {
+		printk(KERN_ERR "via686a: Device resource addition failed "
+		       "(%d)\n", err);
+		goto exit_device_put;
+	}
+
+	err = platform_device_add(pdev);
+	if (err) {
+		printk(KERN_ERR "via686a: Device addition failed (%d)\n",
+		       err);
+		goto exit_device_put;
+	}
+
+	return 0;
+
+exit_device_put:
+	platform_device_put(pdev);
+exit:
+	return err;
+}
+
 static int __devinit via686a_pci_probe(struct pci_dev *dev,
 				       const struct pci_device_id *id)
 {
-	u16 val;
+	u16 address, val;
 
+	if (force_addr) {
+		address = force_addr & ~(VIA686A_EXTENT - 1);
+		dev_warn(&dev->dev, "Forcing ISA address 0x%x\n", address);
+		if (PCIBIOS_SUCCESSFUL !=
+		    pci_write_config_word(dev, VIA686A_BASE_REG, address | 1))
+			return -ENODEV;
+	}
 	if (PCIBIOS_SUCCESSFUL !=
 	    pci_read_config_word(dev, VIA686A_BASE_REG, &val))
 		return -ENODEV;
 
 	address = val & ~(VIA686A_EXTENT - 1);
-	if (address == 0 && force_addr == 0) {
+	if (address == 0) {
 		dev_err(&dev->dev, "base address not set - upgrade BIOS "
 			"or use force_addr=0xaddr\n");
 		return -ENODEV;
 	}
 
-	s_bridge = pci_dev_get(dev);
-	if (i2c_isa_add_driver(&via686a_driver)) {
-		pci_dev_put(s_bridge);
-		s_bridge = NULL;
+	if (PCIBIOS_SUCCESSFUL !=
+	    pci_read_config_word(dev, VIA686A_ENABLE_REG, &val))
+		return -ENODEV;
+	if (!(val & 0x0001)) {
+		if (!force_addr) {
+			dev_warn(&dev->dev, "Sensors disabled, enable "
+				 "with force_addr=0x%x\n", address);
+			return -ENODEV;
+		}
+
+		dev_warn(&dev->dev, "Enabling sensors\n");
+		if (PCIBIOS_SUCCESSFUL !=
+		    pci_write_config_word(dev, VIA686A_ENABLE_REG,
+					  val | 0x0001))
+			return -ENODEV;
 	}
 
+	if (platform_driver_register(&via686a_driver))
+		goto exit;
+
+	/* Sets global pdev as a side effect */
+	if (via686a_device_add(address))
+		goto exit_unregister;
+
 	/* Always return failure here.  This is to allow other drivers to bind
 	 * to this pci device.  We don't really want to have control over the
 	 * pci device, we only wanted to read a few register values from it.
 	 */
+	s_bridge = pci_dev_get(dev);
+	return -ENODEV;
+
+exit_unregister:
+	platform_driver_unregister(&via686a_driver);
+exit:
 	return -ENODEV;
 }
 
@@ -862,7 +859,8 @@
 {
 	pci_unregister_driver(&via686a_pci_driver);
 	if (s_bridge != NULL) {
-		i2c_isa_del_driver(&via686a_driver);
+		platform_device_unregister(pdev);
+		platform_driver_unregister(&via686a_driver);
 		pci_dev_put(s_bridge);
 		s_bridge = NULL;
 	}
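
The via686a hunks above (and the vt8231 and w83627ehf hunks below) all perform the same i2c-isa to platform-driver conversion: the driver registers a platform_driver, and the PCI probe routine creates a single platform device carrying the ISA I/O region as a resource.  The skeleton below is an illustration only, not part of the patch; "mychip", MYCHIP_EXTENT and the example address are placeholders standing in for the per-driver values.

/*
 * Illustrative skeleton only -- not part of the patch.  "mychip",
 * MYCHIP_EXTENT and the example address are placeholders.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

#define MYCHIP_EXTENT	0x80			/* size of the I/O window */

static struct platform_device *pdev;		/* single instance, created at init */

static int __devinit mychip_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_IO, 0);

	if (!request_region(res->start, MYCHIP_EXTENT, "mychip"))
		return -EBUSY;
	/* allocate private data, platform_set_drvdata(), create sysfs files... */
	return 0;
}

static int __devexit mychip_remove(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_IO, 0);

	/* remove sysfs files, free private data... */
	release_region(res->start, MYCHIP_EXTENT);
	return 0;
}

static struct platform_driver mychip_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "mychip",
	},
	.probe		= mychip_probe,
	.remove		= __devexit_p(mychip_remove),
};

/* Create the platform device once the I/O base address is known. */
static int __init mychip_device_add(unsigned short address)
{
	struct resource res = {
		.start	= address,
		.end	= address + MYCHIP_EXTENT - 1,
		.name	= "mychip",
		.flags	= IORESOURCE_IO,
	};
	int err;

	pdev = platform_device_alloc("mychip", address);
	if (!pdev)
		return -ENOMEM;

	err = platform_device_add_resources(pdev, &res, 1);
	if (!err)
		err = platform_device_add(pdev);
	if (err)
		platform_device_put(pdev);
	return err;
}

static int __init mychip_init(void)
{
	/* 0x6000 stands in for an address normally read from PCI config space */
	int err = platform_driver_register(&mychip_driver);

	if (err)
		return err;
	err = mychip_device_add(0x6000);
	if (err)
		platform_driver_unregister(&mychip_driver);
	return err;
}

static void __exit mychip_exit(void)
{
	platform_device_unregister(pdev);
	platform_driver_unregister(&mychip_driver);
}

module_init(mychip_init);
module_exit(mychip_exit);
MODULE_LICENSE("GPL");
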
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index a6a4aa0..c604972 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -29,8 +29,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/i2c-isa.h>
+#include <linux/platform_device.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/hwmon-vid.h>
@@ -42,10 +41,7 @@
 module_param(force_addr, int, 0);
 MODULE_PARM_DESC(force_addr, "Initialize the base address of the sensors");
 
-/* Device address
-   Note that we can't determine the ISA address until we have initialized
-   our module */
-static unsigned short isa_address;
+static struct platform_device *pdev;
 
 #define VT8231_EXTENT 0x80
 #define VT8231_BASE_REG 0x70
@@ -148,7 +144,9 @@
 #define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : 1310720 / ((val) * (div)))
 
 struct vt8231_data {
-	struct i2c_client client;
+	unsigned short addr;
+	const char *name;
+
 	struct mutex update_lock;
 	struct class_device *class_dev;
 	char valid;		/* !=0 if following fields are valid */
@@ -168,20 +166,20 @@
 };
 
 static struct pci_dev *s_bridge;
-static int vt8231_detect(struct i2c_adapter *adapter);
-static int vt8231_detach_client(struct i2c_client *client);
+static int vt8231_probe(struct platform_device *pdev);
+static int vt8231_remove(struct platform_device *pdev);
 static struct vt8231_data *vt8231_update_device(struct device *dev);
-static void vt8231_init_client(struct i2c_client *client);
+static void vt8231_init_device(struct vt8231_data *data);
 
-static inline int vt8231_read_value(struct i2c_client *client, u8 reg)
+static inline int vt8231_read_value(struct vt8231_data *data, u8 reg)
 {
-	return inb_p(client->addr + reg);
+	return inb_p(data->addr + reg);
 }
 
-static inline void vt8231_write_value(struct i2c_client *client, u8 reg,
+static inline void vt8231_write_value(struct vt8231_data *data, u8 reg,
 					u8 value)
 {
-	outb_p(value, client->addr + reg);
+	outb_p(value, data->addr + reg);
 }
 
 /* following are the sysfs callback functions */
@@ -220,13 +218,12 @@
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
-	struct i2c_client *client = to_i2c_client(dev);
-	struct vt8231_data *data = i2c_get_clientdata(client);
+	struct vt8231_data *data = dev_get_drvdata(dev);
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->in_min[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255);
-	vt8231_write_value(client, regvoltmin[nr], data->in_min[nr]);
+	vt8231_write_value(data, regvoltmin[nr], data->in_min[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -236,13 +233,12 @@
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
-	struct i2c_client *client = to_i2c_client(dev);
-	struct vt8231_data *data = i2c_get_clientdata(client);
+	struct vt8231_data *data = dev_get_drvdata(dev);
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->in_max[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255);
-	vt8231_write_value(client, regvoltmax[nr], data->in_max[nr]);
+	vt8231_write_value(data, regvoltmax[nr], data->in_max[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -278,14 +274,13 @@
 static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct vt8231_data *data = i2c_get_clientdata(client);
+	struct vt8231_data *data = dev_get_drvdata(dev);
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->in_min[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3,
 					0, 255);
-	vt8231_write_value(client, regvoltmin[5], data->in_min[5]);
+	vt8231_write_value(data, regvoltmin[5], data->in_min[5]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -293,14 +288,13 @@
 static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct vt8231_data *data = i2c_get_clientdata(client);
+	struct vt8231_data *data = dev_get_drvdata(dev);
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->in_max[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3,
 					0, 255);
-	vt8231_write_value(client, regvoltmax[5], data->in_max[5]);
+	vt8231_write_value(data, regvoltmax[5], data->in_max[5]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -348,26 +342,24 @@
 static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct vt8231_data *data = i2c_get_clientdata(client);
+	struct vt8231_data *data = dev_get_drvdata(dev);
 	int val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->temp_max[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255);
-	vt8231_write_value(client, regtempmax[0], data->temp_max[0]);
+	vt8231_write_value(data, regtempmax[0], data->temp_max[0]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
 static ssize_t set_temp0_min(struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct vt8231_data *data = i2c_get_clientdata(client);
+	struct vt8231_data *data = dev_get_drvdata(dev);
 	int val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->temp_min[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255);
-	vt8231_write_value(client, regtempmin[0], data->temp_min[0]);
+	vt8231_write_value(data, regtempmin[0], data->temp_min[0]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -404,13 +396,12 @@
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
-	struct i2c_client *client = to_i2c_client(dev);
-	struct vt8231_data *data = i2c_get_clientdata(client);
+	struct vt8231_data *data = dev_get_drvdata(dev);
 	int val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->temp_max[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255);
-	vt8231_write_value(client, regtempmax[nr], data->temp_max[nr]);
+	vt8231_write_value(data, regtempmax[nr], data->temp_max[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -419,13 +410,12 @@
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
-	struct i2c_client *client = to_i2c_client(dev);
-	struct vt8231_data *data = i2c_get_clientdata(client);
+	struct vt8231_data *data = dev_get_drvdata(dev);
 	int val = simple_strtol(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->temp_min[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255);
-	vt8231_write_value(client, regtempmin[nr], data->temp_min[nr]);
+	vt8231_write_value(data, regtempmin[nr], data->temp_min[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -486,13 +476,12 @@
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
-	struct i2c_client *client = to_i2c_client(dev);
-	struct vt8231_data *data = i2c_get_clientdata(client);
+	struct vt8231_data *data = dev_get_drvdata(dev);
 	int val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
 	data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
-	vt8231_write_value(client, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]);
+	vt8231_write_value(data, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -500,12 +489,11 @@
 static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct vt8231_data *data = i2c_get_clientdata(client);
+	struct vt8231_data *data = dev_get_drvdata(dev);
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 	int nr = sensor_attr->index;
-	int old = vt8231_read_value(client, VT8231_REG_FANDIV);
+	int old = vt8231_read_value(data, VT8231_REG_FANDIV);
 	long min = FAN_FROM_REG(data->fan_min[nr],
 				 DIV_FROM_REG(data->fan_div[nr]));
 
@@ -516,7 +504,7 @@
 	case 4: data->fan_div[nr] = 2; break;
 	case 8: data->fan_div[nr] = 3; break;
 	default:
-		dev_err(&client->dev, "fan_div value %ld not supported."
+		dev_err(dev, "fan_div value %ld not supported. "
 		        "Choose one of 1, 2, 4 or 8!\n", val);
 		mutex_unlock(&data->update_lock);
 		return -EINVAL;
@@ -524,10 +512,10 @@
 
 	/* Correct the fan minimum speed */
 	data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
-	vt8231_write_value(client, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]);
+	vt8231_write_value(data, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]);
 
 	old = (old & 0x0f) | (data->fan_div[1] << 6) | (data->fan_div[0] << 4);
-	vt8231_write_value(client, VT8231_REG_FANDIV, old);
+	vt8231_write_value(data, VT8231_REG_FANDIV, old);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -551,9 +539,16 @@
 	struct vt8231_data *data = vt8231_update_device(dev);
 	return sprintf(buf, "%d\n", data->alarms);
 }
-
 static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
 
+static ssize_t show_name(struct device *dev, struct device_attribute
+			 *devattr, char *buf)
+{
+	struct vt8231_data *data = dev_get_drvdata(dev);
+	return sprintf(buf, "%s\n", data->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
 static struct attribute *vt8231_attributes_temps[6][4] = {
 	{
 		&dev_attr_temp1_input.attr,
@@ -648,6 +643,7 @@
 	&sensor_dev_attr_fan1_div.dev_attr.attr,
 	&sensor_dev_attr_fan2_div.dev_attr.attr,
 	&dev_attr_alarms.attr,
+	&dev_attr_name.attr,
 	NULL
 };
 
@@ -655,13 +651,13 @@
 	.attrs = vt8231_attributes,
 };
 
-static struct i2c_driver vt8231_driver = {
+static struct platform_driver vt8231_driver = {
 	.driver = {
 		.owner	= THIS_MODULE,
 		.name	= "vt8231",
 	},
-	.attach_adapter	= vt8231_detect,
-	.detach_client	= vt8231_detach_client,
+	.probe	= vt8231_probe,
+	.remove	= __devexit_p(vt8231_remove),
 };
 
 static struct pci_device_id vt8231_pci_ids[] = {
@@ -680,40 +676,18 @@
 	.probe		= vt8231_pci_probe,
 };
 
-int vt8231_detect(struct i2c_adapter *adapter)
+int vt8231_probe(struct platform_device *pdev)
 {
-	struct i2c_client *client;
+	struct resource *res;
 	struct vt8231_data *data;
 	int err = 0, i;
-	u16 val;
-
-	/* 8231 requires multiple of 256 */
-	if (force_addr)	{
-		isa_address = force_addr & 0xFF00;
-		dev_warn(&adapter->dev, "forcing ISA address 0x%04X\n",
-				 isa_address);
-		if (PCIBIOS_SUCCESSFUL != pci_write_config_word(s_bridge,
-						VT8231_BASE_REG, isa_address))
-			return -ENODEV;
-	}
-
-	if (PCIBIOS_SUCCESSFUL !=
-		pci_read_config_word(s_bridge, VT8231_ENABLE_REG, &val))
-		return -ENODEV;
-
-	if (!(val & 0x0001)) {
-		dev_warn(&adapter->dev, "enabling sensors\n");
-		if (PCIBIOS_SUCCESSFUL !=
-			pci_write_config_word(s_bridge, VT8231_ENABLE_REG,
-							  val | 0x0001))
-			return -ENODEV;
-	}
 
 	/* Reserve the ISA region */
-	if (!request_region(isa_address, VT8231_EXTENT,
-			    vt8231_pci_driver.name)) {
-		dev_err(&adapter->dev, "region 0x%x already in use!\n",
-			   isa_address);
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!request_region(res->start, VT8231_EXTENT,
+			    vt8231_driver.driver.name)) {
+		dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n",
+			(unsigned long)res->start, (unsigned long)res->end);
 		return -ENODEV;
 	}
 
@@ -722,33 +696,23 @@
 		goto exit_release;
 	}
 
-	client = &data->client;
-	i2c_set_clientdata(client, data);
-	client->addr = isa_address;
-	client->adapter = adapter;
-	client->driver = &vt8231_driver;
-
-	/* Fill in the remaining client fields and put into the global list */
-	strlcpy(client->name, "vt8231", I2C_NAME_SIZE);
+	platform_set_drvdata(pdev, data);
+	data->addr = res->start;
+	data->name = "vt8231";
 
 	mutex_init(&data->update_lock);
-
-	/* Tell the I2C layer a new client has arrived */
-	if ((err = i2c_attach_client(client)))
-		goto exit_free;
-
-	vt8231_init_client(client);
+	vt8231_init_device(data);
 
 	/* Register sysfs hooks */
-	if ((err = sysfs_create_group(&client->dev.kobj, &vt8231_group)))
-		goto exit_detach;
+	if ((err = sysfs_create_group(&pdev->dev.kobj, &vt8231_group)))
+		goto exit_free;
 
 	/* Must update device information to find out the config field */
-	data->uch_config = vt8231_read_value(client, VT8231_REG_UCH_CONFIG);
+	data->uch_config = vt8231_read_value(data, VT8231_REG_UCH_CONFIG);
 
 	for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) {
 		if (ISTEMP(i, data->uch_config)) {
-			if ((err = sysfs_create_group(&client->dev.kobj,
+			if ((err = sysfs_create_group(&pdev->dev.kobj,
 					&vt8231_group_temps[i])))
 				goto exit_remove_files;
 		}
@@ -756,13 +720,13 @@
 
 	for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) {
 		if (ISVOLT(i, data->uch_config)) {
-			if ((err = sysfs_create_group(&client->dev.kobj,
+			if ((err = sysfs_create_group(&pdev->dev.kobj,
 					&vt8231_group_volts[i])))
 				goto exit_remove_files;
 		}
 	}
 
-	data->class_dev = hwmon_device_register(&client->dev);
+	data->class_dev = hwmon_device_register(&pdev->dev);
 	if (IS_ERR(data->class_dev)) {
 		err = PTR_ERR(data->class_dev);
 		goto exit_remove_files;
@@ -771,56 +735,52 @@
 
 exit_remove_files:
 	for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++)
-		sysfs_remove_group(&client->dev.kobj, &vt8231_group_volts[i]);
+		sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_volts[i]);
 
 	for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++)
-		sysfs_remove_group(&client->dev.kobj, &vt8231_group_temps[i]);
+		sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_temps[i]);
 
-	sysfs_remove_group(&client->dev.kobj, &vt8231_group);
-exit_detach:
-	i2c_detach_client(client);
+	sysfs_remove_group(&pdev->dev.kobj, &vt8231_group);
+
 exit_free:
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
+
 exit_release:
-	release_region(isa_address, VT8231_EXTENT);
+	release_region(res->start, VT8231_EXTENT);
 	return err;
 }
 
-static int vt8231_detach_client(struct i2c_client *client)
+static int vt8231_remove(struct platform_device *pdev)
 {
-	struct vt8231_data *data = i2c_get_clientdata(client);
-	int err, i;
+	struct vt8231_data *data = platform_get_drvdata(pdev);
+	int i;
 
 	hwmon_device_unregister(data->class_dev);
 
 	for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++)
-		sysfs_remove_group(&client->dev.kobj, &vt8231_group_volts[i]);
+		sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_volts[i]);
 
 	for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++)
-		sysfs_remove_group(&client->dev.kobj, &vt8231_group_temps[i]);
+		sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_temps[i]);
 
-	sysfs_remove_group(&client->dev.kobj, &vt8231_group);
+	sysfs_remove_group(&pdev->dev.kobj, &vt8231_group);
 
-	if ((err = i2c_detach_client(client))) {
-		return err;
-	}
-
-	release_region(client->addr, VT8231_EXTENT);
+	release_region(data->addr, VT8231_EXTENT);
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
-
 	return 0;
 }
 
-static void vt8231_init_client(struct i2c_client *client)
+static void vt8231_init_device(struct vt8231_data *data)
 {
-	vt8231_write_value(client, VT8231_REG_TEMP1_CONFIG, 0);
-	vt8231_write_value(client, VT8231_REG_TEMP2_CONFIG, 0);
+	vt8231_write_value(data, VT8231_REG_TEMP1_CONFIG, 0);
+	vt8231_write_value(data, VT8231_REG_TEMP2_CONFIG, 0);
 }
 
 static struct vt8231_data *vt8231_update_device(struct device *dev)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct vt8231_data *data = i2c_get_clientdata(client);
+	struct vt8231_data *data = dev_get_drvdata(dev);
 	int i;
 	u16 low;
 
@@ -830,41 +790,41 @@
 	    || !data->valid) {
 		for (i = 0; i < 6; i++) {
 			if (ISVOLT(i, data->uch_config)) {
-				data->in[i] = vt8231_read_value(client,
+				data->in[i] = vt8231_read_value(data,
 						regvolt[i]);
-				data->in_min[i] = vt8231_read_value(client,
+				data->in_min[i] = vt8231_read_value(data,
 						regvoltmin[i]);
-				data->in_max[i] = vt8231_read_value(client,
+				data->in_max[i] = vt8231_read_value(data,
 						regvoltmax[i]);
 			}
 		}
 		for (i = 0; i < 2; i++) {
-			data->fan[i] = vt8231_read_value(client,
+			data->fan[i] = vt8231_read_value(data,
 						VT8231_REG_FAN(i));
-			data->fan_min[i] = vt8231_read_value(client,
+			data->fan_min[i] = vt8231_read_value(data,
 						VT8231_REG_FAN_MIN(i));
 		}
 
-		low = vt8231_read_value(client, VT8231_REG_TEMP_LOW01);
+		low = vt8231_read_value(data, VT8231_REG_TEMP_LOW01);
 		low = (low >> 6) | ((low & 0x30) >> 2)
-		    | (vt8231_read_value(client, VT8231_REG_TEMP_LOW25) << 4);
+		    | (vt8231_read_value(data, VT8231_REG_TEMP_LOW25) << 4);
 		for (i = 0; i < 6; i++) {
 			if (ISTEMP(i, data->uch_config)) {
-				data->temp[i] = (vt8231_read_value(client,
+				data->temp[i] = (vt8231_read_value(data,
 						       regtemp[i]) << 2)
 						| ((low >> (2 * i)) & 0x03);
-				data->temp_max[i] = vt8231_read_value(client,
+				data->temp_max[i] = vt8231_read_value(data,
 						      regtempmax[i]);
-				data->temp_min[i] = vt8231_read_value(client,
+				data->temp_min[i] = vt8231_read_value(data,
 						      regtempmin[i]);
 			}
 		}
 
-		i = vt8231_read_value(client, VT8231_REG_FANDIV);
+		i = vt8231_read_value(data, VT8231_REG_FANDIV);
 		data->fan_div[0] = (i >> 4) & 0x03;
 		data->fan_div[1] = i >> 6;
-		data->alarms = vt8231_read_value(client, VT8231_REG_ALARM1) |
-			(vt8231_read_value(client, VT8231_REG_ALARM2) << 8);
+		data->alarms = vt8231_read_value(data, VT8231_REG_ALARM1) |
+			(vt8231_read_value(data, VT8231_REG_ALARM2) << 8);
 
 		/* Set alarm flags correctly */
 		if (!data->fan[0] && data->fan_min[0]) {
@@ -888,33 +848,102 @@
 	return data;
 }
 
+static int __devinit vt8231_device_add(unsigned short address)
+{
+	struct resource res = {
+		.start	= address,
+		.end	= address + VT8231_EXTENT - 1,
+		.name	= "vt8231",
+		.flags	= IORESOURCE_IO,
+	};
+	int err;
+
+	pdev = platform_device_alloc("vt8231", address);
+	if (!pdev) {
+		err = -ENOMEM;
+		printk(KERN_ERR "vt8231: Device allocation failed\n");
+		goto exit;
+	}
+
+	err = platform_device_add_resources(pdev, &res, 1);
+	if (err) {
+		printk(KERN_ERR "vt8231: Device resource addition failed "
+		       "(%d)\n", err);
+		goto exit_device_put;
+	}
+
+	err = platform_device_add(pdev);
+	if (err) {
+		printk(KERN_ERR "vt8231: Device addition failed (%d)\n",
+		       err);
+		goto exit_device_put;
+	}
+
+	return 0;
+
+exit_device_put:
+	platform_device_put(pdev);
+exit:
+	return err;
+}
+
 static int __devinit vt8231_pci_probe(struct pci_dev *dev,
 				const struct pci_device_id *id)
 {
-	u16 val;
+	u16 address, val;
+	if (force_addr) {
+		address = force_addr & 0xff00;
+		dev_warn(&dev->dev, "Forcing ISA address 0x%x\n",
+			 address);
+
+		if (PCIBIOS_SUCCESSFUL !=
+		    pci_write_config_word(dev, VT8231_BASE_REG, address | 1))
+			return -ENODEV;
+	}
 
 	if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VT8231_BASE_REG,
 							&val))
 		return -ENODEV;
 
-	isa_address = val & ~(VT8231_EXTENT - 1);
-	if (isa_address == 0 && force_addr == 0) {
+	address = val & ~(VT8231_EXTENT - 1);
+	if (address == 0) {
 		dev_err(&dev->dev, "base address not set - "
 			"upgrade BIOS or use force_addr=0xaddr\n");
 		return -ENODEV;
 	}
 
-	s_bridge = pci_dev_get(dev);
+	if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VT8231_ENABLE_REG,
+							&val))
+		return -ENODEV;
 
-	if (i2c_isa_add_driver(&vt8231_driver)) {
-		pci_dev_put(s_bridge);
-		s_bridge = NULL;
+	if (!(val & 0x0001)) {
+		dev_warn(&dev->dev, "enabling sensors\n");
+		if (PCIBIOS_SUCCESSFUL !=
+			pci_write_config_word(dev, VT8231_ENABLE_REG,
+							val | 0x0001))
+			return -ENODEV;
 	}
 
+	if (platform_driver_register(&vt8231_driver))
+		goto exit;
+
+	/* Sets global pdev as a side effect */
+	if (vt8231_device_add(address))
+		goto exit_unregister;
+
 	/* Always return failure here.  This is to allow other drivers to bind
 	 * to this pci device.  We don't really want to have control over the
 	 * pci device, we only wanted to read a few register values from it.
 	 */
+
+	/* We do, however, mark ourselves as using the PCI device to stop it
+	   getting unloaded. */
+	s_bridge = pci_dev_get(dev);
+	return -ENODEV;
+
+exit_unregister:
+	platform_driver_unregister(&vt8231_driver);
+exit:
 	return -ENODEV;
 }
 
@@ -927,7 +956,8 @@
 {
 	pci_unregister_driver(&vt8231_pci_driver);
 	if (s_bridge != NULL) {
-		i2c_isa_del_driver(&vt8231_driver);
+		platform_device_unregister(pdev);
+		platform_driver_unregister(&vt8231_driver);
 		pci_dev_put(s_bridge);
 		s_bridge = NULL;
 	}
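
A second pattern repeated in all three drivers above is the move from per-offset DEVICE_ATTR callbacks to a single SENSOR_DEVICE_ATTR callback that takes its channel number from the attribute index and its state from dev_get_drvdata() rather than i2c_get_clientdata().  The sketch below is illustrative only, with hypothetical "mychip" names; the fan divisor encoding (register value v means divisor 1 << v) follows the convention used by these chips.

/*
 * Illustrative only -- not part of the patch.  Shows the sysfs callback
 * shape after the conversion: the channel comes from the attribute index
 * and the state comes from dev_get_drvdata().  "mychip" is a placeholder.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/hwmon-sysfs.h>

struct mychip_data {
	unsigned short addr;		/* I/O base of the monitoring block */
	const char *name;		/* exported through the "name" attribute */
	u8 fan_div[2];
};

static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
			    char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct mychip_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", 1 << data->fan_div[attr->index]);
}

static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1);

static struct attribute *mychip_attributes[] = {
	&sensor_dev_attr_fan1_div.dev_attr.attr,
	&sensor_dev_attr_fan2_div.dev_attr.attr,
	NULL
};

static struct attribute_group mychip_group = {
	.attrs = mychip_attributes,
};

The group would then be created from probe with sysfs_create_group(&pdev->dev.kobj, &mychip_group), exactly as the converted drivers above do.
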
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 30a7640..c51ae2e 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -41,41 +41,39 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/i2c-isa.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
+#include <linux/hwmon-vid.h>
 #include <linux/err.h>
 #include <linux/mutex.h>
 #include <asm/io.h>
 #include "lm75.h"
 
-/* The actual ISA address is read from Super-I/O configuration space */
-static unsigned short address;
+enum kinds { w83627ehf, w83627dhg };
+
+/* used to set data->name = w83627ehf_device_names[data->sio_kind] */
+static const char * w83627ehf_device_names[] = {
+	"w83627ehf",
+	"w83627dhg",
+};
+
+#define DRVNAME "w83627ehf"
 
 /*
  * Super-I/O constants and functions
  */
 
-/*
- * The three following globals are initialized in w83627ehf_find(), before
- * the i2c-isa device is created. Otherwise, they could be stored in
- * w83627ehf_data. This is ugly, but necessary, and when the driver is next
- * updated to become a platform driver, the globals will disappear.
- */
-static int REG;		/* The register to read/write */
-static int VAL;		/* The value to read/write */
-/* The w83627ehf/ehg have 10 voltage inputs, but the w83627dhg has 9. This
- * value is also used in w83627ehf_detect() to export a device name in sysfs
- * (e.g. w83627ehf or w83627dhg) */
-static int w83627ehf_num_in;
-
 #define W83627EHF_LD_HWM	0x0b
 
 #define SIO_REG_LDSEL		0x07	/* Logical device select */
 #define SIO_REG_DEVID		0x20	/* Device ID (2 bytes) */
+#define SIO_REG_EN_VRM10	0x2C	/* GPIO3, GPIO4 selection */
 #define SIO_REG_ENABLE		0x30	/* Logical device enable */
 #define SIO_REG_ADDR		0x60	/* Logical device address (2 bytes) */
+#define SIO_REG_VID_CTRL	0xF0	/* VID control */
+#define SIO_REG_VID_DATA	0xF1	/* VID data */
 
 #define SIO_W83627EHF_ID	0x8850
 #define SIO_W83627EHG_ID	0x8860
@@ -83,38 +81,38 @@
 #define SIO_ID_MASK		0xFFF0
 
 static inline void
-superio_outb(int reg, int val)
+superio_outb(int ioreg, int reg, int val)
 {
-	outb(reg, REG);
-	outb(val, VAL);
+	outb(reg, ioreg);
+	outb(val, ioreg + 1);
 }
 
 static inline int
-superio_inb(int reg)
+superio_inb(int ioreg, int reg)
 {
-	outb(reg, REG);
-	return inb(VAL);
+	outb(reg, ioreg);
+	return inb(ioreg + 1);
 }
 
 static inline void
-superio_select(int ld)
+superio_select(int ioreg, int ld)
 {
-	outb(SIO_REG_LDSEL, REG);
-	outb(ld, VAL);
+	outb(SIO_REG_LDSEL, ioreg);
+	outb(ld, ioreg + 1);
 }
 
 static inline void
-superio_enter(void)
+superio_enter(int ioreg)
 {
-	outb(0x87, REG);
-	outb(0x87, REG);
+	outb(0x87, ioreg);
+	outb(0x87, ioreg);
 }
 
 static inline void
-superio_exit(void)
+superio_exit(int ioreg)
 {
-	outb(0x02, REG);
-	outb(0x02, VAL);
+	outb(0x02, ioreg);
+	outb(0x02, ioreg + 1);
 }
 
 /*
@@ -124,8 +122,8 @@
 #define IOREGION_ALIGNMENT	~7
 #define IOREGION_OFFSET		5
 #define IOREGION_LENGTH		2
-#define ADDR_REG_OFFSET		5
-#define DATA_REG_OFFSET		6
+#define ADDR_REG_OFFSET		0
+#define DATA_REG_OFFSET		1
 
 #define W83627EHF_REG_BANK		0x4E
 #define W83627EHF_REG_CONFIG		0x40
@@ -255,7 +253,9 @@
  */
 
 struct w83627ehf_data {
-	struct i2c_client client;
+	int addr;	/* IO base of hw monitor block */
+	const char *name;
+
 	struct class_device *class_dev;
 	struct mutex lock;
 
@@ -264,6 +264,7 @@
 	unsigned long last_updated;	/* In jiffies */
 
 	/* Register values */
+	u8 in_num;		/* number of in inputs we have */
 	u8 in[10];		/* Register value */
 	u8 in_max[10];		/* Register value */
 	u8 in_min[10];		/* Register value */
@@ -271,6 +272,7 @@
 	u8 fan_min[5];
 	u8 fan_div[5];
 	u8 has_fan;		/* some fan inputs can be disabled */
+	u8 temp_type[3];
 	s8 temp1;
 	s8 temp1_max;
 	s8 temp1_max_hyst;
@@ -288,6 +290,14 @@
 
 	u8 fan_min_output[4]; /* minimum fan speed */
 	u8 fan_stop_time[4];
+
+	u8 vid;
+	u8 vrm;
+};
+
+struct w83627ehf_sio_data {
+	int sioreg;
+	enum kinds kind;
 };
 
 static inline int is_word_sized(u16 reg)
@@ -303,156 +313,152 @@
    nothing for registers which live in bank 0. For others, they respectively
    set the bank register to the correct value (before the register is
    accessed), and back to 0 (afterwards). */
-static inline void w83627ehf_set_bank(struct i2c_client *client, u16 reg)
+static inline void w83627ehf_set_bank(struct w83627ehf_data *data, u16 reg)
 {
 	if (reg & 0xff00) {
-		outb_p(W83627EHF_REG_BANK, client->addr + ADDR_REG_OFFSET);
-		outb_p(reg >> 8, client->addr + DATA_REG_OFFSET);
+		outb_p(W83627EHF_REG_BANK, data->addr + ADDR_REG_OFFSET);
+		outb_p(reg >> 8, data->addr + DATA_REG_OFFSET);
 	}
 }
 
-static inline void w83627ehf_reset_bank(struct i2c_client *client, u16 reg)
+static inline void w83627ehf_reset_bank(struct w83627ehf_data *data, u16 reg)
 {
 	if (reg & 0xff00) {
-		outb_p(W83627EHF_REG_BANK, client->addr + ADDR_REG_OFFSET);
-		outb_p(0, client->addr + DATA_REG_OFFSET);
+		outb_p(W83627EHF_REG_BANK, data->addr + ADDR_REG_OFFSET);
+		outb_p(0, data->addr + DATA_REG_OFFSET);
 	}
 }
 
-static u16 w83627ehf_read_value(struct i2c_client *client, u16 reg)
+static u16 w83627ehf_read_value(struct w83627ehf_data *data, u16 reg)
 {
-	struct w83627ehf_data *data = i2c_get_clientdata(client);
 	int res, word_sized = is_word_sized(reg);
 
 	mutex_lock(&data->lock);
 
-	w83627ehf_set_bank(client, reg);
-	outb_p(reg & 0xff, client->addr + ADDR_REG_OFFSET);
-	res = inb_p(client->addr + DATA_REG_OFFSET);
+	w83627ehf_set_bank(data, reg);
+	outb_p(reg & 0xff, data->addr + ADDR_REG_OFFSET);
+	res = inb_p(data->addr + DATA_REG_OFFSET);
 	if (word_sized) {
 		outb_p((reg & 0xff) + 1,
-		       client->addr + ADDR_REG_OFFSET);
-		res = (res << 8) + inb_p(client->addr + DATA_REG_OFFSET);
+		       data->addr + ADDR_REG_OFFSET);
+		res = (res << 8) + inb_p(data->addr + DATA_REG_OFFSET);
 	}
-	w83627ehf_reset_bank(client, reg);
+	w83627ehf_reset_bank(data, reg);
 
 	mutex_unlock(&data->lock);
 
 	return res;
 }
 
-static int w83627ehf_write_value(struct i2c_client *client, u16 reg, u16 value)
+static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg, u16 value)
 {
-	struct w83627ehf_data *data = i2c_get_clientdata(client);
 	int word_sized = is_word_sized(reg);
 
 	mutex_lock(&data->lock);
 
-	w83627ehf_set_bank(client, reg);
-	outb_p(reg & 0xff, client->addr + ADDR_REG_OFFSET);
+	w83627ehf_set_bank(data, reg);
+	outb_p(reg & 0xff, data->addr + ADDR_REG_OFFSET);
 	if (word_sized) {
-		outb_p(value >> 8, client->addr + DATA_REG_OFFSET);
+		outb_p(value >> 8, data->addr + DATA_REG_OFFSET);
 		outb_p((reg & 0xff) + 1,
-		       client->addr + ADDR_REG_OFFSET);
+		       data->addr + ADDR_REG_OFFSET);
 	}
-	outb_p(value & 0xff, client->addr + DATA_REG_OFFSET);
-	w83627ehf_reset_bank(client, reg);
+	outb_p(value & 0xff, data->addr + DATA_REG_OFFSET);
+	w83627ehf_reset_bank(data, reg);
 
 	mutex_unlock(&data->lock);
 	return 0;
 }
 
 /* This function assumes that the caller holds data->update_lock */
-static void w83627ehf_write_fan_div(struct i2c_client *client, int nr)
+static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
 {
-	struct w83627ehf_data *data = i2c_get_clientdata(client);
 	u8 reg;
 
 	switch (nr) {
 	case 0:
-		reg = (w83627ehf_read_value(client, W83627EHF_REG_FANDIV1) & 0xcf)
+		reg = (w83627ehf_read_value(data, W83627EHF_REG_FANDIV1) & 0xcf)
 		    | ((data->fan_div[0] & 0x03) << 4);
 		/* fan5 input control bit is write only, compute the value */
 		reg |= (data->has_fan & (1 << 4)) ? 1 : 0;
-		w83627ehf_write_value(client, W83627EHF_REG_FANDIV1, reg);
-		reg = (w83627ehf_read_value(client, W83627EHF_REG_VBAT) & 0xdf)
+		w83627ehf_write_value(data, W83627EHF_REG_FANDIV1, reg);
+		reg = (w83627ehf_read_value(data, W83627EHF_REG_VBAT) & 0xdf)
 		    | ((data->fan_div[0] & 0x04) << 3);
-		w83627ehf_write_value(client, W83627EHF_REG_VBAT, reg);
+		w83627ehf_write_value(data, W83627EHF_REG_VBAT, reg);
 		break;
 	case 1:
-		reg = (w83627ehf_read_value(client, W83627EHF_REG_FANDIV1) & 0x3f)
+		reg = (w83627ehf_read_value(data, W83627EHF_REG_FANDIV1) & 0x3f)
 		    | ((data->fan_div[1] & 0x03) << 6);
 		/* fan5 input control bit is write only, compute the value */
 		reg |= (data->has_fan & (1 << 4)) ? 1 : 0;
-		w83627ehf_write_value(client, W83627EHF_REG_FANDIV1, reg);
-		reg = (w83627ehf_read_value(client, W83627EHF_REG_VBAT) & 0xbf)
+		w83627ehf_write_value(data, W83627EHF_REG_FANDIV1, reg);
+		reg = (w83627ehf_read_value(data, W83627EHF_REG_VBAT) & 0xbf)
 		    | ((data->fan_div[1] & 0x04) << 4);
-		w83627ehf_write_value(client, W83627EHF_REG_VBAT, reg);
+		w83627ehf_write_value(data, W83627EHF_REG_VBAT, reg);
 		break;
 	case 2:
-		reg = (w83627ehf_read_value(client, W83627EHF_REG_FANDIV2) & 0x3f)
+		reg = (w83627ehf_read_value(data, W83627EHF_REG_FANDIV2) & 0x3f)
 		    | ((data->fan_div[2] & 0x03) << 6);
-		w83627ehf_write_value(client, W83627EHF_REG_FANDIV2, reg);
-		reg = (w83627ehf_read_value(client, W83627EHF_REG_VBAT) & 0x7f)
+		w83627ehf_write_value(data, W83627EHF_REG_FANDIV2, reg);
+		reg = (w83627ehf_read_value(data, W83627EHF_REG_VBAT) & 0x7f)
 		    | ((data->fan_div[2] & 0x04) << 5);
-		w83627ehf_write_value(client, W83627EHF_REG_VBAT, reg);
+		w83627ehf_write_value(data, W83627EHF_REG_VBAT, reg);
 		break;
 	case 3:
-		reg = (w83627ehf_read_value(client, W83627EHF_REG_DIODE) & 0xfc)
+		reg = (w83627ehf_read_value(data, W83627EHF_REG_DIODE) & 0xfc)
 		    | (data->fan_div[3] & 0x03);
-		w83627ehf_write_value(client, W83627EHF_REG_DIODE, reg);
-		reg = (w83627ehf_read_value(client, W83627EHF_REG_SMI_OVT) & 0x7f)
+		w83627ehf_write_value(data, W83627EHF_REG_DIODE, reg);
+		reg = (w83627ehf_read_value(data, W83627EHF_REG_SMI_OVT) & 0x7f)
 		    | ((data->fan_div[3] & 0x04) << 5);
-		w83627ehf_write_value(client, W83627EHF_REG_SMI_OVT, reg);
+		w83627ehf_write_value(data, W83627EHF_REG_SMI_OVT, reg);
 		break;
 	case 4:
-		reg = (w83627ehf_read_value(client, W83627EHF_REG_DIODE) & 0x73)
+		reg = (w83627ehf_read_value(data, W83627EHF_REG_DIODE) & 0x73)
 		    | ((data->fan_div[4] & 0x03) << 2)
 		    | ((data->fan_div[4] & 0x04) << 5);
-		w83627ehf_write_value(client, W83627EHF_REG_DIODE, reg);
+		w83627ehf_write_value(data, W83627EHF_REG_DIODE, reg);
 		break;
 	}
 }
 
 static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct w83627ehf_data *data = i2c_get_clientdata(client);
+	struct w83627ehf_data *data = dev_get_drvdata(dev);
 	int pwmcfg = 0, tolerance = 0; /* shut up the compiler */
 	int i;
 
 	mutex_lock(&data->update_lock);
 
-	if (time_after(jiffies, data->last_updated + HZ)
+	if (time_after(jiffies, data->last_updated + HZ + HZ/2)
 	 || !data->valid) {
 		/* Fan clock dividers */
-		i = w83627ehf_read_value(client, W83627EHF_REG_FANDIV1);
+		i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
 		data->fan_div[0] = (i >> 4) & 0x03;
 		data->fan_div[1] = (i >> 6) & 0x03;
-		i = w83627ehf_read_value(client, W83627EHF_REG_FANDIV2);
+		i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV2);
 		data->fan_div[2] = (i >> 6) & 0x03;
-		i = w83627ehf_read_value(client, W83627EHF_REG_VBAT);
+		i = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
 		data->fan_div[0] |= (i >> 3) & 0x04;
 		data->fan_div[1] |= (i >> 4) & 0x04;
 		data->fan_div[2] |= (i >> 5) & 0x04;
 		if (data->has_fan & ((1 << 3) | (1 << 4))) {
-			i = w83627ehf_read_value(client, W83627EHF_REG_DIODE);
+			i = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
 			data->fan_div[3] = i & 0x03;
 			data->fan_div[4] = ((i >> 2) & 0x03)
 					 | ((i >> 5) & 0x04);
 		}
 		if (data->has_fan & (1 << 3)) {
-			i = w83627ehf_read_value(client, W83627EHF_REG_SMI_OVT);
+			i = w83627ehf_read_value(data, W83627EHF_REG_SMI_OVT);
 			data->fan_div[3] |= (i >> 5) & 0x04;
 		}
 
 		/* Measured voltages and limits */
-		for (i = 0; i < w83627ehf_num_in; i++) {
-			data->in[i] = w83627ehf_read_value(client,
+		for (i = 0; i < data->in_num; i++) {
+			data->in[i] = w83627ehf_read_value(data,
 				      W83627EHF_REG_IN(i));
-			data->in_min[i] = w83627ehf_read_value(client,
+			data->in_min[i] = w83627ehf_read_value(data,
 					  W83627EHF_REG_IN_MIN(i));
-			data->in_max[i] = w83627ehf_read_value(client,
+			data->in_max[i] = w83627ehf_read_value(data,
 					  W83627EHF_REG_IN_MAX(i));
 		}
 
@@ -461,9 +467,9 @@
 			if (!(data->has_fan & (1 << i)))
 				continue;
 
-			data->fan[i] = w83627ehf_read_value(client,
+			data->fan[i] = w83627ehf_read_value(data,
 				       W83627EHF_REG_FAN[i]);
-			data->fan_min[i] = w83627ehf_read_value(client,
+			data->fan_min[i] = w83627ehf_read_value(data,
 					   W83627EHF_REG_FAN_MIN[i]);
 
 			/* If we failed to measure the fan speed and clock
@@ -471,16 +477,16 @@
 			   time */
 			if (data->fan[i] == 0xff
 			 && data->fan_div[i] < 0x07) {
-			 	dev_dbg(&client->dev, "Increasing fan%d "
+			 	dev_dbg(dev, "Increasing fan%d "
 					"clock divider from %u to %u\n",
 					i + 1, div_from_reg(data->fan_div[i]),
 					div_from_reg(data->fan_div[i] + 1));
 				data->fan_div[i]++;
-				w83627ehf_write_fan_div(client, i);
+				w83627ehf_write_fan_div(data, i);
 				/* Preserve min limit if possible */
 				if (data->fan_min[i] >= 2
 				 && data->fan_min[i] != 255)
-					w83627ehf_write_value(client,
+					w83627ehf_write_value(data,
 						W83627EHF_REG_FAN_MIN[i],
 						(data->fan_min[i] /= 2));
 			}
@@ -489,9 +495,9 @@
 		for (i = 0; i < 4; i++) {
 			/* pwmcfg, tolerance mapped for i=0, i=1 to same reg */
 			if (i != 1) {
-				pwmcfg = w83627ehf_read_value(client,
+				pwmcfg = w83627ehf_read_value(data,
 						W83627EHF_REG_PWM_ENABLE[i]);
-				tolerance = w83627ehf_read_value(client,
+				tolerance = w83627ehf_read_value(data,
 						W83627EHF_REG_TOLERANCE[i]);
 			}
 			data->pwm_mode[i] =
@@ -500,14 +506,14 @@
 			data->pwm_enable[i] =
 					((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i])
 						& 3) + 1;
-			data->pwm[i] = w83627ehf_read_value(client,
+			data->pwm[i] = w83627ehf_read_value(data,
 						W83627EHF_REG_PWM[i]);
-			data->fan_min_output[i] = w83627ehf_read_value(client,
+			data->fan_min_output[i] = w83627ehf_read_value(data,
 						W83627EHF_REG_FAN_MIN_OUTPUT[i]);
-			data->fan_stop_time[i] = w83627ehf_read_value(client,
+			data->fan_stop_time[i] = w83627ehf_read_value(data,
 						W83627EHF_REG_FAN_STOP_TIME[i]);
 			data->target_temp[i] =
-				w83627ehf_read_value(client,
+				w83627ehf_read_value(data,
 					W83627EHF_REG_TARGET[i]) &
 					(data->pwm_mode[i] == 1 ? 0x7f : 0xff);
 			data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0))
@@ -515,26 +521,26 @@
 		}
 
 		/* Measured temperatures and limits */
-		data->temp1 = w83627ehf_read_value(client,
+		data->temp1 = w83627ehf_read_value(data,
 			      W83627EHF_REG_TEMP1);
-		data->temp1_max = w83627ehf_read_value(client,
+		data->temp1_max = w83627ehf_read_value(data,
 				  W83627EHF_REG_TEMP1_OVER);
-		data->temp1_max_hyst = w83627ehf_read_value(client,
+		data->temp1_max_hyst = w83627ehf_read_value(data,
 				       W83627EHF_REG_TEMP1_HYST);
 		for (i = 0; i < 2; i++) {
-			data->temp[i] = w83627ehf_read_value(client,
+			data->temp[i] = w83627ehf_read_value(data,
 					W83627EHF_REG_TEMP[i]);
-			data->temp_max[i] = w83627ehf_read_value(client,
+			data->temp_max[i] = w83627ehf_read_value(data,
 					    W83627EHF_REG_TEMP_OVER[i]);
-			data->temp_max_hyst[i] = w83627ehf_read_value(client,
+			data->temp_max_hyst[i] = w83627ehf_read_value(data,
 						 W83627EHF_REG_TEMP_HYST[i]);
 		}
 
-		data->alarms = w83627ehf_read_value(client,
+		data->alarms = w83627ehf_read_value(data,
 					W83627EHF_REG_ALARM1) |
-			       (w83627ehf_read_value(client,
+			       (w83627ehf_read_value(data,
 					W83627EHF_REG_ALARM2) << 8) |
-			       (w83627ehf_read_value(client,
+			       (w83627ehf_read_value(data,
 					W83627EHF_REG_ALARM3) << 16);
 
 		data->last_updated = jiffies;
@@ -567,15 +573,14 @@
 store_in_##reg (struct device *dev, struct device_attribute *attr, \
 			const char *buf, size_t count) \
 { \
-	struct i2c_client *client = to_i2c_client(dev); \
-	struct w83627ehf_data *data = i2c_get_clientdata(client); \
+	struct w83627ehf_data *data = dev_get_drvdata(dev); \
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
 	int nr = sensor_attr->index; \
 	u32 val = simple_strtoul(buf, NULL, 10); \
  \
 	mutex_lock(&data->update_lock); \
 	data->in_##reg[nr] = in_to_reg(val, nr); \
-	w83627ehf_write_value(client, W83627EHF_REG_IN_##REG(nr), \
+	w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \
 			      data->in_##reg[nr]); \
 	mutex_unlock(&data->update_lock); \
 	return count; \
@@ -673,8 +678,7 @@
 store_fan_min(struct device *dev, struct device_attribute *attr,
 	      const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct w83627ehf_data *data = i2c_get_clientdata(client);
+	struct w83627ehf_data *data = dev_get_drvdata(dev);
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 	unsigned int val = simple_strtoul(buf, NULL, 10);
@@ -716,18 +720,25 @@
 	/* Write both the fan clock divider (if it changed) and the new
 	   fan min (unconditionally) */
 	if (new_div != data->fan_div[nr]) {
-		if (new_div > data->fan_div[nr])
-			data->fan[nr] >>= (data->fan_div[nr] - new_div);
-		else
-			data->fan[nr] <<= (new_div - data->fan_div[nr]);
+		/* Preserve the fan speed reading */
+		if (data->fan[nr] != 0xff) {
+			if (new_div > data->fan_div[nr])
+				data->fan[nr] >>= new_div - data->fan_div[nr];
+			else if (data->fan[nr] & 0x80)
+				data->fan[nr] = 0xff;
+			else
+				data->fan[nr] <<= data->fan_div[nr] - new_div;
+		}
 
 		dev_dbg(dev, "fan%u clock divider changed from %u to %u\n",
 			nr + 1, div_from_reg(data->fan_div[nr]),
 			div_from_reg(new_div));
 		data->fan_div[nr] = new_div;
-		w83627ehf_write_fan_div(client, nr);
+		w83627ehf_write_fan_div(data, nr);
+		/* Give the chip time to sample a new speed value */
+		data->last_updated = jiffies;
 	}
-	w83627ehf_write_value(client, W83627EHF_REG_FAN_MIN[nr],
+	w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[nr],
 			      data->fan_min[nr]);
 	mutex_unlock(&data->update_lock);
 
@@ -788,13 +799,12 @@
 store_temp1_##reg(struct device *dev, struct device_attribute *attr, \
 		  const char *buf, size_t count) \
 { \
-	struct i2c_client *client = to_i2c_client(dev); \
-	struct w83627ehf_data *data = i2c_get_clientdata(client); \
+	struct w83627ehf_data *data = dev_get_drvdata(dev); \
 	u32 val = simple_strtoul(buf, NULL, 10); \
  \
 	mutex_lock(&data->update_lock); \
 	data->temp1_##reg = temp1_to_reg(val, -128000, 127000); \
-	w83627ehf_write_value(client, W83627EHF_REG_TEMP1_##REG, \
+	w83627ehf_write_value(data, W83627EHF_REG_TEMP1_##REG, \
 			      data->temp1_##reg); \
 	mutex_unlock(&data->update_lock); \
 	return count; \
@@ -822,15 +832,14 @@
 store_##reg(struct device *dev, struct device_attribute *attr, \
 	    const char *buf, size_t count) \
 { \
-	struct i2c_client *client = to_i2c_client(dev); \
-	struct w83627ehf_data *data = i2c_get_clientdata(client); \
+	struct w83627ehf_data *data = dev_get_drvdata(dev); \
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
 	int nr = sensor_attr->index; \
 	u32 val = simple_strtoul(buf, NULL, 10); \
  \
 	mutex_lock(&data->update_lock); \
 	data->reg[nr] = LM75_TEMP_TO_REG(val); \
-	w83627ehf_write_value(client, W83627EHF_REG_TEMP_##REG[nr], \
+	w83627ehf_write_value(data, W83627EHF_REG_TEMP_##REG[nr], \
 			      data->reg[nr]); \
 	mutex_unlock(&data->update_lock); \
 	return count; \
@@ -838,6 +847,15 @@
 store_temp_reg(OVER, temp_max);
 store_temp_reg(HYST, temp_max_hyst);
 
+static ssize_t
+show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct w83627ehf_data *data = w83627ehf_update_device(dev);
+	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+	int nr = sensor_attr->index;
+	return sprintf(buf, "%d\n", (int)data->temp_type[nr]);
+}
+
 static struct sensor_device_attribute sda_temp[] = {
 	SENSOR_ATTR(temp1_input, S_IRUGO, show_temp1, NULL, 0),
 	SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0),
@@ -857,6 +875,9 @@
 	SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4),
 	SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5),
 	SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13),
+	SENSOR_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0),
+	SENSOR_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1),
+	SENSOR_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2),
 };
 
 #define show_pwm_reg(reg) \
@@ -877,8 +898,7 @@
 store_pwm_mode(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct w83627ehf_data *data = i2c_get_clientdata(client);
+	struct w83627ehf_data *data = dev_get_drvdata(dev);
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 	u32 val = simple_strtoul(buf, NULL, 10);
@@ -887,12 +907,12 @@
 	if (val > 1)
 		return -EINVAL;
 	mutex_lock(&data->update_lock);
-	reg = w83627ehf_read_value(client, W83627EHF_REG_PWM_ENABLE[nr]);
+	reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
 	data->pwm_mode[nr] = val;
 	reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[nr]);
 	if (!val)
 		reg |= 1 << W83627EHF_PWM_MODE_SHIFT[nr];
-	w83627ehf_write_value(client, W83627EHF_REG_PWM_ENABLE[nr], reg);
+	w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -901,15 +921,14 @@
 store_pwm(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct w83627ehf_data *data = i2c_get_clientdata(client);
+	struct w83627ehf_data *data = dev_get_drvdata(dev);
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 	u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 0, 255);
 
 	mutex_lock(&data->update_lock);
 	data->pwm[nr] = val;
-	w83627ehf_write_value(client, W83627EHF_REG_PWM[nr], val);
+	w83627ehf_write_value(data, W83627EHF_REG_PWM[nr], val);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -918,8 +937,7 @@
 store_pwm_enable(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct w83627ehf_data *data = i2c_get_clientdata(client);
+	struct w83627ehf_data *data = dev_get_drvdata(dev);
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 	u32 val = simple_strtoul(buf, NULL, 10);
@@ -928,11 +946,11 @@
 	if (!val || (val > 2))	/* only modes 1 and 2 are supported */
 		return -EINVAL;
 	mutex_lock(&data->update_lock);
-	reg = w83627ehf_read_value(client, W83627EHF_REG_PWM_ENABLE[nr]);
+	reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
 	data->pwm_enable[nr] = val;
 	reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]);
 	reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr];
-	w83627ehf_write_value(client, W83627EHF_REG_PWM_ENABLE[nr], reg);
+	w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -955,15 +973,14 @@
 store_target_temp(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct w83627ehf_data *data = i2c_get_clientdata(client);
+	struct w83627ehf_data *data = dev_get_drvdata(dev);
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 	u8 val = temp1_to_reg(simple_strtoul(buf, NULL, 10), 0, 127000);
 
 	mutex_lock(&data->update_lock);
 	data->target_temp[nr] = val;
-	w83627ehf_write_value(client, W83627EHF_REG_TARGET[nr], val);
+	w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -972,8 +989,7 @@
 store_tolerance(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct w83627ehf_data *data = i2c_get_clientdata(client);
+	struct w83627ehf_data *data = dev_get_drvdata(dev);
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int nr = sensor_attr->index;
 	u16 reg;
@@ -981,13 +997,13 @@
 	u8 val = temp1_to_reg(simple_strtoul(buf, NULL, 10), 0, 15000);
 
 	mutex_lock(&data->update_lock);
-	reg = w83627ehf_read_value(client, W83627EHF_REG_TOLERANCE[nr]);
+	reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
 	data->tolerance[nr] = val;
 	if (nr == 1)
 		reg = (reg & 0x0f) | (val << 4);
 	else
 		reg = (reg & 0xf0) | val;
-	w83627ehf_write_value(client, W83627EHF_REG_TOLERANCE[nr], reg);
+	w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -1058,14 +1074,13 @@
 store_##reg(struct device *dev, struct device_attribute *attr, \
 			    const char *buf, size_t count) \
 {\
-	struct i2c_client *client = to_i2c_client(dev); \
-	struct w83627ehf_data *data = i2c_get_clientdata(client); \
+	struct w83627ehf_data *data = dev_get_drvdata(dev); \
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
 	int nr = sensor_attr->index; \
 	u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 1, 255); \
 	mutex_lock(&data->update_lock); \
 	data->reg[nr] = val; \
-	w83627ehf_write_value(client, W83627EHF_REG_##REG[nr],  val); \
+	w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \
 	mutex_unlock(&data->update_lock); \
 	return count; \
 }
@@ -1087,21 +1102,28 @@
 store_##reg(struct device *dev, struct device_attribute *attr, \
 			const char *buf, size_t count) \
 { \
-	struct i2c_client *client = to_i2c_client(dev); \
-	struct w83627ehf_data *data = i2c_get_clientdata(client); \
+	struct w83627ehf_data *data = dev_get_drvdata(dev); \
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
 	int nr = sensor_attr->index; \
 	u8 val = step_time_to_reg(simple_strtoul(buf, NULL, 10), \
 					data->pwm_mode[nr]); \
 	mutex_lock(&data->update_lock); \
 	data->reg[nr] = val; \
-	w83627ehf_write_value(client, W83627EHF_REG_##REG[nr], val); \
+	w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \
 	mutex_unlock(&data->update_lock); \
 	return count; \
 } \
 
 fan_time_functions(fan_stop_time, FAN_STOP_TIME)
 
+static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%s\n", data->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 
 static struct sensor_device_attribute sda_sf3_arrays_fan4[] = {
 	SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
@@ -1125,8 +1147,16 @@
 		    store_fan_min_output, 2),
 };
 
+static ssize_t
+show_vid(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct w83627ehf_data *data = dev_get_drvdata(dev);
+	return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
+}
+static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+
 /*
- * Driver and client management
+ * Driver and device management
  */
 
 static void w83627ehf_device_remove_files(struct device *dev)
@@ -1134,12 +1164,13 @@
 	/* some entries in the following arrays may not have been used in
 	 * device_create_file(), but device_remove_file() will ignore them */
 	int i;
+	struct w83627ehf_data *data = dev_get_drvdata(dev);
 
 	for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++)
 		device_remove_file(dev, &sda_sf3_arrays[i].dev_attr);
 	for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++)
 		device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr);
-	for (i = 0; i < w83627ehf_num_in; i++) {
+	for (i = 0; i < data->in_num; i++) {
 		device_remove_file(dev, &sda_in_input[i].dev_attr);
 		device_remove_file(dev, &sda_in_alarm[i].dev_attr);
 		device_remove_file(dev, &sda_in_min[i].dev_attr);
@@ -1160,43 +1191,64 @@
 	}
 	for (i = 0; i < ARRAY_SIZE(sda_temp); i++)
 		device_remove_file(dev, &sda_temp[i].dev_attr);
+
+	device_remove_file(dev, &dev_attr_name);
+	if (data->vid != 0x3f)
+		device_remove_file(dev, &dev_attr_cpu0_vid);
 }
 
-static struct i2c_driver w83627ehf_driver;
-
-static void w83627ehf_init_client(struct i2c_client *client)
+/* Get the monitoring functions started */
+static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
 {
 	int i;
-	u8 tmp;
+	u8 tmp, diode;
 
 	/* Start monitoring if needed */
-	tmp = w83627ehf_read_value(client, W83627EHF_REG_CONFIG);
+	tmp = w83627ehf_read_value(data, W83627EHF_REG_CONFIG);
 	if (!(tmp & 0x01))
-		w83627ehf_write_value(client, W83627EHF_REG_CONFIG,
+		w83627ehf_write_value(data, W83627EHF_REG_CONFIG,
 				      tmp | 0x01);
 
 	/* Enable temp2 and temp3 if needed */
 	for (i = 0; i < 2; i++) {
-		tmp = w83627ehf_read_value(client,
+		tmp = w83627ehf_read_value(data,
 					   W83627EHF_REG_TEMP_CONFIG[i]);
 		if (tmp & 0x01)
-			w83627ehf_write_value(client,
+			w83627ehf_write_value(data,
 					      W83627EHF_REG_TEMP_CONFIG[i],
 					      tmp & 0xfe);
 	}
+
+	/* Enable VBAT monitoring if needed */
+	tmp = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
+	if (!(tmp & 0x01))
+		w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01);
+
+	/* Get thermal sensor types */
+	diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
+	for (i = 0; i < 3; i++) {
+		if ((tmp & (0x02 << i)))
+			data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 2;
+		else
+			data->temp_type[i] = 4; /* thermistor */
+	}
 }
 
-static int w83627ehf_detect(struct i2c_adapter *adapter)
+static int __devinit w83627ehf_probe(struct platform_device *pdev)
 {
-	struct i2c_client *client;
+	struct device *dev = &pdev->dev;
+	struct w83627ehf_sio_data *sio_data = dev->platform_data;
 	struct w83627ehf_data *data;
-	struct device *dev;
-	u8 fan4pin, fan5pin;
+	struct resource *res;
+	u8 fan4pin, fan5pin, en_vrm10;
 	int i, err = 0;
 
-	if (!request_region(address + IOREGION_OFFSET, IOREGION_LENGTH,
-	                    w83627ehf_driver.driver.name)) {
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!request_region(res->start, IOREGION_LENGTH, DRVNAME)) {
 		err = -EBUSY;
+		dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
+			(unsigned long)res->start,
+			(unsigned long)res->start + IOREGION_LENGTH - 1);
 		goto exit;
 	}
 
@@ -1205,41 +1257,47 @@
 		goto exit_release;
 	}
 
-	client = &data->client;
-	i2c_set_clientdata(client, data);
-	client->addr = address;
+	data->addr = res->start;
 	mutex_init(&data->lock);
-	client->adapter = adapter;
-	client->driver = &w83627ehf_driver;
-	client->flags = 0;
-	dev = &client->dev;
-
-	if (w83627ehf_num_in == 9)
-		strlcpy(client->name, "w83627dhg", I2C_NAME_SIZE);
-	else	/* just say ehf. 627EHG is 627EHF in lead-free packaging. */
-		strlcpy(client->name, "w83627ehf", I2C_NAME_SIZE);
-
-	data->valid = 0;
 	mutex_init(&data->update_lock);
+	data->name = w83627ehf_device_names[sio_data->kind];
+	platform_set_drvdata(pdev, data);
 
-	/* Tell the i2c layer a new client has arrived */
-	if ((err = i2c_attach_client(client)))
-		goto exit_free;
+	/* 627EHG and 627EHF have 10 voltage inputs; DHG has 9 */
+	data->in_num = (sio_data->kind == w83627dhg) ? 9 : 10;
 
 	/* Initialize the chip */
-	w83627ehf_init_client(client);
+	w83627ehf_init_device(data);
 
-	/* A few vars need to be filled upon startup */
-	for (i = 0; i < 5; i++)
-		data->fan_min[i] = w83627ehf_read_value(client,
-				   W83627EHF_REG_FAN_MIN[i]);
+	data->vrm = vid_which_vrm();
+	superio_enter(sio_data->sioreg);
+	/* Set VID input sensitivity if needed. In theory the BIOS should
+	   have set it, but in practice it's not always the case. */
+	en_vrm10 = superio_inb(sio_data->sioreg, SIO_REG_EN_VRM10);
+	if ((en_vrm10 & 0x08) && data->vrm != 100) {
+		dev_warn(dev, "Setting VID input voltage to TTL\n");
+		superio_outb(sio_data->sioreg, SIO_REG_EN_VRM10,
+			     en_vrm10 & ~0x08);
+	} else if (!(en_vrm10 & 0x08) && data->vrm == 100) {
+		dev_warn(dev, "Setting VID input voltage to VRM10\n");
+		superio_outb(sio_data->sioreg, SIO_REG_EN_VRM10,
+			     en_vrm10 | 0x08);
+	}
+	/* Read VID value */
+	superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
+	if (superio_inb(sio_data->sioreg, SIO_REG_VID_CTRL) & 0x80)
+		data->vid = superio_inb(sio_data->sioreg, SIO_REG_VID_DATA) & 0x3f;
+	else {
+		dev_info(dev, "VID pins in output mode, CPU VID not "
+			 "available\n");
+		data->vid = 0x3f;
+	}
 
 	/* fan4 and fan5 share some pins with the GPIO and serial flash */
 
-	superio_enter();
-	fan5pin = superio_inb(0x24) & 0x2;
-	fan4pin = superio_inb(0x29) & 0x6;
-	superio_exit();
+	fan5pin = superio_inb(sio_data->sioreg, 0x24) & 0x2;
+	fan4pin = superio_inb(sio_data->sioreg, 0x29) & 0x6;
+	superio_exit(sio_data->sioreg);
 
 	/* It looks like fan4 and fan5 pins can be alternatively used
 	   as fan on/off switches, but fan5 control is write only :/
@@ -1248,7 +1306,7 @@
 	   is not the default. */
 
 	data->has_fan = 0x07; /* fan1, fan2 and fan3 */
-	i = w83627ehf_read_value(client, W83627EHF_REG_FANDIV1);
+	i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
 	if ((i & (1 << 2)) && (!fan4pin))
 		data->has_fan |= (1 << 3);
 	if (!(i & (1 << 1)) && (!fan5pin))
@@ -1268,7 +1326,7 @@
 				goto exit_remove;
 		}
 
-	for (i = 0; i < w83627ehf_num_in; i++)
+	for (i = 0; i < data->in_num; i++)
 		if ((err = device_create_file(dev, &sda_in_input[i].dev_attr))
 			|| (err = device_create_file(dev,
 				&sda_in_alarm[i].dev_attr))
@@ -1308,6 +1366,16 @@
 		if ((err = device_create_file(dev, &sda_temp[i].dev_attr)))
 			goto exit_remove;
 
+	err = device_create_file(dev, &dev_attr_name);
+	if (err)
+		goto exit_remove;
+
+	if (data->vid != 0x3f) {
+		err = device_create_file(dev, &dev_attr_cpu0_vid);
+		if (err)
+			goto exit_remove;
+	}
+
 	data->class_dev = hwmon_device_register(dev);
 	if (IS_ERR(data->class_dev)) {
 		err = PTR_ERR(data->class_dev);
@@ -1318,95 +1386,172 @@
 
 exit_remove:
 	w83627ehf_device_remove_files(dev);
-	i2c_detach_client(client);
-exit_free:
 	kfree(data);
+	platform_set_drvdata(pdev, NULL);
 exit_release:
-	release_region(address + IOREGION_OFFSET, IOREGION_LENGTH);
+	release_region(res->start, IOREGION_LENGTH);
 exit:
 	return err;
 }
 
-static int w83627ehf_detach_client(struct i2c_client *client)
+static int __devexit w83627ehf_remove(struct platform_device *pdev)
 {
-	struct w83627ehf_data *data = i2c_get_clientdata(client);
-	int err;
+	struct w83627ehf_data *data = platform_get_drvdata(pdev);
 
 	hwmon_device_unregister(data->class_dev);
-	w83627ehf_device_remove_files(&client->dev);
-
-	if ((err = i2c_detach_client(client)))
-		return err;
-	release_region(client->addr + IOREGION_OFFSET, IOREGION_LENGTH);
+	w83627ehf_device_remove_files(&pdev->dev);
+	release_region(data->addr, IOREGION_LENGTH);
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 
 	return 0;
 }
 
-static struct i2c_driver w83627ehf_driver = {
+static struct platform_driver w83627ehf_driver = {
 	.driver = {
 		.owner	= THIS_MODULE,
-		.name	= "w83627ehf",
+		.name	= DRVNAME,
 	},
-	.attach_adapter	= w83627ehf_detect,
-	.detach_client	= w83627ehf_detach_client,
+	.probe		= w83627ehf_probe,
+	.remove		= __devexit_p(w83627ehf_remove),
 };
 
-static int __init w83627ehf_find(int sioaddr, unsigned short *addr)
+/* w83627ehf_find() looks for a '627 in the Super-I/O config space */
+static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
+				 struct w83627ehf_sio_data *sio_data)
 {
+	static const char __initdata sio_name_W83627EHF[] = "W83627EHF";
+	static const char __initdata sio_name_W83627EHG[] = "W83627EHG";
+	static const char __initdata sio_name_W83627DHG[] = "W83627DHG";
+
 	u16 val;
+	const char *sio_name;
 
-	REG = sioaddr;
-	VAL = sioaddr + 1;
-	superio_enter();
+	superio_enter(sioaddr);
 
-	val = (superio_inb(SIO_REG_DEVID) << 8)
-	    | superio_inb(SIO_REG_DEVID + 1);
+	val = (superio_inb(sioaddr, SIO_REG_DEVID) << 8)
+	    | superio_inb(sioaddr, SIO_REG_DEVID + 1);
 	switch (val & SIO_ID_MASK) {
-	case SIO_W83627DHG_ID:
-		w83627ehf_num_in = 9;
-		break;
 	case SIO_W83627EHF_ID:
+		sio_data->kind = w83627ehf;
+		sio_name = sio_name_W83627EHF;
+		break;
 	case SIO_W83627EHG_ID:
-		w83627ehf_num_in = 10;
+		sio_data->kind = w83627ehf;
+		sio_name = sio_name_W83627EHG;
+		break;
+	case SIO_W83627DHG_ID:
+		sio_data->kind = w83627dhg;
+		sio_name = sio_name_W83627DHG;
 		break;
 	default:
-		printk(KERN_WARNING "w83627ehf: unsupported chip ID: 0x%04x\n",
-			val);
-		superio_exit();
+		if (val != 0xffff)
+			pr_debug(DRVNAME ": unsupported chip ID: 0x%04x\n",
+				 val);
+		superio_exit(sioaddr);
 		return -ENODEV;
 	}
 
-	superio_select(W83627EHF_LD_HWM);
-	val = (superio_inb(SIO_REG_ADDR) << 8)
-	    | superio_inb(SIO_REG_ADDR + 1);
+	/* We have a known chip, find the HWM I/O address */
+	superio_select(sioaddr, W83627EHF_LD_HWM);
+	val = (superio_inb(sioaddr, SIO_REG_ADDR) << 8)
+	    | superio_inb(sioaddr, SIO_REG_ADDR + 1);
 	*addr = val & IOREGION_ALIGNMENT;
 	if (*addr == 0) {
-		superio_exit();
+		printk(KERN_ERR DRVNAME ": Refusing to enable a Super-I/O "
+		       "device with a base I/O port 0.\n");
+		superio_exit(sioaddr);
 		return -ENODEV;
 	}
 
 	/* Activate logical device if needed */
-	val = superio_inb(SIO_REG_ENABLE);
-	if (!(val & 0x01))
-		superio_outb(SIO_REG_ENABLE, val | 0x01);
+	val = superio_inb(sioaddr, SIO_REG_ENABLE);
+	if (!(val & 0x01)) {
+		printk(KERN_WARNING DRVNAME ": Forcibly enabling Super-I/O. "
+		       "Sensor is probably unusable.\n");
+		superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
+	}
 
-	superio_exit();
+	superio_exit(sioaddr);
+	pr_info(DRVNAME ": Found %s chip at %#x\n", sio_name, *addr);
+	sio_data->sioreg = sioaddr;
+
 	return 0;
 }
 
+/* When the Super-I/O functions move to a separate file, the Super-I/O
+ * bus will manage the lifetime of the device and this module will only keep
+ * track of the w83627ehf driver. But since we call platform_device_alloc()
+ * here, we must keep track of the device ourselves. */
+static struct platform_device *pdev;
+
 static int __init sensors_w83627ehf_init(void)
 {
-	if (w83627ehf_find(0x2e, &address)
-	 && w83627ehf_find(0x4e, &address))
+	int err;
+	unsigned short address;
+	struct resource res;
+	struct w83627ehf_sio_data sio_data;
+
+	/* initialize sio_data->kind and sio_data->sioreg.
+	 *
+	 * when Super-I/O functions move to a separate file, the Super-I/O
+	 * driver will probe 0x2e and 0x4e and auto-detect the presence of a
+	 * w83627ehf hardware monitor, and call probe() */
+	if (w83627ehf_find(0x2e, &address, &sio_data) &&
+	    w83627ehf_find(0x4e, &address, &sio_data))
 		return -ENODEV;
 
-	return i2c_isa_add_driver(&w83627ehf_driver);
+	err = platform_driver_register(&w83627ehf_driver);
+	if (err)
+		goto exit;
+
+	if (!(pdev = platform_device_alloc(DRVNAME, address))) {
+		err = -ENOMEM;
+		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		goto exit_unregister;
+	}
+
+	err = platform_device_add_data(pdev, &sio_data,
+				       sizeof(struct w83627ehf_sio_data));
+	if (err) {
+		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+		goto exit_device_put;
+	}
+
+	memset(&res, 0, sizeof(res));
+	res.name = DRVNAME;
+	res.start = address + IOREGION_OFFSET;
+	res.end = address + IOREGION_OFFSET + IOREGION_LENGTH - 1;
+	res.flags = IORESOURCE_IO;
+	err = platform_device_add_resources(pdev, &res, 1);
+	if (err) {
+		printk(KERN_ERR DRVNAME ": Device resource addition failed "
+		       "(%d)\n", err);
+		goto exit_device_put;
+	}
+
+	/* platform_device_add calls probe() */
+	err = platform_device_add(pdev);
+	if (err) {
+		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
+		       err);
+		goto exit_device_put;
+	}
+
+	return 0;
+
+exit_device_put:
+	platform_device_put(pdev);
+exit_unregister:
+	platform_driver_unregister(&w83627ehf_driver);
+exit:
+	return err;
 }
 
 static void __exit sensors_w83627ehf_exit(void)
 {
-	i2c_isa_del_driver(&w83627ehf_driver);
+	platform_device_unregister(pdev);
+	platform_driver_unregister(&w83627ehf_driver);
 }
 
 MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 12cb40a..1ce7817 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -220,6 +220,18 @@
 #define W836X7HF_REG_PWM(type, nr) (((type) == w83627hf) ? \
                                      regpwm_627hf[(nr) - 1] : regpwm[(nr) - 1])
 
+#define W83627HF_REG_PWM_FREQ		0x5C	/* Only for the 627HF */
+
+#define W83637HF_REG_PWM_FREQ1		0x00	/* 697HF/687THF too */
+#define W83637HF_REG_PWM_FREQ2		0x02	/* 697HF/687THF too */
+#define W83637HF_REG_PWM_FREQ3		0x10	/* 687THF too */
+
+static const u8 W83637HF_REG_PWM_FREQ[] = { W83637HF_REG_PWM_FREQ1,
+					W83637HF_REG_PWM_FREQ2,
+					W83637HF_REG_PWM_FREQ3 };
+
+#define W83627HF_BASE_PWM_FREQ	46870
+
 #define W83781D_REG_I2C_ADDR 0x48
 #define W83781D_REG_I2C_SUBADDR 0x4A
 
@@ -267,6 +279,49 @@
 
 #define PWM_TO_REG(val) (SENSORS_LIMIT((val),0,255))
 
+static inline unsigned long pwm_freq_from_reg_627hf(u8 reg)
+{
+	unsigned long freq;
+	freq = W83627HF_BASE_PWM_FREQ >> reg;
+	return freq;
+}
+static inline u8 pwm_freq_to_reg_627hf(unsigned long val)
+{
+	u8 i;
+	/* Only 5 dividers (1 2 4 8 16)
+	   Search for the nearest available frequency */
+	for (i = 0; i < 4; i++) {
+		if (val > (((W83627HF_BASE_PWM_FREQ >> i) +
+			    (W83627HF_BASE_PWM_FREQ >> (i+1))) / 2))
+			break;
+	}
+	return i;
+}
+
+static inline unsigned long pwm_freq_from_reg(u8 reg)
+{
+	/* Clock bit 8 -> 180 kHz or 24 MHz */
+	unsigned long clock = (reg & 0x80) ? 180000UL : 24000000UL;
+
+	reg &= 0x7f;
+	/* This should not happen but anyway... */
+	if (reg == 0)
+		reg++;
+	return (clock / (reg << 8));
+}
+static inline u8 pwm_freq_to_reg(unsigned long val)
+{
+	/* Minimum divider value is 0x01 and maximum is 0x7F */
+	if (val >= 93750)	/* The highest we can do */
+		return 0x01;
+	if (val >= 720)	/* Use 24 MHz clock */
+		return (24000000UL / (val << 8));
+	if (val < 6)		/* The lowest we can do */
+		return 0xFF;
+	else			/* Use 180 kHz clock */
+		return (0x80 | (180000UL / (val << 8)));
+}
+
 #define BEEP_MASK_FROM_REG(val)		 (val)
 #define BEEP_MASK_TO_REG(val)		((val) & 0xffffff)
 #define BEEP_ENABLE_TO_REG(val)		((val)?1:0)
@@ -316,6 +371,7 @@
 	u32 beep_mask;		/* Register encoding, combined */
 	u8 beep_enable;		/* Boolean */
 	u8 pwm[3];		/* Register value */
+	u8 pwm_freq[3];		/* Register value */
 	u16 sens[3];		/* 782D/783S only.
 				   1 = pentium diode; 2 = 3904 diode;
 				   3000-5000 = thermistor beta.
@@ -852,6 +908,64 @@
 sysfs_pwm(3);
 
 static ssize_t
+show_pwm_freq_reg(struct device *dev, char *buf, int nr)
+{
+	struct w83627hf_data *data = w83627hf_update_device(dev);
+	if (data->type == w83627hf)
+		return sprintf(buf, "%ld\n",
+			pwm_freq_from_reg_627hf(data->pwm_freq[nr - 1]));
+	else
+		return sprintf(buf, "%ld\n",
+			pwm_freq_from_reg(data->pwm_freq[nr - 1]));
+}
+
+static ssize_t
+store_pwm_freq_reg(struct device *dev, const char *buf, size_t count, int nr)
+{
+	struct w83627hf_data *data = dev_get_drvdata(dev);
+	static const u8 mask[]={0xF8, 0x8F};
+	u32 val;
+
+	val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+
+	if (data->type == w83627hf) {
+		data->pwm_freq[nr - 1] = pwm_freq_to_reg_627hf(val);
+		w83627hf_write_value(data, W83627HF_REG_PWM_FREQ,
+				(data->pwm_freq[nr - 1] << ((nr - 1)*4)) |
+				(w83627hf_read_value(data,
+				W83627HF_REG_PWM_FREQ) & mask[nr - 1]));
+	} else {
+		data->pwm_freq[nr - 1] = pwm_freq_to_reg(val);
+		w83627hf_write_value(data, W83637HF_REG_PWM_FREQ[nr - 1],
+				data->pwm_freq[nr - 1]);
+	}
+
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+#define sysfs_pwm_freq(offset) \
+static ssize_t show_regs_pwm_freq_##offset(struct device *dev, \
+		struct device_attribute *attr, char *buf) \
+{ \
+	return show_pwm_freq_reg(dev, buf, offset); \
+} \
+static ssize_t \
+store_regs_pwm_freq_##offset(struct device *dev, \
+		struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+	return store_pwm_freq_reg(dev, buf, count, offset); \
+} \
+static DEVICE_ATTR(pwm##offset##_freq, S_IRUGO | S_IWUSR, \
+		  show_regs_pwm_freq_##offset, store_regs_pwm_freq_##offset);
+
+sysfs_pwm_freq(1);
+sysfs_pwm_freq(2);
+sysfs_pwm_freq(3);
+
+static ssize_t
 show_sensor_reg(struct device *dev, char *buf, int nr)
 {
 	struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -1077,6 +1191,9 @@
 
 	&dev_attr_pwm3.attr,
 
+	&dev_attr_pwm1_freq.attr,
+	&dev_attr_pwm2_freq.attr,
+	&dev_attr_pwm3_freq.attr,
 	NULL
 };
 
@@ -1139,7 +1256,9 @@
 		 || (err = device_create_file(dev, &dev_attr_in5_max))
 		 || (err = device_create_file(dev, &dev_attr_in6_input))
 		 || (err = device_create_file(dev, &dev_attr_in6_min))
-		 || (err = device_create_file(dev, &dev_attr_in6_max)))
+		 || (err = device_create_file(dev, &dev_attr_in6_max))
+		 || (err = device_create_file(dev, &dev_attr_pwm1_freq))
+		 || (err = device_create_file(dev, &dev_attr_pwm2_freq)))
 			goto ERROR4;
 
 	if (data->type != w83697hf)
@@ -1169,6 +1288,12 @@
 		if ((err = device_create_file(dev, &dev_attr_pwm3)))
 			goto ERROR4;
 
+	if (data->type == w83637hf || data->type == w83687thf)
+		if ((err = device_create_file(dev, &dev_attr_pwm1_freq))
+		 || (err = device_create_file(dev, &dev_attr_pwm2_freq))
+		 || (err = device_create_file(dev, &dev_attr_pwm3_freq)))
+			goto ERROR4;
+
 	data->class_dev = hwmon_device_register(dev);
 	if (IS_ERR(data->class_dev)) {
 		err = PTR_ERR(data->class_dev);
@@ -1181,6 +1306,7 @@
 	sysfs_remove_group(&dev->kobj, &w83627hf_group);
 	sysfs_remove_group(&dev->kobj, &w83627hf_group_opt);
       ERROR3:
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
       ERROR1:
 	release_region(res->start, WINB_REGION_SIZE);
@@ -1193,11 +1319,11 @@
 	struct w83627hf_data *data = platform_get_drvdata(pdev);
 	struct resource *res;
 
-	platform_set_drvdata(pdev, NULL);
 	hwmon_device_unregister(data->class_dev);
 
 	sysfs_remove_group(&pdev->dev.kobj, &w83627hf_group);
 	sysfs_remove_group(&pdev->dev.kobj, &w83627hf_group_opt);
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 
 	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1472,6 +1598,20 @@
 			   (data->type == w83627hf || data->type == w83697hf))
 				break;
 		}
+		if (data->type == w83627hf) {
+				u8 tmp = w83627hf_read_value(data,
+						W83627HF_REG_PWM_FREQ);
+				data->pwm_freq[0] = tmp & 0x07;
+				data->pwm_freq[1] = (tmp >> 4) & 0x07;
+		} else if (data->type != w83627thf) {
+			for (i = 1; i <= 3; i++) {
+				data->pwm_freq[i - 1] =
+					w83627hf_read_value(data,
+						W83637HF_REG_PWM_FREQ[i - 1]);
+				if (i == 2 && (data->type == w83697hf))
+					break;
+			}
+		}
 
 		data->temp = w83627hf_read_value(data, W83781D_REG_TEMP(1));
 		data->temp_max =
@@ -1548,15 +1688,12 @@
 		goto exit_device_put;
 	}
 
-	pdev->dev.platform_data = kmalloc(sizeof(struct w83627hf_sio_data),
-					  GFP_KERNEL);
-	if (!pdev->dev.platform_data) {
-		err = -ENOMEM;
+	err = platform_device_add_data(pdev, sio_data,
+				       sizeof(struct w83627hf_sio_data));
+	if (err) {
 		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
 		goto exit_device_put;
 	}
-	memcpy(pdev->dev.platform_data, sio_data,
-	       sizeof(struct w83627hf_sio_data));
 
 	err = platform_device_add(pdev);
 	if (err) {
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 1c77e14..da16478 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -237,9 +237,6 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-iop3xx.
 
-config I2C_ISA
-	tristate
-
 config I2C_IXP4XX
 	tristate "IXP4xx GPIO-Based I2C Interface (DEPRECATED)"
 	depends on ARCH_IXP4XX
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index a6db4e3..5b752e4 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -18,7 +18,6 @@
 obj-$(CONFIG_I2C_I810)		+= i2c-i810.o
 obj-$(CONFIG_I2C_IBM_IIC)	+= i2c-ibm_iic.o
 obj-$(CONFIG_I2C_IOP3XX)	+= i2c-iop3xx.o
-obj-$(CONFIG_I2C_ISA)		+= i2c-isa.o
 obj-$(CONFIG_I2C_IXP2000)	+= i2c-ixp2000.o
 obj-$(CONFIG_I2C_IXP4XX)	+= i2c-ixp4xx.o
 obj-$(CONFIG_I2C_POWERMAC)	+= i2c-powermac.o
diff --git a/drivers/i2c/busses/i2c-isa.c b/drivers/i2c/busses/i2c-isa.c
deleted file mode 100644
index b0e1370..0000000
--- a/drivers/i2c/busses/i2c-isa.c
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
-    i2c-isa.c - an i2c-core-like thing for ISA hardware monitoring chips
-    Copyright (C) 2005  Jean Delvare <khali@linux-fr.org>
-
-    Based on the i2c-isa pseudo-adapter from the lm_sensors project
-    Copyright (c) 1998, 1999  Frodo Looijaard <frodol@dds.nl> 
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-/* This implements an i2c-core-like thing for ISA hardware monitoring
-   chips. Such chips are linked to the i2c subsystem for historical
-   reasons (because the early ISA hardware monitoring chips such as the
-   LM78 had both an I2C and an ISA interface). They used to be
-   registered with the main i2c-core, but as a first step in the
-   direction of a clean separation between I2C and ISA chip drivers,
-   we now have this separate core for ISA ones. It is significantly
-   more simple than the real one, of course, because we don't have to
-   handle multiple busses: there is only one (fake) ISA adapter.
-   It is worth noting that we still rely on i2c-core for some things
-   at the moment - but hopefully this won't last. */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/i2c.h>
-#include <linux/i2c-isa.h>
-#include <linux/platform_device.h>
-#include <linux/completion.h>
-
-/* Exported by i2c-core for i2c-isa only */
-extern void i2c_adapter_dev_release(struct device *dev);
-extern struct class i2c_adapter_class;
-
-static u32 isa_func(struct i2c_adapter *adapter);
-
-/* This is the actual algorithm we define */
-static const struct i2c_algorithm isa_algorithm = {
-	.functionality	= isa_func,
-};
-
-/* There can only be one... */
-static struct i2c_adapter isa_adapter = {
-	.owner		= THIS_MODULE,
-	.id		= I2C_HW_ISA,
-	.class          = I2C_CLASS_HWMON,
-	.algo		= &isa_algorithm,
-	.name		= "ISA main adapter",
-};
-
-/* We can't do a thing... */
-static u32 isa_func(struct i2c_adapter *adapter)
-{
-	return 0;
-}
-
-
-/* We implement an interface which resembles i2c_{add,del}_driver,
-   but for i2c-isa drivers. We don't have to remember and handle lists
-   of drivers and adapters so this is much more simple, of course. */
-
-int i2c_isa_add_driver(struct i2c_driver *driver)
-{
-	int res;
-
-	/* Add the driver to the list of i2c drivers in the driver core */
-	driver->driver.bus = &i2c_bus_type;
-	res = driver_register(&driver->driver);
-	if (res)
-		return res;
-	dev_dbg(&isa_adapter.dev, "Driver %s registered\n", driver->driver.name);
-
-	/* Now look for clients */
-	res = driver->attach_adapter(&isa_adapter);
-	if (res) {
-		dev_dbg(&isa_adapter.dev,
-			"Driver %s failed to attach adapter, unregistering\n",
-			driver->driver.name);
-		driver_unregister(&driver->driver);
-	}
-	return res;
-}
-
-int i2c_isa_del_driver(struct i2c_driver *driver)
-{
-	struct list_head *item, *_n;
-	struct i2c_client *client;
-	int res;
-
-	/* Detach all clients belonging to this one driver */
-	list_for_each_safe(item, _n, &isa_adapter.clients) {
-		client = list_entry(item, struct i2c_client, list);
-		if (client->driver != driver)
-			continue;
-		dev_dbg(&isa_adapter.dev, "Detaching client %s at 0x%x\n",
-			client->name, client->addr);
-		if ((res = driver->detach_client(client))) {
-			dev_err(&isa_adapter.dev, "Failed, driver "
-				"%s not unregistered!\n",
-				driver->driver.name);
-			return res;
-		}
-	}
-
-	/* Get the driver off the core list */
-	driver_unregister(&driver->driver);
-	dev_dbg(&isa_adapter.dev, "Driver %s unregistered\n", driver->driver.name);
-
-	return 0;
-}
-
-
-static int __init i2c_isa_init(void)
-{
-	int err;
-
-	mutex_init(&isa_adapter.clist_lock);
-	INIT_LIST_HEAD(&isa_adapter.clients);
-
-	isa_adapter.nr = ANY_I2C_ISA_BUS;
-	isa_adapter.dev.parent = &platform_bus;
-	sprintf(isa_adapter.dev.bus_id, "i2c-%d", isa_adapter.nr);
-	isa_adapter.dev.release = &i2c_adapter_dev_release;
-	isa_adapter.dev.class = &i2c_adapter_class;
-	err = device_register(&isa_adapter.dev);
-	if (err) {
-		printk(KERN_ERR "i2c-isa: Failed to register device\n");
-		goto exit;
-	}
-
-	dev_dbg(&isa_adapter.dev, "%s registered\n", isa_adapter.name);
-
-	return 0;
-
-exit:
-	return err;
-}
-
-static void __exit i2c_isa_exit(void)
-{
-#ifdef DEBUG
-	struct list_head  *item, *_n;
-	struct i2c_client *client = NULL;
-#endif
-
-	/* There should be no more active client */
-#ifdef DEBUG
-	dev_dbg(&isa_adapter.dev, "Looking for clients\n");
-	list_for_each_safe(item, _n, &isa_adapter.clients) {
-		client = list_entry(item, struct i2c_client, list);
-		dev_err(&isa_adapter.dev, "Driver %s still has an active "
-			"ISA client at 0x%x\n", client->driver->driver.name,
-			client->addr);
-	}
-	if (client != NULL)
-		return;
-#endif
-
-	/* Clean up the sysfs representation */
-	dev_dbg(&isa_adapter.dev, "Unregistering from sysfs\n");
-	init_completion(&isa_adapter.dev_released);
-	device_unregister(&isa_adapter.dev);
-
-	/* Wait for sysfs to drop all references */
-	dev_dbg(&isa_adapter.dev, "Waiting for sysfs completion\n");
-	wait_for_completion(&isa_adapter.dev_released);
-
-	dev_dbg(&isa_adapter.dev, "%s unregistered\n", isa_adapter.name);
-}
-
-EXPORT_SYMBOL(i2c_isa_add_driver);
-EXPORT_SYMBOL(i2c_isa_del_driver);
-
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
-MODULE_DESCRIPTION("ISA bus access through i2c");
-MODULE_LICENSE("GPL");
-
-module_init(i2c_isa_init);
-module_exit(i2c_isa_exit);
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 03188d2..17cecf1 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -630,7 +630,7 @@
 static struct platform_driver pmcmsptwi_driver = {
 	.probe  = pmcmsptwi_probe,
 	.remove	= __devexit_p(pmcmsptwi_remove),
-	.driver {
+	.driver = {
 		.name	= DRV_NAME,
 		.owner	= THIS_MODULE,
 	},
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 3944e88..2e1c24f 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -153,4 +153,14 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called tsl2550.
 
+config MENELAUS
+	bool "TWL92330/Menelaus PM chip"
+	depends on I2C=y && ARCH_OMAP24XX
+	help
+	  If you say yes here you get support for the Texas Instruments
+	  TWL92330/Menelaus Power Management chip. This includes voltage
+	  regulators, dual-slot memory card transceivers, a real-time clock
+	  and other features that are often used in portable devices like
+	  cell phones and PDAs.
+
 endmenu
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index d8cbeb3..ca924e1 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -13,6 +13,7 @@
 obj-$(CONFIG_SENSORS_PCF8591)	+= pcf8591.o
 obj-$(CONFIG_ISP1301_OMAP)	+= isp1301_omap.o
 obj-$(CONFIG_TPS65010)		+= tps65010.o
+obj-$(CONFIG_MENELAUS)		+= menelaus.o
 obj-$(CONFIG_SENSORS_TSL2550)	+= tsl2550.o
 
 ifeq ($(CONFIG_I2C_DEBUG_CHIP),y)
diff --git a/drivers/i2c/chips/menelaus.c b/drivers/i2c/chips/menelaus.c
new file mode 100644
index 0000000..48a7e2f
--- /dev/null
+++ b/drivers/i2c/chips/menelaus.c
@@ -0,0 +1,1281 @@
+#define DEBUG
+/*
+ * Copyright (C) 2004 Texas Instruments, Inc.
+ *
+ * Some parts based tps65010.c:
+ * Copyright (C) 2004 Texas Instruments and
+ * Copyright (C) 2004-2005 David Brownell
+ *
+ * Some parts based on tlv320aic24.c:
+ * Copyright (C) by Kai Svahn <kai.svahn@nokia.com>
+ *
+ * Changes for interrupt handling and clean-up by
+ * Tony Lindgren <tony@atomide.com> and Imre Deak <imre.deak@nokia.com>
+ * Cleanup and generalized support for voltage setting by
+ * Juha Yrjola
+ * Added support for controlling VCORE and regulator sleep states,
+ * Amit Kucheria <amit.kucheria@nokia.com>
+ * Copyright (C) 2005, 2006 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/irq.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/menelaus.h>
+
+#define DRIVER_NAME			"menelaus"
+
+#define pr_err(fmt, arg...)	printk(KERN_ERR DRIVER_NAME ": " fmt "\n", ## arg)
+
+#define MENELAUS_I2C_ADDRESS		0x72
+
+#define MENELAUS_REV			0x01
+#define MENELAUS_VCORE_CTRL1		0x02
+#define MENELAUS_VCORE_CTRL2		0x03
+#define MENELAUS_VCORE_CTRL3		0x04
+#define MENELAUS_VCORE_CTRL4		0x05
+#define MENELAUS_VCORE_CTRL5		0x06
+#define MENELAUS_DCDC_CTRL1		0x07
+#define MENELAUS_DCDC_CTRL2		0x08
+#define MENELAUS_DCDC_CTRL3		0x09
+#define MENELAUS_LDO_CTRL1		0x0A
+#define MENELAUS_LDO_CTRL2		0x0B
+#define MENELAUS_LDO_CTRL3		0x0C
+#define MENELAUS_LDO_CTRL4		0x0D
+#define MENELAUS_LDO_CTRL5		0x0E
+#define MENELAUS_LDO_CTRL6		0x0F
+#define MENELAUS_LDO_CTRL7		0x10
+#define MENELAUS_LDO_CTRL8		0x11
+#define MENELAUS_SLEEP_CTRL1		0x12
+#define MENELAUS_SLEEP_CTRL2		0x13
+#define MENELAUS_DEVICE_OFF		0x14
+#define MENELAUS_OSC_CTRL		0x15
+#define MENELAUS_DETECT_CTRL		0x16
+#define MENELAUS_INT_MASK1		0x17
+#define MENELAUS_INT_MASK2		0x18
+#define MENELAUS_INT_STATUS1		0x19
+#define MENELAUS_INT_STATUS2		0x1A
+#define MENELAUS_INT_ACK1		0x1B
+#define MENELAUS_INT_ACK2		0x1C
+#define MENELAUS_GPIO_CTRL		0x1D
+#define MENELAUS_GPIO_IN		0x1E
+#define MENELAUS_GPIO_OUT		0x1F
+#define MENELAUS_BBSMS			0x20
+#define MENELAUS_RTC_CTRL		0x21
+#define MENELAUS_RTC_UPDATE		0x22
+#define MENELAUS_RTC_SEC		0x23
+#define MENELAUS_RTC_MIN		0x24
+#define MENELAUS_RTC_HR			0x25
+#define MENELAUS_RTC_DAY		0x26
+#define MENELAUS_RTC_MON		0x27
+#define MENELAUS_RTC_YR			0x28
+#define MENELAUS_RTC_WKDAY		0x29
+#define MENELAUS_RTC_AL_SEC		0x2A
+#define MENELAUS_RTC_AL_MIN		0x2B
+#define MENELAUS_RTC_AL_HR		0x2C
+#define MENELAUS_RTC_AL_DAY		0x2D
+#define MENELAUS_RTC_AL_MON		0x2E
+#define MENELAUS_RTC_AL_YR		0x2F
+#define MENELAUS_RTC_COMP_MSB		0x30
+#define MENELAUS_RTC_COMP_LSB		0x31
+#define MENELAUS_S1_PULL_EN		0x32
+#define MENELAUS_S1_PULL_DIR		0x33
+#define MENELAUS_S2_PULL_EN		0x34
+#define MENELAUS_S2_PULL_DIR		0x35
+#define MENELAUS_MCT_CTRL1		0x36
+#define MENELAUS_MCT_CTRL2		0x37
+#define MENELAUS_MCT_CTRL3		0x38
+#define MENELAUS_MCT_PIN_ST		0x39
+#define MENELAUS_DEBOUNCE1		0x3A
+
+#define IH_MENELAUS_IRQS		12
+#define MENELAUS_MMC_S1CD_IRQ		0	/* MMC slot 1 card change */
+#define MENELAUS_MMC_S2CD_IRQ		1	/* MMC slot 2 card change */
+#define MENELAUS_MMC_S1D1_IRQ		2	/* MMC DAT1 low in slot 1 */
+#define MENELAUS_MMC_S2D1_IRQ		3	/* MMC DAT1 low in slot 2 */
+#define MENELAUS_LOWBAT_IRQ		4	/* Low battery */
+#define MENELAUS_HOTDIE_IRQ		5	/* Hot die detect */
+#define MENELAUS_UVLO_IRQ		6	/* UVLO detect */
+#define MENELAUS_TSHUT_IRQ		7	/* Thermal shutdown */
+#define MENELAUS_RTCTMR_IRQ		8	/* RTC timer */
+#define MENELAUS_RTCALM_IRQ		9	/* RTC alarm */
+#define MENELAUS_RTCERR_IRQ		10	/* RTC error */
+#define MENELAUS_PSHBTN_IRQ		11	/* Push button */
+#define MENELAUS_RESERVED12_IRQ		12	/* Reserved */
+#define MENELAUS_RESERVED13_IRQ		13	/* Reserved */
+#define MENELAUS_RESERVED14_IRQ		14	/* Reserved */
+#define MENELAUS_RESERVED15_IRQ		15	/* Reserved */
+
+static void menelaus_work(struct work_struct *_menelaus);
+
+struct menelaus_chip {
+	struct mutex		lock;
+	struct i2c_client	*client;
+	struct work_struct	work;
+#ifdef CONFIG_RTC_DRV_TWL92330
+	struct rtc_device	*rtc;
+	u8			rtc_control;
+	unsigned		uie:1;
+#endif
+	unsigned		vcore_hw_mode:1;
+	u8			mask1, mask2;
+	void			(*handlers[16])(struct menelaus_chip *);
+	void			(*mmc_callback)(void *data, u8 mask);
+	void			*mmc_callback_data;
+};
+
+static struct menelaus_chip *the_menelaus;
+
+static int menelaus_write_reg(int reg, u8 value)
+{
+	int val = i2c_smbus_write_byte_data(the_menelaus->client, reg, value);
+
+	if (val < 0) {
+		pr_err("write error");
+		return val;
+	}
+
+	return 0;
+}
+
+static int menelaus_read_reg(int reg)
+{
+	int val = i2c_smbus_read_byte_data(the_menelaus->client, reg);
+
+	if (val < 0)
+		pr_err("read error");
+
+	return val;
+}
+
+static int menelaus_enable_irq(int irq)
+{
+	if (irq > 7) {
+		irq -= 8;
+		the_menelaus->mask2 &= ~(1 << irq);
+		return menelaus_write_reg(MENELAUS_INT_MASK2,
+				the_menelaus->mask2);
+	} else {
+		the_menelaus->mask1 &= ~(1 << irq);
+		return menelaus_write_reg(MENELAUS_INT_MASK1,
+				the_menelaus->mask1);
+	}
+}
+
+static int menelaus_disable_irq(int irq)
+{
+	if (irq > 7) {
+		irq -= 8;
+		the_menelaus->mask2 |= (1 << irq);
+		return menelaus_write_reg(MENELAUS_INT_MASK2,
+				the_menelaus->mask2);
+	} else {
+		the_menelaus->mask1 |= (1 << irq);
+		return menelaus_write_reg(MENELAUS_INT_MASK1,
+				the_menelaus->mask1);
+	}
+}
+
+static int menelaus_ack_irq(int irq)
+{
+	if (irq > 7)
+		return menelaus_write_reg(MENELAUS_INT_ACK2, 1 << (irq - 8));
+	else
+		return menelaus_write_reg(MENELAUS_INT_ACK1, 1 << irq);
+}
+
+/* Adds a handler for an interrupt. Does not run in interrupt context */
+static int menelaus_add_irq_work(int irq,
+		void (*handler)(struct menelaus_chip *))
+{
+	int ret = 0;
+
+	mutex_lock(&the_menelaus->lock);
+	the_menelaus->handlers[irq] = handler;
+	ret = menelaus_enable_irq(irq);
+	mutex_unlock(&the_menelaus->lock);
+
+	return ret;
+}
+
+/* Removes handler for an interrupt */
+static int menelaus_remove_irq_work(int irq)
+{
+	int ret = 0;
+
+	mutex_lock(&the_menelaus->lock);
+	ret = menelaus_disable_irq(irq);
+	the_menelaus->handlers[irq] = NULL;
+	mutex_unlock(&the_menelaus->lock);
+
+	return ret;
+}
+
+/*
+ * Gets scheduled when a card detect interrupt happens. Note that in some cases
+ * this line is wired to the card cover switch rather than the card detect switch
+ * in each slot. In this case the cards are not seen by menelaus.
+ * FIXME: Add handling for D1 too
+ */
+static void menelaus_mmc_cd_work(struct menelaus_chip *menelaus_hw)
+{
+	int reg;
+	unsigned char card_mask = 0;
+
+	reg = menelaus_read_reg(MENELAUS_MCT_PIN_ST);
+	if (reg < 0)
+		return;
+
+	if (!(reg & 0x1))
+		card_mask |= (1 << 0);
+
+	if (!(reg & 0x2))
+		card_mask |= (1 << 1);
+
+	if (menelaus_hw->mmc_callback)
+		menelaus_hw->mmc_callback(menelaus_hw->mmc_callback_data,
+					  card_mask);
+}
+
+/*
+ * Toggles the MMC slots between open-drain and push-pull mode.
+ */
+int menelaus_set_mmc_opendrain(int slot, int enable)
+{
+	int ret, val;
+
+	if (slot != 1 && slot != 2)
+		return -EINVAL;
+	mutex_lock(&the_menelaus->lock);
+	ret = menelaus_read_reg(MENELAUS_MCT_CTRL1);
+	if (ret < 0) {
+		mutex_unlock(&the_menelaus->lock);
+		return ret;
+	}
+	val = ret;
+	if (slot == 1) {
+		if (enable)
+			val |= 1 << 2;
+		else
+			val &= ~(1 << 2);
+	} else {
+		if (enable)
+			val |= 1 << 3;
+		else
+			val &= ~(1 << 3);
+	}
+	ret = menelaus_write_reg(MENELAUS_MCT_CTRL1, val);
+	mutex_unlock(&the_menelaus->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(menelaus_set_mmc_opendrain);
+
+int menelaus_set_slot_sel(int enable)
+{
+	int ret;
+
+	mutex_lock(&the_menelaus->lock);
+	ret = menelaus_read_reg(MENELAUS_GPIO_CTRL);
+	if (ret < 0)
+		goto out;
+	ret |= 0x02;
+	if (enable)
+		ret |= 1 << 5;
+	else
+		ret &= ~(1 << 5);
+	ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret);
+out:
+	mutex_unlock(&the_menelaus->lock);
+	return ret;
+}
+EXPORT_SYMBOL(menelaus_set_slot_sel);
+
+int menelaus_set_mmc_slot(int slot, int enable, int power, int cd_en)
+{
+	int ret, val;
+
+	if (slot != 1 && slot != 2)
+		return -EINVAL;
+	if (power >= 3)
+		return -EINVAL;
+
+	mutex_lock(&the_menelaus->lock);
+
+	ret = menelaus_read_reg(MENELAUS_MCT_CTRL2);
+	if (ret < 0)
+		goto out;
+	val = ret;
+	if (slot == 1) {
+		if (cd_en)
+			val |= (1 << 4) | (1 << 6);
+		else
+			val &= ~((1 << 4) | (1 << 6));
+	} else {
+		if (cd_en)
+			val |= (1 << 5) | (1 << 7);
+		else
+			val &= ~((1 << 5) | (1 << 7));
+	}
+	ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, val);
+	if (ret < 0)
+		goto out;
+
+	ret = menelaus_read_reg(MENELAUS_MCT_CTRL3);
+	if (ret < 0)
+		goto out;
+	val = ret;
+	if (slot == 1) {
+		if (enable)
+			val |= 1 << 0;
+		else
+			val &= ~(1 << 0);
+	} else {
+		int b;
+
+		if (enable)
+			ret |= 1 << 1;
+		else
+			ret &= ~(1 << 1);
+		b = menelaus_read_reg(MENELAUS_MCT_CTRL2);
+		b &= ~0x03;
+		b |= power;
+		ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, b);
+		if (ret < 0)
+			goto out;
+	}
+	/* Disable autonomous shutdown */
+	val &= ~(0x03 << 2);
+	ret = menelaus_write_reg(MENELAUS_MCT_CTRL3, val);
+out:
+	mutex_unlock(&the_menelaus->lock);
+	return ret;
+}
+EXPORT_SYMBOL(menelaus_set_mmc_slot);
+
+int menelaus_register_mmc_callback(void (*callback)(void *data, u8 card_mask),
+				   void *data)
+{
+	int ret = 0;
+
+	the_menelaus->mmc_callback_data = data;
+	the_menelaus->mmc_callback = callback;
+	ret = menelaus_add_irq_work(MENELAUS_MMC_S1CD_IRQ,
+				    menelaus_mmc_cd_work);
+	if (ret < 0)
+		return ret;
+	ret = menelaus_add_irq_work(MENELAUS_MMC_S2CD_IRQ,
+				    menelaus_mmc_cd_work);
+	if (ret < 0)
+		return ret;
+	ret = menelaus_add_irq_work(MENELAUS_MMC_S1D1_IRQ,
+				    menelaus_mmc_cd_work);
+	if (ret < 0)
+		return ret;
+	ret = menelaus_add_irq_work(MENELAUS_MMC_S2D1_IRQ,
+				    menelaus_mmc_cd_work);
+
+	return ret;
+}
+EXPORT_SYMBOL(menelaus_register_mmc_callback);
+
+void menelaus_unregister_mmc_callback(void)
+{
+	menelaus_remove_irq_work(MENELAUS_MMC_S1CD_IRQ);
+	menelaus_remove_irq_work(MENELAUS_MMC_S2CD_IRQ);
+	menelaus_remove_irq_work(MENELAUS_MMC_S1D1_IRQ);
+	menelaus_remove_irq_work(MENELAUS_MMC_S2D1_IRQ);
+
+	the_menelaus->mmc_callback = NULL;
+	the_menelaus->mmc_callback_data = 0;
+}
+EXPORT_SYMBOL(menelaus_unregister_mmc_callback);
+
+struct menelaus_vtg {
+	const char *name;
+	u8 vtg_reg;
+	u8 vtg_shift;
+	u8 vtg_bits;
+	u8 mode_reg;
+};
+
+struct menelaus_vtg_value {
+	u16 vtg;
+	u16 val;
+};
+
+static int menelaus_set_voltage(const struct menelaus_vtg *vtg, int mV,
+				int vtg_val, int mode)
+{
+	int val, ret;
+	struct i2c_client *c = the_menelaus->client;
+
+	mutex_lock(&the_menelaus->lock);
+	if (vtg == 0)
+		goto set_voltage;
+
+	ret = menelaus_read_reg(vtg->vtg_reg);
+	if (ret < 0)
+		goto out;
+	val = ret & ~(((1 << vtg->vtg_bits) - 1) << vtg->vtg_shift);
+	val |= vtg_val << vtg->vtg_shift;
+
+	dev_dbg(&c->dev, "Setting voltage '%s'"
+			 "to %d mV (reg 0x%02x, val 0x%02x)\n",
+			vtg->name, mV, vtg->vtg_reg, val);
+
+	ret = menelaus_write_reg(vtg->vtg_reg, val);
+	if (ret < 0)
+		goto out;
+set_voltage:
+	ret = menelaus_write_reg(vtg->mode_reg, mode);
+out:
+	mutex_unlock(&the_menelaus->lock);
+	if (ret == 0) {
+		/* Wait for voltage to stabilize */
+		msleep(1);
+	}
+	return ret;
+}
+
+static int menelaus_get_vtg_value(int vtg, const struct menelaus_vtg_value *tbl,
+				  int n)
+{
+	int i;
+
+	for (i = 0; i < n; i++, tbl++)
+		if (tbl->vtg == vtg)
+			return tbl->val;
+	return -EINVAL;
+}
+
+/*
+ * Vcore can be programmed in two ways:
+ * SW-controlled: Required voltage is programmed into VCORE_CTRL1
+ * HW-controlled: Required range (roof-floor) is programmed into VCORE_CTRL3
+ * and VCORE_CTRL4
+ *
+ * Call correct 'set' function accordingly
+ */
+
+static const struct menelaus_vtg_value vcore_values[] = {
+	{ 1000, 0 },
+	{ 1025, 1 },
+	{ 1050, 2 },
+	{ 1075, 3 },
+	{ 1100, 4 },
+	{ 1125, 5 },
+	{ 1150, 6 },
+	{ 1175, 7 },
+	{ 1200, 8 },
+	{ 1225, 9 },
+	{ 1250, 10 },
+	{ 1275, 11 },
+	{ 1300, 12 },
+	{ 1325, 13 },
+	{ 1350, 14 },
+	{ 1375, 15 },
+	{ 1400, 16 },
+	{ 1425, 17 },
+	{ 1450, 18 },
+};
+
+int menelaus_set_vcore_sw(unsigned int mV)
+{
+	int val, ret;
+	struct i2c_client *c = the_menelaus->client;
+
+	val = menelaus_get_vtg_value(mV, vcore_values,
+				     ARRAY_SIZE(vcore_values));
+	if (val < 0)
+		return -EINVAL;
+
+	dev_dbg(&c->dev, "Setting VCORE to %d mV (val 0x%02x)\n", mV, val);
+
+	/* Set SW mode and the voltage in one go. */
+	mutex_lock(&the_menelaus->lock);
+	ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val);
+	if (ret == 0)
+		the_menelaus->vcore_hw_mode = 0;
+	mutex_unlock(&the_menelaus->lock);
+	msleep(1);
+
+	return ret;
+}
+
+int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV)
+{
+	int fval, rval, val, ret;
+	struct i2c_client *c = the_menelaus->client;
+
+	rval = menelaus_get_vtg_value(roof_mV, vcore_values,
+				      ARRAY_SIZE(vcore_values));
+	if (rval < 0)
+		return -EINVAL;
+	fval = menelaus_get_vtg_value(floor_mV, vcore_values,
+				      ARRAY_SIZE(vcore_values));
+	if (fval < 0)
+		return -EINVAL;
+
+	dev_dbg(&c->dev, "Setting VCORE FLOOR to %d mV and ROOF to %d mV\n",
+	       floor_mV, roof_mV);
+
+	mutex_lock(&the_menelaus->lock);
+	ret = menelaus_write_reg(MENELAUS_VCORE_CTRL3, fval);
+	if (ret < 0)
+		goto out;
+	ret = menelaus_write_reg(MENELAUS_VCORE_CTRL4, rval);
+	if (ret < 0)
+		goto out;
+	if (!the_menelaus->vcore_hw_mode) {
+		val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
+		/* HW mode, turn OFF byte comparator */
+		val |= ((1 << 7) | (1 << 5));
+		ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val);
+		the_menelaus->vcore_hw_mode = 1;
+	}
+	msleep(1);
+out:
+	mutex_unlock(&the_menelaus->lock);
+	return ret;
+}
+
+static const struct menelaus_vtg vmem_vtg = {
+	.name = "VMEM",
+	.vtg_reg = MENELAUS_LDO_CTRL1,
+	.vtg_shift = 0,
+	.vtg_bits = 2,
+	.mode_reg = MENELAUS_LDO_CTRL3,
+};
+
+static const struct menelaus_vtg_value vmem_values[] = {
+	{ 1500, 0 },
+	{ 1800, 1 },
+	{ 1900, 2 },
+	{ 2500, 3 },
+};
+
+int menelaus_set_vmem(unsigned int mV)
+{
+	int val;
+
+	if (mV == 0)
+		return menelaus_set_voltage(&vmem_vtg, 0, 0, 0);
+
+	val = menelaus_get_vtg_value(mV, vmem_values, ARRAY_SIZE(vmem_values));
+	if (val < 0)
+		return -EINVAL;
+	return menelaus_set_voltage(&vmem_vtg, mV, val, 0x02);
+}
+EXPORT_SYMBOL(menelaus_set_vmem);
+
+static const struct menelaus_vtg vio_vtg = {
+	.name = "VIO",
+	.vtg_reg = MENELAUS_LDO_CTRL1,
+	.vtg_shift = 2,
+	.vtg_bits = 2,
+	.mode_reg = MENELAUS_LDO_CTRL4,
+};
+
+static const struct menelaus_vtg_value vio_values[] = {
+	{ 1500, 0 },
+	{ 1800, 1 },
+	{ 2500, 2 },
+	{ 2800, 3 },
+};
+
+int menelaus_set_vio(unsigned int mV)
+{
+	int val;
+
+	if (mV == 0)
+		return menelaus_set_voltage(&vio_vtg, 0, 0, 0);
+
+	val = menelaus_get_vtg_value(mV, vio_values, ARRAY_SIZE(vio_values));
+	if (val < 0)
+		return -EINVAL;
+	return menelaus_set_voltage(&vio_vtg, mV, val, 0x02);
+}
+EXPORT_SYMBOL(menelaus_set_vio);
+
+static const struct menelaus_vtg_value vdcdc_values[] = {
+	{ 1500, 0 },
+	{ 1800, 1 },
+	{ 2000, 2 },
+	{ 2200, 3 },
+	{ 2400, 4 },
+	{ 2800, 5 },
+	{ 3000, 6 },
+	{ 3300, 7 },
+};
+
+static const struct menelaus_vtg vdcdc2_vtg = {
+	.name = "VDCDC2",
+	.vtg_reg = MENELAUS_DCDC_CTRL1,
+	.vtg_shift = 0,
+	.vtg_bits = 3,
+	.mode_reg = MENELAUS_DCDC_CTRL2,
+};
+
+static const struct menelaus_vtg vdcdc3_vtg = {
+	.name = "VDCDC3",
+	.vtg_reg = MENELAUS_DCDC_CTRL1,
+	.vtg_shift = 3,
+	.vtg_bits = 3,
+	.mode_reg = MENELAUS_DCDC_CTRL3,
+};
+
+int menelaus_set_vdcdc(int dcdc, unsigned int mV)
+{
+	const struct menelaus_vtg *vtg;
+	int val;
+
+	if (dcdc != 2 && dcdc != 3)
+		return -EINVAL;
+	if (dcdc == 2)
+		vtg = &vdcdc2_vtg;
+	else
+		vtg = &vdcdc3_vtg;
+
+	if (mV == 0)
+		return menelaus_set_voltage(vtg, 0, 0, 0);
+
+	val = menelaus_get_vtg_value(mV, vdcdc_values,
+				     ARRAY_SIZE(vdcdc_values));
+	if (val < 0)
+		return -EINVAL;
+	return menelaus_set_voltage(vtg, mV, val, 0x03);
+}
+
+static const struct menelaus_vtg_value vmmc_values[] = {
+	{ 1850, 0 },
+	{ 2800, 1 },
+	{ 3000, 2 },
+	{ 3100, 3 },
+};
+
+static const struct menelaus_vtg vmmc_vtg = {
+	.name = "VMMC",
+	.vtg_reg = MENELAUS_LDO_CTRL1,
+	.vtg_shift = 6,
+	.vtg_bits = 2,
+	.mode_reg = MENELAUS_LDO_CTRL7,
+};
+
+int menelaus_set_vmmc(unsigned int mV)
+{
+	int val;
+
+	if (mV == 0)
+		return menelaus_set_voltage(&vmmc_vtg, 0, 0, 0);
+
+	val = menelaus_get_vtg_value(mV, vmmc_values, ARRAY_SIZE(vmmc_values));
+	if (val < 0)
+		return -EINVAL;
+	return menelaus_set_voltage(&vmmc_vtg, mV, val, 0x02);
+}
+EXPORT_SYMBOL(menelaus_set_vmmc);
+
+
+static const struct menelaus_vtg_value vaux_values[] = {
+	{ 1500, 0 },
+	{ 1800, 1 },
+	{ 2500, 2 },
+	{ 2800, 3 },
+};
+
+static const struct menelaus_vtg vaux_vtg = {
+	.name = "VAUX",
+	.vtg_reg = MENELAUS_LDO_CTRL1,
+	.vtg_shift = 4,
+	.vtg_bits = 2,
+	.mode_reg = MENELAUS_LDO_CTRL6,
+};
+
+int menelaus_set_vaux(unsigned int mV)
+{
+	int val;
+
+	if (mV == 0)
+		return menelaus_set_voltage(&vaux_vtg, 0, 0, 0);
+
+	val = menelaus_get_vtg_value(mV, vaux_values, ARRAY_SIZE(vaux_values));
+	if (val < 0)
+		return -EINVAL;
+	return menelaus_set_voltage(&vaux_vtg, mV, val, 0x02);
+}
+EXPORT_SYMBOL(menelaus_set_vaux);
+
+int menelaus_get_slot_pin_states(void)
+{
+	return menelaus_read_reg(MENELAUS_MCT_PIN_ST);
+}
+EXPORT_SYMBOL(menelaus_get_slot_pin_states);
+
+int menelaus_set_regulator_sleep(int enable, u32 val)
+{
+	int t, ret;
+	struct i2c_client *c = the_menelaus->client;
+
+	mutex_lock(&the_menelaus->lock);
+	ret = menelaus_write_reg(MENELAUS_SLEEP_CTRL2, val);
+	if (ret < 0)
+		goto out;
+
+	dev_dbg(&c->dev, "regulator sleep configuration: %02x\n", val);
+
+	ret = menelaus_read_reg(MENELAUS_GPIO_CTRL);
+	if (ret < 0)
+		goto out;
+	t = ((1 << 6) | 0x04);
+	if (enable)
+		ret |= t;
+	else
+		ret &= ~t;
+	ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret);
+out:
+	mutex_unlock(&the_menelaus->lock);
+	return ret;
+}
+
+/*-----------------------------------------------------------------------*/
+
+/* Handles Menelaus interrupts. Does not run in interrupt context */
+static void menelaus_work(struct work_struct *_menelaus)
+{
+	struct menelaus_chip *menelaus =
+			container_of(_menelaus, struct menelaus_chip, work);
+	void (*handler)(struct menelaus_chip *menelaus);
+
+	while (1) {
+		unsigned isr;
+
+		isr = (menelaus_read_reg(MENELAUS_INT_STATUS2)
+				& ~menelaus->mask2) << 8;
+		isr |= menelaus_read_reg(MENELAUS_INT_STATUS1)
+				& ~menelaus->mask1;
+		if (!isr)
+			break;
+
+		while (isr) {
+			int irq = fls(isr) - 1;
+			isr &= ~(1 << irq);
+
+			mutex_lock(&menelaus->lock);
+			menelaus_disable_irq(irq);
+			menelaus_ack_irq(irq);
+			handler = menelaus->handlers[irq];
+			if (handler)
+				handler(menelaus);
+			menelaus_enable_irq(irq);
+			mutex_unlock(&menelaus->lock);
+		}
+	}
+	enable_irq(menelaus->client->irq);
+}
+
+/*
+ * We cannot use I2C in interrupt context, so we just schedule work.
+ */
+static irqreturn_t menelaus_irq(int irq, void *_menelaus)
+{
+	struct menelaus_chip *menelaus = _menelaus;
+
+	disable_irq_nosync(irq);
+	(void)schedule_work(&menelaus->work);
+
+	return IRQ_HANDLED;
+}
+
+/*-----------------------------------------------------------------------*/
+
+/*
+ * The RTC needs to be set once, then it runs on backup battery power.
+ * It supports alarms, including system wake alarms (from some modes);
+ * and 1/second IRQs if requested.
+ */
+#ifdef CONFIG_RTC_DRV_TWL92330
+
+#define RTC_CTRL_RTC_EN		(1 << 0)
+#define RTC_CTRL_AL_EN		(1 << 1)
+#define RTC_CTRL_MODE12		(1 << 2)
+#define RTC_CTRL_EVERY_MASK	(3 << 3)
+#define RTC_CTRL_EVERY_SEC	(0 << 3)
+#define RTC_CTRL_EVERY_MIN	(1 << 3)
+#define RTC_CTRL_EVERY_HR	(2 << 3)
+#define RTC_CTRL_EVERY_DAY	(3 << 3)
+
+#define RTC_UPDATE_EVERY	0x08
+
+#define RTC_HR_PM		(1 << 7)
+
+static void menelaus_to_time(char *regs, struct rtc_time *t)
+{
+	t->tm_sec = BCD2BIN(regs[0]);
+	t->tm_min = BCD2BIN(regs[1]);
+	if (the_menelaus->rtc_control & RTC_CTRL_MODE12) {
+		t->tm_hour = BCD2BIN(regs[2] & 0x1f) - 1;
+		if (regs[2] & RTC_HR_PM)
+			t->tm_hour += 12;
+	} else
+		t->tm_hour = BCD2BIN(regs[2] & 0x3f);
+	t->tm_mday = BCD2BIN(regs[3]);
+	t->tm_mon = BCD2BIN(regs[4]) - 1;
+	t->tm_year = BCD2BIN(regs[5]) + 100;
+}
+
+static int time_to_menelaus(struct rtc_time *t, int regnum)
+{
+	int	hour, status;
+
+	status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_sec));
+	if (status < 0)
+		goto fail;
+
+	status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_min));
+	if (status < 0)
+		goto fail;
+
+	if (the_menelaus->rtc_control & RTC_CTRL_MODE12) {
+		hour = t->tm_hour + 1;
+		if (hour > 12)
+			hour = RTC_HR_PM | BIN2BCD(hour - 12);
+		else
+			hour = BIN2BCD(hour);
+	} else
+		hour = BIN2BCD(t->tm_hour);
+	status = menelaus_write_reg(regnum++, hour);
+	if (status < 0)
+		goto fail;
+
+	status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_mday));
+	if (status < 0)
+		goto fail;
+
+	status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_mon + 1));
+	if (status < 0)
+		goto fail;
+
+	status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_year - 100));
+	if (status < 0)
+		goto fail;
+
+	return 0;
+fail:
+	dev_err(&the_menelaus->client->dev, "rtc write reg %02x, err %d\n",
+			--regnum, status);
+	return status;
+}
+
+static int menelaus_read_time(struct device *dev, struct rtc_time *t)
+{
+	struct i2c_msg	msg[2];
+	char		regs[7];
+	int		status;
+
+	/* block read date and time registers */
+	regs[0] = MENELAUS_RTC_SEC;
+
+	msg[0].addr = MENELAUS_I2C_ADDRESS;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = regs;
+
+	msg[1].addr = MENELAUS_I2C_ADDRESS;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = sizeof(regs);
+	msg[1].buf = regs;
+
+	status = i2c_transfer(the_menelaus->client->adapter, msg, 2);
+	if (status != 2) {
+		dev_err(dev, "%s error %d\n", "read", status);
+		return -EIO;
+	}
+
+	menelaus_to_time(regs, t);
+	t->tm_wday = BCD2BIN(regs[6]);
+
+	return 0;
+}
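+
+/*
+ * The block read above is a standard I2C combined transaction: msg[0]
+ * writes one byte selecting the first register to read, and msg[1]
+ * (I2C_M_RD) then reads sizeof(regs) consecutive bytes starting there.
+ * i2c_transfer() returns the number of messages processed, so anything
+ * other than 2 is treated as an error.
+ */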
+
+static int menelaus_set_time(struct device *dev, struct rtc_time *t)
+{
+	int		status;
+
+	/* write date and time registers */
+	status = time_to_menelaus(t, MENELAUS_RTC_SEC);
+	if (status < 0)
+		return status;
+	status = menelaus_write_reg(MENELAUS_RTC_WKDAY, BIN2BCD(t->tm_wday));
+	if (status < 0) {
+		dev_err(&the_menelaus->client->dev, "rtc write reg %02x, "
+				"err %d\n", MENELAUS_RTC_WKDAY, status);
+		return status;
+	}
+
+	/* now commit the write */
+	status = menelaus_write_reg(MENELAUS_RTC_UPDATE, RTC_UPDATE_EVERY);
+	if (status < 0)
+		dev_err(&the_menelaus->client->dev, "rtc commit time, err %d\n",
+				status);
+
+	return 0;
+}
+
+static int menelaus_read_alarm(struct device *dev, struct rtc_wkalrm *w)
+{
+	struct i2c_msg	msg[2];
+	char		regs[6];
+	int		status;
+
+	/* block read alarm registers */
+	regs[0] = MENELAUS_RTC_AL_SEC;
+
+	msg[0].addr = MENELAUS_I2C_ADDRESS;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = regs;
+
+	msg[1].addr = MENELAUS_I2C_ADDRESS;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = sizeof(regs);
+	msg[1].buf = regs;
+
+	status = i2c_transfer(the_menelaus->client->adapter, msg, 2);
+	if (status != 2) {
+		dev_err(dev, "%s error %d\n", "alarm read", status);
+		return -EIO;
+	}
+
+	menelaus_to_time(regs, &w->time);
+
+	w->enabled = !!(the_menelaus->rtc_control & RTC_CTRL_AL_EN);
+
+	/* NOTE we *could* check if actually pending... */
+	w->pending = 0;
+
+	return 0;
+}
+
+static int menelaus_set_alarm(struct device *dev, struct rtc_wkalrm *w)
+{
+	int		status;
+
+	if (the_menelaus->client->irq <= 0 && w->enabled)
+		return -ENODEV;
+
+	/* clear previous alarm enable */
+	if (the_menelaus->rtc_control & RTC_CTRL_AL_EN) {
+		the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
+		status = menelaus_write_reg(MENELAUS_RTC_CTRL,
+				the_menelaus->rtc_control);
+		if (status < 0)
+			return status;
+	}
+
+	/* write alarm registers */
+	status = time_to_menelaus(&w->time, MENELAUS_RTC_AL_SEC);
+	if (status < 0)
+		return status;
+
+	/* enable alarm if requested */
+	if (w->enabled) {
+		the_menelaus->rtc_control |= RTC_CTRL_AL_EN;
+		status = menelaus_write_reg(MENELAUS_RTC_CTRL,
+				the_menelaus->rtc_control);
+	}
+
+	return status;
+}
+
+#ifdef CONFIG_RTC_INTF_DEV
+
+static void menelaus_rtc_update_work(struct menelaus_chip *m)
+{
+	/* report 1/sec update */
+	local_irq_disable();
+	rtc_update_irq(m->rtc, 1, RTC_IRQF | RTC_UF);
+	local_irq_enable();
+}
+
+static int menelaus_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
+{
+	int	status;
+
+	if (the_menelaus->client->irq <= 0)
+		return -ENOIOCTLCMD;
+
+	switch (cmd) {
+	/* alarm IRQ */
+	case RTC_AIE_ON:
+		if (the_menelaus->rtc_control & RTC_CTRL_AL_EN)
+			return 0;
+		the_menelaus->rtc_control |= RTC_CTRL_AL_EN;
+		break;
+	case RTC_AIE_OFF:
+		if (!(the_menelaus->rtc_control & RTC_CTRL_AL_EN))
+			return 0;
+		the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
+		break;
+	/* 1/second "update" IRQ */
+	case RTC_UIE_ON:
+		if (the_menelaus->uie)
+			return 0;
+		status = menelaus_remove_irq_work(MENELAUS_RTCTMR_IRQ);
+		status = menelaus_add_irq_work(MENELAUS_RTCTMR_IRQ,
+				menelaus_rtc_update_work);
+		if (status == 0)
+			the_menelaus->uie = 1;
+		return status;
+	case RTC_UIE_OFF:
+		if (!the_menelaus->uie)
+			return 0;
+		status = menelaus_remove_irq_work(MENELAUS_RTCTMR_IRQ);
+		if (status == 0)
+			the_menelaus->uie = 0;
+		return status;
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control);
+}
+
+#else
+#define menelaus_ioctl	NULL
+#endif
+
+/* REVISIT no compensation register support ... */
+
+static const struct rtc_class_ops menelaus_rtc_ops = {
+	.ioctl			= menelaus_ioctl,
+	.read_time		= menelaus_read_time,
+	.set_time		= menelaus_set_time,
+	.read_alarm		= menelaus_read_alarm,
+	.set_alarm		= menelaus_set_alarm,
+};
+
+static void menelaus_rtc_alarm_work(struct menelaus_chip *m)
+{
+	/* report alarm */
+	local_irq_disable();
+	rtc_update_irq(m->rtc, 1, RTC_IRQF | RTC_AF);
+	local_irq_enable();
+
+	/* then disable it; alarms are oneshot */
+	the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
+	menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control);
+}
+
+static inline void menelaus_rtc_init(struct menelaus_chip *m)
+{
+	int	alarm = (m->client->irq > 0);
+
+	/* assume 32KDETEN pin is pulled high */
+	if (!(menelaus_read_reg(MENELAUS_OSC_CTRL) & 0x80)) {
+		dev_dbg(&m->client->dev, "no 32k oscillator\n");
+		return;
+	}
+
+	/* support RTC alarm; it can issue wakeups */
+	if (alarm) {
+		if (menelaus_add_irq_work(MENELAUS_RTCALM_IRQ,
+				menelaus_rtc_alarm_work) < 0) {
+			dev_err(&m->client->dev, "can't handle RTC alarm\n");
+			return;
+		}
+		device_init_wakeup(&m->client->dev, 1);
+	}
+
+	/* be sure RTC is enabled; allow 1/sec irqs; leave 12hr mode alone */
+	m->rtc_control = menelaus_read_reg(MENELAUS_RTC_CTRL);
+	if (!(m->rtc_control & RTC_CTRL_RTC_EN)
+			|| (m->rtc_control & RTC_CTRL_AL_EN)
+			|| (m->rtc_control & RTC_CTRL_EVERY_MASK)) {
+		if (!(m->rtc_control & RTC_CTRL_RTC_EN)) {
+			dev_warn(&m->client->dev, "rtc clock needs setting\n");
+			m->rtc_control |= RTC_CTRL_RTC_EN;
+		}
+		m->rtc_control &= ~RTC_CTRL_EVERY_MASK;
+		m->rtc_control &= ~RTC_CTRL_AL_EN;
+		menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control);
+	}
+
+	m->rtc = rtc_device_register(DRIVER_NAME,
+			&m->client->dev,
+			&menelaus_rtc_ops, THIS_MODULE);
+	if (IS_ERR(m->rtc)) {
+		if (alarm) {
+			menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ);
+			device_init_wakeup(&m->client->dev, 0);
+		}
+		dev_err(&m->client->dev, "can't register RTC: %d\n",
+				(int) PTR_ERR(m->rtc));
+		the_menelaus->rtc = NULL;
+	}
+}
+
+#else
+
+static inline void menelaus_rtc_init(struct menelaus_chip *m)
+{
+	/* nothing */
+}
+
+#endif
+
+/*-----------------------------------------------------------------------*/
+
+static struct i2c_driver menelaus_i2c_driver;
+
+static int menelaus_probe(struct i2c_client *client)
+{
+	struct menelaus_chip	*menelaus;
+	int			rev = 0, val;
+	int			err = 0;
+	struct menelaus_platform_data *menelaus_pdata =
+					client->dev.platform_data;
+
+	if (the_menelaus) {
+		dev_dbg(&client->dev, "only one %s for now\n",
+				DRIVER_NAME);
+		return -ENODEV;
+	}
+
+	menelaus = kzalloc(sizeof *menelaus, GFP_KERNEL);
+	if (!menelaus)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, menelaus);
+
+	the_menelaus = menelaus;
+	menelaus->client = client;
+
+	/* If this is a true probe, check that the device is present */
+	rev = menelaus_read_reg(MENELAUS_REV);
+	if (rev < 0) {
+		pr_err("device not found");
+		err = -ENODEV;
+		goto fail1;
+	}
+
+	/* Ack and disable all Menelaus interrupts */
+	menelaus_write_reg(MENELAUS_INT_ACK1, 0xff);
+	menelaus_write_reg(MENELAUS_INT_ACK2, 0xff);
+	menelaus_write_reg(MENELAUS_INT_MASK1, 0xff);
+	menelaus_write_reg(MENELAUS_INT_MASK2, 0xff);
+	menelaus->mask1 = 0xff;
+	menelaus->mask2 = 0xff;
+
+	/* Set output buffer strengths */
+	menelaus_write_reg(MENELAUS_MCT_CTRL1, 0x73);
+
+	if (client->irq > 0) {
+		err = request_irq(client->irq, menelaus_irq, IRQF_DISABLED,
+				  DRIVER_NAME, menelaus);
+		if (err) {
+			dev_dbg(&client->dev, "can't get IRQ %d, err %d\n",
+					client->irq, err);
+			goto fail1;
+		}
+	}
+
+	mutex_init(&menelaus->lock);
+	INIT_WORK(&menelaus->work, menelaus_work);
+
+	pr_info("Menelaus rev %d.%d\n", rev >> 4, rev & 0x0f);
+
+	val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
+	if (val < 0)
+		goto fail2;
+	if (val & (1 << 7))
+		menelaus->vcore_hw_mode = 1;
+	else
+		menelaus->vcore_hw_mode = 0;
+
+	if (menelaus_pdata != NULL && menelaus_pdata->late_init != NULL) {
+		err = menelaus_pdata->late_init(&client->dev);
+		if (err < 0)
+			goto fail2;
+	}
+
+	menelaus_rtc_init(menelaus);
+
+	return 0;
+fail2:
+	free_irq(client->irq, menelaus);
+	flush_scheduled_work();
+fail1:
+	kfree(menelaus);
+	return err;
+}
+
+static int __exit menelaus_remove(struct i2c_client *client)
+{
+	struct menelaus_chip	*menelaus = i2c_get_clientdata(client);
+
+	free_irq(client->irq, menelaus);
+	kfree(menelaus);
+	i2c_set_clientdata(client, NULL);
+	the_menelaus = NULL;
+	return 0;
+}
+
+static struct i2c_driver menelaus_i2c_driver = {
+	.driver = {
+		.name		= DRIVER_NAME,
+	},
+	.probe		= menelaus_probe,
+	.remove		= __exit_p(menelaus_remove),
+};
+
+static int __init menelaus_init(void)
+{
+	int res;
+
+	res = i2c_add_driver(&menelaus_i2c_driver);
+	if (res < 0) {
+		pr_err("driver registration failed\n");
+		return res;
+	}
+
+	return 0;
+}
+
+static void __exit menelaus_exit(void)
+{
+	i2c_del_driver(&menelaus_i2c_driver);
+
+	/* FIXME: Shutdown menelaus parts that can be shut down */
+}
+
+MODULE_AUTHOR("Texas Instruments, Inc. (and others)");
+MODULE_DESCRIPTION("I2C interface for Menelaus.");
+MODULE_LICENSE("GPL");
+
+module_init(menelaus_init);
+module_exit(menelaus_exit);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 6971a62..d663e69 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -288,7 +288,6 @@
 	struct i2c_adapter *adap = to_i2c_adapter(dev);
 	complete(&adap->dev_released);
 }
-EXPORT_SYMBOL_GPL(i2c_adapter_dev_release);	/* exported to i2c-isa */
 
 static ssize_t
 show_adapter_name(struct device *dev, struct device_attribute *attr, char *buf)
@@ -307,7 +306,6 @@
 	.name			= "i2c-adapter",
 	.dev_attrs		= i2c_adapter_attrs,
 };
-EXPORT_SYMBOL_GPL(i2c_adapter_class);		/* exported to i2c-isa */
 
 static void i2c_scan_static_board_info(struct i2c_adapter *adapter)
 {
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index c5b5011..f9de798 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -55,7 +55,7 @@
 #include <asm/bitops.h>
 
 static int __ide_end_request(ide_drive_t *drive, struct request *rq,
-			     int uptodate, int nr_sectors)
+			     int uptodate, unsigned int nr_bytes)
 {
 	int ret = 1;
 
@@ -64,7 +64,7 @@
 	 * complete the whole request right now
 	 */
 	if (blk_noretry_request(rq) && end_io_error(uptodate))
-		nr_sectors = rq->hard_nr_sectors;
+		nr_bytes = rq->hard_nr_sectors << 9;
 
 	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
 		rq->errors = -EIO;
@@ -78,7 +78,7 @@
 		HWGROUP(drive)->hwif->ide_dma_on(drive);
 	}
 
-	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
+	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
 		add_disk_randomness(rq->rq_disk);
 		if (!list_empty(&rq->queuelist))
 			blkdev_dequeue_request(rq);
@@ -103,6 +103,7 @@
 
 int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 {
+	unsigned int nr_bytes = nr_sectors << 9;
 	struct request *rq;
 	unsigned long flags;
 	int ret = 1;
@@ -114,10 +115,14 @@
 	spin_lock_irqsave(&ide_lock, flags);
 	rq = HWGROUP(drive)->rq;
 
-	if (!nr_sectors)
-		nr_sectors = rq->hard_cur_sectors;
+	if (!nr_bytes) {
+		if (blk_pc_request(rq))
+			nr_bytes = rq->data_len;
+		else
+			nr_bytes = rq->hard_cur_sectors << 9;
+	}
 
-	ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
+	ret = __ide_end_request(drive, rq, uptodate, nr_bytes);
 
 	spin_unlock_irqrestore(&ide_lock, flags);
 	return ret;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index cc58013..5a4c5ea 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1073,14 +1073,14 @@
 		hwgroup->hwif->next = hwif;
 		spin_unlock_irq(&ide_lock);
 	} else {
-		hwgroup = kmalloc_node(sizeof(ide_hwgroup_t), GFP_KERNEL,
+		hwgroup = kmalloc_node(sizeof(ide_hwgroup_t),
+					GFP_KERNEL | __GFP_ZERO,
 					hwif_to_node(hwif->drives[0].hwif));
 		if (!hwgroup)
 	       		goto out_up;
 
 		hwif->hwgroup = hwgroup;
 
-		memset(hwgroup, 0, sizeof(ide_hwgroup_t));
 		hwgroup->hwif     = hwif->next = hwif;
 		hwgroup->rq       = NULL;
 		hwgroup->handler  = NULL;
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 8cd7694..077fb67 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1049,13 +1049,9 @@
 	unsigned long flags;
 	ide_driver_t *drv;
 	void __user *p = (void __user *)arg;
-	int err, (*setfunc)(ide_drive_t *, int);
+	int err = 0, (*setfunc)(ide_drive_t *, int);
 	u8 *val;
 
-	err = scsi_cmd_ioctl(file, bdev->bd_disk->queue, bdev->bd_disk, cmd, p);
-	if (err != -ENOTTY)
-		return err;
-
 	switch (cmd) {
 	case HDIO_GET_32BIT:	    val = &drive->io_32bit;	 goto read_val;
 	case HDIO_GET_KEEPSETTINGS: val = &drive->keep_settings; goto read_val;
@@ -1175,6 +1171,10 @@
 			return 0;
 		}
 
+		case CDROMEJECT:
+		case CDROMCLOSETRAY:
+			return scsi_cmd_ioctl(file, bdev->bd_disk->queue, bdev->bd_disk, cmd, p);
+
 		case HDIO_GET_BUSSTATE:
 			if (!capable(CAP_SYS_ADMIN))
 				return -EACCES;
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 6e935d7..c2e2957 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -165,12 +165,11 @@
 		goto out;
 	}
 
-        if (!(pldev = kmalloc(sizeof (*pldev), GFP_KERNEL))) {
+        if (!(pldev = kzalloc(sizeof (*pldev), GFP_KERNEL))) {
 		err = -ENOMEM;
 		goto out_unregister_driver;
 	}
 
-	memset (pldev, 0, sizeof (*pldev));
 	pldev->name		= swarm_ide_string;
 	pldev->id		= 0;
 	pldev->dev.release	= swarm_ide_platform_release;
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 0fc8c6e..ee452595 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -30,6 +30,7 @@
 #include <linux/moduleparam.h>
 #include <linux/bitops.h>
 #include <linux/kdev_t.h>
+#include <linux/freezer.h>
 #include <linux/suspend.h>
 #include <linux/kthread.h>
 #include <linux/preempt.h>
@@ -1128,8 +1129,6 @@
 	struct list_head tmp;
 	int may_schedule;
 
-	current->flags |= PF_NOFREEZE;
-
 	while (!kthread_should_stop()) {
 
 		INIT_LIST_HEAD(&tmp);
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 51a1206..2ffd534 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1699,6 +1699,7 @@
 	unsigned int g, generation = 0;
 	int i, reset_cycles = 0;
 
+	set_freezable();
 	/* Setup our device-model entries */
 	nodemgr_create_host_dev_files(host);
 
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a91001c..c5c33d3 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -295,10 +295,9 @@
 	struct addr_req *req;
 	int ret = 0;
 
-	req = kmalloc(sizeof *req, GFP_KERNEL);
+	req = kzalloc(sizeof *req, GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
-	memset(req, 0, sizeof *req);
 
 	if (src_addr)
 		memcpy(&req->src_addr, src_addr, ip_addr_size(src_addr));
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 9820c67..4df269f 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3374,7 +3374,7 @@
 }
 EXPORT_SYMBOL(ib_cm_init_qp_attr);
 
-void cm_get_ack_delay(struct cm_device *cm_dev)
+static void cm_get_ack_delay(struct cm_device *cm_dev)
 {
 	struct ib_device_attr attr;
 
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 23af7a0..9ffb998 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -573,7 +573,7 @@
 		break;
 	case RDMA_TRANSPORT_IWARP:
 		if (!id_priv->cm_id.iw) {
-			qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+			qp_attr->qp_access_flags = 0;
 			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
 		} else
 			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 3b41dc0..9574088 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -229,9 +229,8 @@
 {
 	struct iwch_ep_common *epc;
 
-	epc = kmalloc(size, gfp);
+	epc = kzalloc(size, gfp);
 	if (epc) {
-		memset(epc, 0, size);
 		kref_init(&epc->kref);
 		spin_lock_init(&epc->lock);
 		init_waitqueue_head(&epc->waitq);
@@ -1914,6 +1913,7 @@
 fail3:
 	cxgb3_free_stid(ep->com.tdev, ep->stid);
 fail2:
+	cm_id->rem_ref(cm_id);
 	put_ep(&ep->com);
 fail1:
 out:
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index 3cd6bf3..e53a97a 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -79,7 +79,7 @@
 			av->av.ipd = (ah_mult > 0) ?
 				((ehca_mult - 1) / ah_mult) : 0;
 	} else
-	        av->av.ipd = ehca_static_rate;
+		av->av.ipd = ehca_static_rate;
 
 	av->av.lnh = ah_attr->ah_flags;
 	av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index daf823e..043e4fb 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -204,11 +204,11 @@
 	spinlock_t mrlock;
 
 	enum ehca_mr_flag flags;
-	u32 num_pages;		/* number of MR pages */
-	u32 num_4k;		/* number of 4k "page" portions to form MR */
+	u32 num_kpages;		/* number of kernel pages */
+	u32 num_hwpages;	/* number of hw pages to form MR */
 	int acl;		/* ACL (stored here for usage in reregister) */
 	u64 *start;		/* virtual start address (stored here for */
-	                        /* usage in reregister) */
+				/* usage in reregister) */
 	u64 size;		/* size (stored here for usage in reregister) */
 	u32 fmr_page_size;	/* page size for FMR */
 	u32 fmr_max_pages;	/* max pages for FMR */
@@ -217,9 +217,6 @@
 	/* fw specific data */
 	struct ipz_mrmw_handle ipz_mr_handle;	/* MR handle for h-calls */
 	struct h_galpas galpas;
-	/* data for userspace bridge */
-	u32 nr_of_pages;
-	void *pagearray;
 };
 
 struct ehca_mw {
@@ -241,26 +238,29 @@
 
 struct ehca_mr_pginfo {
 	enum ehca_mr_pgi_type type;
-	u64 num_pages;
-	u64 page_cnt;
-	u64 num_4k;       /* number of 4k "page" portions */
-	u64 page_4k_cnt;  /* counter for 4k "page" portions */
-	u64 next_4k;      /* next 4k "page" portion in buffer/chunk/listelem */
+	u64 num_kpages;
+	u64 kpage_cnt;
+	u64 num_hwpages;     /* number of hw pages */
+	u64 hwpage_cnt;      /* counter for hw pages */
+	u64 next_hwpage;     /* next hw page in buffer/chunk/listelem */
 
-	/* type EHCA_MR_PGI_PHYS section */
-	int num_phys_buf;
-	struct ib_phys_buf *phys_buf_array;
-	u64 next_buf;
-
-	/* type EHCA_MR_PGI_USER section */
-	struct ib_umem *region;
-	struct ib_umem_chunk *next_chunk;
-	u64 next_nmap;
-
-	/* type EHCA_MR_PGI_FMR section */
-	u64 *page_list;
-	u64 next_listelem;
-	/* next_4k also used within EHCA_MR_PGI_FMR */
+	union {
+		struct { /* type EHCA_MR_PGI_PHYS section */
+			int num_phys_buf;
+			struct ib_phys_buf *phys_buf_array;
+			u64 next_buf;
+		} phy;
+		struct { /* type EHCA_MR_PGI_USER section */
+			struct ib_umem *region;
+			struct ib_umem_chunk *next_chunk;
+			u64 next_nmap;
+		} usr;
+		struct { /* type EHCA_MR_PGI_FMR section */
+			u64 fmr_pgsize;
+			u64 *page_list;
+			u64 next_listelem;
+		} fmr;
+	} u;
 };
 
 /* output parameters for MR/FMR hipz calls */
@@ -391,6 +391,6 @@
 
 int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
 int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
-struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
+struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
 
 #endif
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
index fb3df5c..1798e64 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
@@ -154,83 +154,83 @@
 	u32 reserved_70_127[58];       /* 70 */
 };
 
-#define MQPCB_MASK_QKEY                         EHCA_BMASK_IBM(0,0)
-#define MQPCB_MASK_SEND_PSN                     EHCA_BMASK_IBM(2,2)
-#define MQPCB_MASK_RECEIVE_PSN                  EHCA_BMASK_IBM(3,3)
-#define MQPCB_MASK_PRIM_PHYS_PORT               EHCA_BMASK_IBM(4,4)
-#define MQPCB_PRIM_PHYS_PORT                    EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_ALT_PHYS_PORT                EHCA_BMASK_IBM(5,5)
-#define MQPCB_MASK_PRIM_P_KEY_IDX               EHCA_BMASK_IBM(6,6)
-#define MQPCB_PRIM_P_KEY_IDX                    EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_ALT_P_KEY_IDX                EHCA_BMASK_IBM(7,7)
-#define MQPCB_MASK_RDMA_ATOMIC_CTRL             EHCA_BMASK_IBM(8,8)
-#define MQPCB_MASK_QP_STATE                     EHCA_BMASK_IBM(9,9)
-#define MQPCB_QP_STATE                          EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES      EHCA_BMASK_IBM(11,11)
-#define MQPCB_MASK_PATH_MIGRATION_STATE         EHCA_BMASK_IBM(12,12)
-#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP    EHCA_BMASK_IBM(13,13)
-#define MQPCB_MASK_DEST_QP_NR                   EHCA_BMASK_IBM(14,14)
-#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD      EHCA_BMASK_IBM(15,15)
-#define MQPCB_MASK_SERVICE_LEVEL                EHCA_BMASK_IBM(16,16)
-#define MQPCB_MASK_SEND_GRH_FLAG                EHCA_BMASK_IBM(17,17)
-#define MQPCB_MASK_RETRY_COUNT                  EHCA_BMASK_IBM(18,18)
-#define MQPCB_MASK_TIMEOUT                      EHCA_BMASK_IBM(19,19)
-#define MQPCB_MASK_PATH_MTU                     EHCA_BMASK_IBM(20,20)
-#define MQPCB_PATH_MTU                          EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_MAX_STATIC_RATE              EHCA_BMASK_IBM(21,21)
-#define MQPCB_MAX_STATIC_RATE                   EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_DLID                         EHCA_BMASK_IBM(22,22)
-#define MQPCB_DLID                              EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_RNR_RETRY_COUNT              EHCA_BMASK_IBM(23,23)
-#define MQPCB_RNR_RETRY_COUNT                   EHCA_BMASK_IBM(29,31)
-#define MQPCB_MASK_SOURCE_PATH_BITS             EHCA_BMASK_IBM(24,24)
-#define MQPCB_SOURCE_PATH_BITS                  EHCA_BMASK_IBM(25,31)
-#define MQPCB_MASK_TRAFFIC_CLASS                EHCA_BMASK_IBM(25,25)
-#define MQPCB_TRAFFIC_CLASS                     EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_HOP_LIMIT                    EHCA_BMASK_IBM(26,26)
-#define MQPCB_HOP_LIMIT                         EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_SOURCE_GID_IDX               EHCA_BMASK_IBM(27,27)
-#define MQPCB_SOURCE_GID_IDX                    EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_FLOW_LABEL                   EHCA_BMASK_IBM(28,28)
-#define MQPCB_FLOW_LABEL                        EHCA_BMASK_IBM(12,31)
-#define MQPCB_MASK_DEST_GID                     EHCA_BMASK_IBM(30,30)
-#define MQPCB_MASK_SERVICE_LEVEL_AL             EHCA_BMASK_IBM(31,31)
-#define MQPCB_SERVICE_LEVEL_AL                  EHCA_BMASK_IBM(28,31)
-#define MQPCB_MASK_SEND_GRH_FLAG_AL             EHCA_BMASK_IBM(32,32)
-#define MQPCB_SEND_GRH_FLAG_AL                  EHCA_BMASK_IBM(31,31)
-#define MQPCB_MASK_RETRY_COUNT_AL               EHCA_BMASK_IBM(33,33)
-#define MQPCB_RETRY_COUNT_AL                    EHCA_BMASK_IBM(29,31)
-#define MQPCB_MASK_TIMEOUT_AL                   EHCA_BMASK_IBM(34,34)
-#define MQPCB_TIMEOUT_AL                        EHCA_BMASK_IBM(27,31)
-#define MQPCB_MASK_MAX_STATIC_RATE_AL           EHCA_BMASK_IBM(35,35)
-#define MQPCB_MAX_STATIC_RATE_AL                EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_DLID_AL                      EHCA_BMASK_IBM(36,36)
-#define MQPCB_DLID_AL                           EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_RNR_RETRY_COUNT_AL           EHCA_BMASK_IBM(37,37)
-#define MQPCB_RNR_RETRY_COUNT_AL                EHCA_BMASK_IBM(29,31)
-#define MQPCB_MASK_SOURCE_PATH_BITS_AL          EHCA_BMASK_IBM(38,38)
-#define MQPCB_SOURCE_PATH_BITS_AL               EHCA_BMASK_IBM(25,31)
-#define MQPCB_MASK_TRAFFIC_CLASS_AL             EHCA_BMASK_IBM(39,39)
-#define MQPCB_TRAFFIC_CLASS_AL                  EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_HOP_LIMIT_AL                 EHCA_BMASK_IBM(40,40)
-#define MQPCB_HOP_LIMIT_AL                      EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_SOURCE_GID_IDX_AL            EHCA_BMASK_IBM(41,41)
-#define MQPCB_SOURCE_GID_IDX_AL                 EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_FLOW_LABEL_AL                EHCA_BMASK_IBM(42,42)
-#define MQPCB_FLOW_LABEL_AL                     EHCA_BMASK_IBM(12,31)
-#define MQPCB_MASK_DEST_GID_AL                  EHCA_BMASK_IBM(44,44)
-#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR         EHCA_BMASK_IBM(45,45)
-#define MQPCB_MAX_NR_OUTST_SEND_WR              EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR         EHCA_BMASK_IBM(46,46)
-#define MQPCB_MAX_NR_OUTST_RECV_WR              EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK     EHCA_BMASK_IBM(47,47)
-#define MQPCB_DISABLE_ETE_CREDIT_CHECK          EHCA_BMASK_IBM(31,31)
-#define MQPCB_QP_NUMBER                         EHCA_BMASK_IBM(8,31)
-#define MQPCB_MASK_QP_ENABLE                    EHCA_BMASK_IBM(48,48)
-#define MQPCB_QP_ENABLE                         EHCA_BMASK_IBM(31,31)
-#define MQPCB_MASK_CURR_SRQ_LIMIT               EHCA_BMASK_IBM(49,49)
-#define MQPCB_CURR_SRQ_LIMIT                    EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG       EHCA_BMASK_IBM(50,50)
-#define MQPCB_MASK_SHARED_RQ_HNDL               EHCA_BMASK_IBM(51,51)
+#define MQPCB_MASK_QKEY                         EHCA_BMASK_IBM( 0,  0)
+#define MQPCB_MASK_SEND_PSN                     EHCA_BMASK_IBM( 2,  2)
+#define MQPCB_MASK_RECEIVE_PSN                  EHCA_BMASK_IBM( 3,  3)
+#define MQPCB_MASK_PRIM_PHYS_PORT               EHCA_BMASK_IBM( 4,  4)
+#define MQPCB_PRIM_PHYS_PORT                    EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_ALT_PHYS_PORT                EHCA_BMASK_IBM( 5,  5)
+#define MQPCB_MASK_PRIM_P_KEY_IDX               EHCA_BMASK_IBM( 6,  6)
+#define MQPCB_PRIM_P_KEY_IDX                    EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_ALT_P_KEY_IDX                EHCA_BMASK_IBM( 7,  7)
+#define MQPCB_MASK_RDMA_ATOMIC_CTRL             EHCA_BMASK_IBM( 8,  8)
+#define MQPCB_MASK_QP_STATE                     EHCA_BMASK_IBM( 9,  9)
+#define MQPCB_QP_STATE                          EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES      EHCA_BMASK_IBM(11, 11)
+#define MQPCB_MASK_PATH_MIGRATION_STATE         EHCA_BMASK_IBM(12, 12)
+#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP    EHCA_BMASK_IBM(13, 13)
+#define MQPCB_MASK_DEST_QP_NR                   EHCA_BMASK_IBM(14, 14)
+#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD      EHCA_BMASK_IBM(15, 15)
+#define MQPCB_MASK_SERVICE_LEVEL                EHCA_BMASK_IBM(16, 16)
+#define MQPCB_MASK_SEND_GRH_FLAG                EHCA_BMASK_IBM(17, 17)
+#define MQPCB_MASK_RETRY_COUNT                  EHCA_BMASK_IBM(18, 18)
+#define MQPCB_MASK_TIMEOUT                      EHCA_BMASK_IBM(19, 19)
+#define MQPCB_MASK_PATH_MTU                     EHCA_BMASK_IBM(20, 20)
+#define MQPCB_PATH_MTU                          EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_MAX_STATIC_RATE              EHCA_BMASK_IBM(21, 21)
+#define MQPCB_MAX_STATIC_RATE                   EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_DLID                         EHCA_BMASK_IBM(22, 22)
+#define MQPCB_DLID                              EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_RNR_RETRY_COUNT              EHCA_BMASK_IBM(23, 23)
+#define MQPCB_RNR_RETRY_COUNT                   EHCA_BMASK_IBM(29, 31)
+#define MQPCB_MASK_SOURCE_PATH_BITS             EHCA_BMASK_IBM(24, 24)
+#define MQPCB_SOURCE_PATH_BITS                  EHCA_BMASK_IBM(25, 31)
+#define MQPCB_MASK_TRAFFIC_CLASS                EHCA_BMASK_IBM(25, 25)
+#define MQPCB_TRAFFIC_CLASS                     EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_HOP_LIMIT                    EHCA_BMASK_IBM(26, 26)
+#define MQPCB_HOP_LIMIT                         EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_SOURCE_GID_IDX               EHCA_BMASK_IBM(27, 27)
+#define MQPCB_SOURCE_GID_IDX                    EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_FLOW_LABEL                   EHCA_BMASK_IBM(28, 28)
+#define MQPCB_FLOW_LABEL                        EHCA_BMASK_IBM(12, 31)
+#define MQPCB_MASK_DEST_GID                     EHCA_BMASK_IBM(30, 30)
+#define MQPCB_MASK_SERVICE_LEVEL_AL             EHCA_BMASK_IBM(31, 31)
+#define MQPCB_SERVICE_LEVEL_AL                  EHCA_BMASK_IBM(28, 31)
+#define MQPCB_MASK_SEND_GRH_FLAG_AL             EHCA_BMASK_IBM(32, 32)
+#define MQPCB_SEND_GRH_FLAG_AL                  EHCA_BMASK_IBM(31, 31)
+#define MQPCB_MASK_RETRY_COUNT_AL               EHCA_BMASK_IBM(33, 33)
+#define MQPCB_RETRY_COUNT_AL                    EHCA_BMASK_IBM(29, 31)
+#define MQPCB_MASK_TIMEOUT_AL                   EHCA_BMASK_IBM(34, 34)
+#define MQPCB_TIMEOUT_AL                        EHCA_BMASK_IBM(27, 31)
+#define MQPCB_MASK_MAX_STATIC_RATE_AL           EHCA_BMASK_IBM(35, 35)
+#define MQPCB_MAX_STATIC_RATE_AL                EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_DLID_AL                      EHCA_BMASK_IBM(36, 36)
+#define MQPCB_DLID_AL                           EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_RNR_RETRY_COUNT_AL           EHCA_BMASK_IBM(37, 37)
+#define MQPCB_RNR_RETRY_COUNT_AL                EHCA_BMASK_IBM(29, 31)
+#define MQPCB_MASK_SOURCE_PATH_BITS_AL          EHCA_BMASK_IBM(38, 38)
+#define MQPCB_SOURCE_PATH_BITS_AL               EHCA_BMASK_IBM(25, 31)
+#define MQPCB_MASK_TRAFFIC_CLASS_AL             EHCA_BMASK_IBM(39, 39)
+#define MQPCB_TRAFFIC_CLASS_AL                  EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_HOP_LIMIT_AL                 EHCA_BMASK_IBM(40, 40)
+#define MQPCB_HOP_LIMIT_AL                      EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_SOURCE_GID_IDX_AL            EHCA_BMASK_IBM(41, 41)
+#define MQPCB_SOURCE_GID_IDX_AL                 EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_FLOW_LABEL_AL                EHCA_BMASK_IBM(42, 42)
+#define MQPCB_FLOW_LABEL_AL                     EHCA_BMASK_IBM(12, 31)
+#define MQPCB_MASK_DEST_GID_AL                  EHCA_BMASK_IBM(44, 44)
+#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR         EHCA_BMASK_IBM(45, 45)
+#define MQPCB_MAX_NR_OUTST_SEND_WR              EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR         EHCA_BMASK_IBM(46, 46)
+#define MQPCB_MAX_NR_OUTST_RECV_WR              EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK     EHCA_BMASK_IBM(47, 47)
+#define MQPCB_DISABLE_ETE_CREDIT_CHECK          EHCA_BMASK_IBM(31, 31)
+#define MQPCB_QP_NUMBER                         EHCA_BMASK_IBM( 8, 31)
+#define MQPCB_MASK_QP_ENABLE                    EHCA_BMASK_IBM(48, 48)
+#define MQPCB_QP_ENABLE                         EHCA_BMASK_IBM(31, 31)
+#define MQPCB_MASK_CURR_SRQ_LIMIT               EHCA_BMASK_IBM(49, 49)
+#define MQPCB_CURR_SRQ_LIMIT                    EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG       EHCA_BMASK_IBM(50, 50)
+#define MQPCB_MASK_SHARED_RQ_HNDL               EHCA_BMASK_IBM(51, 51)
 
 #endif /* __EHCA_CLASSES_PSERIES_H__ */
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 01d4a14..9e87883 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -97,7 +97,7 @@
 	return ret;
 }
 
-struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
+struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
 {
 	struct ehca_qp *ret = NULL;
 	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index 4961eb8..4825975 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -96,7 +96,8 @@
 	for (i = 0; i < nr_pages; i++) {
 		u64 rpage;
 
-		if (!(vpage = ipz_qpageit_get_inc(&eq->ipz_queue))) {
+		vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
+		if (!vpage) {
 			ret = H_RESOURCE;
 			goto create_eq_exit2;
 		}
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index bbd3c6a..fc19ef9 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -127,6 +127,7 @@
 		    u8 port, struct ib_port_attr *props)
 {
 	int ret = 0;
+	u64 h_ret;
 	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
 					      ib_device);
 	struct hipz_query_port *rblock;
@@ -137,7 +138,8 @@
 		return -ENOMEM;
 	}
 
-	if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+	if (h_ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "Can't query port properties");
 		ret = -EINVAL;
 		goto query_port1;
@@ -197,6 +199,7 @@
 			u8 port, struct ehca_sma_attr *attr)
 {
 	int ret = 0;
+	u64 h_ret;
 	struct hipz_query_port *rblock;
 
 	rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
@@ -205,7 +208,8 @@
 		return -ENOMEM;
 	}
 
-	if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+	if (h_ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "Can't query port properties");
 		ret = -EINVAL;
 		goto query_sma_attr1;
@@ -230,9 +234,11 @@
 int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
 {
 	int ret = 0;
-	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
+	u64 h_ret;
+	struct ehca_shca *shca;
 	struct hipz_query_port *rblock;
 
+	shca = container_of(ibdev, struct ehca_shca, ib_device);
 	if (index > 16) {
 		ehca_err(&shca->ib_device, "Invalid index: %x.", index);
 		return -EINVAL;
@@ -244,7 +250,8 @@
 		return -ENOMEM;
 	}
 
-	if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+	if (h_ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "Can't query port properties");
 		ret = -EINVAL;
 		goto query_pkey1;
@@ -262,6 +269,7 @@
 		   int index, union ib_gid *gid)
 {
 	int ret = 0;
+	u64 h_ret;
 	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
 					      ib_device);
 	struct hipz_query_port *rblock;
@@ -277,7 +285,8 @@
 		return -ENOMEM;
 	}
 
-	if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+	if (h_ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "Can't query port properties");
 		ret = -EINVAL;
 		goto query_gid1;
@@ -302,11 +311,12 @@
 		     struct ib_port_modify *props)
 {
 	int ret = 0;
-	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
+	struct ehca_shca *shca;
 	struct hipz_query_port *rblock;
 	u32 cap;
 	u64 hret;
 
+	shca = container_of(ibdev, struct ehca_shca, ib_device);
 	if ((props->set_port_cap_mask | props->clr_port_cap_mask)
 	    & ~allowed_port_caps) {
 		ehca_err(&shca->ib_device, "Non-changeable bits set in masks  "
@@ -325,7 +335,8 @@
 		goto modify_port1;
 	}
 
-	if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+	hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+	if (hret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "Can't query port properties");
 		ret = -EINVAL;
 		goto modify_port2;
@@ -337,7 +348,8 @@
 	hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
 				  cap, props->init_type, port_modify_mask);
 	if (hret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "Modify port failed  hret=%lx", hret);
+		ehca_err(&shca->ib_device, "Modify port failed  hret=%lx",
+			 hret);
 		ret = -EINVAL;
 	}
 
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 96eba38..4fb01fc 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -49,26 +49,26 @@
 #include "hipz_fns.h"
 #include "ipz_pt_fn.h"
 
-#define EQE_COMPLETION_EVENT   EHCA_BMASK_IBM(1,1)
-#define EQE_CQ_QP_NUMBER       EHCA_BMASK_IBM(8,31)
-#define EQE_EE_IDENTIFIER      EHCA_BMASK_IBM(2,7)
-#define EQE_CQ_NUMBER          EHCA_BMASK_IBM(8,31)
-#define EQE_QP_NUMBER          EHCA_BMASK_IBM(8,31)
-#define EQE_QP_TOKEN           EHCA_BMASK_IBM(32,63)
-#define EQE_CQ_TOKEN           EHCA_BMASK_IBM(32,63)
+#define EQE_COMPLETION_EVENT   EHCA_BMASK_IBM( 1,  1)
+#define EQE_CQ_QP_NUMBER       EHCA_BMASK_IBM( 8, 31)
+#define EQE_EE_IDENTIFIER      EHCA_BMASK_IBM( 2,  7)
+#define EQE_CQ_NUMBER          EHCA_BMASK_IBM( 8, 31)
+#define EQE_QP_NUMBER          EHCA_BMASK_IBM( 8, 31)
+#define EQE_QP_TOKEN           EHCA_BMASK_IBM(32, 63)
+#define EQE_CQ_TOKEN           EHCA_BMASK_IBM(32, 63)
 
-#define NEQE_COMPLETION_EVENT  EHCA_BMASK_IBM(1,1)
-#define NEQE_EVENT_CODE        EHCA_BMASK_IBM(2,7)
-#define NEQE_PORT_NUMBER       EHCA_BMASK_IBM(8,15)
-#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16,16)
-#define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16,16)
+#define NEQE_COMPLETION_EVENT  EHCA_BMASK_IBM( 1,  1)
+#define NEQE_EVENT_CODE        EHCA_BMASK_IBM( 2,  7)
+#define NEQE_PORT_NUMBER       EHCA_BMASK_IBM( 8, 15)
+#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
+#define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16, 16)
 
-#define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52,63)
-#define ERROR_DATA_TYPE        EHCA_BMASK_IBM(0,7)
+#define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52, 63)
+#define ERROR_DATA_TYPE        EHCA_BMASK_IBM( 0,  7)
 
 static void queue_comp_task(struct ehca_cq *__cq);
 
-static struct ehca_comp_pool* pool;
+static struct ehca_comp_pool *pool;
 #ifdef CONFIG_HOTPLUG_CPU
 static struct notifier_block comp_pool_callback_nb;
 #endif
@@ -85,8 +85,8 @@
 	return;
 }
 
-static void print_error_data(struct ehca_shca * shca, void* data,
-			     u64* rblock, int length)
+static void print_error_data(struct ehca_shca *shca, void *data,
+			     u64 *rblock, int length)
 {
 	u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
 	u64 resource = rblock[1];
@@ -94,7 +94,7 @@
 	switch (type) {
 	case 0x1: /* Queue Pair */
 	{
-		struct ehca_qp *qp = (struct ehca_qp*)data;
+		struct ehca_qp *qp = (struct ehca_qp *)data;
 
 		/* only print error data if AER is set */
 		if (rblock[6] == 0)
@@ -107,7 +107,7 @@
 	}
 	case 0x4: /* Completion Queue */
 	{
-		struct ehca_cq *cq = (struct ehca_cq*)data;
+		struct ehca_cq *cq = (struct ehca_cq *)data;
 
 		ehca_err(&shca->ib_device,
 			 "CQ 0x%x (resource=%lx) has errors.",
@@ -572,7 +572,7 @@
 	ehca_process_eq((struct ehca_shca*)data, 1);
 }
 
-static inline int find_next_online_cpu(struct ehca_comp_pool* pool)
+static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
 {
 	int cpu;
 	unsigned long flags;
@@ -636,7 +636,7 @@
 	__queue_comp_task(__cq, cct);
 }
 
-static void run_comp_task(struct ehca_cpu_comp_task* cct)
+static void run_comp_task(struct ehca_cpu_comp_task *cct)
 {
 	struct ehca_cq *cq;
 	unsigned long flags;
@@ -666,12 +666,12 @@
 
 static int comp_task(void *__cct)
 {
-	struct ehca_cpu_comp_task* cct = __cct;
+	struct ehca_cpu_comp_task *cct = __cct;
 	int cql_empty;
 	DECLARE_WAITQUEUE(wait, current);
 
 	set_current_state(TASK_INTERRUPTIBLE);
-	while(!kthread_should_stop()) {
+	while (!kthread_should_stop()) {
 		add_wait_queue(&cct->wait_queue, &wait);
 
 		spin_lock_irq(&cct->task_lock);
@@ -745,7 +745,7 @@
 
 	list_splice_init(&cct->cq_list, &list);
 
-	while(!list_empty(&list)) {
+	while (!list_empty(&list)) {
 		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
 
 		list_del(&cq->entry);
@@ -768,7 +768,7 @@
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
 		ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
-		if(!create_comp_task(pool, cpu)) {
+		if (!create_comp_task(pool, cpu)) {
 			ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
 			return NOTIFY_BAD;
 		}
@@ -838,7 +838,7 @@
 
 #ifdef CONFIG_HOTPLUG_CPU
 	comp_pool_callback_nb.notifier_call = comp_pool_callback;
-	comp_pool_callback_nb.priority =0;
+	comp_pool_callback_nb.priority = 0;
 	register_cpu_notifier(&comp_pool_callback_nb);
 #endif
 
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 77aeca6..dce503b 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -81,8 +81,9 @@
 			       int num_phys_buf,
 			       int mr_access_flags, u64 *iova_start);
 
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
-			       int mr_access_flags, struct ib_udata *udata);
+struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+			       u64 virt, int mr_access_flags,
+			       struct ib_udata *udata);
 
 int ehca_rereg_phys_mr(struct ib_mr *mr,
 		       int mr_rereg_mask,
@@ -192,7 +193,7 @@
 void *ehca_alloc_fw_ctrlblock(gfp_t flags);
 void ehca_free_fw_ctrlblock(void *ptr);
 #else
-#define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags))
+#define ehca_alloc_fw_ctrlblock(flags) ((void *)get_zeroed_page(flags))
 #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
 #endif
 
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 28ba2dd..36377c6 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -107,7 +107,7 @@
 static struct timer_list poll_eqs_timer;
 
 #ifdef CONFIG_PPC_64K_PAGES
-static struct kmem_cache *ctblk_cache = NULL;
+static struct kmem_cache *ctblk_cache;
 
 void *ehca_alloc_fw_ctrlblock(gfp_t flags)
 {
@@ -200,8 +200,8 @@
 #endif
 }
 
-#define EHCA_HCAAVER  EHCA_BMASK_IBM(32,39)
-#define EHCA_REVID    EHCA_BMASK_IBM(40,63)
+#define EHCA_HCAAVER  EHCA_BMASK_IBM(32, 39)
+#define EHCA_REVID    EHCA_BMASK_IBM(40, 63)
 
 static struct cap_descr {
 	u64 mask;
@@ -263,22 +263,27 @@
 
 		ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
 
-		if ((hcaaver == 1) && (revid == 0))
-			shca->hw_level = 0x11;
-		else if ((hcaaver == 1) && (revid == 1))
-			shca->hw_level = 0x12;
-		else if ((hcaaver == 1) && (revid == 2))
-			shca->hw_level = 0x13;
-		else if ((hcaaver == 2) && (revid == 0))
-			shca->hw_level = 0x21;
-		else if ((hcaaver == 2) && (revid == 0x10))
-			shca->hw_level = 0x22;
-		else {
+		if (hcaaver == 1) {
+			if (revid <= 3)
+				shca->hw_level = 0x10 | (revid + 1);
+			else
+				shca->hw_level = 0x14;
+		} else if (hcaaver == 2) {
+			if (revid == 0)
+				shca->hw_level = 0x21;
+			else if (revid == 0x10)
+				shca->hw_level = 0x22;
+			else if (revid == 0x20 || revid == 0x21)
+				shca->hw_level = 0x23;
+		}
+
+		if (!shca->hw_level) {
 			ehca_gen_warn("unknown hardware version"
 				      " - assuming default level");
 			shca->hw_level = 0x22;
 		}
-	}
+	} else
+		shca->hw_level = ehca_hw_level;
 	ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
 
 	shca->sport[0].rate = IB_RATE_30_GBPS;
@@ -290,7 +295,7 @@
 		if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap))
 			ehca_gen_dbg("   %s", hca_cap_descr[i].descr);
 
-	port = (struct hipz_query_port *) rblock;
+	port = (struct hipz_query_port *)rblock;
 	h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
 	if (h_ret != H_SUCCESS) {
 		ehca_gen_err("Cannot query port properties. h_ret=%lx",
@@ -439,7 +444,7 @@
 		return -EPERM;
 	}
 
-	ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10, 0);
+	ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1), 10, 0);
 	if (IS_ERR(ibcq)) {
 		ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
 		return PTR_ERR(ibcq);
@@ -666,7 +671,7 @@
 	}
 
 	/* create internal protection domain */
-	ibpd = ehca_alloc_pd(&shca->ib_device, (void*)(-1), NULL);
+	ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
 	if (IS_ERR(ibpd)) {
 		ehca_err(&shca->ib_device, "Cannot create internal PD.");
 		ret = PTR_ERR(ibpd);
@@ -863,18 +868,21 @@
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
 	       "(Rel.: SVNEHCA_0023)\n");
 
-	if ((ret = ehca_create_comp_pool())) {
+	ret = ehca_create_comp_pool();
+	if (ret) {
 		ehca_gen_err("Cannot create comp pool.");
 		return ret;
 	}
 
-	if ((ret = ehca_create_slab_caches())) {
+	ret = ehca_create_slab_caches();
+	if (ret) {
 		ehca_gen_err("Cannot create SLAB caches");
 		ret = -ENOMEM;
 		goto module_init1;
 	}
 
-	if ((ret = ibmebus_register_driver(&ehca_driver))) {
+	ret = ibmebus_register_driver(&ehca_driver);
+	if (ret) {
 		ehca_gen_err("Cannot register eHCA device driver");
 		ret = -EINVAL;
 		goto module_init2;
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index add79bd..6262c54 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -48,6 +48,11 @@
 #include "hcp_if.h"
 #include "hipz_hw.h"
 
+#define NUM_CHUNKS(length, chunk_size) \
+	(((length) + (chunk_size - 1)) / (chunk_size))
+/* max number of rpages (per hcall register_rpages) */
+#define MAX_RPAGES 512
+
 static struct kmem_cache *mr_cache;
 static struct kmem_cache *mw_cache;
 
@@ -56,9 +61,9 @@
 	struct ehca_mr *me;
 
 	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
-	if (me) {
+	if (me)
 		spin_lock_init(&me->mrlock);
-	} else
+	else
 		ehca_gen_err("alloc failed");
 
 	return me;
@@ -74,9 +79,9 @@
 	struct ehca_mw *me;
 
 	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
-	if (me) {
+	if (me)
 		spin_lock_init(&me->mwlock);
-	} else
+	else
 		ehca_gen_err("alloc failed");
 
 	return me;
@@ -106,11 +111,12 @@
 			goto get_dma_mr_exit0;
 		}
 
-		ret = ehca_reg_maxmr(shca, e_maxmr, (u64*)KERNELBASE,
+		ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
 				     mr_access_flags, e_pd,
 				     &e_maxmr->ib.ib_mr.lkey,
 				     &e_maxmr->ib.ib_mr.rkey);
 		if (ret) {
+			ehca_mr_delete(e_maxmr);
 			ib_mr = ERR_PTR(ret);
 			goto get_dma_mr_exit0;
 		}
@@ -144,9 +150,6 @@
 	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
 
 	u64 size;
-	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
-	u32 num_pages_mr;
-	u32 num_pages_4k; /* 4k portion "pages" */
 
 	if ((num_phys_buf <= 0) || !phys_buf_array) {
 		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
@@ -190,12 +193,6 @@
 		goto reg_phys_mr_exit0;
 	}
 
-	/* determine number of MR pages */
-	num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size +
-			 PAGE_SIZE - 1) / PAGE_SIZE);
-	num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size +
-			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
-
 	/* register MR on HCA */
 	if (ehca_mr_is_maxmr(size, iova_start)) {
 		e_mr->flags |= EHCA_MR_FLAG_MAXMR;
@@ -207,13 +204,22 @@
 			goto reg_phys_mr_exit1;
 		}
 	} else {
-		pginfo.type           = EHCA_MR_PGI_PHYS;
-		pginfo.num_pages      = num_pages_mr;
-		pginfo.num_4k         = num_pages_4k;
-		pginfo.num_phys_buf   = num_phys_buf;
-		pginfo.phys_buf_array = phys_buf_array;
-		pginfo.next_4k        = (((u64)iova_start & ~PAGE_MASK) /
-					 EHCA_PAGESIZE);
+		struct ehca_mr_pginfo pginfo;
+		u32 num_kpages;
+		u32 num_hwpages;
+
+		num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
+					PAGE_SIZE);
+		num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) +
+					 size, EHCA_PAGESIZE);
+		memset(&pginfo, 0, sizeof(pginfo));
+		pginfo.type = EHCA_MR_PGI_PHYS;
+		pginfo.num_kpages = num_kpages;
+		pginfo.num_hwpages = num_hwpages;
+		pginfo.u.phy.num_phys_buf = num_phys_buf;
+		pginfo.u.phy.phys_buf_array = phys_buf_array;
+		pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
+				      EHCA_PAGESIZE);
 
 		ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
 				  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
@@ -240,18 +246,19 @@
 
 /*----------------------------------------------------------------------*/
 
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
-			       int mr_access_flags, struct ib_udata *udata)
+struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+			       u64 virt, int mr_access_flags,
+			       struct ib_udata *udata)
 {
 	struct ib_mr *ib_mr;
 	struct ehca_mr *e_mr;
 	struct ehca_shca *shca =
 		container_of(pd->device, struct ehca_shca, ib_device);
 	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
+	struct ehca_mr_pginfo pginfo;
 	int ret;
-	u32 num_pages_mr;
-	u32 num_pages_4k; /* 4k portion "pages" */
+	u32 num_kpages;
+	u32 num_hwpages;
 
 	if (!pd) {
 		ehca_gen_err("bad pd=%p", pd);
@@ -289,7 +296,7 @@
 	e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
 				 mr_access_flags);
 	if (IS_ERR(e_mr->umem)) {
-		ib_mr = (void *) e_mr->umem;
+		ib_mr = (void *)e_mr->umem;
 		goto reg_user_mr_exit1;
 	}
 
@@ -301,23 +308,24 @@
 	}
 
 	/* determine number of MR pages */
-	num_pages_mr = (((virt % PAGE_SIZE) + length + PAGE_SIZE - 1) /
-			PAGE_SIZE);
-	num_pages_4k = (((virt % EHCA_PAGESIZE) + length + EHCA_PAGESIZE - 1) /
-			EHCA_PAGESIZE);
+	num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
+	num_hwpages = NUM_CHUNKS((virt % EHCA_PAGESIZE) + length,
+				 EHCA_PAGESIZE);
 
 	/* register MR on HCA */
-	pginfo.type       = EHCA_MR_PGI_USER;
-	pginfo.num_pages  = num_pages_mr;
-	pginfo.num_4k     = num_pages_4k;
-	pginfo.region     = e_mr->umem;
-	pginfo.next_4k	  = e_mr->umem->offset / EHCA_PAGESIZE;
-	pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
-					       (&e_mr->umem->chunk_list),
-					       list);
+	memset(&pginfo, 0, sizeof(pginfo));
+	pginfo.type = EHCA_MR_PGI_USER;
+	pginfo.num_kpages = num_kpages;
+	pginfo.num_hwpages = num_hwpages;
+	pginfo.u.usr.region = e_mr->umem;
+	pginfo.next_hwpage = e_mr->umem->offset / EHCA_PAGESIZE;
+	pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
+						     (&e_mr->umem->chunk_list),
+						     list);
 
-	ret = ehca_reg_mr(shca, e_mr, (u64*) virt, length, mr_access_flags, e_pd,
-			  &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
+	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
+			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
+			  &e_mr->ib.ib_mr.rkey);
 	if (ret) {
 		ib_mr = ERR_PTR(ret);
 		goto reg_user_mr_exit2;
@@ -360,9 +368,9 @@
 	struct ehca_pd *new_pd;
 	u32 tmp_lkey, tmp_rkey;
 	unsigned long sl_flags;
-	u32 num_pages_mr = 0;
-	u32 num_pages_4k = 0; /* 4k portion "pages" */
-	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
+	u32 num_kpages = 0;
+	u32 num_hwpages = 0;
+	struct ehca_mr_pginfo pginfo;
 	u32 cur_pid = current->tgid;
 
 	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
@@ -414,7 +422,7 @@
 			goto rereg_phys_mr_exit0;
 		}
 		if (!phys_buf_array || num_phys_buf <= 0) {
-			ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
+			ehca_err(mr->device, "bad input values mr_rereg_mask=%x"
 				 " phys_buf_array=%p num_phys_buf=%x",
 				 mr_rereg_mask, phys_buf_array, num_phys_buf);
 			ret = -EINVAL;
@@ -438,10 +446,10 @@
 
 	/* set requested values dependent on rereg request */
 	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
-	new_start = e_mr->start;  /* new == old address */
-	new_size  = e_mr->size;	  /* new == old length */
-	new_acl   = e_mr->acl;	  /* new == old access control */
-	new_pd    = container_of(mr->pd,struct ehca_pd,ib_pd); /*new == old PD*/
+	new_start = e_mr->start;
+	new_size = e_mr->size;
+	new_acl = e_mr->acl;
+	new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
 
 	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
 		new_start = iova_start;	/* change address */
@@ -458,17 +466,18 @@
 			ret = -EINVAL;
 			goto rereg_phys_mr_exit1;
 		}
-		num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size +
-				 PAGE_SIZE - 1) / PAGE_SIZE);
-		num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size +
-				 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
-		pginfo.type           = EHCA_MR_PGI_PHYS;
-		pginfo.num_pages      = num_pages_mr;
-		pginfo.num_4k         = num_pages_4k;
-		pginfo.num_phys_buf   = num_phys_buf;
-		pginfo.phys_buf_array = phys_buf_array;
-		pginfo.next_4k        = (((u64)iova_start & ~PAGE_MASK) /
-					 EHCA_PAGESIZE);
+		num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
+					new_size, PAGE_SIZE);
+		num_hwpages = NUM_CHUNKS(((u64)new_start % EHCA_PAGESIZE) +
+					 new_size, EHCA_PAGESIZE);
+		memset(&pginfo, 0, sizeof(pginfo));
+		pginfo.type = EHCA_MR_PGI_PHYS;
+		pginfo.num_kpages = num_kpages;
+		pginfo.num_hwpages = num_hwpages;
+		pginfo.u.phy.num_phys_buf = num_phys_buf;
+		pginfo.u.phy.phys_buf_array = phys_buf_array;
+		pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
+				      EHCA_PAGESIZE);
 	}
 	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
 		new_acl = mr_access_flags;
@@ -510,7 +519,7 @@
 	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
 	u32 cur_pid = current->tgid;
 	unsigned long sl_flags;
-	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+	struct ehca_mr_hipzout_parms hipzout;
 
 	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
 	    (my_pd->ownpid != cur_pid)) {
@@ -536,14 +545,14 @@
 			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
 			 h_ret, mr, shca->ipz_hca_handle.handle,
 			 e_mr->ipz_mr_handle.handle, mr->lkey);
-		ret = ehca_mrmw_map_hrc_query_mr(h_ret);
+		ret = ehca2ib_return_code(h_ret);
 		goto query_mr_exit1;
 	}
-	mr_attr->pd               = mr->pd;
+	mr_attr->pd = mr->pd;
 	mr_attr->device_virt_addr = hipzout.vaddr;
-	mr_attr->size             = hipzout.len;
-	mr_attr->lkey             = hipzout.lkey;
-	mr_attr->rkey             = hipzout.rkey;
+	mr_attr->size = hipzout.len;
+	mr_attr->lkey = hipzout.lkey;
+	mr_attr->rkey = hipzout.rkey;
 	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);
 
 query_mr_exit1:
@@ -596,7 +605,7 @@
 			 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
 			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
 			 e_mr->ipz_mr_handle.handle, mr->lkey);
-		ret = ehca_mrmw_map_hrc_free_mr(h_ret);
+		ret = ehca2ib_return_code(h_ret);
 		goto dereg_mr_exit0;
 	}
 
@@ -622,7 +631,7 @@
 	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
 	struct ehca_shca *shca =
 		container_of(pd->device, struct ehca_shca, ib_device);
-	struct ehca_mw_hipzout_parms hipzout = {{0},0};
+	struct ehca_mw_hipzout_parms hipzout;
 
 	e_mw = ehca_mw_new();
 	if (!e_mw) {
@@ -636,7 +645,7 @@
 		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
 			 "shca=%p hca_hndl=%lx mw=%p",
 			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
-		ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret));
+		ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
 		goto alloc_mw_exit1;
 	}
 	/* successful MW allocation */
@@ -679,7 +688,7 @@
 			 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
 			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
 			 e_mw->ipz_mw_handle.handle);
-		return ehca_mrmw_map_hrc_free_mw(h_ret);
+		return ehca2ib_return_code(h_ret);
 	}
 	/* successful deallocation */
 	ehca_mw_delete(e_mw);
@@ -699,7 +708,7 @@
 	struct ehca_mr *e_fmr;
 	int ret;
 	u32 tmp_lkey, tmp_rkey;
-	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
+	struct ehca_mr_pginfo pginfo;
 
 	/* check other parameters */
 	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
@@ -745,6 +754,7 @@
 	e_fmr->flags |= EHCA_MR_FLAG_FMR;
 
 	/* register MR on HCA */
+	memset(&pginfo, 0, sizeof(pginfo));
 	ret = ehca_reg_mr(shca, e_fmr, NULL,
 			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
 			  mr_access_flags, e_pd, &pginfo,
@@ -783,7 +793,7 @@
 		container_of(fmr->device, struct ehca_shca, ib_device);
 	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
 	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
-	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
+	struct ehca_mr_pginfo pginfo;
 	u32 tmp_lkey, tmp_rkey;
 
 	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
@@ -809,14 +819,16 @@
 			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
 	}
 
-	pginfo.type      = EHCA_MR_PGI_FMR;
-	pginfo.num_pages = list_len;
-	pginfo.num_4k    = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
-	pginfo.page_list = page_list;
-	pginfo.next_4k   = ((iova & (e_fmr->fmr_page_size-1)) /
-			    EHCA_PAGESIZE);
+	memset(&pginfo, 0, sizeof(pginfo));
+	pginfo.type = EHCA_MR_PGI_FMR;
+	pginfo.num_kpages = list_len;
+	pginfo.num_hwpages = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
+	pginfo.u.fmr.page_list = page_list;
+	pginfo.next_hwpage = ((iova & (e_fmr->fmr_page_size-1)) /
+			      EHCA_PAGESIZE);
+	pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
 
-	ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova,
+	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
 			    list_len * e_fmr->fmr_page_size,
 			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
 	if (ret)
@@ -831,8 +843,7 @@
 map_phys_fmr_exit0:
 	if (ret)
 		ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
-			 "iova=%lx",
-			 ret, fmr, page_list, list_len, iova);
+			 "iova=%lx", ret, fmr, page_list, list_len, iova);
 	return ret;
 } /* end ehca_map_phys_fmr() */
 
@@ -922,7 +933,7 @@
 			 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
 			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
 			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
-		ret = ehca_mrmw_map_hrc_free_mr(h_ret);
+		ret = ehca2ib_return_code(h_ret);
 		goto free_fmr_exit0;
 	}
 	/* successful deregistration */
@@ -950,12 +961,12 @@
 	int ret;
 	u64 h_ret;
 	u32 hipz_acl;
-	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+	struct ehca_mr_hipzout_parms hipzout;
 
 	ehca_mrmw_map_acl(acl, &hipz_acl);
 	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
 	if (ehca_use_hp_mr == 1)
-	        hipz_acl |= 0x00000001;
+		hipz_acl |= 0x00000001;
 
 	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
 					 (u64)iova_start, size, hipz_acl,
@@ -963,7 +974,7 @@
 	if (h_ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
 			 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
-		ret = ehca_mrmw_map_hrc_alloc(h_ret);
+		ret = ehca2ib_return_code(h_ret);
 		goto ehca_reg_mr_exit0;
 	}
 
@@ -974,11 +985,11 @@
 		goto ehca_reg_mr_exit1;
 
 	/* successful registration */
-	e_mr->num_pages = pginfo->num_pages;
-	e_mr->num_4k    = pginfo->num_4k;
-	e_mr->start     = iova_start;
-	e_mr->size      = size;
-	e_mr->acl       = acl;
+	e_mr->num_kpages = pginfo->num_kpages;
+	e_mr->num_hwpages = pginfo->num_hwpages;
+	e_mr->start = iova_start;
+	e_mr->size = size;
+	e_mr->acl = acl;
 	*lkey = hipzout.lkey;
 	*rkey = hipzout.rkey;
 	return 0;
@@ -988,10 +999,10 @@
 	if (h_ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
 			 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
-			 "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
+			 "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%x",
 			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
-			 hipzout.lkey, pginfo, pginfo->num_pages,
-			 pginfo->num_4k, ret);
+			 hipzout.lkey, pginfo, pginfo->num_kpages,
+			 pginfo->num_hwpages, ret);
 		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
 			 "not recoverable");
 	}
@@ -999,9 +1010,9 @@
 	if (ret)
 		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
 			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
-			 "num_pages=%lx num_4k=%lx",
+			 "num_kpages=%lx num_hwpages=%lx",
 			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
-			 pginfo->num_pages, pginfo->num_4k);
+			 pginfo->num_kpages, pginfo->num_hwpages);
 	return ret;
 } /* end ehca_reg_mr() */
 
@@ -1026,24 +1037,24 @@
 	}
 
 	/* max 512 pages per shot */
-	for (i = 0; i < ((pginfo->num_4k + 512 - 1) / 512); i++) {
+	for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
 
-		if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
-			rnum = pginfo->num_4k % 512; /* last shot */
+		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
+			rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
 			if (rnum == 0)
-				rnum = 512;      /* last shot is full */
+				rnum = MAX_RPAGES;      /* last shot is full */
 		} else
-			rnum = 512;
+			rnum = MAX_RPAGES;
 
-		if (rnum > 1) {
-			ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
-			if (ret) {
-				ehca_err(&shca->ib_device, "ehca_set_pagebuf "
+		ret = ehca_set_pagebuf(pginfo, rnum, kpage);
+		if (ret) {
+			ehca_err(&shca->ib_device, "ehca_set_pagebuf "
 					 "bad rc, ret=%x rnum=%x kpage=%p",
 					 ret, rnum, kpage);
-				ret = -EFAULT;
-				goto ehca_reg_mr_rpages_exit1;
-			}
+			goto ehca_reg_mr_rpages_exit1;
+		}
+
+		if (rnum > 1) {
 			rpage = virt_to_abs(kpage);
 			if (!rpage) {
 				ehca_err(&shca->ib_device, "kpage=%p i=%x",
@@ -1051,21 +1062,14 @@
 				ret = -EFAULT;
 				goto ehca_reg_mr_rpages_exit1;
 			}
-		} else {  /* rnum==1 */
-			ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
-			if (ret) {
-				ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
-					 "bad rc, ret=%x i=%x", ret, i);
-				ret = -EFAULT;
-				goto ehca_reg_mr_rpages_exit1;
-			}
-		}
+		} else
+			rpage = *kpage;
 
 		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
 						 0, /* pagesize 4k */
 						 0, rpage, rnum);
 
-		if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
+		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
 			/*
 			 * check for 'registration complete'==H_SUCCESS
 			 * and for 'page registered'==H_PAGE_REGISTERED
@@ -1078,7 +1082,7 @@
 					 shca->ipz_hca_handle.handle,
 					 e_mr->ipz_mr_handle.handle,
 					 e_mr->ib.ib_mr.lkey);
-				ret = ehca_mrmw_map_hrc_rrpg_last(h_ret);
+				ret = ehca2ib_return_code(h_ret);
 				break;
 			} else
 				ret = 0;
@@ -1089,7 +1093,7 @@
 				 e_mr->ib.ib_mr.lkey,
 				 shca->ipz_hca_handle.handle,
 				 e_mr->ipz_mr_handle.handle);
-			ret = ehca_mrmw_map_hrc_rrpg_notlast(h_ret);
+			ret = ehca2ib_return_code(h_ret);
 			break;
 		} else
 			ret = 0;
@@ -1101,8 +1105,8 @@
 ehca_reg_mr_rpages_exit0:
 	if (ret)
 		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
-			 "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo,
-			 pginfo->num_pages, pginfo->num_4k);
+			 "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr,
+			 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
 	return ret;
 } /* end ehca_reg_mr_rpages() */
 
@@ -1124,7 +1128,7 @@
 	u64 *kpage;
 	u64 rpage;
 	struct ehca_mr_pginfo pginfo_save;
-	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+	struct ehca_mr_hipzout_parms hipzout;
 
 	ehca_mrmw_map_acl(acl, &hipz_acl);
 	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
@@ -1137,12 +1141,12 @@
 	}
 
 	pginfo_save = *pginfo;
-	ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
+	ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
 	if (ret) {
 		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
-			 "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
-			 e_mr, pginfo, pginfo->type, pginfo->num_pages,
-			 pginfo->num_4k,kpage);
+			 "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx "
+			 "kpage=%p", e_mr, pginfo, pginfo->type,
+			 pginfo->num_kpages, pginfo->num_hwpages, kpage);
 		goto ehca_rereg_mr_rereg1_exit1;
 	}
 	rpage = virt_to_abs(kpage);
@@ -1164,7 +1168,7 @@
 			  "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
 		*pginfo = pginfo_save;
 		ret = -EAGAIN;
-	} else if ((u64*)hipzout.vaddr != iova_start) {
+	} else if ((u64 *)hipzout.vaddr != iova_start) {
 		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
 			 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
 			 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
@@ -1176,11 +1180,11 @@
 		 * successful reregistration
 		 * note: start and start_out are identical for eServer HCAs
 		 */
-		e_mr->num_pages = pginfo->num_pages;
-		e_mr->num_4k    = pginfo->num_4k;
-		e_mr->start     = iova_start;
-		e_mr->size      = size;
-		e_mr->acl       = acl;
+		e_mr->num_kpages = pginfo->num_kpages;
+		e_mr->num_hwpages = pginfo->num_hwpages;
+		e_mr->start = iova_start;
+		e_mr->size = size;
+		e_mr->acl = acl;
 		*lkey = hipzout.lkey;
 		*rkey = hipzout.rkey;
 	}
@@ -1190,9 +1194,9 @@
 ehca_rereg_mr_rereg1_exit0:
 	if ( ret && (ret != -EAGAIN) )
 		ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
-			 "pginfo=%p num_pages=%lx num_4k=%lx",
-			 ret, *lkey, *rkey, pginfo, pginfo->num_pages,
-			 pginfo->num_4k);
+			 "pginfo=%p num_kpages=%lx num_hwpages=%lx",
+			 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
+			 pginfo->num_hwpages);
 	return ret;
 } /* end ehca_rereg_mr_rereg1() */
 
@@ -1214,10 +1218,12 @@
 	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
 
 	/* first determine reregistration hCall(s) */
-	if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) ||
-	    (pginfo->num_4k > e_mr->num_4k)) {
-		ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
-			 "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
+	if ((pginfo->num_hwpages > MAX_RPAGES) ||
+	    (e_mr->num_hwpages > MAX_RPAGES) ||
+	    (pginfo->num_hwpages > e_mr->num_hwpages)) {
+		ehca_dbg(&shca->ib_device, "Rereg3 case, "
+			 "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x",
+			 pginfo->num_hwpages, e_mr->num_hwpages);
 		rereg_1_hcall = 0;
 		rereg_3_hcall = 1;
 	}
@@ -1253,7 +1259,7 @@
 				 h_ret, e_mr, shca->ipz_hca_handle.handle,
 				 e_mr->ipz_mr_handle.handle,
 				 e_mr->ib.ib_mr.lkey);
-			ret = ehca_mrmw_map_hrc_free_mr(h_ret);
+			ret = ehca2ib_return_code(h_ret);
 			goto ehca_rereg_mr_exit0;
 		}
 		/* clean ehca_mr_t, without changing struct ib_mr and lock */
@@ -1281,9 +1287,9 @@
 	if (ret)
 		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
 			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
-			 "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
+			 "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
 			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
-			 acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
+			 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
 			 rereg_1_hcall, rereg_3_hcall);
 	return ret;
 } /* end ehca_rereg_mr() */
@@ -1295,97 +1301,86 @@
 {
 	int ret = 0;
 	u64 h_ret;
-	int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */
-	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
 	struct ehca_pd *e_pd =
 		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
 	struct ehca_mr save_fmr;
 	u32 tmp_lkey, tmp_rkey;
-	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
-	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+	struct ehca_mr_pginfo pginfo;
+	struct ehca_mr_hipzout_parms hipzout;
+	struct ehca_mr save_mr;
 
-	/* first check if reregistration hCall can be used for unmap */
-	if (e_fmr->fmr_max_pages > 512) {
-		rereg_1_hcall = 0;
-		rereg_3_hcall = 1;
-	}
-
-	if (rereg_1_hcall) {
+	if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
 		/*
 		 * note: after using rereg hcall with len=0,
 		 * rereg hcall must be used again for registering pages
 		 */
 		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
 					      0, 0, e_pd->fw_pd, 0, &hipzout);
-		if (h_ret != H_SUCCESS) {
-			/*
-			 * should not happen, because length checked above,
-			 * FMRs are not shared and no MW bound to FMRs
-			 */
-			ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
-				 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
-				 "mr_hndl=%lx lkey=%x lkey_out=%x",
-				 h_ret, e_fmr, shca->ipz_hca_handle.handle,
-				 e_fmr->ipz_mr_handle.handle,
-				 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
-			rereg_3_hcall = 1;
-		} else {
+		if (h_ret == H_SUCCESS) {
 			/* successful reregistration */
 			e_fmr->start = NULL;
 			e_fmr->size = 0;
 			tmp_lkey = hipzout.lkey;
 			tmp_rkey = hipzout.rkey;
+			return 0;
 		}
+		/*
+		 * should not happen, because length checked above,
+		 * FMRs are not shared and no MW bound to FMRs
+		 */
+		ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
+			 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
+			 "mr_hndl=%lx lkey=%x lkey_out=%x",
+			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
+			 e_fmr->ipz_mr_handle.handle,
+			 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
+		/* try free and rereg */
 	}
 
-	if (rereg_3_hcall) {
-		struct ehca_mr save_mr;
+	/* first free old FMR */
+	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
+	if (h_ret != H_SUCCESS) {
+		ehca_err(&shca->ib_device, "hipz_free_mr failed, "
+			 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
+			 "lkey=%x",
+			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
+			 e_fmr->ipz_mr_handle.handle,
+			 e_fmr->ib.ib_fmr.lkey);
+		ret = ehca2ib_return_code(h_ret);
+		goto ehca_unmap_one_fmr_exit0;
+	}
+	/* clean ehca_mr_t, without changing lock */
+	save_fmr = *e_fmr;
+	ehca_mr_deletenew(e_fmr);
 
-		/* first free old FMR */
-		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
-		if (h_ret != H_SUCCESS) {
-			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
-				 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
-				 "lkey=%x",
-				 h_ret, e_fmr, shca->ipz_hca_handle.handle,
-				 e_fmr->ipz_mr_handle.handle,
-				 e_fmr->ib.ib_fmr.lkey);
-			ret = ehca_mrmw_map_hrc_free_mr(h_ret);
-			goto ehca_unmap_one_fmr_exit0;
-		}
-		/* clean ehca_mr_t, without changing lock */
-		save_fmr = *e_fmr;
-		ehca_mr_deletenew(e_fmr);
+	/* set some MR values */
+	e_fmr->flags = save_fmr.flags;
+	e_fmr->fmr_page_size = save_fmr.fmr_page_size;
+	e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
+	e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
+	e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
+	e_fmr->acl = save_fmr.acl;
 
-		/* set some MR values */
-		e_fmr->flags = save_fmr.flags;
-		e_fmr->fmr_page_size = save_fmr.fmr_page_size;
-		e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
-		e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
-		e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
-		e_fmr->acl = save_fmr.acl;
-
-		pginfo.type      = EHCA_MR_PGI_FMR;
-		pginfo.num_pages = 0;
-		pginfo.num_4k    = 0;
-		ret = ehca_reg_mr(shca, e_fmr, NULL,
-				  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
-				  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
-				  &tmp_rkey);
-		if (ret) {
-			u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
-			memcpy(&e_fmr->flags, &(save_mr.flags),
-			       sizeof(struct ehca_mr) - offset);
-			goto ehca_unmap_one_fmr_exit0;
-		}
+	memset(&pginfo, 0, sizeof(pginfo));
+	pginfo.type = EHCA_MR_PGI_FMR;
+	pginfo.num_kpages = 0;
+	pginfo.num_hwpages = 0;
+	ret = ehca_reg_mr(shca, e_fmr, NULL,
+			  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
+			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
+			  &tmp_rkey);
+	if (ret) {
+		u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
+		memcpy(&e_fmr->flags, &(save_mr.flags),
+		       sizeof(struct ehca_mr) - offset);
+		goto ehca_unmap_one_fmr_exit0;
 	}
 
 ehca_unmap_one_fmr_exit0:
 	if (ret)
 		ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
-			 "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
-			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
-			 rereg_1_hcall, rereg_3_hcall);
+			 "fmr_max_pages=%x",
+			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
 	return ret;
 } /* end ehca_unmap_one_fmr() */
 
@@ -1403,7 +1398,7 @@
 	int ret = 0;
 	u64 h_ret;
 	u32 hipz_acl;
-	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+	struct ehca_mr_hipzout_parms hipzout;
 
 	ehca_mrmw_map_acl(acl, &hipz_acl);
 	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
@@ -1419,15 +1414,15 @@
 			 shca->ipz_hca_handle.handle,
 			 e_origmr->ipz_mr_handle.handle,
 			 e_origmr->ib.ib_mr.lkey);
-		ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
+		ret = ehca2ib_return_code(h_ret);
 		goto ehca_reg_smr_exit0;
 	}
 	/* successful registration */
-	e_newmr->num_pages     = e_origmr->num_pages;
-	e_newmr->num_4k        = e_origmr->num_4k;
-	e_newmr->start         = iova_start;
-	e_newmr->size          = e_origmr->size;
-	e_newmr->acl           = acl;
+	e_newmr->num_kpages = e_origmr->num_kpages;
+	e_newmr->num_hwpages = e_origmr->num_hwpages;
+	e_newmr->start = iova_start;
+	e_newmr->size = e_origmr->size;
+	e_newmr->acl = acl;
 	e_newmr->ipz_mr_handle = hipzout.handle;
 	*lkey = hipzout.lkey;
 	*rkey = hipzout.rkey;
@@ -1453,10 +1448,10 @@
 	struct ehca_mr *e_mr;
 	u64 *iova_start;
 	u64 size_maxmr;
-	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
+	struct ehca_mr_pginfo pginfo;
 	struct ib_phys_buf ib_pbuf;
-	u32 num_pages_mr;
-	u32 num_pages_4k; /* 4k portion "pages" */
+	u32 num_kpages;
+	u32 num_hwpages;
 
 	e_mr = ehca_mr_new();
 	if (!e_mr) {
@@ -1468,28 +1463,29 @@
 
 	/* register internal max-MR on HCA */
 	size_maxmr = (u64)high_memory - PAGE_OFFSET;
-	iova_start = (u64*)KERNELBASE;
+	iova_start = (u64 *)KERNELBASE;
 	ib_pbuf.addr = 0;
 	ib_pbuf.size = size_maxmr;
-	num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr +
-			 PAGE_SIZE - 1) / PAGE_SIZE);
-	num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr +
-			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
+	num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
+				PAGE_SIZE);
+	num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size_maxmr,
+				 EHCA_PAGESIZE);
 
-	pginfo.type           = EHCA_MR_PGI_PHYS;
-	pginfo.num_pages      = num_pages_mr;
-	pginfo.num_4k         = num_pages_4k;
-	pginfo.num_phys_buf   = 1;
-	pginfo.phys_buf_array = &ib_pbuf;
+	memset(&pginfo, 0, sizeof(pginfo));
+	pginfo.type = EHCA_MR_PGI_PHYS;
+	pginfo.num_kpages = num_kpages;
+	pginfo.num_hwpages = num_hwpages;
+	pginfo.u.phy.num_phys_buf = 1;
+	pginfo.u.phy.phys_buf_array = &ib_pbuf;
 
 	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
 			  &pginfo, &e_mr->ib.ib_mr.lkey,
 			  &e_mr->ib.ib_mr.rkey);
 	if (ret) {
 		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
-			 "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x "
-			 "num_pages_4k=%x", e_mr, iova_start, size_maxmr,
-			 num_pages_mr, num_pages_4k);
+			 "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
+			 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
+			 num_kpages, num_hwpages);
 		goto ehca_reg_internal_maxmr_exit1;
 	}
 
@@ -1524,7 +1520,7 @@
 	u64 h_ret;
 	struct ehca_mr *e_origmr = shca->maxmr;
 	u32 hipz_acl;
-	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+	struct ehca_mr_hipzout_parms hipzout;
 
 	ehca_mrmw_map_acl(acl, &hipz_acl);
 	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
@@ -1538,14 +1534,14 @@
 			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
 			 e_origmr->ipz_mr_handle.handle,
 			 e_origmr->ib.ib_mr.lkey);
-		return ehca_mrmw_map_hrc_reg_smr(h_ret);
+		return ehca2ib_return_code(h_ret);
 	}
 	/* successful registration */
-	e_newmr->num_pages     = e_origmr->num_pages;
-	e_newmr->num_4k        = e_origmr->num_4k;
-	e_newmr->start         = iova_start;
-	e_newmr->size          = e_origmr->size;
-	e_newmr->acl           = acl;
+	e_newmr->num_kpages = e_origmr->num_kpages;
+	e_newmr->num_hwpages = e_origmr->num_hwpages;
+	e_newmr->start = iova_start;
+	e_newmr->size = e_origmr->size;
+	e_newmr->acl = acl;
 	e_newmr->ipz_mr_handle = hipzout.handle;
 	*lkey = hipzout.lkey;
 	*rkey = hipzout.rkey;
@@ -1677,299 +1673,187 @@
 
 /*----------------------------------------------------------------------*/
 
-/* setup page buffer from page info */
-int ehca_set_pagebuf(struct ehca_mr *e_mr,
-		     struct ehca_mr_pginfo *pginfo,
-		     u32 number,
-		     u64 *kpage)
+/* PAGE_SIZE >= pginfo->hwpage_size */
+static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
+				  u32 number,
+				  u64 *kpage)
 {
 	int ret = 0;
 	struct ib_umem_chunk *prev_chunk;
 	struct ib_umem_chunk *chunk;
-	struct ib_phys_buf *pbuf;
-	u64 *fmrlist;
-	u64 num4k, pgaddr, offs4k;
+	u64 pgaddr;
 	u32 i = 0;
 	u32 j = 0;
 
-	if (pginfo->type == EHCA_MR_PGI_PHYS) {
-		/* loop over desired phys_buf_array entries */
-		while (i < number) {
-			pbuf   = pginfo->phys_buf_array + pginfo->next_buf;
-			num4k  = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size +
-				  EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
-			offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
-			while (pginfo->next_4k < offs4k + num4k) {
-				/* sanity check */
-				if ((pginfo->page_cnt >= pginfo->num_pages) ||
-				    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
-					ehca_gen_err("page_cnt >= num_pages, "
-						     "page_cnt=%lx "
-						     "num_pages=%lx "
-						     "page_4k_cnt=%lx "
-						     "num_4k=%lx i=%x",
-						     pginfo->page_cnt,
-						     pginfo->num_pages,
-						     pginfo->page_4k_cnt,
-						     pginfo->num_4k, i);
-					ret = -EFAULT;
-					goto ehca_set_pagebuf_exit0;
-				}
-				*kpage = phys_to_abs(
-					(pbuf->addr & EHCA_PAGEMASK)
-					+ (pginfo->next_4k * EHCA_PAGESIZE));
-				if ( !(*kpage) && pbuf->addr ) {
-					ehca_gen_err("pbuf->addr=%lx "
-						     "pbuf->size=%lx "
-						     "next_4k=%lx", pbuf->addr,
-						     pbuf->size,
-						     pginfo->next_4k);
-					ret = -EFAULT;
-					goto ehca_set_pagebuf_exit0;
-				}
-				(pginfo->page_4k_cnt)++;
-				(pginfo->next_4k)++;
-				if (pginfo->next_4k %
-				    (PAGE_SIZE / EHCA_PAGESIZE) == 0)
-					(pginfo->page_cnt)++;
-				kpage++;
-				i++;
-				if (i >= number) break;
-			}
-			if (pginfo->next_4k >= offs4k + num4k) {
-				(pginfo->next_buf)++;
-				pginfo->next_4k = 0;
-			}
-		}
-	} else if (pginfo->type == EHCA_MR_PGI_USER) {
-		/* loop over desired chunk entries */
-		chunk      = pginfo->next_chunk;
-		prev_chunk = pginfo->next_chunk;
-		list_for_each_entry_continue(chunk,
-					     (&(pginfo->region->chunk_list)),
-					     list) {
-			for (i = pginfo->next_nmap; i < chunk->nmap; ) {
-				pgaddr = ( page_to_pfn(chunk->page_list[i].page)
-					   << PAGE_SHIFT );
-				*kpage = phys_to_abs(pgaddr +
-						     (pginfo->next_4k *
-						      EHCA_PAGESIZE));
-				if ( !(*kpage) ) {
-					ehca_gen_err("pgaddr=%lx "
-						     "chunk->page_list[i]=%lx "
-						     "i=%x next_4k=%lx mr=%p",
-						     pgaddr,
-						     (u64)sg_dma_address(
-							     &chunk->
-							     page_list[i]),
-						     i, pginfo->next_4k, e_mr);
-					ret = -EFAULT;
-					goto ehca_set_pagebuf_exit0;
-				}
-				(pginfo->page_4k_cnt)++;
-				(pginfo->next_4k)++;
-				kpage++;
-				if (pginfo->next_4k %
-				    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
-					(pginfo->page_cnt)++;
-					(pginfo->next_nmap)++;
-					pginfo->next_4k = 0;
-					i++;
-				}
-				j++;
-				if (j >= number) break;
-			}
-			if ((pginfo->next_nmap >= chunk->nmap) &&
-			    (j >= number)) {
-				pginfo->next_nmap = 0;
-				prev_chunk = chunk;
-				break;
-			} else if (pginfo->next_nmap >= chunk->nmap) {
-				pginfo->next_nmap = 0;
-				prev_chunk = chunk;
-			} else if (j >= number)
-				break;
-			else
-				prev_chunk = chunk;
-		}
-		pginfo->next_chunk =
-			list_prepare_entry(prev_chunk,
-					   (&(pginfo->region->chunk_list)),
-					   list);
-	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
-		/* loop over desired page_list entries */
-		fmrlist = pginfo->page_list + pginfo->next_listelem;
-		for (i = 0; i < number; i++) {
-			*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
-					     pginfo->next_4k * EHCA_PAGESIZE);
+	/* loop over desired chunk entries */
+	chunk      = pginfo->u.usr.next_chunk;
+	prev_chunk = pginfo->u.usr.next_chunk;
+	list_for_each_entry_continue(
+		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
+		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
+			pgaddr = page_to_pfn(chunk->page_list[i].page)
+				<< PAGE_SHIFT ;
+			*kpage = phys_to_abs(pgaddr +
+					     (pginfo->next_hwpage *
+					      EHCA_PAGESIZE));
 			if ( !(*kpage) ) {
-				ehca_gen_err("*fmrlist=%lx fmrlist=%p "
-					     "next_listelem=%lx next_4k=%lx",
-					     *fmrlist, fmrlist,
-					     pginfo->next_listelem,
-					     pginfo->next_4k);
-				ret = -EFAULT;
-				goto ehca_set_pagebuf_exit0;
+				ehca_gen_err("pgaddr=%lx "
+					     "chunk->page_list[i]=%lx "
+					     "i=%x next_hwpage=%lx",
+					     pgaddr, (u64)sg_dma_address(
+						     &chunk->page_list[i]),
+					     i, pginfo->next_hwpage);
+				return -EFAULT;
 			}
-			(pginfo->page_4k_cnt)++;
-			(pginfo->next_4k)++;
+			(pginfo->hwpage_cnt)++;
+			(pginfo->next_hwpage)++;
 			kpage++;
-			if (pginfo->next_4k %
-			    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
-				(pginfo->page_cnt)++;
-				(pginfo->next_listelem)++;
-				fmrlist++;
-				pginfo->next_4k = 0;
+			if (pginfo->next_hwpage %
+			    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
+				(pginfo->kpage_cnt)++;
+				(pginfo->u.usr.next_nmap)++;
+				pginfo->next_hwpage = 0;
+				i++;
 			}
+			j++;
+			if (j >= number) break;
 		}
-	} else {
-		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
-		ret = -EFAULT;
-		goto ehca_set_pagebuf_exit0;
+		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
+		    (j >= number)) {
+			pginfo->u.usr.next_nmap = 0;
+			prev_chunk = chunk;
+			break;
+		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
+			pginfo->u.usr.next_nmap = 0;
+			prev_chunk = chunk;
+		} else if (j >= number)
+			break;
+		else
+			prev_chunk = chunk;
 	}
-
-ehca_set_pagebuf_exit0:
-	if (ret)
-		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
-			     "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
-			     "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
-			     "next_listelem=%lx region=%p next_chunk=%p "
-			     "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
-			     pginfo->num_pages, pginfo->num_4k,
-			     pginfo->next_buf, pginfo->next_4k, number, kpage,
-			     pginfo->page_cnt, pginfo->page_4k_cnt, i,
-			     pginfo->next_listelem, pginfo->region,
-			     pginfo->next_chunk, pginfo->next_nmap);
+	pginfo->u.usr.next_chunk =
+		list_prepare_entry(prev_chunk,
+				   (&(pginfo->u.usr.region->chunk_list)),
+				   list);
 	return ret;
-} /* end ehca_set_pagebuf() */
+}
 
-/*----------------------------------------------------------------------*/
-
-/* setup 1 page from page info page buffer */
-int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
-		       struct ehca_mr_pginfo *pginfo,
-		       u64 *rpage)
+int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
+			  u32 number,
+			  u64 *kpage)
 {
 	int ret = 0;
-	struct ib_phys_buf *tmp_pbuf;
-	u64 *fmrlist;
-	struct ib_umem_chunk *chunk;
-	struct ib_umem_chunk *prev_chunk;
-	u64 pgaddr, num4k, offs4k;
+	struct ib_phys_buf *pbuf;
+	u64 num_hw, offs_hw;
+	u32 i = 0;
 
-	if (pginfo->type == EHCA_MR_PGI_PHYS) {
-		/* sanity check */
-		if ((pginfo->page_cnt >= pginfo->num_pages) ||
-		    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
-			ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
-				     "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
-				     pginfo->page_cnt, pginfo->num_pages,
-				     pginfo->page_4k_cnt, pginfo->num_4k);
-			ret = -EFAULT;
-			goto ehca_set_pagebuf_1_exit0;
-		}
-		tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf;
-		num4k  = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size +
-			  EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
-		offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
-		*rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
-				     (pginfo->next_4k * EHCA_PAGESIZE));
-		if ( !(*rpage) && tmp_pbuf->addr ) {
-			ehca_gen_err("tmp_pbuf->addr=%lx"
-				     " tmp_pbuf->size=%lx next_4k=%lx",
-				     tmp_pbuf->addr, tmp_pbuf->size,
-				     pginfo->next_4k);
-			ret = -EFAULT;
-			goto ehca_set_pagebuf_1_exit0;
-		}
-		(pginfo->page_4k_cnt)++;
-		(pginfo->next_4k)++;
-		if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
-			(pginfo->page_cnt)++;
-		if (pginfo->next_4k >= offs4k + num4k) {
-			(pginfo->next_buf)++;
-			pginfo->next_4k = 0;
-		}
-	} else if (pginfo->type == EHCA_MR_PGI_USER) {
-		chunk      = pginfo->next_chunk;
-		prev_chunk = pginfo->next_chunk;
-		list_for_each_entry_continue(chunk,
-					     (&(pginfo->region->chunk_list)),
-					     list) {
-			pgaddr = ( page_to_pfn(chunk->page_list[
-						       pginfo->next_nmap].page)
-				   << PAGE_SHIFT);
-			*rpage = phys_to_abs(pgaddr +
-					     (pginfo->next_4k * EHCA_PAGESIZE));
-			if ( !(*rpage) ) {
-				ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
-					     " next_nmap=%lx next_4k=%lx mr=%p",
-					     pgaddr, (u64)sg_dma_address(
-						     &chunk->page_list[
-							     pginfo->
-							     next_nmap]),
-					     pginfo->next_nmap, pginfo->next_4k,
-					     e_mr);
-				ret = -EFAULT;
-				goto ehca_set_pagebuf_1_exit0;
+	/* loop over desired phys_buf_array entries */
+	while (i < number) {
+		pbuf   = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
+		num_hw  = NUM_CHUNKS((pbuf->addr % EHCA_PAGESIZE) +
+				     pbuf->size, EHCA_PAGESIZE);
+		offs_hw = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
+		while (pginfo->next_hwpage < offs_hw + num_hw) {
+			/* sanity check */
+			if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
+			    (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
+				ehca_gen_err("kpage_cnt >= num_kpages, "
+					     "kpage_cnt=%lx num_kpages=%lx "
+					     "hwpage_cnt=%lx "
+					     "num_hwpages=%lx i=%x",
+					     pginfo->kpage_cnt,
+					     pginfo->num_kpages,
+					     pginfo->hwpage_cnt,
+					     pginfo->num_hwpages, i);
+				return -EFAULT;
 			}
-			(pginfo->page_4k_cnt)++;
-			(pginfo->next_4k)++;
-			if (pginfo->next_4k %
-			    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
-				(pginfo->page_cnt)++;
-				(pginfo->next_nmap)++;
-				pginfo->next_4k = 0;
+			*kpage = phys_to_abs(
+				(pbuf->addr & EHCA_PAGEMASK)
+				+ (pginfo->next_hwpage * EHCA_PAGESIZE));
+			if ( !(*kpage) && pbuf->addr ) {
+				ehca_gen_err("pbuf->addr=%lx "
+					     "pbuf->size=%lx "
+					     "next_hwpage=%lx", pbuf->addr,
+					     pbuf->size,
+					     pginfo->next_hwpage);
+				return -EFAULT;
 			}
-			if (pginfo->next_nmap >= chunk->nmap) {
-				pginfo->next_nmap = 0;
-				prev_chunk = chunk;
-			}
-			break;
+			(pginfo->hwpage_cnt)++;
+			(pginfo->next_hwpage)++;
+			if (pginfo->next_hwpage %
+			    (PAGE_SIZE / EHCA_PAGESIZE) == 0)
+				(pginfo->kpage_cnt)++;
+			kpage++;
+			i++;
+			if (i >= number) break;
 		}
-		pginfo->next_chunk =
-			list_prepare_entry(prev_chunk,
-					   (&(pginfo->region->chunk_list)),
-					   list);
-	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
-		fmrlist = pginfo->page_list + pginfo->next_listelem;
-		*rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
-				     pginfo->next_4k * EHCA_PAGESIZE);
-		if ( !(*rpage) ) {
+		if (pginfo->next_hwpage >= offs_hw + num_hw) {
+			(pginfo->u.phy.next_buf)++;
+			pginfo->next_hwpage = 0;
+		}
+	}
+	return ret;
+}
+
+int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
+			 u32 number,
+			 u64 *kpage)
+{
+	int ret = 0;
+	u64 *fmrlist;
+	u32 i;
+
+	/* loop over desired page_list entries */
+	fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
+	for (i = 0; i < number; i++) {
+		*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
+				     pginfo->next_hwpage * EHCA_PAGESIZE);
+		if ( !(*kpage) ) {
 			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
-				     "next_listelem=%lx next_4k=%lx",
-				     *fmrlist, fmrlist, pginfo->next_listelem,
-				     pginfo->next_4k);
-			ret = -EFAULT;
-			goto ehca_set_pagebuf_1_exit0;
+				     "next_listelem=%lx next_hwpage=%lx",
+				     *fmrlist, fmrlist,
+				     pginfo->u.fmr.next_listelem,
+				     pginfo->next_hwpage);
+			return -EFAULT;
 		}
-		(pginfo->page_4k_cnt)++;
-		(pginfo->next_4k)++;
-		if (pginfo->next_4k %
-		    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
-			(pginfo->page_cnt)++;
-			(pginfo->next_listelem)++;
-			pginfo->next_4k = 0;
+		(pginfo->hwpage_cnt)++;
+		(pginfo->next_hwpage)++;
+		kpage++;
+		if (pginfo->next_hwpage %
+		    (pginfo->u.fmr.fmr_pgsize / EHCA_PAGESIZE) == 0) {
+			(pginfo->kpage_cnt)++;
+			(pginfo->u.fmr.next_listelem)++;
+			fmrlist++;
+			pginfo->next_hwpage = 0;
 		}
-	} else {
+	}
+	return ret;
+}
+
+/* setup page buffer from page info */
+int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
+		     u32 number,
+		     u64 *kpage)
+{
+	int ret;
+
+	switch (pginfo->type) {
+	case EHCA_MR_PGI_PHYS:
+		ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
+		break;
+	case EHCA_MR_PGI_USER:
+		ret = ehca_set_pagebuf_user1(pginfo, number, kpage);
+		break;
+	case EHCA_MR_PGI_FMR:
+		ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
+		break;
+	default:
 		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
 		ret = -EFAULT;
-		goto ehca_set_pagebuf_1_exit0;
+		break;
 	}
-
-ehca_set_pagebuf_1_exit0:
-	if (ret)
-		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
-			     "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
-			     "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
-			     "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
-			     pginfo, pginfo->type, pginfo->num_pages,
-			     pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
-			     rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
-			     pginfo->next_listelem, pginfo->region,
-			     pginfo->next_chunk, pginfo->next_nmap);
 	return ret;
-} /* end ehca_set_pagebuf_1() */
+} /* end ehca_set_pagebuf() */
 
 /*----------------------------------------------------------------------*/
 
@@ -1982,7 +1866,7 @@
 {
 	/* a MR is treated as max-MR only if it fits following: */
 	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
-	    (iova_start == (void*)KERNELBASE)) {
+	    (iova_start == (void *)KERNELBASE)) {
 		ehca_gen_dbg("this is a max-MR");
 		return 1;
 	} else
@@ -2042,196 +1926,23 @@
 /*----------------------------------------------------------------------*/
 
 /*
- * map HIPZ rc to IB retcodes for MR/MW allocations
- * Used for hipz_mr_reg_alloc and hipz_mw_alloc.
- */
-int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc)
-{
-	switch (hipz_rc) {
-	case H_SUCCESS:	             /* successful completion */
-		return 0;
-	case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
-	case H_CONSTRAINED:          /* resource constraint */
-	case H_NO_MEM:
-		return -ENOMEM;
-	case H_BUSY:                 /* long busy */
-		return -EBUSY;
-	default:
-		return -EINVAL;
-	}
-} /* end ehca_mrmw_map_hrc_alloc() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * map HIPZ rc to IB retcodes for MR register rpage
- * Used for hipz_h_register_rpage_mr at registering last page
- */
-int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc)
-{
-	switch (hipz_rc) {
-	case H_SUCCESS:         /* registration complete */
-		return 0;
-	case H_PAGE_REGISTERED:	/* page registered */
-	case H_ADAPTER_PARM:    /* invalid adapter handle */
-	case H_RH_PARM:         /* invalid resource handle */
-/*	case H_QT_PARM:            invalid queue type */
-	case H_PARAMETER:       /*
-				 * invalid logical address,
-				 * or count zero or greater 512
-				 */
-	case H_TABLE_FULL:      /* page table full */
-	case H_HARDWARE:        /* HCA not operational */
-		return -EINVAL;
-	case H_BUSY:            /* long busy */
-		return -EBUSY;
-	default:
-		return -EINVAL;
-	}
-} /* end ehca_mrmw_map_hrc_rrpg_last() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * map HIPZ rc to IB retcodes for MR register rpage
- * Used for hipz_h_register_rpage_mr at registering one page, but not last page
- */
-int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc)
-{
-	switch (hipz_rc) {
-	case H_PAGE_REGISTERED:	/* page registered */
-		return 0;
-	case H_SUCCESS:         /* registration complete */
-	case H_ADAPTER_PARM:    /* invalid adapter handle */
-	case H_RH_PARM:         /* invalid resource handle */
-/*	case H_QT_PARM:            invalid queue type */
-	case H_PARAMETER:       /*
-				 * invalid logical address,
-				 * or count zero or greater 512
-				 */
-	case H_TABLE_FULL:      /* page table full */
-	case H_HARDWARE:        /* HCA not operational */
-		return -EINVAL;
-	case H_BUSY:            /* long busy */
-		return -EBUSY;
-	default:
-		return -EINVAL;
-	}
-} /* end ehca_mrmw_map_hrc_rrpg_notlast() */
-
-/*----------------------------------------------------------------------*/
-
-/* map HIPZ rc to IB retcodes for MR query. Used for hipz_mr_query. */
-int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc)
-{
-	switch (hipz_rc) {
-	case H_SUCCESS:	             /* successful completion */
-		return 0;
-	case H_ADAPTER_PARM:         /* invalid adapter handle */
-	case H_RH_PARM:              /* invalid resource handle */
-		return -EINVAL;
-	case H_BUSY:                 /* long busy */
-		return -EBUSY;
-	default:
-		return -EINVAL;
-	}
-} /* end ehca_mrmw_map_hrc_query_mr() */
-
-/*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
-
-/*
- * map HIPZ rc to IB retcodes for freeing MR resource
- * Used for hipz_h_free_resource_mr
- */
-int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc)
-{
-	switch (hipz_rc) {
-	case H_SUCCESS:      /* resource freed */
-		return 0;
-	case H_ADAPTER_PARM: /* invalid adapter handle */
-	case H_RH_PARM:      /* invalid resource handle */
-	case H_R_STATE:      /* invalid resource state */
-	case H_HARDWARE:     /* HCA not operational */
-		return -EINVAL;
-	case H_RESOURCE:     /* Resource in use */
-	case H_BUSY:         /* long busy */
-		return -EBUSY;
-	default:
-		return -EINVAL;
-	}
-} /* end ehca_mrmw_map_hrc_free_mr() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * map HIPZ rc to IB retcodes for freeing MW resource
- * Used for hipz_h_free_resource_mw
- */
-int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc)
-{
-	switch (hipz_rc) {
-	case H_SUCCESS:	     /* resource freed */
-		return 0;
-	case H_ADAPTER_PARM: /* invalid adapter handle */
-	case H_RH_PARM:      /* invalid resource handle */
-	case H_R_STATE:      /* invalid resource state */
-	case H_HARDWARE:     /* HCA not operational */
-		return -EINVAL;
-	case H_RESOURCE:     /* Resource in use */
-	case H_BUSY:         /* long busy */
-		return -EBUSY;
-	default:
-		return -EINVAL;
-	}
-} /* end ehca_mrmw_map_hrc_free_mw() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * map HIPZ rc to IB retcodes for SMR registrations
- * Used for hipz_h_register_smr.
- */
-int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc)
-{
-	switch (hipz_rc) {
-	case H_SUCCESS:	             /* successful completion */
-		return 0;
-	case H_ADAPTER_PARM:         /* invalid adapter handle */
-	case H_RH_PARM:              /* invalid resource handle */
-	case H_MEM_PARM:             /* invalid MR virtual address */
-	case H_MEM_ACCESS_PARM:      /* invalid access controls */
-	case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
-		return -EINVAL;
-	case H_BUSY:                 /* long busy */
-		return -EBUSY;
-	default:
-		return -EINVAL;
-	}
-} /* end ehca_mrmw_map_hrc_reg_smr() */
-
-/*----------------------------------------------------------------------*/
-
-/*
  * MR destructor and constructor
  * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
  * except struct ib_mr and spinlock
  */
 void ehca_mr_deletenew(struct ehca_mr *mr)
 {
-	mr->flags         = 0;
-	mr->num_pages     = 0;
-	mr->num_4k        = 0;
-	mr->acl           = 0;
-	mr->start         = NULL;
+	mr->flags = 0;
+	mr->num_kpages = 0;
+	mr->num_hwpages = 0;
+	mr->acl = 0;
+	mr->start = NULL;
 	mr->fmr_page_size = 0;
 	mr->fmr_max_pages = 0;
-	mr->fmr_max_maps  = 0;
-	mr->fmr_map_cnt   = 0;
+	mr->fmr_max_maps = 0;
+	mr->fmr_map_cnt = 0;
 	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
 	memset(&mr->galpas, 0, sizeof(mr->galpas));
-	mr->nr_of_pages   = 0;
-	mr->pagearray     = NULL;
 } /* end ehca_mr_deletenew() */
 
 int ehca_init_mrmw_cache(void)
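
[Note on the ehca_mrmw.c hunks above: the open-coded `(n + 512 - 1) / 512` arithmetic is replaced by NUM_CHUNKS()/MAX_RPAGES, the designated-zero initializers by memset(), and the per-call hcall mappers by ehca2ib_return_code(). NUM_CHUNKS and MAX_RPAGES are defined in driver headers not shown in this patch; the standalone sketch below assumes the usual ceiling-division definition and MAX_RPAGES == 512, and only illustrates how the chunked rpage-registration loop sizes each "shot".]

	/*
	 * Minimal user-space sketch, assuming NUM_CHUNKS() is ceiling
	 * division and MAX_RPAGES is 512 (both assumptions; the real
	 * definitions live in the ehca headers, which this patch does
	 * not touch).
	 */
	#include <stdio.h>

	#define MAX_RPAGES 512UL
	#define NUM_CHUNKS(length, chunk_size) \
		(((length) + (chunk_size) - 1) / (chunk_size))

	int main(void)
	{
		unsigned long num_hwpages = 1200;	/* example page count */
		unsigned long i, rnum;

		for (i = 0; i < NUM_CHUNKS(num_hwpages, MAX_RPAGES); i++) {
			if (i == NUM_CHUNKS(num_hwpages, MAX_RPAGES) - 1) {
				rnum = num_hwpages % MAX_RPAGES; /* last shot */
				if (rnum == 0)
					rnum = MAX_RPAGES; /* last shot is full */
			} else
				rnum = MAX_RPAGES;
			printf("chunk %lu: register %lu pages\n", i, rnum);
		}
		/* for 1200 hwpages this prints 512, 512, 176 */
		return 0;
	}
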
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/infiniband/hw/ehca/ehca_mrmw.h
index d936e40..24f13fe 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.h
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.h
@@ -101,15 +101,10 @@
 			     u64 *page_list,
 			     int list_len);
 
-int ehca_set_pagebuf(struct ehca_mr *e_mr,
-		     struct ehca_mr_pginfo *pginfo,
+int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
 		     u32 number,
 		     u64 *kpage);
 
-int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
-		       struct ehca_mr_pginfo *pginfo,
-		       u64 *rpage);
-
 int ehca_mr_is_maxmr(u64 size,
 		     u64 *iova_start);
 
@@ -121,20 +116,6 @@
 void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
 			       int *ib_acl);
 
-int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc);
-
-int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc);
-
-int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc);
-
-int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc);
-
-int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc);
-
-int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc);
-
-int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc);
-
 void ehca_mr_deletenew(struct ehca_mr *mr);
 
 #endif  /*_EHCA_MRMW_H_*/
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h
index 8707d29..8188030 100644
--- a/drivers/infiniband/hw/ehca/ehca_qes.h
+++ b/drivers/infiniband/hw/ehca/ehca_qes.h
@@ -53,13 +53,13 @@
 	u32 length;
 };
 
-#define GRH_FLAG_MASK        EHCA_BMASK_IBM(7,7)
-#define GRH_IPVERSION_MASK   EHCA_BMASK_IBM(0,3)
-#define GRH_TCLASS_MASK      EHCA_BMASK_IBM(4,12)
-#define GRH_FLOWLABEL_MASK   EHCA_BMASK_IBM(13,31)
-#define GRH_PAYLEN_MASK      EHCA_BMASK_IBM(32,47)
-#define GRH_NEXTHEADER_MASK  EHCA_BMASK_IBM(48,55)
-#define GRH_HOPLIMIT_MASK    EHCA_BMASK_IBM(56,63)
+#define GRH_FLAG_MASK        EHCA_BMASK_IBM( 7,  7)
+#define GRH_IPVERSION_MASK   EHCA_BMASK_IBM( 0,  3)
+#define GRH_TCLASS_MASK      EHCA_BMASK_IBM( 4, 12)
+#define GRH_FLOWLABEL_MASK   EHCA_BMASK_IBM(13, 31)
+#define GRH_PAYLEN_MASK      EHCA_BMASK_IBM(32, 47)
+#define GRH_NEXTHEADER_MASK  EHCA_BMASK_IBM(48, 55)
+#define GRH_HOPLIMIT_MASK    EHCA_BMASK_IBM(56, 63)
 
 /*
  * Unreliable Datagram Address Vector Format
@@ -206,10 +206,10 @@
 
 };
 
-#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0,0)
-#define WC_IMM_DATA     EHCA_BMASK_IBM(1,1)
-#define WC_GRH_PRESENT  EHCA_BMASK_IBM(2,2)
-#define WC_SE_BIT       EHCA_BMASK_IBM(3,3)
+#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0, 0)
+#define WC_IMM_DATA     EHCA_BMASK_IBM(1, 1)
+#define WC_GRH_PRESENT  EHCA_BMASK_IBM(2, 2)
+#define WC_SE_BIT       EHCA_BMASK_IBM(3, 3)
 #define WC_STATUS_ERROR_BIT 0x80000000
 #define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
 #define WC_STATUS_PURGE_BIT 0x10
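
[Note on the ehca_qes.h hunk above: the GRH_* and WC_* masks use IBM bit numbering (bit 0 is the most significant bit of the 64-bit word) via EHCA_BMASK_IBM(), and fields are pulled out with EHCA_BMASK_GET() from the ehca_tools.h hunk further down. The sketch below copies those macro bodies verbatim and extracts two GRH fields from an invented sample word, purely for illustration.]

	/*
	 * Standalone sketch of EHCA_BMASK_IBM()/EHCA_BMASK_GET() usage;
	 * macro bodies are taken from the ehca_tools.h hunk below, the
	 * sample word value is made up for illustration only.
	 */
	#include <stdio.h>

	typedef unsigned long long u64;

	#define EHCA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))
	#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
	#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
	#define EHCA_BMASK_GET(mask, value) \
		(EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))

	#define GRH_NEXTHEADER_MASK  EHCA_BMASK_IBM(48, 55)
	#define GRH_HOPLIMIT_MASK    EHCA_BMASK_IBM(56, 63)

	int main(void)
	{
		u64 word = 0x0000000000001b40ULL;	/* invented sample value */

		/* IBM bits 48..55 are the second-lowest byte, 56..63 the lowest */
		printf("next header = 0x%llx\n",
		       EHCA_BMASK_GET(GRH_NEXTHEADER_MASK, word)); /* 0x1b */
		printf("hop limit   = 0x%llx\n",
		       EHCA_BMASK_GET(GRH_HOPLIMIT_MASK, word));   /* 0x40 */
		return 0;
	}
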
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 7467125..48e9cea 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -602,10 +602,10 @@
 			/* UD circumvention */
 			parms.act_nr_send_sges -= 2;
 			parms.act_nr_recv_sges -= 2;
-			swqe_size = offsetof(struct ehca_wqe,
-					     u.ud_av.sg_list[parms.act_nr_send_sges]);
-			rwqe_size = offsetof(struct ehca_wqe,
-					     u.ud_av.sg_list[parms.act_nr_recv_sges]);
+			swqe_size = offsetof(struct ehca_wqe, u.ud_av.sg_list[
+						     parms.act_nr_send_sges]);
+			rwqe_size = offsetof(struct ehca_wqe, u.ud_av.sg_list[
+						     parms.act_nr_recv_sges]);
 		}
 
 		if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
@@ -690,8 +690,8 @@
 	if (my_qp->send_cq) {
 		ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
 		if (ret) {
-			ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x",
-				 ret);
+			ehca_err(pd->device,
+				 "Couldn't assign qp to send_cq ret=%x", ret);
 			goto create_qp_exit4;
 		}
 	}
@@ -749,7 +749,7 @@
 	struct ehca_qp *ret;
 
 	ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
-	return IS_ERR(ret) ? (struct ib_qp *) ret : &ret->ib_qp;
+	return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
 }
 
 int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
@@ -780,7 +780,7 @@
 
 	my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
 	if (IS_ERR(my_qp))
-		return (struct ib_srq *) my_qp;
+		return (struct ib_srq *)my_qp;
 
 	/* copy back return values */
 	srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
@@ -875,7 +875,7 @@
 			 my_qp, qp_num, h_ret);
 		return ehca2ib_return_code(h_ret);
 	}
-	bad_send_wqe_p = (void*)((u64)bad_send_wqe_p & (~(1L<<63)));
+	bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
 	ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
 		 qp_num, bad_send_wqe_p);
 	/* convert wqe pointer to vadr */
@@ -890,7 +890,7 @@
 	}
 
 	/* loop sets wqe's purge bit */
-	wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
+	wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
 	*bad_wqe_cnt = 0;
 	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
 		if (ehca_debug_level)
@@ -898,7 +898,7 @@
 		wqe->nr_of_data_seg = 0; /* suppress data access */
 		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
 		q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
-		wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
+		wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
 		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
 	}
 	/*
@@ -1003,7 +1003,7 @@
 		goto modify_qp_exit1;
 	}
 
-	ehca_dbg(ibqp->device,"ehca_qp=%p qp_num=%x current qp_state=%x "
+	ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
 		 "new qp_state=%x attribute_mask=%x",
 		 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
 
@@ -1019,7 +1019,8 @@
 		goto modify_qp_exit1;
 	}
 
-	if ((mqpcb->qp_state = ib2ehca_qp_state(qp_new_state)))
+	mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
+	if (mqpcb->qp_state)
 		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
 	else {
 		ret = -EINVAL;
@@ -1077,7 +1078,7 @@
 			spin_lock_irqsave(&my_qp->spinlock_s, flags);
 			squeue_locked = 1;
 			/* mark next free wqe */
-			wqe = (struct ehca_wqe*)
+			wqe = (struct ehca_wqe *)
 				ipz_qeit_get(&my_qp->ipz_squeue);
 			wqe->optype = wqe->wqef = 0xff;
 			ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
@@ -1312,7 +1313,7 @@
 	if (h_ret != H_SUCCESS) {
 		ret = ehca2ib_return_code(h_ret);
 		ehca_err(ibqp->device, "hipz_h_modify_qp() failed rc=%lx "
-			 "ehca_qp=%p qp_num=%x",h_ret, my_qp, ibqp->qp_num);
+			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
 		goto modify_qp_exit2;
 	}
 
@@ -1411,7 +1412,7 @@
 	}
 
 	if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
-		ehca_err(qp->device,"Invalid attribute mask "
+		ehca_err(qp->device, "Invalid attribute mask "
 			 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
 			 my_qp, qp->qp_num, qp_attr_mask);
 		return -EINVAL;
@@ -1419,7 +1420,7 @@
 
 	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!qpcb) {
-		ehca_err(qp->device,"Out of memory for qpcb "
+		ehca_err(qp->device, "Out of memory for qpcb "
 			 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
 		return -ENOMEM;
 	}
@@ -1431,7 +1432,7 @@
 
 	if (h_ret != H_SUCCESS) {
 		ret = ehca2ib_return_code(h_ret);
-		ehca_err(qp->device,"hipz_h_query_qp() failed "
+		ehca_err(qp->device, "hipz_h_query_qp() failed "
 			 "ehca_qp=%p qp_num=%x h_ret=%lx",
 			 my_qp, qp->qp_num, h_ret);
 		goto query_qp_exit1;
@@ -1442,7 +1443,7 @@
 
 	if (qp_attr->cur_qp_state == -EINVAL) {
 		ret = -EINVAL;
-		ehca_err(qp->device,"Got invalid ehca_qp_state=%x "
+		ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
 			 "ehca_qp=%p qp_num=%x",
 			 qpcb->qp_state, my_qp, qp->qp_num);
 		goto query_qp_exit1;
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 61da65e6..94eed70 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -79,7 +79,8 @@
 	}
 
 	if (ehca_debug_level) {
-		ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue);
+		ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
+			     ipz_rqueue);
 		ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
 	}
 
@@ -99,7 +100,7 @@
 		struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
 		struct ib_sge *sge = send_wr->sg_list;
 		ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
-			     "send_flags=%x opcode=%x",idx, send_wr->wr_id,
+			     "send_flags=%x opcode=%x", idx, send_wr->wr_id,
 			     send_wr->num_sge, send_wr->send_flags,
 			     send_wr->opcode);
 		if (mad_hdr) {
@@ -116,7 +117,7 @@
 				     mad_hdr->attr_mod);
 		}
 		for (j = 0; j < send_wr->num_sge; j++) {
-			u8 *data = (u8 *) abs_to_virt(sge->addr);
+			u8 *data = (u8 *)abs_to_virt(sge->addr);
 			ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
 				     "lkey=%x",
 				     idx, j, data, sge->length, sge->lkey);
@@ -534,9 +535,11 @@
 
 	cqe_count++;
 	if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
-		struct ehca_qp *qp=ehca_cq_get_qp(my_cq, cqe->local_qp_number);
+		struct ehca_qp *qp;
 		int purgeflag;
 		unsigned long flags;
+
+		qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
 		if (!qp) {
 			ehca_err(cq->device, "cq_num=%x qp_num=%x "
 				 "could not find qp -> ignore cqe",
@@ -551,8 +554,8 @@
 		spin_unlock_irqrestore(&qp->spinlock_s, flags);
 
 		if (purgeflag) {
-			ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x "
-				 "src_qp=%x",
+			ehca_dbg(cq->device,
+				 "Got CQE with purged bit qp_num=%x src_qp=%x",
 				 cqe->local_qp_number, cqe->remote_qp_number);
 			if (ehca_debug_level)
 				ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index 03b185f..678b813 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -93,14 +93,14 @@
 #define ehca_gen_dbg(format, arg...) \
 	do { \
 		if (unlikely(ehca_debug_level)) \
-			printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n",\
+			printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \
 			       get_paca()->paca_index, __FUNCTION__, ## arg); \
 	} while (0)
 
 #define ehca_gen_warn(format, arg...) \
 	do { \
 		if (unlikely(ehca_debug_level)) \
-			printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n",\
+			printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \
 			       get_paca()->paca_index, __FUNCTION__, ## arg); \
 	} while (0)
 
@@ -114,12 +114,12 @@
  * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
  */
 #define ehca_dmp(adr, len, format, args...) \
-	do {				       \
-		unsigned int x;			      \
+	do { \
+		unsigned int x; \
 		unsigned int l = (unsigned int)(len); \
-		unsigned char *deb = (unsigned char*)(adr);	\
+		unsigned char *deb = (unsigned char *)(adr); \
 		for (x = 0; x < l; x += 16) { \
-			printk("EHCA_DMP:%s " format \
+			printk(KERN_INFO "EHCA_DMP:%s " format \
 			       " adr=%p ofs=%04x %016lx %016lx\n", \
 			       __FUNCTION__, ##args, deb, x, \
 			       *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
@@ -128,16 +128,16 @@
 	} while (0)
 
 /* define a bitmask, little endian version */
-#define EHCA_BMASK(pos,length) (((pos)<<16)+(length))
+#define EHCA_BMASK(pos, length) (((pos) << 16) + (length))
 
 /* define a bitmask, the ibm way... */
-#define EHCA_BMASK_IBM(from,to) (((63-to)<<16)+((to)-(from)+1))
+#define EHCA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))
 
 /* internal function, don't use */
-#define EHCA_BMASK_SHIFTPOS(mask) (((mask)>>16)&0xffff)
+#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
 
 /* internal function, don't use */
-#define EHCA_BMASK_MASK(mask) (0xffffffffffffffffULL >> ((64-(mask))&0xffff))
+#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
 
 /**
  * EHCA_BMASK_SET - return value shifted and masked by mask
@@ -145,14 +145,14 @@
  * variable&=~EHCA_BMASK_SET(MY_MASK,-1) clears the bits from the mask
  * in variable
  */
-#define EHCA_BMASK_SET(mask,value) \
-	((EHCA_BMASK_MASK(mask) & ((u64)(value)))<<EHCA_BMASK_SHIFTPOS(mask))
+#define EHCA_BMASK_SET(mask, value) \
+	((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))
 
 /**
  * EHCA_BMASK_GET - extract a parameter from value by mask
  */
-#define EHCA_BMASK_GET(mask,value) \
-	(EHCA_BMASK_MASK(mask)& (((u64)(value))>>EHCA_BMASK_SHIFTPOS(mask)))
+#define EHCA_BMASK_GET(mask, value) \
+	(EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))
 
 
 /* Converts ehca to ib return code */
@@ -161,8 +161,11 @@
 	switch (ehca_rc) {
 	case H_SUCCESS:
 		return 0;
+	case H_RESOURCE:             /* Resource in use */
 	case H_BUSY:
 		return -EBUSY;
+	case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
+	case H_CONSTRAINED:          /* resource constraint */
 	case H_NO_MEM:
 		return -ENOMEM;
 	default:
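
[Note on the ehca_tools.h hunk above: ehca2ib_return_code() gains H_RESOURCE (-EBUSY) and H_NOT_ENOUGH_RESOURCES/H_CONSTRAINED (-ENOMEM) so it can stand in for the seven ehca_mrmw_map_hrc_*() helpers deleted from ehca_mrmw.c. The self-contained sketch below uses placeholder H_* values (the real constants come from the hypervisor headers) and assumes the default case still returns -EINVAL, as the deleted helpers did.]

	/*
	 * Sketch only: placeholder H_* values, and the default -EINVAL
	 * is an assumption carried over from the removed per-call
	 * mappers; it is not part of the visible hunk.
	 */
	#include <errno.h>
	#include <stdio.h>

	enum {				/* placeholder return codes */
		H_SUCCESS,
		H_RESOURCE,
		H_BUSY,
		H_NOT_ENOUGH_RESOURCES,
		H_CONSTRAINED,
		H_NO_MEM,
		H_HARDWARE,
	};

	static int ehca2ib_return_code(unsigned long ehca_rc)
	{
		switch (ehca_rc) {
		case H_SUCCESS:
			return 0;
		case H_RESOURCE:             /* Resource in use */
		case H_BUSY:
			return -EBUSY;
		case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
		case H_CONSTRAINED:          /* resource constraint */
		case H_NO_MEM:
			return -ENOMEM;
		default:
			return -EINVAL;
		}
	}

	int main(void)
	{
		printf("H_CONSTRAINED -> %d\n", ehca2ib_return_code(H_CONSTRAINED));
		printf("H_HARDWARE    -> %d\n", ehca2ib_return_code(H_HARDWARE));
		return 0;
	}
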
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 3031b3b..05c4157 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -70,7 +70,7 @@
 
 static void ehca_mm_open(struct vm_area_struct *vma)
 {
-	u32 *count = (u32*)vma->vm_private_data;
+	u32 *count = (u32 *)vma->vm_private_data;
 	if (!count) {
 		ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
 			     vma->vm_start, vma->vm_end);
@@ -86,7 +86,7 @@
 
 static void ehca_mm_close(struct vm_area_struct *vma)
 {
-	u32 *count = (u32*)vma->vm_private_data;
+	u32 *count = (u32 *)vma->vm_private_data;
 	if (!count) {
 		ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
 			     vma->vm_start, vma->vm_end);
@@ -215,7 +215,8 @@
 	case 2: /* qp rqueue_addr */
 		ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
 			 qp->ib_qp.qp_num);
-		ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, &qp->mm_count_rqueue);
+		ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
+				      &qp->mm_count_rqueue);
 		if (unlikely(ret)) {
 			ehca_err(qp->ib_qp.device,
 				 "ehca_mmap_queue(rq) failed rc=%x qp_num=%x",
@@ -227,7 +228,8 @@
 	case 3: /* qp squeue_addr */
 		ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
 			 qp->ib_qp.qp_num);
-		ret = ehca_mmap_queue(vma, &qp->ipz_squeue, &qp->mm_count_squeue);
+		ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
+				      &qp->mm_count_squeue);
 		if (unlikely(ret)) {
 			ehca_err(qp->ib_qp.device,
 				 "ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 4776a8b..3394e05 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -501,8 +501,8 @@
 		return H_PARAMETER;
 	}
 
-	return hipz_h_register_rpage(adapter_handle,pagesize,queue_type,
-				     qp_handle.handle,logical_address_of_page,
+	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
+				     qp_handle.handle, logical_address_of_page,
 				     count);
 }
 
@@ -522,9 +522,9 @@
 				qp_handle.handle,	   /* r6 */
 				0, 0, 0, 0, 0, 0);
 	if (log_addr_next_sq_wqe2processed)
-		*log_addr_next_sq_wqe2processed = (void*)outs[0];
+		*log_addr_next_sq_wqe2processed = (void *)outs[0];
 	if (log_addr_next_rq_wqe2processed)
-		*log_addr_next_rq_wqe2processed = (void*)outs[1];
+		*log_addr_next_rq_wqe2processed = (void *)outs[1];
 
 	return ret;
 }
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c
index 0b1a477..2148210 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.c
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.c
@@ -50,7 +50,7 @@
 
 int hcall_unmap_page(u64 mapaddr)
 {
-	iounmap((volatile void __iomem*)mapaddr);
+	iounmap((volatile void __iomem *) mapaddr);
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/ehca/hipz_fns_core.h b/drivers/infiniband/hw/ehca/hipz_fns_core.h
index 20898a1..868735f 100644
--- a/drivers/infiniband/hw/ehca/hipz_fns_core.h
+++ b/drivers/infiniband/hw/ehca/hipz_fns_core.h
@@ -53,10 +53,10 @@
 #define hipz_galpa_load_cq(gal, offset) \
 	hipz_galpa_load(gal, CQTEMM_OFFSET(offset))
 
-#define hipz_galpa_store_qp(gal,offset, value) \
+#define hipz_galpa_store_qp(gal, offset, value) \
 	hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value)
 #define hipz_galpa_load_qp(gal, offset) \
-	hipz_galpa_load(gal,QPTEMM_OFFSET(offset))
+	hipz_galpa_load(gal, QPTEMM_OFFSET(offset))
 
 static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
 {
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/infiniband/hw/ehca/hipz_hw.h
index dad6dea..d9739e5 100644
--- a/drivers/infiniband/hw/ehca/hipz_hw.h
+++ b/drivers/infiniband/hw/ehca/hipz_hw.h
@@ -161,11 +161,11 @@
 /* 0x1000      */
 };
 
-#define QPX_SQADDER EHCA_BMASK_IBM(48,63)
-#define QPX_RQADDER EHCA_BMASK_IBM(48,63)
-#define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3,3)
+#define QPX_SQADDER EHCA_BMASK_IBM(48, 63)
+#define QPX_RQADDER EHCA_BMASK_IBM(48, 63)
+#define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3, 3)
 
-#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm,x)
+#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm, x)
 
 /* MRMWPT Entry Memory Map */
 struct hipz_mrmwmm {
@@ -187,7 +187,7 @@
 
 };
 
-#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm,x)
+#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm, x)
 
 struct hipz_qpedmm {
 	/* 0x00 */
@@ -238,7 +238,7 @@
 	u64 qpedx_rrva3;
 };
 
-#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm,x)
+#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm, x)
 
 /* CQ Table Entry Memory Map */
 struct hipz_cqtemm {
@@ -263,12 +263,12 @@
 /* 0x1000 */
 };
 
-#define CQX_FEC_CQE_CNT           EHCA_BMASK_IBM(32,63)
-#define CQX_FECADDER              EHCA_BMASK_IBM(32,63)
-#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0,0)
-#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0,0)
+#define CQX_FEC_CQE_CNT           EHCA_BMASK_IBM(32, 63)
+#define CQX_FECADDER              EHCA_BMASK_IBM(32, 63)
+#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0, 0)
+#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0, 0)
 
-#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm,x)
+#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm, x)
 
 /* EQ Table Entry Memory Map */
 struct hipz_eqtemm {
@@ -293,7 +293,7 @@
 
 };
 
-#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm,x)
+#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm, x)
 
 /* access control defines for MR/MW */
 #define HIPZ_ACCESSCTRL_L_WRITE  0x00800000
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index bf7a400..9606f13 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -114,7 +114,7 @@
 	 */
 	f = 0;
 	while (f < nr_of_pages) {
-		u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
+		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
 		int k;
 		if (!kpage)
 			goto ipz_queue_ctor_exit0; /*NOMEM*/
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
index 007f088..39a4f64 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -240,7 +240,7 @@
 static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
 {
 	void *ret = ipz_qeit_get(queue);
-	u32 qe = *(u8 *) ret;
+	u32 qe = *(u8 *)ret;
 	if ((qe >> 7) != (queue->toggle_state & 1))
 		return NULL;
 	ipz_qeit_eq_get_inc(queue); /* this is a good one */
@@ -250,7 +250,7 @@
 static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue)
 {
 	void *ret = ipz_qeit_get(queue);
-	u32 qe = *(u8 *) ret;
+	u32 qe = *(u8 *)ret;
 	if ((qe >> 7) != (queue->toggle_state & 1))
 		return NULL;
 	return ret;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 9361f5a..09c5fd8 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1889,7 +1889,7 @@
 /* Below is "non-zero" to force override, but both actual LEDs are off */
 #define LED_OVER_BOTH_OFF (8)
 
-void ipath_run_led_override(unsigned long opaque)
+static void ipath_run_led_override(unsigned long opaque)
 {
 	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
 	int timeoff;
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index 6b91479..b4503e9 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -426,8 +426,8 @@
  * @buffer: data to write
  * @len: number of bytes to write
  */
-int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
-				const void *buffer, int len)
+static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
+				       const void *buffer, int len)
 {
 	u8 single_byte;
 	int sub_len;
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 47aa434..1fd91c5 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -70,7 +70,7 @@
  * If rewrite is true, and bits are set in the sendbufferror registers,
  * we'll write to the buffer, for error recovery on parity errors.
  */
-void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
+static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
 {
 	u32 piobcnt;
 	unsigned long sbuf[4];
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 3105005..ace63ef 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -776,7 +776,6 @@
 int ipath_update_eeprom_log(struct ipath_devdata *dd);
 void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
 u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
-void ipath_disarm_senderrbufs(struct ipath_devdata *, int);
 
 /*
  * Set LED override, only the two LSBs have "public" meaning, but
@@ -820,7 +819,6 @@
 #define IPATH_MDIO_CTRL_8355_REG_10 0x1D
 
 int ipath_get_user_pages(unsigned long, size_t, struct page **);
-int ipath_get_user_pages_nocopy(unsigned long, struct page **);
 void ipath_release_user_pages(struct page **, size_t);
 void ipath_release_user_pages_on_close(struct page **, size_t);
 int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 8525674..c69c252 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -507,7 +507,7 @@
  *
  * Called when we run out of PIO buffers.
  */
-void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
+static void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
 {
 	unsigned long flags;
 
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 27034d3..0190edc 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -171,32 +171,6 @@
 	return ret;
 }
 
-/**
- * ipath_get_user_pages_nocopy - lock a single page for I/O and mark shared
- * @start_page: the page to lock
- * @p: the output page structure
- *
- * This is similar to ipath_get_user_pages, but it's always one page, and we
- * mark the page as locked for I/O, and shared.  This is used for the user
- * process page that contains the destination address for the rcvhdrq tail
- * update, so we need to have the vma. If we don't do this, the page can be
- * taken away from us on fork, even if the child never touches it, and then
- * the user process never sees the tail register updates.
- */
-int ipath_get_user_pages_nocopy(unsigned long page, struct page **p)
-{
-	struct vm_area_struct *vma;
-	int ret;
-
-	down_write(&current->mm->mmap_sem);
-
-	ret = __get_user_pages(page, 1, p, &vma);
-
-	up_write(&current->mm->mmap_sem);
-
-	return ret;
-}
-
 void ipath_release_user_pages(struct page **p, size_t num_pages)
 {
 	down_write(&current->mm->mmap_sem);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 65f7181..16aa61f 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -488,7 +488,7 @@
  * This is called from ipath_do_rcv_timer() at interrupt level to check for
  * QPs which need retransmits and to collect performance numbers.
  */
-void ipath_ib_timer(struct ipath_ibdev *dev)
+static void ipath_ib_timer(struct ipath_ibdev *dev)
 {
 	struct ipath_qp *resend = NULL;
 	struct list_head *last;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index f3d1f2c..9bbe819 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -782,8 +782,6 @@
 
 int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
-void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);
-
 void ipath_insert_rnr_queue(struct ipath_qp *qp);
 
 int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
@@ -807,8 +805,6 @@
 
 int ipath_ib_piobufavail(struct ipath_ibdev *);
 
-void ipath_ib_timer(struct ipath_ibdev *);
-
 unsigned ipath_get_npkeys(struct ipath_devdata *);
 
 u32 ipath_get_cr_errpkey(struct ipath_devdata *);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 4004218..b5a24fb 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1183,6 +1183,43 @@
 	return cur + nreq >= wq->max_post;
 }
 
+static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
+					  u64 remote_addr, u32 rkey)
+{
+	rseg->raddr    = cpu_to_be64(remote_addr);
+	rseg->rkey     = cpu_to_be32(rkey);
+	rseg->reserved = 0;
+}
+
+static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
+{
+	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
+		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
+	} else {
+		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+		aseg->compare  = 0;
+	}
+
+}
+
+static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
+			     struct ib_send_wr *wr)
+{
+	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
+	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+
+}
+
+static void set_data_seg(struct mlx4_wqe_data_seg *dseg,
+			 struct ib_sge *sg)
+{
+	dseg->byte_count = cpu_to_be32(sg->length);
+	dseg->lkey       = cpu_to_be32(sg->lkey);
+	dseg->addr       = cpu_to_be64(sg->addr);
+}
+
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		      struct ib_send_wr **bad_wr)
 {
@@ -1238,26 +1275,13 @@
 			switch (wr->opcode) {
 			case IB_WR_ATOMIC_CMP_AND_SWP:
 			case IB_WR_ATOMIC_FETCH_AND_ADD:
-				((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.atomic.remote_addr);
-				((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.atomic.rkey);
-				((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
-
+				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+					      wr->wr.atomic.rkey);
 				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
 
-				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-					((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
-						cpu_to_be64(wr->wr.atomic.swap);
-					((struct mlx4_wqe_atomic_seg *) wqe)->compare =
-						cpu_to_be64(wr->wr.atomic.compare_add);
-				} else {
-					((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
-						cpu_to_be64(wr->wr.atomic.compare_add);
-					((struct mlx4_wqe_atomic_seg *) wqe)->compare = 0;
-				}
-
+				set_atomic_seg(wqe, wr);
 				wqe  += sizeof (struct mlx4_wqe_atomic_seg);
+
 				size += (sizeof (struct mlx4_wqe_raddr_seg) +
 					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;
 
@@ -1266,15 +1290,10 @@
 			case IB_WR_RDMA_READ:
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
-				((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.rdma.remote_addr);
-				((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.rdma.rkey);
-				((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
-
+				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+					      wr->wr.rdma.rkey);
 				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
 				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
-
 				break;
 
 			default:
@@ -1284,13 +1303,7 @@
 			break;
 
 		case IB_QPT_UD:
-			memcpy(((struct mlx4_wqe_datagram_seg *) wqe)->av,
-			       &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
-			((struct mlx4_wqe_datagram_seg *) wqe)->dqpn =
-				cpu_to_be32(wr->wr.ud.remote_qpn);
-			((struct mlx4_wqe_datagram_seg *) wqe)->qkey =
-				cpu_to_be32(wr->wr.ud.remote_qkey);
-
+			set_datagram_seg(wqe, wr);
 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
 			break;
@@ -1313,12 +1326,7 @@
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
-			((struct mlx4_wqe_data_seg *) wqe)->byte_count =
-				cpu_to_be32(wr->sg_list[i].length);
-			((struct mlx4_wqe_data_seg *) wqe)->lkey =
-				cpu_to_be32(wr->sg_list[i].lkey);
-			((struct mlx4_wqe_data_seg *) wqe)->addr =
-				cpu_to_be64(wr->sg_list[i].addr);
+			set_data_seg(wqe, wr->sg_list + i);
 
 			wqe  += sizeof (struct mlx4_wqe_data_seg);
 			size += sizeof (struct mlx4_wqe_data_seg) / 16;
@@ -1498,7 +1506,7 @@
 static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
 				struct mlx4_qp_path *path)
 {
-	memset(ib_ah_attr, 0, sizeof *path);
+	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
 	ib_ah_attr->port_num	  = path->sched_queue & 0x40 ? 2 : 1;
 
 	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
@@ -1515,7 +1523,7 @@
 		ib_ah_attr->grh.traffic_class =
 			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
 		ib_ah_attr->grh.flow_label =
-			be32_to_cpu(path->tclass_flowlabel) & 0xffffff;
+			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
 		memcpy(ib_ah_attr->grh.dgid.raw,
 			path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
 	}
@@ -1560,7 +1568,10 @@
 	}
 
 	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
-	qp_attr->port_num   = context.pri_path.sched_queue & 0x40 ? 2 : 1;
+	if (qp_attr->qp_state == IB_QPS_INIT)
+		qp_attr->port_num = qp->port;
+	else
+		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;
 
 	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
 	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;
@@ -1578,17 +1589,25 @@
 
 done:
 	qp_attr->cur_qp_state	     = qp_attr->qp_state;
+	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
+	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
+
 	if (!ibqp->uobject) {
-		qp_attr->cap.max_send_wr     = qp->sq.wqe_cnt;
-		qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
-		qp_attr->cap.max_send_sge    = qp->sq.max_gs;
-		qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
-		qp_attr->cap.max_inline_data = (1 << qp->sq.wqe_shift) -
-			send_wqe_overhead(qp->ibqp.qp_type) -
-			sizeof (struct mlx4_wqe_inline_seg);
-		qp_init_attr->cap	     = qp_attr->cap;
+		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
+		qp_attr->cap.max_send_sge = qp->sq.max_gs;
+	} else {
+		qp_attr->cap.max_send_wr  = 0;
+		qp_attr->cap.max_send_sge = 0;
 	}
 
+	/*
+	 * We don't support inline sends for kernel QPs (yet), and we
+	 * don't know what userspace's value should be.
+	 */
+	qp_attr->cap.max_inline_data = 0;
+
+	qp_init_attr->cap	     = qp_attr->cap;
+
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index aa563e6..76fed75 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -67,7 +67,7 @@
 
 static int msi = 0;
 module_param(msi, int, 0444);
-MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero");
+MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero (deprecated, use MSI-X instead)");
 
 #else /* CONFIG_PCI_MSI */
 
@@ -1117,9 +1117,21 @@
 
 	if (msi_x && !mthca_enable_msi_x(mdev))
 		mdev->mthca_flags |= MTHCA_FLAG_MSI_X;
-	if (msi && !(mdev->mthca_flags & MTHCA_FLAG_MSI_X) &&
-	    !pci_enable_msi(pdev))
-		mdev->mthca_flags |= MTHCA_FLAG_MSI;
+	else if (msi) {
+		static int warned;
+
+		if (!warned) {
+			printk(KERN_WARNING PFX "WARNING: MSI support will be "
+			       "removed from the ib_mthca driver in January 2008.\n");
+			printk(KERN_WARNING "    If you are using MSI and cannot "
+			       "switch to MSI-X, please tell "
+			       "<general@lists.openfabrics.org>.\n");
+			++warned;
+		}
+
+		if (!pci_enable_msi(pdev))
+			mdev->mthca_flags |= MTHCA_FLAG_MSI;
+	}
 
 	if (mthca_cmd_init(mdev)) {
 		mthca_err(mdev, "Failed to init command interface, aborting.\n");
@@ -1135,7 +1147,7 @@
 		goto err_cmd;
 
 	if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
-		mthca_warn(mdev, "HCA FW version %d.%d.%3d is old (%d.%d.%3d is current).\n",
+		mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n",
 			   (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
 			   (int) (mdev->fw_ver & 0xffff),
 			   (int) (mthca_hca_table[hca_type].latest_fw >> 32),
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index eef415b..df01b20 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1578,6 +1578,45 @@
 	return cur + nreq >= wq->max;
 }
 
+static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
+					  u64 remote_addr, u32 rkey)
+{
+	rseg->raddr    = cpu_to_be64(remote_addr);
+	rseg->rkey     = cpu_to_be32(rkey);
+	rseg->reserved = 0;
+}
+
+static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
+					   struct ib_send_wr *wr)
+{
+	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
+		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
+	} else {
+		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+		aseg->compare  = 0;
+	}
+
+}
+
+static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
+			     struct ib_send_wr *wr)
+{
+	useg->lkey    = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
+	useg->av_addr =	cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
+	useg->dqpn    =	cpu_to_be32(wr->wr.ud.remote_qpn);
+	useg->qkey    =	cpu_to_be32(wr->wr.ud.remote_qkey);
+
+}
+
+static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
+			     struct ib_send_wr *wr)
+{
+	memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
+	useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+	useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+}
+
 int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			  struct ib_send_wr **bad_wr)
 {
@@ -1590,8 +1629,15 @@
 	int nreq;
 	int i;
 	int size;
-	int size0 = 0;
-	u32 f0;
+	/*
+	 * f0 and size0 are only used if nreq != 0, and they will
+	 * always be initialized the first time through the main loop
+	 * before nreq is incremented.  So nreq cannot become non-zero
+	 * without initializing f0 and size0, and they are in fact
+	 * never used uninitialized.
+	 */
+	int uninitialized_var(size0);
+	u32 uninitialized_var(f0);
 	int ind;
 	u8 op0 = 0;
 
@@ -1636,25 +1682,11 @@
 			switch (wr->opcode) {
 			case IB_WR_ATOMIC_CMP_AND_SWP:
 			case IB_WR_ATOMIC_FETCH_AND_ADD:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.atomic.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.atomic.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
-
+				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+					      wr->wr.atomic.rkey);
 				wqe += sizeof (struct mthca_raddr_seg);
 
-				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-					((struct mthca_atomic_seg *) wqe)->swap_add =
-						cpu_to_be64(wr->wr.atomic.swap);
-					((struct mthca_atomic_seg *) wqe)->compare =
-						cpu_to_be64(wr->wr.atomic.compare_add);
-				} else {
-					((struct mthca_atomic_seg *) wqe)->swap_add =
-						cpu_to_be64(wr->wr.atomic.compare_add);
-					((struct mthca_atomic_seg *) wqe)->compare = 0;
-				}
-
+				set_atomic_seg(wqe, wr);
 				wqe += sizeof (struct mthca_atomic_seg);
 				size += (sizeof (struct mthca_raddr_seg) +
 					 sizeof (struct mthca_atomic_seg)) / 16;
@@ -1663,12 +1695,9 @@
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
 			case IB_WR_RDMA_READ:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.rdma.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.rdma.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
-				wqe += sizeof (struct mthca_raddr_seg);
+				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+					      wr->wr.rdma.rkey);
+				wqe  += sizeof (struct mthca_raddr_seg);
 				size += sizeof (struct mthca_raddr_seg) / 16;
 				break;
 
@@ -1683,12 +1712,9 @@
 			switch (wr->opcode) {
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.rdma.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.rdma.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
-				wqe += sizeof (struct mthca_raddr_seg);
+				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+					      wr->wr.rdma.rkey);
+				wqe  += sizeof (struct mthca_raddr_seg);
 				size += sizeof (struct mthca_raddr_seg) / 16;
 				break;
 
@@ -1700,16 +1726,8 @@
 			break;
 
 		case UD:
-			((struct mthca_tavor_ud_seg *) wqe)->lkey =
-				cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
-			((struct mthca_tavor_ud_seg *) wqe)->av_addr =
-				cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
-			((struct mthca_tavor_ud_seg *) wqe)->dqpn =
-				cpu_to_be32(wr->wr.ud.remote_qpn);
-			((struct mthca_tavor_ud_seg *) wqe)->qkey =
-				cpu_to_be32(wr->wr.ud.remote_qkey);
-
-			wqe += sizeof (struct mthca_tavor_ud_seg);
+			set_tavor_ud_seg(wqe, wr);
+			wqe  += sizeof (struct mthca_tavor_ud_seg);
 			size += sizeof (struct mthca_tavor_ud_seg) / 16;
 			break;
 
@@ -1734,13 +1752,8 @@
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
-			((struct mthca_data_seg *) wqe)->byte_count =
-				cpu_to_be32(wr->sg_list[i].length);
-			((struct mthca_data_seg *) wqe)->lkey =
-				cpu_to_be32(wr->sg_list[i].lkey);
-			((struct mthca_data_seg *) wqe)->addr =
-				cpu_to_be64(wr->sg_list[i].addr);
-			wqe += sizeof (struct mthca_data_seg);
+			mthca_set_data_seg(wqe, wr->sg_list + i);
+			wqe  += sizeof (struct mthca_data_seg);
 			size += sizeof (struct mthca_data_seg) / 16;
 		}
 
@@ -1768,11 +1781,11 @@
 				    mthca_opcode[wr->opcode]);
 		wmb();
 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
-			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size |
+			cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
 				    ((wr->send_flags & IB_SEND_FENCE) ?
 				    MTHCA_NEXT_FENCE : 0));
 
-		if (!size0) {
+		if (!nreq) {
 			size0 = size;
 			op0   = mthca_opcode[wr->opcode];
 			f0    = wr->send_flags & IB_SEND_FENCE ?
@@ -1822,7 +1835,14 @@
 	int nreq;
 	int i;
 	int size;
-	int size0 = 0;
+	/*
+	 * size0 is only used if nreq != 0, and it will always be
+	 * initialized the first time through the main loop before
+	 * nreq is incremented.  So nreq cannot become non-zero
+	 * without initializing size0, and it is in fact never used
+	 * uninitialized.
+	 */
+	int uninitialized_var(size0);
 	int ind;
 	void *wqe;
 	void *prev_wqe;
@@ -1863,13 +1883,8 @@
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
-			((struct mthca_data_seg *) wqe)->byte_count =
-				cpu_to_be32(wr->sg_list[i].length);
-			((struct mthca_data_seg *) wqe)->lkey =
-				cpu_to_be32(wr->sg_list[i].lkey);
-			((struct mthca_data_seg *) wqe)->addr =
-				cpu_to_be64(wr->sg_list[i].addr);
-			wqe += sizeof (struct mthca_data_seg);
+			mthca_set_data_seg(wqe, wr->sg_list + i);
+			wqe  += sizeof (struct mthca_data_seg);
 			size += sizeof (struct mthca_data_seg) / 16;
 		}
 
@@ -1881,7 +1896,7 @@
 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
 			cpu_to_be32(MTHCA_NEXT_DBD | size);
 
-		if (!size0)
+		if (!nreq)
 			size0 = size;
 
 		++ind;
@@ -1903,7 +1918,6 @@
 
 			qp->rq.next_ind = ind;
 			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
-			size0 = 0;
 		}
 	}
 
@@ -1945,8 +1959,15 @@
 	int nreq;
 	int i;
 	int size;
-	int size0 = 0;
-	u32 f0;
+	/*
+	 * f0 and size0 are only used if nreq != 0, and they will
+	 * always be initialized the first time through the main loop
+	 * before nreq is incremented.  So nreq cannot become non-zero
+	 * without initializing f0 and size0, and they are in fact
+	 * never used uninitialized.
+	 */
+	int uninitialized_var(size0);
+	u32 uninitialized_var(f0);
 	int ind;
 	u8 op0 = 0;
 
@@ -1966,7 +1987,6 @@
 			doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
 
 			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
-			size0 = 0;
 
 			/*
 			 * Make sure that descriptors are written before
@@ -2017,26 +2037,12 @@
 			switch (wr->opcode) {
 			case IB_WR_ATOMIC_CMP_AND_SWP:
 			case IB_WR_ATOMIC_FETCH_AND_ADD:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.atomic.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.atomic.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
-
+				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+					      wr->wr.atomic.rkey);
 				wqe += sizeof (struct mthca_raddr_seg);
 
-				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-					((struct mthca_atomic_seg *) wqe)->swap_add =
-						cpu_to_be64(wr->wr.atomic.swap);
-					((struct mthca_atomic_seg *) wqe)->compare =
-						cpu_to_be64(wr->wr.atomic.compare_add);
-				} else {
-					((struct mthca_atomic_seg *) wqe)->swap_add =
-						cpu_to_be64(wr->wr.atomic.compare_add);
-					((struct mthca_atomic_seg *) wqe)->compare = 0;
-				}
-
-				wqe += sizeof (struct mthca_atomic_seg);
+				set_atomic_seg(wqe, wr);
+				wqe  += sizeof (struct mthca_atomic_seg);
 				size += (sizeof (struct mthca_raddr_seg) +
 					 sizeof (struct mthca_atomic_seg)) / 16;
 				break;
@@ -2044,12 +2050,9 @@
 			case IB_WR_RDMA_READ:
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.rdma.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.rdma.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
-				wqe += sizeof (struct mthca_raddr_seg);
+				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+					      wr->wr.rdma.rkey);
+				wqe  += sizeof (struct mthca_raddr_seg);
 				size += sizeof (struct mthca_raddr_seg) / 16;
 				break;
 
@@ -2064,12 +2067,9 @@
 			switch (wr->opcode) {
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.rdma.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.rdma.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
-				wqe += sizeof (struct mthca_raddr_seg);
+				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+					      wr->wr.rdma.rkey);
+				wqe  += sizeof (struct mthca_raddr_seg);
 				size += sizeof (struct mthca_raddr_seg) / 16;
 				break;
 
@@ -2081,14 +2081,8 @@
 			break;
 
 		case UD:
-			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
-			       to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
-			((struct mthca_arbel_ud_seg *) wqe)->dqpn =
-				cpu_to_be32(wr->wr.ud.remote_qpn);
-			((struct mthca_arbel_ud_seg *) wqe)->qkey =
-				cpu_to_be32(wr->wr.ud.remote_qkey);
-
-			wqe += sizeof (struct mthca_arbel_ud_seg);
+			set_arbel_ud_seg(wqe, wr);
+			wqe  += sizeof (struct mthca_arbel_ud_seg);
 			size += sizeof (struct mthca_arbel_ud_seg) / 16;
 			break;
 
@@ -2113,13 +2107,8 @@
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
-			((struct mthca_data_seg *) wqe)->byte_count =
-				cpu_to_be32(wr->sg_list[i].length);
-			((struct mthca_data_seg *) wqe)->lkey =
-				cpu_to_be32(wr->sg_list[i].lkey);
-			((struct mthca_data_seg *) wqe)->addr =
-				cpu_to_be64(wr->sg_list[i].addr);
-			wqe += sizeof (struct mthca_data_seg);
+			mthca_set_data_seg(wqe, wr->sg_list + i);
+			wqe  += sizeof (struct mthca_data_seg);
 			size += sizeof (struct mthca_data_seg) / 16;
 		}
 
@@ -2151,7 +2140,7 @@
 				    ((wr->send_flags & IB_SEND_FENCE) ?
 				     MTHCA_NEXT_FENCE : 0));
 
-		if (!size0) {
+		if (!nreq) {
 			size0 = size;
 			op0   = mthca_opcode[wr->opcode];
 			f0    = wr->send_flags & IB_SEND_FENCE ?
@@ -2241,20 +2230,12 @@
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
-			((struct mthca_data_seg *) wqe)->byte_count =
-				cpu_to_be32(wr->sg_list[i].length);
-			((struct mthca_data_seg *) wqe)->lkey =
-				cpu_to_be32(wr->sg_list[i].lkey);
-			((struct mthca_data_seg *) wqe)->addr =
-				cpu_to_be64(wr->sg_list[i].addr);
+			mthca_set_data_seg(wqe, wr->sg_list + i);
 			wqe += sizeof (struct mthca_data_seg);
 		}
 
-		if (i < qp->rq.max_gs) {
-			((struct mthca_data_seg *) wqe)->byte_count = 0;
-			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
-			((struct mthca_data_seg *) wqe)->addr = 0;
-		}
+		if (i < qp->rq.max_gs)
+			mthca_set_data_seg_inval(wqe);
 
 		qp->wrid[ind] = wr->wr_id;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index b8f05a5..88d219e 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -543,20 +543,12 @@
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
-			((struct mthca_data_seg *) wqe)->byte_count =
-				cpu_to_be32(wr->sg_list[i].length);
-			((struct mthca_data_seg *) wqe)->lkey =
-				cpu_to_be32(wr->sg_list[i].lkey);
-			((struct mthca_data_seg *) wqe)->addr =
-				cpu_to_be64(wr->sg_list[i].addr);
+			mthca_set_data_seg(wqe, wr->sg_list + i);
 			wqe += sizeof (struct mthca_data_seg);
 		}
 
-		if (i < srq->max_gs) {
-			((struct mthca_data_seg *) wqe)->byte_count = 0;
-			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
-			((struct mthca_data_seg *) wqe)->addr = 0;
-		}
+		if (i < srq->max_gs)
+			mthca_set_data_seg_inval(wqe);
 
 		((struct mthca_next_seg *) prev_wqe)->nda_op =
 			cpu_to_be32((ind << srq->wqe_shift) | 1);
@@ -662,20 +654,12 @@
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
-			((struct mthca_data_seg *) wqe)->byte_count =
-				cpu_to_be32(wr->sg_list[i].length);
-			((struct mthca_data_seg *) wqe)->lkey =
-				cpu_to_be32(wr->sg_list[i].lkey);
-			((struct mthca_data_seg *) wqe)->addr =
-				cpu_to_be64(wr->sg_list[i].addr);
+			mthca_set_data_seg(wqe, wr->sg_list + i);
 			wqe += sizeof (struct mthca_data_seg);
 		}
 
-		if (i < srq->max_gs) {
-			((struct mthca_data_seg *) wqe)->byte_count = 0;
-			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
-			((struct mthca_data_seg *) wqe)->addr = 0;
-		}
+		if (i < srq->max_gs)
+			mthca_set_data_seg_inval(wqe);
 
 		srq->wrid[ind]  = wr->wr_id;
 		srq->first_free = next_ind;
diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h
index e7d2c1e..f6a66fe 100644
--- a/drivers/infiniband/hw/mthca/mthca_wqe.h
+++ b/drivers/infiniband/hw/mthca/mthca_wqe.h
@@ -113,4 +113,19 @@
 	__be16 vcrc;
 };
 
+static __always_inline void mthca_set_data_seg(struct mthca_data_seg *dseg,
+					       struct ib_sge *sg)
+{
+	dseg->byte_count = cpu_to_be32(sg->length);
+	dseg->lkey       = cpu_to_be32(sg->lkey);
+	dseg->addr       = cpu_to_be64(sg->addr);
+}
+
+static __always_inline void mthca_set_data_seg_inval(struct mthca_data_seg *dseg)
+{
+	dseg->byte_count = 0;
+	dseg->lkey       = cpu_to_be32(MTHCA_INVAL_LKEY);
+	dseg->addr       = 0;
+}
+
 #endif /* MTHCA_WQE_H */
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index e235370..1ee867b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -310,8 +310,6 @@
 
 void iser_conn_terminate(struct iser_conn *ib_conn);
 
-void iser_conn_release(struct iser_conn *ib_conn);
-
 void iser_rcv_completion(struct iser_desc *desc,
 			 unsigned long    dto_xfer_len);
 
@@ -329,9 +327,6 @@
 		     struct iser_regd_buf    *regd_buf,
 		     enum dma_data_direction direction);
 
-int  iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task    *ctask,
-				  enum iser_data_dir            cmd_dir);
-
 void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
 				     enum iser_data_dir         cmd_dir);
 
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index fc9f1fd..36cdf77 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -103,8 +103,8 @@
 /**
  * iser_start_rdma_unaligned_sg
  */
-int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task  *iser_ctask,
-				 enum iser_data_dir cmd_dir)
+static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+					enum iser_data_dir cmd_dir)
 {
 	int dma_nents;
 	struct ib_device *dev;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 2044de1..d42ec01 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -311,6 +311,29 @@
 }
 
 /**
+ * Frees all conn objects and deallocs conn descriptor
+ */
+static void iser_conn_release(struct iser_conn *ib_conn)
+{
+	struct iser_device  *device = ib_conn->device;
+
+	BUG_ON(ib_conn->state != ISER_CONN_DOWN);
+
+	mutex_lock(&ig.connlist_mutex);
+	list_del(&ib_conn->conn_list);
+	mutex_unlock(&ig.connlist_mutex);
+
+	iser_free_ib_conn_res(ib_conn);
+	ib_conn->device = NULL;
+	/* on EVENT_ADDR_ERROR there's no device yet for this conn */
+	if (device != NULL)
+		iser_device_try_release(device);
+	if (ib_conn->iser_conn)
+		ib_conn->iser_conn->ib_conn = NULL;
+	kfree(ib_conn);
+}
+
+/**
  * triggers start of the disconnect procedures and wait for them to be done
  */
 void iser_conn_terminate(struct iser_conn *ib_conn)
@@ -550,30 +573,6 @@
 }
 
 /**
- * Frees all conn objects and deallocs conn descriptor
- */
-void iser_conn_release(struct iser_conn *ib_conn)
-{
-	struct iser_device  *device = ib_conn->device;
-
-	BUG_ON(ib_conn->state != ISER_CONN_DOWN);
-
-	mutex_lock(&ig.connlist_mutex);
-	list_del(&ib_conn->conn_list);
-	mutex_unlock(&ig.connlist_mutex);
-
-	iser_free_ib_conn_res(ib_conn);
-	ib_conn->device = NULL;
-	/* on EVENT_ADDR_ERROR there's no device yet for this conn */
-	if (device != NULL)
-		iser_device_try_release(device);
-	if (ib_conn->iser_conn)
-		ib_conn->iser_conn->ib_conn = NULL;
-	kfree(ib_conn);
-}
-
-
-/**
  * iser_reg_page_vec - Register physical memory
  *
  * returns: 0 on success, errno code on failure
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index bd686a2..20896d5 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -445,6 +445,7 @@
 
 static int gameport_thread(void *nothing)
 {
+	set_freezable();
 	do {
 		gameport_handle_event();
 		wait_event_interruptible(gameport_wait,
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 5a7b49c..b10ffae 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -117,15 +117,13 @@
 	if (ret)
 		return ret;
 
-	kmi = kmalloc(sizeof(struct amba_kmi_port), GFP_KERNEL);
-	io = kmalloc(sizeof(struct serio), GFP_KERNEL);
+	kmi = kzalloc(sizeof(struct amba_kmi_port), GFP_KERNEL);
+	io = kzalloc(sizeof(struct serio), GFP_KERNEL);
 	if (!kmi || !io) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	memset(kmi, 0, sizeof(struct amba_kmi_port));
-	memset(io, 0, sizeof(struct serio));
 
 	io->id.type	= SERIO_8042;
 	io->write	= amba_kmi_write;
diff --git a/drivers/input/serio/pcips2.c b/drivers/input/serio/pcips2.c
index ea5e3c6..1b404f9 100644
--- a/drivers/input/serio/pcips2.c
+++ b/drivers/input/serio/pcips2.c
@@ -140,15 +140,13 @@
 	if (ret)
 		goto disable;
 
-	ps2if = kmalloc(sizeof(struct pcips2_data), GFP_KERNEL);
-	serio = kmalloc(sizeof(struct serio), GFP_KERNEL);
+	ps2if = kzalloc(sizeof(struct pcips2_data), GFP_KERNEL);
+	serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
 	if (!ps2if || !serio) {
 		ret = -ENOMEM;
 		goto release;
 	}
 
-	memset(ps2if, 0, sizeof(struct pcips2_data));
-	memset(serio, 0, sizeof(struct serio));
 
 	serio->id.type		= SERIO_8042;
 	serio->write		= pcips2_write;
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index d31ece8..2ad8878 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -234,15 +234,13 @@
 	struct serio *serio;
 	int ret;
 
-	ps2if = kmalloc(sizeof(struct ps2if), GFP_KERNEL);
-	serio = kmalloc(sizeof(struct serio), GFP_KERNEL);
+	ps2if = kzalloc(sizeof(struct ps2if), GFP_KERNEL);
+	serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
 	if (!ps2if || !serio) {
 		ret = -ENOMEM;
 		goto free;
 	}
 
-	memset(ps2if, 0, sizeof(struct ps2if));
-	memset(serio, 0, sizeof(struct serio));
 
 	serio->id.type		= SERIO_8042;
 	serio->write		= ps2_write;
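The three serio probe paths above (ambakmi, pcips2, sa1111ps2) replace kmalloc() followed by memset() with kzalloc(), which returns already-zeroed memory. A minimal before/after sketch of the pattern, reusing the ambakmi structure name for illustration:

	/* before: allocate, then zero by hand */
	kmi = kmalloc(sizeof(struct amba_kmi_port), GFP_KERNEL);
	if (!kmi)
		return -ENOMEM;
	memset(kmi, 0, sizeof(struct amba_kmi_port));

	/* after: one call, same zeroed result */
	kmi = kzalloc(sizeof(struct amba_kmi_port), GFP_KERNEL);
	if (!kmi)
		return -ENOMEM;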
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index a8f3bc1..372ca49 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -384,6 +384,7 @@
 
 static int serio_thread(void *nothing)
 {
+	set_freezable();
 	do {
 		serio_handle_event();
 		wait_event_interruptible(serio_wait,
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index f0cbcdb..36f9440 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -292,6 +292,7 @@
 
 	sched_setscheduler(tsk, SCHED_FIFO, &param);
 
+	set_freezable();
 	while (!kthread_should_stop()) {
 		unsigned int x, y, p;
 		long timeout;
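The set_freezable() calls added to the gameport, serio, and ucb1400_ts kernel threads opt those threads back into the freezer, since kthreads are no longer freezable by default. A minimal sketch of the resulting thread loop, with hypothetical wait-queue and flag names rather than the drivers' own:

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wait);
static int example_pending;		/* hypothetical "work available" flag */

static int example_thread(void *nothing)
{
	set_freezable();		/* allow suspend/hibernate to freeze this thread */
	do {
		/* ... drain queued events here ... */
		wait_event_interruptible(example_wait,
				kthread_should_stop() || example_pending);
		try_to_freeze();	/* park here while the system is freezing */
	} while (!kthread_should_stop());

	return 0;
}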
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 3e088c4..cf906c8 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -2,12 +2,10 @@
 # ISDN device configuration
 #
 
-menu "ISDN subsystem"
-	depends on !S390
-
-config ISDN
+menuconfig ISDN
 	tristate "ISDN support"
 	depends on NET
+	depends on !S390
 	---help---
 	  ISDN ("Integrated Services Digital Networks", called RNIS in France)
 	  is a special type of fully digital telephone service; it's mostly
@@ -21,9 +19,9 @@
 
 	  Select this option if you want your kernel to support ISDN.
 
+if ISDN
 
 menu "Old ISDN4Linux"
-	depends on NET && ISDN
 
 config ISDN_I4L
 	tristate "Old ISDN4Linux (deprecated)"
@@ -50,20 +48,21 @@
 endmenu
 
 comment "CAPI subsystem"
-	depends on NET && ISDN
 
 config ISDN_CAPI
 	tristate "CAPI2.0 support"
-	depends on ISDN
 	help
 	  This provides the CAPI (Common ISDN Application Programming
 	  Interface, a standard making it easy for programs to access ISDN
 	  hardware, see <http://www.capi.org/>.  This is needed for AVM's set
 	  of active ISDN controllers like B1, T1, M1.
 
+if ISDN_CAPI
+
 source "drivers/isdn/capi/Kconfig"
 
 source "drivers/isdn/hardware/Kconfig"
 
-endmenu
+endif # ISDN_CAPI
 
+endif # ISDN
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index c92f9d7..e1afd60 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -3,7 +3,6 @@
 #
 config ISDN_DRV_AVMB1_VERBOSE_REASON
 	bool "Verbose reason code reporting"
-	depends on ISDN_CAPI
 	default y
 	help
 	  If you say Y here, the CAPI drivers will give verbose reasons for
@@ -12,7 +11,6 @@
 
 config CAPI_TRACE
 	bool "CAPI trace support"
-	depends on ISDN_CAPI
 	default y
 	help
 	  If you say Y here, the kernelcapi driver can make verbose traces
@@ -23,7 +21,7 @@
 
 config ISDN_CAPI_MIDDLEWARE
 	bool "CAPI2.0 Middleware support (EXPERIMENTAL)"
-	depends on ISDN_CAPI && EXPERIMENTAL
+	depends on EXPERIMENTAL
 	help
 	  This option will enhance the capabilities of the /dev/capi20
 	  interface.  It will provide a means of moving a data connection,
@@ -33,7 +31,6 @@
 
 config ISDN_CAPI_CAPI20
 	tristate "CAPI2.0 /dev/capi support"
-	depends on ISDN_CAPI
 	help
 	  This option will provide the CAPI 2.0 interface to userspace
 	  applications via /dev/capi20. Applications should use the
@@ -56,7 +53,7 @@
 
 config ISDN_CAPI_CAPIDRV
 	tristate "CAPI2.0 capidrv interface support"
-	depends on ISDN_CAPI && ISDN_I4L
+	depends on ISDN_I4L
 	help
 	  This option provides the glue code to hook up CAPI driven cards to
 	  the legacy isdn4linux link layer.  If you have a card which is
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 81661b8..f449dae 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -549,7 +549,7 @@
 		capimsg_setu8 (skb->data, 5, CAPI_REQ);
 		capimsg_setu16(skb->data, 6, mp->msgid++);
 		capimsg_setu32(skb->data, 8, mp->ncci);	/* NCCI */
-		capimsg_setu32(skb->data, 12, (u32) skb->data); /* Data32 */
+		capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
 		capimsg_setu16(skb->data, 16, len);	/* Data length */
 		capimsg_setu16(skb->data, 18, datahandle);
 		capimsg_setu16(skb->data, 20, 0);	/* Flags */
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 3ed34f7..9f73bc27 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -258,7 +258,7 @@
 	if ((!ap) || (ap->release_in_progress))
 		return;
 
-	down(&ap->recv_sem);
+	mutex_lock(&ap->recv_mtx);
 	while ((skb = skb_dequeue(&ap->recv_queue))) {
 		if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_IND)
 			ap->nrecvdatapkt++;
@@ -267,7 +267,7 @@
 
 		ap->recv_message(ap, skb);
 	}
-	up(&ap->recv_sem);
+	mutex_unlock(&ap->recv_mtx);
 }
 
 void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb)
@@ -547,7 +547,7 @@
 	ap->nsentctlpkt = 0;
 	ap->nsentdatapkt = 0;
 	ap->callback = NULL;
-	init_MUTEX(&ap->recv_sem);
+	mutex_init(&ap->recv_mtx);
 	skb_queue_head_init(&ap->recv_queue);
 	INIT_WORK(&ap->recv_work, recv_handler);
 	ap->release_in_progress = 0;
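The kcapi.c hunks above convert the per-application receive lock from a semaphore (recv_sem, init_MUTEX/down/up) to a mutex (recv_mtx, mutex_init/mutex_lock/mutex_unlock); the field itself is presumably redeclared as a struct mutex in the application structure, which is not part of this hunk. A minimal sketch of the mutex API involved, using a hypothetical structure in place of the real CAPI application struct:

#include <linux/mutex.h>

struct example_appl {			/* hypothetical stand-in for the CAPI application struct */
	struct mutex recv_mtx;
	/* ... receive queue, counters, ... */
};

static void example_register(struct example_appl *ap)
{
	mutex_init(&ap->recv_mtx);	/* replaces init_MUTEX(&ap->recv_sem) */
}

static void example_recv(struct example_appl *ap)
{
	mutex_lock(&ap->recv_mtx);	/* replaces down(&ap->recv_sem) */
	/* ... dequeue and deliver messages ... */
	mutex_unlock(&ap->recv_mtx);	/* replaces up(&ap->recv_sem) */
}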
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index 31f4fd8..845a797 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -243,36 +243,15 @@
 
 // ---------------------------------------------------------------------------
 
-
-static __inline__ struct capi_driver *capi_driver_get_idx(loff_t pos)
-{
-	struct capi_driver *drv = NULL;
-	struct list_head *l;
-	loff_t i;
-
-	i = 0;
-	list_for_each(l, &capi_drivers) {
-		drv = list_entry(l, struct capi_driver, list);
-		if (i++ == pos)
-			return drv;
-	}
-	return NULL;
-}
-
 static void *capi_driver_start(struct seq_file *seq, loff_t *pos)
 {
-	struct capi_driver *drv;
 	read_lock(&capi_drivers_list_lock);
-	drv = capi_driver_get_idx(*pos);
-	return drv;
+	return seq_list_start(&capi_drivers, *pos);
 }
 
 static void *capi_driver_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct capi_driver *drv = (struct capi_driver *)v;
-	++*pos;
-	if (drv->list.next == &capi_drivers) return NULL;
-	return list_entry(drv->list.next, struct capi_driver, list);
+	return seq_list_next(v, &capi_drivers, pos);
 }
 
 static void capi_driver_stop(struct seq_file *seq, void *v)
@@ -282,7 +261,8 @@
 
 static int capi_driver_show(struct seq_file *seq, void *v)
 {
-	struct capi_driver *drv = (struct capi_driver *)v;
+	struct capi_driver *drv = list_entry(v, struct capi_driver, list);
+
 	seq_printf(seq, "%-32s %s\n", drv->name, drv->revision);
 	return 0;
 }
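The kcapi_proc.c change drops the open-coded list walk in favour of the generic seq_file list iterators seq_list_start() and seq_list_next(), with show() recovering the element via list_entry(). A minimal sketch of that pattern with a hypothetical list and item type (locking around start/stop omitted for brevity):

#include <linux/list.h>
#include <linux/seq_file.h>

struct example_item {			/* hypothetical item type */
	struct list_head list;
	const char *name;
};

static LIST_HEAD(example_list);

static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
	return seq_list_start(&example_list, *pos);
}

static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &example_list, pos);
}

static int example_seq_show(struct seq_file *seq, void *v)
{
	struct example_item *it = list_entry(v, struct example_item, list);

	seq_printf(seq, "%s\n", it->name);
	return 0;
}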
diff --git a/drivers/isdn/hardware/Kconfig b/drivers/isdn/hardware/Kconfig
index 139f197..30d028d 100644
--- a/drivers/isdn/hardware/Kconfig
+++ b/drivers/isdn/hardware/Kconfig
@@ -2,7 +2,6 @@
 # ISDN hardware drivers
 #
 comment "CAPI hardware drivers"
-	depends on NET && ISDN && ISDN_CAPI
 
 source "drivers/isdn/hardware/avm/Kconfig"
 
diff --git a/drivers/isdn/hardware/avm/Kconfig b/drivers/isdn/hardware/avm/Kconfig
index 29a32a8..5dbcbe3 100644
--- a/drivers/isdn/hardware/avm/Kconfig
+++ b/drivers/isdn/hardware/avm/Kconfig
@@ -2,23 +2,22 @@
 # ISDN AVM drivers
 #
 
-menu "Active AVM cards"
-	depends on NET && ISDN && ISDN_CAPI!=n
-
-config CAPI_AVM
-	bool "Support AVM cards"
+menuconfig CAPI_AVM
+	bool "Active AVM cards"
 	help
 	  Enable support for AVM active ISDN cards.
 
+if CAPI_AVM
+
 config ISDN_DRV_AVMB1_B1ISA
 	tristate "AVM B1 ISA support"
-	depends on CAPI_AVM && ISDN_CAPI && ISA
+	depends on ISA
 	help
 	  Enable support for the ISA version of the AVM B1 card.
 
 config ISDN_DRV_AVMB1_B1PCI
 	tristate "AVM B1 PCI support"
-	depends on CAPI_AVM && ISDN_CAPI && PCI
+	depends on PCI
 	help
 	  Enable support for the PCI version of the AVM B1 card.
 
@@ -30,14 +29,13 @@
 
 config ISDN_DRV_AVMB1_T1ISA
 	tristate "AVM T1/T1-B ISA support"
-	depends on CAPI_AVM && ISDN_CAPI && ISA
+	depends on ISA
 	help
 	  Enable support for the AVM T1 T1B card.
 	  Note: This is a PRI card and handle 30 B-channels.
 
 config ISDN_DRV_AVMB1_B1PCMCIA
 	tristate "AVM B1/M1/M2 PCMCIA support"
-	depends on CAPI_AVM && ISDN_CAPI
 	help
 	  Enable support for the PCMCIA version of the AVM B1 card.
 
@@ -50,17 +48,16 @@
 
 config ISDN_DRV_AVMB1_T1PCI
 	tristate "AVM T1/T1-B PCI support"
-	depends on CAPI_AVM && ISDN_CAPI && PCI
+	depends on PCI
 	help
 	  Enable support for the AVM T1 T1B card.
 	  Note: This is a PRI card and handle 30 B-channels.
 
 config ISDN_DRV_AVMB1_C4
 	tristate "AVM C4/C2 support"
-	depends on CAPI_AVM && ISDN_CAPI && PCI
+	depends on PCI
 	help
 	  Enable support for the AVM C4/C2 PCI cards.
 	  These cards handle 4/2 BRI ISDN lines (8/4 channels).
 
-endmenu
-
+endif # CAPI_AVM
diff --git a/drivers/isdn/hardware/eicon/Kconfig b/drivers/isdn/hardware/eicon/Kconfig
index 01d4afd..6082b6a 100644
--- a/drivers/isdn/hardware/eicon/Kconfig
+++ b/drivers/isdn/hardware/eicon/Kconfig
@@ -2,52 +2,50 @@
 # ISDN DIVAS Eicon driver
 #
 
-menu "Active Eicon DIVA Server cards"
-	depends on NET && ISDN && ISDN_CAPI!=n
-
-config CAPI_EICON
-	bool "Support Eicon cards"
+menuconfig CAPI_EICON
+	bool "Active Eicon DIVA Server cards"
 	help
 	  Enable support for Eicon Networks active ISDN cards.
 
+if CAPI_EICON
+
 config ISDN_DIVAS
 	tristate "Support Eicon DIVA Server cards"
-	depends on CAPI_EICON && PROC_FS && PCI
+	depends on PROC_FS && PCI
 	help
 	  Say Y here if you have an Eicon Networks DIVA Server PCI ISDN card.
 	  In order to use this card, additional firmware is necessary, which
 	  has to be downloaded into the card using the divactrl utility.
 
+if ISDN_DIVAS
+
 config ISDN_DIVAS_BRIPCI
 	bool "DIVA Server BRI/PCI support"
-	depends on ISDN_DIVAS
 	help
 	  Enable support for DIVA Server BRI-PCI.
 
 config ISDN_DIVAS_PRIPCI
 	bool "DIVA Server PRI/PCI support"
-	depends on ISDN_DIVAS
 	help
 	  Enable support for DIVA Server PRI-PCI.
 
 config ISDN_DIVAS_DIVACAPI
 	tristate "DIVA CAPI2.0 interface support"
-	depends on ISDN_DIVAS && ISDN_CAPI
 	help
 	  You need this to provide the CAPI interface
 	  for DIVA Server cards.
 
 config ISDN_DIVAS_USERIDI
 	tristate "DIVA User-IDI interface support"
-	depends on ISDN_DIVAS
 	help
 	  Enable support for user-mode IDI interface.
 
 config ISDN_DIVAS_MAINT
 	tristate "DIVA Maint driver support"
-	depends on ISDN_DIVAS && m
+	depends on m
 	help
 	  Enable Divas Maintenance driver.
 
-endmenu
+endif # ISDN_DIVAS
 
+endif # CAPI_EICON
diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
index 4cbc68c..db87d51 100644
--- a/drivers/isdn/hardware/eicon/idifunc.c
+++ b/drivers/isdn/hardware/eicon/idifunc.c
@@ -106,6 +106,7 @@
 	} else {
 		DBG_ERR(("could not create user mode idi card %d",
 			 adapter_nr));
+		diva_os_free(0, card);
 	}
 }
 
diff --git a/drivers/isdn/hisax/bkm_a4t.c b/drivers/isdn/hisax/bkm_a4t.c
index 871310d..3d1bdc8 100644
--- a/drivers/isdn/hisax/bkm_a4t.c
+++ b/drivers/isdn/hisax/bkm_a4t.c
@@ -255,54 +255,38 @@
 	return (0);
 }
 
-static struct pci_dev *dev_a4t __devinitdata = NULL;
-
-int __devinit
-setup_bkm_a4t(struct IsdnCard *card)
+static int __devinit a4t_pci_probe(struct pci_dev *dev_a4t,
+				   struct IsdnCardState *cs,
+				   u_int *found,
+				   u_int *pci_memaddr)
 {
-	struct IsdnCardState *cs = card->cs;
-	char tmp[64];
-	u_int pci_memaddr = 0, found = 0;
+	u16 sub_sys;
+	u16 sub_vendor;
+
+	sub_vendor = dev_a4t->subsystem_vendor;
+	sub_sys = dev_a4t->subsystem_device;
+	if ((sub_sys == PCI_DEVICE_ID_BERKOM_A4T) && (sub_vendor == PCI_VENDOR_ID_BERKOM)) {
+		if (pci_enable_device(dev_a4t))
+			return (0);	/* end loop & function */
+		*found = 1;
+		*pci_memaddr = pci_resource_start(dev_a4t, 0);
+		cs->irq = dev_a4t->irq;
+		return (1);		/* end loop */
+	}
+
+	return (-1);			/* continue looping */
+}
+
+static int __devinit a4t_cs_init(struct IsdnCard *card,
+				 struct IsdnCardState *cs,
+				 u_int pci_memaddr)
+{
 	I20_REGISTER_FILE *pI20_Regs;
-#ifdef CONFIG_PCI
-#endif
 
-	strcpy(tmp, bkm_a4t_revision);
-	printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
-	if (cs->typ == ISDN_CTYPE_BKM_A4T) {
-		cs->subtyp = BKM_A4T;
-	} else
-		return (0);
-
-#ifdef CONFIG_PCI
-	while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN,
-		PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) {
-		u16 sub_sys;
-		u16 sub_vendor;
-
-		sub_vendor = dev_a4t->subsystem_vendor;
-		sub_sys = dev_a4t->subsystem_device;
-		if ((sub_sys == PCI_DEVICE_ID_BERKOM_A4T) && (sub_vendor == PCI_VENDOR_ID_BERKOM)) {
-			if (pci_enable_device(dev_a4t))
-				return(0);
-			found = 1;
-			pci_memaddr = pci_resource_start(dev_a4t, 0);
-			cs->irq = dev_a4t->irq;
-			break;
-		}
-	}
-	if (!found) {
-		printk(KERN_WARNING "HiSax: %s: Card not found\n", CardType[card->typ]);
-		return (0);
-	}
 	if (!cs->irq) {		/* IRQ range check ?? */
 		printk(KERN_WARNING "HiSax: %s: No IRQ\n", CardType[card->typ]);
 		return (0);
 	}
-	if (!pci_memaddr) {
-		printk(KERN_WARNING "HiSax: %s: No Memory base address\n", CardType[card->typ]);
-		return (0);
-	}
 	cs->hw.ax.base = (long) ioremap(pci_memaddr, 4096);
 	/* Check suspecious address */
 	pI20_Regs = (I20_REGISTER_FILE *) (cs->hw.ax.base);
@@ -317,11 +301,7 @@
 	cs->hw.ax.jade_adr = cs->hw.ax.base + PO_OFFSET;
 	cs->hw.ax.isac_ale = GCS_1;
 	cs->hw.ax.jade_ale = GCS_3;
-#else
-	printk(KERN_WARNING "HiSax: %s: NO_PCI_BIOS\n", CardType[card->typ]);
-	printk(KERN_WARNING "HiSax: %s: unable to configure\n", CardType[card->typ]);
-	return (0);
-#endif				/* CONFIG_PCI */
+
 	printk(KERN_INFO "HiSax: %s: Card configured at 0x%lX IRQ %d\n",
 	       CardType[card->typ], cs->hw.ax.base, cs->irq);
 
@@ -339,5 +319,43 @@
 	ISACVersion(cs, "Telekom A4T:");
 	/* Jade version */
 	JadeVersion(cs, "Telekom A4T:");
+
 	return (1);
 }
+
+static struct pci_dev *dev_a4t __devinitdata = NULL;
+
+int __devinit
+setup_bkm_a4t(struct IsdnCard *card)
+{
+	struct IsdnCardState *cs = card->cs;
+	char tmp[64];
+	u_int pci_memaddr = 0, found = 0;
+	int ret;
+
+	strcpy(tmp, bkm_a4t_revision);
+	printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
+	if (cs->typ == ISDN_CTYPE_BKM_A4T) {
+		cs->subtyp = BKM_A4T;
+	} else
+		return (0);
+
+	while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN,
+		PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) {
+		ret = a4t_pci_probe(dev_a4t, cs, &found, &pci_memaddr);
+		if (!ret)
+			return (0);
+		if (ret > 0)
+			break;
+	}
+	if (!found) {
+		printk(KERN_WARNING "HiSax: %s: Card not found\n", CardType[card->typ]);
+		return (0);
+	}
+	if (!pci_memaddr) {
+		printk(KERN_WARNING "HiSax: %s: No Memory base address\n", CardType[card->typ]);
+		return (0);
+	}
+
+	return a4t_cs_init(card, cs, pci_memaddr);
+}
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 8d53a7f..97097ef 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -361,11 +361,11 @@
 
 int nrcards;
 
-extern char *l1_revision;
-extern char *l2_revision;
-extern char *l3_revision;
-extern char *lli_revision;
-extern char *tei_revision;
+extern const char *l1_revision;
+extern const char *l2_revision;
+extern const char *l3_revision;
+extern const char *lli_revision;
+extern const char *tei_revision;
 
 char *HiSax_getrev(const char *revision)
 {
@@ -847,95 +847,10 @@
 	return 3;
 }
 
-static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockowner)
+static int hisax_cs_setup_card(struct IsdnCard *card)
 {
-	int ret = 0;
-	struct IsdnCard *card = cards + cardnr;
-	struct IsdnCardState *cs;
+	int ret;
 
-	cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
-	if (!cs) {
-		printk(KERN_WARNING
-		       "HiSax: No memory for IsdnCardState(card %d)\n",
-		       cardnr + 1);
-		goto out;
-	}
-	card->cs = cs;
-	spin_lock_init(&cs->statlock);
-	spin_lock_init(&cs->lock);
-	cs->chanlimit = 2;	/* maximum B-channel number */
-	cs->logecho = 0;	/* No echo logging */
-	cs->cardnr = cardnr;
-	cs->debug = L1_DEB_WARN;
-	cs->HW_Flags = 0;
-	cs->busy_flag = busy_flag;
-	cs->irq_flags = I4L_IRQ_FLAG;
-#if TEI_PER_CARD
-	if (card->protocol == ISDN_PTYPE_NI1)
-		test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
-#else
-	test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
-#endif
-	cs->protocol = card->protocol;
-
-	if (card->typ <= 0 || card->typ > ISDN_CTYPE_COUNT) {
-		printk(KERN_WARNING
-		       "HiSax: Card Type %d out of range\n", card->typ);
-		goto outf_cs;
-	}
-	if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
-		printk(KERN_WARNING
-		       "HiSax: No memory for dlog(card %d)\n", cardnr + 1);
-		goto outf_cs;
-	}
-	if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
-		printk(KERN_WARNING
-		       "HiSax: No memory for status_buf(card %d)\n",
-		       cardnr + 1);
-		goto outf_dlog;
-	}
-	cs->stlist = NULL;
-	cs->status_read = cs->status_buf;
-	cs->status_write = cs->status_buf;
-	cs->status_end = cs->status_buf + HISAX_STATUS_BUFSIZE - 1;
-	cs->typ = card->typ;
-#ifdef MODULE
-	cs->iif.owner = lockowner;
-#endif
-	strcpy(cs->iif.id, id);
-	cs->iif.channels = 2;
-	cs->iif.maxbufsize = MAX_DATA_SIZE;
-	cs->iif.hl_hdrlen = MAX_HEADER_LEN;
-	cs->iif.features =
-		ISDN_FEATURE_L2_X75I |
-		ISDN_FEATURE_L2_HDLC |
-		ISDN_FEATURE_L2_HDLC_56K |
-		ISDN_FEATURE_L2_TRANS |
-		ISDN_FEATURE_L3_TRANS |
-#ifdef	CONFIG_HISAX_1TR6
-		ISDN_FEATURE_P_1TR6 |
-#endif
-#ifdef	CONFIG_HISAX_EURO
-		ISDN_FEATURE_P_EURO |
-#endif
-#ifdef	CONFIG_HISAX_NI1
-		ISDN_FEATURE_P_NI1 |
-#endif
-		0;
-
-	cs->iif.command = HiSax_command;
-	cs->iif.writecmd = NULL;
-	cs->iif.writebuf_skb = HiSax_writebuf_skb;
-	cs->iif.readstat = HiSax_readstatus;
-	register_isdn(&cs->iif);
-	cs->myid = cs->iif.channels;
-	printk(KERN_INFO
-	       "HiSax: Card %d Protocol %s Id=%s (%d)\n", cardnr + 1,
-	       (card->protocol == ISDN_PTYPE_1TR6) ? "1TR6" :
-	       (card->protocol == ISDN_PTYPE_EURO) ? "EDSS1" :
-	       (card->protocol == ISDN_PTYPE_LEASED) ? "LEASED" :
-	       (card->protocol == ISDN_PTYPE_NI1) ? "NI1" :
-	       "NONE", cs->iif.id, cs->myid);
 	switch (card->typ) {
 #if CARD_TELES0
 	case ISDN_CTYPE_16_0:
@@ -1094,13 +1009,115 @@
 		printk(KERN_WARNING
 		       "HiSax: Support for %s Card not selected\n",
 		       CardType[card->typ]);
-		ll_unload(cs);
+		ret = 0;
+		break;
+	}
+
+	return ret;
+}
+
+static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card,
+			struct IsdnCardState **cs_out, int *busy_flag,
+			struct module *lockowner)
+{
+	struct IsdnCardState *cs;
+
+	*cs_out = NULL;
+
+	cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
+	if (!cs) {
+		printk(KERN_WARNING
+		       "HiSax: No memory for IsdnCardState(card %d)\n",
+		       cardnr + 1);
+		goto out;
+	}
+	card->cs = cs;
+	spin_lock_init(&cs->statlock);
+	spin_lock_init(&cs->lock);
+	cs->chanlimit = 2;	/* maximum B-channel number */
+	cs->logecho = 0;	/* No echo logging */
+	cs->cardnr = cardnr;
+	cs->debug = L1_DEB_WARN;
+	cs->HW_Flags = 0;
+	cs->busy_flag = busy_flag;
+	cs->irq_flags = I4L_IRQ_FLAG;
+#if TEI_PER_CARD
+	if (card->protocol == ISDN_PTYPE_NI1)
+		test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
+#else
+	test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
+#endif
+	cs->protocol = card->protocol;
+
+	if (card->typ <= 0 || card->typ > ISDN_CTYPE_COUNT) {
+		printk(KERN_WARNING
+		       "HiSax: Card Type %d out of range\n", card->typ);
 		goto outf_cs;
 	}
-	if (!ret) {
-		ll_unload(cs);
+	if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
+		printk(KERN_WARNING
+		       "HiSax: No memory for dlog(card %d)\n", cardnr + 1);
 		goto outf_cs;
 	}
+	if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
+		printk(KERN_WARNING
+		       "HiSax: No memory for status_buf(card %d)\n",
+		       cardnr + 1);
+		goto outf_dlog;
+	}
+	cs->stlist = NULL;
+	cs->status_read = cs->status_buf;
+	cs->status_write = cs->status_buf;
+	cs->status_end = cs->status_buf + HISAX_STATUS_BUFSIZE - 1;
+	cs->typ = card->typ;
+#ifdef MODULE
+	cs->iif.owner = lockowner;
+#endif
+	strcpy(cs->iif.id, id);
+	cs->iif.channels = 2;
+	cs->iif.maxbufsize = MAX_DATA_SIZE;
+	cs->iif.hl_hdrlen = MAX_HEADER_LEN;
+	cs->iif.features =
+		ISDN_FEATURE_L2_X75I |
+		ISDN_FEATURE_L2_HDLC |
+		ISDN_FEATURE_L2_HDLC_56K |
+		ISDN_FEATURE_L2_TRANS |
+		ISDN_FEATURE_L3_TRANS |
+#ifdef	CONFIG_HISAX_1TR6
+		ISDN_FEATURE_P_1TR6 |
+#endif
+#ifdef	CONFIG_HISAX_EURO
+		ISDN_FEATURE_P_EURO |
+#endif
+#ifdef	CONFIG_HISAX_NI1
+		ISDN_FEATURE_P_NI1 |
+#endif
+		0;
+
+	cs->iif.command = HiSax_command;
+	cs->iif.writecmd = NULL;
+	cs->iif.writebuf_skb = HiSax_writebuf_skb;
+	cs->iif.readstat = HiSax_readstatus;
+	register_isdn(&cs->iif);
+	cs->myid = cs->iif.channels;
+
+	*cs_out = cs;
+	return 1;	/* success */
+
+outf_dlog:
+	kfree(cs->dlog);
+outf_cs:
+	kfree(cs);
+	card->cs = NULL;
+out:
+	return 0;	/* error */
+}
+
+static int hisax_cs_setup(int cardnr, struct IsdnCard *card,
+			  struct IsdnCardState *cs)
+{
+	int ret;
+
 	if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_ATOMIC))) {
 		printk(KERN_WARNING "HiSax: No memory for isac rcvbuf\n");
 		ll_unload(cs);
@@ -1129,25 +1146,53 @@
 	}
 	if (ret) {
 		closecard(cardnr);
-		ret = 0;
 		goto outf_cs;
 	}
 	init_tei(cs, cs->protocol);
 	ret = CallcNewChan(cs);
 	if (ret) {
 		closecard(cardnr);
-		ret = 0;
 		goto outf_cs;
 	}
 	/* ISAR needs firmware download first */
 	if (!test_bit(HW_ISAR, &cs->HW_Flags))
 		ll_run(cs, 0);
 
-	ret = 1;
+	return 1;
+
+outf_cs:
+	kfree(cs);
+	card->cs = NULL;
+	return 0;
+}
+
+static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockowner)
+{
+	int ret;
+	struct IsdnCard *card = cards + cardnr;
+	struct IsdnCardState *cs;
+
+	ret = hisax_cs_new(cardnr, id, card, &cs, busy_flag, lockowner);
+	if (!ret)
+		return 0;
+
+	printk(KERN_INFO
+	       "HiSax: Card %d Protocol %s Id=%s (%d)\n", cardnr + 1,
+	       (card->protocol == ISDN_PTYPE_1TR6) ? "1TR6" :
+	       (card->protocol == ISDN_PTYPE_EURO) ? "EDSS1" :
+	       (card->protocol == ISDN_PTYPE_LEASED) ? "LEASED" :
+	       (card->protocol == ISDN_PTYPE_NI1) ? "NI1" :
+	       "NONE", cs->iif.id, cs->myid);
+
+	ret = hisax_cs_setup_card(card);
+	if (!ret) {
+		ll_unload(cs);
+		goto outf_cs;
+	}
+
+	ret = hisax_cs_setup(cardnr, card, cs);
 	goto out;
 
- outf_dlog:
-	kfree(cs->dlog);
  outf_cs:
 	kfree(cs);
 	card->cs = NULL;
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c
index b45de9d..b73027f 100644
--- a/drivers/isdn/hisax/enternow_pci.c
+++ b/drivers/isdn/hisax/enternow_pci.c
@@ -300,98 +300,72 @@
 	return IRQ_HANDLED;
 }
 
-
-static struct pci_dev *dev_netjet __devinitdata = NULL;
-
-/* called by config.c */
-int __devinit
-setup_enternow_pci(struct IsdnCard *card)
+static int __devinit en_pci_probe(struct pci_dev *dev_netjet,
+				  struct IsdnCardState *cs)
 {
-	int bytecnt;
-	struct IsdnCardState *cs = card->cs;
-	char tmp[64];
-
-#ifdef CONFIG_PCI
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-        strcpy(tmp, enternow_pci_rev);
-	printk(KERN_INFO "HiSax: Formula-n Europe AG enter:now ISDN PCI driver Rev. %s\n", HiSax_getrev(tmp));
-	if (cs->typ != ISDN_CTYPE_ENTERNOW)
+	if (pci_enable_device(dev_netjet))
 		return(0);
-	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
-
-	for ( ;; )
-	{
-		if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
-			PCI_DEVICE_ID_TIGERJET_300,  dev_netjet))) {
-			if (pci_enable_device(dev_netjet))
-				return(0);
-			cs->irq = dev_netjet->irq;
-			if (!cs->irq) {
-				printk(KERN_WARNING "enter:now PCI: No IRQ for PCI card found\n");
-				return(0);
-			}
-			cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
-			if (!cs->hw.njet.base) {
-				printk(KERN_WARNING "enter:now PCI: No IO-Adr for PCI card found\n");
-				return(0);
-			}
-                        /* checks Sub-Vendor ID because system crashes with Traverse-Card */
-			if ((dev_netjet->subsystem_vendor != 0x55) ||
-				(dev_netjet->subsystem_device != 0x02)) {
-				printk(KERN_WARNING "enter:now: You tried to load this driver with an incompatible TigerJet-card\n");
-                                printk(KERN_WARNING "Use type=20 for Traverse NetJet PCI Card.\n");
-                                return(0);
-                        }
-		} else {
-                        printk(KERN_WARNING "enter:now PCI: No PCI card found\n");
-			return(0);
-		}
-
-		cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
-		cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // window to the AMD
-
-		/* Reset on */
-		cs->hw.njet.ctrl_reg = 0x07;  // changed from 0xff
-		outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
-		/* 20 ms Pause */
-		mdelay(20);
-
-		cs->hw.njet.ctrl_reg = 0x30;  /* Reset Off and status read clear */
-		outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
-		mdelay(10);
-
-		cs->hw.njet.auxd = 0x00; // was 0xc0
-		cs->hw.njet.dmactrl = 0;
-
-		outb(~TJ_AMD_IRQ, cs->hw.njet.base + NETJET_AUXCTRL);
-		outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1);
-		outb(cs->hw.njet.auxd, cs->hw.njet.auxa);
-
-		break;
+	cs->irq = dev_netjet->irq;
+	if (!cs->irq) {
+		printk(KERN_WARNING "enter:now PCI: No IRQ for PCI card found\n");
+		return(0);
 	}
-#else
+	cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
+	if (!cs->hw.njet.base) {
+		printk(KERN_WARNING "enter:now PCI: No IO-Adr for PCI card found\n");
+		return(0);
+	}
+	/* checks Sub-Vendor ID because system crashes with Traverse-Card */
+	if ((dev_netjet->subsystem_vendor != 0x55) ||
+	    (dev_netjet->subsystem_device != 0x02)) {
+		printk(KERN_WARNING "enter:now: You tried to load this driver with an incompatible TigerJet-card\n");
+		printk(KERN_WARNING "Use type=20 for Traverse NetJet PCI Card.\n");
+		return(0);
+	}
 
-	printk(KERN_WARNING "enter:now PCI: NO_PCI_BIOS\n");
-	printk(KERN_WARNING "enter:now PCI: unable to config Formula-n enter:now ISDN PCI ab\n");
-	return (0);
+	return(1);
+}
 
-#endif /* CONFIG_PCI */
+static void __devinit en_cs_init(struct IsdnCard *card,
+				 struct IsdnCardState *cs)
+{
+	cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
+	cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // window to the AMD
 
-	bytecnt = 256;
+	/* Reset on */
+	cs->hw.njet.ctrl_reg = 0x07;  // changed from 0xff
+	outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
+	/* 20 ms Pause */
+	mdelay(20);
+
+	cs->hw.njet.ctrl_reg = 0x30;  /* Reset Off and status read clear */
+	outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
+	mdelay(10);
+
+	cs->hw.njet.auxd = 0x00; // was 0xc0
+	cs->hw.njet.dmactrl = 0;
+
+	outb(~TJ_AMD_IRQ, cs->hw.njet.base + NETJET_AUXCTRL);
+	outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1);
+	outb(cs->hw.njet.auxd, cs->hw.njet.auxa);
+}
+
+static int __devinit en_cs_init_rest(struct IsdnCard *card,
+				     struct IsdnCardState *cs)
+{
+	const int bytecnt = 256;
 
 	printk(KERN_INFO
 		"enter:now PCI: PCI card configured at 0x%lx IRQ %d\n",
 		cs->hw.njet.base, cs->irq);
 	if (!request_region(cs->hw.njet.base, bytecnt, "Fn_ISDN")) {
 		printk(KERN_WARNING
-			   "HiSax: %s config port %lx-%lx already in use\n",
-			   CardType[card->typ],
-			   cs->hw.njet.base,
-			   cs->hw.njet.base + bytecnt);
+		       "HiSax: enter:now config port %lx-%lx already in use\n",
+		       cs->hw.njet.base,
+		       cs->hw.njet.base + bytecnt);
 		return (0);
 	}
+
 	setup_Amd7930(cs);
 	cs->hw.njet.last_is0 = 0;
         /* macro rByteAMD */
@@ -407,5 +381,44 @@
 	cs->irq_func = &enpci_interrupt;
 	cs->irq_flags |= IRQF_SHARED;
 
-        return (1);
+	return (1);
+}
+
+static struct pci_dev *dev_netjet __devinitdata = NULL;
+
+/* called by config.c */
+int __devinit
+setup_enternow_pci(struct IsdnCard *card)
+{
+	int ret;
+	struct IsdnCardState *cs = card->cs;
+	char tmp[64];
+
+#ifdef __BIG_ENDIAN
+#error "not running on big endian machines now"
+#endif
+
+        strcpy(tmp, enternow_pci_rev);
+	printk(KERN_INFO "HiSax: Formula-n Europe AG enter:now ISDN PCI driver Rev. %s\n", HiSax_getrev(tmp));
+	if (cs->typ != ISDN_CTYPE_ENTERNOW)
+		return(0);
+	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
+
+	for ( ;; )
+	{
+		if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
+			PCI_DEVICE_ID_TIGERJET_300,  dev_netjet))) {
+			ret = en_pci_probe(dev_netjet, cs);
+			if (!ret)
+				return(0);
+		} else {
+                        printk(KERN_WARNING "enter:now PCI: No PCI card found\n");
+			return(0);
+		}
+
+		en_cs_init(card, cs);
+		break;
+	}
+
+        return en_cs_init_rest(card, cs);
 }
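
Not from the patch: a compact sketch of the probe/init/init_rest split that this refactoring applies to setup_enternow_pci(). The fake device, its fields and messages are invented; only the driver's convention of returning 0 on error and 1 on success is kept.

#include <stdio.h>

struct fake_dev { int irq; unsigned long base; };

/* probe: validate the device and record its resources (0 = error, 1 = ok) */
static int probe(struct fake_dev *d)
{
	if (!d->irq || !d->base) {
		fprintf(stderr, "probe: missing IRQ or I/O base\n");
		return 0;
	}
	return 1;
}

/* init: program the basic register state (reset, IRQ masks) */
static void init_regs(struct fake_dev *d)
{
	printf("reset + mask IRQs at %#lx\n", d->base);
}

/* init_rest: claim resources and hook up the interrupt handler */
static int init_rest(struct fake_dev *d)
{
	printf("configured at %#lx IRQ %d\n", d->base, d->irq);
	return 1;
}

static int setup(struct fake_dev *d)
{
	if (!probe(d))
		return 0;
	init_regs(d);
	return init_rest(d);
}

int main(void)
{
	struct fake_dev d = { .irq = 10, .base = 0xd000 };

	return setup(&d) ? 0 : 1;
}
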
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 8a48a3c..077080a 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -6,7 +6,7 @@
  *              based on existing driver for CCD hfc ISA cards
  * Copyright    by Werner Cornelius  <werner@isdn4linux.de>
  *              by Karsten Keil      <keil@isdn4linux.de>
- * 
+ *
  * This software may be used and distributed according to the terms
  * of the GNU General Public License, incorporated herein by reference.
  *
@@ -67,8 +67,6 @@
 };
 
 
-#ifdef CONFIG_PCI
-
 /******************************************/
 /* free hardware resources used by driver */
 /******************************************/
@@ -237,7 +235,7 @@
 	if (fifo_state)
 	        cs->hw.hfcpci.fifo_en |= fifo_state;
 	Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
-}   
+}
 
 /***************************************/
 /* clear the desired B-channel tx fifo */
@@ -263,7 +261,7 @@
 	if (fifo_state)
 	        cs->hw.hfcpci.fifo_en |= fifo_state;
 	Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
-}   
+}
 
 /*********************************************/
 /* read a complete B-frame out of the buffer */
@@ -511,7 +509,6 @@
 	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
 	if (count && receive)
 		goto Begin;
-	return;
 }
 
 /**************************/
@@ -582,7 +579,6 @@
 
 	dev_kfree_skb_any(cs->tx_skb);
 	cs->tx_skb = NULL;
-	return;
 }
 
 /**************************/
@@ -729,7 +725,6 @@
 	dev_kfree_skb_any(bcs->tx_skb);
 	bcs->tx_skb = NULL;
 	test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
-	return;
 }
 
 /**********************************************/
@@ -924,7 +919,6 @@
 	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
 	if (count && receive)
 		goto Begin;
-	return;
 }				/* receive_emsg */
 
 /*********************/
@@ -1350,13 +1344,13 @@
 				cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
 			}
 			if (fifo2) {
-			        cs->hw.hfcpci.last_bfifo_cnt[1] = 0;  
+			        cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
 				cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
 				cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
 				cs->hw.hfcpci.ctmt &= ~2;
 				cs->hw.hfcpci.conn &= ~0x18;
 			} else {
-			        cs->hw.hfcpci.last_bfifo_cnt[0] = 0;  
+			        cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
 				cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
 				cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
 				cs->hw.hfcpci.ctmt &= ~1;
@@ -1642,8 +1636,6 @@
 /* this variable is used as card index when more than one cards are present */
 static struct pci_dev *dev_hfcpci __devinitdata = NULL;
 
-#endif				/* CONFIG_PCI */
-
 int __devinit
 setup_hfcpci(struct IsdnCard *card)
 {
@@ -1656,96 +1648,99 @@
 #ifdef __BIG_ENDIAN
 #error "not running on big endian machines now"
 #endif
+
 	strcpy(tmp, hfcpci_revision);
 	printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
-#ifdef CONFIG_PCI
+
 	cs->hw.hfcpci.int_s1 = 0;
 	cs->dc.hfcpci.ph_state = 0;
 	cs->hw.hfcpci.fifo = 255;
-	if (cs->typ == ISDN_CTYPE_HFC_PCI) {
-		i = 0;
-		while (id_list[i].vendor_id) {
-			tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
-						     id_list[i].device_id,
-						     dev_hfcpci);
-			i++;
-			if (tmp_hfcpci) {
-				if (pci_enable_device(tmp_hfcpci))
-					continue;
-				pci_set_master(tmp_hfcpci);
-				if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[ 0].start & PCI_BASE_ADDRESS_IO_MASK)))
-					continue;
-				else
-					break;
-			}
-		}
+	if (cs->typ != ISDN_CTYPE_HFC_PCI)
+		return(0);
 
+	i = 0;
+	while (id_list[i].vendor_id) {
+		tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
+					     id_list[i].device_id,
+					     dev_hfcpci);
+		i++;
 		if (tmp_hfcpci) {
-			i--;
-			dev_hfcpci = tmp_hfcpci;	/* old device */
-			cs->hw.hfcpci.dev = dev_hfcpci;
-			cs->irq = dev_hfcpci->irq;
-			if (!cs->irq) {
-				printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
-				return (0);
-			}
-			cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
-			printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
-		} else {
-			printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
-			return (0);
+			if (pci_enable_device(tmp_hfcpci))
+				continue;
+			pci_set_master(tmp_hfcpci);
+			if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[ 0].start & PCI_BASE_ADDRESS_IO_MASK)))
+				continue;
+			else
+				break;
 		}
-		if (!cs->hw.hfcpci.pci_io) {
-			printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
-			return (0);
-		}
-		/* Allocate memory for FIFOS */
-		/* Because the HFC-PCI needs a 32K physical alignment, we */
-		/* need to allocate the double mem and align the address */
-		if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
-			printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
-			return 0;
-		}
-		cs->hw.hfcpci.fifos = (void *)
-		    (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000;
-		pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos));
-		cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
-		printk(KERN_INFO
-		       "HFC-PCI: defined at mem %p fifo %p(%#x) IRQ %d HZ %d\n",
-		       cs->hw.hfcpci.pci_io,
-		       cs->hw.hfcpci.fifos,
-		       (u_int) virt_to_bus(cs->hw.hfcpci.fifos),
-		       cs->irq, HZ);
-		spin_lock_irqsave(&cs->lock, flags);
-		pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO);	/* enable memory mapped ports, disable busmaster */
-		cs->hw.hfcpci.int_m2 = 0;	/* disable all interrupts */
-		cs->hw.hfcpci.int_m1 = 0;
-		Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
-		Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
-		/* At this point the needed PCI config is done */
-		/* fifos are still not enabled */
-		INIT_WORK(&cs->tqueue,  hfcpci_bh);
-		cs->setstack_d = setstack_hfcpci;
-		cs->BC_Send_Data = &hfcpci_send_data;
-		cs->readisac = NULL;
-		cs->writeisac = NULL;
-		cs->readisacfifo = NULL;
-		cs->writeisacfifo = NULL;
-		cs->BC_Read_Reg = NULL;
-		cs->BC_Write_Reg = NULL;
-		cs->irq_func = &hfcpci_interrupt;
-		cs->irq_flags |= IRQF_SHARED;
-		cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
-		cs->hw.hfcpci.timer.data = (long) cs;
-		init_timer(&cs->hw.hfcpci.timer);
-		cs->cardmsg = &hfcpci_card_msg;
-		cs->auxcmd = &hfcpci_auxcmd;
-		spin_unlock_irqrestore(&cs->lock, flags);
-		return (1);
-	} else
-		return (0);	/* no valid card type */
-#else
-	printk(KERN_WARNING "HFC-PCI: NO_PCI_BIOS\n");
-	return (0);
-#endif				/* CONFIG_PCI */
+	}
+
+	if (!tmp_hfcpci) {
+		printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
+		return (0);
+	}
+
+	i--;
+	dev_hfcpci = tmp_hfcpci;	/* old device */
+	cs->hw.hfcpci.dev = dev_hfcpci;
+	cs->irq = dev_hfcpci->irq;
+	if (!cs->irq) {
+		printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
+		return (0);
+	}
+	cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
+	printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
+
+	if (!cs->hw.hfcpci.pci_io) {
+		printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
+		return (0);
+	}
+	/* Allocate memory for FIFOS */
+	/* Because the HFC-PCI needs a 32K physical alignment, we */
+	/* need to allocate the double mem and align the address */
+	if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
+		printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
+		return 0;
+	}
+	cs->hw.hfcpci.fifos = (void *)
+	    (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000;
+	pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos));
+	cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
+	printk(KERN_INFO
+	       "HFC-PCI: defined at mem %p fifo %p(%#x) IRQ %d HZ %d\n",
+	       cs->hw.hfcpci.pci_io,
+	       cs->hw.hfcpci.fifos,
+	       (u_int) virt_to_bus(cs->hw.hfcpci.fifos),
+	       cs->irq, HZ);
+
+	spin_lock_irqsave(&cs->lock, flags);
+
+	pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO);	/* enable memory mapped ports, disable busmaster */
+	cs->hw.hfcpci.int_m2 = 0;	/* disable all interrupts */
+	cs->hw.hfcpci.int_m1 = 0;
+	Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
+	Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
+	/* At this point the needed PCI config is done */
+	/* fifos are still not enabled */
+
+	INIT_WORK(&cs->tqueue,  hfcpci_bh);
+	cs->setstack_d = setstack_hfcpci;
+	cs->BC_Send_Data = &hfcpci_send_data;
+	cs->readisac = NULL;
+	cs->writeisac = NULL;
+	cs->readisacfifo = NULL;
+	cs->writeisacfifo = NULL;
+	cs->BC_Read_Reg = NULL;
+	cs->BC_Write_Reg = NULL;
+	cs->irq_func = &hfcpci_interrupt;
+	cs->irq_flags |= IRQF_SHARED;
+	cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
+	cs->hw.hfcpci.timer.data = (long) cs;
+	init_timer(&cs->hw.hfcpci.timer);
+	cs->cardmsg = &hfcpci_card_msg;
+	cs->auxcmd = &hfcpci_auxcmd;
+
+	spin_unlock_irqrestore(&cs->lock, flags);
+
+	return (1);
 }
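
Not part of the patch, just a user-space sketch of the FIFO alignment trick the hfc_pci setup keeps: the chip needs a 32 KiB-aligned buffer, so twice that size is allocated and the pointer is rounded to the next 32 KiB boundary inside the allocation. The constants and printf output are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define FIFO_SIZE   0x8000UL            /* 32 KiB of FIFO space */
#define FIFO_ALIGN  0x8000UL            /* required 32 KiB alignment */

int main(void)
{
	/* Allocate twice the size so a 32 KiB-aligned window always fits. */
	void *raw = malloc(2 * FIFO_SIZE);
	uintptr_t base;
	void *fifos;

	if (!raw)
		return 1;

	/* Round down to the previous 32 KiB boundary, then step up by 32 KiB;
	 * the result is 32 KiB aligned and still inside the allocation. */
	base = (uintptr_t)raw & ~(FIFO_ALIGN - 1);
	fifos = (void *)(base + FIFO_ALIGN);

	printf("raw=%p aligned fifo window=%p\n", raw, fifos);
	free(raw);
	return 0;
}
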
diff --git a/drivers/isdn/hisax/nj_s.c b/drivers/isdn/hisax/nj_s.c
index c09ffb1..fa2db87 100644
--- a/drivers/isdn/hisax/nj_s.c
+++ b/drivers/isdn/hisax/nj_s.c
@@ -148,107 +148,87 @@
 	return(0);
 }
 
-static struct pci_dev *dev_netjet __devinitdata = NULL;
-
-int __devinit
-setup_netjet_s(struct IsdnCard *card)
+static int __devinit njs_pci_probe(struct pci_dev *dev_netjet,
+				   struct IsdnCardState *cs)
 {
-	int bytecnt,cfg;
-	struct IsdnCardState *cs = card->cs;
-	char tmp[64];
+	int cfg;
 
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-	strcpy(tmp, NETjet_S_revision);
-	printk(KERN_INFO "HiSax: Traverse Tech. NETjet-S driver Rev. %s\n", HiSax_getrev(tmp));
-	if (cs->typ != ISDN_CTYPE_NETJET_S)
+	if (pci_enable_device(dev_netjet))
 		return(0);
-	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
-
-#ifdef CONFIG_PCI
-
-	for ( ;; )
-	{
-		if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
-			PCI_DEVICE_ID_TIGERJET_300,  dev_netjet))) {
-			if (pci_enable_device(dev_netjet))
-				return(0);
-			pci_set_master(dev_netjet);
-			cs->irq = dev_netjet->irq;
-			if (!cs->irq) {
-				printk(KERN_WARNING "NETjet-S: No IRQ for PCI card found\n");
-				return(0);
-			}
-			cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
-			if (!cs->hw.njet.base) {
-				printk(KERN_WARNING "NETjet-S: No IO-Adr for PCI card found\n");
-				return(0);
-			}
-			/* the TJ300 and TJ320 must be detected, the IRQ handling is different
-			 * unfortunately the chips use the same device ID, but the TJ320 has
-			 * the bit20 in status PCI cfg register set
-			 */
-			pci_read_config_dword(dev_netjet, 0x04, &cfg);
-			if (cfg & 0x00100000)
-				cs->subtyp = 1; /* TJ320 */
-			else
-				cs->subtyp = 0; /* TJ300 */
-			/* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG www.formula-n.com */
-			if ((dev_netjet->subsystem_vendor == 0x55) &&
-				(dev_netjet->subsystem_device == 0x02)) {
-				printk(KERN_WARNING "Netjet: You tried to load this driver with an incompatible TigerJet-card\n");
-				printk(KERN_WARNING "Use type=41 for Formula-n enter:now ISDN PCI and compatible\n");
-				return(0);
-			}
-			/* end new code */
-		} else {
-			printk(KERN_WARNING "NETjet-S: No PCI card found\n");
-			return(0);
-		}
-
-		cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
-		cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
-
-		cs->hw.njet.ctrl_reg = 0xff;  /* Reset On */
-		byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
-		mdelay(10);
-
-		cs->hw.njet.ctrl_reg = 0x00;  /* Reset Off and status read clear */
-		byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
-		mdelay(10);
-
-		cs->hw.njet.auxd = 0xC0;
-		cs->hw.njet.dmactrl = 0;
-
-		byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
-		byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
-		byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
-
-		switch ( ( ( NETjet_ReadIC( cs, ISAC_RBCH ) >> 5 ) & 3 ) )
-		{
-			case 0 :
-				break;
-
-			case 3 :
-				printk( KERN_WARNING "NETjet-S: NETspider-U PCI card found\n" );
-				continue;
-
-			default :
-				printk( KERN_WARNING "NETjet-S: No PCI card found\n" );
-				return 0;
-                }
-                break;
+	pci_set_master(dev_netjet);
+	cs->irq = dev_netjet->irq;
+	if (!cs->irq) {
+		printk(KERN_WARNING "NETjet-S: No IRQ for PCI card found\n");
+		return(0);
 	}
-#else
+	cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
+	if (!cs->hw.njet.base) {
+		printk(KERN_WARNING "NETjet-S: No IO-Adr for PCI card found\n");
+		return(0);
+	}
+	/* the TJ300 and TJ320 must be detected, the IRQ handling is different
+	 * unfortunately the chips use the same device ID, but the TJ320 has
+	 * the bit20 in status PCI cfg register set
+	 */
+	pci_read_config_dword(dev_netjet, 0x04, &cfg);
+	if (cfg & 0x00100000)
+		cs->subtyp = 1; /* TJ320 */
+	else
+		cs->subtyp = 0; /* TJ300 */
+	/* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG www.formula-n.com */
+	if ((dev_netjet->subsystem_vendor == 0x55) &&
+		(dev_netjet->subsystem_device == 0x02)) {
+		printk(KERN_WARNING "Netjet: You tried to load this driver with an incompatible TigerJet-card\n");
+		printk(KERN_WARNING "Use type=41 for Formula-n enter:now ISDN PCI and compatible\n");
+		return(0);
+	}
+	/* end new code */
 
-	printk(KERN_WARNING "NETjet-S: NO_PCI_BIOS\n");
-	printk(KERN_WARNING "NETjet-S: unable to config NETJET-S PCI\n");
-	return (0);
+	return(1);
+}
 
-#endif /* CONFIG_PCI */
+static int __devinit njs_cs_init(struct IsdnCard *card,
+				 struct IsdnCardState *cs)
+{
 
-	bytecnt = 256;
+	cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
+	cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
+
+	cs->hw.njet.ctrl_reg = 0xff;  /* Reset On */
+	byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
+	mdelay(10);
+
+	cs->hw.njet.ctrl_reg = 0x00;  /* Reset Off and status read clear */
+	byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
+	mdelay(10);
+
+	cs->hw.njet.auxd = 0xC0;
+	cs->hw.njet.dmactrl = 0;
+
+	byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
+	byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
+	byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
+
+	switch ( ( ( NETjet_ReadIC( cs, ISAC_RBCH ) >> 5 ) & 3 ) )
+	{
+		case 0 :
+			return 1;	/* end loop */
+
+		case 3 :
+			printk( KERN_WARNING "NETjet-S: NETspider-U PCI card found\n" );
+			return -1;	/* continue looping */
+
+		default :
+			printk( KERN_WARNING "NETjet-S: No PCI card found\n" );
+			return 0;	/* end loop & function */
+	}
+	return 1;			/* end loop */
+}
+
+static int __devinit njs_cs_init_rest(struct IsdnCard *card,
+				      struct IsdnCardState *cs)
+{
+	const int bytecnt = 256;
 
 	printk(KERN_INFO
 		"NETjet-S: %s card configured at %#lx IRQ %d\n",
@@ -273,5 +253,47 @@
 	cs->irq_func = &netjet_s_interrupt;
 	cs->irq_flags |= IRQF_SHARED;
 	ISACVersion(cs, "NETjet-S:");
+
 	return (1);
 }
+
+static struct pci_dev *dev_netjet __devinitdata = NULL;
+
+int __devinit
+setup_netjet_s(struct IsdnCard *card)
+{
+	int ret;
+	struct IsdnCardState *cs = card->cs;
+	char tmp[64];
+
+#ifdef __BIG_ENDIAN
+#error "not running on big endian machines now"
+#endif
+	strcpy(tmp, NETjet_S_revision);
+	printk(KERN_INFO "HiSax: Traverse Tech. NETjet-S driver Rev. %s\n", HiSax_getrev(tmp));
+	if (cs->typ != ISDN_CTYPE_NETJET_S)
+		return(0);
+	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
+
+	for ( ;; )
+	{
+		if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
+			PCI_DEVICE_ID_TIGERJET_300,  dev_netjet))) {
+			ret = njs_pci_probe(dev_netjet, cs);
+			if (!ret)
+				return(0);
+		} else {
+			printk(KERN_WARNING "NETjet-S: No PCI card found\n");
+			return(0);
+		}
+
+		ret = njs_cs_init(card, cs);
+		if (!ret)
+			return(0);
+		if (ret > 0)
+			break;
+		/* otherwise, ret < 0, continue looping */
+	}
+
+	return njs_cs_init_rest(card, cs);
+}
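
Outside the patch: a small sketch of the return-value convention njs_cs_init() and nju_cs_init() adopt so the old in-loop `continue` survives the split into helpers — 1 means stop and succeed, 0 means stop and fail, a negative value means try the next candidate device. The chip IDs and scan results below are invented.

#include <stdio.h>

/* 1 = right card, stop; 0 = fatal, stop; -1 = wrong card, keep scanning */
static int identify(int chip_id)
{
	switch (chip_id) {
	case 0:
		return 1;                /* the card we want */
	case 3:
		fprintf(stderr, "sibling card found, keep looking\n");
		return -1;
	default:
		fprintf(stderr, "no usable card\n");
		return 0;
	}
}

int main(void)
{
	int candidates[] = { 3, 3, 0 };  /* pretend PCI scan results */
	unsigned int i;
	int ret = 0;

	for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
		ret = identify(candidates[i]);
		if (ret >= 0)
			break;           /* 1: found it, 0: give up */
	}
	printf("result: %d\n", ret);
	return ret == 1 ? 0 : 1;
}
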
diff --git a/drivers/isdn/hisax/nj_u.c b/drivers/isdn/hisax/nj_u.c
index 8202cf3..f017d38 100644
--- a/drivers/isdn/hisax/nj_u.c
+++ b/drivers/isdn/hisax/nj_u.c
@@ -128,93 +128,69 @@
 	return(0);
 }
 
-static struct pci_dev *dev_netjet __devinitdata = NULL;
-
-int __devinit
-setup_netjet_u(struct IsdnCard *card)
+static int __devinit nju_pci_probe(struct pci_dev *dev_netjet,
+				   struct IsdnCardState *cs)
 {
-	int bytecnt;
-	struct IsdnCardState *cs = card->cs;
-	char tmp[64];
-#ifdef CONFIG_PCI
-#endif
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-	strcpy(tmp, NETjet_U_revision);
-	printk(KERN_INFO "HiSax: Traverse Tech. NETspider-U driver Rev. %s\n", HiSax_getrev(tmp));
-	if (cs->typ != ISDN_CTYPE_NETJET_U)
+	if (pci_enable_device(dev_netjet))
 		return(0);
-	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
-
-#ifdef CONFIG_PCI
-
-	for ( ;; )
-	{
-		if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
-			PCI_DEVICE_ID_TIGERJET_300,  dev_netjet))) {
-			if (pci_enable_device(dev_netjet))
-				return(0);
-			pci_set_master(dev_netjet);
-			cs->irq = dev_netjet->irq;
-			if (!cs->irq) {
-				printk(KERN_WARNING "NETspider-U: No IRQ for PCI card found\n");
-				return(0);
-			}
-			cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
-			if (!cs->hw.njet.base) {
-				printk(KERN_WARNING "NETspider-U: No IO-Adr for PCI card found\n");
-				return(0);
-			}
-		} else {
-			printk(KERN_WARNING "NETspider-U: No PCI card found\n");
-			return(0);
-		}
-
-		cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
-		cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
-		mdelay(10);
-
-		cs->hw.njet.ctrl_reg = 0xff;  /* Reset On */
-		byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
-		mdelay(10);
-
-		cs->hw.njet.ctrl_reg = 0x00;  /* Reset Off and status read clear */
-		byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
-		mdelay(10);
-
-		cs->hw.njet.auxd = 0xC0;
-		cs->hw.njet.dmactrl = 0;
-
-		byteout(cs->hw.njet.auxa, 0);
-		byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
-		byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
-		byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
-
-		switch ( ( ( NETjet_ReadIC( cs, ICC_RBCH ) >> 5 ) & 3 ) )
-		{
-			case 3 :
-				break;
-
-			case 0 :
-				printk( KERN_WARNING "NETspider-U: NETjet-S PCI card found\n" );
-				continue;
-
-			default :
-				printk( KERN_WARNING "NETspider-U: No PCI card found\n" );
-				return 0;
-                }
-                break;
+	pci_set_master(dev_netjet);
+	cs->irq = dev_netjet->irq;
+	if (!cs->irq) {
+		printk(KERN_WARNING "NETspider-U: No IRQ for PCI card found\n");
+		return(0);
 	}
-#else
+	cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
+	if (!cs->hw.njet.base) {
+		printk(KERN_WARNING "NETspider-U: No IO-Adr for PCI card found\n");
+		return(0);
+	}
 
-	printk(KERN_WARNING "NETspider-U: NO_PCI_BIOS\n");
-	printk(KERN_WARNING "NETspider-U: unable to config NETspider-U PCI\n");
-	return (0);
+	return (1);
+}
 
-#endif /* CONFIG_PCI */
+static int __devinit nju_cs_init(struct IsdnCard *card,
+				 struct IsdnCardState *cs)
+{
+	cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
+	cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
+	mdelay(10);
 
-	bytecnt = 256;
+	cs->hw.njet.ctrl_reg = 0xff;  /* Reset On */
+	byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
+	mdelay(10);
+
+	cs->hw.njet.ctrl_reg = 0x00;  /* Reset Off and status read clear */
+	byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
+	mdelay(10);
+
+	cs->hw.njet.auxd = 0xC0;
+	cs->hw.njet.dmactrl = 0;
+
+	byteout(cs->hw.njet.auxa, 0);
+	byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
+	byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
+	byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
+
+	switch ( ( ( NETjet_ReadIC( cs, ICC_RBCH ) >> 5 ) & 3 ) )
+	{
+		case 3 :
+			return 1;	/* end loop */
+
+		case 0 :
+			printk( KERN_WARNING "NETspider-U: NETjet-S PCI card found\n" );
+			return -1;	/* continue looping */
+
+		default :
+			printk( KERN_WARNING "NETspider-U: No PCI card found\n" );
+			return 0;	/* end loop & function */
+	}
+	return 1;			/* end loop */
+}
+
+static int __devinit nju_cs_init_rest(struct IsdnCard *card,
+				      struct IsdnCardState *cs)
+{
+	const int bytecnt = 256;
 
 	printk(KERN_INFO
 		"NETspider-U: PCI card configured at %#lx IRQ %d\n",
@@ -239,5 +215,48 @@
 	cs->irq_func = &netjet_u_interrupt;
 	cs->irq_flags |= IRQF_SHARED;
 	ICCVersion(cs, "NETspider-U:");
+
 	return (1);
 }
+
+static struct pci_dev *dev_netjet __devinitdata = NULL;
+
+int __devinit
+setup_netjet_u(struct IsdnCard *card)
+{
+	int ret;
+	struct IsdnCardState *cs = card->cs;
+	char tmp[64];
+
+#ifdef __BIG_ENDIAN
+#error "not running on big endian machines now"
+#endif
+
+	strcpy(tmp, NETjet_U_revision);
+	printk(KERN_INFO "HiSax: Traverse Tech. NETspider-U driver Rev. %s\n", HiSax_getrev(tmp));
+	if (cs->typ != ISDN_CTYPE_NETJET_U)
+		return(0);
+	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
+
+	for ( ;; )
+	{
+		if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
+			PCI_DEVICE_ID_TIGERJET_300,  dev_netjet))) {
+			ret = nju_pci_probe(dev_netjet, cs);
+			if (!ret)
+				return(0);
+		} else {
+			printk(KERN_WARNING "NETspider-U: No PCI card found\n");
+			return(0);
+		}
+
+		ret = nju_cs_init(card, cs);
+		if (!ret)
+			return (0);
+		if (ret > 0)
+			break;
+		/* ret < 0 == continue looping */
+	}
+
+	return nju_cs_init_rest(card, cs);
+}
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c
index 030d162..ad06f3c 100644
--- a/drivers/isdn/hisax/sedlbauer.c
+++ b/drivers/isdn/hisax/sedlbauer.c
@@ -451,6 +451,9 @@
 			spin_unlock_irqrestore(&cs->lock, flags);
 			return(0);
 		case CARD_RELEASE:
+			if (cs->hw.sedl.bus == SEDL_BUS_PCI)
+				/* disable all IRQ */
+				byteout(cs->hw.sedl.cfg_reg+ 5, 0);
 			if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
 				spin_lock_irqsave(&cs->lock, flags);
 				writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx,
@@ -468,6 +471,9 @@
 			return(0);
 		case CARD_INIT:
 			spin_lock_irqsave(&cs->lock, flags);
+			if (cs->hw.sedl.bus == SEDL_BUS_PCI)
+				/* enable all IRQ */
+				byteout(cs->hw.sedl.cfg_reg+ 5, 0x02);
 			reset_sedlbauer(cs);
 			if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
 				clear_pending_isac_ints(cs);
@@ -667,7 +673,7 @@
 		byteout(cs->hw.sedl.cfg_reg, 0xff);
 		byteout(cs->hw.sedl.cfg_reg, 0x00);
 		byteout(cs->hw.sedl.cfg_reg+ 2, 0xdd);
-		byteout(cs->hw.sedl.cfg_reg+ 5, 0x02);
+		byteout(cs->hw.sedl.cfg_reg+ 5, 0); /* disable all IRQ */
 		byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_on);
 		mdelay(2);
 		byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_off);
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index 3ef567b..e91c187 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -86,7 +86,6 @@
 
 
 menu "ISDN feature submodules"
-	depends on ISDN
 
 config ISDN_DRV_LOOP
 	tristate "isdnloop support"
@@ -100,7 +99,7 @@
 
 config ISDN_DIVERSION
 	tristate "Support isdn diversion services"
-	depends on ISDN && ISDN_I4L
+	depends on ISDN_I4L
 	help
 	  This option allows you to use some supplementary diversion
 	  services in conjunction with the HiSax driver on an EURO/DSS1
@@ -120,13 +119,13 @@
 endmenu
 
 comment "ISDN4Linux hardware drivers"
-	depends on NET && ISDN && ISDN_I4L
+	depends on ISDN_I4L
 
 source "drivers/isdn/hisax/Kconfig"
 
 
 menu "Active cards"
-	depends on NET && ISDN && ISDN_I4L!=n
+	depends on ISDN_I4L!=n
 
 source "drivers/isdn/icn/Kconfig"
 
diff --git a/drivers/isdn/sc/card.h b/drivers/isdn/sc/card.h
index 4fbfa82..5992f63 100644
--- a/drivers/isdn/sc/card.h
+++ b/drivers/isdn/sc/card.h
@@ -125,7 +125,7 @@
 int receivemessage(int card, RspMessage *rspmsg);
 int sc_ioctl(int card, scs_ioctl *data);
 int setup_buffers(int card, int c);
-void check_reset(unsigned long data);
+void sc_check_reset(unsigned long data);
 void check_phystat(unsigned long data);
 
 #endif /* CARD_H */
diff --git a/drivers/isdn/sc/command.c b/drivers/isdn/sc/command.c
index b7bb7cb..0e4969c 100644
--- a/drivers/isdn/sc/command.c
+++ b/drivers/isdn/sc/command.c
@@ -344,7 +344,7 @@
 
 	spin_lock_irqsave(&sc_adapter[card]->lock, flags);
 	init_timer(&sc_adapter[card]->reset_timer);
-	sc_adapter[card]->reset_timer.function = check_reset;
+	sc_adapter[card]->reset_timer.function = sc_check_reset;
 	sc_adapter[card]->reset_timer.data = card;
 	sc_adapter[card]->reset_timer.expires = jiffies + CHECKRESET_TIME;
 	add_timer(&sc_adapter[card]->reset_timer);
diff --git a/drivers/isdn/sc/timer.c b/drivers/isdn/sc/timer.c
index cc1b886..91fbe0d 100644
--- a/drivers/isdn/sc/timer.c
+++ b/drivers/isdn/sc/timer.c
@@ -43,7 +43,7 @@
  * Then, check to see if the signate has been set. Next, set the
  * signature to a known value and issue a startproc if needed.
  */
-void check_reset(unsigned long data)
+void sc_check_reset(unsigned long data)
 {
 	unsigned long flags;
 	unsigned long sig;
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index e8e37d8..33fa28a 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -1,12 +1,17 @@
 #
 # KVM configuration
 #
-menu "Virtualization"
+menuconfig VIRTUALIZATION
+	bool "Virtualization"
 	depends on X86
+	default y
+
+if VIRTUALIZATION
 
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on X86 && EXPERIMENTAL
+	depends on X86_CMPXCHG64 || 64BIT
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions.  You will need a fairly recent
@@ -35,4 +40,4 @@
 	  Provides support for KVM on AMD processors equipped with the AMD-V
 	  (SVM) extensions.
 
-endmenu
+endif # VIRTUALIZATION
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 152312c..a7c5e6b 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -10,6 +10,8 @@
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
 #include <linux/mm.h>
 #include <asm/signal.h>
 
@@ -18,6 +20,7 @@
 #include <linux/kvm_para.h>
 
 #define CR0_PE_MASK (1ULL << 0)
+#define CR0_MP_MASK (1ULL << 1)
 #define CR0_TS_MASK (1ULL << 3)
 #define CR0_NE_MASK (1ULL << 5)
 #define CR0_WP_MASK (1ULL << 16)
@@ -42,7 +45,8 @@
 	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
 	 | CR0_NW_MASK | CR0_CD_MASK)
 #define KVM_VM_CR0_ALWAYS_ON \
-	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK)
+	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK | CR0_TS_MASK \
+	 | CR0_MP_MASK)
 #define KVM_GUEST_CR4_MASK \
 	(CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
@@ -51,10 +55,10 @@
 #define INVALID_PAGE (~(hpa_t)0)
 #define UNMAPPED_GVA (~(gpa_t)0)
 
-#define KVM_MAX_VCPUS 1
+#define KVM_MAX_VCPUS 4
 #define KVM_ALIAS_SLOTS 4
 #define KVM_MEMORY_SLOTS 4
-#define KVM_NUM_MMU_PAGES 256
+#define KVM_NUM_MMU_PAGES 1024
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 40
@@ -80,6 +84,11 @@
 #define KVM_PIO_PAGE_OFFSET 1
 
 /*
+ * vcpu->requests bit members
+ */
+#define KVM_TLB_FLUSH 0
+
+/*
  * Address types:
  *
  *  gva - guest virtual address
@@ -137,7 +146,7 @@
 	gfn_t gfn;
 	union kvm_mmu_page_role role;
 
-	hpa_t page_hpa;
+	u64 *spt;
 	unsigned long slot_bitmap; /* One bit set per slot which has memory
 				    * in this shadow page.
 				    */
@@ -232,6 +241,7 @@
 	struct page *guest_pages[2];
 	unsigned guest_page_offset;
 	int in;
+	int port;
 	int size;
 	int string;
 	int down;
@@ -252,8 +262,70 @@
 	u32 halt_exits;
 	u32 request_irq_exits;
 	u32 irq_exits;
+	u32 light_exits;
+	u32 efer_reload;
 };
 
+struct kvm_io_device {
+	void (*read)(struct kvm_io_device *this,
+		     gpa_t addr,
+		     int len,
+		     void *val);
+	void (*write)(struct kvm_io_device *this,
+		      gpa_t addr,
+		      int len,
+		      const void *val);
+	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
+	void (*destructor)(struct kvm_io_device *this);
+
+	void             *private;
+};
+
+static inline void kvm_iodevice_read(struct kvm_io_device *dev,
+				     gpa_t addr,
+				     int len,
+				     void *val)
+{
+	dev->read(dev, addr, len, val);
+}
+
+static inline void kvm_iodevice_write(struct kvm_io_device *dev,
+				      gpa_t addr,
+				      int len,
+				      const void *val)
+{
+	dev->write(dev, addr, len, val);
+}
+
+static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
+{
+	return dev->in_range(dev, addr);
+}
+
+static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
+{
+	if (dev->destructor)
+		dev->destructor(dev);
+}
+
+/*
+ * It would be nice to use something smarter than a linear search, TBD...
+ * Thankfully we dont expect many devices to register (famous last words :),
+ * so until then it will suffice.  At least its abstracted so we can change
+ * in one place.
+ */
+struct kvm_io_bus {
+	int                   dev_count;
+#define NR_IOBUS_DEVS 6
+	struct kvm_io_device *devs[NR_IOBUS_DEVS];
+};
+
+void kvm_io_bus_init(struct kvm_io_bus *bus);
+void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
+void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
+			     struct kvm_io_device *dev);
+
 struct kvm_vcpu {
 	struct kvm *kvm;
 	union {
@@ -266,6 +338,8 @@
 	u64 host_tsc;
 	struct kvm_run *run;
 	int interrupt_window_open;
+	int guest_mode;
+	unsigned long requests;
 	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
 #define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
 	unsigned long irq_pending[NR_IRQ_WORDS];
@@ -285,15 +359,20 @@
 	u64 apic_base;
 	u64 ia32_misc_enable_msr;
 	int nmsrs;
+	int save_nmsrs;
+	int msr_offset_efer;
+#ifdef CONFIG_X86_64
+	int msr_offset_kernel_gs_base;
+#endif
 	struct vmx_msr_entry *guest_msrs;
 	struct vmx_msr_entry *host_msrs;
 
-	struct list_head free_pages;
-	struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
 	struct kvm_mmu mmu;
 
 	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
 	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+	struct kvm_mmu_memory_cache mmu_page_cache;
+	struct kvm_mmu_memory_cache mmu_page_header_cache;
 
 	gfn_t last_pt_write_gfn;
 	int   last_pt_write_count;
@@ -305,6 +384,11 @@
 	char *guest_fx_image;
 	int fpu_active;
 	int guest_fpu_loaded;
+	struct vmx_host_state {
+		int loaded;
+		u16 fs_sel, gs_sel, ldt_sel;
+		int fs_gs_ldt_reload_needed;
+	} vmx_host_state;
 
 	int mmio_needed;
 	int mmio_read_completed;
@@ -331,6 +415,7 @@
 			u32 ar;
 		} tr, es, ds, fs, gs;
 	} rmode;
+	int halt_request; /* real mode on Intel only */
 
 	int cpuid_nent;
 	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
@@ -362,12 +447,15 @@
 	struct list_head active_mmu_pages;
 	int n_free_mmu_pages;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+	int nvcpus;
 	struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
 	int memory_config_version;
 	int busy;
 	unsigned long rmap_overflow;
 	struct list_head vm_list;
 	struct file *filp;
+	struct kvm_io_bus mmio_bus;
+	struct kvm_io_bus pio_bus;
 };
 
 struct descriptor_table {
@@ -488,6 +576,7 @@
 		  int size, unsigned long count, int string, int down,
 		  gva_t address, int rep, unsigned port);
 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
+int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
 int emulate_clts(struct kvm_vcpu *vcpu);
 int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr,
@@ -511,6 +600,7 @@
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+void kvm_flush_remote_tlbs(struct kvm *kvm);
 
 int kvm_read_guest(struct kvm_vcpu *vcpu,
 	       gva_t addr,
@@ -524,10 +614,12 @@
 
 unsigned long segment_base(u16 selector);
 
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
-void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+		       const u8 *old, const u8 *new, int bytes);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
+int kvm_mmu_load(struct kvm_vcpu *vcpu);
+void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 
 int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
@@ -539,6 +631,14 @@
 	return vcpu->mmu.page_fault(vcpu, gva, error_code);
 }
 
+static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
+{
+	if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
+		return 0;
+
+	return kvm_mmu_load(vcpu);
+}
+
 static inline int is_long_mode(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
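
A stand-alone sketch (not from the patch) of the linear lookup behind the kvm_io_bus_find_dev() declaration above: address ranges are scanned device by device, which is cheap while dev_count stays at a handful. The device table, address window fields and the sample PIT address are made up; in the real code in_range() is a per-device callback rather than a start/length pair.

#include <stdio.h>
#include <stddef.h>

#define NR_DEVS 6

struct io_device {
	unsigned long start, len;        /* address window this device claims */
	const char *name;
};

struct io_bus {
	int dev_count;
	struct io_device *devs[NR_DEVS];
};

/* Linear scan, like kvm_io_bus_find_dev(): fine while dev_count stays tiny. */
static struct io_device *bus_find_dev(struct io_bus *bus, unsigned long addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct io_device *dev = bus->devs[i];

		if (addr >= dev->start && addr < dev->start + dev->len)
			return dev;
	}
	return NULL;                     /* not handled in kernel, exit to userspace */
}

int main(void)
{
	struct io_device pit = { 0x40, 4, "pit" };
	struct io_bus bus = { 1, { &pit } };
	struct io_device *hit = bus_find_dev(&bus, 0x42);

	printf("0x42 -> %s\n", hit ? hit->name : "userspace");
	return 0;
}
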
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 8f1f07a..1b206f1 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -16,34 +16,33 @@
  */
 
 #include "kvm.h"
+#include "x86_emulate.h"
+#include "segment_descriptor.h"
 
 #include <linux/kvm.h>
 #include <linux/module.h>
 #include <linux/errno.h>
-#include <linux/magic.h>
-#include <asm/processor.h>
 #include <linux/percpu.h>
 #include <linux/gfp.h>
-#include <asm/msr.h>
 #include <linux/mm.h>
 #include <linux/miscdevice.h>
 #include <linux/vmalloc.h>
-#include <asm/uaccess.h>
 #include <linux/reboot.h>
-#include <asm/io.h>
 #include <linux/debugfs.h>
 #include <linux/highmem.h>
 #include <linux/file.h>
-#include <asm/desc.h>
 #include <linux/sysdev.h>
 #include <linux/cpu.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
 #include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+#include <linux/anon_inodes.h>
 
-#include "x86_emulate.h"
-#include "segment_descriptor.h"
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/desc.h>
 
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
@@ -51,8 +50,12 @@
 static DEFINE_SPINLOCK(kvm_lock);
 static LIST_HEAD(vm_list);
 
+static cpumask_t cpus_hardware_enabled;
+
 struct kvm_arch_ops *kvm_arch_ops;
 
+static void hardware_disable(void *ignored);
+
 #define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
 
 static struct kvm_stats_debugfs_item {
@@ -72,13 +75,13 @@
 	{ "halt_exits", STAT_OFFSET(halt_exits) },
 	{ "request_irq", STAT_OFFSET(request_irq_exits) },
 	{ "irq_exits", STAT_OFFSET(irq_exits) },
+	{ "light_exits", STAT_OFFSET(light_exits) },
+	{ "efer_reload", STAT_OFFSET(efer_reload) },
 	{ NULL }
 };
 
 static struct dentry *debugfs_dir;
 
-struct vfsmount *kvmfs_mnt;
-
 #define MAX_IO_MSRS 256
 
 #define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
@@ -100,55 +103,6 @@
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 			   unsigned long arg);
 
-static struct inode *kvmfs_inode(struct file_operations *fops)
-{
-	int error = -ENOMEM;
-	struct inode *inode = new_inode(kvmfs_mnt->mnt_sb);
-
-	if (!inode)
-		goto eexit_1;
-
-	inode->i_fop = fops;
-
-	/*
-	 * Mark the inode dirty from the very beginning,
-	 * that way it will never be moved to the dirty
-	 * list because mark_inode_dirty() will think
-	 * that it already _is_ on the dirty list.
-	 */
-	inode->i_state = I_DIRTY;
-	inode->i_mode = S_IRUSR | S_IWUSR;
-	inode->i_uid = current->fsuid;
-	inode->i_gid = current->fsgid;
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-	return inode;
-
-eexit_1:
-	return ERR_PTR(error);
-}
-
-static struct file *kvmfs_file(struct inode *inode, void *private_data)
-{
-	struct file *file = get_empty_filp();
-
-	if (!file)
-		return ERR_PTR(-ENFILE);
-
-	file->f_path.mnt = mntget(kvmfs_mnt);
-	file->f_path.dentry = d_alloc_anon(inode);
-	if (!file->f_path.dentry)
-		return ERR_PTR(-ENOMEM);
-	file->f_mapping = inode->i_mapping;
-
-	file->f_pos = 0;
-	file->f_flags = O_RDWR;
-	file->f_op = inode->i_fop;
-	file->f_mode = FMODE_READ | FMODE_WRITE;
-	file->f_version = 0;
-	file->private_data = private_data;
-	return file;
-}
-
 unsigned long segment_base(u16 selector)
 {
 	struct descriptor_table gdt;
@@ -307,6 +261,48 @@
 	mutex_unlock(&vcpu->mutex);
 }
 
+static void ack_flush(void *_completed)
+{
+	atomic_t *completed = _completed;
+
+	atomic_inc(completed);
+}
+
+void kvm_flush_remote_tlbs(struct kvm *kvm)
+{
+	int i, cpu, needed;
+	cpumask_t cpus;
+	struct kvm_vcpu *vcpu;
+	atomic_t completed;
+
+	atomic_set(&completed, 0);
+	cpus_clear(cpus);
+	needed = 0;
+	for (i = 0; i < kvm->nvcpus; ++i) {
+		vcpu = &kvm->vcpus[i];
+		if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
+			continue;
+		cpu = vcpu->cpu;
+		if (cpu != -1 && cpu != raw_smp_processor_id())
+			if (!cpu_isset(cpu, cpus)) {
+				cpu_set(cpu, cpus);
+				++needed;
+			}
+	}
+
+	/*
+	 * We really want smp_call_function_mask() here.  But that's not
+	 * available, so ipi all cpus in parallel and wait for them
+	 * to complete.
+	 */
+	for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
+		smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
+	while (atomic_read(&completed) != needed) {
+		cpu_relax();
+		barrier();
+	}
+}
+
 static struct kvm *kvm_create_vm(void)
 {
 	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
@@ -315,8 +311,13 @@
 	if (!kvm)
 		return ERR_PTR(-ENOMEM);
 
+	kvm_io_bus_init(&kvm->pio_bus);
 	spin_lock_init(&kvm->lock);
 	INIT_LIST_HEAD(&kvm->active_mmu_pages);
+	spin_lock(&kvm_lock);
+	list_add(&kvm->vm_list, &vm_list);
+	spin_unlock(&kvm_lock);
+	kvm_io_bus_init(&kvm->mmio_bus);
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		struct kvm_vcpu *vcpu = &kvm->vcpus[i];
 
@@ -324,10 +325,6 @@
 		vcpu->cpu = -1;
 		vcpu->kvm = kvm;
 		vcpu->mmu.root_hpa = INVALID_PAGE;
-		INIT_LIST_HEAD(&vcpu->free_pages);
-		spin_lock(&kvm_lock);
-		list_add(&kvm->vm_list, &vm_list);
-		spin_unlock(&kvm_lock);
 	}
 	return kvm;
 }
@@ -380,6 +377,16 @@
 		}
 }
 
+static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
+{
+	if (!vcpu->vmcs)
+		return;
+
+	vcpu_load(vcpu);
+	kvm_mmu_unload(vcpu);
+	vcpu_put(vcpu);
+}
+
 static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	if (!vcpu->vmcs)
@@ -400,6 +407,11 @@
 {
 	unsigned int i;
 
+	/*
+	 * Unpin any mmu pages first.
+	 */
+	for (i = 0; i < KVM_MAX_VCPUS; ++i)
+		kvm_unload_vcpu_mmu(&kvm->vcpus[i]);
 	for (i = 0; i < KVM_MAX_VCPUS; ++i)
 		kvm_free_vcpu(&kvm->vcpus[i]);
 }
@@ -414,6 +426,8 @@
 	spin_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
 	spin_unlock(&kvm_lock);
+	kvm_io_bus_destroy(&kvm->pio_bus);
+	kvm_io_bus_destroy(&kvm->mmio_bus);
 	kvm_free_vcpus(kvm);
 	kvm_free_physmem(kvm);
 	kfree(kvm);
@@ -969,7 +983,7 @@
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
-	struct kvm_memory_slot *memslot = NULL;
+	struct kvm_memory_slot *memslot;
 	unsigned long rel_gfn;
 
 	for (i = 0; i < kvm->nmemslots; ++i) {
@@ -978,7 +992,7 @@
 		if (gfn >= memslot->base_gfn
 		    && gfn < memslot->base_gfn + memslot->npages) {
 
-			if (!memslot || !memslot->dirty_bitmap)
+			if (!memslot->dirty_bitmap)
 				return;
 
 			rel_gfn = gfn - memslot->base_gfn;
@@ -1037,12 +1051,31 @@
 	return X86EMUL_UNHANDLEABLE;
 }
 
+static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
+						gpa_t addr)
+{
+	/*
+	 * Note that it's important to have this wrapper function because
+	 * in the very near future we will be checking for MMIOs against
+	 * the LAPIC as well as the general MMIO bus
+	 */
+	return kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
+}
+
+static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
+					       gpa_t addr)
+{
+	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
+}
+
 static int emulator_read_emulated(unsigned long addr,
 				  void *val,
 				  unsigned int bytes,
 				  struct x86_emulate_ctxt *ctxt)
 {
-	struct kvm_vcpu *vcpu = ctxt->vcpu;
+	struct kvm_vcpu      *vcpu = ctxt->vcpu;
+	struct kvm_io_device *mmio_dev;
+	gpa_t                 gpa;
 
 	if (vcpu->mmio_read_completed) {
 		memcpy(val, vcpu->mmio_data, bytes);
@@ -1051,18 +1084,26 @@
 	} else if (emulator_read_std(addr, val, bytes, ctxt)
 		   == X86EMUL_CONTINUE)
 		return X86EMUL_CONTINUE;
-	else {
-		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
 
-		if (gpa == UNMAPPED_GVA)
-			return X86EMUL_PROPAGATE_FAULT;
-		vcpu->mmio_needed = 1;
-		vcpu->mmio_phys_addr = gpa;
-		vcpu->mmio_size = bytes;
-		vcpu->mmio_is_write = 0;
+	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+	if (gpa == UNMAPPED_GVA)
+		return X86EMUL_PROPAGATE_FAULT;
 
-		return X86EMUL_UNHANDLEABLE;
+	/*
+	 * Is this MMIO handled locally?
+	 */
+	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+	if (mmio_dev) {
+		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
+		return X86EMUL_CONTINUE;
 	}
+
+	vcpu->mmio_needed = 1;
+	vcpu->mmio_phys_addr = gpa;
+	vcpu->mmio_size = bytes;
+	vcpu->mmio_is_write = 0;
+
+	return X86EMUL_UNHANDLEABLE;
 }
 
 static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1070,18 +1111,20 @@
 {
 	struct page *page;
 	void *virt;
+	unsigned offset = offset_in_page(gpa);
 
 	if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
 		return 0;
 	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 	if (!page)
 		return 0;
-	kvm_mmu_pre_write(vcpu, gpa, bytes);
 	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
 	virt = kmap_atomic(page, KM_USER0);
-	memcpy(virt + offset_in_page(gpa), val, bytes);
+	if (memcmp(virt + offset_in_page(gpa), val, bytes)) {
+		kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
+		memcpy(virt + offset_in_page(gpa), val, bytes);
+	}
 	kunmap_atomic(virt, KM_USER0);
-	kvm_mmu_post_write(vcpu, gpa, bytes);
 	return 1;
 }
 
@@ -1090,8 +1133,9 @@
 				   unsigned int bytes,
 				   struct x86_emulate_ctxt *ctxt)
 {
-	struct kvm_vcpu *vcpu = ctxt->vcpu;
-	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+	struct kvm_vcpu      *vcpu = ctxt->vcpu;
+	struct kvm_io_device *mmio_dev;
+	gpa_t                 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
 
 	if (gpa == UNMAPPED_GVA) {
 		kvm_arch_ops->inject_page_fault(vcpu, addr, 2);
@@ -1101,6 +1145,15 @@
 	if (emulator_write_phys(vcpu, gpa, val, bytes))
 		return X86EMUL_CONTINUE;
 
+	/*
+	 * Is this MMIO handled locally?
+	 */
+	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+	if (mmio_dev) {
+		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
+		return X86EMUL_CONTINUE;
+	}
+
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_phys_addr = gpa;
 	vcpu->mmio_size = bytes;
@@ -1269,6 +1322,17 @@
 }
 EXPORT_SYMBOL_GPL(emulate_instruction);
 
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->irq_summary)
+		return 1;
+
+	vcpu->run->exit_reason = KVM_EXIT_HLT;
+	++vcpu->stat.halt_exits;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+
 int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
@@ -1469,6 +1533,7 @@
 	case MSR_IA32_MC0_MISC+16:
 	case MSR_IA32_UCODE_REV:
 	case MSR_IA32_PERF_STATUS:
+	case MSR_IA32_EBL_CR_POWERON:
 		/* MTRR registers */
 	case 0xfe:
 	case 0x200 ... 0x2ff:
@@ -1727,6 +1792,20 @@
 	return 0;
 }
 
+void kernel_pio(struct kvm_io_device *pio_dev, struct kvm_vcpu *vcpu)
+{
+	/* TODO: String I/O for in kernel device */
+
+	if (vcpu->pio.in)
+		kvm_iodevice_read(pio_dev, vcpu->pio.port,
+				  vcpu->pio.size,
+				  vcpu->pio_data);
+	else
+		kvm_iodevice_write(pio_dev, vcpu->pio.port,
+				   vcpu->pio.size,
+				   vcpu->pio_data);
+}
+
 int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		  int size, unsigned long count, int string, int down,
 		  gva_t address, int rep, unsigned port)
@@ -1735,6 +1814,7 @@
 	int i;
 	int nr_pages = 1;
 	struct page *page;
+	struct kvm_io_device *pio_dev;
 
 	vcpu->run->exit_reason = KVM_EXIT_IO;
 	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -1746,17 +1826,27 @@
 	vcpu->pio.cur_count = count;
 	vcpu->pio.size = size;
 	vcpu->pio.in = in;
+	vcpu->pio.port = port;
 	vcpu->pio.string = string;
 	vcpu->pio.down = down;
 	vcpu->pio.guest_page_offset = offset_in_page(address);
 	vcpu->pio.rep = rep;
 
+	pio_dev = vcpu_find_pio_dev(vcpu, port);
 	if (!string) {
 		kvm_arch_ops->cache_regs(vcpu);
 		memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
 		kvm_arch_ops->decache_regs(vcpu);
+		if (pio_dev) {
+			kernel_pio(pio_dev, vcpu);
+			complete_pio(vcpu);
+			return 1;
+		}
 		return 0;
 	}
+	/* TODO: String I/O for in kernel device */
+	if (pio_dev)
+		printk(KERN_ERR "kvm_setup_pio: no string io support\n");
 
 	if (!count) {
 		kvm_arch_ops->skip_emulated_instruction(vcpu);
@@ -2273,34 +2363,12 @@
 	struct inode *inode;
 	struct file *file;
 
+	r = anon_inode_getfd(&fd, &inode, &file,
+			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
+	if (r)
+		return r;
 	atomic_inc(&vcpu->kvm->filp->f_count);
-	inode = kvmfs_inode(&kvm_vcpu_fops);
-	if (IS_ERR(inode)) {
-		r = PTR_ERR(inode);
-		goto out1;
-	}
-
-	file = kvmfs_file(inode, vcpu);
-	if (IS_ERR(file)) {
-		r = PTR_ERR(file);
-		goto out2;
-	}
-
-	r = get_unused_fd();
-	if (r < 0)
-		goto out3;
-	fd = r;
-	fd_install(fd, file);
-
 	return fd;
-
-out3:
-	fput(file);
-out2:
-	iput(inode);
-out1:
-	fput(vcpu->kvm->filp);
-	return r;
 }
 
 /*
@@ -2363,6 +2431,11 @@
 	if (r < 0)
 		goto out_free_vcpus;
 
+	spin_lock(&kvm_lock);
+	if (n >= kvm->nvcpus)
+		kvm->nvcpus = n + 1;
+	spin_unlock(&kvm_lock);
+
 	return r;
 
 out_free_vcpus:
@@ -2376,6 +2449,27 @@
 	return r;
 }
 
+static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
+{
+	u64 efer;
+	int i;
+	struct kvm_cpuid_entry *e, *entry;
+
+	rdmsrl(MSR_EFER, efer);
+	entry = NULL;
+	for (i = 0; i < vcpu->cpuid_nent; ++i) {
+		e = &vcpu->cpuid_entries[i];
+		if (e->function == 0x80000001) {
+			entry = e;
+			break;
+		}
+	}
+	if (entry && (entry->edx & EFER_NX) && !(efer & EFER_NX)) {
+		entry->edx &= ~(1 << 20);
+		printk(KERN_INFO ": guest NX capability removed\n");
+	}
+}
+
 static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 				    struct kvm_cpuid *cpuid,
 				    struct kvm_cpuid_entry __user *entries)
@@ -2390,6 +2484,7 @@
 			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
 		goto out;
 	vcpu->cpuid_nent = cpuid->nent;
+	cpuid_fix_nx_cap(vcpu);
 	return 0;
 
 out:
@@ -2738,41 +2833,18 @@
 	struct file *file;
 	struct kvm *kvm;
 
-	inode = kvmfs_inode(&kvm_vm_fops);
-	if (IS_ERR(inode)) {
-		r = PTR_ERR(inode);
-		goto out1;
-	}
-
 	kvm = kvm_create_vm();
-	if (IS_ERR(kvm)) {
-		r = PTR_ERR(kvm);
-		goto out2;
+	if (IS_ERR(kvm))
+		return PTR_ERR(kvm);
+	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
+	if (r) {
+		kvm_destroy_vm(kvm);
+		return r;
 	}
 
-	file = kvmfs_file(inode, kvm);
-	if (IS_ERR(file)) {
-		r = PTR_ERR(file);
-		goto out3;
-	}
 	kvm->filp = file;
 
-	r = get_unused_fd();
-	if (r < 0)
-		goto out4;
-	fd = r;
-	fd_install(fd, file);
-
 	return fd;
-
-out4:
-	fput(file);
-out3:
-	kvm_destroy_vm(kvm);
-out2:
-	iput(inode);
-out1:
-	return r;
 }
 
 static long kvm_dev_ioctl(struct file *filp,
@@ -2862,7 +2934,7 @@
 		 * in vmx root mode.
 		 */
 		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
-		on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
+		on_each_cpu(hardware_disable, NULL, 0, 1);
 	}
 	return NOTIFY_OK;
 }
@@ -2905,33 +2977,88 @@
 	spin_unlock(&kvm_lock);
 }
 
+static void hardware_enable(void *junk)
+{
+	int cpu = raw_smp_processor_id();
+
+	if (cpu_isset(cpu, cpus_hardware_enabled))
+		return;
+	cpu_set(cpu, cpus_hardware_enabled);
+	kvm_arch_ops->hardware_enable(NULL);
+}
+
+static void hardware_disable(void *junk)
+{
+	int cpu = raw_smp_processor_id();
+
+	if (!cpu_isset(cpu, cpus_hardware_enabled))
+		return;
+	cpu_clear(cpu, cpus_hardware_enabled);
+	decache_vcpus_on_cpu(cpu);
+	kvm_arch_ops->hardware_disable(NULL);
+}
+
 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 			   void *v)
 {
 	int cpu = (long)v;
 
 	switch (val) {
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
 		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
 		       cpu);
-		decache_vcpus_on_cpu(cpu);
-		smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
-					 NULL, 0, 1);
+		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
 		       cpu);
-		smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
-					 NULL, 0, 1);
+		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
 		break;
 	}
 	return NOTIFY_OK;
 }
 
+void kvm_io_bus_init(struct kvm_io_bus *bus)
+{
+	memset(bus, 0, sizeof(*bus));
+}
+
+void kvm_io_bus_destroy(struct kvm_io_bus *bus)
+{
+	int i;
+
+	for (i = 0; i < bus->dev_count; i++) {
+		struct kvm_io_device *pos = bus->devs[i];
+
+		kvm_iodevice_destructor(pos);
+	}
+}
+
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
+{
+	int i;
+
+	for (i = 0; i < bus->dev_count; i++) {
+		struct kvm_io_device *pos = bus->devs[i];
+
+		if (pos->in_range(pos, addr))
+			return pos;
+	}
+
+	return NULL;
+}
+
+void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
+{
+	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
+
+	bus->devs[bus->dev_count++] = dev;
+}
+
 static struct notifier_block kvm_cpu_notifier = {
 	.notifier_call = kvm_cpu_hotplug,
 	.priority = 20, /* must be > scheduler priority */
@@ -2983,14 +3110,13 @@
 
 static int kvm_suspend(struct sys_device *dev, pm_message_t state)
 {
-	decache_vcpus_on_cpu(raw_smp_processor_id());
-	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
+	hardware_disable(NULL);
 	return 0;
 }
 
 static int kvm_resume(struct sys_device *dev)
 {
-	on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
+	hardware_enable(NULL);
 	return 0;
 }
 
@@ -3007,18 +3133,6 @@
 
 hpa_t bad_page_address;
 
-static int kvmfs_get_sb(struct file_system_type *fs_type, int flags,
-			const char *dev_name, void *data, struct vfsmount *mnt)
-{
-	return get_sb_pseudo(fs_type, "kvm:", NULL, KVMFS_SUPER_MAGIC, mnt);
-}
-
-static struct file_system_type kvm_fs_type = {
-	.name		= "kvmfs",
-	.get_sb		= kvmfs_get_sb,
-	.kill_sb	= kill_anon_super,
-};
-
 int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
 {
 	int r;
@@ -3043,7 +3157,7 @@
 	if (r < 0)
 		goto out;
 
-	on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
+	on_each_cpu(hardware_enable, NULL, 0, 1);
 	r = register_cpu_notifier(&kvm_cpu_notifier);
 	if (r)
 		goto out_free_1;
@@ -3075,7 +3189,7 @@
 	unregister_reboot_notifier(&kvm_reboot_notifier);
 	unregister_cpu_notifier(&kvm_cpu_notifier);
 out_free_1:
-	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
+	on_each_cpu(hardware_disable, NULL, 0, 1);
 	kvm_arch_ops->hardware_unsetup();
 out:
 	kvm_arch_ops = NULL;
@@ -3089,7 +3203,7 @@
 	sysdev_class_unregister(&kvm_sysdev_class);
 	unregister_reboot_notifier(&kvm_reboot_notifier);
 	unregister_cpu_notifier(&kvm_cpu_notifier);
-	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
+	on_each_cpu(hardware_disable, NULL, 0, 1);
 	kvm_arch_ops->hardware_unsetup();
 	kvm_arch_ops = NULL;
 }
@@ -3103,14 +3217,6 @@
 	if (r)
 		goto out4;
 
-	r = register_filesystem(&kvm_fs_type);
-	if (r)
-		goto out3;
-
-	kvmfs_mnt = kern_mount(&kvm_fs_type);
-	r = PTR_ERR(kvmfs_mnt);
-	if (IS_ERR(kvmfs_mnt))
-		goto out2;
 	kvm_init_debug();
 
 	kvm_init_msr_list();
@@ -3127,10 +3233,6 @@
 
 out:
 	kvm_exit_debug();
-	mntput(kvmfs_mnt);
-out2:
-	unregister_filesystem(&kvm_fs_type);
-out3:
 	kvm_mmu_module_exit();
 out4:
 	return r;
@@ -3140,8 +3242,6 @@
 {
 	kvm_exit_debug();
 	__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
-	mntput(kvmfs_mnt);
-	unregister_filesystem(&kvm_fs_type);
 	kvm_mmu_module_exit();
 }
 
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index e8e2281..b297a6b 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -16,15 +16,18 @@
  * the COPYING file in the top-level directory.
  *
  */
+
+#include "vmx.h"
+#include "kvm.h"
+
 #include <linux/types.h>
 #include <linux/string.h>
-#include <asm/page.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/module.h>
 
-#include "vmx.h"
-#include "kvm.h"
+#include <asm/page.h>
+#include <asm/cmpxchg.h>
 
 #undef MMU_DEBUG
 
@@ -90,25 +93,11 @@
 #define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
 
 
-#define PT32_PTE_COPY_MASK \
-	(PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)
-
-#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)
-
 #define PT_FIRST_AVAIL_BITS_SHIFT 9
 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
 
-#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
 #define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
 
-#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
-#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)
-
-#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
-#define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))
-
-#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)
-
 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
 
 #define PT64_LEVEL_BITS 9
@@ -165,6 +154,8 @@
 
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
+static struct kmem_cache *mmu_page_cache;
+static struct kmem_cache *mmu_page_header_cache;
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
@@ -202,6 +193,15 @@
 		== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
 }
 
+static void set_shadow_pte(u64 *sptep, u64 spte)
+{
+#ifdef CONFIG_X86_64
+	set_64bit((unsigned long *)sptep, spte);
+#else
+	set_64bit((unsigned long long *)sptep, spte);
+#endif
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 				  struct kmem_cache *base_cache, int min,
 				  gfp_t gfp_flags)
@@ -235,6 +235,14 @@
 		goto out;
 	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
 				   rmap_desc_cache, 1, gfp_flags);
+	if (r)
+		goto out;
+	r = mmu_topup_memory_cache(&vcpu->mmu_page_cache,
+				   mmu_page_cache, 4, gfp_flags);
+	if (r)
+		goto out;
+	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
+				   mmu_page_header_cache, 4, gfp_flags);
 out:
 	return r;
 }
@@ -258,6 +266,8 @@
 {
 	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
 	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
+	mmu_free_memory_cache(&vcpu->mmu_page_cache);
+	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
 }
 
 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -433,19 +443,18 @@
 		BUG_ON(!(*spte & PT_WRITABLE_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
 		rmap_remove(vcpu, spte);
-		kvm_arch_ops->tlb_flush(vcpu);
-		*spte &= ~(u64)PT_WRITABLE_MASK;
+		set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
+		kvm_flush_remote_tlbs(vcpu->kvm);
 	}
 }
 
 #ifdef MMU_DEBUG
-static int is_empty_shadow_page(hpa_t page_hpa)
+static int is_empty_shadow_page(u64 *spt)
 {
 	u64 *pos;
 	u64 *end;
 
-	for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
-		      pos != end; pos++)
+	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
 		if (*pos != 0) {
 			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
 			       pos, *pos);
@@ -455,13 +464,13 @@
 }
 #endif
 
-static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
+static void kvm_mmu_free_page(struct kvm_vcpu *vcpu,
+			      struct kvm_mmu_page *page_head)
 {
-	struct kvm_mmu_page *page_head = page_header(page_hpa);
-
-	ASSERT(is_empty_shadow_page(page_hpa));
-	page_head->page_hpa = page_hpa;
-	list_move(&page_head->link, &vcpu->free_pages);
+	ASSERT(is_empty_shadow_page(page_head->spt));
+	list_del(&page_head->link);
+	mmu_memory_cache_free(&vcpu->mmu_page_cache, page_head->spt);
+	mmu_memory_cache_free(&vcpu->mmu_page_header_cache, page_head);
 	++vcpu->kvm->n_free_mmu_pages;
 }
 
@@ -475,12 +484,15 @@
 {
 	struct kvm_mmu_page *page;
 
-	if (list_empty(&vcpu->free_pages))
+	if (!vcpu->kvm->n_free_mmu_pages)
 		return NULL;
 
-	page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
-	list_move(&page->link, &vcpu->kvm->active_mmu_pages);
-	ASSERT(is_empty_shadow_page(page->page_hpa));
+	page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
+				      sizeof *page);
+	page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+	set_page_private(virt_to_page(page->spt), (unsigned long)page);
+	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+	ASSERT(is_empty_shadow_page(page->spt));
 	page->slot_bitmap = 0;
 	page->multimapped = 0;
 	page->parent_pte = parent_pte;
@@ -638,7 +650,7 @@
 	u64 *pt;
 	u64 ent;
 
-	pt = __va(page->page_hpa);
+	pt = page->spt;
 
 	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
@@ -646,7 +658,7 @@
 				rmap_remove(vcpu, &pt[i]);
 			pt[i] = 0;
 		}
-		kvm_arch_ops->tlb_flush(vcpu);
+		kvm_flush_remote_tlbs(vcpu->kvm);
 		return;
 	}
 
@@ -659,6 +671,7 @@
 		ent &= PT64_BASE_ADDR_MASK;
 		mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
 	}
+	kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
 static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
@@ -685,12 +698,12 @@
 		}
 		BUG_ON(!parent_pte);
 		kvm_mmu_put_page(vcpu, page, parent_pte);
-		*parent_pte = 0;
+		set_shadow_pte(parent_pte, 0);
 	}
 	kvm_mmu_page_unlink_children(vcpu, page);
 	if (!page->root_count) {
 		hlist_del(&page->hash_link);
-		kvm_mmu_free_page(vcpu, page->page_hpa);
+		kvm_mmu_free_page(vcpu, page);
 	} else
 		list_move(&page->link, &vcpu->kvm->active_mmu_pages);
 }
@@ -717,6 +730,17 @@
 	return r;
 }
 
+static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	struct kvm_mmu_page *page;
+
+	while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
+		pgprintk("%s: zap %lx %x\n",
+			 __FUNCTION__, gfn, page->role.word);
+		kvm_mmu_zap_page(vcpu, page);
+	}
+}
+
 static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
 {
 	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
@@ -805,7 +829,7 @@
 				return -ENOMEM;
 			}
 
-			table[index] = new_table->page_hpa | PT_PRESENT_MASK
+			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
 				| PT_WRITABLE_MASK | PT_USER_MASK;
 		}
 		table_addr = table[index] & PT64_BASE_ADDR_MASK;
@@ -817,11 +841,12 @@
 	int i;
 	struct kvm_mmu_page *page;
 
+	if (!VALID_PAGE(vcpu->mmu.root_hpa))
+		return;
 #ifdef CONFIG_X86_64
 	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->mmu.root_hpa;
 
-		ASSERT(VALID_PAGE(root));
 		page = page_header(root);
 		--page->root_count;
 		vcpu->mmu.root_hpa = INVALID_PAGE;
@@ -832,7 +857,6 @@
 		hpa_t root = vcpu->mmu.pae_root[i];
 
 		if (root) {
-			ASSERT(VALID_PAGE(root));
 			root &= PT64_BASE_ADDR_MASK;
 			page = page_header(root);
 			--page->root_count;
@@ -857,7 +881,7 @@
 		ASSERT(!VALID_PAGE(root));
 		page = kvm_mmu_get_page(vcpu, root_gfn, 0,
 					PT64_ROOT_LEVEL, 0, 0, NULL);
-		root = page->page_hpa;
+		root = __pa(page->spt);
 		++page->root_count;
 		vcpu->mmu.root_hpa = root;
 		return;
@@ -878,7 +902,7 @@
 		page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 					PT32_ROOT_LEVEL, !is_paging(vcpu),
 					0, NULL);
-		root = page->page_hpa;
+		root = __pa(page->spt);
 		++page->root_count;
 		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
 	}
@@ -928,9 +952,7 @@
 	context->free = nonpaging_free;
 	context->root_level = 0;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
-	mmu_alloc_roots(vcpu);
-	ASSERT(VALID_PAGE(context->root_hpa));
-	kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
+	context->root_hpa = INVALID_PAGE;
 	return 0;
 }
 
@@ -944,59 +966,6 @@
 {
 	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
 	mmu_free_roots(vcpu);
-	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
-		kvm_mmu_free_some_pages(vcpu);
-	mmu_alloc_roots(vcpu);
-	kvm_mmu_flush_tlb(vcpu);
-	kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
-}
-
-static inline void set_pte_common(struct kvm_vcpu *vcpu,
-			     u64 *shadow_pte,
-			     gpa_t gaddr,
-			     int dirty,
-			     u64 access_bits,
-			     gfn_t gfn)
-{
-	hpa_t paddr;
-
-	*shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
-	if (!dirty)
-		access_bits &= ~PT_WRITABLE_MASK;
-
-	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
-
-	*shadow_pte |= access_bits;
-
-	if (is_error_hpa(paddr)) {
-		*shadow_pte |= gaddr;
-		*shadow_pte |= PT_SHADOW_IO_MARK;
-		*shadow_pte &= ~PT_PRESENT_MASK;
-		return;
-	}
-
-	*shadow_pte |= paddr;
-
-	if (access_bits & PT_WRITABLE_MASK) {
-		struct kvm_mmu_page *shadow;
-
-		shadow = kvm_mmu_lookup_page(vcpu, gfn);
-		if (shadow) {
-			pgprintk("%s: found shadow page for %lx, marking ro\n",
-				 __FUNCTION__, gfn);
-			access_bits &= ~PT_WRITABLE_MASK;
-			if (is_writeble_pte(*shadow_pte)) {
-				    *shadow_pte &= ~PT_WRITABLE_MASK;
-				    kvm_arch_ops->tlb_flush(vcpu);
-			}
-		}
-	}
-
-	if (access_bits & PT_WRITABLE_MASK)
-		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
-
-	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
-	rmap_add(vcpu, shadow_pte);
 }
 
 static void inject_page_fault(struct kvm_vcpu *vcpu,
@@ -1006,23 +975,6 @@
 	kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
 }
 
-static inline int fix_read_pf(u64 *shadow_ent)
-{
-	if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
-	    !(*shadow_ent & PT_USER_MASK)) {
-		/*
-		 * If supervisor write protect is disabled, we shadow kernel
-		 * pages as user pages so we can trap the write access.
-		 */
-		*shadow_ent |= PT_USER_MASK;
-		*shadow_ent &= ~PT_WRITABLE_MASK;
-
-		return 1;
-
-	}
-	return 0;
-}
-
 static void paging_free(struct kvm_vcpu *vcpu)
 {
 	nonpaging_free(vcpu);
@@ -1047,10 +999,7 @@
 	context->free = paging_free;
 	context->root_level = level;
 	context->shadow_root_level = level;
-	mmu_alloc_roots(vcpu);
-	ASSERT(VALID_PAGE(context->root_hpa));
-	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
-		    (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+	context->root_hpa = INVALID_PAGE;
 	return 0;
 }
 
@@ -1069,10 +1018,7 @@
 	context->free = paging_free;
 	context->root_level = PT32_ROOT_LEVEL;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
-	mmu_alloc_roots(vcpu);
-	ASSERT(VALID_PAGE(context->root_hpa));
-	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
-		    (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+	context->root_hpa = INVALID_PAGE;
 	return 0;
 }
 
@@ -1107,18 +1053,33 @@
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
 {
-	int r;
-
 	destroy_kvm_mmu(vcpu);
-	r = init_kvm_mmu(vcpu);
-	if (r < 0)
-		goto out;
-	r = mmu_topup_memory_caches(vcpu);
-out:
-	return r;
+	return init_kvm_mmu(vcpu);
 }
 
-static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
+int kvm_mmu_load(struct kvm_vcpu *vcpu)
+{
+	int r;
+
+	spin_lock(&vcpu->kvm->lock);
+	r = mmu_topup_memory_caches(vcpu);
+	if (r)
+		goto out;
+	mmu_alloc_roots(vcpu);
+	kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+	kvm_mmu_flush_tlb(vcpu);
+out:
+	spin_unlock(&vcpu->kvm->lock);
+	return r;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_load);
+
+void kvm_mmu_unload(struct kvm_vcpu *vcpu)
+{
+	mmu_free_roots(vcpu);
+}
+
+static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 				  struct kvm_mmu_page *page,
 				  u64 *spte)
 {
@@ -1135,9 +1096,25 @@
 		}
 	}
 	*spte = 0;
+	kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
+static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu_page *page,
+				  u64 *spte,
+				  const void *new, int bytes)
+{
+	if (page->role.level != PT_PAGE_TABLE_LEVEL)
+		return;
+
+	if (page->role.glevels == PT32_ROOT_LEVEL)
+		paging32_update_pte(vcpu, page, spte, new, bytes);
+	else
+		paging64_update_pte(vcpu, page, spte, new, bytes);
+}
+
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+		       const u8 *old, const u8 *new, int bytes)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm_mmu_page *page;
@@ -1149,6 +1126,7 @@
 	unsigned pte_size;
 	unsigned page_offset;
 	unsigned misaligned;
+	unsigned quadrant;
 	int level;
 	int flooded = 0;
 	int npte;
@@ -1169,6 +1147,7 @@
 			continue;
 		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
+		misaligned |= bytes < 4;
 		if (misaligned || flooded) {
 			/*
 			 * Misaligned accesses are too much trouble to fix
@@ -1200,21 +1179,20 @@
 				page_offset <<= 1;
 				npte = 2;
 			}
+			quadrant = page_offset >> PAGE_SHIFT;
 			page_offset &= ~PAGE_MASK;
+			if (quadrant != page->role.quadrant)
+				continue;
 		}
-		spte = __va(page->page_hpa);
-		spte += page_offset / sizeof(*spte);
+		spte = &page->spt[page_offset / sizeof(*spte)];
 		while (npte--) {
-			mmu_pre_write_zap_pte(vcpu, page, spte);
+			mmu_pte_write_zap_pte(vcpu, page, spte);
+			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
 			++spte;
 		}
 	}
 }
 
-void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
-{
-}
-
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
@@ -1243,13 +1221,6 @@
 				    struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu, page);
 	}
-	while (!list_empty(&vcpu->free_pages)) {
-		page = list_entry(vcpu->free_pages.next,
-				  struct kvm_mmu_page, link);
-		list_del(&page->link);
-		__free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
-		page->page_hpa = INVALID_PAGE;
-	}
 	free_page((unsigned long)vcpu->mmu.pae_root);
 }
 
@@ -1260,18 +1231,7 @@
 
 	ASSERT(vcpu);
 
-	for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
-		struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];
-
-		INIT_LIST_HEAD(&page_header->link);
-		if ((page = alloc_page(GFP_KERNEL)) == NULL)
-			goto error_1;
-		set_page_private(page, (unsigned long)page_header);
-		page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
-		memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
-		list_add(&page_header->link, &vcpu->free_pages);
-		++vcpu->kvm->n_free_mmu_pages;
-	}
+	vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
 
 	/*
 	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
@@ -1296,7 +1256,6 @@
 {
 	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
-	ASSERT(list_empty(&vcpu->free_pages));
 
 	return alloc_mmu_pages(vcpu);
 }
@@ -1305,7 +1264,6 @@
 {
 	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
-	ASSERT(!list_empty(&vcpu->free_pages));
 
 	return init_kvm_mmu(vcpu);
 }
@@ -1331,7 +1289,7 @@
 		if (!test_bit(slot, &page->slot_bitmap))
 			continue;
 
-		pt = __va(page->page_hpa);
+		pt = page->spt;
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 			/* avoid RMW */
 			if (pt[i] & PT_WRITABLE_MASK) {
@@ -1354,7 +1312,7 @@
 	}
 
 	mmu_free_memory_caches(vcpu);
-	kvm_arch_ops->tlb_flush(vcpu);
+	kvm_flush_remote_tlbs(vcpu->kvm);
 	init_kvm_mmu(vcpu);
 }
 
@@ -1364,6 +1322,10 @@
 		kmem_cache_destroy(pte_chain_cache);
 	if (rmap_desc_cache)
 		kmem_cache_destroy(rmap_desc_cache);
+	if (mmu_page_cache)
+		kmem_cache_destroy(mmu_page_cache);
+	if (mmu_page_header_cache)
+		kmem_cache_destroy(mmu_page_header_cache);
 }
 
 int kvm_mmu_module_init(void)
@@ -1379,6 +1341,18 @@
 	if (!rmap_desc_cache)
 		goto nomem;
 
+	mmu_page_cache = kmem_cache_create("kvm_mmu_page",
+					   PAGE_SIZE,
+					   PAGE_SIZE, 0, NULL, NULL);
+	if (!mmu_page_cache)
+		goto nomem;
+
+	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
+						  sizeof(struct kvm_mmu_page),
+						  0, 0, NULL, NULL);
+	if (!mmu_page_header_cache)
+		goto nomem;
+
 	return 0;
 
 nomem:
@@ -1482,7 +1456,7 @@
 	int i;
 
 	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
-		u64 *pt = __va(page->page_hpa);
+		u64 *pt = page->spt;
 
 		if (page->role.level != PT_PAGE_TABLE_LEVEL)
 			continue;
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 73ffbff..a7c5cb0 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -31,7 +31,6 @@
 	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
 	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
-	#define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
 	#ifdef CONFIG_X86_64
 	#define PT_MAX_FULL_LEVELS 4
 	#else
@@ -46,7 +45,6 @@
 	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
 	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
-	#define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
 	#define PT_MAX_FULL_LEVELS 2
 #else
 	#error Invalid PTTYPE value
@@ -192,40 +190,143 @@
 	mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
 }
 
-static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
-			   u64 *shadow_pte, u64 access_bits, gfn_t gfn)
+static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
+				  u64 *shadow_pte,
+				  gpa_t gaddr,
+				  pt_element_t *gpte,
+				  u64 access_bits,
+				  int user_fault,
+				  int write_fault,
+				  int *ptwrite,
+				  struct guest_walker *walker,
+				  gfn_t gfn)
 {
-	ASSERT(*shadow_pte == 0);
-	access_bits &= guest_pte;
-	*shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
-	set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
-		       guest_pte & PT_DIRTY_MASK, access_bits, gfn);
+	hpa_t paddr;
+	int dirty = *gpte & PT_DIRTY_MASK;
+	u64 spte = *shadow_pte;
+	int was_rmapped = is_rmap_pte(spte);
+
+	pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
+		 " user_fault %d gfn %lx\n",
+		 __FUNCTION__, spte, (u64)*gpte, access_bits,
+		 write_fault, user_fault, gfn);
+
+	if (write_fault && !dirty) {
+		*gpte |= PT_DIRTY_MASK;
+		dirty = 1;
+		FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
+	}
+
+	spte |= PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK;
+	spte |= *gpte & PT64_NX_MASK;
+	if (!dirty)
+		access_bits &= ~PT_WRITABLE_MASK;
+
+	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
+
+	spte |= PT_PRESENT_MASK;
+	if (access_bits & PT_USER_MASK)
+		spte |= PT_USER_MASK;
+
+	if (is_error_hpa(paddr)) {
+		spte |= gaddr;
+		spte |= PT_SHADOW_IO_MARK;
+		spte &= ~PT_PRESENT_MASK;
+		set_shadow_pte(shadow_pte, spte);
+		return;
+	}
+
+	spte |= paddr;
+
+	if ((access_bits & PT_WRITABLE_MASK)
+	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
+		struct kvm_mmu_page *shadow;
+
+		spte |= PT_WRITABLE_MASK;
+		if (user_fault) {
+			mmu_unshadow(vcpu, gfn);
+			goto unshadowed;
+		}
+
+		shadow = kvm_mmu_lookup_page(vcpu, gfn);
+		if (shadow) {
+			pgprintk("%s: found shadow page for %lx, marking ro\n",
+				 __FUNCTION__, gfn);
+			access_bits &= ~PT_WRITABLE_MASK;
+			if (is_writeble_pte(spte)) {
+				spte &= ~PT_WRITABLE_MASK;
+				kvm_arch_ops->tlb_flush(vcpu);
+			}
+			if (write_fault)
+				*ptwrite = 1;
+		}
+	}
+
+unshadowed:
+
+	if (access_bits & PT_WRITABLE_MASK)
+		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
+
+	set_shadow_pte(shadow_pte, spte);
+	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
+	if (!was_rmapped)
+		rmap_add(vcpu, shadow_pte);
 }
 
-static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
-			   u64 *shadow_pte, u64 access_bits, gfn_t gfn)
+static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t *gpte,
+			   u64 *shadow_pte, u64 access_bits,
+			   int user_fault, int write_fault, int *ptwrite,
+			   struct guest_walker *walker, gfn_t gfn)
+{
+	access_bits &= *gpte;
+	FNAME(set_pte_common)(vcpu, shadow_pte, *gpte & PT_BASE_ADDR_MASK,
+			      gpte, access_bits, user_fault, write_fault,
+			      ptwrite, walker, gfn);
+}
+
+static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
+			      u64 *spte, const void *pte, int bytes)
+{
+	pt_element_t gpte;
+
+	if (bytes < sizeof(pt_element_t))
+		return;
+	gpte = *(const pt_element_t *)pte;
+	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
+		return;
+	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
+	FNAME(set_pte)(vcpu, &gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
+		       0, NULL, NULL,
+		       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
+}
+
+static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t *gpde,
+			   u64 *shadow_pte, u64 access_bits,
+			   int user_fault, int write_fault, int *ptwrite,
+			   struct guest_walker *walker, gfn_t gfn)
 {
 	gpa_t gaddr;
 
-	ASSERT(*shadow_pte == 0);
-	access_bits &= guest_pde;
+	access_bits &= *gpde;
 	gaddr = (gpa_t)gfn << PAGE_SHIFT;
 	if (PTTYPE == 32 && is_cpuid_PSE36())
-		gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
+		gaddr |= (*gpde & PT32_DIR_PSE36_MASK) <<
 			(32 - PT32_DIR_PSE36_SHIFT);
-	*shadow_pte = guest_pde & PT_PTE_COPY_MASK;
-	set_pte_common(vcpu, shadow_pte, gaddr,
-		       guest_pde & PT_DIRTY_MASK, access_bits, gfn);
+	FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
+			      gpde, access_bits, user_fault, write_fault,
+			      ptwrite, walker, gfn);
 }
 
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
-			      struct guest_walker *walker)
+			 struct guest_walker *walker,
+			 int user_fault, int write_fault, int *ptwrite)
 {
 	hpa_t shadow_addr;
 	int level;
+	u64 *shadow_ent;
 	u64 *prev_shadow_ent = NULL;
 	pt_element_t *guest_ent = walker->ptep;
 
@@ -242,37 +343,23 @@
 
 	for (; ; level--) {
 		u32 index = SHADOW_PT_INDEX(addr, level);
-		u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
 		struct kvm_mmu_page *shadow_page;
 		u64 shadow_pte;
 		int metaphysical;
 		gfn_t table_gfn;
 		unsigned hugepage_access = 0;
 
+		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
 		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
 			if (level == PT_PAGE_TABLE_LEVEL)
-				return shadow_ent;
+				break;
 			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
 			prev_shadow_ent = shadow_ent;
 			continue;
 		}
 
-		if (level == PT_PAGE_TABLE_LEVEL) {
-
-			if (walker->level == PT_DIRECTORY_LEVEL) {
-				if (prev_shadow_ent)
-					*prev_shadow_ent |= PT_SHADOW_PS_MARK;
-				FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
-					       walker->inherited_ar,
-					       walker->gfn);
-			} else {
-				ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
-				FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
-					       walker->inherited_ar,
-					       walker->gfn);
-			}
-			return shadow_ent;
-		}
+		if (level == PT_PAGE_TABLE_LEVEL)
+			break;
 
 		if (level - 1 == PT_PAGE_TABLE_LEVEL
 		    && walker->level == PT_DIRECTORY_LEVEL) {
@@ -289,90 +376,24 @@
 		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
 					       metaphysical, hugepage_access,
 					       shadow_ent);
-		shadow_addr = shadow_page->page_hpa;
+		shadow_addr = __pa(shadow_page->spt);
 		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
 			| PT_WRITABLE_MASK | PT_USER_MASK;
 		*shadow_ent = shadow_pte;
 		prev_shadow_ent = shadow_ent;
 	}
-}
 
-/*
- * The guest faulted for write.  We need to
- *
- * - check write permissions
- * - update the guest pte dirty bit
- * - update our own dirty page tracking structures
- */
-static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
-			       u64 *shadow_ent,
-			       struct guest_walker *walker,
-			       gva_t addr,
-			       int user,
-			       int *write_pt)
-{
-	pt_element_t *guest_ent;
-	int writable_shadow;
-	gfn_t gfn;
-	struct kvm_mmu_page *page;
-
-	if (is_writeble_pte(*shadow_ent))
-		return !user || (*shadow_ent & PT_USER_MASK);
-
-	writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
-	if (user) {
-		/*
-		 * User mode access.  Fail if it's a kernel page or a read-only
-		 * page.
-		 */
-		if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
-			return 0;
-		ASSERT(*shadow_ent & PT_USER_MASK);
-	} else
-		/*
-		 * Kernel mode access.  Fail if it's a read-only page and
-		 * supervisor write protection is enabled.
-		 */
-		if (!writable_shadow) {
-			if (is_write_protection(vcpu))
-				return 0;
-			*shadow_ent &= ~PT_USER_MASK;
-		}
-
-	guest_ent = walker->ptep;
-
-	if (!is_present_pte(*guest_ent)) {
-		*shadow_ent = 0;
-		return 0;
+	if (walker->level == PT_DIRECTORY_LEVEL) {
+		FNAME(set_pde)(vcpu, guest_ent, shadow_ent,
+			       walker->inherited_ar, user_fault, write_fault,
+			       ptwrite, walker, walker->gfn);
+	} else {
+		ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
+		FNAME(set_pte)(vcpu, guest_ent, shadow_ent,
+			       walker->inherited_ar, user_fault, write_fault,
+			       ptwrite, walker, walker->gfn);
 	}
-
-	gfn = walker->gfn;
-
-	if (user) {
-		/*
-		 * Usermode page faults won't be for page table updates.
-		 */
-		while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
-			pgprintk("%s: zap %lx %x\n",
-				 __FUNCTION__, gfn, page->role.word);
-			kvm_mmu_zap_page(vcpu, page);
-		}
-	} else if (kvm_mmu_lookup_page(vcpu, gfn)) {
-		pgprintk("%s: found shadow page for %lx, marking ro\n",
-			 __FUNCTION__, gfn);
-		mark_page_dirty(vcpu->kvm, gfn);
-		FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
-		*guest_ent |= PT_DIRTY_MASK;
-		*write_pt = 1;
-		return 0;
-	}
-	mark_page_dirty(vcpu->kvm, gfn);
-	*shadow_ent |= PT_WRITABLE_MASK;
-	FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
-	*guest_ent |= PT_DIRTY_MASK;
-	rmap_add(vcpu, shadow_ent);
-
-	return 1;
+	return shadow_ent;
 }
 
 /*
@@ -397,7 +418,6 @@
 	int fetch_fault = error_code & PFERR_FETCH_MASK;
 	struct guest_walker walker;
 	u64 *shadow_pte;
-	int fixed;
 	int write_pt = 0;
 	int r;
 
@@ -421,27 +441,20 @@
 		pgprintk("%s: guest page fault\n", __FUNCTION__);
 		inject_page_fault(vcpu, addr, walker.error_code);
 		FNAME(release_walker)(&walker);
+		vcpu->last_pt_write_count = 0; /* reset fork detector */
 		return 0;
 	}
 
-	shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
-	pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__,
-		 shadow_pte, *shadow_pte);
-
-	/*
-	 * Update the shadow pte.
-	 */
-	if (write_fault)
-		fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
-					    user_fault, &write_pt);
-	else
-		fixed = fix_read_pf(shadow_pte);
-
-	pgprintk("%s: updated shadow pte %p %llx\n", __FUNCTION__,
-		 shadow_pte, *shadow_pte);
+	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
+				  &write_pt);
+	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
+		 shadow_pte, *shadow_pte, write_pt);
 
 	FNAME(release_walker)(&walker);
 
+	if (!write_pt)
+		vcpu->last_pt_write_count = 0; /* reset fork detector */
+
 	/*
 	 * mmio: emulate if accessible, otherwise its a guest fault.
 	 */
@@ -478,7 +491,5 @@
 #undef PT_INDEX
 #undef SHADOW_PT_INDEX
 #undef PT_LEVEL_MASK
-#undef PT_PTE_COPY_MASK
-#undef PT_NON_PTE_COPY_MASK
 #undef PT_DIR_BASE_ADDR_MASK
 #undef PT_MAX_FULL_LEVELS
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index fa17d6d..bc818cc 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -14,16 +14,17 @@
  *
  */
 
+#include "kvm_svm.h"
+#include "x86_emulate.h"
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
-#include <asm/desc.h>
 
-#include "kvm_svm.h"
-#include "x86_emulate.h"
+#include <asm/desc.h>
 
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
@@ -378,7 +379,7 @@
 	int cpu;
 	struct page *iopm_pages;
 	struct page *msrpm_pages;
-	void *msrpm_va;
+	void *iopm_va, *msrpm_va;
 	int r;
 
 	kvm_emulator_want_group7_invlpg();
@@ -387,8 +388,10 @@
 
 	if (!iopm_pages)
 		return -ENOMEM;
-	memset(page_address(iopm_pages), 0xff,
-					PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
+
+	iopm_va = page_address(iopm_pages);
+	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
+	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
 	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
 
 
@@ -579,7 +582,7 @@
 		goto out2;
 
 	vcpu->svm->vmcb = page_address(page);
-	memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
+	clear_page(vcpu->svm->vmcb);
 	vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
 	vcpu->svm->asid_generation = 0;
 	memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
@@ -587,9 +590,9 @@
 
 	fx_init(vcpu);
 	vcpu->fpu_active = 1;
-	vcpu->apic_base = 0xfee00000 |
-			/*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
-			MSR_IA32_APICBASE_ENABLE;
+	vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+	if (vcpu == &vcpu->kvm->vcpus[0])
+		vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
 
 	return 0;
 
@@ -955,7 +958,7 @@
 	 * VMCB is undefined after a SHUTDOWN intercept
 	 * so reinitialize it.
 	 */
-	memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
+	clear_page(vcpu->svm->vmcb);
 	init_vmcb(vcpu->svm->vmcb);
 
 	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
@@ -1113,12 +1116,7 @@
 {
 	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
 	skip_emulated_instruction(vcpu);
-	if (vcpu->irq_summary)
-		return 1;
-
-	kvm_run->exit_reason = KVM_EXIT_HLT;
-	++vcpu->stat.halt_exits;
-	return 0;
+	return kvm_emulate_halt(vcpu);
 }
 
 static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -1473,6 +1471,11 @@
 	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
 }
 
+static void svm_flush_tlb(struct kvm_vcpu *vcpu)
+{
+	force_new_asid(vcpu);
+}
+
 static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	u16 fs_selector;
@@ -1481,11 +1484,20 @@
 	int r;
 
 again:
+	r = kvm_mmu_reload(vcpu);
+	if (unlikely(r))
+		return r;
+
 	if (!vcpu->mmio_read_completed)
 		do_interrupt_requests(vcpu, kvm_run);
 
 	clgi();
 
+	vcpu->guest_mode = 1;
+	if (vcpu->requests)
+		if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
+		    svm_flush_tlb(vcpu);
+
 	pre_svm_run(vcpu);
 
 	save_host_msrs(vcpu);
@@ -1617,6 +1629,8 @@
 #endif
 		: "cc", "memory" );
 
+	vcpu->guest_mode = 0;
+
 	if (vcpu->fpu_active) {
 		fx_save(vcpu->guest_fx_image);
 		fx_restore(vcpu->host_fx_image);
@@ -1681,11 +1695,6 @@
 	return r;
 }
 
-static void svm_flush_tlb(struct kvm_vcpu *vcpu)
-{
-	force_new_asid(vcpu);
-}
-
 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 {
 	vcpu->svm->vmcb->save.cr3 = root;
@@ -1727,6 +1736,12 @@
 
 static int is_disabled(void)
 {
+	u64 vm_cr;
+
+	rdmsrl(MSR_VM_CR, vm_cr);
+	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
+		return 1;
+
 	return 0;
 }
 
diff --git a/drivers/kvm/svm.h b/drivers/kvm/svm.h
index 5e93814..3b1b0f3 100644
--- a/drivers/kvm/svm.h
+++ b/drivers/kvm/svm.h
@@ -175,8 +175,11 @@
 #define SVM_CPUID_FUNC 0x8000000a
 
 #define MSR_EFER_SVME_MASK (1ULL << 12)
+#define MSR_VM_CR       0xc0010114
 #define MSR_VM_HSAVE_PA 0xc0010117ULL
 
+#define SVM_VM_CR_SVM_DISABLE 4
+
 #define SVM_SELECTOR_S_SHIFT 4
 #define SVM_SELECTOR_DPL_SHIFT 5
 #define SVM_SELECTOR_P_SHIFT 7
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index c1ac106..80628f6 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -17,28 +17,35 @@
 
 #include "kvm.h"
 #include "vmx.h"
+#include "segment_descriptor.h"
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
+
 #include <asm/io.h>
 #include <asm/desc.h>
 
-#include "segment_descriptor.h"
-
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
+static int init_rmode_tss(struct kvm *kvm);
+
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 
+static struct page *vmx_io_bitmap_a;
+static struct page *vmx_io_bitmap_b;
+
 #ifdef CONFIG_X86_64
 #define HOST_IS_64 1
 #else
 #define HOST_IS_64 0
 #endif
+#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
 
 static struct vmcs_descriptor {
 	int size;
@@ -82,18 +89,17 @@
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-#ifdef CONFIG_X86_64
-static unsigned msr_offset_kernel_gs_base;
-#define NR_64BIT_MSRS 4
-/*
- * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
- * mechanism (cpu bug AA24)
- */
-#define NR_BAD_MSRS 2
-#else
-#define NR_64BIT_MSRS 0
-#define NR_BAD_MSRS 0
-#endif
+static inline u64 msr_efer_save_restore_bits(struct vmx_msr_entry msr)
+{
+	return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
+}
+
+static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
+{
+	int efer_offset = vcpu->msr_offset_efer;
+	return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) !=
+		msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]);
+}
 
 static inline int is_page_fault(u32 intr_info)
 {
@@ -115,13 +121,23 @@
 		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
 {
 	int i;
 
 	for (i = 0; i < vcpu->nmsrs; ++i)
 		if (vcpu->guest_msrs[i].index == msr)
-			return &vcpu->guest_msrs[i];
+			return i;
+	return -1;
+}
+
+static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+{
+	int i;
+
+	i = __find_msr_index(vcpu, msr);
+	if (i >= 0)
+		return &vcpu->guest_msrs[i];
 	return NULL;
 }
 
@@ -147,6 +163,7 @@
 		vmcs_clear(vcpu->vmcs);
 	if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
 		per_cpu(current_vmcs, cpu) = NULL;
+	rdtscll(vcpu->host_tsc);
 }
 
 static void vcpu_clear(struct kvm_vcpu *vcpu)
@@ -234,6 +251,127 @@
 	vmcs_writel(field, vmcs_readl(field) | mask);
 }
 
+static void update_exception_bitmap(struct kvm_vcpu *vcpu)
+{
+	u32 eb;
+
+	eb = 1u << PF_VECTOR;
+	if (!vcpu->fpu_active)
+		eb |= 1u << NM_VECTOR;
+	if (vcpu->guest_debug.enabled)
+		eb |= 1u << 1;
+	if (vcpu->rmode.active)
+		eb = ~0;
+	vmcs_write32(EXCEPTION_BITMAP, eb);
+}
+
+static void reload_tss(void)
+{
+#ifndef CONFIG_X86_64
+
+	/*
+	 * VT restores TR but not its size.  Useless.
+	 */
+	struct descriptor_table gdt;
+	struct segment_descriptor *descs;
+
+	get_gdt(&gdt);
+	descs = (void *)gdt.base;
+	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
+	load_TR_desc();
+#endif
+}
+
+static void load_transition_efer(struct kvm_vcpu *vcpu)
+{
+	u64 trans_efer;
+	int efer_offset = vcpu->msr_offset_efer;
+
+	trans_efer = vcpu->host_msrs[efer_offset].data;
+	trans_efer &= ~EFER_SAVE_RESTORE_BITS;
+	trans_efer |= msr_efer_save_restore_bits(
+				vcpu->guest_msrs[efer_offset]);
+	wrmsrl(MSR_EFER, trans_efer);
+	vcpu->stat.efer_reload++;
+}
+
+static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+{
+	struct vmx_host_state *hs = &vcpu->vmx_host_state;
+
+	if (hs->loaded)
+		return;
+
+	hs->loaded = 1;
+	/*
+	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
+	 * allow segment selectors with cpl > 0 or ti == 1.
+	 */
+	hs->ldt_sel = read_ldt();
+	hs->fs_gs_ldt_reload_needed = hs->ldt_sel;
+	hs->fs_sel = read_fs();
+	if (!(hs->fs_sel & 7))
+		vmcs_write16(HOST_FS_SELECTOR, hs->fs_sel);
+	else {
+		vmcs_write16(HOST_FS_SELECTOR, 0);
+		hs->fs_gs_ldt_reload_needed = 1;
+	}
+	hs->gs_sel = read_gs();
+	if (!(hs->gs_sel & 7))
+		vmcs_write16(HOST_GS_SELECTOR, hs->gs_sel);
+	else {
+		vmcs_write16(HOST_GS_SELECTOR, 0);
+		hs->fs_gs_ldt_reload_needed = 1;
+	}
+
+#ifdef CONFIG_X86_64
+	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
+	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
+#else
+	vmcs_writel(HOST_FS_BASE, segment_base(hs->fs_sel));
+	vmcs_writel(HOST_GS_BASE, segment_base(hs->gs_sel));
+#endif
+
+#ifdef CONFIG_X86_64
+	if (is_long_mode(vcpu)) {
+		save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1);
+	}
+#endif
+	load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+	if (msr_efer_need_save_restore(vcpu))
+		load_transition_efer(vcpu);
+}
+
+static void vmx_load_host_state(struct kvm_vcpu *vcpu)
+{
+	struct vmx_host_state *hs = &vcpu->vmx_host_state;
+
+	if (!hs->loaded)
+		return;
+
+	hs->loaded = 0;
+	if (hs->fs_gs_ldt_reload_needed) {
+		load_ldt(hs->ldt_sel);
+		load_fs(hs->fs_sel);
+		/*
+		 * If we have to reload gs, we must take care to
+		 * preserve our gs base.
+		 */
+		local_irq_disable();
+		load_gs(hs->gs_sel);
+#ifdef CONFIG_X86_64
+		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+#endif
+		local_irq_enable();
+
+		reload_tss();
+	}
+	save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+	load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);
+	if (msr_efer_need_save_restore(vcpu))
+		load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1);
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
@@ -242,6 +380,7 @@
 {
 	u64 phys_addr = __pa(vcpu->vmcs);
 	int cpu;
+	u64 tsc_this, delta;
 
 	cpu = get_cpu();
 
@@ -275,15 +414,43 @@
 
 		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
 		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+
+		/*
+		 * Make sure the time stamp counter is monotonous.
+		 */
+		rdtscll(tsc_this);
+		delta = vcpu->host_tsc - tsc_this;
+		vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
 	}
 }
 
 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	vmx_load_host_state(vcpu);
 	kvm_put_guest_fpu(vcpu);
 	put_cpu();
 }
 
+static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->fpu_active)
+		return;
+	vcpu->fpu_active = 1;
+	vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+	if (vcpu->cr0 & CR0_TS_MASK)
+		vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
+	update_exception_bitmap(vcpu);
+}
+
+static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
+{
+	if (!vcpu->fpu_active)
+		return;
+	vcpu->fpu_active = 0;
+	vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
+	update_exception_bitmap(vcpu);
+}
+
 static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
 {
 	vcpu_clear(vcpu);
@@ -332,41 +499,61 @@
 }
 
 /*
+ * Swap MSR entry in host/guest MSR entry array.
+ */
+void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
+{
+	struct vmx_msr_entry tmp;
+	tmp = vcpu->guest_msrs[to];
+	vcpu->guest_msrs[to] = vcpu->guest_msrs[from];
+	vcpu->guest_msrs[from] = tmp;
+	tmp = vcpu->host_msrs[to];
+	vcpu->host_msrs[to] = vcpu->host_msrs[from];
+	vcpu->host_msrs[from] = tmp;
+}
+
+/*
  * Set up the vmcs to automatically save and restore system
  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
  * mode, as fiddling with msrs is very expensive.
  */
 static void setup_msrs(struct kvm_vcpu *vcpu)
 {
-	int nr_skip, nr_good_msrs;
+	int save_nmsrs;
 
-	if (is_long_mode(vcpu))
-		nr_skip = NR_BAD_MSRS;
-	else
-		nr_skip = NR_64BIT_MSRS;
-	nr_good_msrs = vcpu->nmsrs - nr_skip;
-
-	/*
-	 * MSR_K6_STAR is only needed on long mode guests, and only
-	 * if efer.sce is enabled.
-	 */
-	if (find_msr_entry(vcpu, MSR_K6_STAR)) {
-		--nr_good_msrs;
+	save_nmsrs = 0;
 #ifdef CONFIG_X86_64
-		if (is_long_mode(vcpu) && (vcpu->shadow_efer & EFER_SCE))
-			++nr_good_msrs;
-#endif
-	}
+	if (is_long_mode(vcpu)) {
+		int index;
 
-	vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
-		    virt_to_phys(vcpu->guest_msrs + nr_skip));
-	vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
-		    virt_to_phys(vcpu->guest_msrs + nr_skip));
-	vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
-		    virt_to_phys(vcpu->host_msrs + nr_skip));
-	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs);  /* 22.2.2 */
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
+		index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
+		if (index >= 0)
+			move_msr_up(vcpu, index, save_nmsrs++);
+		index = __find_msr_index(vcpu, MSR_LSTAR);
+		if (index >= 0)
+			move_msr_up(vcpu, index, save_nmsrs++);
+		index = __find_msr_index(vcpu, MSR_CSTAR);
+		if (index >= 0)
+			move_msr_up(vcpu, index, save_nmsrs++);
+		index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+		if (index >= 0)
+			move_msr_up(vcpu, index, save_nmsrs++);
+		/*
+		 * MSR_K6_STAR is only needed on long mode guests, and only
+		 * if efer.sce is enabled.
+		 */
+		index = __find_msr_index(vcpu, MSR_K6_STAR);
+		if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
+			move_msr_up(vcpu, index, save_nmsrs++);
+	}
+#endif
+	vcpu->save_nmsrs = save_nmsrs;
+
+#ifdef CONFIG_X86_64
+	vcpu->msr_offset_kernel_gs_base =
+		__find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+#endif
+	vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
 }
 
 /*
@@ -394,23 +581,6 @@
 	vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
 }
 
-static void reload_tss(void)
-{
-#ifndef CONFIG_X86_64
-
-	/*
-	 * VT restores TR but not its size.  Useless.
-	 */
-	struct descriptor_table gdt;
-	struct segment_descriptor *descs;
-
-	get_gdt(&gdt);
-	descs = (void *)gdt.base;
-	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
-	load_TR_desc();
-#endif
-}
-
 /*
  * Reads an msr value (of 'msr_index') into 'pdata'.
  * Returns 0 on success, non-0 otherwise.
@@ -470,10 +640,15 @@
 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
 	struct vmx_msr_entry *msr;
+	int ret = 0;
+
 	switch (msr_index) {
 #ifdef CONFIG_X86_64
 	case MSR_EFER:
-		return kvm_set_msr_common(vcpu, msr_index, data);
+		ret = kvm_set_msr_common(vcpu, msr_index, data);
+		if (vcpu->vmx_host_state.loaded)
+			load_transition_efer(vcpu);
+		break;
 	case MSR_FS_BASE:
 		vmcs_writel(GUEST_FS_BASE, data);
 		break;
@@ -497,14 +672,14 @@
 		msr = find_msr_entry(vcpu, msr_index);
 		if (msr) {
 			msr->data = data;
+			if (vcpu->vmx_host_state.loaded)
+				load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
 			break;
 		}
-		return kvm_set_msr_common(vcpu, msr_index, data);
-		msr->data = data;
-		break;
+		ret = kvm_set_msr_common(vcpu, msr_index, data);
 	}
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -530,10 +705,8 @@
 static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
 {
 	unsigned long dr7 = 0x400;
-	u32 exception_bitmap;
 	int old_singlestep;
 
-	exception_bitmap = vmcs_read32(EXCEPTION_BITMAP);
 	old_singlestep = vcpu->guest_debug.singlestep;
 
 	vcpu->guest_debug.enabled = dbg->enabled;
@@ -549,13 +722,9 @@
 			dr7 |= 0 << (i*4+16); /* execution breakpoint */
 		}
 
-		exception_bitmap |= (1u << 1);  /* Trap debug exceptions */
-
 		vcpu->guest_debug.singlestep = dbg->singlestep;
-	} else {
-		exception_bitmap &= ~(1u << 1); /* Ignore debug exceptions */
+	} else
 		vcpu->guest_debug.singlestep = 0;
-	}
 
 	if (old_singlestep && !vcpu->guest_debug.singlestep) {
 		unsigned long flags;
@@ -565,7 +734,7 @@
 		vmcs_writel(GUEST_RFLAGS, flags);
 	}
 
-	vmcs_write32(EXCEPTION_BITMAP, exception_bitmap);
+	update_exception_bitmap(vcpu);
 	vmcs_writel(GUEST_DR7, dr7);
 
 	return 0;
@@ -679,14 +848,6 @@
 	free_kvm_area();
 }
 
-static void update_exception_bitmap(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->rmode.active)
-		vmcs_write32(EXCEPTION_BITMAP, ~0);
-	else
-		vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
-}
-
 static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
 {
 	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -793,6 +954,8 @@
 	fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
 	fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
 	fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
+
+	init_rmode_tss(vcpu->kvm);
 }
 
 #ifdef CONFIG_X86_64
@@ -837,6 +1000,8 @@
 
 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
+	vmx_fpu_deactivate(vcpu);
+
 	if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
 		enter_pmode(vcpu);
 
@@ -852,26 +1017,20 @@
 	}
 #endif
 
-	if (!(cr0 & CR0_TS_MASK)) {
-		vcpu->fpu_active = 1;
-		vmcs_clear_bits(EXCEPTION_BITMAP, CR0_TS_MASK);
-	}
-
 	vmcs_writel(CR0_READ_SHADOW, cr0);
 	vmcs_writel(GUEST_CR0,
 		    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
 	vcpu->cr0 = cr0;
+
+	if (!(cr0 & CR0_TS_MASK) || !(cr0 & CR0_PE_MASK))
+		vmx_fpu_activate(vcpu);
 }
 
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	vmcs_writel(GUEST_CR3, cr3);
-
-	if (!(vcpu->cr0 & CR0_TS_MASK)) {
-		vcpu->fpu_active = 0;
-		vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
-		vmcs_set_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
-	}
+	if (vcpu->cr0 & CR0_PE_MASK)
+		vmx_fpu_deactivate(vcpu);
 }
 
 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -937,23 +1096,11 @@
 	var->unusable = (ar >> 16) & 1;
 }
 
-static void vmx_set_segment(struct kvm_vcpu *vcpu,
-			    struct kvm_segment *var, int seg)
+static u32 vmx_segment_access_rights(struct kvm_segment *var)
 {
-	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
 	u32 ar;
 
-	vmcs_writel(sf->base, var->base);
-	vmcs_write32(sf->limit, var->limit);
-	vmcs_write16(sf->selector, var->selector);
-	if (vcpu->rmode.active && var->s) {
-		/*
-		 * Hack real-mode segments into vm86 compatibility.
-		 */
-		if (var->base == 0xffff0000 && var->selector == 0xf000)
-			vmcs_writel(sf->base, 0xf0000);
-		ar = 0xf3;
-	} else if (var->unusable)
+	if (var->unusable)
 		ar = 1 << 16;
 	else {
 		ar = var->type & 15;
@@ -967,6 +1114,35 @@
 	}
 	if (ar == 0) /* a 0 value means unusable */
 		ar = AR_UNUSABLE_MASK;
+
+	return ar;
+}
+
+static void vmx_set_segment(struct kvm_vcpu *vcpu,
+			    struct kvm_segment *var, int seg)
+{
+	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+	u32 ar;
+
+	if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
+		vcpu->rmode.tr.selector = var->selector;
+		vcpu->rmode.tr.base = var->base;
+		vcpu->rmode.tr.limit = var->limit;
+		vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
+		return;
+	}
+	vmcs_writel(sf->base, var->base);
+	vmcs_write32(sf->limit, var->limit);
+	vmcs_write16(sf->selector, var->selector);
+	if (vcpu->rmode.active && var->s) {
+		/*
+		 * Hack real-mode segments into vm86 compatibility.
+		 */
+		if (var->base == 0xffff0000 && var->selector == 0xf000)
+			vmcs_writel(sf->base, 0xf0000);
+		ar = 0xf3;
+	} else
+		ar = vmx_segment_access_rights(var);
 	vmcs_write32(sf->ar_bytes, ar);
 }
 
@@ -1018,16 +1194,16 @@
 	}
 
 	page = kmap_atomic(p1, KM_USER0);
-	memset(page, 0, PAGE_SIZE);
+	clear_page(page);
 	*(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
 	kunmap_atomic(page, KM_USER0);
 
 	page = kmap_atomic(p2, KM_USER0);
-	memset(page, 0, PAGE_SIZE);
+	clear_page(page);
 	kunmap_atomic(page, KM_USER0);
 
 	page = kmap_atomic(p3, KM_USER0);
-	memset(page, 0, PAGE_SIZE);
+	clear_page(page);
 	*(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
 	kunmap_atomic(page, KM_USER0);
 
@@ -1066,7 +1242,7 @@
 	struct descriptor_table dt;
 	int i;
 	int ret = 0;
-	extern asmlinkage void kvm_vmx_return(void);
+	unsigned long kvm_vmx_return;
 
 	if (!init_rmode_tss(vcpu->kvm)) {
 		ret = -ENOMEM;
@@ -1076,9 +1252,9 @@
 	memset(vcpu->regs, 0, sizeof(vcpu->regs));
 	vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
 	vcpu->cr8 = 0;
-	vcpu->apic_base = 0xfee00000 |
-			/*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
-			MSR_IA32_APICBASE_ENABLE;
+	vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+	if (vcpu == &vcpu->kvm->vcpus[0])
+		vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
 
 	fx_init(vcpu);
 
@@ -1129,8 +1305,8 @@
 	vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
 
 	/* I/O */
-	vmcs_write64(IO_BITMAP_A, 0);
-	vmcs_write64(IO_BITMAP_B, 0);
+	vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
+	vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
 
 	guest_write_tsc(0);
 
@@ -1150,12 +1326,11 @@
 			       CPU_BASED_HLT_EXITING         /* 20.6.2 */
 			       | CPU_BASED_CR8_LOAD_EXITING    /* 20.6.2 */
 			       | CPU_BASED_CR8_STORE_EXITING   /* 20.6.2 */
-			       | CPU_BASED_UNCOND_IO_EXITING   /* 20.6.2 */
+			       | CPU_BASED_ACTIVATE_IO_BITMAP  /* 20.6.2 */
 			       | CPU_BASED_MOV_DR_EXITING
 			       | CPU_BASED_USE_TSC_OFFSETING   /* 21.3 */
 			);
 
-	vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
 	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
 	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
 	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
@@ -1185,8 +1360,11 @@
 	get_idt(&dt);
 	vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
 
-
-	vmcs_writel(HOST_RIP, (unsigned long)kvm_vmx_return); /* 22.2.5 */
+	asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
+	vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
+	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
 
 	rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
 	vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
@@ -1210,10 +1388,6 @@
 		vcpu->host_msrs[j].reserved = 0;
 		vcpu->host_msrs[j].data = data;
 		vcpu->guest_msrs[j] = vcpu->host_msrs[j];
-#ifdef CONFIG_X86_64
-		if (index == MSR_KERNEL_GS_BASE)
-			msr_offset_kernel_gs_base = j;
-#endif
 		++vcpu->nmsrs;
 	}
 
@@ -1241,6 +1415,8 @@
 #ifdef CONFIG_X86_64
 	vmx_set_efer(vcpu, 0);
 #endif
+	vmx_fpu_activate(vcpu);
+	update_exception_bitmap(vcpu);
 
 	return 0;
 
@@ -1365,7 +1541,11 @@
 	if (!vcpu->rmode.active)
 		return 0;
 
-	if (vec == GP_VECTOR && err_code == 0)
+	/*
+	 * Instruction with address size override prefix opcode 0x67
+	 * Cause the #SS fault with 0 error code in VM86 mode.
+	 */
+	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
 		if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
 			return 1;
 	return 0;
@@ -1400,10 +1580,7 @@
 	}
 
 	if (is_no_device(intr_info)) {
-		vcpu->fpu_active = 1;
-		vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
-		if (!(vcpu->cr0 & CR0_TS_MASK))
-			vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+		vmx_fpu_activate(vcpu);
 		return 1;
 	}
 
@@ -1445,8 +1622,13 @@
 
 	if (vcpu->rmode.active &&
 	    handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
-								error_code))
+								error_code)) {
+		if (vcpu->halt_request) {
+			vcpu->halt_request = 0;
+			return kvm_emulate_halt(vcpu);
+		}
 		return 1;
+	}
 
 	if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
@@ -1595,11 +1777,10 @@
 		break;
 	case 2: /* clts */
 		vcpu_load_rsp_rip(vcpu);
-		vcpu->fpu_active = 1;
-		vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
-		vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+		vmx_fpu_deactivate(vcpu);
 		vcpu->cr0 &= ~CR0_TS_MASK;
 		vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
+		vmx_fpu_activate(vcpu);
 		skip_emulated_instruction(vcpu);
 		return 1;
 	case 1: /*mov from cr*/
@@ -1734,12 +1915,7 @@
 static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	skip_emulated_instruction(vcpu);
-	if (vcpu->irq_summary)
-		return 1;
-
-	kvm_run->exit_reason = KVM_EXIT_HLT;
-	++vcpu->stat.halt_exits;
-	return 0;
+	return kvm_emulate_halt(vcpu);
 }
 
 static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -1770,7 +1946,7 @@
 };
 
 static const int kvm_vmx_max_exit_handlers =
-	sizeof(kvm_vmx_exit_handlers) / sizeof(*kvm_vmx_exit_handlers);
+	ARRAY_SIZE(kvm_vmx_exit_handlers);
 
 /*
  * The guest has exited.  See if we can fix it or if we need userspace
@@ -1810,61 +1986,44 @@
 		(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
 }
 
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+{
+}
+
 static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	u8 fail;
-	u16 fs_sel, gs_sel, ldt_sel;
-	int fs_gs_ldt_reload_needed;
 	int r;
 
-again:
-	/*
-	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
-	 * allow segment selectors with cpl > 0 or ti == 1.
-	 */
-	fs_sel = read_fs();
-	gs_sel = read_gs();
-	ldt_sel = read_ldt();
-	fs_gs_ldt_reload_needed = (fs_sel & 7) | (gs_sel & 7) | ldt_sel;
-	if (!fs_gs_ldt_reload_needed) {
-		vmcs_write16(HOST_FS_SELECTOR, fs_sel);
-		vmcs_write16(HOST_GS_SELECTOR, gs_sel);
-	} else {
-		vmcs_write16(HOST_FS_SELECTOR, 0);
-		vmcs_write16(HOST_GS_SELECTOR, 0);
-	}
-
-#ifdef CONFIG_X86_64
-	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
-	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
-#else
-	vmcs_writel(HOST_FS_BASE, segment_base(fs_sel));
-	vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
-#endif
-
-	if (!vcpu->mmio_read_completed)
-		do_interrupt_requests(vcpu, kvm_run);
-
+preempted:
 	if (vcpu->guest_debug.enabled)
 		kvm_guest_debug_pre(vcpu);
 
+again:
+	if (!vcpu->mmio_read_completed)
+		do_interrupt_requests(vcpu, kvm_run);
+
+	vmx_save_host_state(vcpu);
 	kvm_load_guest_fpu(vcpu);
 
+	r = kvm_mmu_reload(vcpu);
+	if (unlikely(r))
+		goto out;
+
 	/*
 	 * Loading guest fpu may have cleared host cr0.ts
 	 */
 	vmcs_writel(HOST_CR0, read_cr0());
 
-#ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
-		save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1);
-		load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-	}
-#endif
+	local_irq_disable();
+
+	vcpu->guest_mode = 1;
+	if (vcpu->requests)
+		if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
+		    vmx_flush_tlb(vcpu);
 
 	asm (
 		/* Store host registers */
-		"pushf \n\t"
 #ifdef CONFIG_X86_64
 		"push %%rax; push %%rbx; push %%rdx;"
 		"push %%rsi; push %%rdi; push %%rbp;"
@@ -1909,12 +2068,11 @@
 		"mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
 #endif
 		/* Enter guest mode */
-		"jne launched \n\t"
+		"jne .Llaunched \n\t"
 		ASM_VMX_VMLAUNCH "\n\t"
-		"jmp kvm_vmx_return \n\t"
-		"launched: " ASM_VMX_VMRESUME "\n\t"
-		".globl kvm_vmx_return \n\t"
-		"kvm_vmx_return: "
+		"jmp .Lkvm_vmx_return \n\t"
+		".Llaunched: " ASM_VMX_VMRESUME "\n\t"
+		".Lkvm_vmx_return: "
 		/* Save guest registers, load host registers, keep flags */
 #ifdef CONFIG_X86_64
 		"xchg %3,     (%%rsp) \n\t"
@@ -1957,7 +2115,6 @@
 		"pop %%ecx; popa \n\t"
 #endif
 		"setbe %0 \n\t"
-		"popf \n\t"
 	      : "=q" (fail)
 	      : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
 		"c"(vcpu),
@@ -1981,84 +2138,61 @@
 		[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
 	      : "cc", "memory" );
 
-	/*
-	 * Reload segment selectors ASAP. (it's needed for a functional
-	 * kernel: x86 relies on having __KERNEL_PDA in %fs and x86_64
-	 * relies on having 0 in %gs for the CPU PDA to work.)
-	 */
-	if (fs_gs_ldt_reload_needed) {
-		load_ldt(ldt_sel);
-		load_fs(fs_sel);
-		/*
-		 * If we have to reload gs, we must take care to
-		 * preserve our gs base.
-		 */
-		local_irq_disable();
-		load_gs(gs_sel);
-#ifdef CONFIG_X86_64
-		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
-#endif
-		local_irq_enable();
+	vcpu->guest_mode = 0;
+	local_irq_enable();
 
-		reload_tss();
-	}
 	++vcpu->stat.exits;
 
-#ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
-		save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-		load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
-	}
-#endif
-
 	vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
 
 	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
 
-	if (fail) {
+	if (unlikely(fail)) {
 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 		kvm_run->fail_entry.hardware_entry_failure_reason
 			= vmcs_read32(VM_INSTRUCTION_ERROR);
 		r = 0;
-	} else {
-		/*
-		 * Profile KVM exit RIPs:
-		 */
-		if (unlikely(prof_on == KVM_PROFILING))
-			profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
+		goto out;
+	}
+	/*
+	 * Profile KVM exit RIPs:
+	 */
+	if (unlikely(prof_on == KVM_PROFILING))
+		profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
 
-		vcpu->launched = 1;
-		r = kvm_handle_exit(kvm_run, vcpu);
-		if (r > 0) {
-			/* Give scheduler a change to reschedule. */
-			if (signal_pending(current)) {
-				++vcpu->stat.signal_exits;
-				post_kvm_run_save(vcpu, kvm_run);
-				kvm_run->exit_reason = KVM_EXIT_INTR;
-				return -EINTR;
-			}
+	vcpu->launched = 1;
+	r = kvm_handle_exit(kvm_run, vcpu);
+	if (r > 0) {
+		/* Give scheduler a change to reschedule. */
+		if (signal_pending(current)) {
+			r = -EINTR;
+			kvm_run->exit_reason = KVM_EXIT_INTR;
+			++vcpu->stat.signal_exits;
+			goto out;
+		}
 
-			if (dm_request_for_irq_injection(vcpu, kvm_run)) {
-				++vcpu->stat.request_irq_exits;
-				post_kvm_run_save(vcpu, kvm_run);
-				kvm_run->exit_reason = KVM_EXIT_INTR;
-				return -EINTR;
-			}
-
-			kvm_resched(vcpu);
+		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+			r = -EINTR;
+			kvm_run->exit_reason = KVM_EXIT_INTR;
+			++vcpu->stat.request_irq_exits;
+			goto out;
+		}
+		if (!need_resched()) {
+			++vcpu->stat.light_exits;
 			goto again;
 		}
 	}
 
+out:
+	if (r > 0) {
+		kvm_resched(vcpu);
+		goto preempted;
+	}
+
 	post_kvm_run_save(vcpu, kvm_run);
 	return r;
 }
 
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
-{
-	vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
-}
-
 static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
 				  unsigned long addr,
 				  u32 err_code)
@@ -2122,7 +2256,6 @@
 	vmcs_clear(vmcs);
 	vcpu->vmcs = vmcs;
 	vcpu->launched = 0;
-	vcpu->fpu_active = 1;
 
 	return 0;
 
@@ -2188,11 +2321,50 @@
 
 static int __init vmx_init(void)
 {
-	return kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
+	void *iova;
+	int r;
+
+	vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+	if (!vmx_io_bitmap_a)
+		return -ENOMEM;
+
+	vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+	if (!vmx_io_bitmap_b) {
+		r = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * Allow direct access to the PC debug port (it is often used for I/O
+	 * delays, but the vmexits simply slow things down).
+	 */
+	iova = kmap(vmx_io_bitmap_a);
+	memset(iova, 0xff, PAGE_SIZE);
+	clear_bit(0x80, iova);
+	kunmap(vmx_io_bitmap_a);
+
+	iova = kmap(vmx_io_bitmap_b);
+	memset(iova, 0xff, PAGE_SIZE);
+	kunmap(vmx_io_bitmap_b);
+
+	r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
+	if (r)
+		goto out1;
+
+	return 0;
+
+out1:
+	__free_page(vmx_io_bitmap_b);
+out:
+	__free_page(vmx_io_bitmap_a);
+	return r;
 }
 
 static void __exit vmx_exit(void)
 {
+	__free_page(vmx_io_bitmap_b);
+	__free_page(vmx_io_bitmap_a);
+
 	kvm_exit_arch();
 }
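
The vmx_init() hunk above allocates two I/O-bitmap pages, fills them with 0xff (so every port access exits to the host) and then clears the bit for port 0x80, keeping guest I/O delays on the PC debug port cheap. A minimal sketch of that one-bit-per-port convention, with invented names rather than KVM's:

#include <stdint.h>
#include <string.h>

#define IO_BITMAP_BYTES	4096	/* one page covers ports 0x0000-0x7fff */

/* Set bit => access causes a VM exit; clear bit => the guest touches the
 * port directly.  Mirrors the memset()/clear_bit() sequence above. */
static void fill_io_bitmap_a(uint8_t bitmap[IO_BITMAP_BYTES])
{
	memset(bitmap, 0xff, IO_BITMAP_BYTES);		  /* intercept all ports */
	bitmap[0x80 / 8] &= (uint8_t)~(1u << (0x80 % 8)); /* pass through 0x80   */
}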
 
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index 7ade090..f60012d 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -98,8 +98,11 @@
 	0, 0, 0, 0,
 	/* 0x40 - 0x4F */
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	/* 0x50 - 0x5F */
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	/* 0x50 - 0x57 */
+	0, 0, 0, 0, 0, 0, 0, 0,
+	/* 0x58 - 0x5F */
+	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+	ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
 	/* 0x60 - 0x6F */
 	0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -128,9 +131,9 @@
 	/* 0xB0 - 0xBF */
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	/* 0xC0 - 0xC7 */
-	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, 0, 0,
-	0, 0, ByteOp | DstMem | SrcImm | ModRM | Mov,
-	    DstMem | SrcImm | ModRM | Mov,
+	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
+	0, ImplicitOps, 0, 0,
+	ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
 	/* 0xC8 - 0xCF */
 	0, 0, 0, 0, 0, 0, 0, 0,
 	/* 0xD0 - 0xD7 */
@@ -143,7 +146,8 @@
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	/* 0xF0 - 0xF7 */
 	0, 0, 0, 0,
-	0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
+	ImplicitOps, 0,
+	ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
 	/* 0xF8 - 0xFF */
 	0, 0, 0, 0,
 	0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM
@@ -152,7 +156,7 @@
 static u16 twobyte_table[256] = {
 	/* 0x00 - 0x0F */
 	0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0,
-	0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
+	0, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
 	/* 0x10 - 0x1F */
 	0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
 	/* 0x20 - 0x2F */
@@ -481,6 +485,7 @@
 	int mode = ctxt->mode;
 	unsigned long modrm_ea;
 	int use_modrm_ea, index_reg = 0, base_reg = 0, scale, rip_relative = 0;
+	int no_wb = 0;
 
 	/* Shadow copy of register state. Committed on successful emulation. */
 	unsigned long _regs[NR_VCPU_REGS];
@@ -1047,7 +1052,7 @@
 						      _regs[VCPU_REGS_RSP]),
 				     &dst.val, dst.bytes, ctxt)) != 0)
 				goto done;
-			dst.val = dst.orig_val;	/* skanky: disable writeback */
+			no_wb = 1;
 			break;
 		default:
 			goto cannot_emulate;
@@ -1056,7 +1061,7 @@
 	}
 
 writeback:
-	if ((d & Mov) || (dst.orig_val != dst.val)) {
+	if (!no_wb) {
 		switch (dst.type) {
 		case OP_REG:
 			/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
@@ -1149,6 +1154,23 @@
 	case 0xae ... 0xaf:	/* scas */
 		DPRINTF("Urk! I don't handle SCAS.\n");
 		goto cannot_emulate;
+	case 0xf4:              /* hlt */
+		ctxt->vcpu->halt_request = 1;
+		goto done;
+	case 0xc3: /* ret */
+		dst.ptr = &_eip;
+		goto pop_instruction;
+	case 0x58 ... 0x5f: /* pop reg */
+		dst.ptr = (unsigned long *)&_regs[b & 0x7];
+
+pop_instruction:
+		if ((rc = ops->read_std(register_address(ctxt->ss_base,
+			_regs[VCPU_REGS_RSP]), dst.ptr, op_bytes, ctxt)) != 0)
+			goto done;
+
+		register_address_increment(_regs[VCPU_REGS_RSP], op_bytes);
+		no_wb = 1; /* Disable writeback. */
+		break;
 	}
 	goto writeback;
 
@@ -1302,8 +1324,10 @@
 
 twobyte_special_insn:
 	/* Disable writeback. */
-	dst.orig_val = dst.val;
+	no_wb = 1;
 	switch (b) {
+	case 0x09:		/* wbinvd */
+		break;
 	case 0x0d:		/* GrpP (prefetch) */
 	case 0x18:		/* Grp16 (prefetch/nop) */
 		break;
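
The emulator hunks above teach x86_emulate about hlt, ret, pop reg and wbinvd, and replace the old "restore dst.orig_val" trick with an explicit no_wb flag. A rough sketch of the pop path under assumed names (read_stack() and a flat regs[] array are inventions, not the emulator's real interface): the value is read from the guest stack, RSP advances, and generic writeback is skipped because the destination was already written.

/* Illustrative only; RSP is assumed to live at regs[4] as on x86. */
static int sketch_pop_reg(unsigned long *regs, unsigned int reg, int op_bytes,
			  int (*read_stack)(unsigned long rsp, void *dst, int len))
{
	unsigned long val = 0;

	if (read_stack(regs[4], &val, op_bytes) != 0)
		return -1;		/* propagate the guest memory fault     */
	regs[reg] = val;		/* destination register written here... */
	regs[4] += op_bytes;		/* ...stack pointer moves past the slot */
	return 0;			/* so no_wb suppresses the writeback    */
}
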
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
new file mode 100644
index 0000000..43d901f
--- /dev/null
+++ b/drivers/lguest/Kconfig
@@ -0,0 +1,20 @@
+config LGUEST
+	tristate "Linux hypervisor example code"
+	depends on X86 && PARAVIRT && NET && EXPERIMENTAL && !X86_PAE
+	select LGUEST_GUEST
+	select HVC_DRIVER
+	---help---
+	  This is a very simple module which allows you to run
+	  multiple instances of the same Linux kernel, using the
+	  "lguest" command found in the Documentation/lguest directory.
+	  Note that "lguest" is pronounced to rhyme with "fell quest",
+	  not "rustyvisor".  See Documentation/lguest/lguest.txt.
+
+	  If unsure, say N.  If curious, say M.  If masochistic, say Y.
+
+config LGUEST_GUEST
+	bool
+	help
+	  The guest needs code built-in, even if the host has lguest
+	  support as a module.  The drivers are tiny, so we build them
+	  in too.
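
With host support built as a module, this Kconfig would typically leave a .config fragment like the one below; LGUEST_GUEST and HVC_DRIVER are forced on by the select statements:

CONFIG_LGUEST=m
CONFIG_LGUEST_GUEST=y
CONFIG_HVC_DRIVER=y
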
diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile
new file mode 100644
index 0000000..55382c7
--- /dev/null
+++ b/drivers/lguest/Makefile
@@ -0,0 +1,7 @@
+# Guest requires the paravirt_ops replacement and the bus driver.
+obj-$(CONFIG_LGUEST_GUEST) += lguest.o lguest_asm.o lguest_bus.o
+
+# Host requires the other files, which can be a module.
+obj-$(CONFIG_LGUEST)	+= lg.o
+lg-y := core.o hypercalls.o page_tables.o interrupts_and_traps.o \
+	segments.o io.o lguest_user.o switcher.o
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
new file mode 100644
index 0000000..ce909ec
--- /dev/null
+++ b/drivers/lguest/core.c
@@ -0,0 +1,462 @@
+/* World's simplest hypervisor, to test paravirt_ops and show
+ * unbelievers that virtualization is the future.  Plus, it's fun! */
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include <linux/stddef.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/cpu.h>
+#include <linux/freezer.h>
+#include <asm/paravirt.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/poll.h>
+#include <asm/highmem.h>
+#include <asm/asm-offsets.h>
+#include <asm/i387.h>
+#include "lg.h"
+
+/* Found in switcher.S */
+extern char start_switcher_text[], end_switcher_text[], switch_to_guest[];
+extern unsigned long default_idt_entries[];
+
+/* Every guest maps the core switcher code. */
+#define SHARED_SWITCHER_PAGES \
+	DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
+/* Pages for switcher itself, then two pages per cpu */
+#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)
+
+/* We map at -4M for ease of mapping into the guest (one PTE page). */
+#define SWITCHER_ADDR 0xFFC00000
+
+static struct vm_struct *switcher_vma;
+static struct page **switcher_page;
+
+static int cpu_had_pge;
+static struct {
+	unsigned long offset;
+	unsigned short segment;
+} lguest_entry;
+
+/* This One Big lock protects all inter-guest data structures. */
+DEFINE_MUTEX(lguest_lock);
+static DEFINE_PER_CPU(struct lguest *, last_guest);
+
+/* FIXME: Make dynamic. */
+#define MAX_LGUEST_GUESTS 16
+struct lguest lguests[MAX_LGUEST_GUESTS];
+
+/* Offset from where switcher.S was compiled to where we've copied it */
+static unsigned long switcher_offset(void)
+{
+	return SWITCHER_ADDR - (unsigned long)start_switcher_text;
+}
+
+/* This cpu's struct lguest_pages. */
+static struct lguest_pages *lguest_pages(unsigned int cpu)
+{
+	return &(((struct lguest_pages *)
+		  (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
+}
+
+static __init int map_switcher(void)
+{
+	int i, err;
+	struct page **pagep;
+
+	switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES,
+				GFP_KERNEL);
+	if (!switcher_page) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
+		unsigned long addr = get_zeroed_page(GFP_KERNEL);
+		if (!addr) {
+			err = -ENOMEM;
+			goto free_some_pages;
+		}
+		switcher_page[i] = virt_to_page(addr);
+	}
+
+	switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
+				       VM_ALLOC, SWITCHER_ADDR, VMALLOC_END);
+	if (!switcher_vma) {
+		err = -ENOMEM;
+		printk("lguest: could not map switcher pages high\n");
+		goto free_pages;
+	}
+
+	pagep = switcher_page;
+	err = map_vm_area(switcher_vma, PAGE_KERNEL, &pagep);
+	if (err) {
+		printk("lguest: map_vm_area failed: %i\n", err);
+		goto free_vma;
+	}
+	memcpy(switcher_vma->addr, start_switcher_text,
+	       end_switcher_text - start_switcher_text);
+
+	/* Fix up IDT entries to point into copied text. */
+	for (i = 0; i < IDT_ENTRIES; i++)
+		default_idt_entries[i] += switcher_offset();
+
+	for_each_possible_cpu(i) {
+		struct lguest_pages *pages = lguest_pages(i);
+		struct lguest_ro_state *state = &pages->state;
+
+		/* These fields are static: rest done in copy_in_guest_info */
+		state->host_gdt_desc.size = GDT_SIZE-1;
+		state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);
+		store_idt(&state->host_idt_desc);
+		state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
+		state->guest_idt_desc.address = (long)&state->guest_idt;
+		state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
+		state->guest_gdt_desc.address = (long)&state->guest_gdt;
+		state->guest_tss.esp0 = (long)(&pages->regs + 1);
+		state->guest_tss.ss0 = LGUEST_DS;
+		/* No I/O for you! */
+		state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);
+		setup_default_gdt_entries(state);
+		setup_default_idt_entries(state, default_idt_entries);
+
+		/* Setup LGUEST segments on all cpus */
+		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
+		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
+	}
+
+	/* Initialize entry point into switcher. */
+	lguest_entry.offset = (long)switch_to_guest + switcher_offset();
+	lguest_entry.segment = LGUEST_CS;
+
+	printk(KERN_INFO "lguest: mapped switcher at %p\n",
+	       switcher_vma->addr);
+	return 0;
+
+free_vma:
+	vunmap(switcher_vma->addr);
+free_pages:
+	i = TOTAL_SWITCHER_PAGES;
+free_some_pages:
+	for (--i; i >= 0; i--)
+		__free_pages(switcher_page[i], 0);
+	kfree(switcher_page);
+out:
+	return err;
+}
+
+static void unmap_switcher(void)
+{
+	unsigned int i;
+
+	vunmap(switcher_vma->addr);
+	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++)
+		__free_pages(switcher_page[i], 0);
+}
+
+/* IN/OUT insns: enough to get us past boot-time probing. */
+static int emulate_insn(struct lguest *lg)
+{
+	u8 insn;
+	unsigned int insnlen = 0, in = 0, shift = 0;
+	unsigned long physaddr = guest_pa(lg, lg->regs->eip);
+
+	/* This only works for addresses in linear mapping... */
+	if (lg->regs->eip < lg->page_offset)
+		return 0;
+	lgread(lg, &insn, physaddr, 1);
+
+	/* Operand size prefix means it's actually for ax. */
+	if (insn == 0x66) {
+		shift = 16;
+		insnlen = 1;
+		lgread(lg, &insn, physaddr + insnlen, 1);
+	}
+
+	switch (insn & 0xFE) {
+	case 0xE4: /* in     <next byte>,%al */
+		insnlen += 2;
+		in = 1;
+		break;
+	case 0xEC: /* in     (%dx),%al */
+		insnlen += 1;
+		in = 1;
+		break;
+	case 0xE6: /* out    %al,<next byte> */
+		insnlen += 2;
+		break;
+	case 0xEE: /* out    %al,(%dx) */
+		insnlen += 1;
+		break;
+	default:
+		return 0;
+	}
+
+	if (in) {
+		/* Lower bit tells us whether it's a 16 or 32 bit access */
+		if (insn & 0x1)
+			lg->regs->eax = 0xFFFFFFFF;
+		else
+			lg->regs->eax |= (0xFFFF << shift);
+	}
+	lg->regs->eip += insnlen;
+	return 1;
+}
+
+int lguest_address_ok(const struct lguest *lg,
+		      unsigned long addr, unsigned long len)
+{
+	return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
+}
+
+/* Just like get_user, but don't let guest access lguest binary. */
+u32 lgread_u32(struct lguest *lg, unsigned long addr)
+{
+	u32 val = 0;
+
+	/* Don't let them access lguest binary */
+	if (!lguest_address_ok(lg, addr, sizeof(val))
+	    || get_user(val, (u32 __user *)addr) != 0)
+		kill_guest(lg, "bad read address %#lx", addr);
+	return val;
+}
+
+void lgwrite_u32(struct lguest *lg, unsigned long addr, u32 val)
+{
+	if (!lguest_address_ok(lg, addr, sizeof(val))
+	    || put_user(val, (u32 __user *)addr) != 0)
+		kill_guest(lg, "bad write address %#lx", addr);
+}
+
+void lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes)
+{
+	if (!lguest_address_ok(lg, addr, bytes)
+	    || copy_from_user(b, (void __user *)addr, bytes) != 0) {
+		/* copy_from_user should do this, but as we rely on it... */
+		memset(b, 0, bytes);
+		kill_guest(lg, "bad read address %#lx len %u", addr, bytes);
+	}
+}
+
+void lgwrite(struct lguest *lg, unsigned long addr, const void *b,
+	     unsigned bytes)
+{
+	if (!lguest_address_ok(lg, addr, bytes)
+	    || copy_to_user((void __user *)addr, b, bytes) != 0)
+		kill_guest(lg, "bad write address %#lx len %u", addr, bytes);
+}
+
+static void set_ts(void)
+{
+	u32 cr0;
+
+	cr0 = read_cr0();
+	if (!(cr0 & 8))
+		write_cr0(cr0|8);
+}
+
+static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
+{
+	if (__get_cpu_var(last_guest) != lg || lg->last_pages != pages) {
+		__get_cpu_var(last_guest) = lg;
+		lg->last_pages = pages;
+		lg->changed = CHANGED_ALL;
+	}
+
+	/* These are pretty cheap, so we do them unconditionally. */
+	pages->state.host_cr3 = __pa(current->mm->pgd);
+	map_switcher_in_guest(lg, pages);
+	pages->state.guest_tss.esp1 = lg->esp1;
+	pages->state.guest_tss.ss1 = lg->ss1;
+
+	/* Copy direct trap entries. */
+	if (lg->changed & CHANGED_IDT)
+		copy_traps(lg, pages->state.guest_idt, default_idt_entries);
+
+	/* Copy all GDT entries but the TSS. */
+	if (lg->changed & CHANGED_GDT)
+		copy_gdt(lg, pages->state.guest_gdt);
+	/* If only the TLS entries have changed, copy them. */
+	else if (lg->changed & CHANGED_GDT_TLS)
+		copy_gdt_tls(lg, pages->state.guest_gdt);
+
+	lg->changed = 0;
+}
+
+static void run_guest_once(struct lguest *lg, struct lguest_pages *pages)
+{
+	unsigned int clobber;
+
+	copy_in_guest_info(lg, pages);
+
+	/* Put eflags on stack, lcall does rest: suitable for iret return. */
+	asm volatile("pushf; lcall *lguest_entry"
+		     : "=a"(clobber), "=b"(clobber)
+		     : "0"(pages), "1"(__pa(lg->pgdirs[lg->pgdidx].pgdir))
+		     : "memory", "%edx", "%ecx", "%edi", "%esi");
+}
+
+int run_guest(struct lguest *lg, unsigned long __user *user)
+{
+	while (!lg->dead) {
+		unsigned int cr2 = 0; /* Damn gcc */
+
+		/* Hypercalls first: we might have been out to userspace */
+		do_hypercalls(lg);
+		if (lg->dma_is_pending) {
+			if (put_user(lg->pending_dma, user) ||
+			    put_user(lg->pending_key, user+1))
+				return -EFAULT;
+			return sizeof(unsigned long)*2;
+		}
+
+		if (signal_pending(current))
+			return -ERESTARTSYS;
+
+		/* If Waker set break_out, return to Launcher. */
+		if (lg->break_out)
+			return -EAGAIN;
+
+		maybe_do_interrupt(lg);
+
+		try_to_freeze();
+
+		if (lg->dead)
+			break;
+
+		if (lg->halted) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule();
+			continue;
+		}
+
+		local_irq_disable();
+
+		/* Even if *we* don't want FPU trap, guest might... */
+		if (lg->ts)
+			set_ts();
+
+		/* Don't let Guest do SYSENTER: we can't handle it. */
+		if (boot_cpu_has(X86_FEATURE_SEP))
+			wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
+
+		run_guest_once(lg, lguest_pages(raw_smp_processor_id()));
+
+		/* Save cr2 now if we page-faulted. */
+		if (lg->regs->trapnum == 14)
+			cr2 = read_cr2();
+		else if (lg->regs->trapnum == 7)
+			math_state_restore();
+
+		if (boot_cpu_has(X86_FEATURE_SEP))
+			wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+		local_irq_enable();
+
+		switch (lg->regs->trapnum) {
+		case 13: /* We've intercepted a GPF. */
+			if (lg->regs->errcode == 0) {
+				if (emulate_insn(lg))
+					continue;
+			}
+			break;
+		case 14: /* We've intercepted a page fault. */
+			if (demand_page(lg, cr2, lg->regs->errcode))
+				continue;
+
+			/* If lguest_data is NULL, this won't hurt. */
+			if (put_user(cr2, &lg->lguest_data->cr2))
+				kill_guest(lg, "Writing cr2");
+			break;
+		case 7: /* We've intercepted a Device Not Available fault. */
+			/* If they don't want to know, just absorb it. */
+			if (!lg->ts)
+				continue;
+			break;
+		case 32 ... 255: /* Real interrupt, fall thru */
+			cond_resched();
+		case LGUEST_TRAP_ENTRY: /* Handled at top of loop */
+			continue;
+		}
+
+		if (deliver_trap(lg, lg->regs->trapnum))
+			continue;
+
+		kill_guest(lg, "unhandled trap %li at %#lx (%#lx)",
+			   lg->regs->trapnum, lg->regs->eip,
+			   lg->regs->trapnum == 14 ? cr2 : lg->regs->errcode);
+	}
+	return -ENOENT;
+}
+
+int find_free_guest(void)
+{
+	unsigned int i;
+	for (i = 0; i < MAX_LGUEST_GUESTS; i++)
+		if (!lguests[i].tsk)
+			return i;
+	return -1;
+}
+
+static void adjust_pge(void *on)
+{
+	if (on)
+		write_cr4(read_cr4() | X86_CR4_PGE);
+	else
+		write_cr4(read_cr4() & ~X86_CR4_PGE);
+}
+
+static int __init init(void)
+{
+	int err;
+
+	if (paravirt_enabled()) {
+		printk("lguest is afraid of %s\n", paravirt_ops.name);
+		return -EPERM;
+	}
+
+	err = map_switcher();
+	if (err)
+		return err;
+
+	err = init_pagetables(switcher_page, SHARED_SWITCHER_PAGES);
+	if (err) {
+		unmap_switcher();
+		return err;
+	}
+	lguest_io_init();
+
+	err = lguest_device_init();
+	if (err) {
+		free_pagetables();
+		unmap_switcher();
+		return err;
+	}
+	lock_cpu_hotplug();
+	if (cpu_has_pge) { /* We have a broader idea of "global". */
+		cpu_had_pge = 1;
+		on_each_cpu(adjust_pge, (void *)0, 0, 1);
+		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
+	}
+	unlock_cpu_hotplug();
+	return 0;
+}
+
+static void __exit fini(void)
+{
+	lguest_device_remove();
+	free_pagetables();
+	unmap_switcher();
+	lock_cpu_hotplug();
+	if (cpu_had_pge) {
+		set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
+		on_each_cpu(adjust_pge, (void *)1, 0, 1);
+	}
+	unlock_cpu_hotplug();
+}
+
+module_init(init);
+module_exit(fini);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
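
The SWITCHER_ADDR value used above is not arbitrary: mapping the switcher into the top 4 MB means a single page of PTEs covers it in every guest, which is what the "-4M ... one PTE page" comment alludes to. A quick worked check, assuming 32-bit x86 with 4 KiB pages:

  0x1_0000_0000 - 0xFFC0_0000 = 0x40_0000 bytes = 4 MiB
  4 MiB / 4 KiB per page      = 1024 pages to map
  1024 PTEs x 4 bytes each    = 4096 bytes = exactly one page-table page
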
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
new file mode 100644
index 0000000..ea52ca4
--- /dev/null
+++ b/drivers/lguest/hypercalls.c
@@ -0,0 +1,192 @@
+/*  Actual hypercalls, which allow guests to actually do something.
+    Copyright (C) 2006 Rusty Russell IBM Corporation
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
+*/
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <irq_vectors.h>
+#include "lg.h"
+
+static void do_hcall(struct lguest *lg, struct lguest_regs *regs)
+{
+	switch (regs->eax) {
+	case LHCALL_FLUSH_ASYNC:
+		break;
+	case LHCALL_LGUEST_INIT:
+		kill_guest(lg, "already have lguest_data");
+		break;
+	case LHCALL_CRASH: {
+		char msg[128];
+		lgread(lg, msg, regs->edx, sizeof(msg));
+		msg[sizeof(msg)-1] = '\0';
+		kill_guest(lg, "CRASH: %s", msg);
+		break;
+	}
+	case LHCALL_FLUSH_TLB:
+		if (regs->edx)
+			guest_pagetable_clear_all(lg);
+		else
+			guest_pagetable_flush_user(lg);
+		break;
+	case LHCALL_GET_WALLCLOCK: {
+		struct timespec ts;
+		ktime_get_real_ts(&ts);
+		regs->eax = ts.tv_sec;
+		break;
+	}
+	case LHCALL_BIND_DMA:
+		regs->eax = bind_dma(lg, regs->edx, regs->ebx,
+				     regs->ecx >> 8, regs->ecx & 0xFF);
+		break;
+	case LHCALL_SEND_DMA:
+		send_dma(lg, regs->edx, regs->ebx);
+		break;
+	case LHCALL_LOAD_GDT:
+		load_guest_gdt(lg, regs->edx, regs->ebx);
+		break;
+	case LHCALL_LOAD_IDT_ENTRY:
+		load_guest_idt_entry(lg, regs->edx, regs->ebx, regs->ecx);
+		break;
+	case LHCALL_NEW_PGTABLE:
+		guest_new_pagetable(lg, regs->edx);
+		break;
+	case LHCALL_SET_STACK:
+		guest_set_stack(lg, regs->edx, regs->ebx, regs->ecx);
+		break;
+	case LHCALL_SET_PTE:
+		guest_set_pte(lg, regs->edx, regs->ebx, mkgpte(regs->ecx));
+		break;
+	case LHCALL_SET_PMD:
+		guest_set_pmd(lg, regs->edx, regs->ebx);
+		break;
+	case LHCALL_LOAD_TLS:
+		guest_load_tls(lg, regs->edx);
+		break;
+	case LHCALL_SET_CLOCKEVENT:
+		guest_set_clockevent(lg, regs->edx);
+		break;
+	case LHCALL_TS:
+		lg->ts = regs->edx;
+		break;
+	case LHCALL_HALT:
+		lg->halted = 1;
+		break;
+	default:
+		kill_guest(lg, "Bad hypercall %li\n", regs->eax);
+	}
+}
+
+/* We always do queued calls before actual hypercall. */
+static void do_async_hcalls(struct lguest *lg)
+{
+	unsigned int i;
+	u8 st[LHCALL_RING_SIZE];
+
+	if (copy_from_user(&st, &lg->lguest_data->hcall_status, sizeof(st)))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(st); i++) {
+		struct lguest_regs regs;
+		unsigned int n = lg->next_hcall;
+
+		if (st[n] == 0xFF)
+			break;
+
+		if (++lg->next_hcall == LHCALL_RING_SIZE)
+			lg->next_hcall = 0;
+
+		if (get_user(regs.eax, &lg->lguest_data->hcalls[n].eax)
+		    || get_user(regs.edx, &lg->lguest_data->hcalls[n].edx)
+		    || get_user(regs.ecx, &lg->lguest_data->hcalls[n].ecx)
+		    || get_user(regs.ebx, &lg->lguest_data->hcalls[n].ebx)) {
+			kill_guest(lg, "Fetching async hypercalls");
+			break;
+		}
+
+		do_hcall(lg, &regs);
+		if (put_user(0xFF, &lg->lguest_data->hcall_status[n])) {
+			kill_guest(lg, "Writing result for async hypercall");
+			break;
+		}
+
+		if (lg->dma_is_pending)
+			break;
+	}
+}
+
+static void initialize(struct lguest *lg)
+{
+	u32 tsc_speed;
+
+	if (lg->regs->eax != LHCALL_LGUEST_INIT) {
+		kill_guest(lg, "hypercall %li before LGUEST_INIT",
+			   lg->regs->eax);
+		return;
+	}
+
+	/* We only tell the guest to use the TSC if it's reliable. */
+	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable())
+		tsc_speed = tsc_khz;
+	else
+		tsc_speed = 0;
+
+	lg->lguest_data = (struct lguest_data __user *)lg->regs->edx;
+	/* We check here so we can simply copy_to_user/from_user */
+	if (!lguest_address_ok(lg, lg->regs->edx, sizeof(*lg->lguest_data))) {
+		kill_guest(lg, "bad guest page %p", lg->lguest_data);
+		return;
+	}
+	if (get_user(lg->noirq_start, &lg->lguest_data->noirq_start)
+	    || get_user(lg->noirq_end, &lg->lguest_data->noirq_end)
+	    /* We reserve the top pgd entry. */
+	    || put_user(4U*1024*1024, &lg->lguest_data->reserve_mem)
+	    || put_user(tsc_speed, &lg->lguest_data->tsc_khz)
+	    || put_user(lg->guestid, &lg->lguest_data->guestid))
+		kill_guest(lg, "bad guest page %p", lg->lguest_data);
+
+	/* This is the one case where the above accesses might have
+	 * been the first write to a Guest page.  This may have caused
+	 * a copy-on-write fault, but the Guest might be referring to
+	 * the old (read-only) page. */
+	guest_pagetable_clear_all(lg);
+}
+
+/* Even if we go out to userspace and come back, we don't want to do
+ * the hypercall again. */
+static void clear_hcall(struct lguest *lg)
+{
+	lg->regs->trapnum = 255;
+}
+
+void do_hypercalls(struct lguest *lg)
+{
+	if (unlikely(!lg->lguest_data)) {
+		if (lg->regs->trapnum == LGUEST_TRAP_ENTRY) {
+			initialize(lg);
+			clear_hcall(lg);
+		}
+		return;
+	}
+
+	do_async_hcalls(lg);
+	if (!lg->dma_is_pending && lg->regs->trapnum == LGUEST_TRAP_ENTRY) {
+		do_hcall(lg, lg->regs);
+		clear_hcall(lg);
+	}
+}
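
do_async_hcalls() above consumes a small ring shared with the guest: a status byte of 0xFF means the slot is free, anything else means a call is queued, and the host writes 0xFF back once it has run the entry (the guest-side producer appears later in drivers/lguest/lguest.c as async_hcall()). A minimal sketch of the host side of that handshake, with invented names:

#define SLOT_FREE 0xFF			/* guest may refill this slot */

/* Illustrative only: returns 1 if a queued call was consumed. */
static int sketch_consume_slot(unsigned char *status, unsigned int n)
{
	if (status[n] == SLOT_FREE)
		return 0;		/* nothing queued here: stop scanning */
	/* ...fetch the hcalls[n] registers and dispatch the call here...   */
	status[n] = SLOT_FREE;		/* hand the slot back to the guest    */
	return 1;
}
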
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
new file mode 100644
index 0000000..d9de5bb
--- /dev/null
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -0,0 +1,268 @@
+#include <linux/uaccess.h>
+#include "lg.h"
+
+static unsigned long idt_address(u32 lo, u32 hi)
+{
+	return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
+}
+
+static int idt_type(u32 lo, u32 hi)
+{
+	return (hi >> 8) & 0xF;
+}
+
+static int idt_present(u32 lo, u32 hi)
+{
+	return (hi & 0x8000);
+}
+
+static void push_guest_stack(struct lguest *lg, unsigned long *gstack, u32 val)
+{
+	*gstack -= 4;
+	lgwrite_u32(lg, *gstack, val);
+}
+
+static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
+{
+	unsigned long gstack;
+	u32 eflags, ss, irq_enable;
+
+	/* If they want a ring change, we use new stack and push old ss/esp */
+	if ((lg->regs->ss&0x3) != GUEST_PL) {
+		gstack = guest_pa(lg, lg->esp1);
+		ss = lg->ss1;
+		push_guest_stack(lg, &gstack, lg->regs->ss);
+		push_guest_stack(lg, &gstack, lg->regs->esp);
+	} else {
+		gstack = guest_pa(lg, lg->regs->esp);
+		ss = lg->regs->ss;
+	}
+
+	/* We use IF bit in eflags to indicate whether irqs were disabled
+	   (it's always 0, since irqs are enabled when guest is running). */
+	eflags = lg->regs->eflags;
+	if (get_user(irq_enable, &lg->lguest_data->irq_enabled))
+		irq_enable = 0;
+	eflags |= (irq_enable & X86_EFLAGS_IF);
+
+	push_guest_stack(lg, &gstack, eflags);
+	push_guest_stack(lg, &gstack, lg->regs->cs);
+	push_guest_stack(lg, &gstack, lg->regs->eip);
+
+	if (has_err)
+		push_guest_stack(lg, &gstack, lg->regs->errcode);
+
+	/* Change the real stack so switcher returns to trap handler */
+	lg->regs->ss = ss;
+	lg->regs->esp = gstack + lg->page_offset;
+	lg->regs->cs = (__KERNEL_CS|GUEST_PL);
+	lg->regs->eip = idt_address(lo, hi);
+
+	/* Disable interrupts for an interrupt gate. */
+	if (idt_type(lo, hi) == 0xE)
+		if (put_user(0, &lg->lguest_data->irq_enabled))
+			kill_guest(lg, "Disabling interrupts");
+}
+
+void maybe_do_interrupt(struct lguest *lg)
+{
+	unsigned int irq;
+	DECLARE_BITMAP(blk, LGUEST_IRQS);
+	struct desc_struct *idt;
+
+	if (!lg->lguest_data)
+		return;
+
+	/* Mask out any interrupts they have blocked. */
+	if (copy_from_user(&blk, lg->lguest_data->blocked_interrupts,
+			   sizeof(blk)))
+		return;
+
+	bitmap_andnot(blk, lg->irqs_pending, blk, LGUEST_IRQS);
+
+	irq = find_first_bit(blk, LGUEST_IRQS);
+	if (irq >= LGUEST_IRQS)
+		return;
+
+	if (lg->regs->eip >= lg->noirq_start && lg->regs->eip < lg->noirq_end)
+		return;
+
+	/* If they're halted, we re-enable interrupts. */
+	if (lg->halted) {
+		/* Re-enable interrupts. */
+		if (put_user(X86_EFLAGS_IF, &lg->lguest_data->irq_enabled))
+			kill_guest(lg, "Re-enabling interrupts");
+		lg->halted = 0;
+	} else {
+		/* Maybe they have interrupts disabled? */
+		u32 irq_enabled;
+		if (get_user(irq_enabled, &lg->lguest_data->irq_enabled))
+			irq_enabled = 0;
+		if (!irq_enabled)
+			return;
+	}
+
+	idt = &lg->idt[FIRST_EXTERNAL_VECTOR+irq];
+	if (idt_present(idt->a, idt->b)) {
+		clear_bit(irq, lg->irqs_pending);
+		set_guest_interrupt(lg, idt->a, idt->b, 0);
+	}
+}
+
+static int has_err(unsigned int trap)
+{
+	return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17);
+}
+
+int deliver_trap(struct lguest *lg, unsigned int num)
+{
+	u32 lo = lg->idt[num].a, hi = lg->idt[num].b;
+
+	if (!idt_present(lo, hi))
+		return 0;
+	set_guest_interrupt(lg, lo, hi, has_err(num));
+	return 1;
+}
+
+static int direct_trap(const struct lguest *lg,
+		       const struct desc_struct *trap,
+		       unsigned int num)
+{
+	/* Hardware interrupts don't go to guest (except syscall). */
+	if (num >= FIRST_EXTERNAL_VECTOR && num != SYSCALL_VECTOR)
+		return 0;
+
+	/* We intercept page fault (demand shadow paging & cr2 saving),
+	   protection fault (in/out emulation) and device not
+	   available (TS handling), and hypercall */
+	if (num == 14 || num == 13 || num == 7 || num == LGUEST_TRAP_ENTRY)
+		return 0;
+
+	/* Interrupt gates (0xE) or not present (0x0) can't go direct. */
+	return idt_type(trap->a, trap->b) == 0xF;
+}
+
+void pin_stack_pages(struct lguest *lg)
+{
+	unsigned int i;
+
+	for (i = 0; i < lg->stack_pages; i++)
+		pin_page(lg, lg->esp1 - i * PAGE_SIZE);
+}
+
+void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages)
+{
+	/* You cannot have a stack segment with priv level 0. */
+	if ((seg & 0x3) != GUEST_PL)
+		kill_guest(lg, "bad stack segment %i", seg);
+	if (pages > 2)
+		kill_guest(lg, "bad stack pages %u", pages);
+	lg->ss1 = seg;
+	lg->esp1 = esp;
+	lg->stack_pages = pages;
+	pin_stack_pages(lg);
+}
+
+/* Set up trap in IDT. */
+static void set_trap(struct lguest *lg, struct desc_struct *trap,
+		     unsigned int num, u32 lo, u32 hi)
+{
+	u8 type = idt_type(lo, hi);
+
+	if (!idt_present(lo, hi)) {
+		trap->a = trap->b = 0;
+		return;
+	}
+
+	if (type != 0xE && type != 0xF)
+		kill_guest(lg, "bad IDT type %i", type);
+
+	trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF);
+	trap->b = (hi&0xFFFFEF00);
+}
+
+void load_guest_idt_entry(struct lguest *lg, unsigned int num, u32 lo, u32 hi)
+{
+	/* Guest never handles: NMI, doublefault, hypercall, spurious irq. */
+	if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY)
+		return;
+
+	lg->changed |= CHANGED_IDT;
+	if (num < ARRAY_SIZE(lg->idt))
+		set_trap(lg, &lg->idt[num], num, lo, hi);
+	else if (num == SYSCALL_VECTOR)
+		set_trap(lg, &lg->syscall_idt, num, lo, hi);
+}
+
+static void default_idt_entry(struct desc_struct *idt,
+			      int trap,
+			      const unsigned long handler)
+{
+	u32 flags = 0x8e00;
+
+	/* They can't "int" into any of them except hypercall. */
+	if (trap == LGUEST_TRAP_ENTRY)
+		flags |= (GUEST_PL << 13);
+
+	idt->a = (LGUEST_CS<<16) | (handler&0x0000FFFF);
+	idt->b = (handler&0xFFFF0000) | flags;
+}
+
+void setup_default_idt_entries(struct lguest_ro_state *state,
+			       const unsigned long *def)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(state->guest_idt); i++)
+		default_idt_entry(&state->guest_idt[i], i, def[i]);
+}
+
+void copy_traps(const struct lguest *lg, struct desc_struct *idt,
+		const unsigned long *def)
+{
+	unsigned int i;
+
+	/* All hardware interrupts are the same whatever the guest: only the
+	 * traps might be different. */
+	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) {
+		if (direct_trap(lg, &lg->idt[i], i))
+			idt[i] = lg->idt[i];
+		else
+			default_idt_entry(&idt[i], i, def[i]);
+	}
+	i = SYSCALL_VECTOR;
+	if (direct_trap(lg, &lg->syscall_idt, i))
+		idt[i] = lg->syscall_idt;
+	else
+		default_idt_entry(&idt[i], i, def[i]);
+}
+
+void guest_set_clockevent(struct lguest *lg, unsigned long delta)
+{
+	ktime_t expires;
+
+	if (unlikely(delta == 0)) {
+		/* Clock event device is shutting down. */
+		hrtimer_cancel(&lg->hrt);
+		return;
+	}
+
+	expires = ktime_add_ns(ktime_get_real(), delta);
+	hrtimer_start(&lg->hrt, expires, HRTIMER_MODE_ABS);
+}
+
+static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
+{
+	struct lguest *lg = container_of(timer, struct lguest, hrt);
+
+	set_bit(0, lg->irqs_pending);
+	if (lg->halted)
+		wake_up_process(lg->tsk);
+	return HRTIMER_NORESTART;
+}
+
+void init_clockdev(struct lguest *lg)
+{
+	hrtimer_init(&lg->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+	lg->hrt.function = clockdev_fn;
+}
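
set_guest_interrupt() above hand-builds the frame the CPU itself would push for a real interrupt, but on the guest's own stack. Read from the highest address down (matching the push order in the code), the frame is:

	old ss, old esp		only pushed on a ring change
	eflags			IF recreated from lguest_data.irq_enabled
	cs
	eip
	error code		only for traps that define one (8, 10-14, 17)

The saved cs:eip are then redirected to idt_address(lo, hi), so the guest's handler runs and returns with an ordinary iret.
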
diff --git a/drivers/lguest/io.c b/drivers/lguest/io.c
new file mode 100644
index 0000000..06bdba2
--- /dev/null
+++ b/drivers/lguest/io.c
@@ -0,0 +1,399 @@
+/* Simple I/O model for guests, based on shared memory.
+ * Copyright (C) 2006 Rusty Russell IBM Corporation
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
+ */
+#include <linux/types.h>
+#include <linux/futex.h>
+#include <linux/jhash.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/uaccess.h>
+#include "lg.h"
+
+static struct list_head dma_hash[61];
+
+void lguest_io_init(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(dma_hash); i++)
+		INIT_LIST_HEAD(&dma_hash[i]);
+}
+
+/* FIXME: allow multi-page lengths. */
+static int check_dma_list(struct lguest *lg, const struct lguest_dma *dma)
+{
+	unsigned int i;
+
+	for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) {
+		if (!dma->len[i])
+			return 1;
+		if (!lguest_address_ok(lg, dma->addr[i], dma->len[i]))
+			goto kill;
+		if (dma->len[i] > PAGE_SIZE)
+			goto kill;
+		/* We could do over a page, but is it worth it? */
+		if ((dma->addr[i] % PAGE_SIZE) + dma->len[i] > PAGE_SIZE)
+			goto kill;
+	}
+	return 1;
+
+kill:
+	kill_guest(lg, "bad DMA entry: %u@%#lx", dma->len[i], dma->addr[i]);
+	return 0;
+}
+
+static unsigned int hash(const union futex_key *key)
+{
+	return jhash2((u32*)&key->both.word,
+		      (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
+		      key->both.offset)
+		% ARRAY_SIZE(dma_hash);
+}
+
+static inline int key_eq(const union futex_key *a, const union futex_key *b)
+{
+	return (a->both.word == b->both.word
+		&& a->both.ptr == b->both.ptr
+		&& a->both.offset == b->both.offset);
+}
+
+/* Must hold read lock on dmainfo owner's current->mm->mmap_sem */
+static void unlink_dma(struct lguest_dma_info *dmainfo)
+{
+	BUG_ON(!mutex_is_locked(&lguest_lock));
+	dmainfo->interrupt = 0;
+	list_del(&dmainfo->list);
+	drop_futex_key_refs(&dmainfo->key);
+}
+
+static int unbind_dma(struct lguest *lg,
+		      const union futex_key *key,
+		      unsigned long dmas)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < LGUEST_MAX_DMA; i++) {
+		if (key_eq(key, &lg->dma[i].key) && dmas == lg->dma[i].dmas) {
+			unlink_dma(&lg->dma[i]);
+			ret = 1;
+			break;
+		}
+	}
+	return ret;
+}
+
+int bind_dma(struct lguest *lg,
+	     unsigned long ukey, unsigned long dmas, u16 numdmas, u8 interrupt)
+{
+	unsigned int i;
+	int ret = 0;
+	union futex_key key;
+	struct rw_semaphore *fshared = &current->mm->mmap_sem;
+
+	if (interrupt >= LGUEST_IRQS)
+		return 0;
+
+	mutex_lock(&lguest_lock);
+	down_read(fshared);
+	if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
+		kill_guest(lg, "bad dma key %#lx", ukey);
+		goto unlock;
+	}
+	get_futex_key_refs(&key);
+
+	if (interrupt == 0)
+		ret = unbind_dma(lg, &key, dmas);
+	else {
+		for (i = 0; i < LGUEST_MAX_DMA; i++) {
+			if (lg->dma[i].interrupt)
+				continue;
+
+			lg->dma[i].dmas = dmas;
+			lg->dma[i].num_dmas = numdmas;
+			lg->dma[i].next_dma = 0;
+			lg->dma[i].key = key;
+			lg->dma[i].guestid = lg->guestid;
+			lg->dma[i].interrupt = interrupt;
+			list_add(&lg->dma[i].list, &dma_hash[hash(&key)]);
+			ret = 1;
+			goto unlock;
+		}
+	}
+	drop_futex_key_refs(&key);
+unlock:
+	up_read(fshared);
+	mutex_unlock(&lguest_lock);
+	return ret;
+}
+
+/* lgread from another guest */
+static int lgread_other(struct lguest *lg,
+			void *buf, u32 addr, unsigned bytes)
+{
+	if (!lguest_address_ok(lg, addr, bytes)
+	    || access_process_vm(lg->tsk, addr, buf, bytes, 0) != bytes) {
+		memset(buf, 0, bytes);
+		kill_guest(lg, "bad address in registered DMA struct");
+		return 0;
+	}
+	return 1;
+}
+
+/* lgwrite to another guest */
+static int lgwrite_other(struct lguest *lg, u32 addr,
+			 const void *buf, unsigned bytes)
+{
+	if (!lguest_address_ok(lg, addr, bytes)
+	    || (access_process_vm(lg->tsk, addr, (void *)buf, bytes, 1)
+		!= bytes)) {
+		kill_guest(lg, "bad address writing to registered DMA");
+		return 0;
+	}
+	return 1;
+}
+
+static u32 copy_data(struct lguest *srclg,
+		     const struct lguest_dma *src,
+		     const struct lguest_dma *dst,
+		     struct page *pages[])
+{
+	unsigned int totlen, si, di, srcoff, dstoff;
+	void *maddr = NULL;
+
+	totlen = 0;
+	si = di = 0;
+	srcoff = dstoff = 0;
+	while (si < LGUEST_MAX_DMA_SECTIONS && src->len[si]
+	       && di < LGUEST_MAX_DMA_SECTIONS && dst->len[di]) {
+		u32 len = min(src->len[si] - srcoff, dst->len[di] - dstoff);
+
+		if (!maddr)
+			maddr = kmap(pages[di]);
+
+		/* FIXME: This is not completely portable, since
+		   archs do different things for copy_to_user_page. */
+		if (copy_from_user(maddr + (dst->addr[di] + dstoff)%PAGE_SIZE,
+				   (void *__user)src->addr[si], len) != 0) {
+			kill_guest(srclg, "bad address in sending DMA");
+			totlen = 0;
+			break;
+		}
+
+		totlen += len;
+		srcoff += len;
+		dstoff += len;
+		if (srcoff == src->len[si]) {
+			si++;
+			srcoff = 0;
+		}
+		if (dstoff == dst->len[di]) {
+			kunmap(pages[di]);
+			maddr = NULL;
+			di++;
+			dstoff = 0;
+		}
+	}
+
+	if (maddr)
+		kunmap(pages[di]);
+
+	return totlen;
+}
+
+/* Src is us, ie. current. */
+static u32 do_dma(struct lguest *srclg, const struct lguest_dma *src,
+		  struct lguest *dstlg, const struct lguest_dma *dst)
+{
+	int i;
+	u32 ret;
+	struct page *pages[LGUEST_MAX_DMA_SECTIONS];
+
+	if (!check_dma_list(dstlg, dst) || !check_dma_list(srclg, src))
+		return 0;
+
+	/* First get the destination pages */
+	for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) {
+		if (dst->len[i] == 0)
+			break;
+		if (get_user_pages(dstlg->tsk, dstlg->mm,
+				   dst->addr[i], 1, 1, 1, pages+i, NULL)
+		    != 1) {
+			kill_guest(dstlg, "Error mapping DMA pages");
+			ret = 0;
+			goto drop_pages;
+		}
+	}
+
+	/* Now copy until we run out of src or dst. */
+	ret = copy_data(srclg, src, dst, pages);
+
+drop_pages:
+	while (--i >= 0)
+		put_page(pages[i]);
+	return ret;
+}
+
+static int dma_transfer(struct lguest *srclg,
+			unsigned long udma,
+			struct lguest_dma_info *dst)
+{
+	struct lguest_dma dst_dma, src_dma;
+	struct lguest *dstlg;
+	u32 i, dma = 0;
+
+	dstlg = &lguests[dst->guestid];
+	/* Get our dma list. */
+	lgread(srclg, &src_dma, udma, sizeof(src_dma));
+
+	/* We can't deadlock against them dmaing to us, because this
+	 * is all under the lguest_lock. */
+	down_read(&dstlg->mm->mmap_sem);
+
+	for (i = 0; i < dst->num_dmas; i++) {
+		dma = (dst->next_dma + i) % dst->num_dmas;
+		if (!lgread_other(dstlg, &dst_dma,
+				  dst->dmas + dma * sizeof(struct lguest_dma),
+				  sizeof(dst_dma))) {
+			goto fail;
+		}
+		if (!dst_dma.used_len)
+			break;
+	}
+	if (i != dst->num_dmas) {
+		unsigned long used_lenp;
+		unsigned int ret;
+
+		ret = do_dma(srclg, &src_dma, dstlg, &dst_dma);
+		/* Put used length in src. */
+		lgwrite_u32(srclg,
+			    udma+offsetof(struct lguest_dma, used_len), ret);
+		if (ret == 0 && src_dma.len[0] != 0)
+			goto fail;
+
+		/* Make sure destination sees contents before length. */
+		wmb();
+		used_lenp = dst->dmas
+			+ dma * sizeof(struct lguest_dma)
+			+ offsetof(struct lguest_dma, used_len);
+		lgwrite_other(dstlg, used_lenp, &ret, sizeof(ret));
+		dst->next_dma++;
+	}
+	up_read(&dstlg->mm->mmap_sem);
+
+	/* Do this last so dst doesn't simply sleep on lock. */
+	set_bit(dst->interrupt, dstlg->irqs_pending);
+	wake_up_process(dstlg->tsk);
+	return i == dst->num_dmas;
+
+fail:
+	up_read(&dstlg->mm->mmap_sem);
+	return 0;
+}
+
+void send_dma(struct lguest *lg, unsigned long ukey, unsigned long udma)
+{
+	union futex_key key;
+	int empty = 0;
+	struct rw_semaphore *fshared = &current->mm->mmap_sem;
+
+again:
+	mutex_lock(&lguest_lock);
+	down_read(fshared);
+	if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
+		kill_guest(lg, "bad sending DMA key");
+		goto unlock;
+	}
+	/* Shared mapping?  Look for other guests... */
+	if (key.shared.offset & 1) {
+		struct lguest_dma_info *i;
+		list_for_each_entry(i, &dma_hash[hash(&key)], list) {
+			if (i->guestid == lg->guestid)
+				continue;
+			if (!key_eq(&key, &i->key))
+				continue;
+
+			empty += dma_transfer(lg, udma, i);
+			break;
+		}
+		if (empty == 1) {
+			/* Give any recipients one chance to restock. */
+			up_read(&current->mm->mmap_sem);
+			mutex_unlock(&lguest_lock);
+			empty++;
+			goto again;
+		}
+	} else {
+		/* Private mapping: tell our userspace. */
+		lg->dma_is_pending = 1;
+		lg->pending_dma = udma;
+		lg->pending_key = ukey;
+	}
+unlock:
+	up_read(fshared);
+	mutex_unlock(&lguest_lock);
+}
+
+void release_all_dma(struct lguest *lg)
+{
+	unsigned int i;
+
+	BUG_ON(!mutex_is_locked(&lguest_lock));
+
+	down_read(&lg->mm->mmap_sem);
+	for (i = 0; i < LGUEST_MAX_DMA; i++) {
+		if (lg->dma[i].interrupt)
+			unlink_dma(&lg->dma[i]);
+	}
+	up_read(&lg->mm->mmap_sem);
+}
+
+/* Userspace wants a dma buffer from this guest. */
+unsigned long get_dma_buffer(struct lguest *lg,
+			     unsigned long ukey, unsigned long *interrupt)
+{
+	unsigned long ret = 0;
+	union futex_key key;
+	struct lguest_dma_info *i;
+	struct rw_semaphore *fshared = &current->mm->mmap_sem;
+
+	mutex_lock(&lguest_lock);
+	down_read(fshared);
+	if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
+		kill_guest(lg, "bad registered DMA buffer");
+		goto unlock;
+	}
+	list_for_each_entry(i, &dma_hash[hash(&key)], list) {
+		if (key_eq(&key, &i->key) && i->guestid == lg->guestid) {
+			unsigned int j;
+			for (j = 0; j < i->num_dmas; j++) {
+				struct lguest_dma dma;
+
+				ret = i->dmas + j * sizeof(struct lguest_dma);
+				lgread(lg, &dma, ret, sizeof(dma));
+				if (dma.used_len == 0)
+					break;
+			}
+			*interrupt = i->interrupt;
+			break;
+		}
+	}
+unlock:
+	up_read(fshared);
+	mutex_unlock(&lguest_lock);
+	return ret;
+}
+
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
new file mode 100644
index 0000000..3e2ddfb
--- /dev/null
+++ b/drivers/lguest/lg.h
@@ -0,0 +1,261 @@
+#ifndef _LGUEST_H
+#define _LGUEST_H
+
+#include <asm/desc.h>
+
+#define GDT_ENTRY_LGUEST_CS	10
+#define GDT_ENTRY_LGUEST_DS	11
+#define LGUEST_CS		(GDT_ENTRY_LGUEST_CS * 8)
+#define LGUEST_DS		(GDT_ENTRY_LGUEST_DS * 8)
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/stringify.h>
+#include <linux/binfmts.h>
+#include <linux/futex.h>
+#include <linux/lguest.h>
+#include <linux/lguest_launcher.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <asm/semaphore.h>
+#include "irq_vectors.h"
+
+#define GUEST_PL 1
+
+struct lguest_regs
+{
+	/* Manually saved part. */
+	unsigned long ebx, ecx, edx;
+	unsigned long esi, edi, ebp;
+	unsigned long gs;
+	unsigned long eax;
+	unsigned long fs, ds, es;
+	unsigned long trapnum, errcode;
+	/* Trap pushed part */
+	unsigned long eip;
+	unsigned long cs;
+	unsigned long eflags;
+	unsigned long esp;
+	unsigned long ss;
+};
+
+void free_pagetables(void);
+int init_pagetables(struct page **switcher_page, unsigned int pages);
+
+/* Full 4G segment descriptors, suitable for CS and DS. */
+#define FULL_EXEC_SEGMENT ((struct desc_struct){0x0000ffff, 0x00cf9b00})
+#define FULL_SEGMENT ((struct desc_struct){0x0000ffff, 0x00cf9300})
+
+struct lguest_dma_info
+{
+	struct list_head list;
+	union futex_key key;
+	unsigned long dmas;
+	u16 next_dma;
+	u16 num_dmas;
+	u16 guestid;
+	u8 interrupt; 	/* 0 when not registered */
+};
+
+/* We have separate types for the guest's ptes & pgds and the shadow ptes &
+ * pgds.  Since this host might use three-level pagetables and the guest and
+ * shadow pagetables don't, we can't use the normal pte_t/pgd_t. */
+typedef union {
+	struct { unsigned flags:12, pfn:20; };
+	struct { unsigned long val; } raw;
+} spgd_t;
+typedef union {
+	struct { unsigned flags:12, pfn:20; };
+	struct { unsigned long val; } raw;
+} spte_t;
+typedef union {
+	struct { unsigned flags:12, pfn:20; };
+	struct { unsigned long val; } raw;
+} gpgd_t;
+typedef union {
+	struct { unsigned flags:12, pfn:20; };
+	struct { unsigned long val; } raw;
+} gpte_t;
+#define mkgpte(_val) ((gpte_t){.raw.val = _val})
+#define mkgpgd(_val) ((gpgd_t){.raw.val = _val})
+
+struct pgdir
+{
+	unsigned long cr3;
+	spgd_t *pgdir;
+};
+
+/* This is a guest-specific page, mapped read-only into the guest. */
+struct lguest_ro_state
+{
+	/* Host information we need to restore when we switch back. */
+	u32 host_cr3;
+	struct Xgt_desc_struct host_idt_desc;
+	struct Xgt_desc_struct host_gdt_desc;
+	u32 host_sp;
+
+	/* Fields which are used when guest is running. */
+	struct Xgt_desc_struct guest_idt_desc;
+	struct Xgt_desc_struct guest_gdt_desc;
+	struct i386_hw_tss guest_tss;
+	struct desc_struct guest_idt[IDT_ENTRIES];
+	struct desc_struct guest_gdt[GDT_ENTRIES];
+};
+
+/* We have two pages shared with guests, per cpu.  */
+struct lguest_pages
+{
+	/* This is the stack page mapped rw in guest */
+	char spare[PAGE_SIZE - sizeof(struct lguest_regs)];
+	struct lguest_regs regs;
+
+	/* This is the host state & guest descriptor page, ro in guest */
+	struct lguest_ro_state state;
+} __attribute__((aligned(PAGE_SIZE)));
+
+#define CHANGED_IDT		1
+#define CHANGED_GDT		2
+#define CHANGED_GDT_TLS		4 /* Actually a subset of CHANGED_GDT */
+#define CHANGED_ALL	        3
+
+/* The private info the thread maintains about the guest. */
+struct lguest
+{
+	/* At the end of a page, shared-mapped over lguest_pages in guest. */
+	unsigned long regs_page;
+	struct lguest_regs *regs;
+	struct lguest_data __user *lguest_data;
+	struct task_struct *tsk;
+	struct mm_struct *mm; 	/* == tsk->mm, but that becomes NULL on exit */
+	u16 guestid;
+	u32 pfn_limit;
+	u32 page_offset;
+	u32 cr2;
+	int halted;
+	int ts;
+	u32 next_hcall;
+	u32 esp1;
+	u8 ss1;
+
+	/* Do we need to stop what we're doing and return to userspace? */
+	int break_out;
+	wait_queue_head_t break_wq;
+
+	/* Bitmap of what has changed: see CHANGED_* above. */
+	int changed;
+	struct lguest_pages *last_pages;
+
+	/* We keep a small number of these. */
+	u32 pgdidx;
+	struct pgdir pgdirs[4];
+
+	/* Cached wakeup: we hold a reference to this task. */
+	struct task_struct *wake;
+
+	unsigned long noirq_start, noirq_end;
+	int dma_is_pending;
+	unsigned long pending_dma; /* struct lguest_dma */
+	unsigned long pending_key; /* address they're sending to */
+
+	unsigned int stack_pages;
+	u32 tsc_khz;
+
+	struct lguest_dma_info dma[LGUEST_MAX_DMA];
+
+	/* Dead? */
+	const char *dead;
+
+	/* The GDT entries copied into lguest_ro_state when running. */
+	struct desc_struct gdt[GDT_ENTRIES];
+
+	/* The IDT entries: some copied into lguest_ro_state when running. */
+	struct desc_struct idt[FIRST_EXTERNAL_VECTOR+LGUEST_IRQS];
+	struct desc_struct syscall_idt;
+
+	/* Virtual clock device */
+	struct hrtimer hrt;
+
+	/* Pending virtual interrupts */
+	DECLARE_BITMAP(irqs_pending, LGUEST_IRQS);
+};
+
+extern struct lguest lguests[];
+extern struct mutex lguest_lock;
+
+/* core.c: */
+u32 lgread_u32(struct lguest *lg, unsigned long addr);
+void lgwrite_u32(struct lguest *lg, unsigned long addr, u32 val);
+void lgread(struct lguest *lg, void *buf, unsigned long addr, unsigned len);
+void lgwrite(struct lguest *lg, unsigned long, const void *buf, unsigned len);
+int find_free_guest(void);
+int lguest_address_ok(const struct lguest *lg,
+		      unsigned long addr, unsigned long len);
+int run_guest(struct lguest *lg, unsigned long __user *user);
+
+
+/* interrupts_and_traps.c: */
+void maybe_do_interrupt(struct lguest *lg);
+int deliver_trap(struct lguest *lg, unsigned int num);
+void load_guest_idt_entry(struct lguest *lg, unsigned int i, u32 low, u32 hi);
+void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages);
+void pin_stack_pages(struct lguest *lg);
+void setup_default_idt_entries(struct lguest_ro_state *state,
+			       const unsigned long *def);
+void copy_traps(const struct lguest *lg, struct desc_struct *idt,
+		const unsigned long *def);
+void guest_set_clockevent(struct lguest *lg, unsigned long delta);
+void init_clockdev(struct lguest *lg);
+
+/* segments.c: */
+void setup_default_gdt_entries(struct lguest_ro_state *state);
+void setup_guest_gdt(struct lguest *lg);
+void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num);
+void guest_load_tls(struct lguest *lg, unsigned long tls_array);
+void copy_gdt(const struct lguest *lg, struct desc_struct *gdt);
+void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt);
+
+/* page_tables.c: */
+int init_guest_pagetable(struct lguest *lg, unsigned long pgtable);
+void free_guest_pagetable(struct lguest *lg);
+void guest_new_pagetable(struct lguest *lg, unsigned long pgtable);
+void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 i);
+void guest_pagetable_clear_all(struct lguest *lg);
+void guest_pagetable_flush_user(struct lguest *lg);
+void guest_set_pte(struct lguest *lg, unsigned long cr3,
+		   unsigned long vaddr, gpte_t val);
+void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages);
+int demand_page(struct lguest *info, unsigned long cr2, int errcode);
+void pin_page(struct lguest *lg, unsigned long vaddr);
+
+/* lguest_user.c: */
+int lguest_device_init(void);
+void lguest_device_remove(void);
+
+/* io.c: */
+void lguest_io_init(void);
+int bind_dma(struct lguest *lg,
+	     unsigned long key, unsigned long udma, u16 numdmas, u8 interrupt);
+void send_dma(struct lguest *info, unsigned long key, unsigned long udma);
+void release_all_dma(struct lguest *lg);
+unsigned long get_dma_buffer(struct lguest *lg, unsigned long key,
+			     unsigned long *interrupt);
+
+/* hypercalls.c: */
+void do_hypercalls(struct lguest *lg);
+
+#define kill_guest(lg, fmt...)					\
+do {								\
+	if (!(lg)->dead) {					\
+		(lg)->dead = kasprintf(GFP_ATOMIC, fmt);	\
+		if (!(lg)->dead)				\
+			(lg)->dead = ERR_PTR(-ENOMEM);		\
+	}							\
+} while(0)
+
+static inline unsigned long guest_pa(struct lguest *lg, unsigned long vaddr)
+{
+	return vaddr - lg->page_offset;
+}
+#endif	/* __ASSEMBLY__ */
+#endif	/* _LGUEST_H */
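
The spgd_t/gpte_t unions above keep the usual x86 layout of twelve flag bits below a twenty-bit page frame number, so turning an entry into the address of the page it maps is a single shift. A tiny sketch (the helper name is invented; the real conversions live in page_tables.c):

/* Illustrative only: physical address of the page a guest PTE refers to. */
static inline unsigned long sketch_gpte_to_addr(gpte_t gpte)
{
	return gpte.pfn << PAGE_SHIFT;	/* flags occupy the low 12 bits */
}
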
diff --git a/drivers/lguest/lguest.c b/drivers/lguest/lguest.c
new file mode 100644
index 0000000..b9a58b7
--- /dev/null
+++ b/drivers/lguest/lguest.c
@@ -0,0 +1,621 @@
+/*
+ * Lguest specific paravirt-ops implementation
+ *
+ * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/start_kernel.h>
+#include <linux/string.h>
+#include <linux/console.h>
+#include <linux/screen_info.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/lguest.h>
+#include <linux/lguest_launcher.h>
+#include <linux/lguest_bus.h>
+#include <asm/paravirt.h>
+#include <asm/param.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/desc.h>
+#include <asm/setup.h>
+#include <asm/e820.h>
+#include <asm/mce.h>
+#include <asm/io.h>
+//#include <asm/sched-clock.h>
+
+/* Declarations for definitions in lguest_guest.S */
+extern char lguest_noirq_start[], lguest_noirq_end[];
+extern const char lgstart_cli[], lgend_cli[];
+extern const char lgstart_sti[], lgend_sti[];
+extern const char lgstart_popf[], lgend_popf[];
+extern const char lgstart_pushf[], lgend_pushf[];
+extern const char lgstart_iret[], lgend_iret[];
+extern void lguest_iret(void);
+
+struct lguest_data lguest_data = {
+	.hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
+	.noirq_start = (u32)lguest_noirq_start,
+	.noirq_end = (u32)lguest_noirq_end,
+	.blocked_interrupts = { 1 }, /* Block timer interrupts */
+};
+struct lguest_device_desc *lguest_devices;
+
+static enum paravirt_lazy_mode lazy_mode;
+static void lguest_lazy_mode(enum paravirt_lazy_mode mode)
+{
+	if (mode == PARAVIRT_LAZY_FLUSH) {
+		if (unlikely(lazy_mode != PARAVIRT_LAZY_NONE))
+			hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
+	} else {
+		lazy_mode = mode;
+		if (mode == PARAVIRT_LAZY_NONE)
+			hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
+	}
+}
+
+static void lazy_hcall(unsigned long call,
+		       unsigned long arg1,
+		       unsigned long arg2,
+		       unsigned long arg3)
+{
+	if (lazy_mode == PARAVIRT_LAZY_NONE)
+		hcall(call, arg1, arg2, arg3);
+	else
+		async_hcall(call, arg1, arg2, arg3);
+}
+
+void async_hcall(unsigned long call,
+		 unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+	/* Note: This code assumes we're uniprocessor. */
+	static unsigned int next_call;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (lguest_data.hcall_status[next_call] != 0xFF) {
+		/* Table full, so do normal hcall which will flush table. */
+		hcall(call, arg1, arg2, arg3);
+	} else {
+		lguest_data.hcalls[next_call].eax = call;
+		lguest_data.hcalls[next_call].edx = arg1;
+		lguest_data.hcalls[next_call].ebx = arg2;
+		lguest_data.hcalls[next_call].ecx = arg3;
+		/* Make sure host sees arguments before "valid" flag. */
+		wmb();
+		lguest_data.hcall_status[next_call] = 0;
+		if (++next_call == LHCALL_RING_SIZE)
+			next_call = 0;
+	}
+	local_irq_restore(flags);
+}
+
+void lguest_send_dma(unsigned long key, struct lguest_dma *dma)
+{
+	dma->used_len = 0;
+	hcall(LHCALL_SEND_DMA, key, __pa(dma), 0);
+}
+
+int lguest_bind_dma(unsigned long key, struct lguest_dma *dmas,
+		    unsigned int num, u8 irq)
+{
+	if (!hcall(LHCALL_BIND_DMA, key, __pa(dmas), (num << 8) | irq))
+		return -ENOMEM;
+	return 0;
+}
+
+void lguest_unbind_dma(unsigned long key, struct lguest_dma *dmas)
+{
+	hcall(LHCALL_BIND_DMA, key, __pa(dmas), 0);
+}
+
+/* For guests, device memory can be used as normal memory, so we cast away the
+ * __iomem to quieten sparse. */
+void *lguest_map(unsigned long phys_addr, unsigned long pages)
+{
+	return (__force void *)ioremap(phys_addr, PAGE_SIZE*pages);
+}
+
+void lguest_unmap(void *addr)
+{
+	iounmap((__force void __iomem *)addr);
+}
+
+static unsigned long save_fl(void)
+{
+	return lguest_data.irq_enabled;
+}
+
+static void restore_fl(unsigned long flags)
+{
+	/* FIXME: Check if interrupt pending... */
+	lguest_data.irq_enabled = flags;
+}
+
+static void irq_disable(void)
+{
+	lguest_data.irq_enabled = 0;
+}
+
+static void irq_enable(void)
+{
+	/* FIXME: Check if interrupt pending... */
+	lguest_data.irq_enabled = X86_EFLAGS_IF;
+}
+
+static void lguest_write_idt_entry(struct desc_struct *dt,
+				   int entrynum, u32 low, u32 high)
+{
+	write_dt_entry(dt, entrynum, low, high);
+	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, low, high);
+}
+
+static void lguest_load_idt(const struct Xgt_desc_struct *desc)
+{
+	unsigned int i;
+	struct desc_struct *idt = (void *)desc->address;
+
+	for (i = 0; i < (desc->size+1)/8; i++)
+		hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
+}
+
+static void lguest_load_gdt(const struct Xgt_desc_struct *desc)
+{
+	BUG_ON((desc->size+1)/8 != GDT_ENTRIES);
+	hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0);
+}
+
+static void lguest_write_gdt_entry(struct desc_struct *dt,
+				   int entrynum, u32 low, u32 high)
+{
+	write_dt_entry(dt, entrynum, low, high);
+	hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0);
+}
+
+static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+	lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
+}
+
+static void lguest_set_ldt(const void *addr, unsigned entries)
+{
+}
+
+static void lguest_load_tr_desc(void)
+{
+}
+
+static void lguest_cpuid(unsigned int *eax, unsigned int *ebx,
+			 unsigned int *ecx, unsigned int *edx)
+{
+	int function = *eax;
+
+	native_cpuid(eax, ebx, ecx, edx);
+	switch (function) {
+	case 1:	/* Basic feature request. */
+		/* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */
+		*ecx &= 0x00002201;
+		/* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, FPU. */
+		*edx &= 0x07808101;
+		/* Host wants to know when we flush kernel pages: set PGE. */
+		*edx |= 0x00002000;
+		break;
+	case 0x80000000:
+		/* Futureproof this a little: if they ask for extended processor
+		 * information beyond what we know, limit it to known fields. */
+		if (*eax > 0x80000008)
+			*eax = 0x80000008;
+		break;
+	}
+}
+
+static unsigned long current_cr0, current_cr3;
+static void lguest_write_cr0(unsigned long val)
+{
+	lazy_hcall(LHCALL_TS, val & 8, 0, 0);
+	current_cr0 = val;
+}
+
+static unsigned long lguest_read_cr0(void)
+{
+	return current_cr0;
+}
+
+static void lguest_clts(void)
+{
+	lazy_hcall(LHCALL_TS, 0, 0, 0);
+	current_cr0 &= ~8U;
+}
+
+static unsigned long lguest_read_cr2(void)
+{
+	return lguest_data.cr2;
+}
+
+static void lguest_write_cr3(unsigned long cr3)
+{
+	lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0);
+	current_cr3 = cr3;
+}
+
+static unsigned long lguest_read_cr3(void)
+{
+	return current_cr3;
+}
+
+/* Used to enable/disable PGE, but we don't care. */
+static unsigned long lguest_read_cr4(void)
+{
+	return 0;
+}
+
+static void lguest_write_cr4(unsigned long val)
+{
+}
+
+static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pteval)
+{
+	*ptep = pteval;
+	lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pteval.pte_low);
+}
+
+/* We only support two-level pagetables at the moment. */
+static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+	*pmdp = pmdval;
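+	/* The Host wants the page holding this pmd entry and the index of the
+	 * entry within that page (entries are 4 bytes each). */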
+	lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK,
+		   (__pa(pmdp)&(PAGE_SIZE-1))/4, 0);
+}
+
+/* FIXME: Eliminate all callers of this. */
+static void lguest_set_pte(pte_t *ptep, pte_t pteval)
+{
+	*ptep = pteval;
+	/* Don't bother with hypercall before initial setup. */
+	if (current_cr3)
+		lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
+}
+
+static void lguest_flush_tlb_single(unsigned long addr)
+{
+	/* Simply set it to zero, and it will fault back in. */
+	lazy_hcall(LHCALL_SET_PTE, current_cr3, addr, 0);
+}
+
+static void lguest_flush_tlb_user(void)
+{
+	lazy_hcall(LHCALL_FLUSH_TLB, 0, 0, 0);
+}
+
+static void lguest_flush_tlb_kernel(void)
+{
+	lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
+}
+
+static void disable_lguest_irq(unsigned int irq)
+{
+	set_bit(irq, lguest_data.blocked_interrupts);
+}
+
+static void enable_lguest_irq(unsigned int irq)
+{
+	clear_bit(irq, lguest_data.blocked_interrupts);
+	/* FIXME: If it's pending? */
+}
+
+static struct irq_chip lguest_irq_controller = {
+	.name		= "lguest",
+	.mask		= disable_lguest_irq,
+	.mask_ack	= disable_lguest_irq,
+	.unmask		= enable_lguest_irq,
+};
+
+static void __init lguest_init_IRQ(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < LGUEST_IRQS; i++) {
+		int vector = FIRST_EXTERNAL_VECTOR + i;
+		if (vector != SYSCALL_VECTOR) {
+			set_intr_gate(vector, interrupt[i]);
+			set_irq_chip_and_handler(i, &lguest_irq_controller,
+						 handle_level_irq);
+		}
+	}
+	irq_ctx_init(smp_processor_id());
+}
+
+static unsigned long lguest_get_wallclock(void)
+{
+	return hcall(LHCALL_GET_WALLCLOCK, 0, 0, 0);
+}
+
+static cycle_t lguest_clock_read(void)
+{
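+	/* If the Host told us the TSC rate we can read the TSC directly;
+	 * otherwise fall back to a coarse jiffies count. */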
+	if (lguest_data.tsc_khz)
+		return native_read_tsc();
+	else
+		return jiffies;
+}
+
+/* This is what we tell the kernel is our clocksource.  */
+static struct clocksource lguest_clock = {
+	.name		= "lguest",
+	.rating		= 400,
+	.read		= lguest_clock_read,
+};
+
+/* We also need a "struct clock_event_device": Linux asks us to set it to go
+ * off some time in the future.  Actually, James Morris figured all this out, I
+ * just applied the patch. */
+static int lguest_clockevent_set_next_event(unsigned long delta,
+                                           struct clock_event_device *evt)
+{
+	if (delta < LG_CLOCK_MIN_DELTA) {
+		if (printk_ratelimit())
+			printk(KERN_DEBUG "%s: small delta %lu ns\n",
+			       __FUNCTION__, delta);
+		return -ETIME;
+	}
+	hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0);
+	return 0;
+}
+
+static void lguest_clockevent_set_mode(enum clock_event_mode mode,
+                                      struct clock_event_device *evt)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		/* A 0 argument shuts the clock down. */
+		hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0);
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		/* This is what we expect. */
+		break;
+	case CLOCK_EVT_MODE_PERIODIC:
+		BUG();
+	}
+}
+
+/* This describes our primitive timer chip. */
+static struct clock_event_device lguest_clockevent = {
+	.name                   = "lguest",
+	.features               = CLOCK_EVT_FEAT_ONESHOT,
+	.set_next_event         = lguest_clockevent_set_next_event,
+	.set_mode               = lguest_clockevent_set_mode,
+	.rating                 = INT_MAX,
+	.mult                   = 1,
+	.shift                  = 0,
+	.min_delta_ns           = LG_CLOCK_MIN_DELTA,
+	.max_delta_ns           = LG_CLOCK_MAX_DELTA,
+};
+
+/* This is the Guest timer interrupt handler (hardware interrupt 0).  We just
+ * call the clockevent infrastructure and it does whatever needs doing. */
+static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
+{
+	unsigned long flags;
+
+	/* Don't interrupt us while this is running. */
+	local_irq_save(flags);
+	lguest_clockevent.event_handler(&lguest_clockevent);
+	local_irq_restore(flags);
+}
+
+static void lguest_time_init(void)
+{
+	set_irq_handler(0, lguest_time_irq);
+
+	/* We use the TSC if the Host tells us we can, otherwise a dumb
+	 * jiffies-based clock. */
+	if (lguest_data.tsc_khz) {
+		lguest_clock.shift = 22;
+		lguest_clock.mult = clocksource_khz2mult(lguest_data.tsc_khz,
+							 lguest_clock.shift);
+		lguest_clock.mask = CLOCKSOURCE_MASK(64);
+		lguest_clock.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+	} else {
+		/* To understand this, start at kernel/time/jiffies.c... */
+		lguest_clock.shift = 8;
+		lguest_clock.mult = (((u64)NSEC_PER_SEC<<8)/ACTHZ) << 8;
+		lguest_clock.mask = CLOCKSOURCE_MASK(32);
+	}
+	clocksource_register(&lguest_clock);
+
+	/* We can't set cpumask in the initializer: damn C limitations! */
+	lguest_clockevent.cpumask = cpumask_of_cpu(0);
+	clockevents_register_device(&lguest_clockevent);
+
+	enable_lguest_irq(0);
+}
+
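+/* The Guest tells the Host where its kernel stack is, so the Host can set
+ * things up properly when it handles a trap on the Guest's behalf. */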
+static void lguest_load_esp0(struct tss_struct *tss,
+				     struct thread_struct *thread)
+{
+	lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->esp0,
+		   THREAD_SIZE/PAGE_SIZE);
+}
+
+static void lguest_set_debugreg(int regno, unsigned long value)
+{
+	/* FIXME: Implement */
+}
+
+static void lguest_wbinvd(void)
+{
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+static void lguest_apic_write(unsigned long reg, unsigned long v)
+{
+}
+
+static unsigned long lguest_apic_read(unsigned long reg)
+{
+	return 0;
+}
+#endif
+
+static void lguest_safe_halt(void)
+{
+	hcall(LHCALL_HALT, 0, 0, 0);
+}
+
+static void lguest_power_off(void)
+{
+	hcall(LHCALL_CRASH, __pa("Power down"), 0, 0);
+}
+
+static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
+{
+	hcall(LHCALL_CRASH, __pa(p), 0, 0);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block paniced = {
+	.notifier_call = lguest_panic
+};
+
+static __init char *lguest_memory_setup(void)
+{
+	/* We do this here because lockcheck barfs if done before start_kernel */
+	atomic_notifier_chain_register(&panic_notifier_list, &paniced);
+
+	add_memory_region(E820_MAP->addr, E820_MAP->size, E820_MAP->type);
+	return "LGUEST";
+}
+
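+/* The hottest paravirt operations (save/restore flags, interrupt
+ * enable/disable) are patched inline: lguest_patch() copies these templates
+ * from lguest_asm.S over the indirect call sites. */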
+static const struct lguest_insns
+{
+	const char *start, *end;
+} lguest_insns[] = {
+	[PARAVIRT_PATCH(irq_disable)] = { lgstart_cli, lgend_cli },
+	[PARAVIRT_PATCH(irq_enable)] = { lgstart_sti, lgend_sti },
+	[PARAVIRT_PATCH(restore_fl)] = { lgstart_popf, lgend_popf },
+	[PARAVIRT_PATCH(save_fl)] = { lgstart_pushf, lgend_pushf },
+};
+static unsigned lguest_patch(u8 type, u16 clobber, void *insns, unsigned len)
+{
+	unsigned int insn_len;
+
+	/* Don't touch it if we don't have a replacement */
+	if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
+		return paravirt_patch_default(type, clobber, insns, len);
+
+	insn_len = lguest_insns[type].end - lguest_insns[type].start;
+
+	/* Similarly if we can't fit replacement. */
+	if (len < insn_len)
+		return paravirt_patch_default(type, clobber, insns, len);
+
+	memcpy(insns, lguest_insns[type].start, insn_len);
+	return insn_len;
+}
+
+__init void lguest_init(void *boot)
+{
+	/* Copy boot parameters first. */
+	memcpy(&boot_params, boot, PARAM_SIZE);
+	memcpy(boot_command_line, __va(boot_params.hdr.cmd_line_ptr),
+	       COMMAND_LINE_SIZE);
+
+	paravirt_ops.name = "lguest";
+	paravirt_ops.paravirt_enabled = 1;
+	paravirt_ops.kernel_rpl = 1;
+
+	paravirt_ops.save_fl = save_fl;
+	paravirt_ops.restore_fl = restore_fl;
+	paravirt_ops.irq_disable = irq_disable;
+	paravirt_ops.irq_enable = irq_enable;
+	paravirt_ops.load_gdt = lguest_load_gdt;
+	paravirt_ops.memory_setup = lguest_memory_setup;
+	paravirt_ops.cpuid = lguest_cpuid;
+	paravirt_ops.write_cr3 = lguest_write_cr3;
+	paravirt_ops.flush_tlb_user = lguest_flush_tlb_user;
+	paravirt_ops.flush_tlb_single = lguest_flush_tlb_single;
+	paravirt_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
+	paravirt_ops.set_pte = lguest_set_pte;
+	paravirt_ops.set_pte_at = lguest_set_pte_at;
+	paravirt_ops.set_pmd = lguest_set_pmd;
+#ifdef CONFIG_X86_LOCAL_APIC
+	paravirt_ops.apic_write = lguest_apic_write;
+	paravirt_ops.apic_write_atomic = lguest_apic_write;
+	paravirt_ops.apic_read = lguest_apic_read;
+#endif
+	paravirt_ops.load_idt = lguest_load_idt;
+	paravirt_ops.iret = lguest_iret;
+	paravirt_ops.load_esp0 = lguest_load_esp0;
+	paravirt_ops.load_tr_desc = lguest_load_tr_desc;
+	paravirt_ops.set_ldt = lguest_set_ldt;
+	paravirt_ops.load_tls = lguest_load_tls;
+	paravirt_ops.set_debugreg = lguest_set_debugreg;
+	paravirt_ops.clts = lguest_clts;
+	paravirt_ops.read_cr0 = lguest_read_cr0;
+	paravirt_ops.write_cr0 = lguest_write_cr0;
+	paravirt_ops.init_IRQ = lguest_init_IRQ;
+	paravirt_ops.read_cr2 = lguest_read_cr2;
+	paravirt_ops.read_cr3 = lguest_read_cr3;
+	paravirt_ops.read_cr4 = lguest_read_cr4;
+	paravirt_ops.write_cr4 = lguest_write_cr4;
+	paravirt_ops.write_gdt_entry = lguest_write_gdt_entry;
+	paravirt_ops.write_idt_entry = lguest_write_idt_entry;
+	paravirt_ops.patch = lguest_patch;
+	paravirt_ops.safe_halt = lguest_safe_halt;
+	paravirt_ops.get_wallclock = lguest_get_wallclock;
+	paravirt_ops.time_init = lguest_time_init;
+	paravirt_ops.set_lazy_mode = lguest_lazy_mode;
+	paravirt_ops.wbinvd = lguest_wbinvd;
+
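+	/* Tell the Host where our lguest_data structure lives: it holds the
+	 * hypercall ring, interrupt state and other fields shared between
+	 * Guest and Host. */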
+	hcall(LHCALL_LGUEST_INIT, __pa(&lguest_data), 0, 0);
+
+	/* We use top of mem for initial pagetables. */
+	init_pg_tables_end = __pa(pg0);
+
+	asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory");
+
+	reserve_top_address(lguest_data.reserve_mem);
+
+	lockdep_init();
+
+	paravirt_disable_iospace();
+
+	cpu_detect(&new_cpu_data);
+	/* head.S usually sets up the first capability word, so do it here. */
+	new_cpu_data.x86_capability[0] = cpuid_edx(1);
+
+	/* Math is always hard! */
+	new_cpu_data.hard_math = 1;
+
+#ifdef CONFIG_X86_MCE
+	mce_disabled = 1;
+#endif
+
+#ifdef CONFIG_ACPI
+	acpi_disabled = 1;
+	acpi_ht = 0;
+#endif
+
+	add_preferred_console("hvc", 0, NULL);
+
+	pm_power_off = lguest_power_off;
+	start_kernel();
+}
diff --git a/drivers/lguest/lguest_asm.S b/drivers/lguest/lguest_asm.S
new file mode 100644
index 0000000..00046c5
--- /dev/null
+++ b/drivers/lguest/lguest_asm.S
@@ -0,0 +1,56 @@
+#include <linux/linkage.h>
+#include <linux/lguest.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+/* FIXME: Once asm/processor-flags.h goes in, include that */
+#define X86_EFLAGS_IF 0x00000200
+
+/*
+ * This is where we begin: we have a magic signature which the launcher looks
+ * for.  The plan is that the Linux boot protocol will be extended with a
+ * "platform type" field which will guide us here from the normal entry point,
+ * but for the moment this suffices.  We pass the virtual address of the boot
+ * info to lguest_init().
+ *
+ * We put it in .init.text which will be discarded after boot.
+ */
+.section .init.text, "ax", @progbits
+.ascii "GenuineLguest"
+	/* Set up initial stack. */
+ 	movl $(init_thread_union+THREAD_SIZE),%esp
+	movl %esi, %eax
+	addl $__PAGE_OFFSET, %eax
+	jmp lguest_init
+
+/* The templates for inline patching. */
+#define LGUEST_PATCH(name, insns...)			\
+	lgstart_##name:	insns; lgend_##name:;		\
+	.globl lgstart_##name; .globl lgend_##name
+
+LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled)
+LGUEST_PATCH(sti, movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled)
+LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
+LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
+
+.text
+/* These mark the EIP range where the host should never deliver interrupts. */
+.global lguest_noirq_start
+.global lguest_noirq_end
+
+/*
+ * We move the eflags word to lguest_data.irq_enabled to restore interrupt
+ * state.  For page faults, GPFs and virtual interrupts, the hypervisor has
+ * saved eflags manually; otherwise the trap was delivered directly, so
+ * eflags reflects the real machine IF state, ie. interrupts on.  Since the
+ * kernel always dies if it takes such a trap with interrupts disabled
+ * anyway, turning interrupts back on unconditionally here is OK.
+ */
+ENTRY(lguest_iret)
+	pushl	%eax
+	movl	12(%esp), %eax
+lguest_noirq_start:
+	movl	%eax,%ss:lguest_data+LGUEST_DATA_irq_enabled
+	popl	%eax
+	iret
+lguest_noirq_end:
diff --git a/drivers/lguest/lguest_bus.c b/drivers/lguest/lguest_bus.c
new file mode 100644
index 0000000..18d6ab2
--- /dev/null
+++ b/drivers/lguest/lguest_bus.c
@@ -0,0 +1,148 @@
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/lguest_bus.h>
+#include <asm/io.h>
+
+static ssize_t type_show(struct device *_dev,
+                         struct device_attribute *attr, char *buf)
+{
+	struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
+	return sprintf(buf, "%hu", lguest_devices[dev->index].type);
+}
+static ssize_t features_show(struct device *_dev,
+                             struct device_attribute *attr, char *buf)
+{
+	struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
+	return sprintf(buf, "%hx", lguest_devices[dev->index].features);
+}
+static ssize_t pfn_show(struct device *_dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
+	return sprintf(buf, "%u", lguest_devices[dev->index].pfn);
+}
+static ssize_t status_show(struct device *_dev,
+                           struct device_attribute *attr, char *buf)
+{
+	struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
+	return sprintf(buf, "%hx", lguest_devices[dev->index].status);
+}
+static ssize_t status_store(struct device *_dev, struct device_attribute *attr,
+                            const char *buf, size_t count)
+{
+	struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
+	if (sscanf(buf, "%hi", &lguest_devices[dev->index].status) != 1)
+		return -EINVAL;
+	return count;
+}
+static struct device_attribute lguest_dev_attrs[] = {
+	__ATTR_RO(type),
+	__ATTR_RO(features),
+	__ATTR_RO(pfn),
+	__ATTR(status, 0644, status_show, status_store),
+	__ATTR_NULL
+};
+
+static int lguest_dev_match(struct device *_dev, struct device_driver *_drv)
+{
+	struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
+	struct lguest_driver *drv = container_of(_drv,struct lguest_driver,drv);
+
+	return (drv->device_type == lguest_devices[dev->index].type);
+}
+
+struct lguest_bus {
+	struct bus_type bus;
+	struct device dev;
+};
+
+static struct lguest_bus lguest_bus = {
+	.bus = {
+		.name  = "lguest",
+		.match = lguest_dev_match,
+		.dev_attrs = lguest_dev_attrs,
+	},
+	.dev = {
+		.parent = NULL,
+		.bus_id = "lguest",
+	}
+};
+
+static int lguest_dev_probe(struct device *_dev)
+{
+	int ret;
+	struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
+	struct lguest_driver *drv = container_of(dev->dev.driver,
+						struct lguest_driver, drv);
+
+	lguest_devices[dev->index].status |= LGUEST_DEVICE_S_DRIVER;
+	ret = drv->probe(dev);
+	if (ret == 0)
+		lguest_devices[dev->index].status |= LGUEST_DEVICE_S_DRIVER_OK;
+	return ret;
+}
+
+int register_lguest_driver(struct lguest_driver *drv)
+{
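+	/* If the device page was never mapped we aren't running under lguest,
+	 * so just pretend the registration succeeded. */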
+	if (!lguest_devices)
+		return 0;
+
+	drv->drv.bus = &lguest_bus.bus;
+	drv->drv.name = drv->name;
+	drv->drv.owner = drv->owner;
+	drv->drv.probe = lguest_dev_probe;
+
+	return driver_register(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(register_lguest_driver);
+
+static void add_lguest_device(unsigned int index)
+{
+	struct lguest_device *new;
+
+	lguest_devices[index].status |= LGUEST_DEVICE_S_ACKNOWLEDGE;
+	new = kmalloc(sizeof(struct lguest_device), GFP_KERNEL);
+	if (!new) {
+		printk(KERN_EMERG "Cannot allocate lguest device %u\n", index);
+		lguest_devices[index].status |= LGUEST_DEVICE_S_FAILED;
+		return;
+	}
+
+	new->index = index;
+	new->private = NULL;
+	memset(&new->dev, 0, sizeof(new->dev));
+	new->dev.parent = &lguest_bus.dev;
+	new->dev.bus = &lguest_bus.bus;
+	sprintf(new->dev.bus_id, "%u", index);
+	if (device_register(&new->dev) != 0) {
+		printk(KERN_EMERG "Cannot register lguest device %u\n", index);
+		lguest_devices[index].status |= LGUEST_DEVICE_S_FAILED;
+		kfree(new);
+	}
+}
+
+static void scan_devices(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < LGUEST_MAX_DEVICES; i++)
+		if (lguest_devices[i].type)
+			add_lguest_device(i);
+}
+
+static int __init lguest_bus_init(void)
+{
+	if (strcmp(paravirt_ops.name, "lguest") != 0)
+		return 0;
+
+	/* Devices are in page above top of "normal" mem. */
+	lguest_devices = lguest_map(max_pfn<<PAGE_SHIFT, 1);
+
+	if (bus_register(&lguest_bus.bus) != 0
+	    || device_register(&lguest_bus.dev) != 0)
+		panic("lguest bus registration failed");
+
+	scan_devices();
+	return 0;
+}
+postcore_initcall(lguest_bus_init);
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
new file mode 100644
index 0000000..e90d7a7
--- /dev/null
+++ b/drivers/lguest/lguest_user.c
@@ -0,0 +1,236 @@
+/* Userspace control of the guest, via /dev/lguest. */
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include "lg.h"
+
+static void setup_regs(struct lguest_regs *regs, unsigned long start)
+{
+	/* Write out stack in format lguest expects, so we can switch to it. */
+	regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL;
+	regs->cs = __KERNEL_CS|GUEST_PL;
+	regs->eflags = 0x202; 	/* Interrupts enabled. */
+	regs->eip = start;
+	/* esi points to our boot information (physical address 0) */
+}
+
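+/* Each write to /dev/lguest is a request number followed by the extra
+ * 32-bit arguments noted in the comments on the handlers below. */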
+/* + addr */
+static long user_get_dma(struct lguest *lg, const u32 __user *input)
+{
+	unsigned long key, udma, irq;
+
+	if (get_user(key, input) != 0)
+		return -EFAULT;
+	udma = get_dma_buffer(lg, key, &irq);
+	if (!udma)
+		return -ENOENT;
+
+	/* We put irq number in udma->used_len. */
+	lgwrite_u32(lg, udma + offsetof(struct lguest_dma, used_len), irq);
+	return udma;
+}
+
+/* To force the Guest to stop running and return to the Launcher, the
+ * Waker writes LHREQ_BREAK and the value "1" to /dev/lguest.  The
+ * Launcher then writes LHREQ_BREAK and "0" to release the Waker. */
+static int break_guest_out(struct lguest *lg, const u32 __user *input)
+{
+	unsigned long on;
+
+	/* Fetch whether they're turning break on or off. */
+	if (get_user(on, input) != 0)
+		return -EFAULT;
+
+	if (on) {
+		lg->break_out = 1;
+		/* Pop it out (may be running on different CPU) */
+		wake_up_process(lg->tsk);
+		/* Wait for them to reset it */
+		return wait_event_interruptible(lg->break_wq, !lg->break_out);
+	} else {
+		lg->break_out = 0;
+		wake_up(&lg->break_wq);
+		return 0;
+	}
+}
+
+/* + irq */
+static int user_send_irq(struct lguest *lg, const u32 __user *input)
+{
+	u32 irq;
+
+	if (get_user(irq, input) != 0)
+		return -EFAULT;
+	if (irq >= LGUEST_IRQS)
+		return -EINVAL;
+	set_bit(irq, lg->irqs_pending);
+	return 0;
+}
+
+static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
+{
+	struct lguest *lg = file->private_data;
+
+	if (!lg)
+		return -EINVAL;
+
+	/* If you're not the task which owns the guest, go away. */
+	if (current != lg->tsk)
+		return -EPERM;
+
+	if (lg->dead) {
+		size_t len;
+
+		if (IS_ERR(lg->dead))
+			return PTR_ERR(lg->dead);
+
+		len = min(size, strlen(lg->dead)+1);
+		if (copy_to_user(user, lg->dead, len) != 0)
+			return -EFAULT;
+		return len;
+	}
+
+	if (lg->dma_is_pending)
+		lg->dma_is_pending = 0;
+
+	return run_guest(lg, (unsigned long __user *)user);
+}
+
+/* Take: pfnlimit, pgdir, start, pageoffset. */
+static int initialize(struct file *file, const u32 __user *input)
+{
+	struct lguest *lg;
+	int err, i;
+	u32 args[4];
+
+	/* We grab the Big Lguest lock, which protects the global array
+	 * "lguests" and guards against multiple simultaneous initializations. */
+	mutex_lock(&lguest_lock);
+
+	if (file->private_data) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
+	if (copy_from_user(args, input, sizeof(args)) != 0) {
+		err = -EFAULT;
+		goto unlock;
+	}
+
+	i = find_free_guest();
+	if (i < 0) {
+		err = -ENOSPC;
+		goto unlock;
+	}
+	lg = &lguests[i];
+	lg->guestid = i;
+	lg->pfn_limit = args[0];
+	lg->page_offset = args[3];
+	lg->regs_page = get_zeroed_page(GFP_KERNEL);
+	if (!lg->regs_page) {
+		err = -ENOMEM;
+		goto release_guest;
+	}
+	lg->regs = (void *)lg->regs_page + PAGE_SIZE - sizeof(*lg->regs);
+
+	err = init_guest_pagetable(lg, args[1]);
+	if (err)
+		goto free_regs;
+
+	setup_regs(lg->regs, args[2]);
+	setup_guest_gdt(lg);
+	init_clockdev(lg);
+	lg->tsk = current;
+	lg->mm = get_task_mm(lg->tsk);
+	init_waitqueue_head(&lg->break_wq);
+	lg->last_pages = NULL;
+	file->private_data = lg;
+
+	mutex_unlock(&lguest_lock);
+
+	return sizeof(args);
+
+free_regs:
+	free_page(lg->regs_page);
+release_guest:
+	memset(lg, 0, sizeof(*lg));
+unlock:
+	mutex_unlock(&lguest_lock);
+	return err;
+}
+
+static ssize_t write(struct file *file, const char __user *input,
+		     size_t size, loff_t *off)
+{
+	struct lguest *lg = file->private_data;
+	u32 req;
+
+	if (get_user(req, input) != 0)
+		return -EFAULT;
+	input += sizeof(req);
+
+	if (req != LHREQ_INITIALIZE && !lg)
+		return -EINVAL;
+	if (lg && lg->dead)
+		return -ENOENT;
+
+	/* If you're not the task which owns the Guest, you can only break */
+	if (lg && current != lg->tsk && req != LHREQ_BREAK)
+		return -EPERM;
+
+	switch (req) {
+	case LHREQ_INITIALIZE:
+		return initialize(file, (const u32 __user *)input);
+	case LHREQ_GETDMA:
+		return user_get_dma(lg, (const u32 __user *)input);
+	case LHREQ_IRQ:
+		return user_send_irq(lg, (const u32 __user *)input);
+	case LHREQ_BREAK:
+		return break_guest_out(lg, (const u32 __user *)input);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int close(struct inode *inode, struct file *file)
+{
+	struct lguest *lg = file->private_data;
+
+	if (!lg)
+		return 0;
+
+	mutex_lock(&lguest_lock);
+	/* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */
+	hrtimer_cancel(&lg->hrt);
+	release_all_dma(lg);
+	free_guest_pagetable(lg);
+	mmput(lg->mm);
+	if (!IS_ERR(lg->dead))
+		kfree(lg->dead);
+	free_page(lg->regs_page);
+	memset(lg, 0, sizeof(*lg));
+	mutex_unlock(&lguest_lock);
+	return 0;
+}
+
+static struct file_operations lguest_fops = {
+	.owner	 = THIS_MODULE,
+	.release = close,
+	.write	 = write,
+	.read	 = read,
+};
+static struct miscdevice lguest_dev = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "lguest",
+	.fops	= &lguest_fops,
+};
+
+int __init lguest_device_init(void)
+{
+	return misc_register(&lguest_dev);
+}
+
+void __exit lguest_device_remove(void)
+{
+	misc_deregister(&lguest_dev);
+}
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
new file mode 100644
index 0000000..1b0ba09
--- /dev/null
+++ b/drivers/lguest/page_tables.c
@@ -0,0 +1,411 @@
+/* Shadow page table operations.
+ * Copyright (C) Rusty Russell IBM Corporation 2006.
+ * GPL v2 and any later version */
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <linux/percpu.h>
+#include <asm/tlbflush.h>
+#include "lg.h"
+
+#define PTES_PER_PAGE_SHIFT 10
+#define PTES_PER_PAGE (1 << PTES_PER_PAGE_SHIFT)
+#define SWITCHER_PGD_INDEX (PTES_PER_PAGE - 1)
+
+static DEFINE_PER_CPU(spte_t *, switcher_pte_pages);
+#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
+
+static unsigned vaddr_to_pgd_index(unsigned long vaddr)
+{
+	return vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
+}
+
+/* These access the shadow versions (ie. the ones used by the CPU). */
+static spgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
+{
+	unsigned int index = vaddr_to_pgd_index(vaddr);
+
+	if (index >= SWITCHER_PGD_INDEX) {
+		kill_guest(lg, "attempt to access switcher pages");
+		index = 0;
+	}
+	return &lg->pgdirs[i].pgdir[index];
+}
+
+static spte_t *spte_addr(struct lguest *lg, spgd_t spgd, unsigned long vaddr)
+{
+	spte_t *page = __va(spgd.pfn << PAGE_SHIFT);
+	BUG_ON(!(spgd.flags & _PAGE_PRESENT));
+	return &page[(vaddr >> PAGE_SHIFT) % PTES_PER_PAGE];
+}
+
+/* These access the guest versions. */
+static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr)
+{
+	unsigned int index = vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
+	return lg->pgdirs[lg->pgdidx].cr3 + index * sizeof(gpgd_t);
+}
+
+static unsigned long gpte_addr(struct lguest *lg,
+			       gpgd_t gpgd, unsigned long vaddr)
+{
+	unsigned long gpage = gpgd.pfn << PAGE_SHIFT;
+	BUG_ON(!(gpgd.flags & _PAGE_PRESENT));
+	return gpage + ((vaddr>>PAGE_SHIFT) % PTES_PER_PAGE) * sizeof(gpte_t);
+}
+
+/* Do a virtual -> physical mapping on a user page. */
+static unsigned long get_pfn(unsigned long virtpfn, int write)
+{
+	struct page *page;
+	unsigned long ret = -1UL;
+
+	down_read(&current->mm->mmap_sem);
+	if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
+			   1, write, 1, &page, NULL) == 1)
+		ret = page_to_pfn(page);
+	up_read(&current->mm->mmap_sem);
+	return ret;
+}
+
+static spte_t gpte_to_spte(struct lguest *lg, gpte_t gpte, int write)
+{
+	spte_t spte;
+	unsigned long pfn;
+
+	/* We ignore the global flag. */
+	spte.flags = (gpte.flags & ~_PAGE_GLOBAL);
+	pfn = get_pfn(gpte.pfn, write);
+	if (pfn == -1UL) {
+		kill_guest(lg, "failed to get page %u", gpte.pfn);
+		/* Must not put_page() bogus page on cleanup. */
+		spte.flags = 0;
+	}
+	spte.pfn = pfn;
+	return spte;
+}
+
+static void release_pte(spte_t pte)
+{
+	if (pte.flags & _PAGE_PRESENT)
+		put_page(pfn_to_page(pte.pfn));
+}
+
+static void check_gpte(struct lguest *lg, gpte_t gpte)
+{
+	if ((gpte.flags & (_PAGE_PWT|_PAGE_PSE)) || gpte.pfn >= lg->pfn_limit)
+		kill_guest(lg, "bad page table entry");
+}
+
+static void check_gpgd(struct lguest *lg, gpgd_t gpgd)
+{
+	if ((gpgd.flags & ~_PAGE_TABLE) || gpgd.pfn >= lg->pfn_limit)
+		kill_guest(lg, "bad page directory entry");
+}
+
+/* FIXME: We hold a reference to pages, which prevents them from being
+   swapped.  It'd be nice to have a callback when Linux wants to swap out. */
+
+/* We fault pages in, which allows us to update accessed/dirty bits.
+ * Return true if we got the page. */
+int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
+{
+	gpgd_t gpgd;
+	spgd_t *spgd;
+	unsigned long gpte_ptr;
+	gpte_t gpte;
+	spte_t *spte;
+
+	gpgd = mkgpgd(lgread_u32(lg, gpgd_addr(lg, vaddr)));
+	if (!(gpgd.flags & _PAGE_PRESENT))
+		return 0;
+
+	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
+	if (!(spgd->flags & _PAGE_PRESENT)) {
+		/* Get a page of PTEs for them. */
+		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
+		/* FIXME: Steal from self in this case? */
+		if (!ptepage) {
+			kill_guest(lg, "out of memory allocating pte page");
+			return 0;
+		}
+		check_gpgd(lg, gpgd);
+		spgd->raw.val = (__pa(ptepage) | gpgd.flags);
+	}
+
+	gpte_ptr = gpte_addr(lg, gpgd, vaddr);
+	gpte = mkgpte(lgread_u32(lg, gpte_ptr));
+
+	/* No page? */
+	if (!(gpte.flags & _PAGE_PRESENT))
+		return 0;
+
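+	/* Bit 1 of the x86 fault error code is set for a write access,
+	 * bit 2 for an access from userspace. */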
+	/* Write to read-only page? */
+	if ((errcode & 2) && !(gpte.flags & _PAGE_RW))
+		return 0;
+
+	/* User access to a non-user page? */
+	if ((errcode & 4) && !(gpte.flags & _PAGE_USER))
+		return 0;
+
+	check_gpte(lg, gpte);
+	gpte.flags |= _PAGE_ACCESSED;
+	if (errcode & 2)
+		gpte.flags |= _PAGE_DIRTY;
+
+	/* We're done with the old pte. */
+	spte = spte_addr(lg, *spgd, vaddr);
+	release_pte(*spte);
+
+	/* We don't make it writable if this isn't a write: a later
+	 * write will fault so we can set the dirty bit in the guest. */
+	if (gpte.flags & _PAGE_DIRTY)
+		*spte = gpte_to_spte(lg, gpte, 1);
+	else {
+		gpte_t ro_gpte = gpte;
+		ro_gpte.flags &= ~_PAGE_RW;
+		*spte = gpte_to_spte(lg, ro_gpte, 0);
+	}
+
+	/* Now we update dirty/accessed on guest. */
+	lgwrite_u32(lg, gpte_ptr, gpte.raw.val);
+	return 1;
+}
+
+/* This is much faster than the full demand_page logic. */
+static int page_writable(struct lguest *lg, unsigned long vaddr)
+{
+	spgd_t *spgd;
+	unsigned long flags;
+
+	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
+	if (!(spgd->flags & _PAGE_PRESENT))
+		return 0;
+
+	flags = spte_addr(lg, *spgd, vaddr)->flags;
+	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
+}
+
+void pin_page(struct lguest *lg, unsigned long vaddr)
+{
+	if (!page_writable(lg, vaddr) && !demand_page(lg, vaddr, 2))
+		kill_guest(lg, "bad stack page %#lx", vaddr);
+}
+
+static void release_pgd(struct lguest *lg, spgd_t *spgd)
+{
+	if (spgd->flags & _PAGE_PRESENT) {
+		unsigned int i;
+		spte_t *ptepage = __va(spgd->pfn << PAGE_SHIFT);
+		for (i = 0; i < PTES_PER_PAGE; i++)
+			release_pte(ptepage[i]);
+		free_page((long)ptepage);
+		spgd->raw.val = 0;
+	}
+}
+
+static void flush_user_mappings(struct lguest *lg, int idx)
+{
+	unsigned int i;
+	for (i = 0; i < vaddr_to_pgd_index(lg->page_offset); i++)
+		release_pgd(lg, lg->pgdirs[idx].pgdir + i);
+}
+
+void guest_pagetable_flush_user(struct lguest *lg)
+{
+	flush_user_mappings(lg, lg->pgdidx);
+}
+
+static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
+{
+	unsigned int i;
+	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
+		if (lg->pgdirs[i].cr3 == pgtable)
+			break;
+	return i;
+}
+
+static unsigned int new_pgdir(struct lguest *lg,
+			      unsigned long cr3,
+			      int *blank_pgdir)
+{
+	unsigned int next;
+
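+	/* Pick a slot to re-use at random: a simple replacement policy for
+	 * the small cache of shadow pgdirs. */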
+	next = random32() % ARRAY_SIZE(lg->pgdirs);
+	if (!lg->pgdirs[next].pgdir) {
+		lg->pgdirs[next].pgdir = (spgd_t *)get_zeroed_page(GFP_KERNEL);
+		if (!lg->pgdirs[next].pgdir)
+			next = lg->pgdidx;
+		else
+			/* There are no mappings: you'll need to re-pin */
+			*blank_pgdir = 1;
+	}
+	lg->pgdirs[next].cr3 = cr3;
+	/* Release all the non-kernel mappings. */
+	flush_user_mappings(lg, next);
+
+	return next;
+}
+
+void guest_new_pagetable(struct lguest *lg, unsigned long pgtable)
+{
+	int newpgdir, repin = 0;
+
+	newpgdir = find_pgdir(lg, pgtable);
+	if (newpgdir == ARRAY_SIZE(lg->pgdirs))
+		newpgdir = new_pgdir(lg, pgtable, &repin);
+	lg->pgdidx = newpgdir;
+	if (repin)
+		pin_stack_pages(lg);
+}
+
+static void release_all_pagetables(struct lguest *lg)
+{
+	unsigned int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
+		if (lg->pgdirs[i].pgdir)
+			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
+				release_pgd(lg, lg->pgdirs[i].pgdir + j);
+}
+
+void guest_pagetable_clear_all(struct lguest *lg)
+{
+	release_all_pagetables(lg);
+	pin_stack_pages(lg);
+}
+
+static void do_set_pte(struct lguest *lg, int idx,
+		       unsigned long vaddr, gpte_t gpte)
+{
+	spgd_t *spgd = spgd_addr(lg, idx, vaddr);
+	if (spgd->flags & _PAGE_PRESENT) {
+		spte_t *spte = spte_addr(lg, *spgd, vaddr);
+		release_pte(*spte);
+		if (gpte.flags & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
+			check_gpte(lg, gpte);
+			*spte = gpte_to_spte(lg, gpte, gpte.flags&_PAGE_DIRTY);
+		} else
+			spte->raw.val = 0;
+	}
+}
+
+void guest_set_pte(struct lguest *lg,
+		   unsigned long cr3, unsigned long vaddr, gpte_t gpte)
+{
+	/* Kernel mappings must be changed on all top levels. */
+	if (vaddr >= lg->page_offset) {
+		unsigned int i;
+		for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
+			if (lg->pgdirs[i].pgdir)
+				do_set_pte(lg, i, vaddr, gpte);
+	} else {
+		int pgdir = find_pgdir(lg, cr3);
+		if (pgdir != ARRAY_SIZE(lg->pgdirs))
+			do_set_pte(lg, pgdir, vaddr, gpte);
+	}
+}
+
+void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 idx)
+{
+	int pgdir;
+
+	if (idx >= SWITCHER_PGD_INDEX)
+		return;
+
+	pgdir = find_pgdir(lg, cr3);
+	if (pgdir < ARRAY_SIZE(lg->pgdirs))
+		release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
+}
+
+int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
+{
+	/* We assume this in flush_user_mappings, so check now */
+	if (vaddr_to_pgd_index(lg->page_offset) >= SWITCHER_PGD_INDEX)
+		return -EINVAL;
+	lg->pgdidx = 0;
+	lg->pgdirs[lg->pgdidx].cr3 = pgtable;
+	lg->pgdirs[lg->pgdidx].pgdir = (spgd_t*)get_zeroed_page(GFP_KERNEL);
+	if (!lg->pgdirs[lg->pgdidx].pgdir)
+		return -ENOMEM;
+	return 0;
+}
+
+void free_guest_pagetable(struct lguest *lg)
+{
+	unsigned int i;
+
+	release_all_pagetables(lg);
+	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
+		free_page((long)lg->pgdirs[i].pgdir);
+}
+
+/* Caller must be preempt-safe */
+void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
+{
+	spte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
+	spgd_t switcher_pgd;
+	spte_t regs_pte;
+
+	/* Since the switcher is less than 4MB, we simply mug the top pte page. */
+	switcher_pgd.pfn = __pa(switcher_pte_page) >> PAGE_SHIFT;
+	switcher_pgd.flags = _PAGE_KERNEL;
+	lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
+
+	/* Map our regs page over stack page. */
+	regs_pte.pfn = __pa(lg->regs_page) >> PAGE_SHIFT;
+	regs_pte.flags = _PAGE_KERNEL;
+	switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTES_PER_PAGE]
+		= regs_pte;
+}
+
+static void free_switcher_pte_pages(void)
+{
+	unsigned int i;
+
+	for_each_possible_cpu(i)
+		free_page((long)switcher_pte_page(i));
+}
+
+static __init void populate_switcher_pte_page(unsigned int cpu,
+					      struct page *switcher_page[],
+					      unsigned int pages)
+{
+	unsigned int i;
+	spte_t *pte = switcher_pte_page(cpu);
+
+	for (i = 0; i < pages; i++) {
+		pte[i].pfn = page_to_pfn(switcher_page[i]);
+		pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
+	}
+
+	/* We only map this CPU's pages, so guest can't see others. */
+	i = pages + cpu*2;
+
+	/* First page (regs) is rw, second (state) is ro. */
+	pte[i].pfn = page_to_pfn(switcher_page[i]);
+	pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW;
+	pte[i+1].pfn = page_to_pfn(switcher_page[i+1]);
+	pte[i+1].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
+}
+
+__init int init_pagetables(struct page **switcher_page, unsigned int pages)
+{
+	unsigned int i;
+
+	for_each_possible_cpu(i) {
+		switcher_pte_page(i) = (spte_t *)get_zeroed_page(GFP_KERNEL);
+		if (!switcher_pte_page(i)) {
+			free_switcher_pte_pages();
+			return -ENOMEM;
+		}
+		populate_switcher_pte_page(i, switcher_page, pages);
+	}
+	return 0;
+}
+
+void free_pagetables(void)
+{
+	free_switcher_pte_pages();
+}
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
new file mode 100644
index 0000000..1b2cfe8
--- /dev/null
+++ b/drivers/lguest/segments.c
@@ -0,0 +1,125 @@
+#include "lg.h"
+
+static int desc_ok(const struct desc_struct *gdt)
+{
+	/* MBZ=0, P=1, DT=1  */
+	return ((gdt->b & 0x00209000) == 0x00009000);
+}
+
+static int segment_present(const struct desc_struct *gdt)
+{
+	return gdt->b & 0x8000;
+}
+
+static int ignored_gdt(unsigned int num)
+{
+	return (num == GDT_ENTRY_TSS
+		|| num == GDT_ENTRY_LGUEST_CS
+		|| num == GDT_ENTRY_LGUEST_DS
+		|| num == GDT_ENTRY_DOUBLEFAULT_TSS);
+}
+
+/* We don't allow removal of CS, DS or SS; it doesn't make sense. */
+static void check_segment_use(struct lguest *lg, unsigned int desc)
+{
+	if (lg->regs->gs / 8 == desc)
+		lg->regs->gs = 0;
+	if (lg->regs->fs / 8 == desc)
+		lg->regs->fs = 0;
+	if (lg->regs->es / 8 == desc)
+		lg->regs->es = 0;
+	if (lg->regs->ds / 8 == desc
+	    || lg->regs->cs / 8 == desc
+	    || lg->regs->ss / 8 == desc)
+		kill_guest(lg, "Removed live GDT entry %u", desc);
+}
+
+static void fixup_gdt_table(struct lguest *lg, unsigned start, unsigned end)
+{
+	unsigned int i;
+
+	for (i = start; i < end; i++) {
+		/* We never copy these ones to real gdt */
+		if (ignored_gdt(i))
+			continue;
+
+		/* We could fault in switch_to_guest if they are using
+		 * a removed segment. */
+		if (!segment_present(&lg->gdt[i])) {
+			check_segment_use(lg, i);
+			continue;
+		}
+
+		if (!desc_ok(&lg->gdt[i]))
+			kill_guest(lg, "Bad GDT descriptor %i", i);
+
+		/* DPL 0 presumably means "for use by guest". */
+		if ((lg->gdt[i].b & 0x00006000) == 0)
+			lg->gdt[i].b |= (GUEST_PL << 13);
+
+		/* Set accessed bit, since gdt isn't writable. */
+		lg->gdt[i].b |= 0x00000100;
+	}
+}
+
+void setup_default_gdt_entries(struct lguest_ro_state *state)
+{
+	struct desc_struct *gdt = state->guest_gdt;
+	unsigned long tss = (unsigned long)&state->guest_tss;
+
+	/* Hypervisor segments. */
+	gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
+	gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
+
+	/* This is the one which we *cannot* copy from the guest, since the
+	   tss depends on this lguest_ro_state, ie. this cpu. */
+	gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16);
+	gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000)
+		| ((tss >> 16) & 0x000000FF);
+}
+
+void setup_guest_gdt(struct lguest *lg)
+{
+	lg->gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
+	lg->gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
+	lg->gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13);
+	lg->gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
+}
+
+/* This is a fast version for the common case where only the three TLS entries
+ * have changed. */
+void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt)
+{
+	unsigned int i;
+
+	for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++)
+		gdt[i] = lg->gdt[i];
+}
+
+void copy_gdt(const struct lguest *lg, struct desc_struct *gdt)
+{
+	unsigned int i;
+
+	for (i = 0; i < GDT_ENTRIES; i++)
+		if (!ignored_gdt(i))
+			gdt[i] = lg->gdt[i];
+}
+
+void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num)
+{
+	if (num > ARRAY_SIZE(lg->gdt))
+		kill_guest(lg, "too many gdt entries %i", num);
+
+	lgread(lg, lg->gdt, table, num * sizeof(lg->gdt[0]));
+	fixup_gdt_table(lg, 0, ARRAY_SIZE(lg->gdt));
+	lg->changed |= CHANGED_GDT;
+}
+
+void guest_load_tls(struct lguest *lg, unsigned long gtls)
+{
+	struct desc_struct *tls = &lg->gdt[GDT_ENTRY_TLS_MIN];
+
+	lgread(lg, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
+	fixup_gdt_table(lg, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
+	lg->changed |= CHANGED_GDT_TLS;
+}
diff --git a/drivers/lguest/switcher.S b/drivers/lguest/switcher.S
new file mode 100644
index 0000000..eadd4cc
--- /dev/null
+++ b/drivers/lguest/switcher.S
@@ -0,0 +1,159 @@
+/* This code sits at 0xFFC00000 to do the low-level guest<->host switch.
+
+   There are two pages above us for this CPU (struct lguest_pages).
+   The second page (struct lguest_ro_state) becomes read-only after the
+   context switch.  The first page (the stack for traps) remains writable,
+   but while we're in here, the guest cannot be running.
+*/
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include "lg.h"
+
+.text
+ENTRY(start_switcher_text)
+
+/* %eax points to lguest pages for this CPU.  %ebx contains cr3 value.
+   All normal registers can be clobbered! */
+ENTRY(switch_to_guest)
+	/* Save host segments on host stack. */
+	pushl	%es
+	pushl	%ds
+	pushl	%gs
+	pushl	%fs
+	/* With CONFIG_FRAME_POINTER, gcc doesn't let us clobber this! */
+	pushl	%ebp
+	/* Save host stack. */
+	movl	%esp, LGUEST_PAGES_host_sp(%eax)
+	/* Switch to guest stack: if we get NMI we expect to be there. */
+	movl	%eax, %edx
+	addl	$LGUEST_PAGES_regs, %edx
+	movl	%edx, %esp
+	/* Switch to guest's GDT, IDT. */
+	lgdt	LGUEST_PAGES_guest_gdt_desc(%eax)
+	lidt	LGUEST_PAGES_guest_idt_desc(%eax)
+	/* Switch to guest's TSS while GDT still writable. */
+	movl	$(GDT_ENTRY_TSS*8), %edx
+	ltr	%dx
+	/* Set host's TSS GDT entry to available (clear byte 5 bit 2). */
+	movl	(LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
+	andb	$0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
+	/* Switch to guest page tables:	lguest_pages->state now read-only. */
+	movl	%ebx, %cr3
+	/* Restore guest regs */
+	popl	%ebx
+	popl	%ecx
+	popl	%edx
+	popl	%esi
+	popl	%edi
+	popl	%ebp
+	popl	%gs
+	popl	%eax
+	popl	%fs
+	popl	%ds
+	popl	%es
+	/* Skip error code and trap number */
+	addl	$8, %esp
+	iret
+
+#define SWITCH_TO_HOST							\
+	/* Save guest state */						\
+	pushl	%es;							\
+	pushl	%ds;							\
+	pushl	%fs;							\
+	pushl	%eax;							\
+	pushl	%gs;							\
+	pushl	%ebp;							\
+	pushl	%edi;							\
+	pushl	%esi;							\
+	pushl	%edx;							\
+	pushl	%ecx;							\
+	pushl	%ebx;							\
+	/* Load lguest ds segment for convenience. */			\
+	movl	$(LGUEST_DS), %eax;					\
+	movl	%eax, %ds;						\
+	/* Figure out where we are, based on stack (at top of regs). */	\
+	movl	%esp, %eax;						\
+	subl	$LGUEST_PAGES_regs, %eax;				\
+	/* Put trap number in %ebx before we switch cr3 and lose it. */ \
+	movl	LGUEST_PAGES_regs_trapnum(%eax), %ebx;			\
+	/* Switch to host page tables (host GDT, IDT and stack are in host   \
+	   mem, so need this first) */					\
+	movl	LGUEST_PAGES_host_cr3(%eax), %edx;			\
+	movl	%edx, %cr3;						\
+	/* Set guest's TSS to available (clear byte 5 bit 2). */	\
+	andb	$0xFD, (LGUEST_PAGES_guest_gdt+GDT_ENTRY_TSS*8+5)(%eax); \
+	/* Switch to host's GDT & IDT. */				\
+	lgdt	LGUEST_PAGES_host_gdt_desc(%eax);			\
+	lidt	LGUEST_PAGES_host_idt_desc(%eax);			\
+	/* Switch to host's stack. */					\
+	movl	LGUEST_PAGES_host_sp(%eax), %esp;			\
+	/* Switch to host's TSS */					\
+	movl	$(GDT_ENTRY_TSS*8), %edx;				\
+	ltr	%dx;							\
+	popl	%ebp;							\
+	popl	%fs;							\
+	popl	%gs;							\
+	popl	%ds;							\
+	popl	%es
+
+/* Return to run_guest_once. */
+return_to_host:
+	SWITCH_TO_HOST
+	iret
+
+deliver_to_host:
+	SWITCH_TO_HOST
+	/* Decode IDT and jump to the host's irq handler.  When that does iret, it
+	 * will return to run_guest_once.  This is a feature. */
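+	/* The IDT entry splits the handler address: low 16 bits in the first
+	 * word, high 16 bits at the top of the second long.  Reassemble it and
+	 * jump there. */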
+	movl	(LGUEST_PAGES_host_idt_desc+2)(%eax), %edx
+	leal	(%edx,%ebx,8), %eax
+	movzwl	(%eax),%edx
+	movl	4(%eax), %eax
+	xorw	%ax, %ax
+	orl	%eax, %edx
+	jmp	*%edx
+
+/* Real hardware interrupts are delivered straight to the host.  Others
+   cause us to return to run_guest_once so it can decide what to do.  Note
+   that some of these are overridden by the guest to deliver directly, and
+   never enter here (see load_guest_idt_entry). */
+.macro IRQ_STUB N TARGET
+	.data; .long 1f; .text; 1:
+ /* Make an error number for most traps, which don't have one. */
+ .if (\N <> 8) && (\N < 10 || \N > 14) && (\N <> 17)
+	pushl	$0
+ .endif
+	pushl	$\N
+	jmp	\TARGET
+	ALIGN
+.endm
+
+.macro IRQ_STUBS FIRST LAST TARGET
+ irq=\FIRST
+ .rept \LAST-\FIRST+1
+	IRQ_STUB irq \TARGET
+  irq=irq+1
+ .endr
+.endm
+
+/* We intercept every interrupt, because we may need to switch back to
+ * host.  Unfortunately we can't tell them apart except by entry
+ * point, so we need 256 entry points.
+ */
+.data
+.global default_idt_entries
+default_idt_entries:
+.text
+	IRQ_STUBS 0 1 return_to_host		/* First two traps */
+	IRQ_STUB 2 handle_nmi			/* NMI */
+	IRQ_STUBS 3 31 return_to_host		/* Rest of traps */
+	IRQ_STUBS 32 127 deliver_to_host	/* Real interrupts */
+	IRQ_STUB 128 return_to_host		/* System call (overridden) */
+	IRQ_STUBS 129 255 deliver_to_host	/* Other real interrupts */
+
+/* We ignore NMI and return. */
+handle_nmi:
+	addl	$8, %esp
+	iret
+
+ENTRY(end_switcher_text)
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index c96b7fe..ec9e5f3 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -365,10 +365,9 @@
 	if (np == NULL)
 		return NULL;
 
-	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev)
 		return NULL;
-	memset(dev, 0, sizeof(*dev));
 
 	dev->bus = &chip->lbus;
 	dev->media_bay = in_bay;
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index f8e1a13..d409f67 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1053,10 +1053,9 @@
 	struct smu_private *pp;
 	unsigned long flags;
 
-	pp = kmalloc(sizeof(struct smu_private), GFP_KERNEL);
+	pp = kzalloc(sizeof(struct smu_private), GFP_KERNEL);
 	if (pp == 0)
 		return -ENOMEM;
-	memset(pp, 0, sizeof(struct smu_private));
 	spin_lock_init(&pp->lock);
 	pp->mode = smu_file_commands;
 	init_waitqueue_head(&pp->wait);
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index bd55e6a..f25685b 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -335,6 +335,7 @@
 {
 	struct thermostat* th = arg;
 
+	set_freezable();
 	while(!kthread_should_stop()) {
 		try_to_freeze();
 		msleep_interruptible(2000);
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index dbb22403..e43554e 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -318,10 +318,9 @@
 	if (adap == NULL)
 		return NULL;
 
-	clt = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+	clt = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
 	if (clt == NULL)
 		return NULL;
-	memset(clt, 0, sizeof(struct i2c_client));
 
 	clt->addr = (id >> 1) & 0x7f;
 	clt->adapter = adap;
@@ -1770,7 +1769,8 @@
 				"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
 				NULL };
 
-	return call_usermodehelper(critical_overtemp_path, argv, envp, 0);
+	return call_usermodehelper(critical_overtemp_path,
+				   argv, envp, UMH_WAIT_EXEC);
 }
 
 
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 3d0354e..5452da1 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -431,9 +431,8 @@
 				     | I2C_FUNC_SMBUS_WRITE_BYTE) )
 		return 0;
 
-	if( !(cl=kmalloc(sizeof(*cl), GFP_KERNEL)) )
+	if( !(cl=kzalloc(sizeof(*cl), GFP_KERNEL)) )
 		return -ENOMEM;
-	memset( cl, 0, sizeof(struct i2c_client) );
 
 	cl->addr = addr;
 	cl->adapter = adapter;
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 4fcb245..516d943 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -80,7 +80,8 @@
 				"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
 				NULL };
 
-	return call_usermodehelper(critical_overtemp_path, argv, envp, 0);
+	return call_usermodehelper(critical_overtemp_path,
+				   argv, envp, UMH_WAIT_EXEC);
 }
 EXPORT_SYMBOL_GPL(wf_critical_overtemp);
 
@@ -92,6 +93,7 @@
 
 	DBG("wf: thread started\n");
 
+	set_freezable();
 	while(!kthread_should_stop()) {
 		if (time_after_eq(jiffies, next)) {
 			wf_notify(WF_EVENT_TICK, NULL);
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index a0fabf3..7e10c3a 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -117,10 +117,9 @@
 	DBG("wf_lm75: creating  %s device at address 0x%02x\n",
 	    ds1775 ? "ds1775" : "lm75", addr);
 
-	lm = kmalloc(sizeof(struct wf_lm75_sensor), GFP_KERNEL);
+	lm = kzalloc(sizeof(struct wf_lm75_sensor), GFP_KERNEL);
 	if (lm == NULL)
 		return NULL;
-	memset(lm, 0, sizeof(struct wf_lm75_sensor));
 
 	/* Usual rant about sensor names not beeing very consistent in
 	 * the device-tree, oh well ...
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 64bf3a8..531d4d1 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -2,19 +2,17 @@
 # Block device driver configuration
 #
 
-if BLOCK
-
-menu "Multi-device support (RAID and LVM)"
-
-config MD
+menuconfig MD
 	bool "Multiple devices driver support (RAID and LVM)"
+	depends on BLOCK
 	help
 	  Support multiple physical spindles through a single logical device.
 	  Required for RAID and logical volume management.
 
+if MD
+
 config BLK_DEV_MD
 	tristate "RAID support"
-	depends on MD
 	---help---
 	  This driver lets you combine several hard disk partitions into one
 	  logical block device. This can be used to simply append one
@@ -191,7 +189,6 @@
 
 config BLK_DEV_DM
 	tristate "Device mapper support"
-	depends on MD
 	---help---
 	  Device-mapper is a low level volume manager.  It works by allowing
 	  people to specify mappings for ranges of logical sectors.  Various
@@ -279,6 +276,4 @@
 
 	If unsure, say N.
 
-endmenu
-
-endif
+endif # MD
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 9620d45..927cb34 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -268,6 +268,31 @@
 			if (page->index == bitmap->file_pages-1)
 				size = roundup(bitmap->last_page_size,
 					       bdev_hardsect_size(rdev->bdev));
+			/* Just make sure we aren't corrupting data or
+			 * metadata
+			 */
+			if (bitmap->offset < 0) {
+				/* DATA  BITMAP METADATA  */
+				if (bitmap->offset
+				    + page->index * (PAGE_SIZE/512)
+				    + size/512 > 0)
+					/* bitmap runs in to metadata */
+					return -EINVAL;
+				if (rdev->data_offset + mddev->size*2
+				    > rdev->sb_offset*2 + bitmap->offset)
+					/* data runs in to bitmap */
+					return -EINVAL;
+			} else if (rdev->sb_offset*2 < rdev->data_offset) {
+				/* METADATA BITMAP DATA */
+				if (rdev->sb_offset*2
+				    + bitmap->offset
+				    + page->index*(PAGE_SIZE/512) + size/512
+				    > rdev->data_offset)
+					/* bitmap runs in to data */
+					return -EINVAL;
+			} else {
+				/* DATA METADATA BITMAP - no problems */
+			}
 			md_super_write(mddev, rdev,
 				       (rdev->sb_offset<<1) + bitmap->offset
 				       + page->index * (PAGE_SIZE/512),
@@ -280,32 +305,38 @@
 	return 0;
 }
 
+static void bitmap_file_kick(struct bitmap *bitmap);
 /*
  * write out a page to a file
  */
-static int write_page(struct bitmap *bitmap, struct page *page, int wait)
+static void write_page(struct bitmap *bitmap, struct page *page, int wait)
 {
 	struct buffer_head *bh;
 
-	if (bitmap->file == NULL)
-		return write_sb_page(bitmap, page, wait);
+	if (bitmap->file == NULL) {
+		switch (write_sb_page(bitmap, page, wait)) {
+		case -EINVAL:
+			bitmap->flags |= BITMAP_WRITE_ERROR;
+		}
+	} else {
 
-	bh = page_buffers(page);
+		bh = page_buffers(page);
 
-	while (bh && bh->b_blocknr) {
-		atomic_inc(&bitmap->pending_writes);
-		set_buffer_locked(bh);
-		set_buffer_mapped(bh);
-		submit_bh(WRITE, bh);
-		bh = bh->b_this_page;
+		while (bh && bh->b_blocknr) {
+			atomic_inc(&bitmap->pending_writes);
+			set_buffer_locked(bh);
+			set_buffer_mapped(bh);
+			submit_bh(WRITE, bh);
+			bh = bh->b_this_page;
+		}
+
+		if (wait) {
+			wait_event(bitmap->write_wait,
+				   atomic_read(&bitmap->pending_writes)==0);
+		}
 	}
-
-	if (wait) {
-		wait_event(bitmap->write_wait,
-			   atomic_read(&bitmap->pending_writes)==0);
-		return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
-	}
-	return 0;
+	if (bitmap->flags & BITMAP_WRITE_ERROR)
+		bitmap_file_kick(bitmap);
 }
 
 static void end_bitmap_write(struct buffer_head *bh, int uptodate)
@@ -425,17 +456,17 @@
  */
 
 /* update the event counter and sync the superblock to disk */
-int bitmap_update_sb(struct bitmap *bitmap)
+void bitmap_update_sb(struct bitmap *bitmap)
 {
 	bitmap_super_t *sb;
 	unsigned long flags;
 
 	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
-		return 0;
+		return;
 	spin_lock_irqsave(&bitmap->lock, flags);
 	if (!bitmap->sb_page) { /* no superblock */
 		spin_unlock_irqrestore(&bitmap->lock, flags);
-		return 0;
+		return;
 	}
 	spin_unlock_irqrestore(&bitmap->lock, flags);
 	sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
@@ -443,7 +474,7 @@
 	if (!bitmap->mddev->degraded)
 		sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
 	kunmap_atomic(sb, KM_USER0);
-	return write_page(bitmap, bitmap->sb_page, 1);
+	write_page(bitmap, bitmap->sb_page, 1);
 }
 
 /* print out the bitmap file superblock */
@@ -572,20 +603,22 @@
 	MASK_UNSET
 };
 
-/* record the state of the bitmap in the superblock */
-static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
-				enum bitmap_mask_op op)
+/* record the state of the bitmap in the superblock.  Return the old value */
+static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
+			     enum bitmap_mask_op op)
 {
 	bitmap_super_t *sb;
 	unsigned long flags;
+	int old;
 
 	spin_lock_irqsave(&bitmap->lock, flags);
 	if (!bitmap->sb_page) { /* can't set the state */
 		spin_unlock_irqrestore(&bitmap->lock, flags);
-		return;
+		return 0;
 	}
 	spin_unlock_irqrestore(&bitmap->lock, flags);
 	sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+	old = le32_to_cpu(sb->state) & bits;
 	switch (op) {
 		case MASK_SET: sb->state |= cpu_to_le32(bits);
 				break;
@@ -594,6 +627,7 @@
 		default: BUG();
 	}
 	kunmap_atomic(sb, KM_USER0);
+	return old;
 }
 
 /*
@@ -687,18 +721,23 @@
 {
 	char *path, *ptr = NULL;
 
-	bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET);
-	bitmap_update_sb(bitmap);
+	if (bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET) == 0) {
+		bitmap_update_sb(bitmap);
 
-	if (bitmap->file) {
-		path = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (path)
-			ptr = file_path(bitmap->file, path, PAGE_SIZE);
+		if (bitmap->file) {
+			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
+			if (path)
+				ptr = file_path(bitmap->file, path, PAGE_SIZE);
 
-		printk(KERN_ALERT "%s: kicking failed bitmap file %s from array!\n",
-		       bmname(bitmap), ptr ? ptr : "");
+			printk(KERN_ALERT
+			      "%s: kicking failed bitmap file %s from array!\n",
+			      bmname(bitmap), ptr ? ptr : "");
 
-		kfree(path);
+			kfree(path);
+		} else
+			printk(KERN_ALERT
+			       "%s: disabling internal bitmap due to errors\n",
+			       bmname(bitmap));
 	}
 
 	bitmap_file_put(bitmap);
@@ -769,16 +808,15 @@
 /* this gets called when the md device is ready to unplug its underlying
  * (slave) device queues -- before we let any writes go down, we need to
  * sync the dirty pages of the bitmap file to disk */
-int bitmap_unplug(struct bitmap *bitmap)
+void bitmap_unplug(struct bitmap *bitmap)
 {
 	unsigned long i, flags;
 	int dirty, need_write;
 	struct page *page;
 	int wait = 0;
-	int err;
 
 	if (!bitmap)
-		return 0;
+		return;
 
 	/* look at each page to see if there are any set bits that need to be
 	 * flushed out to disk */
@@ -786,7 +824,7 @@
 		spin_lock_irqsave(&bitmap->lock, flags);
 		if (!bitmap->filemap) {
 			spin_unlock_irqrestore(&bitmap->lock, flags);
-			return 0;
+			return;
 		}
 		page = bitmap->filemap[i];
 		dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
@@ -798,7 +836,7 @@
 		spin_unlock_irqrestore(&bitmap->lock, flags);
 
 		if (dirty | need_write)
-			err = write_page(bitmap, page, 0);
+			write_page(bitmap, page, 0);
 	}
 	if (wait) { /* if any writes were performed, we need to wait on them */
 		if (bitmap->file)
@@ -809,7 +847,6 @@
 	}
 	if (bitmap->flags & BITMAP_WRITE_ERROR)
 		bitmap_file_kick(bitmap);
-	return 0;
 }
 
 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
@@ -858,21 +895,21 @@
 			bmname(bitmap),
 			(unsigned long) i_size_read(file->f_mapping->host),
 			bytes + sizeof(bitmap_super_t));
-		goto out;
+		goto err;
 	}
 
 	ret = -ENOMEM;
 
 	bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
 	if (!bitmap->filemap)
-		goto out;
+		goto err;
 
 	/* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
 	bitmap->filemap_attr = kzalloc(
 		roundup( DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
 		GFP_KERNEL);
 	if (!bitmap->filemap_attr)
-		goto out;
+		goto err;
 
 	oldindex = ~0L;
 
@@ -905,7 +942,7 @@
 			}
 			if (IS_ERR(page)) { /* read error */
 				ret = PTR_ERR(page);
-				goto out;
+				goto err;
 			}
 
 			oldindex = index;
@@ -920,11 +957,13 @@
 				memset(paddr + offset, 0xff,
 				       PAGE_SIZE - offset);
 				kunmap_atomic(paddr, KM_USER0);
-				ret = write_page(bitmap, page, 1);
-				if (ret) {
+				write_page(bitmap, page, 1);
+
+				ret = -EIO;
+				if (bitmap->flags & BITMAP_WRITE_ERROR) {
 					/* release, page not in filemap yet */
 					put_page(page);
-					goto out;
+					goto err;
 				}
 			}
 
@@ -956,11 +995,15 @@
 		md_wakeup_thread(bitmap->mddev->thread);
 	}
 
-out:
 	printk(KERN_INFO "%s: bitmap initialized from disk: "
-		"read %lu/%lu pages, set %lu bits, status: %d\n",
-		bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, ret);
+		"read %lu/%lu pages, set %lu bits\n",
+		bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt);
 
+	return 0;
+
+ err:
+	printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
+	       bmname(bitmap), ret);
 	return ret;
 }
 
@@ -997,19 +1040,18 @@
  *			out to disk
  */
 
-int bitmap_daemon_work(struct bitmap *bitmap)
+void bitmap_daemon_work(struct bitmap *bitmap)
 {
 	unsigned long j;
 	unsigned long flags;
 	struct page *page = NULL, *lastpage = NULL;
-	int err = 0;
 	int blocks;
 	void *paddr;
 
 	if (bitmap == NULL)
-		return 0;
+		return;
 	if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
-		return 0;
+		return;
 	bitmap->daemon_lastrun = jiffies;
 
 	for (j = 0; j < bitmap->chunks; j++) {
@@ -1032,14 +1074,8 @@
 					clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
 
 				spin_unlock_irqrestore(&bitmap->lock, flags);
-				if (need_write) {
-					switch (write_page(bitmap, page, 0)) {
-					case 0:
-						break;
-					default:
-						bitmap_file_kick(bitmap);
-					}
-				}
+				if (need_write)
+					write_page(bitmap, page, 0);
 				continue;
 			}
 
@@ -1048,13 +1084,11 @@
 				if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
 					clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
 					spin_unlock_irqrestore(&bitmap->lock, flags);
-					err = write_page(bitmap, lastpage, 0);
+					write_page(bitmap, lastpage, 0);
 				} else {
 					set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
 					spin_unlock_irqrestore(&bitmap->lock, flags);
 				}
-				if (err)
-					bitmap_file_kick(bitmap);
 			} else
 				spin_unlock_irqrestore(&bitmap->lock, flags);
 			lastpage = page;
@@ -1097,14 +1131,13 @@
 		if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
 			clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
 			spin_unlock_irqrestore(&bitmap->lock, flags);
-			err = write_page(bitmap, lastpage, 0);
+			write_page(bitmap, lastpage, 0);
 		} else {
 			set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
 			spin_unlock_irqrestore(&bitmap->lock, flags);
 		}
 	}
 
-	return err;
 }
 
 static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
@@ -1517,7 +1550,9 @@
 
 	mddev->thread->timeout = bitmap->daemon_sleep * HZ;
 
-	return bitmap_update_sb(bitmap);
+	bitmap_update_sb(bitmap);
+
+	return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
 
  error:
 	bitmap_free(bitmap);
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 3d65917..8fe81e1 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -623,6 +623,7 @@
 
 	ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
 	if (!ps->metadata_wq) {
+		kfree(ps);
 		DMERR("couldn't start header metadata update thread");
 		return -ENOMEM;
 	}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 1a876f9..144071e 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -951,13 +951,12 @@
 
 	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
 
-	ms = kmalloc(len, GFP_KERNEL);
+	ms = kzalloc(len, GFP_KERNEL);
 	if (!ms) {
 		ti->error = "Cannot allocate mirror context";
 		return NULL;
 	}
 
-	memset(ms, 0, len);
 	spin_lock_init(&ms->lock);
 
 	ms->ti = ti;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f4f7d35..846614e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -161,9 +161,7 @@
 {
 	kmem_cache_destroy(_tio_cache);
 	kmem_cache_destroy(_io_cache);
-
-	if (unregister_blkdev(_major, _name) < 0)
-		DMERR("unregister_blkdev failed");
+	unregister_blkdev(_major, _name);
 
 	_major = 0;
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 33beaa7..65ddc88 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1640,7 +1640,6 @@
 
 static void md_update_sb(mddev_t * mddev, int force_change)
 {
-	int err;
 	struct list_head *tmp;
 	mdk_rdev_t *rdev;
 	int sync_req;
@@ -1727,7 +1726,7 @@
 		"md: updating %s RAID superblock on device (in sync %d)\n",
 		mdname(mddev),mddev->in_sync);
 
-	err = bitmap_update_sb(mddev->bitmap);
+	bitmap_update_sb(mddev->bitmap);
 	ITERATE_RDEV(mddev,rdev,tmp) {
 		char b[BDEVNAME_SIZE];
 		dprintk(KERN_INFO "md: ");
@@ -2073,9 +2072,11 @@
 		err = super_types[super_format].
 			load_super(rdev, NULL, super_minor);
 		if (err == -EINVAL) {
-			printk(KERN_WARNING 
-				"md: %s has invalid sb, not importing!\n",
-				bdevname(rdev->bdev,b));
+			printk(KERN_WARNING
+				"md: %s does not have a valid v%d.%d "
+			       "superblock, not importing!\n",
+				bdevname(rdev->bdev,b),
+			       super_format, super_minor);
 			goto abort_free;
 		}
 		if (err < 0) {
@@ -3174,13 +3175,33 @@
 	 * Drop all container device buffers, from now on
 	 * the only valid external interface is through the md
 	 * device.
-	 * Also find largest hardsector size
 	 */
 	ITERATE_RDEV(mddev,rdev,tmp) {
 		if (test_bit(Faulty, &rdev->flags))
 			continue;
 		sync_blockdev(rdev->bdev);
 		invalidate_bdev(rdev->bdev);
+
+		/* perform some consistency tests on the device.
+		 * We don't want the data to overlap the metadata;
+		 * internal bitmap issues are handled elsewhere.
+		 */
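+		/* the *2 factors below convert sb_offset and mddev->size
+		 * (both in KiB) to 512-byte sectors for comparison with
+		 * data_offset.
+		 */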
+		if (rdev->data_offset < rdev->sb_offset) {
+			if (mddev->size &&
+			    rdev->data_offset + mddev->size*2
+			    > rdev->sb_offset*2) {
+				printk("md: %s: data overlaps metadata\n",
+				       mdname(mddev));
+				return -EINVAL;
+			}
+		} else {
+			if (rdev->sb_offset*2 + rdev->sb_size/512
+			    > rdev->data_offset) {
+				printk("md: %s: metadata overlaps data\n",
+				       mdname(mddev));
+				return -EINVAL;
+			}
+		}
 	}
 
 	md_probe(mddev->unit, NULL, NULL);
@@ -4642,7 +4663,6 @@
 	 * many dirty RAID5 blocks.
 	 */
 
-	current->flags |= PF_NOFREEZE;
 	allow_signal(SIGKILL);
 	while (!kthread_should_stop()) {
 
@@ -5090,7 +5110,7 @@
 	mdk_rdev_t * rdev;
 	struct list_head *tmp;
 	int idle;
-	unsigned long curr_events;
+	long curr_events;
 
 	idle = 1;
 	ITERATE_RDEV(mddev,rdev,tmp) {
@@ -5098,20 +5118,29 @@
 		curr_events = disk_stat_read(disk, sectors[0]) + 
 				disk_stat_read(disk, sectors[1]) - 
 				atomic_read(&disk->sync_io);
-		/* The difference between curr_events and last_events
-		 * will be affected by any new non-sync IO (making
-		 * curr_events bigger) and any difference in the amount of
-		 * in-flight syncio (making current_events bigger or smaller)
-		 * The amount in-flight is currently limited to
-		 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
-		 * which is at most 4096 sectors.
-		 * These numbers are fairly fragile and should be made
-		 * more robust, probably by enforcing the
-		 * 'window size' that md_do_sync sort-of uses.
+		/* sync IO will cause sync_io to increase before the disk_stats
+		 * as sync_io is counted when a request starts, and
+		 * disk_stats is counted when it completes.
+		 * So resync activity will cause curr_events to be smaller than
+		 * when there was no such activity.
+		 * non-sync IO will cause disk_stat to increase without
+		 * increasing sync_io so curr_events will (eventually)
+		 * be larger than it was before.  Once it becomes
+		 * substantially larger, the test below will cause
+		 * the array to appear non-idle, and resync will slow
+		 * down.
+		 * If there is a lot of outstanding resync activity when
+		 * we set last_events to curr_events, then all that activity
+		 * completing might cause the array to appear non-idle
+		 * and resync will be slowed down even though there might
+		 * not have been non-resync activity.  This will only
+		 * happen once though.  'last_events' will soon reflect
+		 * the state where there is little or no outstanding
+		 * resync requests, and further resync activity will
+		 * always make curr_events less than last_events.
 		 *
-		 * Note: the following is an unsigned comparison.
 		 */
-		if ((long)curr_events - (long)rdev->last_events > 4096) {
+		if (curr_events - rdev->last_events > 4096) {
 			rdev->last_events = curr_events;
 			idle = 0;
 		}
@@ -5772,7 +5801,7 @@
 	for (i = 0; i < dev_cnt; i++) {
 		dev_t dev = detected_devices[i];
 
-		rdev = md_import_device(dev,0, 0);
+		rdev = md_import_device(dev,0, 90);
 		if (IS_ERR(rdev))
 			continue;
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 46677d7..00c78b7 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1526,8 +1526,7 @@
 			blk_remove_plug(mddev->queue);
 			spin_unlock_irqrestore(&conf->device_lock, flags);
 			/* flush any pending bitmap writes to disk before proceeding w/ I/O */
-			if (bitmap_unplug(mddev->bitmap) != 0)
-				printk("%s: bitmap file write failed!\n", mdname(mddev));
+			bitmap_unplug(mddev->bitmap);
 
 			while (bio) { /* submit pending writes */
 				struct bio *next = bio->bi_next;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9eb66c1..a95ada1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1510,8 +1510,7 @@
 			blk_remove_plug(mddev->queue);
 			spin_unlock_irqrestore(&conf->device_lock, flags);
 			/* flush any pending bitmap writes to disk before proceeding w/ I/O */
-			if (bitmap_unplug(mddev->bitmap) != 0)
-				printk("%s: bitmap file write failed!\n", mdname(mddev));
+			bitmap_unplug(mddev->bitmap);
 
 			while (bio) { /* submit pending writes */
 				struct bio *next = bio->bi_next;
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 624b21c..d9d033e 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -80,8 +80,12 @@
 config VIDEO_BTCX
 	tristate
 
+config VIDEO_IR_I2C
+	tristate
+
 config VIDEO_IR
 	tristate
+	select VIDEO_IR_I2C if I2C
 
 config VIDEO_TVEEPROM
 	tristate
diff --git a/drivers/media/common/ir-functions.c b/drivers/media/common/ir-functions.c
index fcb1941..fe447a0 100644
--- a/drivers/media/common/ir-functions.c
+++ b/drivers/media/common/ir-functions.c
@@ -107,21 +107,20 @@
 }
 
 /* -------------------------------------------------------------------------- */
-
+/* extract mask bits out of data and pack them into the result */
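+/* e.g. data=0x5, mask=0x6: bits 1-2 of data (0 and 1) pack LSB-first into 0x2 */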
 u32 ir_extract_bits(u32 data, u32 mask)
 {
-	int mbit, vbit;
-	u32 value;
+	u32 vbit = 1, value = 0;
 
-	value = 0;
-	vbit  = 0;
-	for (mbit = 0; mbit < 32; mbit++) {
-		if (!(mask & ((u32)1 << mbit)))
-			continue;
-		if (data & ((u32)1 << mbit))
-			value |= (1 << vbit);
-		vbit++;
-	}
+	do {
+		if (mask & 1) {
+			if (data & 1)
+				value |= vbit;
+			vbit <<= 1;
+		}
+		data >>= 1;
+	} while (mask >>= 1);
+
 	return value;
 }
 
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c
index ef3e54c..ba6701e 100644
--- a/drivers/media/common/saa7146_core.c
+++ b/drivers/media/common/saa7146_core.c
@@ -27,7 +27,7 @@
 
 unsigned int saa7146_debug;
 
-module_param(saa7146_debug, int, 0644);
+module_param(saa7146_debug, uint, 0644);
 MODULE_PARM_DESC(saa7146_debug, "debug level (default: 0)");
 
 #if 0
@@ -130,10 +130,10 @@
 /********************************************************************************/
 /* common page table functions */
 
-char *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt)
+void *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt)
 {
 	int pages = (length+PAGE_SIZE-1)/PAGE_SIZE;
-	char *mem = vmalloc_32(length);
+	void *mem = vmalloc_32(length);
 	int slen = 0;
 
 	if (NULL == mem)
@@ -168,7 +168,7 @@
 	return NULL;
 }
 
-void saa7146_vfree_destroy_pgtable(struct pci_dev *pci, char *mem, struct saa7146_pgtable *pt)
+void saa7146_vfree_destroy_pgtable(struct pci_dev *pci, void *mem, struct saa7146_pgtable *pt)
 {
 	pci_unmap_sg(pci, pt->slist, pt->nents, PCI_DMA_FROMDEVICE);
 	saa7146_pgtable_free(pci, pt);
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index e3d04a4..664280c 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -889,9 +889,9 @@
 
 		DEB_EE(("VIDIOC_QUERYCAP\n"));
 
-		strcpy(cap->driver, "saa7146 v4l2");
-		strlcpy(cap->card, dev->ext->name, sizeof(cap->card));
-		sprintf(cap->bus_info,"PCI:%s", pci_name(dev->pci));
+		strcpy((char *)cap->driver, "saa7146 v4l2");
+		strlcpy((char *)cap->card, dev->ext->name, sizeof(cap->card));
+		sprintf((char *)cap->bus_info,"PCI:%s", pci_name(dev->pci));
 		cap->version = SAA7146_VERSION_CODE;
 		cap->capabilities =
 			V4L2_CAP_VIDEO_CAPTURE |
@@ -968,7 +968,7 @@
 			}
 			memset(f,0,sizeof(*f));
 			f->index = index;
-			strlcpy(f->description,formats[index].name,sizeof(f->description));
+			strlcpy((char *)f->description,formats[index].name,sizeof(f->description));
 			f->pixelformat = formats[index].pixelformat;
 			break;
 		}
diff --git a/drivers/media/dvb/b2c2/Kconfig b/drivers/media/dvb/b2c2/Kconfig
index a0dcd59..3197aeb 100644
--- a/drivers/media/dvb/b2c2/Kconfig
+++ b/drivers/media/dvb/b2c2/Kconfig
@@ -1,7 +1,7 @@
 config DVB_B2C2_FLEXCOP
 	tristate "Technisat/B2C2 FlexCopII(b) and FlexCopIII adapters"
 	depends on DVB_CORE && I2C
-	select DVB_PLL
+	select DVB_PLL if !DVB_FE_CUSTOMISE
 	select DVB_STV0299 if !DVB_FE_CUSTOMISE
 	select DVB_MT352 if !DVB_FE_CUSTOMISE
 	select DVB_MT312 if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/dvb/b2c2/Makefile b/drivers/media/dvb/b2c2/Makefile
index bff00b5..e97ff60 100644
--- a/drivers/media/dvb/b2c2/Makefile
+++ b/drivers/media/dvb/b2c2/Makefile
@@ -12,4 +12,4 @@
 b2c2-flexcop-usb-objs = flexcop-usb.o
 obj-$(CONFIG_DVB_B2C2_FLEXCOP_USB) += b2c2-flexcop-usb.o
 
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
index b02c2fd..0378fd6 100644
--- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
@@ -500,13 +500,13 @@
 	/* try the air atsc 2nd generation (nxt2002) */
 	if ((fc->fe = dvb_attach(nxt200x_attach, &samsung_tbmv_config, &fc->i2c_adap)) != NULL) {
 		fc->dev_type          = FC_AIR_ATSC2;
-		dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, &dvb_pll_samsung_tbmv);
+		dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, DVB_PLL_SAMSUNG_TBMV);
 		info("found the nxt2002 at i2c address: 0x%02x",samsung_tbmv_config.demod_address);
 	} else
 	/* try the air atsc 3nd generation (lgdt3303) */
 	if ((fc->fe = dvb_attach(lgdt330x_attach, &air2pc_atsc_hd5000_config, &fc->i2c_adap)) != NULL) {
 		fc->dev_type          = FC_AIR_ATSC3;
-		dvb_attach(dvb_pll_attach, fc->fe, 0x61, &fc->i2c_adap, &dvb_pll_lg_tdvs_h06xf);
+		dvb_attach(dvb_pll_attach, fc->fe, 0x61, &fc->i2c_adap, DVB_PLL_LG_TDVS_H06XF);
 		info("found the lgdt3303 at i2c address: 0x%02x",air2pc_atsc_hd5000_config.demod_address);
 	} else
 	/* try the air atsc 1nd generation (bcm3510)/panasonic ct10s */
diff --git a/drivers/media/dvb/bt8xx/Kconfig b/drivers/media/dvb/bt8xx/Kconfig
index cfd6fb7..ea66617 100644
--- a/drivers/media/dvb/bt8xx/Kconfig
+++ b/drivers/media/dvb/bt8xx/Kconfig
@@ -7,7 +7,7 @@
 	select DVB_CX24110 if !DVB_FE_CUSTOMISE
 	select DVB_OR51211 if !DVB_FE_CUSTOMISE
 	select DVB_LGDT330X if !DVB_FE_CUSTOMISE
-	select DVB_PLL
+	select DVB_PLL if !DVB_FE_CUSTOMISE
 	select DVB_ZL10353 if !DVB_FE_CUSTOMISE
 	select FW_LOADER
 	help
diff --git a/drivers/media/dvb/bt8xx/Makefile b/drivers/media/dvb/bt8xx/Makefile
index 9d197ef..84cf705 100644
--- a/drivers/media/dvb/bt8xx/Makefile
+++ b/drivers/media/dvb/bt8xx/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_DVB_BT8XX) += bt878.o dvb-bt8xx.o dst.o dst_ca.o
 
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/video/bt8xx -Idrivers/media/dvb/frontends
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/video/bt8xx -Idrivers/media/dvb/frontends
diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c
index e908e3c..b7a17e6 100644
--- a/drivers/media/dvb/bt8xx/dst.c
+++ b/drivers/media/dvb/bt8xx/dst.c
@@ -1652,7 +1652,7 @@
 static int dst_tune_frontend(struct dvb_frontend* fe,
 			    struct dvb_frontend_parameters* p,
 			    unsigned int mode_flags,
-			    int *delay,
+			    unsigned int *delay,
 			    fe_status_t *status)
 {
 	struct dst_state *state = fe->demodulator_priv;
diff --git a/drivers/media/dvb/bt8xx/dvb-bt8xx.c b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
index 4f1c09b..67613eb 100644
--- a/drivers/media/dvb/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
@@ -611,7 +611,7 @@
 		card->fe = dvb_attach(lgdt330x_attach, &tdvs_tua6034_config, card->i2c_adapter);
 		if (card->fe != NULL) {
 			dvb_attach(dvb_pll_attach, card->fe, 0x61,
-				   card->i2c_adapter, &dvb_pll_lg_tdvs_h06xf);
+				   card->i2c_adapter, DVB_PLL_LG_TDVS_H06XF);
 			dprintk ("dvb_bt8xx: lgdt330x detected\n");
 		}
 		break;
@@ -692,6 +692,9 @@
 
 	case BTTV_BOARD_PC_HDTV:
 		card->fe = dvb_attach(or51211_attach, &or51211_config, card->i2c_adapter);
+		if (card->fe != NULL)
+			dvb_attach(dvb_pll_attach, card->fe, 0x61,
+				   card->i2c_adapter, DVB_PLL_FCV1236D);
 		break;
 	}
 
diff --git a/drivers/media/dvb/cinergyT2/Makefile b/drivers/media/dvb/cinergyT2/Makefile
index c51aece..d762d8c 100644
--- a/drivers/media/dvb/cinergyT2/Makefile
+++ b/drivers/media/dvb/cinergyT2/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_DVB_CINERGYT2) += cinergyT2.o
 
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c
index b40af48..28929b6 100644
--- a/drivers/media/dvb/cinergyT2/cinergyT2.c
+++ b/drivers/media/dvb/cinergyT2/cinergyT2.c
@@ -829,7 +829,7 @@
 	input_dev->id.vendor = cinergyt2->udev->descriptor.idVendor;
 	input_dev->id.product = cinergyt2->udev->descriptor.idProduct;
 	input_dev->id.version = 1;
-	input_dev->cdev.dev = &cinergyt2->udev->dev;
+	input_dev->dev.parent = &cinergyt2->udev->dev;
 
 	err = input_register_device(input_dev);
 	if (err) {
@@ -905,12 +905,11 @@
 	struct cinergyt2 *cinergyt2;
 	int err;
 
-	if (!(cinergyt2 = kmalloc (sizeof(struct cinergyt2), GFP_KERNEL))) {
+	if (!(cinergyt2 = kzalloc (sizeof(struct cinergyt2), GFP_KERNEL))) {
 		dprintk(1, "out of memory?!?\n");
 		return -ENOMEM;
 	}
 
-	memset (cinergyt2, 0, sizeof (struct cinergyt2));
 	usb_set_intfdata (intf, (void *) cinergyt2);
 
 	mutex_init(&cinergyt2->sem);
@@ -1000,18 +999,15 @@
 	if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->wq_sem))
 		return -ERESTARTSYS;
 
-	if (1) {
-		cinergyt2_suspend_rc(cinergyt2);
-		cancel_rearming_delayed_work(&cinergyt2->query_work);
+	cinergyt2_suspend_rc(cinergyt2);
+	cancel_rearming_delayed_work(&cinergyt2->query_work);
 
-		mutex_lock(&cinergyt2->sem);
-		if (cinergyt2->streaming)
-			cinergyt2_stop_stream_xfer(cinergyt2);
-		cinergyt2_sleep(cinergyt2, 1);
-		mutex_unlock(&cinergyt2->sem);
-	}
+	mutex_lock(&cinergyt2->sem);
+	if (cinergyt2->streaming)
+		cinergyt2_stop_stream_xfer(cinergyt2);
+	cinergyt2_sleep(cinergyt2, 1);
+	mutex_unlock(&cinergyt2->sem);
 
-	mutex_unlock(&cinergyt2->wq_sem);
 	return 0;
 }
 
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 275df65..5394de2 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -97,7 +97,7 @@
 		if (avail > todo)
 			avail = todo;
 
-		ret = dvb_ringbuffer_read(src, buf, avail, 1);
+		ret = dvb_ringbuffer_read(src, (u8 *)buf, avail, 1);
 		if (ret < 0)
 			break;
 
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
index 2a03bf5..4fadddb 100644
--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
@@ -175,7 +175,7 @@
  * @param nlen Number of bytes in needle.
  * @return Pointer into haystack needle was found at, or NULL if not found.
  */
-static u8 *findstr(u8 * haystack, int hlen, u8 * needle, int nlen)
+static char *findstr(char * haystack, int hlen, char * needle, int nlen)
 {
 	int i;
 
@@ -482,7 +482,7 @@
 	}
 
 	/* check it contains the correct DVB string */
-	dvb_str = findstr(tuple, tupleLength, "DVB_CI_V", 8);
+	dvb_str = findstr((char *)tuple, tupleLength, "DVB_CI_V", 8);
 	if (dvb_str == NULL)
 		return -EINVAL;
 	if (tupleLength < ((dvb_str - (char *) tuple) + 12))
@@ -513,8 +513,8 @@
 			ca->slot_info[slot].config_option = tuple[0] & 0x3f;
 
 			/* OK, check it contains the correct strings */
-			if ((findstr(tuple, tupleLength, "DVB_HOST", 8) == NULL) ||
-			    (findstr(tuple, tupleLength, "DVB_CI_MODULE", 13) == NULL))
+			if ((findstr((char *)tuple, tupleLength, "DVB_HOST", 8) == NULL) ||
+			    (findstr((char *)tuple, tupleLength, "DVB_CI_MODULE", 13) == NULL))
 				break;
 
 			got_cftableentry = 1;
@@ -1300,7 +1300,7 @@
 	struct dvb_ca_private *ca = dvbdev->priv;
 	u8 slot, connection_id;
 	int status;
-	char fragbuf[HOST_LINK_BUF_SIZE];
+	u8 fragbuf[HOST_LINK_BUF_SIZE];
 	int fragpos = 0;
 	int fraglen;
 	unsigned long timeout;
@@ -1486,7 +1486,7 @@
 				}
 
 				if ((status = dvb_ringbuffer_pkt_read(&ca->slot_info[slot].rx_buffer, idx, 2,
-								      buf + pktlen, fraglen, 1)) < 0) {
+								      (u8 *)buf + pktlen, fraglen, 1)) < 0) {
 					goto exit;
 				}
 				pktlen += fraglen;
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 6d8d1c3d..cb6987f 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -1068,7 +1068,7 @@
 
 	if (mutex_lock_interruptible(&dvbdemux->mutex))
 		return -ERESTARTSYS;
-	dvb_dmx_swfilter(dvbdemux, buf, count);
+	dvb_dmx_swfilter(dvbdemux, (u8 *)buf, count);
 	mutex_unlock(&dvbdemux->mutex);
 
 	if (signal_pending(current))
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index f4e4ca2..b6c7f66 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -523,6 +523,7 @@
 
 	dvb_frontend_init(fe);
 
+	set_freezable();
 	while (1) {
 		up(&fepriv->sem);	    /* is locked when we enter the thread... */
 restart:
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index f233d78..a770a87 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -103,7 +103,7 @@
 	int (*tune)(struct dvb_frontend* fe,
 		    struct dvb_frontend_parameters* params,
 		    unsigned int mode_flags,
-		    int *delay,
+		    unsigned int *delay,
 		    fe_status_t *status);
 	/* get frontend tuning algorithm from the module */
 	int (*get_frontend_algo)(struct dvb_frontend *fe);
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 4ebf33a5..acf0263 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -347,7 +347,8 @@
 {
 	struct dvb_net_priv *priv = dev->priv;
 	unsigned long skipped = 0L;
-	u8 *ts, *ts_end, *from_where = NULL, ts_remain = 0, how_much = 0, new_ts = 1;
+	const u8 *ts, *ts_end, *from_where = NULL;
+	u8 ts_remain = 0, how_much = 0, new_ts = 1;
 	struct ethhdr *ethh = NULL;
 
 #ifdef ULE_DEBUG
@@ -364,7 +365,7 @@
 	/* For all TS cells in current buffer.
 	 * Appearently, we are called for every single TS cell.
 	 */
-	for (ts = (char *)buf, ts_end = (char *)buf + buf_len; ts < ts_end; /* no default incr. */ ) {
+	for (ts = buf, ts_end = buf + buf_len; ts < ts_end; /* no default incr. */ ) {
 
 		if (new_ts) {
 			/* We are about to process a new TS cell. */
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index a9fa333..9ef0c00 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -208,7 +208,7 @@
 	if ((id = dvbdev_get_free_id (adap, type)) < 0){
 		mutex_unlock(&dvbdev_register_lock);
 		*pdvbdev = NULL;
-		printk ("%s: could get find free device id...\n", __FUNCTION__);
+		printk(KERN_ERR "%s: couldn't find free device id\n", __FUNCTION__);
 		return -ENFILE;
 	}
 
@@ -252,7 +252,7 @@
 		return PTR_ERR(clsdev);
 	}
 
-	dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
+	dprintk(KERN_DEBUG "DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
 		adap->num, dnames[type], id, nums2minor(adap->num, type, id),
 		nums2minor(adap->num, type, id));
 
@@ -311,7 +311,7 @@
 	memset (adap, 0, sizeof(struct dvb_adapter));
 	INIT_LIST_HEAD (&adap->device_list);
 
-	printk ("DVB: registering new adapter (%s).\n", name);
+	printk(KERN_INFO "DVB: registering new adapter (%s)\n", name);
 
 	adap->num = num;
 	adap->name = name;
@@ -407,13 +407,13 @@
 	dev_t dev = MKDEV(DVB_MAJOR, 0);
 
 	if ((retval = register_chrdev_region(dev, MAX_DVB_MINORS, "DVB")) != 0) {
-		printk("dvb-core: unable to get major %d\n", DVB_MAJOR);
+		printk(KERN_ERR "dvb-core: unable to get major %d\n", DVB_MAJOR);
 		return retval;
 	}
 
 	cdev_init(&dvb_device_cdev, &dvb_device_fops);
 	if ((retval = cdev_add(&dvb_device_cdev, dev, MAX_DVB_MINORS)) != 0) {
-		printk("dvb-core: unable to get major %d\n", DVB_MAJOR);
+		printk(KERN_ERR "dvb-core: unable register character device\n");
 		goto error;
 	}
 
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 5448873..40e41f2 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -2,7 +2,6 @@
 	tristate "Support for various USB DVB devices"
 	depends on DVB_CORE && USB && I2C
 	select FW_LOADER
-	select DVB_PLL
 	help
 	  By enabling this you will be able to choose the various supported
 	  USB1.1 and USB2.0 DVB devices.
@@ -27,13 +26,14 @@
 	depends on DVB_USB
 	select DVB_DIB3000MC
 	select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
+	select DVB_PLL if !DVB_FE_CUSTOMISE
 	help
 	  Say Y here to support the AVerMedia AverTV DVB-T USB 2.0 (A800) receiver.
 
 config DVB_USB_DIBUSB_MB
 	tristate "DiBcom USB DVB-T devices (based on the DiB3000M-B) (see help for device list)"
 	depends on DVB_USB
-	select DVB_PLL
+	select DVB_PLL if !DVB_FE_CUSTOMISE
 	select DVB_DIB3000MB
 	select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
 	help
@@ -89,7 +89,7 @@
 config DVB_USB_UMT_010
 	tristate "HanfTek UMT-010 DVB-T USB2.0 support"
 	depends on DVB_USB
-	select DVB_PLL
+	select DVB_PLL if !DVB_FE_CUSTOMISE
 	select DVB_DIB3000MC
 	select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
 	help
@@ -98,7 +98,7 @@
 config DVB_USB_CXUSB
 	tristate "Conexant USB2.0 hybrid reference design support"
 	depends on DVB_USB
-	select DVB_PLL
+	select DVB_PLL if !DVB_FE_CUSTOMISE
 	select DVB_CX22702 if !DVB_FE_CUSTOMISE
 	select DVB_LGDT330X if !DVB_FE_CUSTOMISE
 	select DVB_MT352 if !DVB_FE_CUSTOMISE
@@ -142,7 +142,7 @@
 config DVB_USB_DIGITV
 	tristate "Nebula Electronics uDigiTV DVB-T USB2.0 support"
 	depends on DVB_USB
-	select DVB_PLL
+	select DVB_PLL if !DVB_FE_CUSTOMISE
 	select DVB_NXT6000 if !DVB_FE_CUSTOMISE
 	select DVB_MT352 if !DVB_FE_CUSTOMISE
 	help
@@ -188,6 +188,7 @@
 	depends on DVB_USB
 	select DVB_DIB3000MC
 	select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
+	select DVB_PLL if !DVB_FE_CUSTOMISE
 	help
 	  Say Y here to support the Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 receiver.
 
@@ -216,5 +217,23 @@
 	tristate "Opera1 DVB-S USB2.0 receiver"
 	depends on DVB_USB
 	select DVB_STV0299 if !DVB_FE_CUSTOMISE
+	select DVB_PLL if !DVB_FE_CUSTOMISE
 	help
 	  Say Y here to support the Opera DVB-S USB2.0 receiver.
+
+config DVB_USB_AF9005
+	tristate "Afatech AF9005 DVB-T USB1.1 support"
+	depends on DVB_USB && EXPERIMENTAL
+	select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
+	select DVB_TUNER_QT1010 if !DVB_FE_CUSTOMISE
+	help
+	  Say Y here to support the Afatech AF9005 based DVB-T USB1.1 receiver
+	  and the TerraTec Cinergy T USB XE (Rev.1)
+
+config DVB_USB_AF9005_REMOTE
+	tristate "Afatech AF9005 default remote control support"
+	depends on DVB_USB_AF9005
+	help
+	  Say Y here to support the default remote control decoding for the
+	  Afatech AF9005 based receiver.
+
diff --git a/drivers/media/dvb/dvb-usb/Makefile b/drivers/media/dvb/dvb-usb/Makefile
index 976f840..73ac0a9 100644
--- a/drivers/media/dvb/dvb-usb/Makefile
+++ b/drivers/media/dvb/dvb-usb/Makefile
@@ -55,4 +55,10 @@
 obj-$(CONFIG_DVB_USB_OPERA1) += dvb-usb-opera.o
 
 
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
+dvb-usb-af9005-objs = af9005.o af9005-fe.o
+obj-$(CONFIG_DVB_USB_AF9005) += dvb-usb-af9005.o
+
+dvb-usb-af9005-remote-objs = af9005-remote.o
+obj-$(CONFIG_DVB_USB_AF9005_REMOTE) += dvb-usb-af9005-remote.o
+
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
diff --git a/drivers/media/dvb/dvb-usb/af9005-fe.c b/drivers/media/dvb/dvb-usb/af9005-fe.c
new file mode 100644
index 0000000..7195c94
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/af9005-fe.c
@@ -0,0 +1,1503 @@
+/* Frontend part of the Linux driver for the Afatech 9005
+ * USB1.1 DVB-T receiver.
+ *
+ * Copyright (C) 2007 Luca Olivetti (luca@ventoso.org)
+ *
+ * Thanks to Afatech who kindly provided information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ */
+#include "af9005.h"
+#include "af9005-script.h"
+#include "mt2060.h"
+#include "qt1010.h"
+#include <asm/div64.h>
+
+struct af9005_fe_state {
+	struct dvb_usb_device *d;
+	struct dvb_frontend *tuner;
+
+	fe_status_t stat;
+
+	/* retraining parameters */
+	u32 original_fcw;
+	u16 original_rf_top;
+	u16 original_if_top;
+	u16 original_if_min;
+	u16 original_aci0_if_top;
+	u16 original_aci1_if_top;
+	u16 original_aci0_if_min;
+	u8 original_if_unplug_th;
+	u8 original_rf_unplug_th;
+	u8 original_dtop_if_unplug_th;
+	u8 original_dtop_rf_unplug_th;
+
+	/* statistics */
+	u32 pre_vit_error_count;
+	u32 pre_vit_bit_count;
+	u32 ber;
+	u32 post_vit_error_count;
+	u32 post_vit_bit_count;
+	u32 unc;
+	u16 abort_count;
+
+	int opened;
+	int strong;
+	unsigned long next_status_check;
+	struct dvb_frontend frontend;
+};
+
+static int af9005_write_word_agc(struct dvb_usb_device *d, u16 reghi,
+				 u16 reglo, u8 pos, u8 len, u16 value)
+{
+	int ret;
+	u8 temp;
+
+	if ((ret = af9005_write_ofdm_register(d, reglo, (u8) (value & 0xff))))
+		return ret;
+	temp = (u8) ((value & 0x0300) >> 8);
+	return af9005_write_register_bits(d, reghi, pos, len,
+					  (u8) ((value & 0x300) >> 8));
+}
+
+static int af9005_read_word_agc(struct dvb_usb_device *d, u16 reghi,
+				u16 reglo, u8 pos, u8 len, u16 * value)
+{
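+	/* AGC values are 10 bits wide: the low 8 bits live in reglo and the
+	 * top two bits sit at bit offset 'pos' of reghi.
+	 */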
+	int ret;
+	u8 temp0, temp1;
+
+	if ((ret = af9005_read_ofdm_register(d, reglo, &temp0)))
+		return ret;
+	if ((ret = af9005_read_ofdm_register(d, reghi, &temp1)))
+		return ret;
+	switch (pos) {
+	case 0:
+		*value = ((u16) (temp1 & 0x03) << 8) + (u16) temp0;
+		break;
+	case 2:
+		*value = ((u16) (temp1 & 0x0C) << 6) + (u16) temp0;
+		break;
+	case 4:
+		*value = ((u16) (temp1 & 0x30) << 4) + (u16) temp0;
+		break;
+	case 6:
+		*value = ((u16) (temp1 & 0xC0) << 2) + (u16) temp0;
+		break;
+	default:
+		err("invalid pos in read word agc");
+		return -EINVAL;
+	}
+	return 0;
+
+}
+
+static int af9005_is_fecmon_available(struct dvb_frontend *fe, int *available)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	int ret;
+	u8 temp;
+
+	*available = false;
+
+	ret = af9005_read_register_bits(state->d, xd_p_fec_vtb_rsd_mon_en,
+					fec_vtb_rsd_mon_en_pos,
+					fec_vtb_rsd_mon_en_len, &temp);
+	if (ret)
+		return ret;
+	if (temp & 1) {
+		ret =
+		    af9005_read_register_bits(state->d,
+					      xd_p_reg_ofsm_read_rbc_en,
+					      reg_ofsm_read_rbc_en_pos,
+					      reg_ofsm_read_rbc_en_len, &temp);
+		if (ret)
+			return ret;
+		if ((temp & 1) == 0)
+			*available = true;
+
+	}
+	return 0;
+}
+
+static int af9005_get_post_vit_err_cw_count(struct dvb_frontend *fe,
+					    u32 * post_err_count,
+					    u32 * post_cw_count,
+					    u16 * abort_count)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	int ret;
+	u32 err_count;
+	u32 cw_count;
+	u8 temp, temp0, temp1, temp2;
+	u16 loc_abort_count;
+
+	*post_err_count = 0;
+	*post_cw_count = 0;
+
+	/* check if error bit count is ready */
+	ret =
+	    af9005_read_register_bits(state->d, xd_r_fec_rsd_ber_rdy,
+				      fec_rsd_ber_rdy_pos, fec_rsd_ber_rdy_len,
+				      &temp);
+	if (ret)
+		return ret;
+	if (!temp) {
+		deb_info("rsd counter not ready\n");
+		return 100;
+	}
+	/* get abort count */
+	ret =
+	    af9005_read_ofdm_register(state->d,
+				      xd_r_fec_rsd_abort_packet_cnt_7_0,
+				      &temp0);
+	if (ret)
+		return ret;
+	ret =
+	    af9005_read_ofdm_register(state->d,
+				      xd_r_fec_rsd_abort_packet_cnt_15_8,
+				      &temp1);
+	if (ret)
+		return ret;
+	loc_abort_count = ((u16) temp1 << 8) + temp0;
+
+	/* get error count */
+	ret =
+	    af9005_read_ofdm_register(state->d, xd_r_fec_rsd_bit_err_cnt_7_0,
+				      &temp0);
+	if (ret)
+		return ret;
+	ret =
+	    af9005_read_ofdm_register(state->d, xd_r_fec_rsd_bit_err_cnt_15_8,
+				      &temp1);
+	if (ret)
+		return ret;
+	ret =
+	    af9005_read_ofdm_register(state->d, xd_r_fec_rsd_bit_err_cnt_23_16,
+				      &temp2);
+	if (ret)
+		return ret;
+	err_count = ((u32) temp2 << 16) + ((u32) temp1 << 8) + temp0;
+	*post_err_count = err_count - (u32) loc_abort_count * 8 * 8;
+
+	/* get RSD packet number */
+	ret =
+	    af9005_read_ofdm_register(state->d, xd_p_fec_rsd_packet_unit_7_0,
+				      &temp0);
+	if (ret)
+		return ret;
+	ret =
+	    af9005_read_ofdm_register(state->d, xd_p_fec_rsd_packet_unit_15_8,
+				      &temp1);
+	if (ret)
+		return ret;
+	cw_count = ((u32) temp1 << 8) + temp0;
+	if (cw_count == 0) {
+		err("wrong RSD packet count");
+		return -EIO;
+	}
+	deb_info("POST abort count %d err count %d rsd packets %d\n",
+		 loc_abort_count, err_count, cw_count);
+	*post_cw_count = cw_count - (u32) loc_abort_count;
+	*abort_count = loc_abort_count;
+	return 0;
+
+}
+
+static int af9005_get_post_vit_ber(struct dvb_frontend *fe,
+				   u32 * post_err_count, u32 * post_cw_count,
+				   u16 * abort_count)
+{
+	u32 loc_cw_count = 0, loc_err_count;
+	u16 loc_abort_count;
+	int ret;
+
+	ret =
+	    af9005_get_post_vit_err_cw_count(fe, &loc_err_count, &loc_cw_count,
+					     &loc_abort_count);
+	if (ret)
+		return ret;
+	*post_err_count = loc_err_count;
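+	/* each RS codeword is 204 bytes, so convert codewords to bits */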
+	*post_cw_count = loc_cw_count * 204 * 8;
+	*abort_count = loc_abort_count;
+
+	return 0;
+}
+
+static int af9005_get_pre_vit_err_bit_count(struct dvb_frontend *fe,
+					    u32 * pre_err_count,
+					    u32 * pre_bit_count)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	u8 temp, temp0, temp1, temp2;
+	u32 super_frame_count, x, bits;
+	int ret;
+
+	ret =
+	    af9005_read_register_bits(state->d, xd_r_fec_vtb_ber_rdy,
+				      fec_vtb_ber_rdy_pos, fec_vtb_ber_rdy_len,
+				      &temp);
+	if (ret)
+		return ret;
+	if (!temp) {
+		deb_info("viterbi counter not ready\n");
+		return 101;	/* ERR_APO_VTB_COUNTER_NOT_READY; */
+	}
+	ret =
+	    af9005_read_ofdm_register(state->d, xd_r_fec_vtb_err_bit_cnt_7_0,
+				      &temp0);
+	if (ret)
+		return ret;
+	ret =
+	    af9005_read_ofdm_register(state->d, xd_r_fec_vtb_err_bit_cnt_15_8,
+				      &temp1);
+	if (ret)
+		return ret;
+	ret =
+	    af9005_read_ofdm_register(state->d, xd_r_fec_vtb_err_bit_cnt_23_16,
+				      &temp2);
+	if (ret)
+		return ret;
+	*pre_err_count = ((u32) temp2 << 16) + ((u32) temp1 << 8) + temp0;
+
+	ret =
+	    af9005_read_ofdm_register(state->d, xd_p_fec_super_frm_unit_7_0,
+				      &temp0);
+	if (ret)
+		return ret;
+	ret =
+	    af9005_read_ofdm_register(state->d, xd_p_fec_super_frm_unit_15_8,
+				      &temp1);
+	if (ret)
+		return ret;
+	super_frame_count = ((u32) temp1 << 8) + temp0;
+	if (super_frame_count == 0) {
+		deb_info("super frame count 0\n");
+		return 102;
+	}
+
+	/* read fft mode */
+	ret =
+	    af9005_read_register_bits(state->d, xd_g_reg_tpsd_txmod,
+				      reg_tpsd_txmod_pos, reg_tpsd_txmod_len,
+				      &temp);
+	if (ret)
+		return ret;
+	if (temp == 0) {
+		/* 2K */
+		x = 1512;
+	} else if (temp == 1) {
+		/* 8k */
+		x = 6048;
+	} else {
+		err("Invalid fft mode");
+		return -EINVAL;
+	}
+
+	/* read constellation mode */
+	ret =
+	    af9005_read_register_bits(state->d, xd_g_reg_tpsd_const,
+				      reg_tpsd_const_pos, reg_tpsd_const_len,
+				      &temp);
+	if (ret)
+		return ret;
+	switch (temp) {
+	case 0:		/* QPSK */
+		bits = 2;
+		break;
+	case 1:		/* QAM_16 */
+		bits = 4;
+		break;
+	case 2:		/* QAM_64 */
+		bits = 6;
+		break;
+	default:
+		err("invalid constellation mode");
+		return -EINVAL;
+	}
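+	/* a DVB-T superframe is 4 frames of 68 OFDM symbols, each carrying
+	 * x data carriers of 'bits' bits */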
+	*pre_bit_count = super_frame_count * 68 * 4 * x * bits;
+	deb_info("PRE err count %d frame count %d bit count %d\n",
+		 *pre_err_count, super_frame_count, *pre_bit_count);
+	return 0;
+}
+
+static int af9005_reset_pre_viterbi(struct dvb_frontend *fe)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	int ret;
+
+	/* set super frame count to 1 */
+	ret =
+	    af9005_write_ofdm_register(state->d, xd_p_fec_super_frm_unit_7_0,
+				       1 & 0xff);
+	if (ret)
+		return ret;
+	ret =
+	    af9005_write_ofdm_register(state->d, xd_p_fec_super_frm_unit_15_8,
+				       1 >> 8);
+	if (ret)
+		return ret;
+	/* reset pre viterbi error count */
+	ret =
+	    af9005_write_register_bits(state->d, xd_p_fec_vtb_ber_rst,
+				       fec_vtb_ber_rst_pos, fec_vtb_ber_rst_len,
+				       1);
+
+	return ret;
+}
+
+static int af9005_reset_post_viterbi(struct dvb_frontend *fe)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	int ret;
+
+	/* set packet unit */
+	ret =
+	    af9005_write_ofdm_register(state->d, xd_p_fec_rsd_packet_unit_7_0,
+				       10000 & 0xff);
+	if (ret)
+		return ret;
+	ret =
+	    af9005_write_ofdm_register(state->d, xd_p_fec_rsd_packet_unit_15_8,
+				       10000 >> 8);
+	if (ret)
+		return ret;
+	/* reset post viterbi error count */
+	ret =
+	    af9005_write_register_bits(state->d, xd_p_fec_rsd_ber_rst,
+				       fec_rsd_ber_rst_pos, fec_rsd_ber_rst_len,
+				       1);
+
+	return ret;
+}
+
+static int af9005_get_statistic(struct dvb_frontend *fe)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	int ret, fecavailable;
+	u64 numerator, denominator;
+
+	deb_info("GET STATISTIC\n");
+	ret = af9005_is_fecmon_available(fe, &fecavailable);
+	if (ret)
+		return ret;
+	if (!fecavailable) {
+		deb_info("fecmon not available\n");
+		return 0;
+	}
+
+	ret = af9005_get_pre_vit_err_bit_count(fe, &state->pre_vit_error_count,
+					       &state->pre_vit_bit_count);
+	if (ret == 0) {
+		af9005_reset_pre_viterbi(fe);
+		if (state->pre_vit_bit_count > 0) {
+			/* according to v0.0.4 of the dvb api, ber is expressed in
+			   multiples of 1e-9, so multiply the error count by
+			   1e9 = 1000000000 */
+			numerator =
+			    (u64) state->pre_vit_error_count * (u64) 1000000000;
+			denominator = (u64) state->pre_vit_bit_count;
+			state->ber = do_div(numerator, denominator);
+		} else {
+			state->ber = 0xffffffff;
+		}
+	}
+
+	ret = af9005_get_post_vit_ber(fe, &state->post_vit_error_count,
+				      &state->post_vit_bit_count,
+				      &state->abort_count);
+	if (ret == 0) {
+		ret = af9005_reset_post_viterbi(fe);
+		state->unc += state->abort_count;
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static int af9005_fe_refresh_state(struct dvb_frontend *fe)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	if (time_after(jiffies, state->next_status_check)) {
+		deb_info("REFRESH STATE\n");
+
+		/* statistics */
+		if (af9005_get_statistic(fe))
+			err("get_statistic_failed");
+		state->next_status_check = jiffies + 250 * HZ / 1000;
+	}
+	return 0;
+}
+
+static int af9005_fe_read_status(struct dvb_frontend *fe, fe_status_t * stat)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	u8 temp;
+	int ret;
+
+	if (state->tuner == NULL)
+		return -ENODEV;
+
+	*stat = 0;
+	ret = af9005_read_register_bits(state->d, xd_p_agc_lock,
+					agc_lock_pos, agc_lock_len, &temp);
+	if (ret)
+		return ret;
+	if (temp)
+		*stat |= FE_HAS_SIGNAL;
+
+	ret = af9005_read_register_bits(state->d, xd_p_fd_tpsd_lock,
+					fd_tpsd_lock_pos, fd_tpsd_lock_len,
+					&temp);
+	if (ret)
+		return ret;
+	if (temp)
+		*stat |= FE_HAS_CARRIER;
+
+	ret = af9005_read_register_bits(state->d,
+					xd_r_mp2if_sync_byte_locked,
+					mp2if_sync_byte_locked_pos,
+					mp2if_sync_byte_locked_len, &temp);
+	if (ret)
+		return ret;
+	if (temp)
+		*stat |= FE_HAS_SYNC | FE_HAS_VITERBI | FE_HAS_LOCK;
+	if (state->opened)
+		af9005_led_control(state->d, *stat & FE_HAS_LOCK);
+
+	ret =
+	    af9005_read_register_bits(state->d, xd_p_reg_strong_sginal_detected,
+				      reg_strong_sginal_detected_pos,
+				      reg_strong_sginal_detected_len, &temp);
+	if (ret)
+		return ret;
+	if (temp != state->strong) {
+		deb_info("adjust for strong signal %d\n", temp);
+		state->strong = temp;
+	}
+	return 0;
+}
+
+static int af9005_fe_read_ber(struct dvb_frontend *fe, u32 * ber)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	if (state->tuner == NULL)
+		return -ENODEV;
+	af9005_fe_refresh_state(fe);
+	*ber = state->ber;
+	return 0;
+}
+
+static int af9005_fe_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	if (state->tuner == NULL)
+		return -ENODEV;
+	af9005_fe_refresh_state(fe);
+	*unc = state->unc;
+	return 0;
+}
+
+static int af9005_fe_read_signal_strength(struct dvb_frontend *fe,
+					  u16 * strength)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	int ret;
+	u8 if_gain, rf_gain;
+
+	if (state->tuner == NULL)
+		return -ENODEV;
+	ret =
+	    af9005_read_ofdm_register(state->d, xd_r_reg_aagc_rf_gain,
+				      &rf_gain);
+	if (ret)
+		return ret;
+	ret =
+	    af9005_read_ofdm_register(state->d, xd_r_reg_aagc_if_gain,
+				      &if_gain);
+	if (ret)
+		return ret;
+	/* this value has no real meaning, but I don't have the tables that relate
+	   the RF and IF gain to dBm, so I just scale the value */
+	*strength = (512 - rf_gain - if_gain) << 7;
+	return 0;
+}
+
+static int af9005_fe_read_snr(struct dvb_frontend *fe, u16 * snr)
+{
+	/* the snr can be derived from the ber and the constellation,
+	   but I don't think this kind of complex calculation belongs
+	   in the driver. I may be wrong.... */
+	return -ENOSYS;
+}
+
+static int af9005_fe_program_cfoe(struct dvb_usb_device *d, fe_bandwidth_t bw)
+{
+	u8 temp0, temp1, temp2, temp3, buf[4];
+	int ret;
+	u32 NS_coeff1_2048Nu;
+	u32 NS_coeff1_8191Nu;
+	u32 NS_coeff1_8192Nu;
+	u32 NS_coeff1_8193Nu;
+	u32 NS_coeff2_2k;
+	u32 NS_coeff2_8k;
+
+	switch (bw) {
+	case BANDWIDTH_6_MHZ:
+		NS_coeff1_2048Nu = 0x2ADB6DC;
+		NS_coeff1_8191Nu = 0xAB7313;
+		NS_coeff1_8192Nu = 0xAB6DB7;
+		NS_coeff1_8193Nu = 0xAB685C;
+		NS_coeff2_2k = 0x156DB6E;
+		NS_coeff2_8k = 0x55B6DC;
+		break;
+
+	case BANDWIDTH_7_MHZ:
+		NS_coeff1_2048Nu = 0x3200001;
+		NS_coeff1_8191Nu = 0xC80640;
+		NS_coeff1_8192Nu = 0xC80000;
+		NS_coeff1_8193Nu = 0xC7F9C0;
+		NS_coeff2_2k = 0x1900000;
+		NS_coeff2_8k = 0x640000;
+		break;
+
+	case BANDWIDTH_8_MHZ:
+		NS_coeff1_2048Nu = 0x3924926;
+		NS_coeff1_8191Nu = 0xE4996E;
+		NS_coeff1_8192Nu = 0xE49249;
+		NS_coeff1_8193Nu = 0xE48B25;
+		NS_coeff2_2k = 0x1C92493;
+		NS_coeff2_8k = 0x724925;
+		break;
+	default:
+		err("Invalid bandwith %d.", bw);
+		return -EINVAL;
+	}
+
+	/*
+	 *  write NS_coeff1_2048Nu
+	 */
+
+	temp0 = (u8) (NS_coeff1_2048Nu & 0x000000FF);
+	temp1 = (u8) ((NS_coeff1_2048Nu & 0x0000FF00) >> 8);
+	temp2 = (u8) ((NS_coeff1_2048Nu & 0x00FF0000) >> 16);
+	temp3 = (u8) ((NS_coeff1_2048Nu & 0x03000000) >> 24);
+
+	/*  big endian to make 8051 happy */
+	buf[0] = temp3;
+	buf[1] = temp2;
+	buf[2] = temp1;
+	buf[3] = temp0;
+
+	/*  cfoe_NS_2k_coeff1_25_24 */
+	ret = af9005_write_ofdm_register(d, 0xAE00, buf[0]);
+	if (ret)
+		return ret;
+
+	/*  cfoe_NS_2k_coeff1_23_16 */
+	ret = af9005_write_ofdm_register(d, 0xAE01, buf[1]);
+	if (ret)
+		return ret;
+
+	/*  cfoe_NS_2k_coeff1_15_8 */
+	ret = af9005_write_ofdm_register(d, 0xAE02, buf[2]);
+	if (ret)
+		return ret;
+
+	/*  cfoe_NS_2k_coeff1_7_0 */
+	ret = af9005_write_ofdm_register(d, 0xAE03, buf[3]);
+	if (ret)
+		return ret;
+
+	/*
+	 *  write NS_coeff2_2k
+	 */
+
+	temp0 = (u8) ((NS_coeff2_2k & 0x0000003F));
+	temp1 = (u8) ((NS_coeff2_2k & 0x00003FC0) >> 6);
+	temp2 = (u8) ((NS_coeff2_2k & 0x003FC000) >> 14);
+	temp3 = (u8) ((NS_coeff2_2k & 0x01C00000) >> 22);
+
+	/*  big endian to make 8051 happy */
+	buf[0] = temp3;
+	buf[1] = temp2;
+	buf[2] = temp1;
+	buf[3] = temp0;
+
+	ret = af9005_write_ofdm_register(d, 0xAE04, buf[0]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE05, buf[1]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE06, buf[2]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE07, buf[3]);
+	if (ret)
+		return ret;
+
+	/*
+	 *  write NS_coeff1_8191Nu
+	 */
+
+	temp0 = (u8) ((NS_coeff1_8191Nu & 0x000000FF));
+	temp1 = (u8) ((NS_coeff1_8191Nu & 0x0000FF00) >> 8);
+	temp2 = (u8) ((NS_coeff1_8191Nu & 0x00FFC000) >> 16);
+	temp3 = (u8) ((NS_coeff1_8191Nu & 0x03000000) >> 24);
+
+	/*  big endian to make 8051 happy */
+	buf[0] = temp3;
+	buf[1] = temp2;
+	buf[2] = temp1;
+	buf[3] = temp0;
+
+	ret = af9005_write_ofdm_register(d, 0xAE08, buf[0]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE09, buf[1]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE0A, buf[2]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE0B, buf[3]);
+	if (ret)
+		return ret;
+
+	/*
+	 *  write NS_coeff1_8192Nu
+	 */
+
+	temp0 = (u8) (NS_coeff1_8192Nu & 0x000000FF);
+	temp1 = (u8) ((NS_coeff1_8192Nu & 0x0000FF00) >> 8);
+	temp2 = (u8) ((NS_coeff1_8192Nu & 0x00FFC000) >> 16);
+	temp3 = (u8) ((NS_coeff1_8192Nu & 0x03000000) >> 24);
+
+	/*  big endian to make 8051 happy */
+	buf[0] = temp3;
+	buf[1] = temp2;
+	buf[2] = temp1;
+	buf[3] = temp0;
+
+	ret = af9005_write_ofdm_register(d, 0xAE0C, buf[0]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE0D, buf[1]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE0E, buf[2]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE0F, buf[3]);
+	if (ret)
+		return ret;
+
+	/*
+	 *  write NS_coeff1_8193Nu
+	 */
+
+	temp0 = (u8) ((NS_coeff1_8193Nu & 0x000000FF));
+	temp1 = (u8) ((NS_coeff1_8193Nu & 0x0000FF00) >> 8);
+	temp2 = (u8) ((NS_coeff1_8193Nu & 0x00FFC000) >> 16);
+	temp3 = (u8) ((NS_coeff1_8193Nu & 0x03000000) >> 24);
+
+	/*  big endian to make 8051 happy */
+	buf[0] = temp3;
+	buf[1] = temp2;
+	buf[2] = temp1;
+	buf[3] = temp0;
+
+	ret = af9005_write_ofdm_register(d, 0xAE10, buf[0]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE11, buf[1]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE12, buf[2]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE13, buf[3]);
+	if (ret)
+		return ret;
+
+	/*
+	 *  write NS_coeff2_8k
+	 */
+
+	temp0 = (u8) ((NS_coeff2_8k & 0x0000003F));
+	temp1 = (u8) ((NS_coeff2_8k & 0x00003FC0) >> 6);
+	temp2 = (u8) ((NS_coeff2_8k & 0x003FC000) >> 14);
+	temp3 = (u8) ((NS_coeff2_8k & 0x01C00000) >> 22);
+
+	/*  big endian to make 8051 happy */
+	buf[0] = temp3;
+	buf[1] = temp2;
+	buf[2] = temp1;
+	buf[3] = temp0;
+
+	ret = af9005_write_ofdm_register(d, 0xAE14, buf[0]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE15, buf[1]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE16, buf[2]);
+	if (ret)
+		return ret;
+
+	ret = af9005_write_ofdm_register(d, 0xAE17, buf[3]);
+	return ret;
+
+}
+
+static int af9005_fe_select_bw(struct dvb_usb_device *d, fe_bandwidth_t bw)
+{
+	u8 temp;
+	switch (bw) {
+	case BANDWIDTH_6_MHZ:
+		temp = 0;
+		break;
+	case BANDWIDTH_7_MHZ:
+		temp = 1;
+		break;
+	case BANDWIDTH_8_MHZ:
+		temp = 2;
+		break;
+	default:
+		err("Invalid bandwith %d.", bw);
+		return -EINVAL;
+	}
+	return af9005_write_register_bits(d, xd_g_reg_bw, reg_bw_pos,
+					  reg_bw_len, temp);
+}
+
+static int af9005_fe_power(struct dvb_frontend *fe, int on)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	u8 temp = on;
+	int ret;
+	deb_info("power %s tuner\n", on ? "on" : "off");
+	ret = af9005_send_command(state->d, 0x03, &temp, 1, NULL, 0);
+	return ret;
+}
+
+static struct mt2060_config af9005_mt2060_config = {
+	0xC0
+};
+
+static struct qt1010_config af9005_qt1010_config = {
+	0xC4
+};
+
+static int af9005_fe_init(struct dvb_frontend *fe)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	struct dvb_usb_adapter *adap = fe->dvb->priv;
+	int ret, i, scriptlen;
+	u8 temp, temp0 = 0, temp1 = 0, temp2 = 0;
+	u8 buf[2];
+	u16 if1;
+
+	deb_info("in af9005_fe_init\n");
+
+	/* reset */
+	deb_info("reset\n");
+	if ((ret =
+	     af9005_write_register_bits(state->d, xd_I2C_reg_ofdm_rst_en,
+					4, 1, 0x01)))
+		return ret;
+	if ((ret = af9005_write_ofdm_register(state->d, APO_REG_RESET, 0)))
+		return ret;
+	/* clear ofdm reset */
+	deb_info("clear ofdm reset\n");
+	for (i = 0; i < 150; i++) {
+		if ((ret =
+		     af9005_read_ofdm_register(state->d,
+					       xd_I2C_reg_ofdm_rst, &temp)))
+			return ret;
+		if (temp & (regmask[reg_ofdm_rst_len - 1] << reg_ofdm_rst_pos))
+			break;
+		msleep(10);
+	}
+	if (i == 150)
+		return -ETIMEDOUT;
+
+	/* FIXME in the dump
+	   write B200 A9
+	   write xd_g_reg_ofsm_clk 7
+	   read eepr c6 (2)
+	   read eepr c7 (2)
+	   misc ctrl 3 -> 1
+	   read eepr ca (6)
+	   write xd_g_reg_ofsm_clk 0
+	   write B200 a1
+	 */
+	ret = af9005_write_ofdm_register(state->d, 0xb200, 0xa9);
+	if (ret)
+		return ret;
+	ret = af9005_write_ofdm_register(state->d, xd_g_reg_ofsm_clk, 0x07);
+	if (ret)
+		return ret;
+	temp = 0x01;
+	ret = af9005_send_command(state->d, 0x03, &temp, 1, NULL, 0);
+	if (ret)
+		return ret;
+	ret = af9005_write_ofdm_register(state->d, xd_g_reg_ofsm_clk, 0x00);
+	if (ret)
+		return ret;
+	ret = af9005_write_ofdm_register(state->d, 0xb200, 0xa1);
+	if (ret)
+		return ret;
+
+	temp = regmask[reg_ofdm_rst_len - 1] << reg_ofdm_rst_pos;
+	if ((ret =
+	     af9005_write_register_bits(state->d, xd_I2C_reg_ofdm_rst,
+					reg_ofdm_rst_pos, reg_ofdm_rst_len, 1)))
+		return ret;
+	if ((ret =
+	     af9005_write_register_bits(state->d, xd_I2C_reg_ofdm_rst,
+					reg_ofdm_rst_pos, reg_ofdm_rst_len, 0)))
+		return ret;
+
+	if (ret)
+		return ret;
+	/* don't know what register aefc is, but this is what the windows driver does */
+	ret = af9005_write_ofdm_register(state->d, 0xaefc, 0);
+	if (ret)
+		return ret;
+
+	/* set stand alone chip */
+	deb_info("set stand alone chip\n");
+	if ((ret =
+	     af9005_write_register_bits(state->d, xd_p_reg_dca_stand_alone,
+					reg_dca_stand_alone_pos,
+					reg_dca_stand_alone_len, 1)))
+		return ret;
+
+	/* set dca upper & lower chip */
+	deb_info("set dca upper & lower chip\n");
+	if ((ret =
+	     af9005_write_register_bits(state->d, xd_p_reg_dca_upper_chip,
+					reg_dca_upper_chip_pos,
+					reg_dca_upper_chip_len, 0)))
+		return ret;
+	if ((ret =
+	     af9005_write_register_bits(state->d, xd_p_reg_dca_lower_chip,
+					reg_dca_lower_chip_pos,
+					reg_dca_lower_chip_len, 0)))
+		return ret;
+
+	/* set 2wire master clock to 0x14 (for 60KHz) */
+	deb_info("set 2wire master clock to 0x14 (for 60KHz)\n");
+	if ((ret =
+	     af9005_write_ofdm_register(state->d, xd_I2C_i2c_m_period, 0x14)))
+		return ret;
+
+	/* clear dca enable chip */
+	deb_info("clear dca enable chip\n");
+	if ((ret =
+	     af9005_write_register_bits(state->d, xd_p_reg_dca_en,
+					reg_dca_en_pos, reg_dca_en_len, 0)))
+		return ret;
+	/* FIXME these are register bits, but I don't know which ones */
+	ret = af9005_write_ofdm_register(state->d, 0xa16c, 1);
+	if (ret)
+		return ret;
+	ret = af9005_write_ofdm_register(state->d, 0xa3c1, 0);
+	if (ret)
+		return ret;
+
+	/* init other parameters: program cfoe and select bandwidth */
+	deb_info("program cfoe\n");
+	if ((ret = af9005_fe_program_cfoe(state->d, BANDWIDTH_6_MHZ)))
+		return ret;
+	/* set read-update bit for constellation */
+	deb_info("set read-update bit for constellation\n");
+	if ((ret =
+	     af9005_write_register_bits(state->d, xd_p_reg_feq_read_update,
+					reg_feq_read_update_pos,
+					reg_feq_read_update_len, 1)))
+		return ret;
+
+	/* sample code has a set MPEG TS code here
+	   but sniffing reveals that it doesn't do it */
+
+	/* set read-update bit to 1 for DCA constellation */
+	deb_info("set read-update bit 1 for DCA constellation\n");
+	if ((ret =
+	     af9005_write_register_bits(state->d, xd_p_reg_dca_read_update,
+					reg_dca_read_update_pos,
+					reg_dca_read_update_len, 1)))
+		return ret;
+
+	/* enable fec monitor */
+	deb_info("enable fec monitor\n");
+	if ((ret =
+	     af9005_write_register_bits(state->d, xd_p_fec_vtb_rsd_mon_en,
+					fec_vtb_rsd_mon_en_pos,
+					fec_vtb_rsd_mon_en_len, 1)))
+		return ret;
+
+	/* FIXME should be register bits, I don't know which ones */
+	ret = af9005_write_ofdm_register(state->d, 0xa601, 0);
+
+	/* set api_retrain_never_freeze */
+	deb_info("set api_retrain_never_freeze\n");
+	if ((ret = af9005_write_ofdm_register(state->d, 0xaefb, 0x01)))
+		return ret;
+
+	/* load init script */
+	deb_info("load init script\n");
+	scriptlen = sizeof(script) / sizeof(RegDesc);
+	for (i = 0; i < scriptlen; i++) {
+		if ((ret =
+		     af9005_write_register_bits(state->d, script[i].reg,
+						script[i].pos,
+						script[i].len, script[i].val)))
+			return ret;
+		/* save 3 bytes of original fcw */
+		if (script[i].reg == 0xae18)
+			temp2 = script[i].val;
+		if (script[i].reg == 0xae19)
+			temp1 = script[i].val;
+		if (script[i].reg == 0xae1a)
+			temp0 = script[i].val;
+
+		/* save original unplug threshold */
+		if (script[i].reg == xd_p_reg_unplug_th)
+			state->original_if_unplug_th = script[i].val;
+		if (script[i].reg == xd_p_reg_unplug_rf_gain_th)
+			state->original_rf_unplug_th = script[i].val;
+		if (script[i].reg == xd_p_reg_unplug_dtop_if_gain_th)
+			state->original_dtop_if_unplug_th = script[i].val;
+		if (script[i].reg == xd_p_reg_unplug_dtop_rf_gain_th)
+			state->original_dtop_rf_unplug_th = script[i].val;
+
+	}
+	state->original_fcw =
+	    ((u32) temp2 << 16) + ((u32) temp1 << 8) + (u32) temp0;
+
+
+	/* save original TOPs */
+	deb_info("save original TOPs\n");
+
+	/*  RF TOP */
+	ret =
+	    af9005_read_word_agc(state->d,
+				 xd_p_reg_aagc_rf_top_numerator_9_8,
+				 xd_p_reg_aagc_rf_top_numerator_7_0, 0, 2,
+				 &state->original_rf_top);
+	if (ret)
+		return ret;
+
+	/*  IF TOP */
+	ret =
+	    af9005_read_word_agc(state->d,
+				 xd_p_reg_aagc_if_top_numerator_9_8,
+				 xd_p_reg_aagc_if_top_numerator_7_0, 0, 2,
+				 &state->original_if_top);
+	if (ret)
+		return ret;
+
+	/*  ACI 0 IF TOP */
+	ret =
+	    af9005_read_word_agc(state->d, 0xA60E, 0xA60A, 4, 2,
+				 &state->original_aci0_if_top);
+	if (ret)
+		return ret;
+
+	/*  ACI 1 IF TOP */
+	ret =
+	    af9005_read_word_agc(state->d, 0xA60E, 0xA60B, 6, 2,
+				 &state->original_aci1_if_top);
+	if (ret)
+		return ret;
+
+	/* attach tuner and init */
+	if (state->tuner == NULL) {
+		/* read tuner and board id from eeprom */
+		ret = af9005_read_eeprom(adap->dev, 0xc6, buf, 2);
+		if (ret) {
+			err("Impossible to read EEPROM\n");
+			return ret;
+		}
+		deb_info("Tuner id %d, board id %d\n", buf[0], buf[1]);
+		switch (buf[0]) {
+		case 2:	/* MT2060 */
+			/* read if1 from eeprom */
+			ret = af9005_read_eeprom(adap->dev, 0xc8, buf, 2);
+			if (ret) {
+				err("Impossible to read EEPROM\n");
+				return ret;
+			}
+			if1 = (u16) (buf[0] << 8) + buf[1];
+			state->tuner =
+			    dvb_attach(mt2060_attach, fe, &adap->dev->i2c_adap,
+				       &af9005_mt2060_config, if1);
+			if (state->tuner == NULL) {
+				deb_info("MT2060 attach failed\n");
+				return -ENODEV;
+			}
+			break;
+		case 3:	/* QT1010 */
+		case 9:	/* QT1010B */
+			state->tuner =
+			    dvb_attach(qt1010_attach, fe, &adap->dev->i2c_adap,
+				       &af9005_qt1010_config);
+			if (state->tuner == NULL) {
+				deb_info("QT1010 attach failed\n");
+				return -ENODEV;
+			}
+			break;
+		default:
+			err("Unsupported tuner type %d", buf[0]);
+			return -ENODEV;
+		}
+		ret = state->tuner->ops.tuner_ops.init(state->tuner);
+		if (ret)
+			return ret;
+	}
+
+	deb_info("profit!\n");
+	return 0;
+}
+
+static int af9005_fe_sleep(struct dvb_frontend *fe)
+{
+	return af9005_fe_power(fe, 0);
+}
+
+static int af9005_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+
+	if (acquire) {
+		state->opened++;
+	} else {
+
+		state->opened--;
+		if (!state->opened)
+			af9005_led_control(state->d, 0);
+	}
+	return 0;
+}
+
+static int af9005_fe_set_frontend(struct dvb_frontend *fe,
+				  struct dvb_frontend_parameters *fep)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	int ret;
+	u8 temp, temp0, temp1, temp2;
+
+	deb_info("af9005_fe_set_frontend freq %d bw %d\n", fep->frequency,
+		 fep->u.ofdm.bandwidth);
+	if (state->tuner == NULL) {
+		err("Tuner not attached");
+		return -ENODEV;
+	}
+
+	deb_info("turn off led\n");
+	/* not in the log */
+	ret = af9005_led_control(state->d, 0);
+	if (ret)
+		return ret;
+	/* not sure about the bits */
+	ret = af9005_write_register_bits(state->d, XD_MP2IF_MISC, 2, 1, 0);
+	if (ret)
+		return ret;
+
+	/* set FCW to default value */
+	deb_info("set FCW to default value\n");
+	temp0 = (u8) (state->original_fcw & 0x000000ff);
+	temp1 = (u8) ((state->original_fcw & 0x0000ff00) >> 8);
+	temp2 = (u8) ((state->original_fcw & 0x00ff0000) >> 16);
+	ret = af9005_write_ofdm_register(state->d, 0xae1a, temp0);
+	if (ret)
+		return ret;
+	ret = af9005_write_ofdm_register(state->d, 0xae19, temp1);
+	if (ret)
+		return ret;
+	ret = af9005_write_ofdm_register(state->d, 0xae18, temp2);
+	if (ret)
+		return ret;
+
+	/* restore original TOPs */
+	deb_info("restore original TOPs\n");
+	ret =
+	    af9005_write_word_agc(state->d,
+				  xd_p_reg_aagc_rf_top_numerator_9_8,
+				  xd_p_reg_aagc_rf_top_numerator_7_0, 0, 2,
+				  state->original_rf_top);
+	if (ret)
+		return ret;
+	ret =
+	    af9005_write_word_agc(state->d,
+				  xd_p_reg_aagc_if_top_numerator_9_8,
+				  xd_p_reg_aagc_if_top_numerator_7_0, 0, 2,
+				  state->original_if_top);
+	if (ret)
+		return ret;
+	ret =
+	    af9005_write_word_agc(state->d, 0xA60E, 0xA60A, 4, 2,
+				  state->original_aci0_if_top);
+	if (ret)
+		return ret;
+	ret =
+	    af9005_write_word_agc(state->d, 0xA60E, 0xA60B, 6, 2,
+				  state->original_aci1_if_top);
+	if (ret)
+		return ret;
+
+	/* select bandwidth */
+	deb_info("select bandwidth\n");
+	ret = af9005_fe_select_bw(state->d, fep->u.ofdm.bandwidth);
+	if (ret)
+		return ret;
+	ret = af9005_fe_program_cfoe(state->d, fep->u.ofdm.bandwidth);
+	if (ret)
+		return ret;
+
+	/* clear easy mode flag */
+	deb_info("clear easy mode flag\n");
+	ret = af9005_write_ofdm_register(state->d, 0xaefd, 0);
+	if (ret)
+		return ret;
+
+	/* set unplug threshold to original value */
+	deb_info("set unplug threshold to original value\n");
+	ret =
+	    af9005_write_ofdm_register(state->d, xd_p_reg_unplug_th,
+				       state->original_if_unplug_th);
+	if (ret)
+		return ret;
+	/* set tuner */
+	deb_info("set tuner\n");
+	ret = state->tuner->ops.tuner_ops.set_params(state->tuner, fep);
+	if (ret)
+		return ret;
+
+	/* trigger ofsm */
+	deb_info("trigger ofsm\n");
+	temp = 0;
+	ret = af9005_write_tuner_registers(state->d, 0xffff, &temp, 1);
+	if (ret)
+		return ret;
+
+	/* clear retrain and freeze flag */
+	deb_info("clear retrain and freeze flag\n");
+	ret =
+	    af9005_write_register_bits(state->d,
+				       xd_p_reg_api_retrain_request,
+				       reg_api_retrain_request_pos, 2, 0);
+	if (ret)
+		return ret;
+
+	/* reset pre viterbi and post viterbi registers and statistics */
+	af9005_reset_pre_viterbi(fe);
+	af9005_reset_post_viterbi(fe);
+	state->pre_vit_error_count = 0;
+	state->pre_vit_bit_count = 0;
+	state->ber = 0;
+	state->post_vit_error_count = 0;
+	/* state->unc = 0; commented out since it should only ever increase */
+	state->abort_count = 0;
+
+	state->next_status_check = jiffies;
+	state->strong = -1;
+
+	return 0;
+}
+
+static int af9005_fe_get_frontend(struct dvb_frontend *fe,
+				  struct dvb_frontend_parameters *fep)
+{
+	struct af9005_fe_state *state = fe->demodulator_priv;
+	int ret;
+	u8 temp;
+
+	/* mode */
+	ret =
+	    af9005_read_register_bits(state->d, xd_g_reg_tpsd_const,
+				      reg_tpsd_const_pos, reg_tpsd_const_len,
+				      &temp);
+	if (ret)
+		return ret;
+	deb_info("===== fe_get_frontend ==============\n");
+	deb_info("CONSTELLATION ");
+	switch (temp) {
+	case 0:
+		fep->u.ofdm.constellation = QPSK;
+		deb_info("QPSK\n");
+		break;
+	case 1:
+		fep->u.ofdm.constellation = QAM_16;
+		deb_info("QAM_16\n");
+		break;
+	case 2:
+		fep->u.ofdm.constellation = QAM_64;
+		deb_info("QAM_64\n");
+		break;
+	}
+
+	/* tps hierarchy and alpha value */
+	ret =
+	    af9005_read_register_bits(state->d, xd_g_reg_tpsd_hier,
+				      reg_tpsd_hier_pos, reg_tpsd_hier_len,
+				      &temp);
+	if (ret)
+		return ret;
+	deb_info("HIERARCHY ");
+	switch (temp) {
+	case 0:
+		fep->u.ofdm.hierarchy_information = HIERARCHY_NONE;
+		deb_info("NONE\n");
+		break;
+	case 1:
+		fep->u.ofdm.hierarchy_information = HIERARCHY_1;
+		deb_info("1\n");
+		break;
+	case 2:
+		fep->u.ofdm.hierarchy_information = HIERARCHY_2;
+		deb_info("2\n");
+		break;
+	case 3:
+		fep->u.ofdm.hierarchy_information = HIERARCHY_4;
+		deb_info("4\n");
+		break;
+	}
+
+	/*  high/low priority     */
+	ret =
+	    af9005_read_register_bits(state->d, xd_g_reg_dec_pri,
+				      reg_dec_pri_pos, reg_dec_pri_len, &temp);
+	if (ret)
+		return ret;
+	/* if temp is set, priority is high */
+	deb_info("PRIORITY %s\n", temp ? "high" : "low");
+
+	/* high coderate */
+	ret =
+	    af9005_read_register_bits(state->d, xd_g_reg_tpsd_hpcr,
+				      reg_tpsd_hpcr_pos, reg_tpsd_hpcr_len,
+				      &temp);
+	if (ret)
+		return ret;
+	deb_info("CODERATE HP ");
+	switch (temp) {
+	case 0:
+		fep->u.ofdm.code_rate_HP = FEC_1_2;
+		deb_info("FEC_1_2\n");
+		break;
+	case 1:
+		fep->u.ofdm.code_rate_HP = FEC_2_3;
+		deb_info("FEC_2_3\n");
+		break;
+	case 2:
+		fep->u.ofdm.code_rate_HP = FEC_3_4;
+		deb_info("FEC_3_4\n");
+		break;
+	case 3:
+		fep->u.ofdm.code_rate_HP = FEC_5_6;
+		deb_info("FEC_5_6\n");
+		break;
+	case 4:
+		fep->u.ofdm.code_rate_HP = FEC_7_8;
+		deb_info("FEC_7_8\n");
+		break;
+	}
+
+	/* low coderate */
+	ret =
+	    af9005_read_register_bits(state->d, xd_g_reg_tpsd_lpcr,
+				      reg_tpsd_lpcr_pos, reg_tpsd_lpcr_len,
+				      &temp);
+	if (ret)
+		return ret;
+	deb_info("CODERATE LP ");
+	switch (temp) {
+	case 0:
+		fep->u.ofdm.code_rate_LP = FEC_1_2;
+		deb_info("FEC_1_2\n");
+		break;
+	case 1:
+		fep->u.ofdm.code_rate_LP = FEC_2_3;
+		deb_info("FEC_2_3\n");
+		break;
+	case 2:
+		fep->u.ofdm.code_rate_LP = FEC_3_4;
+		deb_info("FEC_3_4\n");
+		break;
+	case 3:
+		fep->u.ofdm.code_rate_LP = FEC_5_6;
+		deb_info("FEC_5_6\n");
+		break;
+	case 4:
+		fep->u.ofdm.code_rate_LP = FEC_7_8;
+		deb_info("FEC_7_8\n");
+		break;
+	}
+
+	/* guard interval */
+	ret =
+	    af9005_read_register_bits(state->d, xd_g_reg_tpsd_gi,
+				      reg_tpsd_gi_pos, reg_tpsd_gi_len, &temp);
+	if (ret)
+		return ret;
+	deb_info("GUARD INTERVAL ");
+	switch (temp) {
+	case 0:
+		fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_32;
+		deb_info("1_32\n");
+		break;
+	case 1:
+		fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_16;
+		deb_info("1_16\n");
+		break;
+	case 2:
+		fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_8;
+		deb_info("1_8\n");
+		break;
+	case 3:
+		fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_4;
+		deb_info("1_4\n");
+		break;
+	}
+
+	/* fft */
+	ret =
+	    af9005_read_register_bits(state->d, xd_g_reg_tpsd_txmod,
+				      reg_tpsd_txmod_pos, reg_tpsd_txmod_len,
+				      &temp);
+	if (ret)
+		return ret;
+	deb_info("TRANSMISSION MODE ");
+	switch (temp) {
+	case 0:
+		fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K;
+		deb_info("2K\n");
+		break;
+	case 1:
+		fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K;
+		deb_info("8K\n");
+		break;
+	}
+
+	/* bandwidth      */
+	ret =
+	    af9005_read_register_bits(state->d, xd_g_reg_bw, reg_bw_pos,
+				      reg_bw_len, &temp);
+	deb_info("BANDWIDTH ");
+	switch (temp) {
+	case 0:
+		fep->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
+		deb_info("6\n");
+		break;
+	case 1:
+		fep->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
+		deb_info("7\n");
+		break;
+	case 2:
+		fep->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
+		deb_info("8\n");
+		break;
+	}
+	return 0;
+}
+
+static void af9005_fe_release(struct dvb_frontend *fe)
+{
+	struct af9005_fe_state *state =
+	    (struct af9005_fe_state *)fe->demodulator_priv;
+	if (state->tuner != NULL && state->tuner->ops.tuner_ops.release != NULL) {
+		state->tuner->ops.tuner_ops.release(state->tuner);
+#ifdef CONFIG_DVB_CORE_ATTACH
+		symbol_put_addr(state->tuner->ops.tuner_ops.release);
+#endif
+	}
+	kfree(state);
+}
+
+static struct dvb_frontend_ops af9005_fe_ops;
+
+struct dvb_frontend *af9005_fe_attach(struct dvb_usb_device *d)
+{
+	struct af9005_fe_state *state = NULL;
+
+	/* allocate memory for the internal state */
+	state = kzalloc(sizeof(struct af9005_fe_state), GFP_KERNEL);
+	if (state == NULL)
+		goto error;
+
+	deb_info("attaching frontend af9005\n");
+
+	state->d = d;
+	state->tuner = NULL;
+	state->opened = 0;
+
+	memcpy(&state->frontend.ops, &af9005_fe_ops,
+	       sizeof(struct dvb_frontend_ops));
+	state->frontend.demodulator_priv = state;
+
+	return &state->frontend;
+      error:
+	return NULL;
+}
+
+static struct dvb_frontend_ops af9005_fe_ops = {
+	.info = {
+		 .name = "AF9005 USB DVB-T",
+		 .type = FE_OFDM,
+		 .frequency_min = 44250000,
+		 .frequency_max = 867250000,
+		 .frequency_stepsize = 250000,
+		 .caps = FE_CAN_INVERSION_AUTO |
+		 FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+		 FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+		 FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
+		 FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO |
+		 FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER |
+		 FE_CAN_HIERARCHY_AUTO,
+		 },
+
+	.release = af9005_fe_release,
+
+	.init = af9005_fe_init,
+	.sleep = af9005_fe_sleep,
+	.ts_bus_ctrl = af9005_ts_bus_ctrl,
+
+	.set_frontend = af9005_fe_set_frontend,
+	.get_frontend = af9005_fe_get_frontend,
+
+	.read_status = af9005_fe_read_status,
+	.read_ber = af9005_fe_read_ber,
+	.read_signal_strength = af9005_fe_read_signal_strength,
+	.read_snr = af9005_fe_read_snr,
+	.read_ucblocks = af9005_fe_read_unc_blocks,
+};
diff --git a/drivers/media/dvb/dvb-usb/af9005-remote.c b/drivers/media/dvb/dvb-usb/af9005-remote.c
new file mode 100644
index 0000000..ff00c0e
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/af9005-remote.c
@@ -0,0 +1,157 @@
+/* DVB USB compliant Linux driver for the Afatech 9005
+ * USB1.1 DVB-T receiver.
+ *
+ * Standard remote decode function
+ *
+ * Copyright (C) 2007 Luca Olivetti (luca@ventoso.org)
+ *
+ * Thanks to Afatech who kindly provided information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ */
+#include "af9005.h"
+/* debug */
+int dvb_usb_af9005_remote_debug;
+module_param_named(debug, dvb_usb_af9005_remote_debug, int, 0644);
+MODULE_PARM_DESC(debug,
+		 "enable (1) or disable (0) debug messages."
+		 DVB_USB_DEBUG_STATUS);
+
+#define deb_decode(args...)   dprintk(dvb_usb_af9005_remote_debug,0x01,args)
+
+struct dvb_usb_rc_key af9005_rc_keys[] = {
+
+	{0x01, 0xb7, KEY_POWER},
+	{0x01, 0xa7, KEY_VOLUMEUP},
+	{0x01, 0x87, KEY_CHANNELUP},
+	{0x01, 0x7f, KEY_MUTE},
+	{0x01, 0xbf, KEY_VOLUMEDOWN},
+	{0x01, 0x3f, KEY_CHANNELDOWN},
+	{0x01, 0xdf, KEY_1},
+	{0x01, 0x5f, KEY_2},
+	{0x01, 0x9f, KEY_3},
+	{0x01, 0x1f, KEY_4},
+	{0x01, 0xef, KEY_5},
+	{0x01, 0x6f, KEY_6},
+	{0x01, 0xaf, KEY_7},
+	{0x01, 0x27, KEY_8},
+	{0x01, 0x07, KEY_9},
+	{0x01, 0xcf, KEY_ZOOM},
+	{0x01, 0x4f, KEY_0},
+	{0x01, 0x8f, KEY_GOTO},	/* marked jump on the remote */
+
+	{0x00, 0xbd, KEY_POWER},
+	{0x00, 0x7d, KEY_VOLUMEUP},
+	{0x00, 0xfd, KEY_CHANNELUP},
+	{0x00, 0x9d, KEY_MUTE},
+	{0x00, 0x5d, KEY_VOLUMEDOWN},
+	{0x00, 0xdd, KEY_CHANNELDOWN},
+	{0x00, 0xad, KEY_1},
+	{0x00, 0x6d, KEY_2},
+	{0x00, 0xed, KEY_3},
+	{0x00, 0x8d, KEY_4},
+	{0x00, 0x4d, KEY_5},
+	{0x00, 0xcd, KEY_6},
+	{0x00, 0xb5, KEY_7},
+	{0x00, 0x75, KEY_8},
+	{0x00, 0xf5, KEY_9},
+	{0x00, 0x95, KEY_ZOOM},
+	{0x00, 0x55, KEY_0},
+	{0x00, 0xd5, KEY_GOTO},	/* marked jump on the remote */
+};
+
+int af9005_rc_keys_size = ARRAY_SIZE(af9005_rc_keys);
+
+static int repeatable_keys[] = {
+	KEY_VOLUMEUP,
+	KEY_VOLUMEDOWN,
+	KEY_CHANNELUP,
+	KEY_CHANNELDOWN
+};
+
+int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len, u32 * event,
+		     int *state)
+{
+	u16 mark, space;
+	u32 result;
+	u8 cust, dat, invdat;
+	int i;
+
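+	/* data holds pairs of 16-bit mark/space durations: a short gap
+	   after the leading pulse signals a repeated key, otherwise 32
+	   bits follow, each decoded from the mark/space ratio; a frame
+	   carries a fixed 0xfe prefix, a custom code, the key code and
+	   its bitwise complement */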
+	if (len >= 6) {
+		mark = (u16) (data[0] << 8) + data[1];
+		space = (u16) (data[2] << 8) + data[3];
+		if (space * 3 < mark) {
+			for (i = 0; i < ARRAY_SIZE(repeatable_keys); i++) {
+				if (d->last_event == repeatable_keys[i]) {
+					*state = REMOTE_KEY_REPEAT;
+					*event = d->last_event;
+					deb_decode("repeat key, event %x\n",
+						   *event);
+					return 0;
+				}
+			}
+			deb_decode("repeated key ignored (non repeatable)\n");
+			return 0;
+		} else if (len >= 33 * 4) {	/*32 bits + start code */
+			result = 0;
+			for (i = 4; i < 4 + 32 * 4; i += 4) {
+				result <<= 1;
+				mark = (u16) (data[i] << 8) + data[i + 1];
+				mark >>= 1;
+				space = (u16) (data[i + 2] << 8) + data[i + 3];
+				space >>= 1;
+				if (mark * 2 > space)
+					result += 1;
+			}
+			deb_decode("key pressed, raw value %x\n", result);
+			if ((result & 0xff000000) != 0xfe000000) {
+				deb_decode
+				    ("doesn't start with 0xfe, ignored\n");
+				return 0;
+			}
+			cust = (result >> 16) & 0xff;
+			dat = (result >> 8) & 0xff;
+			invdat = (~result) & 0xff;
+			if (dat != invdat) {
+				deb_decode("code != inverted code\n");
+				return 0;
+			}
+			for (i = 0; i < af9005_rc_keys_size; i++) {
+				if (af9005_rc_keys[i].custom == cust
+				    && af9005_rc_keys[i].data == dat) {
+					*event = af9005_rc_keys[i].event;
+					*state = REMOTE_KEY_PRESSED;
+					deb_decode
+					    ("key pressed, event %x\n", *event);
+					return 0;
+				}
+			}
+			deb_decode("not found in table\n");
+		}
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(af9005_rc_keys);
+EXPORT_SYMBOL(af9005_rc_keys_size);
+EXPORT_SYMBOL(af9005_rc_decode);
+
+MODULE_AUTHOR("Luca Olivetti <luca@ventoso.org>");
+MODULE_DESCRIPTION
+    ("Standard remote control decoder for Afatech 9005 DVB-T USB1.1 stick");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/af9005-script.h b/drivers/media/dvb/dvb-usb/af9005-script.h
new file mode 100644
index 0000000..6eeaae5
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/af9005-script.h
@@ -0,0 +1,203 @@
+/*
+File automatically generated by createinit.py using data
+extracted from AF05BDA.sys (windows driver):
+
+dd if=AF05BDA.sys of=initsequence bs=1 skip=88316 count=1110
+python createinit.py > af9005-script.h
+
+*/
+
+typedef struct {
+	u16 reg;
+	u8 pos;
+	u8 len;
+	u8 val;
+} RegDesc;
+
+RegDesc script[] = {
+	{0xa180, 0x0, 0x8, 0xa},
+	{0xa181, 0x0, 0x8, 0xd7},
+	{0xa182, 0x0, 0x8, 0xa3},
+	{0xa0a0, 0x0, 0x8, 0x0},
+	{0xa0a1, 0x0, 0x5, 0x0},
+	{0xa0a1, 0x5, 0x1, 0x1},
+	{0xa0c0, 0x0, 0x4, 0x1},
+	{0xa20e, 0x4, 0x4, 0xa},
+	{0xa20f, 0x0, 0x8, 0x40},
+	{0xa210, 0x0, 0x8, 0x8},
+	{0xa32a, 0x0, 0x4, 0xa},
+	{0xa32c, 0x0, 0x8, 0x20},
+	{0xa32b, 0x0, 0x8, 0x15},
+	{0xa1a0, 0x1, 0x1, 0x1},
+	{0xa000, 0x0, 0x1, 0x1},
+	{0xa000, 0x1, 0x1, 0x0},
+	{0xa001, 0x1, 0x1, 0x1},
+	{0xa001, 0x0, 0x1, 0x0},
+	{0xa001, 0x5, 0x1, 0x0},
+	{0xa00e, 0x0, 0x5, 0x10},
+	{0xa00f, 0x0, 0x3, 0x4},
+	{0xa00f, 0x3, 0x3, 0x5},
+	{0xa010, 0x0, 0x3, 0x4},
+	{0xa010, 0x3, 0x3, 0x5},
+	{0xa016, 0x4, 0x4, 0x3},
+	{0xa01f, 0x0, 0x6, 0xa},
+	{0xa020, 0x0, 0x6, 0xa},
+	{0xa2bc, 0x0, 0x1, 0x1},
+	{0xa2bc, 0x5, 0x1, 0x1},
+	{0xa015, 0x0, 0x8, 0x50},
+	{0xa016, 0x0, 0x1, 0x0},
+	{0xa02a, 0x0, 0x8, 0x50},
+	{0xa029, 0x0, 0x8, 0x4b},
+	{0xa614, 0x0, 0x8, 0x46},
+	{0xa002, 0x0, 0x5, 0x19},
+	{0xa003, 0x0, 0x5, 0x1a},
+	{0xa004, 0x0, 0x5, 0x19},
+	{0xa005, 0x0, 0x5, 0x1a},
+	{0xa008, 0x0, 0x8, 0x69},
+	{0xa009, 0x0, 0x2, 0x2},
+	{0xae1b, 0x0, 0x8, 0x69},
+	{0xae1c, 0x0, 0x8, 0x2},
+	{0xae1d, 0x0, 0x8, 0x2a},
+	{0xa022, 0x0, 0x8, 0xaa},
+	{0xa006, 0x0, 0x8, 0xc8},
+	{0xa007, 0x0, 0x2, 0x0},
+	{0xa00c, 0x0, 0x8, 0xba},
+	{0xa00d, 0x0, 0x2, 0x2},
+	{0xa608, 0x0, 0x8, 0xba},
+	{0xa60e, 0x0, 0x2, 0x2},
+	{0xa609, 0x0, 0x8, 0x80},
+	{0xa60e, 0x2, 0x2, 0x3},
+	{0xa00a, 0x0, 0x8, 0xb6},
+	{0xa00b, 0x0, 0x2, 0x0},
+	{0xa011, 0x0, 0x8, 0xb9},
+	{0xa012, 0x0, 0x2, 0x0},
+	{0xa013, 0x0, 0x8, 0xbd},
+	{0xa014, 0x0, 0x2, 0x2},
+	{0xa366, 0x0, 0x1, 0x1},
+	{0xa2bc, 0x3, 0x1, 0x0},
+	{0xa2bd, 0x0, 0x8, 0xa},
+	{0xa2be, 0x0, 0x8, 0x14},
+	{0xa2bf, 0x0, 0x8, 0x8},
+	{0xa60a, 0x0, 0x8, 0xbd},
+	{0xa60e, 0x4, 0x2, 0x2},
+	{0xa60b, 0x0, 0x8, 0x86},
+	{0xa60e, 0x6, 0x2, 0x3},
+	{0xa001, 0x2, 0x2, 0x1},
+	{0xa1c7, 0x0, 0x8, 0xf5},
+	{0xa03d, 0x0, 0x8, 0xb1},
+	{0xa616, 0x0, 0x8, 0xff},
+	{0xa617, 0x0, 0x8, 0xad},
+	{0xa618, 0x0, 0x8, 0xad},
+	{0xa61e, 0x3, 0x1, 0x1},
+	{0xae1a, 0x0, 0x8, 0x0},
+	{0xae19, 0x0, 0x8, 0xc8},
+	{0xae18, 0x0, 0x8, 0x61},
+	{0xa140, 0x0, 0x8, 0x0},
+	{0xa141, 0x0, 0x8, 0xc8},
+	{0xa142, 0x0, 0x7, 0x61},
+	{0xa023, 0x0, 0x8, 0xff},
+	{0xa021, 0x0, 0x8, 0xad},
+	{0xa026, 0x0, 0x1, 0x0},
+	{0xa024, 0x0, 0x8, 0xff},
+	{0xa025, 0x0, 0x8, 0xff},
+	{0xa1c8, 0x0, 0x8, 0xf},
+	{0xa2bc, 0x1, 0x1, 0x0},
+	{0xa60c, 0x0, 0x4, 0x5},
+	{0xa60c, 0x4, 0x4, 0x6},
+	{0xa60d, 0x0, 0x8, 0xa},
+	{0xa371, 0x0, 0x1, 0x1},
+	{0xa366, 0x1, 0x3, 0x7},
+	{0xa338, 0x0, 0x8, 0x10},
+	{0xa339, 0x0, 0x6, 0x7},
+	{0xa33a, 0x0, 0x6, 0x1f},
+	{0xa33b, 0x0, 0x8, 0xf6},
+	{0xa33c, 0x3, 0x5, 0x4},
+	{0xa33d, 0x4, 0x4, 0x0},
+	{0xa33d, 0x1, 0x1, 0x1},
+	{0xa33d, 0x2, 0x1, 0x1},
+	{0xa33d, 0x3, 0x1, 0x1},
+	{0xa16d, 0x0, 0x4, 0xf},
+	{0xa161, 0x0, 0x5, 0x5},
+	{0xa162, 0x0, 0x4, 0x5},
+	{0xa165, 0x0, 0x8, 0xff},
+	{0xa166, 0x0, 0x8, 0x9c},
+	{0xa2c3, 0x0, 0x4, 0x5},
+	{0xa61a, 0x0, 0x6, 0xf},
+	{0xb200, 0x0, 0x8, 0xa1},
+	{0xb201, 0x0, 0x8, 0x7},
+	{0xa093, 0x0, 0x1, 0x0},
+	{0xa093, 0x1, 0x5, 0xf},
+	{0xa094, 0x0, 0x8, 0xff},
+	{0xa095, 0x0, 0x8, 0xf},
+	{0xa080, 0x2, 0x5, 0x3},
+	{0xa081, 0x0, 0x4, 0x0},
+	{0xa081, 0x4, 0x4, 0x9},
+	{0xa082, 0x0, 0x5, 0x1f},
+	{0xa08d, 0x0, 0x8, 0x1},
+	{0xa083, 0x0, 0x8, 0x32},
+	{0xa084, 0x0, 0x1, 0x0},
+	{0xa08e, 0x0, 0x8, 0x3},
+	{0xa085, 0x0, 0x8, 0x32},
+	{0xa086, 0x0, 0x3, 0x0},
+	{0xa087, 0x0, 0x8, 0x6e},
+	{0xa088, 0x0, 0x5, 0x15},
+	{0xa089, 0x0, 0x8, 0x0},
+	{0xa08a, 0x0, 0x5, 0x19},
+	{0xa08b, 0x0, 0x8, 0x92},
+	{0xa08c, 0x0, 0x5, 0x1c},
+	{0xa120, 0x0, 0x8, 0x0},
+	{0xa121, 0x0, 0x5, 0x10},
+	{0xa122, 0x0, 0x8, 0x0},
+	{0xa123, 0x0, 0x7, 0x40},
+	{0xa123, 0x7, 0x1, 0x0},
+	{0xa124, 0x0, 0x8, 0x13},
+	{0xa125, 0x0, 0x7, 0x10},
+	{0xa1c0, 0x0, 0x8, 0x0},
+	{0xa1c1, 0x0, 0x5, 0x4},
+	{0xa1c2, 0x0, 0x8, 0x0},
+	{0xa1c3, 0x0, 0x5, 0x10},
+	{0xa1c3, 0x5, 0x3, 0x0},
+	{0xa1c4, 0x0, 0x6, 0x0},
+	{0xa1c5, 0x0, 0x7, 0x10},
+	{0xa100, 0x0, 0x8, 0x0},
+	{0xa101, 0x0, 0x5, 0x10},
+	{0xa102, 0x0, 0x8, 0x0},
+	{0xa103, 0x0, 0x7, 0x40},
+	{0xa103, 0x7, 0x1, 0x0},
+	{0xa104, 0x0, 0x8, 0x18},
+	{0xa105, 0x0, 0x7, 0xa},
+	{0xa106, 0x0, 0x8, 0x20},
+	{0xa107, 0x0, 0x8, 0x40},
+	{0xa108, 0x0, 0x4, 0x0},
+	{0xa38c, 0x0, 0x8, 0xfc},
+	{0xa38d, 0x0, 0x8, 0x0},
+	{0xa38e, 0x0, 0x8, 0x7e},
+	{0xa38f, 0x0, 0x8, 0x0},
+	{0xa390, 0x0, 0x8, 0x2f},
+	{0xa60f, 0x5, 0x1, 0x1},
+	{0xa170, 0x0, 0x8, 0xdc},
+	{0xa171, 0x0, 0x2, 0x0},
+	{0xa2ae, 0x0, 0x1, 0x1},
+	{0xa2ae, 0x1, 0x1, 0x1},
+	{0xa392, 0x0, 0x1, 0x1},
+	{0xa391, 0x2, 0x1, 0x0},
+	{0xabc1, 0x0, 0x8, 0xff},
+	{0xabc2, 0x0, 0x8, 0x0},
+	{0xabc8, 0x0, 0x8, 0x8},
+	{0xabca, 0x0, 0x8, 0x10},
+	{0xabcb, 0x0, 0x1, 0x0},
+	{0xabc3, 0x5, 0x3, 0x7},
+	{0xabc0, 0x6, 0x1, 0x0},
+	{0xabc0, 0x4, 0x2, 0x0},
+	{0xa344, 0x4, 0x4, 0x1},
+	{0xabc0, 0x7, 0x1, 0x1},
+	{0xabc0, 0x2, 0x1, 0x1},
+	{0xa345, 0x0, 0x8, 0x66},
+	{0xa346, 0x0, 0x8, 0x66},
+	{0xa347, 0x0, 0x4, 0x0},
+	{0xa343, 0x0, 0x4, 0xa},
+	{0xa347, 0x4, 0x4, 0x2},
+	{0xa348, 0x0, 0x4, 0xc},
+	{0xa348, 0x4, 0x4, 0x7},
+	{0xa349, 0x0, 0x6, 0x2},
+};
diff --git a/drivers/media/dvb/dvb-usb/af9005.c b/drivers/media/dvb/dvb-usb/af9005.c
new file mode 100644
index 0000000..7db6eee
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/af9005.c
@@ -0,0 +1,1141 @@
+/* DVB USB compliant Linux driver for the Afatech 9005
+ * USB1.1 DVB-T receiver.
+ *
+ * Copyright (C) 2007 Luca Olivetti (luca@ventoso.org)
+ *
+ * Thanks to Afatech who kindly provided information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ */
+#include "af9005.h"
+
+/* debug */
+int dvb_usb_af9005_debug;
+module_param_named(debug, dvb_usb_af9005_debug, int, 0644);
+MODULE_PARM_DESC(debug,
+		 "set debugging level (1=info,xfer=2,rc=4,reg=8,i2c=16,fw=32 (or-able))."
+		 DVB_USB_DEBUG_STATUS);
+/* enable obnoxious led */
+int dvb_usb_af9005_led = 1;
+module_param_named(led, dvb_usb_af9005_led, bool, 0644);
+MODULE_PARM_DESC(led, "enable led (default: 1).");
+
+/* eeprom dump */
+int dvb_usb_af9005_dump_eeprom = 0;
+module_param_named(dump_eeprom, dvb_usb_af9005_dump_eeprom, int, 0);
+MODULE_PARM_DESC(dump_eeprom, "dump contents of the eeprom.");
+
+/* remote control decoder */
+int (*rc_decode) (struct dvb_usb_device * d, u8 * data, int len, u32 * event,
+		  int *state);
+void *rc_keys;
+int *rc_keys_size;
+
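+/* regmask[n - 1] is an n-bit mask, used when reading/writing register bit fields */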
+u8 regmask[8] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff };
+
+struct af9005_device_state {
+	u8 sequence;
+	int led_state;
+};
+
+int af9005_usb_generic_rw(struct dvb_usb_device *d, u8 * wbuf, u16 wlen,
+			  u8 * rbuf, u16 rlen, int delay_ms)
+{
+	int actlen, ret = -ENOMEM;
+
+	if (wbuf == NULL || wlen == 0)
+		return -EINVAL;
+
+	if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
+		return ret;
+
+	deb_xfer(">>> ");
+	debug_dump(wbuf, wlen, deb_xfer);
+
+	ret = usb_bulk_msg(d->udev, usb_sndbulkpipe(d->udev,
+						    2), wbuf, wlen,
+			   &actlen, 2000);
+
+	if (ret)
+		err("bulk message failed: %d (%d/%d)", ret, wlen, actlen);
+	else
+		ret = actlen != wlen ? -1 : 0;
+
+	/* read the answer only if one is expected and no error occurred so far */
+	if (!ret && rbuf && rlen) {
+		if (delay_ms)
+			msleep(delay_ms);
+
+		ret = usb_bulk_msg(d->udev, usb_rcvbulkpipe(d->udev,
+							    0x01), rbuf,
+				   rlen, &actlen, 2000);
+
+		if (ret)
+			err("recv bulk message failed: %d", ret);
+		else {
+			deb_xfer("<<< ");
+			debug_dump(rbuf, actlen, deb_xfer);
+		}
+	}
+
+	mutex_unlock(&d->usb_mutex);
+	return ret;
+}
+
+int af9005_usb_generic_write(struct dvb_usb_device *d, u8 * buf, u16 len)
+{
+	return af9005_usb_generic_rw(d, buf, len, NULL, 0, 0);
+}
+
+int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
+			      int readwrite, int type, u8 * values, int len)
+{
+	struct af9005_device_state *st = d->priv;
+	u8 obuf[16] = { 0 };
+	u8 ibuf[17] = { 0 };
+	u8 command;
+	int i;
+	int ret;
+
+	if (len < 1) {
+		err("generic read/write, less than 1 byte. Makes no sense.");
+		return -EINVAL;
+	}
+	if (len > 8) {
+		err("generic read/write, more than 8 bytes. Not supported.");
+		return -EINVAL;
+	}
+
+	obuf[0] = 14;		/* rest of buffer length low */
+	obuf[1] = 0;		/* rest of buffer length high */
+
+	obuf[2] = AF9005_REGISTER_RW;	/* register operation */
+	obuf[3] = 12;		/* rest of buffer length */
+
+	obuf[4] = st->sequence++;	/* sequence number */
+
+	obuf[5] = (u8) (reg >> 8);	/* register address */
+	obuf[6] = (u8) (reg & 0xff);
+
+	if (type == AF9005_OFDM_REG) {
+		command = AF9005_CMD_OFDM_REG;
+	} else {
+		command = AF9005_CMD_TUNER;
+	}
+
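+	/* for multi-byte transfers the command byte also carries the burst
+	   and auto-increment flags plus the length minus one in bits 3-5 */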
+	if (len > 1)
+		command |=
+		    AF9005_CMD_BURST | AF9005_CMD_AUTOINC | (len - 1) << 3;
+	command |= readwrite;
+	if (readwrite == AF9005_CMD_WRITE)
+		for (i = 0; i < len; i++)
+			obuf[8 + i] = values[i];
+	else if (type == AF9005_TUNER_REG)
+		/* read command for tuner, the first byte contains the i2c address */
+		obuf[8] = values[0];
+	obuf[7] = command;
+
+	ret = af9005_usb_generic_rw(d, obuf, 16, ibuf, 17, 0);
+	if (ret)
+		return ret;
+
+	/* sanity check */
+	if (ibuf[2] != AF9005_REGISTER_RW_ACK) {
+		err("generic read/write, wrong reply code.");
+		return -EIO;
+	}
+	if (ibuf[3] != 0x0d) {
+		err("generic read/write, wrong length in reply.");
+		return -EIO;
+	}
+	if (ibuf[4] != obuf[4]) {
+		err("generic read/write, wrong sequence in reply.");
+		return -EIO;
+	}
+	/*
+	   Windows driver doesn't check these fields, in fact sometimes
+	   the register in the reply is different from what has been sent
+
+	   if (ibuf[5] != obuf[5] || ibuf[6] != obuf[6]) {
+	   err("generic read/write, wrong register in reply.");
+	   return -EIO;
+	   }
+	   if (ibuf[7] != command) {
+	   err("generic read/write wrong command in reply.");
+	   return -EIO;
+	   }
+	 */
+	if (ibuf[16] != 0x01) {
+		err("generic read/write wrong status code in reply.");
+		return -EIO;
+	}
+	if (readwrite == AF9005_CMD_READ)
+		for (i = 0; i < len; i++)
+			values[i] = ibuf[8 + i];
+
+	return 0;
+
+}
+
+int af9005_read_ofdm_register(struct dvb_usb_device *d, u16 reg, u8 * value)
+{
+	int ret;
+	deb_reg("read register %x ", reg);
+	ret = af9005_generic_read_write(d, reg,
+					AF9005_CMD_READ, AF9005_OFDM_REG,
+					value, 1);
+	if (ret)
+		deb_reg("failed\n");
+	else
+		deb_reg("value %x\n", *value);
+	return ret;
+}
+
+int af9005_read_ofdm_registers(struct dvb_usb_device *d, u16 reg,
+			       u8 * values, int len)
+{
+	int ret;
+	deb_reg("read %d registers %x ", len, reg);
+	ret = af9005_generic_read_write(d, reg,
+					AF9005_CMD_READ, AF9005_OFDM_REG,
+					values, len);
+	if (ret)
+		deb_reg("failed\n");
+	else
+		debug_dump(values, len, deb_reg);
+	return ret;
+}
+
+int af9005_write_ofdm_register(struct dvb_usb_device *d, u16 reg, u8 value)
+{
+	int ret;
+	u8 temp = value;
+	deb_reg("write register %x value %x ", reg, value);
+	ret = af9005_generic_read_write(d, reg,
+					AF9005_CMD_WRITE, AF9005_OFDM_REG,
+					&temp, 1);
+	if (ret)
+		deb_reg("failed\n");
+	else
+		deb_reg("ok\n");
+	return ret;
+}
+
+int af9005_write_ofdm_registers(struct dvb_usb_device *d, u16 reg,
+				u8 * values, int len)
+{
+	int ret;
+	deb_reg("write %d registers %x values ", len, reg);
+	debug_dump(values, len, deb_reg);
+
+	ret = af9005_generic_read_write(d, reg,
+					AF9005_CMD_WRITE, AF9005_OFDM_REG,
+					values, len);
+	if (ret)
+		deb_reg("failed\n");
+	else
+		deb_reg("ok\n");
+	return ret;
+}
+
+int af9005_read_register_bits(struct dvb_usb_device *d, u16 reg, u8 pos,
+			      u8 len, u8 * value)
+{
+	u8 temp;
+	int ret;
+	deb_reg("read bits %x %x %x", reg, pos, len);
+	ret = af9005_read_ofdm_register(d, reg, &temp);
+	if (ret) {
+		deb_reg(" failed\n");
+		return ret;
+	}
+	*value = (temp >> pos) & regmask[len - 1];
+	deb_reg(" value %x\n", *value);
+	return 0;
+
+}
+
+int af9005_write_register_bits(struct dvb_usb_device *d, u16 reg, u8 pos,
+			       u8 len, u8 value)
+{
+	u8 temp, mask;
+	int ret;
+	deb_reg("write bits %x %x %x value %x\n", reg, pos, len, value);
+	if (pos == 0 && len == 8)
+		return af9005_write_ofdm_register(d, reg, value);
+	ret = af9005_read_ofdm_register(d, reg, &temp);
+	if (ret)
+		return ret;
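+	/* read-modify-write: clear the target bit field, then insert the new value */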
+	mask = regmask[len - 1] << pos;
+	temp = (temp & ~mask) | ((value << pos) & mask);
+	return af9005_write_ofdm_register(d, reg, temp);
+
+}
+
+static int af9005_usb_read_tuner_registers(struct dvb_usb_device *d,
+					   u16 reg, u8 * values, int len)
+{
+	return af9005_generic_read_write(d, reg,
+					 AF9005_CMD_READ, AF9005_TUNER_REG,
+					 values, len);
+}
+
+static int af9005_usb_write_tuner_registers(struct dvb_usb_device *d,
+					    u16 reg, u8 * values, int len)
+{
+	return af9005_generic_read_write(d, reg,
+					 AF9005_CMD_WRITE,
+					 AF9005_TUNER_REG, values, len);
+}
+
+int af9005_write_tuner_registers(struct dvb_usb_device *d, u16 reg,
+				 u8 * values, int len)
+{
+	/* don't let the name of this function mislead you: it's just used
+	   as an interface from the firmware to the i2c bus. The actual
+	   i2c addresses are contained in the data */
+	int ret, i, done = 0, fail = 0;
+	u8 temp;
+	ret = af9005_usb_write_tuner_registers(d, reg, values, len);
+	if (ret)
+		return ret;
+	if (reg != 0xffff) {
+		/* check if write done (0xa40d bit 1) or fail (0xa40d bit 2) */
+		for (i = 0; i < 200; i++) {
+			ret =
+			    af9005_read_ofdm_register(d,
+						      xd_I2C_i2c_m_status_wdat_done,
+						      &temp);
+			if (ret)
+				return ret;
+			done = temp & (regmask[i2c_m_status_wdat_done_len - 1]
+				       << i2c_m_status_wdat_done_pos);
+			if (done)
+				break;
+			fail = temp & (regmask[i2c_m_status_wdat_fail_len - 1]
+				       << i2c_m_status_wdat_fail_pos);
+			if (fail)
+				break;
+			msleep(50);
+		}
+		if (i == 200)
+			return -ETIMEDOUT;
+		if (fail) {
+			/* clear write fail bit */
+			af9005_write_register_bits(d,
+						   xd_I2C_i2c_m_status_wdat_fail,
+						   i2c_m_status_wdat_fail_pos,
+						   i2c_m_status_wdat_fail_len,
+						   1);
+			return -EIO;
+		}
+		/* clear write done bit */
+		ret =
+		    af9005_write_register_bits(d,
+					       xd_I2C_i2c_m_status_wdat_fail,
+					       i2c_m_status_wdat_done_pos,
+					       i2c_m_status_wdat_done_len, 1);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+int af9005_read_tuner_registers(struct dvb_usb_device *d, u16 reg, u8 addr,
+				u8 * values, int len)
+{
+	/* don't let the name of this function mislead you: it's just used
+	   as an interface from the firmware to the i2c bus. The actual
+	   i2c addresses are contained in the data */
+	int ret, i;
+	u8 temp, buf[2];
+
+	buf[0] = addr;		/* tuner i2c address */
+	buf[1] = values[0];	/* tuner register */
+
+	values[0] = addr + 0x01;	/* i2c read address */
+
+	if (reg == APO_REG_I2C_RW_SILICON_TUNER) {
+		/* write tuner i2c address to tuner, register 0x00c0 is undocumented, found by sniffing */
+		ret = af9005_write_tuner_registers(d, 0x00c0, buf, 2);
+		if (ret)
+			return ret;
+	}
+
+	/* send read command to ofsm */
+	ret = af9005_usb_read_tuner_registers(d, reg, values, 1);
+	if (ret)
+		return ret;
+
+	/* check if read done */
+	for (i = 0; i < 200; i++) {
+		ret = af9005_read_ofdm_register(d, 0xa408, &temp);
+		if (ret)
+			return ret;
+		if (temp & 0x01)
+			break;
+		msleep(50);
+	}
+	if (i == 200)
+		return -ETIMEDOUT;
+
+	/* clear read done bit (by writing 1) */
+	ret = af9005_write_ofdm_register(d, xd_I2C_i2c_m_data8, 1);
+	if (ret)
+		return ret;
+
+	/* get read data (available from 0xa400) */
+	for (i = 0; i < len; i++) {
+		ret = af9005_read_ofdm_register(d, 0xa400 + i, &temp);
+		if (ret)
+			return ret;
+		values[i] = temp;
+	}
+	return 0;
+}
+
+static int af9005_i2c_write(struct dvb_usb_device *d, u8 i2caddr, u8 reg,
+			    u8 * data, int len)
+{
+	int ret, i;
+	u8 buf[3];
+	deb_i2c("i2c_write i2caddr %x, reg %x, len %d data ", i2caddr,
+		reg, len);
+	debug_dump(data, len, deb_i2c);
+
+	for (i = 0; i < len; i++) {
+		buf[0] = i2caddr;
+		buf[1] = reg + (u8) i;
+		buf[2] = data[i];
+		ret =
+		    af9005_write_tuner_registers(d,
+						 APO_REG_I2C_RW_SILICON_TUNER,
+						 buf, 3);
+		if (ret) {
+			deb_i2c("i2c_write failed\n");
+			return ret;
+		}
+	}
+	deb_i2c("i2c_write ok\n");
+	return 0;
+}
+
+static int af9005_i2c_read(struct dvb_usb_device *d, u8 i2caddr, u8 reg,
+			   u8 * data, int len)
+{
+	int ret, i;
+	u8 temp;
+	deb_i2c("i2c_read i2caddr %x, reg %x, len %d\n ", i2caddr, reg, len);
+	for (i = 0; i < len; i++) {
+		temp = reg + i;
+		ret =
+		    af9005_read_tuner_registers(d,
+						APO_REG_I2C_RW_SILICON_TUNER,
+						i2caddr, &temp, 1);
+		if (ret) {
+			deb_i2c("i2c_read failed\n");
+			return ret;
+		}
+		data[i] = temp;
+	}
+	deb_i2c("i2c data read: ");
+	debug_dump(data, len, deb_i2c);
+	return 0;
+}
+
+static int af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+			   int num)
+{
+	/* only implements what the mt2060 module does, don't know how
+	   to make it really generic */
+	struct dvb_usb_device *d = i2c_get_adapdata(adap);
+	int ret;
+	u8 reg, addr;
+	u8 *value;
+
+	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+		return -EAGAIN;
+
+	if (num > 2)
+		warn("more than 2 i2c messages at a time is not handled yet. TODO.");
+
+	if (num == 2) {
+		/* reads a single register */
+		reg = *msg[0].buf;
+		addr = msg[0].addr;
+		value = msg[1].buf;
+		ret = af9005_i2c_read(d, addr, reg, value, 1);
+		if (ret == 0)
+			ret = 2;
+	} else {
+		/* write one or more registers */
+		reg = msg[0].buf[0];
+		addr = msg[0].addr;
+		value = &msg[0].buf[1];
+		ret = af9005_i2c_write(d, addr, reg, value, msg[0].len - 1);
+		if (ret == 0)
+			ret = 1;
+	}
+
+	mutex_unlock(&d->i2c_mutex);
+	return ret;
+}
+
+static u32 af9005_i2c_func(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_I2C;
+}
+
+static struct i2c_algorithm af9005_i2c_algo = {
+	.master_xfer = af9005_i2c_xfer,
+	.functionality = af9005_i2c_func,
+};
+
+int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
+			int wlen, u8 * rbuf, int rlen)
+{
+	struct af9005_device_state *st = d->priv;
+
+	int ret, i, packet_len;
+	u8 buf[64];
+	u8 ibuf[64];
+
+	if (wlen < 0) {
+		err("send command, wlen less than 0 bytes. Makes no sense.");
+		return -EINVAL;
+	}
+	if (wlen > 54) {
+		err("send command, wlen more than 54 bytes. Not supported.");
+		return -EINVAL;
+	}
+	if (rlen > 54) {
+		err("send command, rlen more than 54 bytes. Not supported.");
+		return -EINVAL;
+	}
+	packet_len = wlen + 5;
+	buf[0] = (u8) (packet_len & 0xff);
+	buf[1] = (u8) ((packet_len & 0xff00) >> 8);
+
+	buf[2] = 0x26;		/* packet type */
+	buf[3] = wlen + 3;
+	buf[4] = st->sequence++;
+	buf[5] = command;
+	buf[6] = wlen;
+	for (i = 0; i < wlen; i++)
+		buf[7 + i] = wbuf[i];
+	ret = af9005_usb_generic_rw(d, buf, wlen + 7, ibuf, rlen + 7, 0);
+	if (ret)
+		return ret;
+	if (ibuf[2] != 0x27) {
+		err("send command, wrong reply code.");
+		return -EIO;
+	}
+	if (ibuf[4] != buf[4]) {
+		err("send command, wrong sequence in reply.");
+		return -EIO;
+	}
+	if (ibuf[5] != 0x01) {
+		err("send command, wrong status code in reply.");
+		return -EIO;
+	}
+	if (ibuf[6] != rlen) {
+		err("send command, invalid data length in reply.");
+		return -EIO;
+	}
+	for (i = 0; i < rlen; i++)
+		rbuf[i] = ibuf[i + 7];
+	return 0;
+}
+
+int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values,
+		       int len)
+{
+	struct af9005_device_state *st = d->priv;
+	u8 obuf[16], ibuf[14];
+	int ret, i;
+
+	memset(obuf, 0, sizeof(obuf));
+	memset(ibuf, 0, sizeof(ibuf));
+
+	obuf[0] = 14;		/* length of rest of packet low */
+	obuf[1] = 0;		/* length of rest of packet high */
+
+	obuf[2] = 0x2a;		/* read/write eeprom */
+
+	obuf[3] = 12;		/* size */
+
+	obuf[4] = st->sequence++;
+
+	obuf[5] = 0;		/* read */
+
+	obuf[6] = len;
+	obuf[7] = address;
+	ret = af9005_usb_generic_rw(d, obuf, 16, ibuf, 14, 0);
+	if (ret)
+		return ret;
+	if (ibuf[2] != 0x2b) {
+		err("Read eeprom, invalid reply code");
+		return -EIO;
+	}
+	if (ibuf[3] != 10) {
+		err("Read eeprom, invalid reply length");
+		return -EIO;
+	}
+	if (ibuf[4] != obuf[4]) {
+		err("Read eeprom, wrong sequence in reply ");
+		return -EIO;
+	}
+	if (ibuf[5] != 1) {
+		err("Read eeprom, wrong status in reply ");
+		return -EIO;
+	}
+	for (i = 0; i < len; i++) {
+		values[i] = ibuf[6 + i];
+	}
+	return 0;
+}
+
+static int af9005_boot_packet(struct usb_device *udev, int type, u8 * reply)
+{
+	u8 buf[FW_BULKOUT_SIZE + 2];
+	u16 checksum;
+	int act_len, i, ret;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
+	buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff);
+	switch (type) {
+	case FW_CONFIG:
+		buf[2] = 0x11;
+		buf[3] = 0x04;
+		buf[4] = 0x00;	/* sequence number, original driver doesn't increment it here */
+		buf[5] = 0x03;
+		checksum = buf[4] + buf[5];
+		buf[6] = (u8) ((checksum >> 8) & 0xff);
+		buf[7] = (u8) (checksum & 0xff);
+		break;
+	case FW_CONFIRM:
+		buf[2] = 0x11;
+		buf[3] = 0x04;
+		buf[4] = 0x00;	/* sequence number, original driver doesn't increment it here */
+		buf[5] = 0x01;
+		checksum = buf[4] + buf[5];
+		buf[6] = (u8) ((checksum >> 8) & 0xff);
+		buf[7] = (u8) (checksum & 0xff);
+		break;
+	case FW_BOOT:
+		buf[2] = 0x10;
+		buf[3] = 0x08;
+		buf[4] = 0x00;	/* sequence number, original driver doesn't increment it here */
+		buf[5] = 0x97;
+		buf[6] = 0xaa;
+		buf[7] = 0x55;
+		buf[8] = 0xa5;
+		buf[9] = 0x5a;
+		checksum = 0;
+		for (i = 4; i <= 9; i++)
+			checksum += buf[i];
+		buf[10] = (u8) ((checksum >> 8) & 0xff);
+		buf[11] = (u8) (checksum & 0xff);
+		break;
+	default:
+		err("boot packet invalid boot packet type");
+		return -EINVAL;
+	}
+	deb_fw(">>> ");
+	debug_dump(buf, FW_BULKOUT_SIZE + 2, deb_fw);
+
+	ret = usb_bulk_msg(udev,
+			   usb_sndbulkpipe(udev, 0x02),
+			   buf, FW_BULKOUT_SIZE + 2, &act_len, 2000);
+	if (ret)
+		err("boot packet bulk message failed: %d (%d/%d)", ret,
+		    FW_BULKOUT_SIZE + 2, act_len);
+	else
+		ret = act_len != FW_BULKOUT_SIZE + 2 ? -1 : 0;
+	if (ret)
+		return ret;
+	memset(buf, 0, 9);
+	ret = usb_bulk_msg(udev,
+			   usb_rcvbulkpipe(udev, 0x01), buf, 9, &act_len, 2000);
+	if (ret) {
+		err("boot packet recv bulk message failed: %d", ret);
+		return ret;
+	}
+	deb_fw("<<< ");
+	debug_dump(buf, act_len, deb_fw);
+	checksum = 0;
+	switch (type) {
+	case FW_CONFIG:
+		if (buf[2] != 0x11) {
+			err("boot bad config header.");
+			return -EIO;
+		}
+		if (buf[3] != 0x05) {
+			err("boot bad config size.");
+			return -EIO;
+		}
+		if (buf[4] != 0x00) {
+			err("boot bad config sequence.");
+			return -EIO;
+		}
+		if (buf[5] != 0x04) {
+			err("boot bad config subtype.");
+			return -EIO;
+		}
+		for (i = 4; i <= 6; i++)
+			checksum += buf[i];
+		if (buf[7] * 256 + buf[8] != checksum) {
+			err("boot bad config checksum.");
+			return -EIO;
+		}
+		*reply = buf[6];
+		break;
+	case FW_CONFIRM:
+		if (buf[2] != 0x11) {
+			err("boot bad confirm header.");
+			return -EIO;
+		}
+		if (buf[3] != 0x05) {
+			err("boot bad confirm size.");
+			return -EIO;
+		}
+		if (buf[4] != 0x00) {
+			err("boot bad confirm sequence.");
+			return -EIO;
+		}
+		if (buf[5] != 0x02) {
+			err("boot bad confirm subtype.");
+			return -EIO;
+		}
+		for (i = 4; i <= 6; i++)
+			checksum += buf[i];
+		if (buf[7] * 256 + buf[8] != checksum) {
+			err("boot bad confirm checksum.");
+			return -EIO;
+		}
+		*reply = buf[6];
+		break;
+	case FW_BOOT:
+		if (buf[2] != 0x10) {
+			err("boot bad boot header.");
+			return -EIO;
+		}
+		if (buf[3] != 0x05) {
+			err("boot bad boot size.");
+			return -EIO;
+		}
+		if (buf[4] != 0x00) {
+			err("boot bad boot sequence.");
+			return -EIO;
+		}
+		if (buf[5] != 0x01) {
+			err("boot bad boot pattern 01.");
+			return -EIO;
+		}
+		if (buf[6] != 0x10) {
+			err("boot bad boot pattern 10.");
+			return -EIO;
+		}
+		for (i = 4; i <= 6; i++)
+			checksum += buf[i];
+		if (buf[7] * 256 + buf[8] != checksum) {
+			err("boot bad boot checksum.");
+			return -EIO;
+		}
+		break;
+
+	}
+
+	return 0;
+}
+
+int af9005_download_firmware(struct usb_device *udev, const struct firmware *fw)
+{
+	int i, packets, ret, act_len;
+
+	u8 buf[FW_BULKOUT_SIZE + 2];
+	u8 reply;
+
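+	/* handshake: FW_CONFIG must report a cold device (0x01), the firmware
+	   is then sent in FW_BULKOUT_SIZE chunks, FW_CONFIRM must echo the
+	   packet count and, after FW_BOOT, a second FW_CONFIG must report a
+	   warm device (0x02) */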
+	ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+	if (ret)
+		return ret;
+	if (reply != 0x01) {
+		err("before downloading firmware, FW_CONFIG expected 0x01, received 0x%x", reply);
+		return -EIO;
+	}
+	packets = fw->size / FW_BULKOUT_SIZE;
+	buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
+	buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff);
+	for (i = 0; i < packets; i++) {
+		memcpy(&buf[2], fw->data + i * FW_BULKOUT_SIZE,
+		       FW_BULKOUT_SIZE);
+		deb_fw(">>> ");
+		debug_dump(buf, FW_BULKOUT_SIZE + 2, deb_fw);
+		ret = usb_bulk_msg(udev,
+				   usb_sndbulkpipe(udev, 0x02),
+				   buf, FW_BULKOUT_SIZE + 2, &act_len, 1000);
+		if (ret) {
+			err("firmware download failed at packet %d with code %d", i, ret);
+			return ret;
+		}
+	}
+	ret = af9005_boot_packet(udev, FW_CONFIRM, &reply);
+	if (ret)
+		return ret;
+	if (reply != (u8) (packets & 0xff)) {
+		err("after downloading firmware, FW_CONFIRM expected 0x%x, received 0x%x", packets & 0xff, reply);
+		return -EIO;
+	}
+	ret = af9005_boot_packet(udev, FW_BOOT, &reply);
+	if (ret)
+		return ret;
+	ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+	if (ret)
+		return ret;
+	if (reply != 0x02) {
+		err("after downloading firmware, FW_CONFIG expected 0x02, received 0x%x", reply);
+		return -EIO;
+	}
+
+	return 0;
+
+}
+
+int af9005_led_control(struct dvb_usb_device *d, int onoff)
+{
+	struct af9005_device_state *st = d->priv;
+	int temp, ret;
+
+	if (onoff && dvb_usb_af9005_led)
+		temp = 1;
+	else
+		temp = 0;
+	if (st->led_state != temp) {
+		ret =
+		    af9005_write_register_bits(d, xd_p_reg_top_locken1,
+					       reg_top_locken1_pos,
+					       reg_top_locken1_len, temp);
+		if (ret)
+			return ret;
+		ret =
+		    af9005_write_register_bits(d, xd_p_reg_top_lock1,
+					       reg_top_lock1_pos,
+					       reg_top_lock1_len, temp);
+		if (ret)
+			return ret;
+		st->led_state = temp;
+	}
+	return 0;
+}
+
+static int af9005_frontend_attach(struct dvb_usb_adapter *adap)
+{
+	u8 buf[8];
+	int i;
+
+	/* without these calls the first commands after downloading
+	   the firmware fail. I put these calls here to simulate
+	   what is done in dvb-usb-init.c.
+	 */
+	struct usb_device *udev = adap->dev->udev;
+	usb_clear_halt(udev, usb_sndbulkpipe(udev, 2));
+	usb_clear_halt(udev, usb_rcvbulkpipe(udev, 1));
+	if (dvb_usb_af9005_dump_eeprom) {
+		printk("EEPROM DUMP\n");
+		for (i = 0; i < 255; i += 8) {
+			af9005_read_eeprom(adap->dev, i, buf, 8);
+			printk("ADDR %x ", i);
+			debug_dump(buf, 8, printk);
+		}
+	}
+	adap->fe = af9005_fe_attach(adap->dev);
+	return 0;
+}
+
+static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state)
+{
+	struct af9005_device_state *st = d->priv;
+	int ret, len;
+
+	u8 obuf[5];
+	u8 ibuf[256];
+
+	*state = REMOTE_NO_KEY_PRESSED;
+	if (rc_decode == NULL) {
+		/* it should never come here */
+		return 0;
+	}
+	/* deb_info("rc_query\n"); */
+	obuf[0] = 3;		/* rest of packet length low */
+	obuf[1] = 0;		/* rest of packet length high */
+	obuf[2] = 0x40;		/* read remote */
+	obuf[3] = 1;		/* rest of packet length */
+	obuf[4] = st->sequence++;	/* sequence number */
+	ret = af9005_usb_generic_rw(d, obuf, 5, ibuf, 256, 0);
+	if (ret) {
+		err("rc query failed");
+		return ret;
+	}
+	if (ibuf[2] != 0x41) {
+		err("rc query bad header.");
+		return -EIO;
+	}
+	if (ibuf[4] != obuf[4]) {
+		err("rc query bad sequence.");
+		return -EIO;
+	}
+	len = ibuf[5];
+	if (len > 246) {
+		err("rc query invalid length");
+		return -EIO;
+	}
+	if (len > 0) {
+		deb_rc("rc data (%d) ", len);
+		debug_dump((ibuf + 6), len, deb_rc);
+		ret = rc_decode(d, &ibuf[6], len, event, state);
+		if (ret) {
+			err("rc_decode failed");
+			return ret;
+		} else {
+			deb_rc("rc_decode state %x event %x\n", *state, *event);
+			if (*state == REMOTE_KEY_REPEAT)
+				*event = d->last_event;
+		}
+	}
+	return 0;
+}
+
+static int af9005_power_ctrl(struct dvb_usb_device *d, int onoff)
+{
+
+	return 0;
+}
+
+static int af9005_pid_filter_control(struct dvb_usb_adapter *adap, int onoff)
+{
+	int ret;
+	deb_info("pid filter control  onoff %d\n", onoff);
+	if (onoff) {
+		ret =
+		    af9005_write_ofdm_register(adap->dev, XD_MP2IF_DMX_CTRL, 1);
+		if (ret)
+			return ret;
+		ret =
+		    af9005_write_register_bits(adap->dev,
+					       XD_MP2IF_DMX_CTRL, 1, 1, 1);
+		if (ret)
+			return ret;
+		ret =
+		    af9005_write_ofdm_register(adap->dev, XD_MP2IF_DMX_CTRL, 1);
+	} else
+		ret =
+		    af9005_write_ofdm_register(adap->dev, XD_MP2IF_DMX_CTRL, 0);
+	if (ret)
+		return ret;
+	deb_info("pid filter control ok\n");
+	return 0;
+}
+
+static int af9005_pid_filter(struct dvb_usb_adapter *adap, int index,
+			     u16 pid, int onoff)
+{
+	u8 cmd = index & 0x1f;
+	int ret;
+	deb_info("set pid filter, index %d, pid %x, onoff %d\n", index,
+		 pid, onoff);
+	if (onoff) {
+		/* cannot use it as pid_filter_ctrl since it has to be done
+		   before setting the first pid */
+		if (adap->feedcount == 1) {
+			deb_info("first pid set, enable pid table\n");
+			ret = af9005_pid_filter_control(adap, onoff);
+			if (ret)
+				return ret;
+		}
+		ret =
+		    af9005_write_ofdm_register(adap->dev,
+					       XD_MP2IF_PID_DATA_L,
+					       (u8) (pid & 0xff));
+		if (ret)
+			return ret;
+		ret =
+		    af9005_write_ofdm_register(adap->dev,
+					       XD_MP2IF_PID_DATA_H,
+					       (u8) (pid >> 8));
+		if (ret)
+			return ret;
+		cmd |= 0x20 | 0x40;
+	} else {
+		if (adap->feedcount == 0) {
+			deb_info("last pid unset, disable pid table\n");
+			ret = af9005_pid_filter_control(adap, onoff);
+			if (ret)
+				return ret;
+		}
+	}
+	ret = af9005_write_ofdm_register(adap->dev, XD_MP2IF_PID_IDX, cmd);
+	if (ret)
+		return ret;
+	deb_info("set pid ok\n");
+	return 0;
+}
+
+static int af9005_identify_state(struct usb_device *udev,
+				 struct dvb_usb_device_properties *props,
+				 struct dvb_usb_device_description **desc,
+				 int *cold)
+{
+	int ret;
+	u8 reply;
+	ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+	if (ret)
+		return ret;
+	deb_info("result of FW_CONFIG in identify state %d\n", reply);
+	if (reply == 0x01)
+		*cold = 1;
+	else if (reply == 0x02)
+		*cold = 0;
+	else
+		return -EIO;
+	deb_info("Identify state cold = %d\n", *cold);
+	return 0;
+}
+
+static struct dvb_usb_device_properties af9005_properties;
+
+static int af9005_usb_probe(struct usb_interface *intf,
+			    const struct usb_device_id *id)
+{
+	return dvb_usb_device_init(intf, &af9005_properties, THIS_MODULE, NULL);
+}
+
+static struct usb_device_id af9005_usb_table[] = {
+	{USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9005)},
+	{USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_USB_XE)},
+	{0},
+};
+
+MODULE_DEVICE_TABLE(usb, af9005_usb_table);
+
+static struct dvb_usb_device_properties af9005_properties = {
+	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+	.usb_ctrl = DEVICE_SPECIFIC,
+	.firmware = "af9005.fw",
+	.download_firmware = af9005_download_firmware,
+	.no_reconnect = 1,
+
+	.size_of_priv = sizeof(struct af9005_device_state),
+
+	.num_adapters = 1,
+	.adapter = {
+		    {
+		     .caps =
+		     DVB_USB_ADAP_HAS_PID_FILTER |
+		     DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+		     .pid_filter_count = 32,
+		     .pid_filter = af9005_pid_filter,
+		     /* .pid_filter_ctrl = af9005_pid_filter_control, */
+		     .frontend_attach = af9005_frontend_attach,
+		     /* .tuner_attach     = af9005_tuner_attach, */
+		     /* parameter for the MPEG2-data transfer */
+		     .stream = {
+				.type = USB_BULK,
+				.count = 10,
+				.endpoint = 0x04,
+				.u = {
+				      .bulk = {
+					       .buffersize = 4096,	/* actual size seen is 3948 */
+					       }
+				      }
+				},
+		     }
+		    },
+	.power_ctrl = af9005_power_ctrl,
+	.identify_state = af9005_identify_state,
+
+	.i2c_algo = &af9005_i2c_algo,
+
+	.rc_interval = 200,
+	.rc_key_map = NULL,
+	.rc_key_map_size = 0,
+	.rc_query = af9005_rc_query,
+
+	.num_device_descs = 2,
+	.devices = {
+		    {.name = "Afatech DVB-T USB1.1 stick",
+		     .cold_ids = {&af9005_usb_table[0], NULL},
+		     .warm_ids = {NULL},
+		     },
+		    {.name = "TerraTec Cinergy T USB XE",
+		     .cold_ids = {&af9005_usb_table[1], NULL},
+		     .warm_ids = {NULL},
+		     },
+		    {NULL},
+		    }
+};
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver af9005_usb_driver = {
+	.name = "dvb_usb_af9005",
+	.probe = af9005_usb_probe,
+	.disconnect = dvb_usb_device_exit,
+	.id_table = af9005_usb_table,
+};
+
+/* module stuff */
+static int __init af9005_usb_module_init(void)
+{
+	int result;
+	if ((result = usb_register(&af9005_usb_driver))) {
+		err("usb_register failed. (%d)", result);
+		return result;
+	}
+	rc_decode = symbol_request(af9005_rc_decode);
+	rc_keys = symbol_request(af9005_rc_keys);
+	rc_keys_size = symbol_request(af9005_rc_keys_size);
+	if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) {
+		err("af9005_rc_decode function not found, disabling remote");
+		af9005_properties.rc_query = NULL;
+	} else {
+		af9005_properties.rc_key_map = rc_keys;
+		af9005_properties.rc_key_map_size = *rc_keys_size;
+	}
+
+	return 0;
+}
+
+static void __exit af9005_usb_module_exit(void)
+{
+	/* release rc decode symbols */
+	if (rc_decode != NULL)
+		symbol_put(af9005_rc_decode);
+	if (rc_keys != NULL)
+		symbol_put(af9005_rc_keys);
+	if (rc_keys_size != NULL)
+		symbol_put(af9005_rc_keys_size);
+	/* deregister this driver from the USB subsystem */
+	usb_deregister(&af9005_usb_driver);
+}
+
+module_init(af9005_usb_module_init);
+module_exit(af9005_usb_module_exit);
+
+MODULE_AUTHOR("Luca Olivetti <luca@ventoso.org>");
+MODULE_DESCRIPTION("Driver for Afatech 9005 DVB-T USB1.1 stick");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/af9005.h b/drivers/media/dvb/dvb-usb/af9005.h
new file mode 100644
index 0000000..0bc48a0
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/af9005.h
@@ -0,0 +1,3496 @@
+/* Common header-file of the Linux driver for the Afatech 9005
+ * USB1.1 DVB-T receiver.
+ *
+ * Copyright (C) 2007 Luca Olivetti (luca@ventoso.org)
+ *
+ * Thanks to Afatech who kindly provided information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ */
+#ifndef _DVB_USB_AF9005_H_
+#define _DVB_USB_AF9005_H_
+
+#define DVB_USB_LOG_PREFIX "af9005"
+#include "dvb-usb.h"
+
+extern int dvb_usb_af9005_debug;
+#define deb_info(args...) dprintk(dvb_usb_af9005_debug,0x01,args)
+#define deb_xfer(args...) dprintk(dvb_usb_af9005_debug,0x02,args)
+#define deb_rc(args...)   dprintk(dvb_usb_af9005_debug,0x04,args)
+#define deb_reg(args...)  dprintk(dvb_usb_af9005_debug,0x08,args)
+#define deb_i2c(args...)  dprintk(dvb_usb_af9005_debug,0x10,args)
+#define deb_fw(args...)   dprintk(dvb_usb_af9005_debug,0x20,args)
+
+extern int dvb_usb_af9005_led;
+
+/* firmware */
+#define FW_BULKOUT_SIZE 250
+enum {
+	FW_CONFIG,
+	FW_CONFIRM,
+	FW_BOOT
+};
+
+/* af9005 commands */
+#define AF9005_OFDM_REG  0
+#define AF9005_TUNER_REG 1
+
+#define AF9005_REGISTER_RW     0x20
+#define AF9005_REGISTER_RW_ACK 0x21
+
+#define AF9005_CMD_OFDM_REG 0x00
+#define AF9005_CMD_TUNER    0x80
+#define AF9005_CMD_BURST    0x02
+#define AF9005_CMD_AUTOINC  0x04
+#define AF9005_CMD_READ     0x00
+#define AF9005_CMD_WRITE    0x01
+
+/* af9005 registers */
+#define APO_REG_RESET					0xAEFF
+
+#define APO_REG_I2C_RW_CAN_TUNER            0xF000
+#define APO_REG_I2C_RW_SILICON_TUNER        0xF001
+#define APO_REG_GPIO_RW_SILICON_TUNER       0xFFFE	/*  also for OFSM */
+#define APO_REG_TRIGGER_OFSM                0xFFFF	/*  also for OFSM */
+
+/***********************************************************************
+ *  Apollo Registers from VLSI					       *
+ ***********************************************************************/
+#define xd_p_reg_aagc_inverted_agc	0xA000
+#define	reg_aagc_inverted_agc_pos 0
+#define	reg_aagc_inverted_agc_len 1
+#define	reg_aagc_inverted_agc_lsb 0
+#define xd_p_reg_aagc_sign_only	0xA000
+#define	reg_aagc_sign_only_pos 1
+#define	reg_aagc_sign_only_len 1
+#define	reg_aagc_sign_only_lsb 0
+#define xd_p_reg_aagc_slow_adc_en	0xA000
+#define	reg_aagc_slow_adc_en_pos 2
+#define	reg_aagc_slow_adc_en_len 1
+#define	reg_aagc_slow_adc_en_lsb 0
+#define xd_p_reg_aagc_slow_adc_scale	0xA000
+#define	reg_aagc_slow_adc_scale_pos 3
+#define	reg_aagc_slow_adc_scale_len 5
+#define	reg_aagc_slow_adc_scale_lsb 0
+#define xd_p_reg_aagc_check_slow_adc_lock	0xA001
+#define	reg_aagc_check_slow_adc_lock_pos 0
+#define	reg_aagc_check_slow_adc_lock_len 1
+#define	reg_aagc_check_slow_adc_lock_lsb 0
+#define xd_p_reg_aagc_init_control	0xA001
+#define	reg_aagc_init_control_pos 1
+#define	reg_aagc_init_control_len 1
+#define	reg_aagc_init_control_lsb 0
+#define xd_p_reg_aagc_total_gain_sel	0xA001
+#define	reg_aagc_total_gain_sel_pos 2
+#define	reg_aagc_total_gain_sel_len 2
+#define	reg_aagc_total_gain_sel_lsb 0
+#define xd_p_reg_aagc_out_inv	0xA001
+#define	reg_aagc_out_inv_pos 5
+#define	reg_aagc_out_inv_len 1
+#define	reg_aagc_out_inv_lsb 0
+#define xd_p_reg_aagc_int_en	0xA001
+#define	reg_aagc_int_en_pos 6
+#define	reg_aagc_int_en_len 1
+#define	reg_aagc_int_en_lsb 0
+#define xd_p_reg_aagc_lock_change_flag	0xA001
+#define	reg_aagc_lock_change_flag_pos 7
+#define	reg_aagc_lock_change_flag_len 1
+#define	reg_aagc_lock_change_flag_lsb 0
+#define xd_p_reg_aagc_rf_loop_bw_scale_acquire	0xA002
+#define	reg_aagc_rf_loop_bw_scale_acquire_pos 0
+#define	reg_aagc_rf_loop_bw_scale_acquire_len 5
+#define	reg_aagc_rf_loop_bw_scale_acquire_lsb 0
+#define xd_p_reg_aagc_rf_loop_bw_scale_track	0xA003
+#define	reg_aagc_rf_loop_bw_scale_track_pos 0
+#define	reg_aagc_rf_loop_bw_scale_track_len 5
+#define	reg_aagc_rf_loop_bw_scale_track_lsb 0
+#define xd_p_reg_aagc_if_loop_bw_scale_acquire	0xA004
+#define	reg_aagc_if_loop_bw_scale_acquire_pos 0
+#define	reg_aagc_if_loop_bw_scale_acquire_len 5
+#define	reg_aagc_if_loop_bw_scale_acquire_lsb 0
+#define xd_p_reg_aagc_if_loop_bw_scale_track	0xA005
+#define	reg_aagc_if_loop_bw_scale_track_pos 0
+#define	reg_aagc_if_loop_bw_scale_track_len 5
+#define	reg_aagc_if_loop_bw_scale_track_lsb 0
+#define xd_p_reg_aagc_max_rf_agc_7_0	0xA006
+#define	reg_aagc_max_rf_agc_7_0_pos 0
+#define	reg_aagc_max_rf_agc_7_0_len 8
+#define	reg_aagc_max_rf_agc_7_0_lsb 0
+#define xd_p_reg_aagc_max_rf_agc_9_8	0xA007
+#define	reg_aagc_max_rf_agc_9_8_pos 0
+#define	reg_aagc_max_rf_agc_9_8_len 2
+#define	reg_aagc_max_rf_agc_9_8_lsb 8
+#define xd_p_reg_aagc_min_rf_agc_7_0	0xA008
+#define	reg_aagc_min_rf_agc_7_0_pos 0
+#define	reg_aagc_min_rf_agc_7_0_len 8
+#define	reg_aagc_min_rf_agc_7_0_lsb 0
+#define xd_p_reg_aagc_min_rf_agc_9_8	0xA009
+#define	reg_aagc_min_rf_agc_9_8_pos 0
+#define	reg_aagc_min_rf_agc_9_8_len 2
+#define	reg_aagc_min_rf_agc_9_8_lsb 8
+#define xd_p_reg_aagc_max_if_agc_7_0	0xA00A
+#define	reg_aagc_max_if_agc_7_0_pos 0
+#define	reg_aagc_max_if_agc_7_0_len 8
+#define	reg_aagc_max_if_agc_7_0_lsb 0
+#define xd_p_reg_aagc_max_if_agc_9_8	0xA00B
+#define	reg_aagc_max_if_agc_9_8_pos 0
+#define	reg_aagc_max_if_agc_9_8_len 2
+#define	reg_aagc_max_if_agc_9_8_lsb 8
+#define xd_p_reg_aagc_min_if_agc_7_0	0xA00C
+#define	reg_aagc_min_if_agc_7_0_pos 0
+#define	reg_aagc_min_if_agc_7_0_len 8
+#define	reg_aagc_min_if_agc_7_0_lsb 0
+#define xd_p_reg_aagc_min_if_agc_9_8	0xA00D
+#define	reg_aagc_min_if_agc_9_8_pos 0
+#define	reg_aagc_min_if_agc_9_8_len 2
+#define	reg_aagc_min_if_agc_9_8_lsb 8
+#define xd_p_reg_aagc_lock_sample_scale	0xA00E
+#define	reg_aagc_lock_sample_scale_pos 0
+#define	reg_aagc_lock_sample_scale_len 5
+#define	reg_aagc_lock_sample_scale_lsb 0
+#define xd_p_reg_aagc_rf_agc_lock_scale_acquire	0xA00F
+#define	reg_aagc_rf_agc_lock_scale_acquire_pos 0
+#define	reg_aagc_rf_agc_lock_scale_acquire_len 3
+#define	reg_aagc_rf_agc_lock_scale_acquire_lsb 0
+#define xd_p_reg_aagc_rf_agc_lock_scale_track	0xA00F
+#define	reg_aagc_rf_agc_lock_scale_track_pos 3
+#define	reg_aagc_rf_agc_lock_scale_track_len 3
+#define	reg_aagc_rf_agc_lock_scale_track_lsb 0
+#define xd_p_reg_aagc_if_agc_lock_scale_acquire	0xA010
+#define	reg_aagc_if_agc_lock_scale_acquire_pos 0
+#define	reg_aagc_if_agc_lock_scale_acquire_len 3
+#define	reg_aagc_if_agc_lock_scale_acquire_lsb 0
+#define xd_p_reg_aagc_if_agc_lock_scale_track	0xA010
+#define	reg_aagc_if_agc_lock_scale_track_pos 3
+#define	reg_aagc_if_agc_lock_scale_track_len 3
+#define	reg_aagc_if_agc_lock_scale_track_lsb 0
+#define xd_p_reg_aagc_rf_top_numerator_7_0	0xA011
+#define	reg_aagc_rf_top_numerator_7_0_pos 0
+#define	reg_aagc_rf_top_numerator_7_0_len 8
+#define	reg_aagc_rf_top_numerator_7_0_lsb 0
+#define xd_p_reg_aagc_rf_top_numerator_9_8	0xA012
+#define	reg_aagc_rf_top_numerator_9_8_pos 0
+#define	reg_aagc_rf_top_numerator_9_8_len 2
+#define	reg_aagc_rf_top_numerator_9_8_lsb 8
+#define xd_p_reg_aagc_if_top_numerator_7_0	0xA013
+#define	reg_aagc_if_top_numerator_7_0_pos 0
+#define	reg_aagc_if_top_numerator_7_0_len 8
+#define	reg_aagc_if_top_numerator_7_0_lsb 0
+#define xd_p_reg_aagc_if_top_numerator_9_8	0xA014
+#define	reg_aagc_if_top_numerator_9_8_pos 0
+#define	reg_aagc_if_top_numerator_9_8_len 2
+#define	reg_aagc_if_top_numerator_9_8_lsb 8
+#define xd_p_reg_aagc_adc_out_desired_7_0	0xA015
+#define	reg_aagc_adc_out_desired_7_0_pos 0
+#define	reg_aagc_adc_out_desired_7_0_len 8
+#define	reg_aagc_adc_out_desired_7_0_lsb 0
+#define xd_p_reg_aagc_adc_out_desired_8	0xA016
+#define	reg_aagc_adc_out_desired_8_pos 0
+#define	reg_aagc_adc_out_desired_8_len 1
+#define	reg_aagc_adc_out_desired_8_lsb 0
+#define xd_p_reg_aagc_fixed_gain	0xA016
+#define	reg_aagc_fixed_gain_pos 3
+#define	reg_aagc_fixed_gain_len 1
+#define	reg_aagc_fixed_gain_lsb 0
+#define xd_p_reg_aagc_lock_count_th	0xA016
+#define	reg_aagc_lock_count_th_pos 4
+#define	reg_aagc_lock_count_th_len 4
+#define	reg_aagc_lock_count_th_lsb 0
+#define xd_p_reg_aagc_fixed_rf_agc_control_7_0	0xA017
+#define	reg_aagc_fixed_rf_agc_control_7_0_pos 0
+#define	reg_aagc_fixed_rf_agc_control_7_0_len 8
+#define	reg_aagc_fixed_rf_agc_control_7_0_lsb 0
+#define xd_p_reg_aagc_fixed_rf_agc_control_15_8	0xA018
+#define	reg_aagc_fixed_rf_agc_control_15_8_pos 0
+#define	reg_aagc_fixed_rf_agc_control_15_8_len 8
+#define	reg_aagc_fixed_rf_agc_control_15_8_lsb 8
+#define xd_p_reg_aagc_fixed_rf_agc_control_23_16	0xA019
+#define	reg_aagc_fixed_rf_agc_control_23_16_pos 0
+#define	reg_aagc_fixed_rf_agc_control_23_16_len 8
+#define	reg_aagc_fixed_rf_agc_control_23_16_lsb 16
+#define xd_p_reg_aagc_fixed_rf_agc_control_30_24	0xA01A
+#define	reg_aagc_fixed_rf_agc_control_30_24_pos 0
+#define	reg_aagc_fixed_rf_agc_control_30_24_len 7
+#define	reg_aagc_fixed_rf_agc_control_30_24_lsb 24
+#define xd_p_reg_aagc_fixed_if_agc_control_7_0	0xA01B
+#define	reg_aagc_fixed_if_agc_control_7_0_pos 0
+#define	reg_aagc_fixed_if_agc_control_7_0_len 8
+#define	reg_aagc_fixed_if_agc_control_7_0_lsb 0
+#define xd_p_reg_aagc_fixed_if_agc_control_15_8	0xA01C
+#define	reg_aagc_fixed_if_agc_control_15_8_pos 0
+#define	reg_aagc_fixed_if_agc_control_15_8_len 8
+#define	reg_aagc_fixed_if_agc_control_15_8_lsb 8
+#define xd_p_reg_aagc_fixed_if_agc_control_23_16	0xA01D
+#define	reg_aagc_fixed_if_agc_control_23_16_pos 0
+#define	reg_aagc_fixed_if_agc_control_23_16_len 8
+#define	reg_aagc_fixed_if_agc_control_23_16_lsb 16
+#define xd_p_reg_aagc_fixed_if_agc_control_30_24	0xA01E
+#define	reg_aagc_fixed_if_agc_control_30_24_pos 0
+#define	reg_aagc_fixed_if_agc_control_30_24_len 7
+#define	reg_aagc_fixed_if_agc_control_30_24_lsb 24
+#define xd_p_reg_aagc_rf_agc_unlock_numerator	0xA01F
+#define	reg_aagc_rf_agc_unlock_numerator_pos 0
+#define	reg_aagc_rf_agc_unlock_numerator_len 6
+#define	reg_aagc_rf_agc_unlock_numerator_lsb 0
+#define xd_p_reg_aagc_if_agc_unlock_numerator	0xA020
+#define	reg_aagc_if_agc_unlock_numerator_pos 0
+#define	reg_aagc_if_agc_unlock_numerator_len 6
+#define	reg_aagc_if_agc_unlock_numerator_lsb 0
+#define xd_p_reg_unplug_th	0xA021
+#define	reg_unplug_th_pos 0
+#define	reg_unplug_th_len 8
+#define	reg_aagc_rf_x0_lsb 0
+#define xd_p_reg_weak_signal_rfagc_thr 0xA022
+#define	reg_weak_signal_rfagc_thr_pos 0
+#define	reg_weak_signal_rfagc_thr_len 8
+#define	reg_weak_signal_rfagc_thr_lsb 0
+#define xd_p_reg_unplug_rf_gain_th 0xA023
+#define	reg_unplug_rf_gain_th_pos 0
+#define	reg_unplug_rf_gain_th_len 8
+#define	reg_unplug_rf_gain_th_lsb 0
+#define xd_p_reg_unplug_dtop_rf_gain_th 0xA024
+#define	reg_unplug_dtop_rf_gain_th_pos 0
+#define	reg_unplug_dtop_rf_gain_th_len 8
+#define	reg_unplug_dtop_rf_gain_th_lsb 0
+#define xd_p_reg_unplug_dtop_if_gain_th 0xA025
+#define	reg_unplug_dtop_if_gain_th_pos 0
+#define	reg_unplug_dtop_if_gain_th_len 8
+#define	reg_unplug_dtop_if_gain_th_lsb 0
+#define xd_p_reg_top_recover_at_unplug_en 0xA026
+#define	reg_top_recover_at_unplug_en_pos 0
+#define	reg_top_recover_at_unplug_en_len 1
+#define	reg_top_recover_at_unplug_en_lsb 0
+#define xd_p_reg_aagc_rf_x6	0xA027
+#define	reg_aagc_rf_x6_pos 0
+#define	reg_aagc_rf_x6_len 8
+#define	reg_aagc_rf_x6_lsb 0
+#define xd_p_reg_aagc_rf_x7	0xA028
+#define	reg_aagc_rf_x7_pos 0
+#define	reg_aagc_rf_x7_len 8
+#define	reg_aagc_rf_x7_lsb 0
+#define xd_p_reg_aagc_rf_x8	0xA029
+#define	reg_aagc_rf_x8_pos 0
+#define	reg_aagc_rf_x8_len 8
+#define	reg_aagc_rf_x8_lsb 0
+#define xd_p_reg_aagc_rf_x9	0xA02A
+#define	reg_aagc_rf_x9_pos 0
+#define	reg_aagc_rf_x9_len 8
+#define	reg_aagc_rf_x9_lsb 0
+#define xd_p_reg_aagc_rf_x10	0xA02B
+#define	reg_aagc_rf_x10_pos 0
+#define	reg_aagc_rf_x10_len 8
+#define	reg_aagc_rf_x10_lsb 0
+#define xd_p_reg_aagc_rf_x11	0xA02C
+#define	reg_aagc_rf_x11_pos 0
+#define	reg_aagc_rf_x11_len 8
+#define	reg_aagc_rf_x11_lsb 0
+#define xd_p_reg_aagc_rf_x12	0xA02D
+#define	reg_aagc_rf_x12_pos 0
+#define	reg_aagc_rf_x12_len 8
+#define	reg_aagc_rf_x12_lsb 0
+#define xd_p_reg_aagc_rf_x13	0xA02E
+#define	reg_aagc_rf_x13_pos 0
+#define	reg_aagc_rf_x13_len 8
+#define	reg_aagc_rf_x13_lsb 0
+#define xd_p_reg_aagc_if_x0	0xA02F
+#define	reg_aagc_if_x0_pos 0
+#define	reg_aagc_if_x0_len 8
+#define	reg_aagc_if_x0_lsb 0
+#define xd_p_reg_aagc_if_x1	0xA030
+#define	reg_aagc_if_x1_pos 0
+#define	reg_aagc_if_x1_len 8
+#define	reg_aagc_if_x1_lsb 0
+#define xd_p_reg_aagc_if_x2	0xA031
+#define	reg_aagc_if_x2_pos 0
+#define	reg_aagc_if_x2_len 8
+#define	reg_aagc_if_x2_lsb 0
+#define xd_p_reg_aagc_if_x3	0xA032
+#define	reg_aagc_if_x3_pos 0
+#define	reg_aagc_if_x3_len 8
+#define	reg_aagc_if_x3_lsb 0
+#define xd_p_reg_aagc_if_x4	0xA033
+#define	reg_aagc_if_x4_pos 0
+#define	reg_aagc_if_x4_len 8
+#define	reg_aagc_if_x4_lsb 0
+#define xd_p_reg_aagc_if_x5	0xA034
+#define	reg_aagc_if_x5_pos 0
+#define	reg_aagc_if_x5_len 8
+#define	reg_aagc_if_x5_lsb 0
+#define xd_p_reg_aagc_if_x6	0xA035
+#define	reg_aagc_if_x6_pos 0
+#define	reg_aagc_if_x6_len 8
+#define	reg_aagc_if_x6_lsb 0
+#define xd_p_reg_aagc_if_x7	0xA036
+#define	reg_aagc_if_x7_pos 0
+#define	reg_aagc_if_x7_len 8
+#define	reg_aagc_if_x7_lsb 0
+#define xd_p_reg_aagc_if_x8	0xA037
+#define	reg_aagc_if_x8_pos 0
+#define	reg_aagc_if_x8_len 8
+#define	reg_aagc_if_x8_lsb 0
+#define xd_p_reg_aagc_if_x9	0xA038
+#define	reg_aagc_if_x9_pos 0
+#define	reg_aagc_if_x9_len 8
+#define	reg_aagc_if_x9_lsb 0
+#define xd_p_reg_aagc_if_x10	0xA039
+#define	reg_aagc_if_x10_pos 0
+#define	reg_aagc_if_x10_len 8
+#define	reg_aagc_if_x10_lsb 0
+#define xd_p_reg_aagc_if_x11	0xA03A
+#define	reg_aagc_if_x11_pos 0
+#define	reg_aagc_if_x11_len 8
+#define	reg_aagc_if_x11_lsb 0
+#define xd_p_reg_aagc_if_x12	0xA03B
+#define	reg_aagc_if_x12_pos 0
+#define	reg_aagc_if_x12_len 8
+#define	reg_aagc_if_x12_lsb 0
+#define xd_p_reg_aagc_if_x13	0xA03C
+#define	reg_aagc_if_x13_pos 0
+#define	reg_aagc_if_x13_len 8
+#define	reg_aagc_if_x13_lsb 0
+#define xd_p_reg_aagc_min_rf_ctl_8bit_for_dca	0xA03D
+#define	reg_aagc_min_rf_ctl_8bit_for_dca_pos 0
+#define	reg_aagc_min_rf_ctl_8bit_for_dca_len 8
+#define	reg_aagc_min_rf_ctl_8bit_for_dca_lsb 0
+#define xd_p_reg_aagc_min_if_ctl_8bit_for_dca	0xA03E
+#define	reg_aagc_min_if_ctl_8bit_for_dca_pos 0
+#define	reg_aagc_min_if_ctl_8bit_for_dca_len 8
+#define	reg_aagc_min_if_ctl_8bit_for_dca_lsb 0
+#define xd_r_reg_aagc_total_gain_7_0	0xA070
+#define	reg_aagc_total_gain_7_0_pos 0
+#define	reg_aagc_total_gain_7_0_len 8
+#define	reg_aagc_total_gain_7_0_lsb 0
+#define xd_r_reg_aagc_total_gain_15_8	0xA071
+#define	reg_aagc_total_gain_15_8_pos 0
+#define	reg_aagc_total_gain_15_8_len 8
+#define	reg_aagc_total_gain_15_8_lsb 8
+#define xd_p_reg_aagc_in_sat_cnt_7_0	0xA074
+#define	reg_aagc_in_sat_cnt_7_0_pos 0
+#define	reg_aagc_in_sat_cnt_7_0_len 8
+#define	reg_aagc_in_sat_cnt_7_0_lsb 0
+#define xd_p_reg_aagc_in_sat_cnt_15_8	0xA075
+#define	reg_aagc_in_sat_cnt_15_8_pos 0
+#define	reg_aagc_in_sat_cnt_15_8_len 8
+#define	reg_aagc_in_sat_cnt_15_8_lsb 8
+#define xd_p_reg_aagc_in_sat_cnt_23_16	0xA076
+#define	reg_aagc_in_sat_cnt_23_16_pos 0
+#define	reg_aagc_in_sat_cnt_23_16_len 8
+#define	reg_aagc_in_sat_cnt_23_16_lsb 16
+#define xd_p_reg_aagc_in_sat_cnt_31_24	0xA077
+#define	reg_aagc_in_sat_cnt_31_24_pos 0
+#define	reg_aagc_in_sat_cnt_31_24_len 8
+#define	reg_aagc_in_sat_cnt_31_24_lsb 24
+#define xd_r_reg_aagc_digital_rf_volt_7_0	0xA078
+#define	reg_aagc_digital_rf_volt_7_0_pos 0
+#define	reg_aagc_digital_rf_volt_7_0_len 8
+#define	reg_aagc_digital_rf_volt_7_0_lsb 0
+#define xd_r_reg_aagc_digital_rf_volt_9_8	0xA079
+#define	reg_aagc_digital_rf_volt_9_8_pos 0
+#define	reg_aagc_digital_rf_volt_9_8_len 2
+#define	reg_aagc_digital_rf_volt_9_8_lsb 8
+#define xd_r_reg_aagc_digital_if_volt_7_0	0xA07A
+#define	reg_aagc_digital_if_volt_7_0_pos 0
+#define	reg_aagc_digital_if_volt_7_0_len 8
+#define	reg_aagc_digital_if_volt_7_0_lsb 0
+#define xd_r_reg_aagc_digital_if_volt_9_8	0xA07B
+#define	reg_aagc_digital_if_volt_9_8_pos 0
+#define	reg_aagc_digital_if_volt_9_8_len 2
+#define	reg_aagc_digital_if_volt_9_8_lsb 8
+#define xd_r_reg_aagc_rf_gain	0xA07C
+#define	reg_aagc_rf_gain_pos 0
+#define	reg_aagc_rf_gain_len 8
+#define	reg_aagc_rf_gain_lsb 0
+#define xd_r_reg_aagc_if_gain	0xA07D
+#define	reg_aagc_if_gain_pos 0
+#define	reg_aagc_if_gain_len 8
+#define	reg_aagc_if_gain_lsb 0
+#define xd_p_tinr_imp_indicator	0xA080
+#define	tinr_imp_indicator_pos 0
+#define	tinr_imp_indicator_len 2
+#define	tinr_imp_indicator_lsb 0
+#define xd_p_reg_tinr_fifo_size	0xA080
+#define	reg_tinr_fifo_size_pos 2
+#define	reg_tinr_fifo_size_len 5
+#define	reg_tinr_fifo_size_lsb 0
+#define xd_p_reg_tinr_saturation_cnt_th	0xA081
+#define	reg_tinr_saturation_cnt_th_pos 0
+#define	reg_tinr_saturation_cnt_th_len 4
+#define	reg_tinr_saturation_cnt_th_lsb 0
+#define xd_p_reg_tinr_saturation_th_3_0	0xA081
+#define	reg_tinr_saturation_th_3_0_pos 4
+#define	reg_tinr_saturation_th_3_0_len 4
+#define	reg_tinr_saturation_th_3_0_lsb 0
+#define xd_p_reg_tinr_saturation_th_8_4	0xA082
+#define	reg_tinr_saturation_th_8_4_pos 0
+#define	reg_tinr_saturation_th_8_4_len 5
+#define	reg_tinr_saturation_th_8_4_lsb 4
+#define xd_p_reg_tinr_imp_duration_th_2k_7_0	0xA083
+#define	reg_tinr_imp_duration_th_2k_7_0_pos 0
+#define	reg_tinr_imp_duration_th_2k_7_0_len 8
+#define	reg_tinr_imp_duration_th_2k_7_0_lsb 0
+#define xd_p_reg_tinr_imp_duration_th_2k_8	0xA084
+#define	reg_tinr_imp_duration_th_2k_8_pos 0
+#define	reg_tinr_imp_duration_th_2k_8_len 1
+#define	reg_tinr_imp_duration_th_2k_8_lsb 0
+#define xd_p_reg_tinr_imp_duration_th_8k_7_0	0xA085
+#define	reg_tinr_imp_duration_th_8k_7_0_pos 0
+#define	reg_tinr_imp_duration_th_8k_7_0_len 8
+#define	reg_tinr_imp_duration_th_8k_7_0_lsb 0
+#define xd_p_reg_tinr_imp_duration_th_8k_10_8	0xA086
+#define	reg_tinr_imp_duration_th_8k_10_8_pos 0
+#define	reg_tinr_imp_duration_th_8k_10_8_len 3
+#define	reg_tinr_imp_duration_th_8k_10_8_lsb 8
+#define xd_p_reg_tinr_freq_ratio_6m_7_0	0xA087
+#define	reg_tinr_freq_ratio_6m_7_0_pos 0
+#define	reg_tinr_freq_ratio_6m_7_0_len 8
+#define	reg_tinr_freq_ratio_6m_7_0_lsb 0
+#define xd_p_reg_tinr_freq_ratio_6m_12_8	0xA088
+#define	reg_tinr_freq_ratio_6m_12_8_pos 0
+#define	reg_tinr_freq_ratio_6m_12_8_len 5
+#define	reg_tinr_freq_ratio_6m_12_8_lsb 8
+#define xd_p_reg_tinr_freq_ratio_7m_7_0	0xA089
+#define	reg_tinr_freq_ratio_7m_7_0_pos 0
+#define	reg_tinr_freq_ratio_7m_7_0_len 8
+#define	reg_tinr_freq_ratio_7m_7_0_lsb 0
+#define xd_p_reg_tinr_freq_ratio_7m_12_8	0xA08A
+#define	reg_tinr_freq_ratio_7m_12_8_pos 0
+#define	reg_tinr_freq_ratio_7m_12_8_len 5
+#define	reg_tinr_freq_ratio_7m_12_8_lsb 8
+#define xd_p_reg_tinr_freq_ratio_8m_7_0	0xA08B
+#define	reg_tinr_freq_ratio_8m_7_0_pos 0
+#define	reg_tinr_freq_ratio_8m_7_0_len 8
+#define	reg_tinr_freq_ratio_8m_7_0_lsb 0
+#define xd_p_reg_tinr_freq_ratio_8m_12_8	0xA08C
+#define	reg_tinr_freq_ratio_8m_12_8_pos 0
+#define	reg_tinr_freq_ratio_8m_12_8_len 5
+#define	reg_tinr_freq_ratio_8m_12_8_lsb 8
+#define xd_p_reg_tinr_imp_duration_th_low_2k	0xA08D
+#define	reg_tinr_imp_duration_th_low_2k_pos 0
+#define	reg_tinr_imp_duration_th_low_2k_len 8
+#define	reg_tinr_imp_duration_th_low_2k_lsb 0
+#define xd_p_reg_tinr_imp_duration_th_low_8k	0xA08E
+#define	reg_tinr_imp_duration_th_low_8k_pos 0
+#define	reg_tinr_imp_duration_th_low_8k_len 8
+#define	reg_tinr_imp_duration_th_low_8k_lsb 0
+#define xd_r_reg_tinr_counter_7_0	0xA090
+#define	reg_tinr_counter_7_0_pos 0
+#define	reg_tinr_counter_7_0_len 8
+#define	reg_tinr_counter_7_0_lsb 0
+#define xd_r_reg_tinr_counter_15_8	0xA091
+#define	reg_tinr_counter_15_8_pos 0
+#define	reg_tinr_counter_15_8_len 8
+#define	reg_tinr_counter_15_8_lsb 8
+#define xd_p_reg_tinr_adative_tinr_en	0xA093
+#define	reg_tinr_adative_tinr_en_pos 0
+#define	reg_tinr_adative_tinr_en_len 1
+#define	reg_tinr_adative_tinr_en_lsb 0
+#define xd_p_reg_tinr_peak_fifo_size	0xA093
+#define	reg_tinr_peak_fifo_size_pos 1
+#define	reg_tinr_peak_fifo_size_len 5
+#define	reg_tinr_peak_fifo_size_lsb 0
+#define xd_p_reg_tinr_counter_rst	0xA093
+#define	reg_tinr_counter_rst_pos 6
+#define	reg_tinr_counter_rst_len 1
+#define	reg_tinr_counter_rst_lsb 0
+#define xd_p_reg_tinr_search_period_7_0	0xA094
+#define	reg_tinr_search_period_7_0_pos 0
+#define	reg_tinr_search_period_7_0_len 8
+#define	reg_tinr_search_period_7_0_lsb 0
+#define xd_p_reg_tinr_search_period_15_8	0xA095
+#define	reg_tinr_search_period_15_8_pos 0
+#define	reg_tinr_search_period_15_8_len 8
+#define	reg_tinr_search_period_15_8_lsb 8
+#define xd_p_reg_ccifs_fcw_7_0	0xA0A0
+#define	reg_ccifs_fcw_7_0_pos 0
+#define	reg_ccifs_fcw_7_0_len 8
+#define	reg_ccifs_fcw_7_0_lsb 0
+#define xd_p_reg_ccifs_fcw_12_8	0xA0A1
+#define	reg_ccifs_fcw_12_8_pos 0
+#define	reg_ccifs_fcw_12_8_len 5
+#define	reg_ccifs_fcw_12_8_lsb 8
+#define xd_p_reg_ccifs_spec_inv	0xA0A1
+#define	reg_ccifs_spec_inv_pos 5
+#define	reg_ccifs_spec_inv_len 1
+#define	reg_ccifs_spec_inv_lsb 0
+#define xd_p_reg_gp_trigger	0xA0A2
+#define	reg_gp_trigger_pos 0
+#define	reg_gp_trigger_len 1
+#define	reg_gp_trigger_lsb 0
+#define xd_p_reg_trigger_sel	0xA0A2
+#define	reg_trigger_sel_pos 1
+#define	reg_trigger_sel_len 2
+#define	reg_trigger_sel_lsb 0
+#define xd_p_reg_debug_ofdm	0xA0A2
+#define	reg_debug_ofdm_pos 3
+#define	reg_debug_ofdm_len 2
+#define	reg_debug_ofdm_lsb 0
+#define xd_p_reg_trigger_module_sel	0xA0A3
+#define	reg_trigger_module_sel_pos 0
+#define	reg_trigger_module_sel_len 6
+#define	reg_trigger_module_sel_lsb 0
+#define xd_p_reg_trigger_set_sel	0xA0A4
+#define	reg_trigger_set_sel_pos 0
+#define	reg_trigger_set_sel_len 6
+#define	reg_trigger_set_sel_lsb 0
+#define xd_p_reg_fw_int_mask_n	0xA0A4
+#define	reg_fw_int_mask_n_pos 6
+#define	reg_fw_int_mask_n_len 1
+#define	reg_fw_int_mask_n_lsb 0
+#define xd_p_reg_debug_group	0xA0A5
+#define	reg_debug_group_pos 0
+#define	reg_debug_group_len 4
+#define	reg_debug_group_lsb 0
+#define xd_p_reg_odbg_clk_sel	0xA0A5
+#define	reg_odbg_clk_sel_pos 4
+#define	reg_odbg_clk_sel_len 2
+#define	reg_odbg_clk_sel_lsb 0
+#define xd_p_reg_ccif_sc	0xA0C0
+#define	reg_ccif_sc_pos 0
+#define	reg_ccif_sc_len 4
+#define	reg_ccif_sc_lsb 0
+#define xd_r_reg_ccif_saturate	0xA0C1
+#define	reg_ccif_saturate_pos 0
+#define	reg_ccif_saturate_len 2
+#define	reg_ccif_saturate_lsb 0
+#define xd_r_reg_antif_saturate	0xA0C1
+#define	reg_antif_saturate_pos 2
+#define	reg_antif_saturate_len 4
+#define	reg_antif_saturate_lsb 0
+#define xd_r_reg_acif_saturate	0xA0C2
+#define	reg_acif_saturate_pos 0
+#define	reg_acif_saturate_len 8
+#define	reg_acif_saturate_lsb 0
+#define xd_p_reg_tmr_timer0_threshold_7_0	0xA0C8
+#define	reg_tmr_timer0_threshold_7_0_pos 0
+#define	reg_tmr_timer0_threshold_7_0_len 8
+#define	reg_tmr_timer0_threshold_7_0_lsb 0
+#define xd_p_reg_tmr_timer0_threshold_15_8	0xA0C9
+#define	reg_tmr_timer0_threshold_15_8_pos 0
+#define	reg_tmr_timer0_threshold_15_8_len 8
+#define	reg_tmr_timer0_threshold_15_8_lsb 8
+#define xd_p_reg_tmr_timer0_enable	0xA0CA
+#define	reg_tmr_timer0_enable_pos 0
+#define	reg_tmr_timer0_enable_len 1
+#define	reg_tmr_timer0_enable_lsb 0
+#define xd_p_reg_tmr_timer0_clk_sel	0xA0CA
+#define	reg_tmr_timer0_clk_sel_pos 1
+#define	reg_tmr_timer0_clk_sel_len 1
+#define	reg_tmr_timer0_clk_sel_lsb 0
+#define xd_p_reg_tmr_timer0_int	0xA0CA
+#define	reg_tmr_timer0_int_pos 2
+#define	reg_tmr_timer0_int_len 1
+#define	reg_tmr_timer0_int_lsb 0
+#define xd_p_reg_tmr_timer0_rst	0xA0CA
+#define	reg_tmr_timer0_rst_pos 3
+#define	reg_tmr_timer0_rst_len 1
+#define	reg_tmr_timer0_rst_lsb 0
+#define xd_r_reg_tmr_timer0_count_7_0	0xA0CB
+#define	reg_tmr_timer0_count_7_0_pos 0
+#define	reg_tmr_timer0_count_7_0_len 8
+#define	reg_tmr_timer0_count_7_0_lsb 0
+#define xd_r_reg_tmr_timer0_count_15_8	0xA0CC
+#define	reg_tmr_timer0_count_15_8_pos 0
+#define	reg_tmr_timer0_count_15_8_len 8
+#define	reg_tmr_timer0_count_15_8_lsb 8
+#define xd_p_reg_suspend	0xA0CD
+#define	reg_suspend_pos 0
+#define	reg_suspend_len 1
+#define	reg_suspend_lsb 0
+#define xd_p_reg_suspend_rdy	0xA0CD
+#define	reg_suspend_rdy_pos 1
+#define	reg_suspend_rdy_len 1
+#define	reg_suspend_rdy_lsb 0
+#define xd_p_reg_resume	0xA0CD
+#define	reg_resume_pos 2
+#define	reg_resume_len 1
+#define	reg_resume_lsb 0
+#define xd_p_reg_resume_rdy	0xA0CD
+#define	reg_resume_rdy_pos 3
+#define	reg_resume_rdy_len 1
+#define	reg_resume_rdy_lsb 0
+#define xd_p_reg_fmf	0xA0CE
+#define	reg_fmf_pos 0
+#define	reg_fmf_len 8
+#define	reg_fmf_lsb 0
+#define xd_p_ccid_accumulate_num_2k_7_0	0xA100
+#define	ccid_accumulate_num_2k_7_0_pos 0
+#define	ccid_accumulate_num_2k_7_0_len 8
+#define	ccid_accumulate_num_2k_7_0_lsb 0
+#define xd_p_ccid_accumulate_num_2k_12_8	0xA101
+#define	ccid_accumulate_num_2k_12_8_pos 0
+#define	ccid_accumulate_num_2k_12_8_len 5
+#define	ccid_accumulate_num_2k_12_8_lsb 8
+#define xd_p_ccid_accumulate_num_8k_7_0	0xA102
+#define	ccid_accumulate_num_8k_7_0_pos 0
+#define	ccid_accumulate_num_8k_7_0_len 8
+#define	ccid_accumulate_num_8k_7_0_lsb 0
+#define xd_p_ccid_accumulate_num_8k_14_8	0xA103
+#define	ccid_accumulate_num_8k_14_8_pos 0
+#define	ccid_accumulate_num_8k_14_8_len 7
+#define	ccid_accumulate_num_8k_14_8_lsb 8
+#define xd_p_ccid_desired_level_0	0xA103
+#define	ccid_desired_level_0_pos 7
+#define	ccid_desired_level_0_len 1
+#define	ccid_desired_level_0_lsb 0
+#define xd_p_ccid_desired_level_8_1	0xA104
+#define	ccid_desired_level_8_1_pos 0
+#define	ccid_desired_level_8_1_len 8
+#define	ccid_desired_level_8_1_lsb 1
+#define xd_p_ccid_apply_delay	0xA105
+#define	ccid_apply_delay_pos 0
+#define	ccid_apply_delay_len 7
+#define	ccid_apply_delay_lsb 0
+#define xd_p_ccid_CCID_Threshold1	0xA106
+#define	ccid_CCID_Threshold1_pos 0
+#define	ccid_CCID_Threshold1_len 8
+#define	ccid_CCID_Threshold1_lsb 0
+#define xd_p_ccid_CCID_Threshold2	0xA107
+#define	ccid_CCID_Threshold2_pos 0
+#define	ccid_CCID_Threshold2_len 8
+#define	ccid_CCID_Threshold2_lsb 0
+#define xd_p_reg_ccid_gain_scale	0xA108
+#define	reg_ccid_gain_scale_pos 0
+#define	reg_ccid_gain_scale_len 4
+#define	reg_ccid_gain_scale_lsb 0
+#define xd_p_reg_ccid2_passband_gain_set	0xA108
+#define	reg_ccid2_passband_gain_set_pos 4
+#define	reg_ccid2_passband_gain_set_len 4
+#define	reg_ccid2_passband_gain_set_lsb 0
+#define xd_r_ccid_multiplier_7_0	0xA109
+#define	ccid_multiplier_7_0_pos 0
+#define	ccid_multiplier_7_0_len 8
+#define	ccid_multiplier_7_0_lsb 0
+#define xd_r_ccid_multiplier_15_8	0xA10A
+#define	ccid_multiplier_15_8_pos 0
+#define	ccid_multiplier_15_8_len 8
+#define	ccid_multiplier_15_8_lsb 8
+#define xd_r_ccid_right_shift_bits	0xA10B
+#define	ccid_right_shift_bits_pos 0
+#define	ccid_right_shift_bits_len 4
+#define	ccid_right_shift_bits_lsb 0
+#define xd_r_reg_ccid_sx_7_0	0xA10C
+#define	reg_ccid_sx_7_0_pos 0
+#define	reg_ccid_sx_7_0_len 8
+#define	reg_ccid_sx_7_0_lsb 0
+#define xd_r_reg_ccid_sx_15_8	0xA10D
+#define	reg_ccid_sx_15_8_pos 0
+#define	reg_ccid_sx_15_8_len 8
+#define	reg_ccid_sx_15_8_lsb 8
+#define xd_r_reg_ccid_sx_21_16	0xA10E
+#define	reg_ccid_sx_21_16_pos 0
+#define	reg_ccid_sx_21_16_len 6
+#define	reg_ccid_sx_21_16_lsb 16
+#define xd_r_reg_ccid_sy_7_0	0xA110
+#define	reg_ccid_sy_7_0_pos 0
+#define	reg_ccid_sy_7_0_len 8
+#define	reg_ccid_sy_7_0_lsb 0
+#define xd_r_reg_ccid_sy_15_8	0xA111
+#define	reg_ccid_sy_15_8_pos 0
+#define	reg_ccid_sy_15_8_len 8
+#define	reg_ccid_sy_15_8_lsb 8
+#define xd_r_reg_ccid_sy_23_16	0xA112
+#define	reg_ccid_sy_23_16_pos 0
+#define	reg_ccid_sy_23_16_len 8
+#define	reg_ccid_sy_23_16_lsb 16
+#define xd_r_reg_ccid2_sz_7_0	0xA114
+#define	reg_ccid2_sz_7_0_pos 0
+#define	reg_ccid2_sz_7_0_len 8
+#define	reg_ccid2_sz_7_0_lsb 0
+#define xd_r_reg_ccid2_sz_15_8	0xA115
+#define	reg_ccid2_sz_15_8_pos 0
+#define	reg_ccid2_sz_15_8_len 8
+#define	reg_ccid2_sz_15_8_lsb 8
+#define xd_r_reg_ccid2_sz_23_16	0xA116
+#define	reg_ccid2_sz_23_16_pos 0
+#define	reg_ccid2_sz_23_16_len 8
+#define	reg_ccid2_sz_23_16_lsb 16
+#define xd_r_reg_ccid2_sz_25_24	0xA117
+#define	reg_ccid2_sz_25_24_pos 0
+#define	reg_ccid2_sz_25_24_len 2
+#define	reg_ccid2_sz_25_24_lsb 24
+#define xd_r_reg_ccid2_sy_7_0	0xA118
+#define	reg_ccid2_sy_7_0_pos 0
+#define	reg_ccid2_sy_7_0_len 8
+#define	reg_ccid2_sy_7_0_lsb 0
+#define xd_r_reg_ccid2_sy_15_8	0xA119
+#define	reg_ccid2_sy_15_8_pos 0
+#define	reg_ccid2_sy_15_8_len 8
+#define	reg_ccid2_sy_15_8_lsb 8
+#define xd_r_reg_ccid2_sy_23_16	0xA11A
+#define	reg_ccid2_sy_23_16_pos 0
+#define	reg_ccid2_sy_23_16_len 8
+#define	reg_ccid2_sy_23_16_lsb 16
+#define xd_r_reg_ccid2_sy_25_24	0xA11B
+#define	reg_ccid2_sy_25_24_pos 0
+#define	reg_ccid2_sy_25_24_len 2
+#define	reg_ccid2_sy_25_24_lsb 24
+#define xd_p_dagc1_accumulate_num_2k_7_0	0xA120
+#define	dagc1_accumulate_num_2k_7_0_pos 0
+#define	dagc1_accumulate_num_2k_7_0_len 8
+#define	dagc1_accumulate_num_2k_7_0_lsb 0
+#define xd_p_dagc1_accumulate_num_2k_12_8	0xA121
+#define	dagc1_accumulate_num_2k_12_8_pos 0
+#define	dagc1_accumulate_num_2k_12_8_len 5
+#define	dagc1_accumulate_num_2k_12_8_lsb 8
+#define xd_p_dagc1_accumulate_num_8k_7_0	0xA122
+#define	dagc1_accumulate_num_8k_7_0_pos 0
+#define	dagc1_accumulate_num_8k_7_0_len 8
+#define	dagc1_accumulate_num_8k_7_0_lsb 0
+#define xd_p_dagc1_accumulate_num_8k_14_8	0xA123
+#define	dagc1_accumulate_num_8k_14_8_pos 0
+#define	dagc1_accumulate_num_8k_14_8_len 7
+#define	dagc1_accumulate_num_8k_14_8_lsb 8
+#define xd_p_dagc1_desired_level_0	0xA123
+#define	dagc1_desired_level_0_pos 7
+#define	dagc1_desired_level_0_len 1
+#define	dagc1_desired_level_0_lsb 0
+#define xd_p_dagc1_desired_level_8_1	0xA124
+#define	dagc1_desired_level_8_1_pos 0
+#define	dagc1_desired_level_8_1_len 8
+#define	dagc1_desired_level_8_1_lsb 1
+#define xd_p_dagc1_apply_delay	0xA125
+#define	dagc1_apply_delay_pos 0
+#define	dagc1_apply_delay_len 7
+#define	dagc1_apply_delay_lsb 0
+#define xd_p_dagc1_bypass_scale_ctl	0xA126
+#define	dagc1_bypass_scale_ctl_pos 0
+#define	dagc1_bypass_scale_ctl_len 2
+#define	dagc1_bypass_scale_ctl_lsb 0
+#define xd_p_reg_dagc1_in_sat_cnt_7_0	0xA127
+#define	reg_dagc1_in_sat_cnt_7_0_pos 0
+#define	reg_dagc1_in_sat_cnt_7_0_len 8
+#define	reg_dagc1_in_sat_cnt_7_0_lsb 0
+#define xd_p_reg_dagc1_in_sat_cnt_15_8	0xA128
+#define	reg_dagc1_in_sat_cnt_15_8_pos 0
+#define	reg_dagc1_in_sat_cnt_15_8_len 8
+#define	reg_dagc1_in_sat_cnt_15_8_lsb 8
+#define xd_p_reg_dagc1_in_sat_cnt_23_16	0xA129
+#define	reg_dagc1_in_sat_cnt_23_16_pos 0
+#define	reg_dagc1_in_sat_cnt_23_16_len 8
+#define	reg_dagc1_in_sat_cnt_23_16_lsb 16
+#define xd_p_reg_dagc1_in_sat_cnt_31_24	0xA12A
+#define	reg_dagc1_in_sat_cnt_31_24_pos 0
+#define	reg_dagc1_in_sat_cnt_31_24_len 8
+#define	reg_dagc1_in_sat_cnt_31_24_lsb 24
+#define xd_p_reg_dagc1_out_sat_cnt_7_0	0xA12B
+#define	reg_dagc1_out_sat_cnt_7_0_pos 0
+#define	reg_dagc1_out_sat_cnt_7_0_len 8
+#define	reg_dagc1_out_sat_cnt_7_0_lsb 0
+#define xd_p_reg_dagc1_out_sat_cnt_15_8	0xA12C
+#define	reg_dagc1_out_sat_cnt_15_8_pos 0
+#define	reg_dagc1_out_sat_cnt_15_8_len 8
+#define	reg_dagc1_out_sat_cnt_15_8_lsb 8
+#define xd_p_reg_dagc1_out_sat_cnt_23_16	0xA12D
+#define	reg_dagc1_out_sat_cnt_23_16_pos 0
+#define	reg_dagc1_out_sat_cnt_23_16_len 8
+#define	reg_dagc1_out_sat_cnt_23_16_lsb 16
+#define xd_p_reg_dagc1_out_sat_cnt_31_24	0xA12E
+#define	reg_dagc1_out_sat_cnt_31_24_pos 0
+#define	reg_dagc1_out_sat_cnt_31_24_len 8
+#define	reg_dagc1_out_sat_cnt_31_24_lsb 24
+#define xd_r_dagc1_multiplier_7_0	0xA136
+#define	dagc1_multiplier_7_0_pos 0
+#define	dagc1_multiplier_7_0_len 8
+#define	dagc1_multiplier_7_0_lsb 0
+#define xd_r_dagc1_multiplier_15_8	0xA137
+#define	dagc1_multiplier_15_8_pos 0
+#define	dagc1_multiplier_15_8_len 8
+#define	dagc1_multiplier_15_8_lsb 8
+#define xd_r_dagc1_right_shift_bits	0xA138
+#define	dagc1_right_shift_bits_pos 0
+#define	dagc1_right_shift_bits_len 4
+#define	dagc1_right_shift_bits_lsb 0
+#define xd_p_reg_bfs_fcw_7_0	0xA140
+#define	reg_bfs_fcw_7_0_pos 0
+#define	reg_bfs_fcw_7_0_len 8
+#define	reg_bfs_fcw_7_0_lsb 0
+#define xd_p_reg_bfs_fcw_15_8	0xA141
+#define	reg_bfs_fcw_15_8_pos 0
+#define	reg_bfs_fcw_15_8_len 8
+#define	reg_bfs_fcw_15_8_lsb 8
+#define xd_p_reg_bfs_fcw_22_16	0xA142
+#define	reg_bfs_fcw_22_16_pos 0
+#define	reg_bfs_fcw_22_16_len 7
+#define	reg_bfs_fcw_22_16_lsb 16
+#define xd_p_reg_antif_sf_7_0	0xA144
+#define	reg_antif_sf_7_0_pos 0
+#define	reg_antif_sf_7_0_len 8
+#define	reg_antif_sf_7_0_lsb 0
+#define xd_p_reg_antif_sf_11_8	0xA145
+#define	reg_antif_sf_11_8_pos 0
+#define	reg_antif_sf_11_8_len 4
+#define	reg_antif_sf_11_8_lsb 8
+#define xd_r_bfs_fcw_q_7_0	0xA150
+#define	bfs_fcw_q_7_0_pos 0
+#define	bfs_fcw_q_7_0_len 8
+#define	bfs_fcw_q_7_0_lsb 0
+#define xd_r_bfs_fcw_q_15_8	0xA151
+#define	bfs_fcw_q_15_8_pos 0
+#define	bfs_fcw_q_15_8_len 8
+#define	bfs_fcw_q_15_8_lsb 8
+#define xd_r_bfs_fcw_q_22_16	0xA152
+#define	bfs_fcw_q_22_16_pos 0
+#define	bfs_fcw_q_22_16_len 7
+#define	bfs_fcw_q_22_16_lsb 16
+#define xd_p_reg_dca_enu	0xA160
+#define	reg_dca_enu_pos 0
+#define	reg_dca_enu_len 1
+#define	reg_dca_enu_lsb 0
+#define xd_p_reg_dca_enl	0xA160
+#define	reg_dca_enl_pos 1
+#define	reg_dca_enl_len 1
+#define	reg_dca_enl_lsb 0
+#define xd_p_reg_dca_lower_chip	0xA160
+#define	reg_dca_lower_chip_pos 2
+#define	reg_dca_lower_chip_len 1
+#define	reg_dca_lower_chip_lsb 0
+#define xd_p_reg_dca_upper_chip	0xA160
+#define	reg_dca_upper_chip_pos 3
+#define	reg_dca_upper_chip_len 1
+#define	reg_dca_upper_chip_lsb 0
+#define xd_p_reg_dca_platch	0xA160
+#define	reg_dca_platch_pos 4
+#define	reg_dca_platch_len 1
+#define	reg_dca_platch_lsb 0
+#define xd_p_reg_dca_th	0xA161
+#define	reg_dca_th_pos 0
+#define	reg_dca_th_len 5
+#define	reg_dca_th_lsb 0
+#define xd_p_reg_dca_scale	0xA162
+#define	reg_dca_scale_pos 0
+#define	reg_dca_scale_len 4
+#define	reg_dca_scale_lsb 0
+#define xd_p_reg_dca_tone_7_0	0xA163
+#define	reg_dca_tone_7_0_pos 0
+#define	reg_dca_tone_7_0_len 8
+#define	reg_dca_tone_7_0_lsb 0
+#define xd_p_reg_dca_tone_12_8	0xA164
+#define	reg_dca_tone_12_8_pos 0
+#define	reg_dca_tone_12_8_len 5
+#define	reg_dca_tone_12_8_lsb 8
+#define xd_p_reg_dca_time_7_0	0xA165
+#define	reg_dca_time_7_0_pos 0
+#define	reg_dca_time_7_0_len 8
+#define	reg_dca_time_7_0_lsb 0
+#define xd_p_reg_dca_time_15_8	0xA166
+#define	reg_dca_time_15_8_pos 0
+#define	reg_dca_time_15_8_len 8
+#define	reg_dca_time_15_8_lsb 8
+#define xd_r_dcasm	0xA167
+#define	dcasm_pos 0
+#define	dcasm_len 3
+#define	dcasm_lsb 0
+#define xd_p_reg_qnt_valuew_7_0	0xA168
+#define	reg_qnt_valuew_7_0_pos 0
+#define	reg_qnt_valuew_7_0_len 8
+#define	reg_qnt_valuew_7_0_lsb 0
+#define xd_p_reg_qnt_valuew_10_8	0xA169
+#define	reg_qnt_valuew_10_8_pos 0
+#define	reg_qnt_valuew_10_8_len 3
+#define	reg_qnt_valuew_10_8_lsb 8
+#define xd_p_dca_sbx_gain_diff_7_0	0xA16A
+#define	dca_sbx_gain_diff_7_0_pos 0
+#define	dca_sbx_gain_diff_7_0_len 8
+#define	dca_sbx_gain_diff_7_0_lsb 0
+#define xd_p_dca_sbx_gain_diff_9_8	0xA16B
+#define	dca_sbx_gain_diff_9_8_pos 0
+#define	dca_sbx_gain_diff_9_8_len 2
+#define	dca_sbx_gain_diff_9_8_lsb 8
+#define xd_p_reg_dca_stand_alone	0xA16C
+#define	reg_dca_stand_alone_pos 0
+#define	reg_dca_stand_alone_len 1
+#define	reg_dca_stand_alone_lsb 0
+#define xd_p_reg_dca_upper_out_en	0xA16C
+#define	reg_dca_upper_out_en_pos 1
+#define	reg_dca_upper_out_en_len 1
+#define	reg_dca_upper_out_en_lsb 0
+#define xd_p_reg_dca_rc_en	0xA16C
+#define	reg_dca_rc_en_pos 2
+#define	reg_dca_rc_en_len 1
+#define	reg_dca_rc_en_lsb 0
+#define xd_p_reg_dca_retrain_send	0xA16C
+#define	reg_dca_retrain_send_pos 3
+#define	reg_dca_retrain_send_len 1
+#define	reg_dca_retrain_send_lsb 0
+#define xd_p_reg_dca_retrain_rec	0xA16C
+#define	reg_dca_retrain_rec_pos 4
+#define	reg_dca_retrain_rec_len 1
+#define	reg_dca_retrain_rec_lsb 0
+#define xd_p_reg_dca_api_tpsrdy	0xA16C
+#define	reg_dca_api_tpsrdy_pos 5
+#define	reg_dca_api_tpsrdy_len 1
+#define	reg_dca_api_tpsrdy_lsb 0
+#define xd_p_reg_dca_symbol_gap	0xA16D
+#define	reg_dca_symbol_gap_pos 0
+#define	reg_dca_symbol_gap_len 4
+#define	reg_dca_symbol_gap_lsb 0
+#define xd_p_reg_qnt_nfvaluew_7_0	0xA16E
+#define	reg_qnt_nfvaluew_7_0_pos 0
+#define	reg_qnt_nfvaluew_7_0_len 8
+#define	reg_qnt_nfvaluew_7_0_lsb 0
+#define xd_p_reg_qnt_nfvaluew_10_8	0xA16F
+#define	reg_qnt_nfvaluew_10_8_pos 0
+#define	reg_qnt_nfvaluew_10_8_len 3
+#define	reg_qnt_nfvaluew_10_8_lsb 8
+#define xd_p_reg_qnt_flatness_thr_7_0	0xA170
+#define	reg_qnt_flatness_thr_7_0_pos 0
+#define	reg_qnt_flatness_thr_7_0_len 8
+#define	reg_qnt_flatness_thr_7_0_lsb 0
+#define xd_p_reg_qnt_flatness_thr_9_8	0xA171
+#define	reg_qnt_flatness_thr_9_8_pos 0
+#define	reg_qnt_flatness_thr_9_8_len 2
+#define	reg_qnt_flatness_thr_9_8_lsb 8
+#define xd_p_reg_dca_tone_idx_5_0	0xA171
+#define	reg_dca_tone_idx_5_0_pos 2
+#define	reg_dca_tone_idx_5_0_len 6
+#define	reg_dca_tone_idx_5_0_lsb 0
+#define xd_p_reg_dca_tone_idx_12_6	0xA172
+#define	reg_dca_tone_idx_12_6_pos 0
+#define	reg_dca_tone_idx_12_6_len 7
+#define	reg_dca_tone_idx_12_6_lsb 6
+#define xd_p_reg_dca_data_vld	0xA173
+#define	reg_dca_data_vld_pos 0
+#define	reg_dca_data_vld_len 1
+#define	reg_dca_data_vld_lsb 0
+#define xd_p_reg_dca_read_update	0xA173
+#define	reg_dca_read_update_pos 1
+#define	reg_dca_read_update_len 1
+#define	reg_dca_read_update_lsb 0
+#define xd_r_reg_dca_data_re_5_0	0xA173
+#define	reg_dca_data_re_5_0_pos 2
+#define	reg_dca_data_re_5_0_len 6
+#define	reg_dca_data_re_5_0_lsb 0
+#define xd_r_reg_dca_data_re_10_6	0xA174
+#define	reg_dca_data_re_10_6_pos 0
+#define	reg_dca_data_re_10_6_len 5
+#define	reg_dca_data_re_10_6_lsb 6
+#define xd_r_reg_dca_data_im_7_0	0xA175
+#define	reg_dca_data_im_7_0_pos 0
+#define	reg_dca_data_im_7_0_len 8
+#define	reg_dca_data_im_7_0_lsb 0
+#define xd_r_reg_dca_data_im_10_8	0xA176
+#define	reg_dca_data_im_10_8_pos 0
+#define	reg_dca_data_im_10_8_len 3
+#define	reg_dca_data_im_10_8_lsb 8
+#define xd_r_reg_dca_data_h2_7_0	0xA178
+#define	reg_dca_data_h2_7_0_pos 0
+#define	reg_dca_data_h2_7_0_len 8
+#define	reg_dca_data_h2_7_0_lsb 0
+#define xd_r_reg_dca_data_h2_9_8	0xA179
+#define	reg_dca_data_h2_9_8_pos 0
+#define	reg_dca_data_h2_9_8_len 2
+#define	reg_dca_data_h2_9_8_lsb 8
+#define xd_p_reg_f_adc_7_0	0xA180
+#define	reg_f_adc_7_0_pos 0
+#define	reg_f_adc_7_0_len 8
+#define	reg_f_adc_7_0_lsb 0
+#define xd_p_reg_f_adc_15_8	0xA181
+#define	reg_f_adc_15_8_pos 0
+#define	reg_f_adc_15_8_len 8
+#define	reg_f_adc_15_8_lsb 8
+#define xd_p_reg_f_adc_23_16	0xA182
+#define	reg_f_adc_23_16_pos 0
+#define	reg_f_adc_23_16_len 8
+#define	reg_f_adc_23_16_lsb 16
+#define xd_r_intp_mu_7_0	0xA190
+#define	intp_mu_7_0_pos 0
+#define	intp_mu_7_0_len 8
+#define	intp_mu_7_0_lsb 0
+#define xd_r_intp_mu_15_8	0xA191
+#define	intp_mu_15_8_pos 0
+#define	intp_mu_15_8_len 8
+#define	intp_mu_15_8_lsb 8
+#define xd_r_intp_mu_19_16	0xA192
+#define	intp_mu_19_16_pos 0
+#define	intp_mu_19_16_len 4
+#define	intp_mu_19_16_lsb 16
+#define xd_p_reg_agc_rst	0xA1A0
+#define	reg_agc_rst_pos 0
+#define	reg_agc_rst_len 1
+#define	reg_agc_rst_lsb 0
+#define xd_p_rf_agc_en	0xA1A0
+#define	rf_agc_en_pos 1
+#define	rf_agc_en_len 1
+#define	rf_agc_en_lsb 0
+#define xd_p_rf_agc_dis	0xA1A0
+#define	rf_agc_dis_pos 2
+#define	rf_agc_dis_len 1
+#define	rf_agc_dis_lsb 0
+#define xd_p_if_agc_rst	0xA1A0
+#define	if_agc_rst_pos 3
+#define	if_agc_rst_len 1
+#define	if_agc_rst_lsb 0
+#define xd_p_if_agc_en	0xA1A0
+#define	if_agc_en_pos 4
+#define	if_agc_en_len 1
+#define	if_agc_en_lsb 0
+#define xd_p_if_agc_dis	0xA1A0
+#define	if_agc_dis_pos 5
+#define	if_agc_dis_len 1
+#define	if_agc_dis_lsb 0
+#define xd_p_agc_lock	0xA1A0
+#define	agc_lock_pos 6
+#define	agc_lock_len 1
+#define	agc_lock_lsb 0
+#define xd_p_reg_tinr_rst	0xA1A1
+#define	reg_tinr_rst_pos 0
+#define	reg_tinr_rst_len 1
+#define	reg_tinr_rst_lsb 0
+#define xd_p_reg_tinr_en	0xA1A1
+#define	reg_tinr_en_pos 1
+#define	reg_tinr_en_len 1
+#define	reg_tinr_en_lsb 0
+#define xd_p_reg_ccifs_en	0xA1A2
+#define	reg_ccifs_en_pos 0
+#define	reg_ccifs_en_len 1
+#define	reg_ccifs_en_lsb 0
+#define xd_p_reg_ccifs_dis	0xA1A2
+#define	reg_ccifs_dis_pos 1
+#define	reg_ccifs_dis_len 1
+#define	reg_ccifs_dis_lsb 0
+#define xd_p_reg_ccifs_rst	0xA1A2
+#define	reg_ccifs_rst_pos 2
+#define	reg_ccifs_rst_len 1
+#define	reg_ccifs_rst_lsb 0
+#define xd_p_reg_ccifs_byp	0xA1A2
+#define	reg_ccifs_byp_pos 3
+#define	reg_ccifs_byp_len 1
+#define	reg_ccifs_byp_lsb 0
+#define xd_p_reg_ccif_en	0xA1A3
+#define	reg_ccif_en_pos 0
+#define	reg_ccif_en_len 1
+#define	reg_ccif_en_lsb 0
+#define xd_p_reg_ccif_dis	0xA1A3
+#define	reg_ccif_dis_pos 1
+#define	reg_ccif_dis_len 1
+#define	reg_ccif_dis_lsb 0
+#define xd_p_reg_ccif_rst	0xA1A3
+#define	reg_ccif_rst_pos 2
+#define	reg_ccif_rst_len 1
+#define	reg_ccif_rst_lsb 0
+#define xd_p_reg_ccif_byp	0xA1A3
+#define	reg_ccif_byp_pos 3
+#define	reg_ccif_byp_len 1
+#define	reg_ccif_byp_lsb 0
+#define xd_p_dagc1_rst	0xA1A4
+#define	dagc1_rst_pos 0
+#define	dagc1_rst_len 1
+#define	dagc1_rst_lsb 0
+#define xd_p_dagc1_en	0xA1A4
+#define	dagc1_en_pos 1
+#define	dagc1_en_len 1
+#define	dagc1_en_lsb 0
+#define xd_p_dagc1_mode	0xA1A4
+#define	dagc1_mode_pos 2
+#define	dagc1_mode_len 2
+#define	dagc1_mode_lsb 0
+#define xd_p_dagc1_done	0xA1A4
+#define	dagc1_done_pos 4
+#define	dagc1_done_len 1
+#define	dagc1_done_lsb 0
+#define xd_p_ccid_rst	0xA1A5
+#define	ccid_rst_pos 0
+#define	ccid_rst_len 1
+#define	ccid_rst_lsb 0
+#define xd_p_ccid_en	0xA1A5
+#define	ccid_en_pos 1
+#define	ccid_en_len 1
+#define	ccid_en_lsb 0
+#define xd_p_ccid_mode	0xA1A5
+#define	ccid_mode_pos 2
+#define	ccid_mode_len 2
+#define	ccid_mode_lsb 0
+#define xd_p_ccid_done	0xA1A5
+#define	ccid_done_pos 4
+#define	ccid_done_len 1
+#define	ccid_done_lsb 0
+#define xd_r_ccid_deted	0xA1A5
+#define	ccid_deted_pos 5
+#define	ccid_deted_len 1
+#define	ccid_deted_lsb 0
+#define xd_p_ccid2_en	0xA1A5
+#define	ccid2_en_pos 6
+#define	ccid2_en_len 1
+#define	ccid2_en_lsb 0
+#define xd_p_ccid2_done	0xA1A5
+#define	ccid2_done_pos 7
+#define	ccid2_done_len 1
+#define	ccid2_done_lsb 0
+#define xd_p_reg_bfs_en	0xA1A6
+#define	reg_bfs_en_pos 0
+#define	reg_bfs_en_len 1
+#define	reg_bfs_en_lsb 0
+#define xd_p_reg_bfs_dis	0xA1A6
+#define	reg_bfs_dis_pos 1
+#define	reg_bfs_dis_len 1
+#define	reg_bfs_dis_lsb 0
+#define xd_p_reg_bfs_rst	0xA1A6
+#define	reg_bfs_rst_pos 2
+#define	reg_bfs_rst_len 1
+#define	reg_bfs_rst_lsb 0
+#define xd_p_reg_bfs_byp	0xA1A6
+#define	reg_bfs_byp_pos 3
+#define	reg_bfs_byp_len 1
+#define	reg_bfs_byp_lsb 0
+#define xd_p_reg_antif_en	0xA1A7
+#define	reg_antif_en_pos 0
+#define	reg_antif_en_len 1
+#define	reg_antif_en_lsb 0
+#define xd_p_reg_antif_dis	0xA1A7
+#define	reg_antif_dis_pos 1
+#define	reg_antif_dis_len 1
+#define	reg_antif_dis_lsb 0
+#define xd_p_reg_antif_rst	0xA1A7
+#define	reg_antif_rst_pos 2
+#define	reg_antif_rst_len 1
+#define	reg_antif_rst_lsb 0
+#define xd_p_reg_antif_byp	0xA1A7
+#define	reg_antif_byp_pos 3
+#define	reg_antif_byp_len 1
+#define	reg_antif_byp_lsb 0
+#define xd_p_intp_en	0xA1A8
+#define	intp_en_pos 0
+#define	intp_en_len 1
+#define	intp_en_lsb 0
+#define xd_p_intp_dis	0xA1A8
+#define	intp_dis_pos 1
+#define	intp_dis_len 1
+#define	intp_dis_lsb 0
+#define xd_p_intp_rst	0xA1A8
+#define	intp_rst_pos 2
+#define	intp_rst_len 1
+#define	intp_rst_lsb 0
+#define xd_p_intp_byp	0xA1A8
+#define	intp_byp_pos 3
+#define	intp_byp_len 1
+#define	intp_byp_lsb 0
+#define xd_p_reg_acif_en	0xA1A9
+#define	reg_acif_en_pos 0
+#define	reg_acif_en_len 1
+#define	reg_acif_en_lsb 0
+#define xd_p_reg_acif_dis	0xA1A9
+#define	reg_acif_dis_pos 1
+#define	reg_acif_dis_len 1
+#define	reg_acif_dis_lsb 0
+#define xd_p_reg_acif_rst	0xA1A9
+#define	reg_acif_rst_pos 2
+#define	reg_acif_rst_len 1
+#define	reg_acif_rst_lsb 0
+#define xd_p_reg_acif_byp	0xA1A9
+#define	reg_acif_byp_pos 3
+#define	reg_acif_byp_len 1
+#define	reg_acif_byp_lsb 0
+#define xd_p_reg_acif_sync_mode	0xA1A9
+#define	reg_acif_sync_mode_pos 4
+#define	reg_acif_sync_mode_len 1
+#define	reg_acif_sync_mode_lsb 0
+#define xd_p_dagc2_rst	0xA1AA
+#define	dagc2_rst_pos 0
+#define	dagc2_rst_len 1
+#define	dagc2_rst_lsb 0
+#define xd_p_dagc2_en	0xA1AA
+#define	dagc2_en_pos 1
+#define	dagc2_en_len 1
+#define	dagc2_en_lsb 0
+#define xd_p_dagc2_mode	0xA1AA
+#define	dagc2_mode_pos 2
+#define	dagc2_mode_len 2
+#define	dagc2_mode_lsb 0
+#define xd_p_dagc2_done	0xA1AA
+#define	dagc2_done_pos 4
+#define	dagc2_done_len 1
+#define	dagc2_done_lsb 0
+#define xd_p_reg_dca_en	0xA1AB
+#define	reg_dca_en_pos 0
+#define	reg_dca_en_len 1
+#define	reg_dca_en_lsb 0
+#define xd_p_dagc2_accumulate_num_2k_7_0	0xA1C0
+#define	dagc2_accumulate_num_2k_7_0_pos 0
+#define	dagc2_accumulate_num_2k_7_0_len 8
+#define	dagc2_accumulate_num_2k_7_0_lsb 0
+#define xd_p_dagc2_accumulate_num_2k_12_8	0xA1C1
+#define	dagc2_accumulate_num_2k_12_8_pos 0
+#define	dagc2_accumulate_num_2k_12_8_len 5
+#define	dagc2_accumulate_num_2k_12_8_lsb 8
+#define xd_p_dagc2_accumulate_num_8k_7_0	0xA1C2
+#define	dagc2_accumulate_num_8k_7_0_pos 0
+#define	dagc2_accumulate_num_8k_7_0_len 8
+#define	dagc2_accumulate_num_8k_7_0_lsb 0
+#define xd_p_dagc2_accumulate_num_8k_12_8	0xA1C3
+#define	dagc2_accumulate_num_8k_12_8_pos 0
+#define	dagc2_accumulate_num_8k_12_8_len 5
+#define	dagc2_accumulate_num_8k_12_8_lsb 8
+#define xd_p_dagc2_desired_level_2_0	0xA1C3
+#define	dagc2_desired_level_2_0_pos 5
+#define	dagc2_desired_level_2_0_len 3
+#define	dagc2_desired_level_2_0_lsb 0
+#define xd_p_dagc2_desired_level_8_3	0xA1C4
+#define	dagc2_desired_level_8_3_pos 0
+#define	dagc2_desired_level_8_3_len 6
+#define	dagc2_desired_level_8_3_lsb 3
+#define xd_p_dagc2_apply_delay	0xA1C5
+#define	dagc2_apply_delay_pos 0
+#define	dagc2_apply_delay_len 7
+#define	dagc2_apply_delay_lsb 0
+#define xd_p_dagc2_bypass_scale_ctl	0xA1C6
+#define	dagc2_bypass_scale_ctl_pos 0
+#define	dagc2_bypass_scale_ctl_len 3
+#define	dagc2_bypass_scale_ctl_lsb 0
+#define xd_p_dagc2_programmable_shift1	0xA1C7
+#define	dagc2_programmable_shift1_pos 0
+#define	dagc2_programmable_shift1_len 8
+#define	dagc2_programmable_shift1_lsb 0
+#define xd_p_dagc2_programmable_shift2	0xA1C8
+#define	dagc2_programmable_shift2_pos 0
+#define	dagc2_programmable_shift2_len 8
+#define	dagc2_programmable_shift2_lsb 0
+#define xd_p_reg_dagc2_in_sat_cnt_7_0	0xA1C9
+#define	reg_dagc2_in_sat_cnt_7_0_pos 0
+#define	reg_dagc2_in_sat_cnt_7_0_len 8
+#define	reg_dagc2_in_sat_cnt_7_0_lsb 0
+#define xd_p_reg_dagc2_in_sat_cnt_15_8	0xA1CA
+#define	reg_dagc2_in_sat_cnt_15_8_pos 0
+#define	reg_dagc2_in_sat_cnt_15_8_len 8
+#define	reg_dagc2_in_sat_cnt_15_8_lsb 8
+#define xd_p_reg_dagc2_in_sat_cnt_23_16	0xA1CB
+#define	reg_dagc2_in_sat_cnt_23_16_pos 0
+#define	reg_dagc2_in_sat_cnt_23_16_len 8
+#define	reg_dagc2_in_sat_cnt_23_16_lsb 16
+#define xd_p_reg_dagc2_in_sat_cnt_31_24	0xA1CC
+#define	reg_dagc2_in_sat_cnt_31_24_pos 0
+#define	reg_dagc2_in_sat_cnt_31_24_len 8
+#define	reg_dagc2_in_sat_cnt_31_24_lsb 24
+#define xd_p_reg_dagc2_out_sat_cnt_7_0	0xA1CD
+#define	reg_dagc2_out_sat_cnt_7_0_pos 0
+#define	reg_dagc2_out_sat_cnt_7_0_len 8
+#define	reg_dagc2_out_sat_cnt_7_0_lsb 0
+#define xd_p_reg_dagc2_out_sat_cnt_15_8	0xA1CE
+#define	reg_dagc2_out_sat_cnt_15_8_pos 0
+#define	reg_dagc2_out_sat_cnt_15_8_len 8
+#define	reg_dagc2_out_sat_cnt_15_8_lsb 8
+#define xd_p_reg_dagc2_out_sat_cnt_23_16	0xA1CF
+#define	reg_dagc2_out_sat_cnt_23_16_pos 0
+#define	reg_dagc2_out_sat_cnt_23_16_len 8
+#define	reg_dagc2_out_sat_cnt_23_16_lsb 16
+#define xd_p_reg_dagc2_out_sat_cnt_31_24	0xA1D0
+#define	reg_dagc2_out_sat_cnt_31_24_pos 0
+#define	reg_dagc2_out_sat_cnt_31_24_len 8
+#define	reg_dagc2_out_sat_cnt_31_24_lsb 24
+#define xd_r_dagc2_multiplier_7_0	0xA1D6
+#define	dagc2_multiplier_7_0_pos 0
+#define	dagc2_multiplier_7_0_len 8
+#define	dagc2_multiplier_7_0_lsb 0
+#define xd_r_dagc2_multiplier_15_8	0xA1D7
+#define	dagc2_multiplier_15_8_pos 0
+#define	dagc2_multiplier_15_8_len 8
+#define	dagc2_multiplier_15_8_lsb 8
+#define xd_r_dagc2_right_shift_bits	0xA1D8
+#define	dagc2_right_shift_bits_pos 0
+#define	dagc2_right_shift_bits_len 4
+#define	dagc2_right_shift_bits_lsb 0
+#define xd_p_cfoe_NS_coeff1_7_0	0xA200
+#define	cfoe_NS_coeff1_7_0_pos 0
+#define	cfoe_NS_coeff1_7_0_len 8
+#define	cfoe_NS_coeff1_7_0_lsb 0
+#define xd_p_cfoe_NS_coeff1_15_8	0xA201
+#define	cfoe_NS_coeff1_15_8_pos 0
+#define	cfoe_NS_coeff1_15_8_len 8
+#define	cfoe_NS_coeff1_15_8_lsb 8
+#define xd_p_cfoe_NS_coeff1_23_16	0xA202
+#define	cfoe_NS_coeff1_23_16_pos 0
+#define	cfoe_NS_coeff1_23_16_len 8
+#define	cfoe_NS_coeff1_23_16_lsb 16
+#define xd_p_cfoe_NS_coeff1_25_24	0xA203
+#define	cfoe_NS_coeff1_25_24_pos 0
+#define	cfoe_NS_coeff1_25_24_len 2
+#define	cfoe_NS_coeff1_25_24_lsb 24
+#define xd_p_cfoe_NS_coeff2_5_0	0xA203
+#define	cfoe_NS_coeff2_5_0_pos 2
+#define	cfoe_NS_coeff2_5_0_len 6
+#define	cfoe_NS_coeff2_5_0_lsb 0
+#define xd_p_cfoe_NS_coeff2_13_6	0xA204
+#define	cfoe_NS_coeff2_13_6_pos 0
+#define	cfoe_NS_coeff2_13_6_len 8
+#define	cfoe_NS_coeff2_13_6_lsb 6
+#define xd_p_cfoe_NS_coeff2_21_14	0xA205
+#define	cfoe_NS_coeff2_21_14_pos 0
+#define	cfoe_NS_coeff2_21_14_len 8
+#define	cfoe_NS_coeff2_21_14_lsb 14
+#define xd_p_cfoe_NS_coeff2_24_22	0xA206
+#define	cfoe_NS_coeff2_24_22_pos 0
+#define	cfoe_NS_coeff2_24_22_len 3
+#define	cfoe_NS_coeff2_24_22_lsb 22
+#define xd_p_cfoe_lf_c1_4_0	0xA206
+#define	cfoe_lf_c1_4_0_pos 3
+#define	cfoe_lf_c1_4_0_len 5
+#define	cfoe_lf_c1_4_0_lsb 0
+#define xd_p_cfoe_lf_c1_12_5	0xA207
+#define	cfoe_lf_c1_12_5_pos 0
+#define	cfoe_lf_c1_12_5_len 8
+#define	cfoe_lf_c1_12_5_lsb 5
+#define xd_p_cfoe_lf_c1_20_13	0xA208
+#define	cfoe_lf_c1_20_13_pos 0
+#define	cfoe_lf_c1_20_13_len 8
+#define	cfoe_lf_c1_20_13_lsb 13
+#define xd_p_cfoe_lf_c1_25_21	0xA209
+#define	cfoe_lf_c1_25_21_pos 0
+#define	cfoe_lf_c1_25_21_len 5
+#define	cfoe_lf_c1_25_21_lsb 21
+#define xd_p_cfoe_lf_c2_2_0	0xA209
+#define	cfoe_lf_c2_2_0_pos 5
+#define	cfoe_lf_c2_2_0_len 3
+#define	cfoe_lf_c2_2_0_lsb 0
+#define xd_p_cfoe_lf_c2_10_3	0xA20A
+#define	cfoe_lf_c2_10_3_pos 0
+#define	cfoe_lf_c2_10_3_len 8
+#define	cfoe_lf_c2_10_3_lsb 3
+#define xd_p_cfoe_lf_c2_18_11	0xA20B
+#define	cfoe_lf_c2_18_11_pos 0
+#define	cfoe_lf_c2_18_11_len 8
+#define	cfoe_lf_c2_18_11_lsb 11
+#define xd_p_cfoe_lf_c2_25_19	0xA20C
+#define	cfoe_lf_c2_25_19_pos 0
+#define	cfoe_lf_c2_25_19_len 7
+#define	cfoe_lf_c2_25_19_lsb 19
+#define xd_p_cfoe_ifod_7_0	0xA20D
+#define	cfoe_ifod_7_0_pos 0
+#define	cfoe_ifod_7_0_len 8
+#define	cfoe_ifod_7_0_lsb 0
+#define xd_p_cfoe_ifod_10_8	0xA20E
+#define	cfoe_ifod_10_8_pos 0
+#define	cfoe_ifod_10_8_len 3
+#define	cfoe_ifod_10_8_lsb 8
+#define xd_p_cfoe_Divg_ctr_th	0xA20E
+#define	cfoe_Divg_ctr_th_pos 4
+#define	cfoe_Divg_ctr_th_len 4
+#define	cfoe_Divg_ctr_th_lsb 0
+#define xd_p_cfoe_FOT_divg_th	0xA20F
+#define	cfoe_FOT_divg_th_pos 0
+#define	cfoe_FOT_divg_th_len 8
+#define	cfoe_FOT_divg_th_lsb 0
+#define xd_p_cfoe_FOT_cnvg_th	0xA210
+#define	cfoe_FOT_cnvg_th_pos 0
+#define	cfoe_FOT_cnvg_th_len 8
+#define	cfoe_FOT_cnvg_th_lsb 0
+#define xd_p_reg_cfoe_offset_7_0	0xA211
+#define	reg_cfoe_offset_7_0_pos 0
+#define	reg_cfoe_offset_7_0_len 8
+#define	reg_cfoe_offset_7_0_lsb 0
+#define xd_p_reg_cfoe_offset_9_8	0xA212
+#define	reg_cfoe_offset_9_8_pos 0
+#define	reg_cfoe_offset_9_8_len 2
+#define	reg_cfoe_offset_9_8_lsb 8
+#define xd_p_reg_cfoe_ifoe_sign_corr	0xA212
+#define	reg_cfoe_ifoe_sign_corr_pos 2
+#define	reg_cfoe_ifoe_sign_corr_len 1
+#define	reg_cfoe_ifoe_sign_corr_lsb 0
+#define xd_r_cfoe_fot_LF_output_7_0	0xA218
+#define	cfoe_fot_LF_output_7_0_pos 0
+#define	cfoe_fot_LF_output_7_0_len 8
+#define	cfoe_fot_LF_output_7_0_lsb 0
+#define xd_r_cfoe_fot_LF_output_15_8	0xA219
+#define	cfoe_fot_LF_output_15_8_pos 0
+#define	cfoe_fot_LF_output_15_8_len 8
+#define	cfoe_fot_LF_output_15_8_lsb 8
+#define xd_r_cfoe_ifo_metric_7_0	0xA21A
+#define	cfoe_ifo_metric_7_0_pos 0
+#define	cfoe_ifo_metric_7_0_len 8
+#define	cfoe_ifo_metric_7_0_lsb 0
+#define xd_r_cfoe_ifo_metric_15_8	0xA21B
+#define	cfoe_ifo_metric_15_8_pos 0
+#define	cfoe_ifo_metric_15_8_len 8
+#define	cfoe_ifo_metric_15_8_lsb 8
+#define xd_r_cfoe_ifo_metric_23_16	0xA21C
+#define	cfoe_ifo_metric_23_16_pos 0
+#define	cfoe_ifo_metric_23_16_len 8
+#define	cfoe_ifo_metric_23_16_lsb 16
+#define xd_p_ste_Nu	0xA220
+#define	ste_Nu_pos 0
+#define	ste_Nu_len 2
+#define	ste_Nu_lsb 0
+#define xd_p_ste_GI	0xA220
+#define	ste_GI_pos 2
+#define	ste_GI_len 3
+#define	ste_GI_lsb 0
+#define xd_p_ste_symbol_num	0xA221
+#define	ste_symbol_num_pos 0
+#define	ste_symbol_num_len 2
+#define	ste_symbol_num_lsb 0
+#define xd_p_ste_sample_num	0xA221
+#define	ste_sample_num_pos 2
+#define	ste_sample_num_len 2
+#define	ste_sample_num_lsb 0
+#define xd_p_reg_ste_buf_en	0xA221
+#define	reg_ste_buf_en_pos 7
+#define	reg_ste_buf_en_len 1
+#define	reg_ste_buf_en_lsb 0
+#define xd_p_ste_FFT_offset_7_0	0xA222
+#define	ste_FFT_offset_7_0_pos 0
+#define	ste_FFT_offset_7_0_len 8
+#define	ste_FFT_offset_7_0_lsb 0
+#define xd_p_ste_FFT_offset_11_8	0xA223
+#define	ste_FFT_offset_11_8_pos 0
+#define	ste_FFT_offset_11_8_len 4
+#define	ste_FFT_offset_11_8_lsb 8
+#define xd_p_reg_ste_tstmod	0xA223
+#define	reg_ste_tstmod_pos 5
+#define	reg_ste_tstmod_len 1
+#define	reg_ste_tstmod_lsb 0
+#define xd_p_ste_adv_start_7_0	0xA224
+#define	ste_adv_start_7_0_pos 0
+#define	ste_adv_start_7_0_len 8
+#define	ste_adv_start_7_0_lsb 0
+#define xd_p_ste_adv_start_10_8	0xA225
+#define	ste_adv_start_10_8_pos 0
+#define	ste_adv_start_10_8_len 3
+#define	ste_adv_start_10_8_lsb 8
+#define xd_p_ste_adv_stop	0xA226
+#define	ste_adv_stop_pos 0
+#define	ste_adv_stop_len 8
+#define	ste_adv_stop_lsb 0
+#define xd_r_ste_P_value_7_0	0xA228
+#define	ste_P_value_7_0_pos 0
+#define	ste_P_value_7_0_len 8
+#define	ste_P_value_7_0_lsb 0
+#define xd_r_ste_P_value_10_8	0xA229
+#define	ste_P_value_10_8_pos 0
+#define	ste_P_value_10_8_len 3
+#define	ste_P_value_10_8_lsb 8
+#define xd_r_ste_M_value_7_0	0xA22A
+#define	ste_M_value_7_0_pos 0
+#define	ste_M_value_7_0_len 8
+#define	ste_M_value_7_0_lsb 0
+#define xd_r_ste_M_value_10_8	0xA22B
+#define	ste_M_value_10_8_pos 0
+#define	ste_M_value_10_8_len 3
+#define	ste_M_value_10_8_lsb 8
+#define xd_r_ste_H1	0xA22C
+#define	ste_H1_pos 0
+#define	ste_H1_len 7
+#define	ste_H1_lsb 0
+#define xd_r_ste_H2	0xA22D
+#define	ste_H2_pos 0
+#define	ste_H2_len 7
+#define	ste_H2_lsb 0
+#define xd_r_ste_H3	0xA22E
+#define	ste_H3_pos 0
+#define	ste_H3_len 7
+#define	ste_H3_lsb 0
+#define xd_r_ste_H4	0xA22F
+#define	ste_H4_pos 0
+#define	ste_H4_len 7
+#define	ste_H4_lsb 0
+#define xd_r_ste_Corr_value_I_7_0	0xA230
+#define	ste_Corr_value_I_7_0_pos 0
+#define	ste_Corr_value_I_7_0_len 8
+#define	ste_Corr_value_I_7_0_lsb 0
+#define xd_r_ste_Corr_value_I_15_8	0xA231
+#define	ste_Corr_value_I_15_8_pos 0
+#define	ste_Corr_value_I_15_8_len 8
+#define	ste_Corr_value_I_15_8_lsb 8
+#define xd_r_ste_Corr_value_I_23_16	0xA232
+#define	ste_Corr_value_I_23_16_pos 0
+#define	ste_Corr_value_I_23_16_len 8
+#define	ste_Corr_value_I_23_16_lsb 16
+#define xd_r_ste_Corr_value_I_27_24	0xA233
+#define	ste_Corr_value_I_27_24_pos 0
+#define	ste_Corr_value_I_27_24_len 4
+#define	ste_Corr_value_I_27_24_lsb 24
+#define xd_r_ste_Corr_value_Q_7_0	0xA234
+#define	ste_Corr_value_Q_7_0_pos 0
+#define	ste_Corr_value_Q_7_0_len 8
+#define	ste_Corr_value_Q_7_0_lsb 0
+#define xd_r_ste_Corr_value_Q_15_8	0xA235
+#define	ste_Corr_value_Q_15_8_pos 0
+#define	ste_Corr_value_Q_15_8_len 8
+#define	ste_Corr_value_Q_15_8_lsb 8
+#define xd_r_ste_Corr_value_Q_23_16	0xA236
+#define	ste_Corr_value_Q_23_16_pos 0
+#define	ste_Corr_value_Q_23_16_len 8
+#define	ste_Corr_value_Q_23_16_lsb 16
+#define xd_r_ste_Corr_value_Q_27_24	0xA237
+#define	ste_Corr_value_Q_27_24_pos 0
+#define	ste_Corr_value_Q_27_24_len 4
+#define	ste_Corr_value_Q_27_24_lsb 24
+#define xd_r_ste_J_num_7_0	0xA238
+#define	ste_J_num_7_0_pos 0
+#define	ste_J_num_7_0_len 8
+#define	ste_J_num_7_0_lsb 0
+#define xd_r_ste_J_num_15_8	0xA239
+#define	ste_J_num_15_8_pos 0
+#define	ste_J_num_15_8_len 8
+#define	ste_J_num_15_8_lsb 8
+#define xd_r_ste_J_num_23_16	0xA23A
+#define	ste_J_num_23_16_pos 0
+#define	ste_J_num_23_16_len 8
+#define	ste_J_num_23_16_lsb 16
+#define xd_r_ste_J_num_31_24	0xA23B
+#define	ste_J_num_31_24_pos 0
+#define	ste_J_num_31_24_len 8
+#define	ste_J_num_31_24_lsb 24
+#define xd_r_ste_J_den_7_0	0xA23C
+#define	ste_J_den_7_0_pos 0
+#define	ste_J_den_7_0_len 8
+#define	ste_J_den_7_0_lsb 0
+#define xd_r_ste_J_den_15_8	0xA23D
+#define	ste_J_den_15_8_pos 0
+#define	ste_J_den_15_8_len 8
+#define	ste_J_den_15_8_lsb 8
+#define xd_r_ste_J_den_18_16	0xA23E
+#define	ste_J_den_18_16_pos 0
+#define	ste_J_den_18_16_len 3
+#define	ste_J_den_18_16_lsb 16
+#define xd_r_ste_Beacon_Indicator	0xA23E
+#define	ste_Beacon_Indicator_pos 4
+#define	ste_Beacon_Indicator_len 1
+#define	ste_Beacon_Indicator_lsb 0
+#define xd_r_tpsd_Frame_Num	0xA250
+#define	tpsd_Frame_Num_pos 0
+#define	tpsd_Frame_Num_len 2
+#define	tpsd_Frame_Num_lsb 0
+#define xd_r_tpsd_Constel	0xA250
+#define	tpsd_Constel_pos 2
+#define	tpsd_Constel_len 2
+#define	tpsd_Constel_lsb 0
+#define xd_r_tpsd_GI	0xA250
+#define	tpsd_GI_pos 4
+#define	tpsd_GI_len 2
+#define	tpsd_GI_lsb 0
+#define xd_r_tpsd_Mode	0xA250
+#define	tpsd_Mode_pos 6
+#define	tpsd_Mode_len 2
+#define	tpsd_Mode_lsb 0
+#define xd_r_tpsd_CR_HP	0xA251
+#define	tpsd_CR_HP_pos 0
+#define	tpsd_CR_HP_len 3
+#define	tpsd_CR_HP_lsb 0
+#define xd_r_tpsd_CR_LP	0xA251
+#define	tpsd_CR_LP_pos 3
+#define	tpsd_CR_LP_len 3
+#define	tpsd_CR_LP_lsb 0
+#define xd_r_tpsd_Hie	0xA252
+#define	tpsd_Hie_pos 0
+#define	tpsd_Hie_len 3
+#define	tpsd_Hie_lsb 0
+#define xd_r_tpsd_Res_Bits	0xA252
+#define	tpsd_Res_Bits_pos 3
+#define	tpsd_Res_Bits_len 5
+#define	tpsd_Res_Bits_lsb 0
+#define xd_r_tpsd_Res_Bits_0	0xA253
+#define	tpsd_Res_Bits_0_pos 0
+#define	tpsd_Res_Bits_0_len 1
+#define	tpsd_Res_Bits_0_lsb 0
+#define xd_r_tpsd_LengthInd	0xA253
+#define	tpsd_LengthInd_pos 1
+#define	tpsd_LengthInd_len 6
+#define	tpsd_LengthInd_lsb 0
+#define xd_r_tpsd_Cell_Id_7_0	0xA254
+#define	tpsd_Cell_Id_7_0_pos 0
+#define	tpsd_Cell_Id_7_0_len 8
+#define	tpsd_Cell_Id_7_0_lsb 0
+#define xd_r_tpsd_Cell_Id_15_8	0xA255
+#define	tpsd_Cell_Id_15_8_pos 0
+#define	tpsd_Cell_Id_15_8_len 8
+#define	tpsd_Cell_Id_15_8_lsb 0
+#define xd_p_reg_fft_mask_tone0_7_0	0xA260
+#define	reg_fft_mask_tone0_7_0_pos 0
+#define	reg_fft_mask_tone0_7_0_len 8
+#define	reg_fft_mask_tone0_7_0_lsb 0
+#define xd_p_reg_fft_mask_tone0_12_8	0xA261
+#define	reg_fft_mask_tone0_12_8_pos 0
+#define	reg_fft_mask_tone0_12_8_len 5
+#define	reg_fft_mask_tone0_12_8_lsb 8
+#define xd_p_reg_fft_mask_tone1_7_0	0xA262
+#define	reg_fft_mask_tone1_7_0_pos 0
+#define	reg_fft_mask_tone1_7_0_len 8
+#define	reg_fft_mask_tone1_7_0_lsb 0
+#define xd_p_reg_fft_mask_tone1_12_8	0xA263
+#define	reg_fft_mask_tone1_12_8_pos 0
+#define	reg_fft_mask_tone1_12_8_len 5
+#define	reg_fft_mask_tone1_12_8_lsb 8
+#define xd_p_reg_fft_mask_tone2_7_0	0xA264
+#define	reg_fft_mask_tone2_7_0_pos 0
+#define	reg_fft_mask_tone2_7_0_len 8
+#define	reg_fft_mask_tone2_7_0_lsb 0
+#define xd_p_reg_fft_mask_tone2_12_8	0xA265
+#define	reg_fft_mask_tone2_12_8_pos 0
+#define	reg_fft_mask_tone2_12_8_len 5
+#define	reg_fft_mask_tone2_12_8_lsb 8
+#define xd_p_reg_fft_mask_tone3_7_0	0xA266
+#define	reg_fft_mask_tone3_7_0_pos 0
+#define	reg_fft_mask_tone3_7_0_len 8
+#define	reg_fft_mask_tone3_7_0_lsb 0
+#define xd_p_reg_fft_mask_tone3_12_8	0xA267
+#define	reg_fft_mask_tone3_12_8_pos 0
+#define	reg_fft_mask_tone3_12_8_len 5
+#define	reg_fft_mask_tone3_12_8_lsb 8
+#define xd_p_reg_fft_mask_from0_7_0	0xA268
+#define	reg_fft_mask_from0_7_0_pos 0
+#define	reg_fft_mask_from0_7_0_len 8
+#define	reg_fft_mask_from0_7_0_lsb 0
+#define xd_p_reg_fft_mask_from0_12_8	0xA269
+#define	reg_fft_mask_from0_12_8_pos 0
+#define	reg_fft_mask_from0_12_8_len 5
+#define	reg_fft_mask_from0_12_8_lsb 8
+#define xd_p_reg_fft_mask_to0_7_0	0xA26A
+#define	reg_fft_mask_to0_7_0_pos 0
+#define	reg_fft_mask_to0_7_0_len 8
+#define	reg_fft_mask_to0_7_0_lsb 0
+#define xd_p_reg_fft_mask_to0_12_8	0xA26B
+#define	reg_fft_mask_to0_12_8_pos 0
+#define	reg_fft_mask_to0_12_8_len 5
+#define	reg_fft_mask_to0_12_8_lsb 8
+#define xd_p_reg_fft_mask_from1_7_0	0xA26C
+#define	reg_fft_mask_from1_7_0_pos 0
+#define	reg_fft_mask_from1_7_0_len 8
+#define	reg_fft_mask_from1_7_0_lsb 0
+#define xd_p_reg_fft_mask_from1_12_8	0xA26D
+#define	reg_fft_mask_from1_12_8_pos 0
+#define	reg_fft_mask_from1_12_8_len 5
+#define	reg_fft_mask_from1_12_8_lsb 8
+#define xd_p_reg_fft_mask_to1_7_0	0xA26E
+#define	reg_fft_mask_to1_7_0_pos 0
+#define	reg_fft_mask_to1_7_0_len 8
+#define	reg_fft_mask_to1_7_0_lsb 0
+#define xd_p_reg_fft_mask_to1_12_8	0xA26F
+#define	reg_fft_mask_to1_12_8_pos 0
+#define	reg_fft_mask_to1_12_8_len 5
+#define	reg_fft_mask_to1_12_8_lsb 8
+#define xd_p_reg_cge_idx0_7_0	0xA280
+#define	reg_cge_idx0_7_0_pos 0
+#define	reg_cge_idx0_7_0_len 8
+#define	reg_cge_idx0_7_0_lsb 0
+#define xd_p_reg_cge_idx0_12_8	0xA281
+#define	reg_cge_idx0_12_8_pos 0
+#define	reg_cge_idx0_12_8_len 5
+#define	reg_cge_idx0_12_8_lsb 8
+#define xd_p_reg_cge_idx1_7_0	0xA282
+#define	reg_cge_idx1_7_0_pos 0
+#define	reg_cge_idx1_7_0_len 8
+#define	reg_cge_idx1_7_0_lsb 0
+#define xd_p_reg_cge_idx1_12_8	0xA283
+#define	reg_cge_idx1_12_8_pos 0
+#define	reg_cge_idx1_12_8_len 5
+#define	reg_cge_idx1_12_8_lsb 8
+#define xd_p_reg_cge_idx2_7_0	0xA284
+#define	reg_cge_idx2_7_0_pos 0
+#define	reg_cge_idx2_7_0_len 8
+#define	reg_cge_idx2_7_0_lsb 0
+#define xd_p_reg_cge_idx2_12_8	0xA285
+#define	reg_cge_idx2_12_8_pos 0
+#define	reg_cge_idx2_12_8_len 5
+#define	reg_cge_idx2_12_8_lsb 8
+#define xd_p_reg_cge_idx3_7_0	0xA286
+#define	reg_cge_idx3_7_0_pos 0
+#define	reg_cge_idx3_7_0_len 8
+#define	reg_cge_idx3_7_0_lsb 0
+#define xd_p_reg_cge_idx3_12_8	0xA287
+#define	reg_cge_idx3_12_8_pos 0
+#define	reg_cge_idx3_12_8_len 5
+#define	reg_cge_idx3_12_8_lsb 8
+#define xd_p_reg_cge_idx4_7_0	0xA288
+#define	reg_cge_idx4_7_0_pos 0
+#define	reg_cge_idx4_7_0_len 8
+#define	reg_cge_idx4_7_0_lsb 0
+#define xd_p_reg_cge_idx4_12_8	0xA289
+#define	reg_cge_idx4_12_8_pos 0
+#define	reg_cge_idx4_12_8_len 5
+#define	reg_cge_idx4_12_8_lsb 8
+#define xd_p_reg_cge_idx5_7_0	0xA28A
+#define	reg_cge_idx5_7_0_pos 0
+#define	reg_cge_idx5_7_0_len 8
+#define	reg_cge_idx5_7_0_lsb 0
+#define xd_p_reg_cge_idx5_12_8	0xA28B
+#define	reg_cge_idx5_12_8_pos 0
+#define	reg_cge_idx5_12_8_len 5
+#define	reg_cge_idx5_12_8_lsb 8
+#define xd_p_reg_cge_idx6_7_0	0xA28C
+#define	reg_cge_idx6_7_0_pos 0
+#define	reg_cge_idx6_7_0_len 8
+#define	reg_cge_idx6_7_0_lsb 0
+#define xd_p_reg_cge_idx6_12_8	0xA28D
+#define	reg_cge_idx6_12_8_pos 0
+#define	reg_cge_idx6_12_8_len 5
+#define	reg_cge_idx6_12_8_lsb 8
+#define xd_p_reg_cge_idx7_7_0	0xA28E
+#define	reg_cge_idx7_7_0_pos 0
+#define	reg_cge_idx7_7_0_len 8
+#define	reg_cge_idx7_7_0_lsb 0
+#define xd_p_reg_cge_idx7_12_8	0xA28F
+#define	reg_cge_idx7_12_8_pos 0
+#define	reg_cge_idx7_12_8_len 5
+#define	reg_cge_idx7_12_8_lsb 8
+#define xd_p_reg_cge_idx8_7_0	0xA290
+#define	reg_cge_idx8_7_0_pos 0
+#define	reg_cge_idx8_7_0_len 8
+#define	reg_cge_idx8_7_0_lsb 0
+#define xd_p_reg_cge_idx8_12_8	0xA291
+#define	reg_cge_idx8_12_8_pos 0
+#define	reg_cge_idx8_12_8_len 5
+#define	reg_cge_idx8_12_8_lsb 8
+#define xd_p_reg_cge_idx9_7_0	0xA292
+#define	reg_cge_idx9_7_0_pos 0
+#define	reg_cge_idx9_7_0_len 8
+#define	reg_cge_idx9_7_0_lsb 0
+#define xd_p_reg_cge_idx9_12_8	0xA293
+#define	reg_cge_idx9_12_8_pos 0
+#define	reg_cge_idx9_12_8_len 5
+#define	reg_cge_idx9_12_8_lsb 8
+#define xd_p_reg_cge_idx10_7_0	0xA294
+#define	reg_cge_idx10_7_0_pos 0
+#define	reg_cge_idx10_7_0_len 8
+#define	reg_cge_idx10_7_0_lsb 0
+#define xd_p_reg_cge_idx10_12_8	0xA295
+#define	reg_cge_idx10_12_8_pos 0
+#define	reg_cge_idx10_12_8_len 5
+#define	reg_cge_idx10_12_8_lsb 8
+#define xd_p_reg_cge_idx11_7_0	0xA296
+#define	reg_cge_idx11_7_0_pos 0
+#define	reg_cge_idx11_7_0_len 8
+#define	reg_cge_idx11_7_0_lsb 0
+#define xd_p_reg_cge_idx11_12_8	0xA297
+#define	reg_cge_idx11_12_8_pos 0
+#define	reg_cge_idx11_12_8_len 5
+#define	reg_cge_idx11_12_8_lsb 8
+#define xd_p_reg_cge_idx12_7_0	0xA298
+#define	reg_cge_idx12_7_0_pos 0
+#define	reg_cge_idx12_7_0_len 8
+#define	reg_cge_idx12_7_0_lsb 0
+#define xd_p_reg_cge_idx12_12_8	0xA299
+#define	reg_cge_idx12_12_8_pos 0
+#define	reg_cge_idx12_12_8_len 5
+#define	reg_cge_idx12_12_8_lsb 8
+#define xd_p_reg_cge_idx13_7_0	0xA29A
+#define	reg_cge_idx13_7_0_pos 0
+#define	reg_cge_idx13_7_0_len 8
+#define	reg_cge_idx13_7_0_lsb 0
+#define xd_p_reg_cge_idx13_12_8	0xA29B
+#define	reg_cge_idx13_12_8_pos 0
+#define	reg_cge_idx13_12_8_len 5
+#define	reg_cge_idx13_12_8_lsb 8
+#define xd_p_reg_cge_idx14_7_0	0xA29C
+#define	reg_cge_idx14_7_0_pos 0
+#define	reg_cge_idx14_7_0_len 8
+#define	reg_cge_idx14_7_0_lsb 0
+#define xd_p_reg_cge_idx14_12_8	0xA29D
+#define	reg_cge_idx14_12_8_pos 0
+#define	reg_cge_idx14_12_8_len 5
+#define	reg_cge_idx14_12_8_lsb 8
+#define xd_p_reg_cge_idx15_7_0	0xA29E
+#define	reg_cge_idx15_7_0_pos 0
+#define	reg_cge_idx15_7_0_len 8
+#define	reg_cge_idx15_7_0_lsb 0
+#define xd_p_reg_cge_idx15_12_8	0xA29F
+#define	reg_cge_idx15_12_8_pos 0
+#define	reg_cge_idx15_12_8_len 5
+#define	reg_cge_idx15_12_8_lsb 8
+#define xd_r_reg_fft_crc	0xA2A8
+#define	reg_fft_crc_pos 0
+#define	reg_fft_crc_len 8
+#define	reg_fft_crc_lsb 0
+#define xd_p_fd_fft_shift_max	0xA2A9
+#define	fd_fft_shift_max_pos 0
+#define	fd_fft_shift_max_len 4
+#define	fd_fft_shift_max_lsb 0
+#define xd_r_fd_fft_shift	0xA2A9
+#define	fd_fft_shift_pos 4
+#define	fd_fft_shift_len 4
+#define	fd_fft_shift_lsb 0
+#define xd_r_fd_fft_frame_num	0xA2AA
+#define	fd_fft_frame_num_pos 0
+#define	fd_fft_frame_num_len 2
+#define	fd_fft_frame_num_lsb 0
+#define xd_r_fd_fft_symbol_count	0xA2AB
+#define	fd_fft_symbol_count_pos 0
+#define	fd_fft_symbol_count_len 7
+#define	fd_fft_symbol_count_lsb 0
+#define xd_r_reg_fft_idx_max_7_0	0xA2AC
+#define	reg_fft_idx_max_7_0_pos 0
+#define	reg_fft_idx_max_7_0_len 8
+#define	reg_fft_idx_max_7_0_lsb 0
+#define xd_r_reg_fft_idx_max_12_8	0xA2AD
+#define	reg_fft_idx_max_12_8_pos 0
+#define	reg_fft_idx_max_12_8_len 5
+#define	reg_fft_idx_max_12_8_lsb 8
+#define xd_p_reg_cge_program	0xA2AE
+#define	reg_cge_program_pos 0
+#define	reg_cge_program_len 1
+#define	reg_cge_program_lsb 0
+#define xd_p_reg_cge_fixed	0xA2AE
+#define	reg_cge_fixed_pos 1
+#define	reg_cge_fixed_len 1
+#define	reg_cge_fixed_lsb 0
+#define xd_p_reg_fft_rotate_en	0xA2AE
+#define	reg_fft_rotate_en_pos 2
+#define	reg_fft_rotate_en_len 1
+#define	reg_fft_rotate_en_lsb 0
+#define xd_p_reg_fft_rotate_base_4_0	0xA2AE
+#define	reg_fft_rotate_base_4_0_pos 3
+#define	reg_fft_rotate_base_4_0_len 5
+#define	reg_fft_rotate_base_4_0_lsb 0
+#define xd_p_reg_fft_rotate_base_12_5	0xA2AF
+#define	reg_fft_rotate_base_12_5_pos 0
+#define	reg_fft_rotate_base_12_5_len 8
+#define	reg_fft_rotate_base_12_5_lsb 5
+#define xd_p_reg_gp_trigger_fd	0xA2B8
+#define	reg_gp_trigger_fd_pos 0
+#define	reg_gp_trigger_fd_len 1
+#define	reg_gp_trigger_fd_lsb 0
+#define xd_p_reg_trigger_sel_fd	0xA2B8
+#define	reg_trigger_sel_fd_pos 1
+#define	reg_trigger_sel_fd_len 2
+#define	reg_trigger_sel_fd_lsb 0
+#define xd_p_reg_trigger_module_sel_fd	0xA2B9
+#define	reg_trigger_module_sel_fd_pos 0
+#define	reg_trigger_module_sel_fd_len 6
+#define	reg_trigger_module_sel_fd_lsb 0
+#define xd_p_reg_trigger_set_sel_fd	0xA2BA
+#define	reg_trigger_set_sel_fd_pos 0
+#define	reg_trigger_set_sel_fd_len 6
+#define	reg_trigger_set_sel_fd_lsb 0
+#define xd_p_reg_fd_noname_7_0	0xA2BC
+#define	reg_fd_noname_7_0_pos 0
+#define	reg_fd_noname_7_0_len 8
+#define	reg_fd_noname_7_0_lsb 0
+#define xd_p_reg_fd_noname_15_8	0xA2BD
+#define	reg_fd_noname_15_8_pos 0
+#define	reg_fd_noname_15_8_len 8
+#define	reg_fd_noname_15_8_lsb 8
+#define xd_p_reg_fd_noname_23_16	0xA2BE
+#define	reg_fd_noname_23_16_pos 0
+#define	reg_fd_noname_23_16_len 8
+#define	reg_fd_noname_23_16_lsb 16
+#define xd_p_reg_fd_noname_31_24	0xA2BF
+#define	reg_fd_noname_31_24_pos 0
+#define	reg_fd_noname_31_24_len 8
+#define	reg_fd_noname_31_24_lsb 24
+#define xd_r_fd_fpcc_cp_corr_signn	0xA2C0
+#define	fd_fpcc_cp_corr_signn_pos 0
+#define	fd_fpcc_cp_corr_signn_len 8
+#define	fd_fpcc_cp_corr_signn_lsb 0
+#define xd_p_reg_feq_s1	0xA2C1
+#define	reg_feq_s1_pos 0
+#define	reg_feq_s1_len 5
+#define	reg_feq_s1_lsb 0
+#define xd_p_fd_fpcc_cp_corr_tone_th	0xA2C2
+#define	fd_fpcc_cp_corr_tone_th_pos 0
+#define	fd_fpcc_cp_corr_tone_th_len 6
+#define	fd_fpcc_cp_corr_tone_th_lsb 0
+#define xd_p_fd_fpcc_cp_corr_symbol_log_th	0xA2C3
+#define	fd_fpcc_cp_corr_symbol_log_th_pos 0
+#define	fd_fpcc_cp_corr_symbol_log_th_len 4
+#define	fd_fpcc_cp_corr_symbol_log_th_lsb 0
+#define xd_p_fd_fpcc_cp_corr_int	0xA2C4
+#define	fd_fpcc_cp_corr_int_pos 0
+#define	fd_fpcc_cp_corr_int_len 1
+#define	fd_fpcc_cp_corr_int_lsb 0
+#define xd_p_reg_sfoe_ns_7_0	0xA320
+#define	reg_sfoe_ns_7_0_pos 0
+#define	reg_sfoe_ns_7_0_len 8
+#define	reg_sfoe_ns_7_0_lsb 0
+#define xd_p_reg_sfoe_ns_14_8	0xA321
+#define	reg_sfoe_ns_14_8_pos 0
+#define	reg_sfoe_ns_14_8_len 7
+#define	reg_sfoe_ns_14_8_lsb 8
+#define xd_p_reg_sfoe_c1_7_0	0xA322
+#define	reg_sfoe_c1_7_0_pos 0
+#define	reg_sfoe_c1_7_0_len 8
+#define	reg_sfoe_c1_7_0_lsb 0
+#define xd_p_reg_sfoe_c1_15_8	0xA323
+#define	reg_sfoe_c1_15_8_pos 0
+#define	reg_sfoe_c1_15_8_len 8
+#define	reg_sfoe_c1_15_8_lsb 8
+#define xd_p_reg_sfoe_c1_17_16	0xA324
+#define	reg_sfoe_c1_17_16_pos 0
+#define	reg_sfoe_c1_17_16_len 2
+#define	reg_sfoe_c1_17_16_lsb 16
+#define xd_p_reg_sfoe_c2_7_0	0xA325
+#define	reg_sfoe_c2_7_0_pos 0
+#define	reg_sfoe_c2_7_0_len 8
+#define	reg_sfoe_c2_7_0_lsb 0
+#define xd_p_reg_sfoe_c2_15_8	0xA326
+#define	reg_sfoe_c2_15_8_pos 0
+#define	reg_sfoe_c2_15_8_len 8
+#define	reg_sfoe_c2_15_8_lsb 8
+#define xd_p_reg_sfoe_c2_17_16	0xA327
+#define	reg_sfoe_c2_17_16_pos 0
+#define	reg_sfoe_c2_17_16_len 2
+#define	reg_sfoe_c2_17_16_lsb 16
+#define xd_r_reg_sfoe_out_9_2	0xA328
+#define	reg_sfoe_out_9_2_pos 0
+#define	reg_sfoe_out_9_2_len 8
+#define	reg_sfoe_out_9_2_lsb 0
+#define xd_r_reg_sfoe_out_1_0	0xA329
+#define	reg_sfoe_out_1_0_pos 0
+#define	reg_sfoe_out_1_0_len 2
+#define	reg_sfoe_out_1_0_lsb 0
+#define xd_p_reg_sfoe_lm_counter_th	0xA32A
+#define	reg_sfoe_lm_counter_th_pos 0
+#define	reg_sfoe_lm_counter_th_len 4
+#define	reg_sfoe_lm_counter_th_lsb 0
+#define xd_p_reg_sfoe_convg_th	0xA32B
+#define	reg_sfoe_convg_th_pos 0
+#define	reg_sfoe_convg_th_len 8
+#define	reg_sfoe_convg_th_lsb 0
+#define xd_p_reg_sfoe_divg_th	0xA32C
+#define	reg_sfoe_divg_th_pos 0
+#define	reg_sfoe_divg_th_len 8
+#define	reg_sfoe_divg_th_lsb 0
+#define xd_p_fd_tpsd_en	0xA330
+#define	fd_tpsd_en_pos 0
+#define	fd_tpsd_en_len 1
+#define	fd_tpsd_en_lsb 0
+#define xd_p_fd_tpsd_dis	0xA330
+#define	fd_tpsd_dis_pos 1
+#define	fd_tpsd_dis_len 1
+#define	fd_tpsd_dis_lsb 0
+#define xd_p_fd_tpsd_rst	0xA330
+#define	fd_tpsd_rst_pos 2
+#define	fd_tpsd_rst_len 1
+#define	fd_tpsd_rst_lsb 0
+#define xd_p_fd_tpsd_lock	0xA330
+#define	fd_tpsd_lock_pos 3
+#define	fd_tpsd_lock_len 1
+#define	fd_tpsd_lock_lsb 0
+#define xd_r_fd_tpsd_s19	0xA330
+#define	fd_tpsd_s19_pos 4
+#define	fd_tpsd_s19_len 1
+#define	fd_tpsd_s19_lsb 0
+#define xd_r_fd_tpsd_s17	0xA330
+#define	fd_tpsd_s17_pos 5
+#define	fd_tpsd_s17_len 1
+#define	fd_tpsd_s17_lsb 0
+#define xd_p_fd_sfr_ste_en	0xA331
+#define	fd_sfr_ste_en_pos 0
+#define	fd_sfr_ste_en_len 1
+#define	fd_sfr_ste_en_lsb 0
+#define xd_p_fd_sfr_ste_dis	0xA331
+#define	fd_sfr_ste_dis_pos 1
+#define	fd_sfr_ste_dis_len 1
+#define	fd_sfr_ste_dis_lsb 0
+#define xd_p_fd_sfr_ste_rst	0xA331
+#define	fd_sfr_ste_rst_pos 2
+#define	fd_sfr_ste_rst_len 1
+#define	fd_sfr_ste_rst_lsb 0
+#define xd_p_fd_sfr_ste_mode	0xA331
+#define	fd_sfr_ste_mode_pos 3
+#define	fd_sfr_ste_mode_len 1
+#define	fd_sfr_ste_mode_lsb 0
+#define xd_p_fd_sfr_ste_done	0xA331
+#define	fd_sfr_ste_done_pos 4
+#define	fd_sfr_ste_done_len 1
+#define	fd_sfr_ste_done_lsb 0
+#define xd_p_reg_cfoe_ffoe_en	0xA332
+#define	reg_cfoe_ffoe_en_pos 0
+#define	reg_cfoe_ffoe_en_len 1
+#define	reg_cfoe_ffoe_en_lsb 0
+#define xd_p_reg_cfoe_ffoe_dis	0xA332
+#define	reg_cfoe_ffoe_dis_pos 1
+#define	reg_cfoe_ffoe_dis_len 1
+#define	reg_cfoe_ffoe_dis_lsb 0
+#define xd_p_reg_cfoe_ffoe_rst	0xA332
+#define	reg_cfoe_ffoe_rst_pos 2
+#define	reg_cfoe_ffoe_rst_len 1
+#define	reg_cfoe_ffoe_rst_lsb 0
+#define xd_p_reg_cfoe_ifoe_en	0xA332
+#define	reg_cfoe_ifoe_en_pos 3
+#define	reg_cfoe_ifoe_en_len 1
+#define	reg_cfoe_ifoe_en_lsb 0
+#define xd_p_reg_cfoe_ifoe_dis	0xA332
+#define	reg_cfoe_ifoe_dis_pos 4
+#define	reg_cfoe_ifoe_dis_len 1
+#define	reg_cfoe_ifoe_dis_lsb 0
+#define xd_p_reg_cfoe_ifoe_rst	0xA332
+#define	reg_cfoe_ifoe_rst_pos 5
+#define	reg_cfoe_ifoe_rst_len 1
+#define	reg_cfoe_ifoe_rst_lsb 0
+#define xd_p_reg_cfoe_fot_en	0xA332
+#define	reg_cfoe_fot_en_pos 6
+#define	reg_cfoe_fot_en_len 1
+#define	reg_cfoe_fot_en_lsb 0
+#define xd_p_reg_cfoe_fot_lm_en	0xA332
+#define	reg_cfoe_fot_lm_en_pos 7
+#define	reg_cfoe_fot_lm_en_len 1
+#define	reg_cfoe_fot_lm_en_lsb 0
+#define xd_p_reg_cfoe_fot_rst	0xA333
+#define	reg_cfoe_fot_rst_pos 0
+#define	reg_cfoe_fot_rst_len 1
+#define	reg_cfoe_fot_rst_lsb 0
+#define xd_r_fd_cfoe_ffoe_done	0xA333
+#define	fd_cfoe_ffoe_done_pos 1
+#define	fd_cfoe_ffoe_done_len 1
+#define	fd_cfoe_ffoe_done_lsb 0
+#define xd_p_fd_cfoe_metric_vld	0xA333
+#define	fd_cfoe_metric_vld_pos 2
+#define	fd_cfoe_metric_vld_len 1
+#define	fd_cfoe_metric_vld_lsb 0
+#define xd_p_reg_cfoe_ifod_vld	0xA333
+#define	reg_cfoe_ifod_vld_pos 3
+#define	reg_cfoe_ifod_vld_len 1
+#define	reg_cfoe_ifod_vld_lsb 0
+#define xd_r_fd_cfoe_ifoe_done	0xA333
+#define	fd_cfoe_ifoe_done_pos 4
+#define	fd_cfoe_ifoe_done_len 1
+#define	fd_cfoe_ifoe_done_lsb 0
+#define xd_r_fd_cfoe_fot_valid	0xA333
+#define	fd_cfoe_fot_valid_pos 5
+#define	fd_cfoe_fot_valid_len 1
+#define	fd_cfoe_fot_valid_lsb 0
+#define xd_p_reg_cfoe_divg_int	0xA333
+#define	reg_cfoe_divg_int_pos 6
+#define	reg_cfoe_divg_int_len 1
+#define	reg_cfoe_divg_int_lsb 0
+#define xd_r_reg_cfoe_divg_flag	0xA333
+#define	reg_cfoe_divg_flag_pos 7
+#define	reg_cfoe_divg_flag_len 1
+#define	reg_cfoe_divg_flag_lsb 0
+#define xd_p_reg_sfoe_en	0xA334
+#define	reg_sfoe_en_pos 0
+#define	reg_sfoe_en_len 1
+#define	reg_sfoe_en_lsb 0
+#define xd_p_reg_sfoe_dis	0xA334
+#define	reg_sfoe_dis_pos 1
+#define	reg_sfoe_dis_len 1
+#define	reg_sfoe_dis_lsb 0
+#define xd_p_reg_sfoe_rst	0xA334
+#define	reg_sfoe_rst_pos 2
+#define	reg_sfoe_rst_len 1
+#define	reg_sfoe_rst_lsb 0
+#define xd_p_reg_sfoe_vld_int	0xA334
+#define	reg_sfoe_vld_int_pos 3
+#define	reg_sfoe_vld_int_len 1
+#define	reg_sfoe_vld_int_lsb 0
+#define xd_p_reg_sfoe_lm_en	0xA334
+#define	reg_sfoe_lm_en_pos 4
+#define	reg_sfoe_lm_en_len 1
+#define	reg_sfoe_lm_en_lsb 0
+#define xd_p_reg_sfoe_divg_int	0xA334
+#define	reg_sfoe_divg_int_pos 5
+#define	reg_sfoe_divg_int_len 1
+#define	reg_sfoe_divg_int_lsb 0
+#define xd_r_reg_sfoe_divg_flag	0xA334
+#define	reg_sfoe_divg_flag_pos 6
+#define	reg_sfoe_divg_flag_len 1
+#define	reg_sfoe_divg_flag_lsb 0
+#define xd_p_reg_fft_rst	0xA335
+#define	reg_fft_rst_pos 0
+#define	reg_fft_rst_len 1
+#define	reg_fft_rst_lsb 0
+#define xd_p_reg_fft_fast_beacon	0xA335
+#define	reg_fft_fast_beacon_pos 1
+#define	reg_fft_fast_beacon_len 1
+#define	reg_fft_fast_beacon_lsb 0
+#define xd_p_reg_fft_fast_valid	0xA335
+#define	reg_fft_fast_valid_pos 2
+#define	reg_fft_fast_valid_len 1
+#define	reg_fft_fast_valid_lsb 0
+#define xd_p_reg_fft_mask_en	0xA335
+#define	reg_fft_mask_en_pos 3
+#define	reg_fft_mask_en_len 1
+#define	reg_fft_mask_en_lsb 0
+#define xd_p_reg_fft_crc_en	0xA335
+#define	reg_fft_crc_en_pos 4
+#define	reg_fft_crc_en_len 1
+#define	reg_fft_crc_en_lsb 0
+#define xd_p_reg_finr_en	0xA336
+#define	reg_finr_en_pos 0
+#define	reg_finr_en_len 1
+#define	reg_finr_en_lsb 0
+#define xd_p_fd_fste_en	0xA337
+#define	fd_fste_en_pos 1
+#define	fd_fste_en_len 1
+#define	fd_fste_en_lsb 0
+#define xd_p_fd_sqi_tps_level_shift	0xA338
+#define	fd_sqi_tps_level_shift_pos 0
+#define	fd_sqi_tps_level_shift_len 8
+#define	fd_sqi_tps_level_shift_lsb 0
+#define xd_p_fd_pilot_ma_len	0xA339
+#define	fd_pilot_ma_len_pos 0
+#define	fd_pilot_ma_len_len 6
+#define	fd_pilot_ma_len_lsb 0
+#define xd_p_fd_tps_ma_len	0xA33A
+#define	fd_tps_ma_len_pos 0
+#define	fd_tps_ma_len_len 6
+#define	fd_tps_ma_len_lsb 0
+#define xd_p_fd_sqi_s3	0xA33B
+#define	fd_sqi_s3_pos 0
+#define	fd_sqi_s3_len 8
+#define	fd_sqi_s3_lsb 0
+#define xd_p_fd_sqi_dummy_reg_0	0xA33C
+#define	fd_sqi_dummy_reg_0_pos 0
+#define	fd_sqi_dummy_reg_0_len 1
+#define	fd_sqi_dummy_reg_0_lsb 0
+#define xd_p_fd_sqi_debug_sel	0xA33C
+#define	fd_sqi_debug_sel_pos 1
+#define	fd_sqi_debug_sel_len 2
+#define	fd_sqi_debug_sel_lsb 0
+#define xd_p_fd_sqi_s2	0xA33C
+#define	fd_sqi_s2_pos 3
+#define	fd_sqi_s2_len 5
+#define	fd_sqi_s2_lsb 0
+#define xd_p_fd_sqi_dummy_reg_1	0xA33D
+#define	fd_sqi_dummy_reg_1_pos 0
+#define	fd_sqi_dummy_reg_1_len 1
+#define	fd_sqi_dummy_reg_1_lsb 0
+#define xd_p_fd_inr_ignore	0xA33D
+#define	fd_inr_ignore_pos 1
+#define	fd_inr_ignore_len 1
+#define	fd_inr_ignore_lsb 0
+#define xd_p_fd_pilot_ignore	0xA33D
+#define	fd_pilot_ignore_pos 2
+#define	fd_pilot_ignore_len 1
+#define	fd_pilot_ignore_lsb 0
+#define xd_p_fd_etps_ignore	0xA33D
+#define	fd_etps_ignore_pos 3
+#define	fd_etps_ignore_len 1
+#define	fd_etps_ignore_lsb 0
+#define xd_p_fd_sqi_s1	0xA33D
+#define	fd_sqi_s1_pos 4
+#define	fd_sqi_s1_len 4
+#define	fd_sqi_s1_lsb 0
+#define xd_p_reg_fste_ehw_7_0	0xA33E
+#define	reg_fste_ehw_7_0_pos 0
+#define	reg_fste_ehw_7_0_len 8
+#define	reg_fste_ehw_7_0_lsb 0
+#define xd_p_reg_fste_ehw_9_8	0xA33F
+#define	reg_fste_ehw_9_8_pos 0
+#define	reg_fste_ehw_9_8_len 2
+#define	reg_fste_ehw_9_8_lsb 8
+#define xd_p_reg_fste_i_adj_vld	0xA33F
+#define	reg_fste_i_adj_vld_pos 2
+#define	reg_fste_i_adj_vld_len 1
+#define	reg_fste_i_adj_vld_lsb 0
+#define xd_p_reg_fste_phase_ini_7_0	0xA340
+#define	reg_fste_phase_ini_7_0_pos 0
+#define	reg_fste_phase_ini_7_0_len 8
+#define	reg_fste_phase_ini_7_0_lsb 0
+#define xd_p_reg_fste_phase_ini_11_8	0xA341
+#define	reg_fste_phase_ini_11_8_pos 0
+#define	reg_fste_phase_ini_11_8_len 4
+#define	reg_fste_phase_ini_11_8_lsb 8
+#define xd_p_reg_fste_phase_inc_3_0	0xA341
+#define	reg_fste_phase_inc_3_0_pos 4
+#define	reg_fste_phase_inc_3_0_len 4
+#define	reg_fste_phase_inc_3_0_lsb 0
+#define xd_p_reg_fste_phase_inc_11_4	0xA342
+#define	reg_fste_phase_inc_11_4_pos 0
+#define	reg_fste_phase_inc_11_4_len 8
+#define	reg_fste_phase_inc_11_4_lsb 4
+#define xd_p_reg_fste_acum_cost_cnt_max	0xA343
+#define	reg_fste_acum_cost_cnt_max_pos 0
+#define	reg_fste_acum_cost_cnt_max_len 4
+#define	reg_fste_acum_cost_cnt_max_lsb 0
+#define xd_p_reg_fste_step_size_std	0xA343
+#define	reg_fste_step_size_std_pos 4
+#define	reg_fste_step_size_std_len 4
+#define	reg_fste_step_size_std_lsb 0
+#define xd_p_reg_fste_step_size_max	0xA344
+#define	reg_fste_step_size_max_pos 0
+#define	reg_fste_step_size_max_len 4
+#define	reg_fste_step_size_max_lsb 0
+#define xd_p_reg_fste_step_size_min	0xA344
+#define	reg_fste_step_size_min_pos 4
+#define	reg_fste_step_size_min_len 4
+#define	reg_fste_step_size_min_lsb 0
+#define xd_p_reg_fste_frac_step_size_7_0	0xA345
+#define	reg_fste_frac_step_size_7_0_pos 0
+#define	reg_fste_frac_step_size_7_0_len 8
+#define	reg_fste_frac_step_size_7_0_lsb 0
+#define xd_p_reg_fste_frac_step_size_15_8	0xA346
+#define	reg_fste_frac_step_size_15_8_pos 0
+#define	reg_fste_frac_step_size_15_8_len 8
+#define	reg_fste_frac_step_size_15_8_lsb 8
+#define xd_p_reg_fste_frac_step_size_19_16	0xA347
+#define	reg_fste_frac_step_size_19_16_pos 0
+#define	reg_fste_frac_step_size_19_16_len 4
+#define	reg_fste_frac_step_size_19_16_lsb 16
+#define xd_p_reg_fste_rpd_dir_cnt_max	0xA347
+#define	reg_fste_rpd_dir_cnt_max_pos 4
+#define	reg_fste_rpd_dir_cnt_max_len 4
+#define	reg_fste_rpd_dir_cnt_max_lsb 0
+#define xd_p_reg_fste_ehs	0xA348
+#define	reg_fste_ehs_pos 0
+#define	reg_fste_ehs_len 4
+#define	reg_fste_ehs_lsb 0
+#define xd_p_reg_fste_frac_cost_cnt_max_3_0	0xA348
+#define	reg_fste_frac_cost_cnt_max_3_0_pos 4
+#define	reg_fste_frac_cost_cnt_max_3_0_len 4
+#define	reg_fste_frac_cost_cnt_max_3_0_lsb 0
+#define xd_p_reg_fste_frac_cost_cnt_max_9_4	0xA349
+#define	reg_fste_frac_cost_cnt_max_9_4_pos 0
+#define	reg_fste_frac_cost_cnt_max_9_4_len 6
+#define	reg_fste_frac_cost_cnt_max_9_4_lsb 4
+#define xd_p_reg_fste_w0_7_0	0xA34A
+#define	reg_fste_w0_7_0_pos 0
+#define	reg_fste_w0_7_0_len 8
+#define	reg_fste_w0_7_0_lsb 0
+#define xd_p_reg_fste_w0_11_8	0xA34B
+#define	reg_fste_w0_11_8_pos 0
+#define	reg_fste_w0_11_8_len 4
+#define	reg_fste_w0_11_8_lsb 8
+#define xd_p_reg_fste_w1_3_0	0xA34B
+#define	reg_fste_w1_3_0_pos 4
+#define	reg_fste_w1_3_0_len 4
+#define	reg_fste_w1_3_0_lsb 0
+#define xd_p_reg_fste_w1_11_4	0xA34C
+#define	reg_fste_w1_11_4_pos 0
+#define	reg_fste_w1_11_4_len 8
+#define	reg_fste_w1_11_4_lsb 4
+#define xd_p_reg_fste_w2_7_0	0xA34D
+#define	reg_fste_w2_7_0_pos 0
+#define	reg_fste_w2_7_0_len 8
+#define	reg_fste_w2_7_0_lsb 0
+#define xd_p_reg_fste_w2_11_8	0xA34E
+#define	reg_fste_w2_11_8_pos 0
+#define	reg_fste_w2_11_8_len 4
+#define	reg_fste_w2_11_8_lsb 8
+#define xd_p_reg_fste_w3_3_0	0xA34E
+#define	reg_fste_w3_3_0_pos 4
+#define	reg_fste_w3_3_0_len 4
+#define	reg_fste_w3_3_0_lsb 0
+#define xd_p_reg_fste_w3_11_4	0xA34F
+#define	reg_fste_w3_11_4_pos 0
+#define	reg_fste_w3_11_4_len 8
+#define	reg_fste_w3_11_4_lsb 4
+#define xd_p_reg_fste_w4_7_0	0xA350
+#define	reg_fste_w4_7_0_pos 0
+#define	reg_fste_w4_7_0_len 8
+#define	reg_fste_w4_7_0_lsb 0
+#define xd_p_reg_fste_w4_11_8	0xA351
+#define	reg_fste_w4_11_8_pos 0
+#define	reg_fste_w4_11_8_len 4
+#define	reg_fste_w4_11_8_lsb 8
+#define xd_p_reg_fste_w5_3_0	0xA351
+#define	reg_fste_w5_3_0_pos 4
+#define	reg_fste_w5_3_0_len 4
+#define	reg_fste_w5_3_0_lsb 0
+#define xd_p_reg_fste_w5_11_4	0xA352
+#define	reg_fste_w5_11_4_pos 0
+#define	reg_fste_w5_11_4_len 8
+#define	reg_fste_w5_11_4_lsb 4
+#define xd_p_reg_fste_w6_7_0	0xA353
+#define	reg_fste_w6_7_0_pos 0
+#define	reg_fste_w6_7_0_len 8
+#define	reg_fste_w6_7_0_lsb 0
+#define xd_p_reg_fste_w6_11_8	0xA354
+#define	reg_fste_w6_11_8_pos 0
+#define	reg_fste_w6_11_8_len 4
+#define	reg_fste_w6_11_8_lsb 8
+#define xd_p_reg_fste_w7_3_0	0xA354
+#define	reg_fste_w7_3_0_pos 4
+#define	reg_fste_w7_3_0_len 4
+#define	reg_fste_w7_3_0_lsb 0
+#define xd_p_reg_fste_w7_11_4	0xA355
+#define	reg_fste_w7_11_4_pos 0
+#define	reg_fste_w7_11_4_len 8
+#define	reg_fste_w7_11_4_lsb 4
+#define xd_p_reg_fste_w8_7_0	0xA356
+#define	reg_fste_w8_7_0_pos 0
+#define	reg_fste_w8_7_0_len 8
+#define	reg_fste_w8_7_0_lsb 0
+#define xd_p_reg_fste_w8_11_8	0xA357
+#define	reg_fste_w8_11_8_pos 0
+#define	reg_fste_w8_11_8_len 4
+#define	reg_fste_w8_11_8_lsb 8
+#define xd_p_reg_fste_w9_3_0	0xA357
+#define	reg_fste_w9_3_0_pos 4
+#define	reg_fste_w9_3_0_len 4
+#define	reg_fste_w9_3_0_lsb 0
+#define xd_p_reg_fste_w9_11_4	0xA358
+#define	reg_fste_w9_11_4_pos 0
+#define	reg_fste_w9_11_4_len 8
+#define	reg_fste_w9_11_4_lsb 4
+#define xd_p_reg_fste_wa_7_0	0xA359
+#define	reg_fste_wa_7_0_pos 0
+#define	reg_fste_wa_7_0_len 8
+#define	reg_fste_wa_7_0_lsb 0
+#define xd_p_reg_fste_wa_11_8	0xA35A
+#define	reg_fste_wa_11_8_pos 0
+#define	reg_fste_wa_11_8_len 4
+#define	reg_fste_wa_11_8_lsb 8
+#define xd_p_reg_fste_wb_3_0	0xA35A
+#define	reg_fste_wb_3_0_pos 4
+#define	reg_fste_wb_3_0_len 4
+#define	reg_fste_wb_3_0_lsb 0
+#define xd_p_reg_fste_wb_11_4	0xA35B
+#define	reg_fste_wb_11_4_pos 0
+#define	reg_fste_wb_11_4_len 8
+#define	reg_fste_wb_11_4_lsb 4
+#define xd_r_fd_fste_i_adj	0xA35C
+#define	fd_fste_i_adj_pos 0
+#define	fd_fste_i_adj_len 5
+#define	fd_fste_i_adj_lsb 0
+#define xd_r_fd_fste_f_adj_7_0	0xA35D
+#define	fd_fste_f_adj_7_0_pos 0
+#define	fd_fste_f_adj_7_0_len 8
+#define	fd_fste_f_adj_7_0_lsb 0
+#define xd_r_fd_fste_f_adj_15_8	0xA35E
+#define	fd_fste_f_adj_15_8_pos 0
+#define	fd_fste_f_adj_15_8_len 8
+#define	fd_fste_f_adj_15_8_lsb 8
+#define xd_r_fd_fste_f_adj_19_16	0xA35F
+#define	fd_fste_f_adj_19_16_pos 0
+#define	fd_fste_f_adj_19_16_len 4
+#define	fd_fste_f_adj_19_16_lsb 16
+#define xd_p_reg_feq_Leak_Bypass	0xA366
+#define	reg_feq_Leak_Bypass_pos 0
+#define	reg_feq_Leak_Bypass_len 1
+#define	reg_feq_Leak_Bypass_lsb 0
+#define xd_p_reg_feq_Leak_Mneg1	0xA366
+#define	reg_feq_Leak_Mneg1_pos 1
+#define	reg_feq_Leak_Mneg1_len 3
+#define	reg_feq_Leak_Mneg1_lsb 0
+#define xd_p_reg_feq_Leak_B_ShiftQ	0xA366
+#define	reg_feq_Leak_B_ShiftQ_pos 4
+#define	reg_feq_Leak_B_ShiftQ_len 4
+#define	reg_feq_Leak_B_ShiftQ_lsb 0
+#define xd_p_reg_feq_Leak_B_Float0	0xA367
+#define	reg_feq_Leak_B_Float0_pos 0
+#define	reg_feq_Leak_B_Float0_len 8
+#define	reg_feq_Leak_B_Float0_lsb 0
+#define xd_p_reg_feq_Leak_B_Float1	0xA368
+#define	reg_feq_Leak_B_Float1_pos 0
+#define	reg_feq_Leak_B_Float1_len 8
+#define	reg_feq_Leak_B_Float1_lsb 0
+#define xd_p_reg_feq_Leak_B_Float2	0xA369
+#define	reg_feq_Leak_B_Float2_pos 0
+#define	reg_feq_Leak_B_Float2_len 8
+#define	reg_feq_Leak_B_Float2_lsb 0
+#define xd_p_reg_feq_Leak_B_Float3	0xA36A
+#define	reg_feq_Leak_B_Float3_pos 0
+#define	reg_feq_Leak_B_Float3_len 8
+#define	reg_feq_Leak_B_Float3_lsb 0
+#define xd_p_reg_feq_Leak_B_Float4	0xA36B
+#define	reg_feq_Leak_B_Float4_pos 0
+#define	reg_feq_Leak_B_Float4_len 8
+#define	reg_feq_Leak_B_Float4_lsb 0
+#define xd_p_reg_feq_Leak_B_Float5	0xA36C
+#define	reg_feq_Leak_B_Float5_pos 0
+#define	reg_feq_Leak_B_Float5_len 8
+#define	reg_feq_Leak_B_Float5_lsb 0
+#define xd_p_reg_feq_Leak_B_Float6	0xA36D
+#define	reg_feq_Leak_B_Float6_pos 0
+#define	reg_feq_Leak_B_Float6_len 8
+#define	reg_feq_Leak_B_Float6_lsb 0
+#define xd_p_reg_feq_Leak_B_Float7	0xA36E
+#define	reg_feq_Leak_B_Float7_pos 0
+#define	reg_feq_Leak_B_Float7_len 8
+#define	reg_feq_Leak_B_Float7_lsb 0
+#define xd_r_reg_feq_data_h2_7_0	0xA36F
+#define	reg_feq_data_h2_7_0_pos 0
+#define	reg_feq_data_h2_7_0_len 8
+#define	reg_feq_data_h2_7_0_lsb 0
+#define xd_r_reg_feq_data_h2_9_8	0xA370
+#define	reg_feq_data_h2_9_8_pos 0
+#define	reg_feq_data_h2_9_8_len 2
+#define	reg_feq_data_h2_9_8_lsb 8
+#define xd_p_reg_feq_leak_use_slice_tps	0xA371
+#define	reg_feq_leak_use_slice_tps_pos 0
+#define	reg_feq_leak_use_slice_tps_len 1
+#define	reg_feq_leak_use_slice_tps_lsb 0
+#define xd_p_reg_feq_read_update	0xA371
+#define	reg_feq_read_update_pos 1
+#define	reg_feq_read_update_len 1
+#define	reg_feq_read_update_lsb 0
+#define xd_p_reg_feq_data_vld	0xA371
+#define	reg_feq_data_vld_pos 2
+#define	reg_feq_data_vld_len 1
+#define	reg_feq_data_vld_lsb 0
+#define xd_p_reg_feq_tone_idx_4_0	0xA371
+#define	reg_feq_tone_idx_4_0_pos 3
+#define	reg_feq_tone_idx_4_0_len 5
+#define	reg_feq_tone_idx_4_0_lsb 0
+#define xd_p_reg_feq_tone_idx_12_5	0xA372
+#define	reg_feq_tone_idx_12_5_pos 0
+#define	reg_feq_tone_idx_12_5_len 8
+#define	reg_feq_tone_idx_12_5_lsb 5
+#define xd_r_reg_feq_data_re_7_0	0xA373
+#define	reg_feq_data_re_7_0_pos 0
+#define	reg_feq_data_re_7_0_len 8
+#define	reg_feq_data_re_7_0_lsb 0
+#define xd_r_reg_feq_data_re_10_8	0xA374
+#define	reg_feq_data_re_10_8_pos 0
+#define	reg_feq_data_re_10_8_len 3
+#define	reg_feq_data_re_10_8_lsb 8
+#define xd_r_reg_feq_data_im_7_0	0xA375
+#define	reg_feq_data_im_7_0_pos 0
+#define	reg_feq_data_im_7_0_len 8
+#define	reg_feq_data_im_7_0_lsb 0
+#define xd_r_reg_feq_data_im_10_8	0xA376
+#define	reg_feq_data_im_10_8_pos 0
+#define	reg_feq_data_im_10_8_len 3
+#define	reg_feq_data_im_10_8_lsb 8
+#define xd_r_reg_feq_y_re	0xA377
+#define	reg_feq_y_re_pos 0
+#define	reg_feq_y_re_len 8
+#define	reg_feq_y_re_lsb 0
+#define xd_r_reg_feq_y_im	0xA378
+#define	reg_feq_y_im_pos 0
+#define	reg_feq_y_im_len 8
+#define	reg_feq_y_im_lsb 0
+#define xd_r_reg_feq_h_re_7_0	0xA379
+#define	reg_feq_h_re_7_0_pos 0
+#define	reg_feq_h_re_7_0_len 8
+#define	reg_feq_h_re_7_0_lsb 0
+#define xd_r_reg_feq_h_re_8	0xA37A
+#define	reg_feq_h_re_8_pos 0
+#define	reg_feq_h_re_8_len 1
+#define	reg_feq_h_re_8_lsb 0
+#define xd_r_reg_feq_h_im_7_0	0xA37B
+#define	reg_feq_h_im_7_0_pos 0
+#define	reg_feq_h_im_7_0_len 8
+#define	reg_feq_h_im_7_0_lsb 0
+#define xd_r_reg_feq_h_im_8	0xA37C
+#define	reg_feq_h_im_8_pos 0
+#define	reg_feq_h_im_8_len 1
+#define	reg_feq_h_im_8_lsb 0
+#define xd_p_fec_super_frm_unit_7_0	0xA380
+#define	fec_super_frm_unit_7_0_pos 0
+#define	fec_super_frm_unit_7_0_len 8
+#define	fec_super_frm_unit_7_0_lsb 0
+#define xd_p_fec_super_frm_unit_15_8	0xA381
+#define	fec_super_frm_unit_15_8_pos 0
+#define	fec_super_frm_unit_15_8_len 8
+#define	fec_super_frm_unit_15_8_lsb 8
+#define xd_r_fec_vtb_err_bit_cnt_7_0	0xA382
+#define	fec_vtb_err_bit_cnt_7_0_pos 0
+#define	fec_vtb_err_bit_cnt_7_0_len 8
+#define	fec_vtb_err_bit_cnt_7_0_lsb 0
+#define xd_r_fec_vtb_err_bit_cnt_15_8	0xA383
+#define	fec_vtb_err_bit_cnt_15_8_pos 0
+#define	fec_vtb_err_bit_cnt_15_8_len 8
+#define	fec_vtb_err_bit_cnt_15_8_lsb 8
+#define xd_r_fec_vtb_err_bit_cnt_23_16	0xA384
+#define	fec_vtb_err_bit_cnt_23_16_pos 0
+#define	fec_vtb_err_bit_cnt_23_16_len 8
+#define	fec_vtb_err_bit_cnt_23_16_lsb 16
+#define xd_p_fec_rsd_packet_unit_7_0	0xA385
+#define	fec_rsd_packet_unit_7_0_pos 0
+#define	fec_rsd_packet_unit_7_0_len 8
+#define	fec_rsd_packet_unit_7_0_lsb 0
+#define xd_p_fec_rsd_packet_unit_15_8	0xA386
+#define	fec_rsd_packet_unit_15_8_pos 0
+#define	fec_rsd_packet_unit_15_8_len 8
+#define	fec_rsd_packet_unit_15_8_lsb 8
+#define xd_r_fec_rsd_bit_err_cnt_7_0	0xA387
+#define	fec_rsd_bit_err_cnt_7_0_pos 0
+#define	fec_rsd_bit_err_cnt_7_0_len 8
+#define	fec_rsd_bit_err_cnt_7_0_lsb 0
+#define xd_r_fec_rsd_bit_err_cnt_15_8	0xA388
+#define	fec_rsd_bit_err_cnt_15_8_pos 0
+#define	fec_rsd_bit_err_cnt_15_8_len 8
+#define	fec_rsd_bit_err_cnt_15_8_lsb 8
+#define xd_r_fec_rsd_bit_err_cnt_23_16	0xA389
+#define	fec_rsd_bit_err_cnt_23_16_pos 0
+#define	fec_rsd_bit_err_cnt_23_16_len 8
+#define	fec_rsd_bit_err_cnt_23_16_lsb 16
+#define xd_r_fec_rsd_abort_packet_cnt_7_0	0xA38A
+#define	fec_rsd_abort_packet_cnt_7_0_pos 0
+#define	fec_rsd_abort_packet_cnt_7_0_len 8
+#define	fec_rsd_abort_packet_cnt_7_0_lsb 0
+#define xd_r_fec_rsd_abort_packet_cnt_15_8	0xA38B
+#define	fec_rsd_abort_packet_cnt_15_8_pos 0
+#define	fec_rsd_abort_packet_cnt_15_8_len 8
+#define	fec_rsd_abort_packet_cnt_15_8_lsb 8
+#define xd_p_fec_RSD_PKT_NUM_PER_UNIT_7_0	0xA38C
+#define	fec_RSD_PKT_NUM_PER_UNIT_7_0_pos 0
+#define	fec_RSD_PKT_NUM_PER_UNIT_7_0_len 8
+#define	fec_RSD_PKT_NUM_PER_UNIT_7_0_lsb 0
+#define xd_p_fec_RSD_PKT_NUM_PER_UNIT_15_8	0xA38D
+#define	fec_RSD_PKT_NUM_PER_UNIT_15_8_pos 0
+#define	fec_RSD_PKT_NUM_PER_UNIT_15_8_len 8
+#define	fec_RSD_PKT_NUM_PER_UNIT_15_8_lsb 8
+#define xd_p_fec_RS_TH_1_7_0	0xA38E
+#define	fec_RS_TH_1_7_0_pos 0
+#define	fec_RS_TH_1_7_0_len 8
+#define	fec_RS_TH_1_7_0_lsb 0
+#define xd_p_fec_RS_TH_1_15_8	0xA38F
+#define	fec_RS_TH_1_15_8_pos 0
+#define	fec_RS_TH_1_15_8_len 8
+#define	fec_RS_TH_1_15_8_lsb 8
+#define xd_p_fec_RS_TH_2	0xA390
+#define	fec_RS_TH_2_pos 0
+#define	fec_RS_TH_2_len 8
+#define	fec_RS_TH_2_lsb 0
+#define xd_p_fec_mon_en	0xA391
+#define	fec_mon_en_pos 0
+#define	fec_mon_en_len 1
+#define	fec_mon_en_lsb 0
+#define xd_p_reg_b8to47	0xA391
+#define	reg_b8to47_pos 1
+#define	reg_b8to47_len 1
+#define	reg_b8to47_lsb 0
+#define xd_p_reg_rsd_sync_rep	0xA391
+#define	reg_rsd_sync_rep_pos 2
+#define	reg_rsd_sync_rep_len 1
+#define	reg_rsd_sync_rep_lsb 0
+#define xd_p_fec_rsd_retrain_rst	0xA391
+#define	fec_rsd_retrain_rst_pos 3
+#define	fec_rsd_retrain_rst_len 1
+#define	fec_rsd_retrain_rst_lsb 0
+#define xd_r_fec_rsd_ber_rdy	0xA391
+#define	fec_rsd_ber_rdy_pos 4
+#define	fec_rsd_ber_rdy_len 1
+#define	fec_rsd_ber_rdy_lsb 0
+#define xd_p_fec_rsd_ber_rst	0xA391
+#define	fec_rsd_ber_rst_pos 5
+#define	fec_rsd_ber_rst_len 1
+#define	fec_rsd_ber_rst_lsb 0
+#define xd_r_fec_vtb_ber_rdy	0xA391
+#define	fec_vtb_ber_rdy_pos 6
+#define	fec_vtb_ber_rdy_len 1
+#define	fec_vtb_ber_rdy_lsb 0
+#define xd_p_fec_vtb_ber_rst	0xA391
+#define	fec_vtb_ber_rst_pos 7
+#define	fec_vtb_ber_rst_len 1
+#define	fec_vtb_ber_rst_lsb 0
+#define xd_p_reg_vtb_clk40en	0xA392
+#define	reg_vtb_clk40en_pos 0
+#define	reg_vtb_clk40en_len 1
+#define	reg_vtb_clk40en_lsb 0
+#define xd_p_fec_vtb_rsd_mon_en	0xA392
+#define	fec_vtb_rsd_mon_en_pos 1
+#define	fec_vtb_rsd_mon_en_len 1
+#define	fec_vtb_rsd_mon_en_lsb 0
+#define xd_p_reg_fec_data_en	0xA392
+#define	reg_fec_data_en_pos 2
+#define	reg_fec_data_en_len 1
+#define	reg_fec_data_en_lsb 0
+#define xd_p_fec_dummy_reg_2	0xA392
+#define	fec_dummy_reg_2_pos 3
+#define	fec_dummy_reg_2_len 3
+#define	fec_dummy_reg_2_lsb 0
+#define xd_p_reg_sync_chk	0xA392
+#define	reg_sync_chk_pos 6
+#define	reg_sync_chk_len 1
+#define	reg_sync_chk_lsb 0
+#define xd_p_fec_rsd_bypass	0xA392
+#define	fec_rsd_bypass_pos 7
+#define	fec_rsd_bypass_len 1
+#define	fec_rsd_bypass_lsb 0
+#define xd_p_fec_sw_rst	0xA393
+#define	fec_sw_rst_pos 0
+#define	fec_sw_rst_len 1
+#define	fec_sw_rst_lsb 0
+#define xd_r_fec_vtb_pm_crc	0xA394
+#define	fec_vtb_pm_crc_pos 0
+#define	fec_vtb_pm_crc_len 8
+#define	fec_vtb_pm_crc_lsb 0
+#define xd_r_fec_vtb_tb_7_crc	0xA395
+#define	fec_vtb_tb_7_crc_pos 0
+#define	fec_vtb_tb_7_crc_len 8
+#define	fec_vtb_tb_7_crc_lsb 0
+#define xd_r_fec_vtb_tb_6_crc	0xA396
+#define	fec_vtb_tb_6_crc_pos 0
+#define	fec_vtb_tb_6_crc_len 8
+#define	fec_vtb_tb_6_crc_lsb 0
+#define xd_r_fec_vtb_tb_5_crc	0xA397
+#define	fec_vtb_tb_5_crc_pos 0
+#define	fec_vtb_tb_5_crc_len 8
+#define	fec_vtb_tb_5_crc_lsb 0
+#define xd_r_fec_vtb_tb_4_crc	0xA398
+#define	fec_vtb_tb_4_crc_pos 0
+#define	fec_vtb_tb_4_crc_len 8
+#define	fec_vtb_tb_4_crc_lsb 0
+#define xd_r_fec_vtb_tb_3_crc	0xA399
+#define	fec_vtb_tb_3_crc_pos 0
+#define	fec_vtb_tb_3_crc_len 8
+#define	fec_vtb_tb_3_crc_lsb 0
+#define xd_r_fec_vtb_tb_2_crc	0xA39A
+#define	fec_vtb_tb_2_crc_pos 0
+#define	fec_vtb_tb_2_crc_len 8
+#define	fec_vtb_tb_2_crc_lsb 0
+#define xd_r_fec_vtb_tb_1_crc	0xA39B
+#define	fec_vtb_tb_1_crc_pos 0
+#define	fec_vtb_tb_1_crc_len 8
+#define	fec_vtb_tb_1_crc_lsb 0
+#define xd_r_fec_vtb_tb_0_crc	0xA39C
+#define	fec_vtb_tb_0_crc_pos 0
+#define	fec_vtb_tb_0_crc_len 8
+#define	fec_vtb_tb_0_crc_lsb 0
+#define xd_r_fec_rsd_bank0_crc	0xA39D
+#define	fec_rsd_bank0_crc_pos 0
+#define	fec_rsd_bank0_crc_len 8
+#define	fec_rsd_bank0_crc_lsb 0
+#define xd_r_fec_rsd_bank1_crc	0xA39E
+#define	fec_rsd_bank1_crc_pos 0
+#define	fec_rsd_bank1_crc_len 8
+#define	fec_rsd_bank1_crc_lsb 0
+#define xd_r_fec_idi_vtb_crc	0xA39F
+#define	fec_idi_vtb_crc_pos 0
+#define	fec_idi_vtb_crc_len 8
+#define	fec_idi_vtb_crc_lsb 0
+#define xd_g_reg_tpsd_txmod	0xA3C0
+#define	reg_tpsd_txmod_pos 0
+#define	reg_tpsd_txmod_len 2
+#define	reg_tpsd_txmod_lsb 0
+#define xd_g_reg_tpsd_gi	0xA3C0
+#define	reg_tpsd_gi_pos 2
+#define	reg_tpsd_gi_len 2
+#define	reg_tpsd_gi_lsb 0
+#define xd_g_reg_tpsd_hier	0xA3C0
+#define	reg_tpsd_hier_pos 4
+#define	reg_tpsd_hier_len 3
+#define	reg_tpsd_hier_lsb 0
+#define xd_g_reg_bw	0xA3C1
+#define	reg_bw_pos 2
+#define	reg_bw_len 2
+#define	reg_bw_lsb 0
+#define xd_g_reg_dec_pri	0xA3C1
+#define	reg_dec_pri_pos 4
+#define	reg_dec_pri_len 1
+#define	reg_dec_pri_lsb 0
+#define xd_g_reg_tpsd_const	0xA3C1
+#define	reg_tpsd_const_pos 6
+#define	reg_tpsd_const_len 2
+#define	reg_tpsd_const_lsb 0
+#define xd_g_reg_tpsd_hpcr	0xA3C2
+#define	reg_tpsd_hpcr_pos 0
+#define	reg_tpsd_hpcr_len 3
+#define	reg_tpsd_hpcr_lsb 0
+#define xd_g_reg_tpsd_lpcr	0xA3C2
+#define	reg_tpsd_lpcr_pos 3
+#define	reg_tpsd_lpcr_len 3
+#define	reg_tpsd_lpcr_lsb 0
+#define xd_g_reg_ofsm_clk	0xA3D0
+#define	reg_ofsm_clk_pos 0
+#define	reg_ofsm_clk_len 3
+#define	reg_ofsm_clk_lsb 0
+#define xd_g_reg_fclk_cfg	0xA3D1
+#define	reg_fclk_cfg_pos 0
+#define	reg_fclk_cfg_len 1
+#define	reg_fclk_cfg_lsb 0
+#define xd_g_reg_fclk_idi	0xA3D1
+#define	reg_fclk_idi_pos 1
+#define	reg_fclk_idi_len 1
+#define	reg_fclk_idi_lsb 0
+#define xd_g_reg_fclk_odi	0xA3D1
+#define	reg_fclk_odi_pos 2
+#define	reg_fclk_odi_len 1
+#define	reg_fclk_odi_lsb 0
+#define xd_g_reg_fclk_rsd	0xA3D1
+#define	reg_fclk_rsd_pos 3
+#define	reg_fclk_rsd_len 1
+#define	reg_fclk_rsd_lsb 0
+#define xd_g_reg_fclk_vtb	0xA3D1
+#define	reg_fclk_vtb_pos 4
+#define	reg_fclk_vtb_len 1
+#define	reg_fclk_vtb_lsb 0
+#define xd_g_reg_fclk_cste	0xA3D1
+#define	reg_fclk_cste_pos 5
+#define	reg_fclk_cste_len 1
+#define	reg_fclk_cste_lsb 0
+#define xd_g_reg_fclk_mp2if	0xA3D1
+#define	reg_fclk_mp2if_pos 6
+#define	reg_fclk_mp2if_len 1
+#define	reg_fclk_mp2if_lsb 0
+#define xd_I2C_i2c_m_slave_addr	0xA400
+#define	i2c_m_slave_addr_pos 0
+#define	i2c_m_slave_addr_len 8
+#define	i2c_m_slave_addr_lsb 0
+#define xd_I2C_i2c_m_data1	0xA401
+#define	i2c_m_data1_pos 0
+#define	i2c_m_data1_len 8
+#define	i2c_m_data1_lsb 0
+#define xd_I2C_i2c_m_data2	0xA402
+#define	i2c_m_data2_pos 0
+#define	i2c_m_data2_len 8
+#define	i2c_m_data2_lsb 0
+#define xd_I2C_i2c_m_data3	0xA403
+#define	i2c_m_data3_pos 0
+#define	i2c_m_data3_len 8
+#define	i2c_m_data3_lsb 0
+#define xd_I2C_i2c_m_data4	0xA404
+#define	i2c_m_data4_pos 0
+#define	i2c_m_data4_len 8
+#define	i2c_m_data4_lsb 0
+#define xd_I2C_i2c_m_data5	0xA405
+#define	i2c_m_data5_pos 0
+#define	i2c_m_data5_len 8
+#define	i2c_m_data5_lsb 0
+#define xd_I2C_i2c_m_data6	0xA406
+#define	i2c_m_data6_pos 0
+#define	i2c_m_data6_len 8
+#define	i2c_m_data6_lsb 0
+#define xd_I2C_i2c_m_data7	0xA407
+#define	i2c_m_data7_pos 0
+#define	i2c_m_data7_len 8
+#define	i2c_m_data7_lsb 0
+#define xd_I2C_i2c_m_data8	0xA408
+#define	i2c_m_data8_pos 0
+#define	i2c_m_data8_len 8
+#define	i2c_m_data8_lsb 0
+#define xd_I2C_i2c_m_data9	0xA409
+#define	i2c_m_data9_pos 0
+#define	i2c_m_data9_len 8
+#define	i2c_m_data9_lsb 0
+#define xd_I2C_i2c_m_data10	0xA40A
+#define	i2c_m_data10_pos 0
+#define	i2c_m_data10_len 8
+#define	i2c_m_data10_lsb 0
+#define xd_I2C_i2c_m_data11	0xA40B
+#define	i2c_m_data11_pos 0
+#define	i2c_m_data11_len 8
+#define	i2c_m_data11_lsb 0
+#define xd_I2C_i2c_m_cmd_rw	0xA40C
+#define	i2c_m_cmd_rw_pos 0
+#define	i2c_m_cmd_rw_len 1
+#define	i2c_m_cmd_rw_lsb 0
+#define xd_I2C_i2c_m_cmd_rwlen	0xA40C
+#define	i2c_m_cmd_rwlen_pos 3
+#define	i2c_m_cmd_rwlen_len 4
+#define	i2c_m_cmd_rwlen_lsb 0
+#define xd_I2C_i2c_m_status_cmd_exe	0xA40D
+#define	i2c_m_status_cmd_exe_pos 0
+#define	i2c_m_status_cmd_exe_len 1
+#define	i2c_m_status_cmd_exe_lsb 0
+#define xd_I2C_i2c_m_status_wdat_done	0xA40D
+#define	i2c_m_status_wdat_done_pos 1
+#define	i2c_m_status_wdat_done_len 1
+#define	i2c_m_status_wdat_done_lsb 0
+#define xd_I2C_i2c_m_status_wdat_fail	0xA40D
+#define	i2c_m_status_wdat_fail_pos 2
+#define	i2c_m_status_wdat_fail_len 1
+#define	i2c_m_status_wdat_fail_lsb 0
+#define xd_I2C_i2c_m_period	0xA40E
+#define	i2c_m_period_pos 0
+#define	i2c_m_period_len 8
+#define	i2c_m_period_lsb 0
+#define xd_I2C_i2c_m_reg_msb_lsb	0xA40F
+#define	i2c_m_reg_msb_lsb_pos 0
+#define	i2c_m_reg_msb_lsb_len 1
+#define	i2c_m_reg_msb_lsb_lsb 0
+#define xd_I2C_reg_ofdm_rst	0xA40F
+#define	reg_ofdm_rst_pos 1
+#define	reg_ofdm_rst_len 1
+#define	reg_ofdm_rst_lsb 0
+#define xd_I2C_reg_sample_period_on_tuner	0xA40F
+#define	reg_sample_period_on_tuner_pos 2
+#define	reg_sample_period_on_tuner_len 1
+#define	reg_sample_period_on_tuner_lsb 0
+#define xd_I2C_reg_rst_i2c	0xA40F
+#define	reg_rst_i2c_pos 3
+#define	reg_rst_i2c_len 1
+#define	reg_rst_i2c_lsb 0
+#define xd_I2C_reg_ofdm_rst_en	0xA40F
+#define	reg_ofdm_rst_en_pos 4
+#define	reg_ofdm_rst_en_len 1
+#define	reg_ofdm_rst_en_lsb 0
+#define xd_I2C_reg_tuner_sda_sync_on	0xA40F
+#define	reg_tuner_sda_sync_on_pos 5
+#define	reg_tuner_sda_sync_on_len 1
+#define	reg_tuner_sda_sync_on_lsb 0
+#define xd_p_mp2if_data_access_disable_ofsm	0xA500
+#define	mp2if_data_access_disable_ofsm_pos 0
+#define	mp2if_data_access_disable_ofsm_len 1
+#define	mp2if_data_access_disable_ofsm_lsb 0
+#define xd_p_reg_mp2_sw_rst_ofsm	0xA500
+#define	reg_mp2_sw_rst_ofsm_pos 1
+#define	reg_mp2_sw_rst_ofsm_len 1
+#define	reg_mp2_sw_rst_ofsm_lsb 0
+#define xd_p_reg_mp2if_clk_en_ofsm	0xA500
+#define	reg_mp2if_clk_en_ofsm_pos 2
+#define	reg_mp2if_clk_en_ofsm_len 1
+#define	reg_mp2if_clk_en_ofsm_lsb 0
+#define xd_r_mp2if_sync_byte_locked	0xA500
+#define	mp2if_sync_byte_locked_pos 3
+#define	mp2if_sync_byte_locked_len 1
+#define	mp2if_sync_byte_locked_lsb 0
+#define xd_r_mp2if_ts_not_188	0xA500
+#define	mp2if_ts_not_188_pos 4
+#define	mp2if_ts_not_188_len 1
+#define	mp2if_ts_not_188_lsb 0
+#define xd_r_mp2if_psb_empty	0xA500
+#define	mp2if_psb_empty_pos 5
+#define	mp2if_psb_empty_len 1
+#define	mp2if_psb_empty_lsb 0
+#define xd_r_mp2if_psb_overflow	0xA500
+#define	mp2if_psb_overflow_pos 6
+#define	mp2if_psb_overflow_len 1
+#define	mp2if_psb_overflow_lsb 0
+#define xd_p_mp2if_keep_sf_sync_byte_ofsm	0xA500
+#define	mp2if_keep_sf_sync_byte_ofsm_pos 7
+#define	mp2if_keep_sf_sync_byte_ofsm_len 1
+#define	mp2if_keep_sf_sync_byte_ofsm_lsb 0
+#define xd_r_mp2if_psb_mp2if_num_pkt	0xA501
+#define	mp2if_psb_mp2if_num_pkt_pos 0
+#define	mp2if_psb_mp2if_num_pkt_len 6
+#define	mp2if_psb_mp2if_num_pkt_lsb 0
+#define xd_p_reg_mpeg_full_speed_ofsm	0xA501
+#define	reg_mpeg_full_speed_ofsm_pos 6
+#define	reg_mpeg_full_speed_ofsm_len 1
+#define	reg_mpeg_full_speed_ofsm_lsb 0
+#define xd_p_mp2if_mpeg_ser_mode_ofsm	0xA501
+#define	mp2if_mpeg_ser_mode_ofsm_pos 7
+#define	mp2if_mpeg_ser_mode_ofsm_len 1
+#define	mp2if_mpeg_ser_mode_ofsm_lsb 0
+#define xd_p_reg_sw_mon51	0xA600
+#define	reg_sw_mon51_pos 0
+#define	reg_sw_mon51_len 8
+#define	reg_sw_mon51_lsb 0
+#define xd_p_reg_top_pcsel	0xA601
+#define	reg_top_pcsel_pos 0
+#define	reg_top_pcsel_len 1
+#define	reg_top_pcsel_lsb 0
+#define xd_p_reg_top_rs232	0xA601
+#define	reg_top_rs232_pos 1
+#define	reg_top_rs232_len 1
+#define	reg_top_rs232_lsb 0
+#define xd_p_reg_top_pcout	0xA601
+#define	reg_top_pcout_pos 2
+#define	reg_top_pcout_len 1
+#define	reg_top_pcout_lsb 0
+#define xd_p_reg_top_debug	0xA601
+#define	reg_top_debug_pos 3
+#define	reg_top_debug_len 1
+#define	reg_top_debug_lsb 0
+#define xd_p_reg_top_adcdly	0xA601
+#define	reg_top_adcdly_pos 4
+#define	reg_top_adcdly_len 2
+#define	reg_top_adcdly_lsb 0
+#define xd_p_reg_top_pwrdw	0xA601
+#define	reg_top_pwrdw_pos 6
+#define	reg_top_pwrdw_len 1
+#define	reg_top_pwrdw_lsb 0
+#define xd_p_reg_top_pwrdw_inv	0xA601
+#define	reg_top_pwrdw_inv_pos 7
+#define	reg_top_pwrdw_inv_len 1
+#define	reg_top_pwrdw_inv_lsb 0
+#define xd_p_reg_top_int_inv	0xA602
+#define	reg_top_int_inv_pos 0
+#define	reg_top_int_inv_len 1
+#define	reg_top_int_inv_lsb 0
+#define xd_p_reg_top_dio_sel	0xA602
+#define	reg_top_dio_sel_pos 1
+#define	reg_top_dio_sel_len 1
+#define	reg_top_dio_sel_lsb 0
+#define xd_p_reg_top_gpioon0	0xA603
+#define	reg_top_gpioon0_pos 0
+#define	reg_top_gpioon0_len 1
+#define	reg_top_gpioon0_lsb 0
+#define xd_p_reg_top_gpioon1	0xA603
+#define	reg_top_gpioon1_pos 1
+#define	reg_top_gpioon1_len 1
+#define	reg_top_gpioon1_lsb 0
+#define xd_p_reg_top_gpioon2	0xA603
+#define	reg_top_gpioon2_pos 2
+#define	reg_top_gpioon2_len 1
+#define	reg_top_gpioon2_lsb 0
+#define xd_p_reg_top_gpioon3	0xA603
+#define	reg_top_gpioon3_pos 3
+#define	reg_top_gpioon3_len 1
+#define	reg_top_gpioon3_lsb 0
+#define xd_p_reg_top_lockon1	0xA603
+#define	reg_top_lockon1_pos 4
+#define	reg_top_lockon1_len 1
+#define	reg_top_lockon1_lsb 0
+#define xd_p_reg_top_lockon2	0xA603
+#define	reg_top_lockon2_pos 5
+#define	reg_top_lockon2_len 1
+#define	reg_top_lockon2_lsb 0
+#define xd_p_reg_top_gpioo0	0xA604
+#define	reg_top_gpioo0_pos 0
+#define	reg_top_gpioo0_len 1
+#define	reg_top_gpioo0_lsb 0
+#define xd_p_reg_top_gpioo1	0xA604
+#define	reg_top_gpioo1_pos 1
+#define	reg_top_gpioo1_len 1
+#define	reg_top_gpioo1_lsb 0
+#define xd_p_reg_top_gpioo2	0xA604
+#define	reg_top_gpioo2_pos 2
+#define	reg_top_gpioo2_len 1
+#define	reg_top_gpioo2_lsb 0
+#define xd_p_reg_top_gpioo3	0xA604
+#define	reg_top_gpioo3_pos 3
+#define	reg_top_gpioo3_len 1
+#define	reg_top_gpioo3_lsb 0
+#define xd_p_reg_top_lock1	0xA604
+#define	reg_top_lock1_pos 4
+#define	reg_top_lock1_len 1
+#define	reg_top_lock1_lsb 0
+#define xd_p_reg_top_lock2	0xA604
+#define	reg_top_lock2_pos 5
+#define	reg_top_lock2_len 1
+#define	reg_top_lock2_lsb 0
+#define xd_p_reg_top_gpioen0	0xA605
+#define	reg_top_gpioen0_pos 0
+#define	reg_top_gpioen0_len 1
+#define	reg_top_gpioen0_lsb 0
+#define xd_p_reg_top_gpioen1	0xA605
+#define	reg_top_gpioen1_pos 1
+#define	reg_top_gpioen1_len 1
+#define	reg_top_gpioen1_lsb 0
+#define xd_p_reg_top_gpioen2	0xA605
+#define	reg_top_gpioen2_pos 2
+#define	reg_top_gpioen2_len 1
+#define	reg_top_gpioen2_lsb 0
+#define xd_p_reg_top_gpioen3	0xA605
+#define	reg_top_gpioen3_pos 3
+#define	reg_top_gpioen3_len 1
+#define	reg_top_gpioen3_lsb 0
+#define xd_p_reg_top_locken1	0xA605
+#define	reg_top_locken1_pos 4
+#define	reg_top_locken1_len 1
+#define	reg_top_locken1_lsb 0
+#define xd_p_reg_top_locken2	0xA605
+#define	reg_top_locken2_pos 5
+#define	reg_top_locken2_len 1
+#define	reg_top_locken2_lsb 0
+#define xd_r_reg_top_gpioi0	0xA606
+#define	reg_top_gpioi0_pos 0
+#define	reg_top_gpioi0_len 1
+#define	reg_top_gpioi0_lsb 0
+#define xd_r_reg_top_gpioi1	0xA606
+#define	reg_top_gpioi1_pos 1
+#define	reg_top_gpioi1_len 1
+#define	reg_top_gpioi1_lsb 0
+#define xd_r_reg_top_gpioi2	0xA606
+#define	reg_top_gpioi2_pos 2
+#define	reg_top_gpioi2_len 1
+#define	reg_top_gpioi2_lsb 0
+#define xd_r_reg_top_gpioi3	0xA606
+#define	reg_top_gpioi3_pos 3
+#define	reg_top_gpioi3_len 1
+#define	reg_top_gpioi3_lsb 0
+#define xd_r_reg_top_locki1	0xA606
+#define	reg_top_locki1_pos 4
+#define	reg_top_locki1_len 1
+#define	reg_top_locki1_lsb 0
+#define xd_r_reg_top_locki2	0xA606
+#define	reg_top_locki2_pos 5
+#define	reg_top_locki2_len 1
+#define	reg_top_locki2_lsb 0
+#define xd_p_reg_dummy_7_0	0xA608
+#define	reg_dummy_7_0_pos 0
+#define	reg_dummy_7_0_len 8
+#define	reg_dummy_7_0_lsb 0
+#define xd_p_reg_dummy_15_8	0xA609
+#define	reg_dummy_15_8_pos 0
+#define	reg_dummy_15_8_len 8
+#define	reg_dummy_15_8_lsb 8
+#define xd_p_reg_dummy_23_16	0xA60A
+#define	reg_dummy_23_16_pos 0
+#define	reg_dummy_23_16_len 8
+#define	reg_dummy_23_16_lsb 16
+#define xd_p_reg_dummy_31_24	0xA60B
+#define	reg_dummy_31_24_pos 0
+#define	reg_dummy_31_24_len 8
+#define	reg_dummy_31_24_lsb 24
+#define xd_p_reg_dummy_39_32	0xA60C
+#define	reg_dummy_39_32_pos 0
+#define	reg_dummy_39_32_len 8
+#define	reg_dummy_39_32_lsb 32
+#define xd_p_reg_dummy_47_40	0xA60D
+#define	reg_dummy_47_40_pos 0
+#define	reg_dummy_47_40_len 8
+#define	reg_dummy_47_40_lsb 40
+#define xd_p_reg_dummy_55_48	0xA60E
+#define	reg_dummy_55_48_pos 0
+#define	reg_dummy_55_48_len 8
+#define	reg_dummy_55_48_lsb 48
+#define xd_p_reg_dummy_63_56	0xA60F
+#define	reg_dummy_63_56_pos 0
+#define	reg_dummy_63_56_len 8
+#define	reg_dummy_63_56_lsb 56
+#define xd_p_reg_dummy_71_64	0xA610
+#define	reg_dummy_71_64_pos 0
+#define	reg_dummy_71_64_len 8
+#define	reg_dummy_71_64_lsb 64
+#define xd_p_reg_dummy_79_72	0xA611
+#define	reg_dummy_79_72_pos 0
+#define	reg_dummy_79_72_len 8
+#define	reg_dummy_79_72_lsb 72
+#define xd_p_reg_dummy_87_80	0xA612
+#define	reg_dummy_87_80_pos 0
+#define	reg_dummy_87_80_len 8
+#define	reg_dummy_87_80_lsb 80
+#define xd_p_reg_dummy_95_88	0xA613
+#define	reg_dummy_95_88_pos 0
+#define	reg_dummy_95_88_len 8
+#define	reg_dummy_95_88_lsb 88
+#define xd_p_reg_dummy_103_96	0xA614
+#define	reg_dummy_103_96_pos 0
+#define	reg_dummy_103_96_len 8
+#define	reg_dummy_103_96_lsb 96
+
+#define xd_p_reg_unplug_flag	0xA615
+#define	reg_unplug_flag_pos 0
+#define	reg_unplug_flag_len 1
+#define	reg_unplug_flag_lsb 104
+
+#define xd_p_reg_api_dca_stes_request	0xA615
+#define	reg_api_dca_stes_request_pos 1
+#define	reg_api_dca_stes_request_len 1
+#define	reg_api_dca_stes_request_lsb 0
+
+#define xd_p_reg_back_to_dca_flag	0xA615
+#define	reg_back_to_dca_flag_pos 2
+#define	reg_back_to_dca_flag_len 1
+#define	reg_back_to_dca_flag_lsb 106
+
+#define xd_p_reg_api_retrain_request	0xA615
+#define	reg_api_retrain_request_pos 3
+#define	reg_api_retrain_request_len 1
+#define	reg_api_retrain_request_lsb 0
+
+#define xd_p_reg_Dyn_Top_Try_flag	0xA615
+#define	reg_Dyn_Top_Try_flag_pos 3
+#define	reg_Dyn_Top_Try_flag_len 1
+#define	reg_Dyn_Top_Try_flag_lsb 107
+
+#define xd_p_reg_API_retrain_freeze_flag	0xA615
+#define	reg_API_retrain_freeze_flag_pos 4
+#define	reg_API_retrain_freeze_flag_len 1
+#define	reg_API_retrain_freeze_flag_lsb 108
+
+#define xd_p_reg_dummy_111_104	0xA615
+#define	reg_dummy_111_104_pos 0
+#define	reg_dummy_111_104_len 8
+#define	reg_dummy_111_104_lsb 104
+#define xd_p_reg_dummy_119_112	0xA616
+#define	reg_dummy_119_112_pos 0
+#define	reg_dummy_119_112_len 8
+#define	reg_dummy_119_112_lsb 112
+#define xd_p_reg_dummy_127_120	0xA617
+#define	reg_dummy_127_120_pos 0
+#define	reg_dummy_127_120_len 8
+#define	reg_dummy_127_120_lsb 120
+#define xd_p_reg_dummy_135_128	0xA618
+#define	reg_dummy_135_128_pos 0
+#define	reg_dummy_135_128_len 8
+#define	reg_dummy_135_128_lsb 128
+
+#define xd_p_reg_dummy_143_136	0xA619
+#define	reg_dummy_143_136_pos 0
+#define	reg_dummy_143_136_len 8
+#define	reg_dummy_143_136_lsb 136
+
+#define xd_p_reg_CCIR_dis	0xA619
+#define	reg_CCIR_dis_pos 0
+#define	reg_CCIR_dis_len 1
+#define	reg_CCIR_dis_lsb 0
+
+#define xd_p_reg_dummy_151_144	0xA61A
+#define	reg_dummy_151_144_pos 0
+#define	reg_dummy_151_144_len 8
+#define	reg_dummy_151_144_lsb 144
+
+#define xd_p_reg_dummy_159_152	0xA61B
+#define	reg_dummy_159_152_pos 0
+#define	reg_dummy_159_152_len 8
+#define	reg_dummy_159_152_lsb 152
+
+#define xd_p_reg_dummy_167_160	0xA61C
+#define	reg_dummy_167_160_pos 0
+#define	reg_dummy_167_160_len 8
+#define	reg_dummy_167_160_lsb 160
+
+#define xd_p_reg_dummy_175_168	0xA61D
+#define	reg_dummy_175_168_pos 0
+#define	reg_dummy_175_168_len 8
+#define	reg_dummy_175_168_lsb 168
+
+#define xd_p_reg_dummy_183_176	0xA61E
+#define	reg_dummy_183_176_pos 0
+#define	reg_dummy_183_176_len 8
+#define	reg_dummy_183_176_lsb 176
+
+#define xd_p_reg_ofsm_read_rbc_en	0xA61E
+#define	reg_ofsm_read_rbc_en_pos 2
+#define	reg_ofsm_read_rbc_en_len 1
+#define	reg_ofsm_read_rbc_en_lsb 0
+
+#define xd_p_reg_ce_filter_selection_dis	0xA61E
+#define	reg_ce_filter_selection_dis_pos 1
+#define	reg_ce_filter_selection_dis_len 1
+#define	reg_ce_filter_selection_dis_lsb 0
+
+#define xd_p_reg_OFSM_version_control_7_0	0xA611
+#define	reg_OFSM_version_control_7_0_pos 0
+#define	reg_OFSM_version_control_7_0_len 8
+#define	reg_OFSM_version_control_7_0_lsb 0
+
+#define xd_p_reg_OFSM_version_control_15_8	0xA61F
+#define	reg_OFSM_version_control_15_8_pos 0
+#define	reg_OFSM_version_control_15_8_len 8
+#define	reg_OFSM_version_control_15_8_lsb 0
+
+#define xd_p_reg_OFSM_version_control_23_16	0xA620
+#define	reg_OFSM_version_control_23_16_pos 0
+#define	reg_OFSM_version_control_23_16_len 8
+#define	reg_OFSM_version_control_23_16_lsb 0
+
+#define xd_p_reg_dummy_191_184	0xA61F
+#define	reg_dummy_191_184_pos 0
+#define	reg_dummy_191_184_len 8
+#define	reg_dummy_191_184_lsb 184
+
+#define xd_p_reg_dummy_199_192	0xA620
+#define	reg_dummy_199_192_pos 0
+#define	reg_dummy_199_192_len 8
+#define	reg_dummy_199_192_lsb 192
+
+#define xd_p_reg_ce_en	0xABC0
+#define	reg_ce_en_pos 0
+#define	reg_ce_en_len 1
+#define	reg_ce_en_lsb 0
+#define xd_p_reg_ce_fctrl_en	0xABC0
+#define	reg_ce_fctrl_en_pos 1
+#define	reg_ce_fctrl_en_len 1
+#define	reg_ce_fctrl_en_lsb 0
+#define xd_p_reg_ce_fste_tdi	0xABC0
+#define	reg_ce_fste_tdi_pos 2
+#define	reg_ce_fste_tdi_len 1
+#define	reg_ce_fste_tdi_lsb 0
+#define xd_p_reg_ce_dynamic	0xABC0
+#define	reg_ce_dynamic_pos 3
+#define	reg_ce_dynamic_len 1
+#define	reg_ce_dynamic_lsb 0
+#define xd_p_reg_ce_conf	0xABC0
+#define	reg_ce_conf_pos 4
+#define	reg_ce_conf_len 2
+#define	reg_ce_conf_lsb 0
+#define xd_p_reg_ce_dyn12	0xABC0
+#define	reg_ce_dyn12_pos 6
+#define	reg_ce_dyn12_len 1
+#define	reg_ce_dyn12_lsb 0
+#define xd_p_reg_ce_derot_en	0xABC0
+#define	reg_ce_derot_en_pos 7
+#define	reg_ce_derot_en_len 1
+#define	reg_ce_derot_en_lsb 0
+#define xd_p_reg_ce_dynamic_th_7_0	0xABC1
+#define	reg_ce_dynamic_th_7_0_pos 0
+#define	reg_ce_dynamic_th_7_0_len 8
+#define	reg_ce_dynamic_th_7_0_lsb 0
+#define xd_p_reg_ce_dynamic_th_15_8	0xABC2
+#define	reg_ce_dynamic_th_15_8_pos 0
+#define	reg_ce_dynamic_th_15_8_len 8
+#define	reg_ce_dynamic_th_15_8_lsb 8
+#define xd_p_reg_ce_s1	0xABC3
+#define	reg_ce_s1_pos 0
+#define	reg_ce_s1_len 5
+#define	reg_ce_s1_lsb 0
+#define xd_p_reg_ce_var_forced_value	0xABC3
+#define	reg_ce_var_forced_value_pos 5
+#define	reg_ce_var_forced_value_len 3
+#define	reg_ce_var_forced_value_lsb 0
+#define xd_p_reg_ce_data_im_7_0	0xABC4
+#define	reg_ce_data_im_7_0_pos 0
+#define	reg_ce_data_im_7_0_len 8
+#define	reg_ce_data_im_7_0_lsb 0
+#define xd_p_reg_ce_data_im_8	0xABC5
+#define	reg_ce_data_im_8_pos 0
+#define	reg_ce_data_im_8_len 1
+#define	reg_ce_data_im_8_lsb 0
+#define xd_p_reg_ce_data_re_6_0	0xABC5
+#define	reg_ce_data_re_6_0_pos 1
+#define	reg_ce_data_re_6_0_len 7
+#define	reg_ce_data_re_6_0_lsb 0
+#define xd_p_reg_ce_data_re_8_7	0xABC6
+#define	reg_ce_data_re_8_7_pos 0
+#define	reg_ce_data_re_8_7_len 2
+#define	reg_ce_data_re_8_7_lsb 7
+#define xd_p_reg_ce_tone_5_0	0xABC6
+#define	reg_ce_tone_5_0_pos 2
+#define	reg_ce_tone_5_0_len 6
+#define	reg_ce_tone_5_0_lsb 0
+#define xd_p_reg_ce_tone_12_6	0xABC7
+#define	reg_ce_tone_12_6_pos 0
+#define	reg_ce_tone_12_6_len 7
+#define	reg_ce_tone_12_6_lsb 6
+#define xd_p_reg_ce_centroid_drift_th	0xABC8
+#define	reg_ce_centroid_drift_th_pos 0
+#define	reg_ce_centroid_drift_th_len 8
+#define	reg_ce_centroid_drift_th_lsb 0
+#define xd_p_reg_ce_centroid_count_max	0xABC9
+#define	reg_ce_centroid_count_max_pos 0
+#define	reg_ce_centroid_count_max_len 4
+#define	reg_ce_centroid_count_max_lsb 0
+#define xd_p_reg_ce_centroid_bias_inc_7_0	0xABCA
+#define	reg_ce_centroid_bias_inc_7_0_pos 0
+#define	reg_ce_centroid_bias_inc_7_0_len 8
+#define	reg_ce_centroid_bias_inc_7_0_lsb 0
+#define xd_p_reg_ce_centroid_bias_inc_8	0xABCB
+#define	reg_ce_centroid_bias_inc_8_pos 0
+#define	reg_ce_centroid_bias_inc_8_len 1
+#define	reg_ce_centroid_bias_inc_8_lsb 0
+#define xd_p_reg_ce_var_th0_7_0	0xABCC
+#define	reg_ce_var_th0_7_0_pos 0
+#define	reg_ce_var_th0_7_0_len 8
+#define	reg_ce_var_th0_7_0_lsb 0
+#define xd_p_reg_ce_var_th0_15_8	0xABCD
+#define	reg_ce_var_th0_15_8_pos 0
+#define	reg_ce_var_th0_15_8_len 8
+#define	reg_ce_var_th0_15_8_lsb 8
+#define xd_p_reg_ce_var_th1_7_0	0xABCE
+#define	reg_ce_var_th1_7_0_pos 0
+#define	reg_ce_var_th1_7_0_len 8
+#define	reg_ce_var_th1_7_0_lsb 0
+#define xd_p_reg_ce_var_th1_15_8	0xABCF
+#define	reg_ce_var_th1_15_8_pos 0
+#define	reg_ce_var_th1_15_8_len 8
+#define	reg_ce_var_th1_15_8_lsb 8
+#define xd_p_reg_ce_var_th2_7_0	0xABD0
+#define	reg_ce_var_th2_7_0_pos 0
+#define	reg_ce_var_th2_7_0_len 8
+#define	reg_ce_var_th2_7_0_lsb 0
+#define xd_p_reg_ce_var_th2_15_8	0xABD1
+#define	reg_ce_var_th2_15_8_pos 0
+#define	reg_ce_var_th2_15_8_len 8
+#define	reg_ce_var_th2_15_8_lsb 8
+#define xd_p_reg_ce_var_th3_7_0	0xABD2
+#define	reg_ce_var_th3_7_0_pos 0
+#define	reg_ce_var_th3_7_0_len 8
+#define	reg_ce_var_th3_7_0_lsb 0
+#define xd_p_reg_ce_var_th3_15_8	0xABD3
+#define	reg_ce_var_th3_15_8_pos 0
+#define	reg_ce_var_th3_15_8_len 8
+#define	reg_ce_var_th3_15_8_lsb 8
+#define xd_p_reg_ce_var_th4_7_0	0xABD4
+#define	reg_ce_var_th4_7_0_pos 0
+#define	reg_ce_var_th4_7_0_len 8
+#define	reg_ce_var_th4_7_0_lsb 0
+#define xd_p_reg_ce_var_th4_15_8	0xABD5
+#define	reg_ce_var_th4_15_8_pos 0
+#define	reg_ce_var_th4_15_8_len 8
+#define	reg_ce_var_th4_15_8_lsb 8
+#define xd_p_reg_ce_var_th5_7_0	0xABD6
+#define	reg_ce_var_th5_7_0_pos 0
+#define	reg_ce_var_th5_7_0_len 8
+#define	reg_ce_var_th5_7_0_lsb 0
+#define xd_p_reg_ce_var_th5_15_8	0xABD7
+#define	reg_ce_var_th5_15_8_pos 0
+#define	reg_ce_var_th5_15_8_len 8
+#define	reg_ce_var_th5_15_8_lsb 8
+#define xd_p_reg_ce_var_th6_7_0	0xABD8
+#define	reg_ce_var_th6_7_0_pos 0
+#define	reg_ce_var_th6_7_0_len 8
+#define	reg_ce_var_th6_7_0_lsb 0
+#define xd_p_reg_ce_var_th6_15_8	0xABD9
+#define	reg_ce_var_th6_15_8_pos 0
+#define	reg_ce_var_th6_15_8_len 8
+#define	reg_ce_var_th6_15_8_lsb 8
+#define xd_p_reg_ce_fctrl_reset	0xABDA
+#define	reg_ce_fctrl_reset_pos 0
+#define	reg_ce_fctrl_reset_len 1
+#define	reg_ce_fctrl_reset_lsb 0
+#define xd_p_reg_ce_cent_auto_clr_en	0xABDA
+#define	reg_ce_cent_auto_clr_en_pos 1
+#define	reg_ce_cent_auto_clr_en_len 1
+#define	reg_ce_cent_auto_clr_en_lsb 0
+#define xd_p_reg_ce_fctrl_auto_reset_en	0xABDA
+#define	reg_ce_fctrl_auto_reset_en_pos 2
+#define	reg_ce_fctrl_auto_reset_en_len 1
+#define	reg_ce_fctrl_auto_reset_en_lsb 0
+#define xd_p_reg_ce_var_forced_en	0xABDA
+#define	reg_ce_var_forced_en_pos 3
+#define	reg_ce_var_forced_en_len 1
+#define	reg_ce_var_forced_en_lsb 0
+#define xd_p_reg_ce_cent_forced_en	0xABDA
+#define	reg_ce_cent_forced_en_pos 4
+#define	reg_ce_cent_forced_en_len 1
+#define	reg_ce_cent_forced_en_lsb 0
+#define xd_p_reg_ce_var_max	0xABDA
+#define	reg_ce_var_max_pos 5
+#define	reg_ce_var_max_len 3
+#define	reg_ce_var_max_lsb 0
+#define xd_p_reg_ce_cent_forced_value_7_0	0xABDB
+#define	reg_ce_cent_forced_value_7_0_pos 0
+#define	reg_ce_cent_forced_value_7_0_len 8
+#define	reg_ce_cent_forced_value_7_0_lsb 0
+#define xd_p_reg_ce_cent_forced_value_11_8	0xABDC
+#define	reg_ce_cent_forced_value_11_8_pos 0
+#define	reg_ce_cent_forced_value_11_8_len 4
+#define	reg_ce_cent_forced_value_11_8_lsb 8
+#define xd_p_reg_ce_fctrl_rd	0xABDD
+#define	reg_ce_fctrl_rd_pos 0
+#define	reg_ce_fctrl_rd_len 1
+#define	reg_ce_fctrl_rd_lsb 0
+#define xd_p_reg_ce_centroid_max_6_0	0xABDD
+#define	reg_ce_centroid_max_6_0_pos 1
+#define	reg_ce_centroid_max_6_0_len 7
+#define	reg_ce_centroid_max_6_0_lsb 0
+#define xd_p_reg_ce_centroid_max_11_7	0xABDE
+#define	reg_ce_centroid_max_11_7_pos 0
+#define	reg_ce_centroid_max_11_7_len 5
+#define	reg_ce_centroid_max_11_7_lsb 7
+#define xd_p_reg_ce_var	0xABDF
+#define	reg_ce_var_pos 0
+#define	reg_ce_var_len 3
+#define	reg_ce_var_lsb 0
+#define xd_p_reg_ce_fctrl_rdy	0xABDF
+#define	reg_ce_fctrl_rdy_pos 3
+#define	reg_ce_fctrl_rdy_len 1
+#define	reg_ce_fctrl_rdy_lsb 0
+#define xd_p_reg_ce_centroid_out_3_0	0xABDF
+#define	reg_ce_centroid_out_3_0_pos 4
+#define	reg_ce_centroid_out_3_0_len 4
+#define	reg_ce_centroid_out_3_0_lsb 0
+#define xd_p_reg_ce_centroid_out_11_4	0xABE0
+#define	reg_ce_centroid_out_11_4_pos 0
+#define	reg_ce_centroid_out_11_4_len 8
+#define	reg_ce_centroid_out_11_4_lsb 4
+#define xd_p_reg_ce_bias_7_0	0xABE1
+#define	reg_ce_bias_7_0_pos 0
+#define	reg_ce_bias_7_0_len 8
+#define	reg_ce_bias_7_0_lsb 0
+#define xd_p_reg_ce_bias_11_8	0xABE2
+#define	reg_ce_bias_11_8_pos 0
+#define	reg_ce_bias_11_8_len 4
+#define	reg_ce_bias_11_8_lsb 8
+#define xd_p_reg_ce_m1_3_0	0xABE2
+#define	reg_ce_m1_3_0_pos 4
+#define	reg_ce_m1_3_0_len 4
+#define	reg_ce_m1_3_0_lsb 0
+#define xd_p_reg_ce_m1_11_4	0xABE3
+#define	reg_ce_m1_11_4_pos 0
+#define	reg_ce_m1_11_4_len 8
+#define	reg_ce_m1_11_4_lsb 4
+#define xd_p_reg_ce_rh0_7_0	0xABE4
+#define	reg_ce_rh0_7_0_pos 0
+#define	reg_ce_rh0_7_0_len 8
+#define	reg_ce_rh0_7_0_lsb 0
+#define xd_p_reg_ce_rh0_15_8	0xABE5
+#define	reg_ce_rh0_15_8_pos 0
+#define	reg_ce_rh0_15_8_len 8
+#define	reg_ce_rh0_15_8_lsb 8
+#define xd_p_reg_ce_rh0_23_16	0xABE6
+#define	reg_ce_rh0_23_16_pos 0
+#define	reg_ce_rh0_23_16_len 8
+#define	reg_ce_rh0_23_16_lsb 16
+#define xd_p_reg_ce_rh0_31_24	0xABE7
+#define	reg_ce_rh0_31_24_pos 0
+#define	reg_ce_rh0_31_24_len 8
+#define	reg_ce_rh0_31_24_lsb 24
+#define xd_p_reg_ce_rh3_real_7_0	0xABE8
+#define	reg_ce_rh3_real_7_0_pos 0
+#define	reg_ce_rh3_real_7_0_len 8
+#define	reg_ce_rh3_real_7_0_lsb 0
+#define xd_p_reg_ce_rh3_real_15_8	0xABE9
+#define	reg_ce_rh3_real_15_8_pos 0
+#define	reg_ce_rh3_real_15_8_len 8
+#define	reg_ce_rh3_real_15_8_lsb 8
+#define xd_p_reg_ce_rh3_real_23_16	0xABEA
+#define	reg_ce_rh3_real_23_16_pos 0
+#define	reg_ce_rh3_real_23_16_len 8
+#define	reg_ce_rh3_real_23_16_lsb 16
+#define xd_p_reg_ce_rh3_real_31_24	0xABEB
+#define	reg_ce_rh3_real_31_24_pos 0
+#define	reg_ce_rh3_real_31_24_len 8
+#define	reg_ce_rh3_real_31_24_lsb 24
+#define xd_p_reg_ce_rh3_imag_7_0	0xABEC
+#define	reg_ce_rh3_imag_7_0_pos 0
+#define	reg_ce_rh3_imag_7_0_len 8
+#define	reg_ce_rh3_imag_7_0_lsb 0
+#define xd_p_reg_ce_rh3_imag_15_8	0xABED
+#define	reg_ce_rh3_imag_15_8_pos 0
+#define	reg_ce_rh3_imag_15_8_len 8
+#define	reg_ce_rh3_imag_15_8_lsb 8
+#define xd_p_reg_ce_rh3_imag_23_16	0xABEE
+#define	reg_ce_rh3_imag_23_16_pos 0
+#define	reg_ce_rh3_imag_23_16_len 8
+#define	reg_ce_rh3_imag_23_16_lsb 16
+#define xd_p_reg_ce_rh3_imag_31_24	0xABEF
+#define	reg_ce_rh3_imag_31_24_pos 0
+#define	reg_ce_rh3_imag_31_24_len 8
+#define	reg_ce_rh3_imag_31_24_lsb 24
+#define xd_p_reg_feq_fix_eh2_7_0	0xABF0
+#define	reg_feq_fix_eh2_7_0_pos 0
+#define	reg_feq_fix_eh2_7_0_len 8
+#define	reg_feq_fix_eh2_7_0_lsb 0
+#define xd_p_reg_feq_fix_eh2_15_8	0xABF1
+#define	reg_feq_fix_eh2_15_8_pos 0
+#define	reg_feq_fix_eh2_15_8_len 8
+#define	reg_feq_fix_eh2_15_8_lsb 8
+#define xd_p_reg_feq_fix_eh2_23_16	0xABF2
+#define	reg_feq_fix_eh2_23_16_pos 0
+#define	reg_feq_fix_eh2_23_16_len 8
+#define	reg_feq_fix_eh2_23_16_lsb 16
+#define xd_p_reg_feq_fix_eh2_31_24	0xABF3
+#define	reg_feq_fix_eh2_31_24_pos 0
+#define	reg_feq_fix_eh2_31_24_len 8
+#define	reg_feq_fix_eh2_31_24_lsb 24
+#define xd_p_reg_ce_m2_central_7_0	0xABF4
+#define	reg_ce_m2_central_7_0_pos 0
+#define	reg_ce_m2_central_7_0_len 8
+#define	reg_ce_m2_central_7_0_lsb 0
+#define xd_p_reg_ce_m2_central_15_8	0xABF5
+#define	reg_ce_m2_central_15_8_pos 0
+#define	reg_ce_m2_central_15_8_len 8
+#define	reg_ce_m2_central_15_8_lsb 8
+#define xd_p_reg_ce_fftshift	0xABF6
+#define	reg_ce_fftshift_pos 0
+#define	reg_ce_fftshift_len 4
+#define	reg_ce_fftshift_lsb 0
+#define xd_p_reg_ce_fftshift1	0xABF6
+#define	reg_ce_fftshift1_pos 4
+#define	reg_ce_fftshift1_len 4
+#define	reg_ce_fftshift1_lsb 0
+#define xd_p_reg_ce_fftshift2	0xABF7
+#define	reg_ce_fftshift2_pos 0
+#define	reg_ce_fftshift2_len 4
+#define	reg_ce_fftshift2_lsb 0
+#define xd_p_reg_ce_top_mobile	0xABF7
+#define	reg_ce_top_mobile_pos 4
+#define	reg_ce_top_mobile_len 1
+#define	reg_ce_top_mobile_lsb 0
+#define xd_p_reg_strong_sginal_detected 0xA2BC
+#define reg_strong_sginal_detected_pos 2
+#define reg_strong_sginal_detected_len 1
+#define reg_strong_sginal_detected_lsb 0
+
+#define XD_MP2IF_BASE                           0xB000
+#define XD_MP2IF_CSR                        (0x00 + XD_MP2IF_BASE)
+#define XD_MP2IF_DMX_CTRL                       (0x03 + XD_MP2IF_BASE)
+#define XD_MP2IF_PID_IDX                        (0x04 + XD_MP2IF_BASE)
+#define XD_MP2IF_PID_DATA_L                     (0x05 + XD_MP2IF_BASE)
+#define XD_MP2IF_PID_DATA_H                     (0x06 + XD_MP2IF_BASE)
+#define XD_MP2IF_MISC                       (0x07 + XD_MP2IF_BASE)
+
+extern struct dvb_frontend *af9005_fe_attach(struct dvb_usb_device *d);
+extern int af9005_read_ofdm_register(struct dvb_usb_device *d, u16 reg,
+				     u8 * value);
+extern int af9005_read_ofdm_registers(struct dvb_usb_device *d, u16 reg,
+				      u8 * values, int len);
+extern int af9005_write_ofdm_register(struct dvb_usb_device *d, u16 reg,
+				      u8 value);
+extern int af9005_write_ofdm_registers(struct dvb_usb_device *d, u16 reg,
+				       u8 * values, int len);
+extern int af9005_read_tuner_registers(struct dvb_usb_device *d, u16 reg,
+				       u8 addr, u8 * values, int len);
+extern int af9005_write_tuner_registers(struct dvb_usb_device *d, u16 reg,
+					u8 * values, int len);
+extern int af9005_read_register_bits(struct dvb_usb_device *d, u16 reg,
+				     u8 pos, u8 len, u8 * value);
+extern int af9005_write_register_bits(struct dvb_usb_device *d, u16 reg,
+				      u8 pos, u8 len, u8 value);
+extern int af9005_send_command(struct dvb_usb_device *d, u8 command,
+			       u8 * wbuf, int wlen, u8 * rbuf, int rlen);
+extern int af9005_read_eeprom(struct dvb_usb_device *d, u8 address,
+			      u8 * values, int len);
+extern int af9005_tuner_attach(struct dvb_usb_adapter *adap);
+extern int af9005_led_control(struct dvb_usb_device *d, int onoff);
+
+extern u8 regmask[8];
+
+/* remote control decoder */
+extern int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len,
+			    u32 * event, int *state);
+extern struct dvb_usb_rc_key af9005_rc_keys[];
+extern int af9005_rc_keys_size;
+
+#endif
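
As a usage sketch (assuming a struct dvb_usb_device *d is in scope), the _pos/_len
companion macros above pair with the register-bit helpers declared at the end of
this header, e.g.:

	u8 strong;
	int ret = af9005_read_register_bits(d, xd_p_reg_strong_sginal_detected,
					    reg_strong_sginal_detected_pos,
					    reg_strong_sginal_detected_len,
					    &strong);
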
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index bac2ae3..04e31cf 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -354,41 +354,35 @@
 /* Callbacks for DVB USB */
 static int cxusb_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap)
 {
-	u8 bpll[4] = { 0x0b, 0xdc, 0x9c, 0xa0 };
-	adap->pll_addr = 0x61;
-	memcpy(adap->pll_init, bpll, 4);
-	adap->pll_desc = &dvb_pll_fmd1216me;
-
-	adap->fe->ops.tuner_ops.init = dvb_usb_tuner_init_i2c;
-	adap->fe->ops.tuner_ops.set_params = dvb_usb_tuner_set_params_i2c;
-
+	dvb_attach(dvb_pll_attach, adap->fe, 0x61, &adap->dev->i2c_adap,
+		   DVB_PLL_FMD1216ME);
 	return 0;
 }
 
 static int cxusb_dee1601_tuner_attach(struct dvb_usb_adapter *adap)
 {
 	dvb_attach(dvb_pll_attach, adap->fe, 0x61,
-		   NULL, &dvb_pll_thomson_dtt7579);
+		   NULL, DVB_PLL_THOMSON_DTT7579);
 	return 0;
 }
 
 static int cxusb_lgz201_tuner_attach(struct dvb_usb_adapter *adap)
 {
-	dvb_attach(dvb_pll_attach, adap->fe, 0x61, NULL, &dvb_pll_lg_z201);
+	dvb_attach(dvb_pll_attach, adap->fe, 0x61, NULL, DVB_PLL_LG_Z201);
 	return 0;
 }
 
 static int cxusb_dtt7579_tuner_attach(struct dvb_usb_adapter *adap)
 {
 	dvb_attach(dvb_pll_attach, adap->fe, 0x60,
-		   NULL, &dvb_pll_thomson_dtt7579);
+		   NULL, DVB_PLL_THOMSON_DTT7579);
 	return 0;
 }
 
 static int cxusb_lgh064f_tuner_attach(struct dvb_usb_adapter *adap)
 {
 	dvb_attach(dvb_pll_attach, adap->fe, 0x61, &adap->dev->i2c_adap,
-		   &dvb_pll_lg_tdvs_h06xf);
+		   DVB_PLL_LG_TDVS_H06XF);
 	return 0;
 }
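
The change above shows the new dvb-pll calling convention used throughout this
series: tuner-attach callbacks no longer fill in pll_addr/pll_init/pll_desc by
hand but pass a DVB_PLL_* identifier to dvb_attach().  A minimal sketch of such
a callback (the function name is illustrative; error handling added as in the
digitv driver below):

	static int example_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap)
	{
		if (dvb_attach(dvb_pll_attach, adap->fe, 0x61,
			       &adap->dev->i2c_adap, DVB_PLL_FMD1216ME) == NULL)
			return -ENODEV;
		return 0;
	}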
 
diff --git a/drivers/media/dvb/dvb-usb/dibusb-common.c b/drivers/media/dvb/dvb-usb/dibusb-common.c
index 5143e42..9a184da 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-common.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-common.c
@@ -295,7 +295,7 @@
 	tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe, 1);
 	if (dvb_attach(mt2060_attach, adap->fe, tun_i2c, &stk3000p_mt2060_config, if1) == NULL) {
 		/* not found - use panasonic pll parameters */
-		if (dvb_attach(dvb_pll_attach, adap->fe, 0x60, tun_i2c, &dvb_pll_env57h1xd5) == NULL)
+		if (dvb_attach(dvb_pll_attach, adap->fe, 0x60, tun_i2c, DVB_PLL_ENV57H1XD5) == NULL)
 			return -ENOMEM;
 	} else {
 		st->mt2060_present = 1;
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mb.c b/drivers/media/dvb/dvb-usb/dibusb-mb.c
index 7a6ae8f..043cada 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mb.c
@@ -14,6 +14,14 @@
  */
 #include "dibusb.h"
 
+static int dib3000mb_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
+{
+	struct dvb_usb_adapter *adap = fe->dvb->priv;
+	struct dibusb_state *st = adap->priv;
+
+	return st->ops.tuner_pass_ctrl(fe, enable, st->tuner_addr);
+}
+
 static int dibusb_dib3000mb_frontend_attach(struct dvb_usb_adapter *adap)
 {
 	struct dib3000_config demod_cfg;
@@ -21,21 +29,34 @@
 
 	demod_cfg.demod_address = 0x8;
 
-	if ((adap->fe = dib3000mb_attach(&demod_cfg,&adap->dev->i2c_adap,&st->ops)) == NULL)
+	if ((adap->fe = dvb_attach(dib3000mb_attach, &demod_cfg,
+				   &adap->dev->i2c_adap, &st->ops)) == NULL)
 		return -ENODEV;
 
-	adap->fe->ops.tuner_ops.init       = dvb_usb_tuner_init_i2c;
-	adap->fe->ops.tuner_ops.set_params = dvb_usb_tuner_set_params_i2c;
-
-	adap->tuner_pass_ctrl = st->ops.tuner_pass_ctrl;
+	adap->fe->ops.i2c_gate_ctrl = dib3000mb_i2c_gate_ctrl;
 
 	return 0;
 }
 
 static int dibusb_thomson_tuner_attach(struct dvb_usb_adapter *adap)
 {
-	adap->pll_addr = 0x61;
-	adap->pll_desc = &dvb_pll_tua6010xs;
+	struct dibusb_state *st = adap->priv;
+
+	st->tuner_addr = 0x61;
+
+	dvb_attach(dvb_pll_attach, adap->fe, 0x61, &adap->dev->i2c_adap,
+		   DVB_PLL_TUA6010XS);
+	return 0;
+}
+
+static int dibusb_panasonic_tuner_attach(struct dvb_usb_adapter *adap)
+{
+	struct dibusb_state *st = adap->priv;
+
+	st->tuner_addr = 0x60;
+
+	dvb_attach(dvb_pll_attach, adap->fe, 0x60, &adap->dev->i2c_adap,
+		   DVB_PLL_TDA665X);
 	return 0;
 }
 
@@ -50,30 +71,28 @@
 		{ .flags = 0,        .buf = b,  .len = 2 },
 		{ .flags = I2C_M_RD, .buf = b2, .len = 1 },
 	};
+	struct dibusb_state *st = adap->priv;
 
 	/* the Panasonic sits on I2C address 0x60, the Thomson on 0x61 */
-	msg[0].addr = msg[1].addr = 0x60;
+	msg[0].addr = msg[1].addr = st->tuner_addr = 0x60;
 
-	if (adap->tuner_pass_ctrl)
-		adap->tuner_pass_ctrl(adap->fe,1,msg[0].addr);
+	if (adap->fe->ops.i2c_gate_ctrl)
+		adap->fe->ops.i2c_gate_ctrl(adap->fe,1);
 
 	if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) {
 		err("tuner i2c write failed.");
 		ret = -EREMOTEIO;
 	}
 
-	if (adap->tuner_pass_ctrl)
-		adap->tuner_pass_ctrl(adap->fe,0,msg[0].addr);
+	if (adap->fe->ops.i2c_gate_ctrl)
+		adap->fe->ops.i2c_gate_ctrl(adap->fe,0);
 
 	if (b2[0] == 0xfe) {
 		info("This device has the Thomson Cable onboard. Which is default.");
-		dibusb_thomson_tuner_attach(adap);
+		ret = dibusb_thomson_tuner_attach(adap);
 	} else {
-		u8 bpll[4] = { 0x0b, 0xf5, 0x85, 0xab };
 		info("This device has the Panasonic ENV77H11D5 onboard.");
-		adap->pll_addr = 0x60;
-		memcpy(adap->pll_init,bpll,4);
-		adap->pll_desc = &dvb_pll_tda665x;
+		ret = dibusb_panasonic_tuner_attach(adap);
 	}
 
 	return ret;
diff --git a/drivers/media/dvb/dvb-usb/dibusb.h b/drivers/media/dvb/dvb-usb/dibusb.h
index b607810..8e847aa 100644
--- a/drivers/media/dvb/dvb-usb/dibusb.h
+++ b/drivers/media/dvb/dvb-usb/dibusb.h
@@ -99,6 +99,7 @@
 struct dibusb_state {
 	struct dib_fe_xfer_ops ops;
 	int mt2060_present;
+	u8 tuner_addr;
 };
 
 struct dibusb_device_state {
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index b5acb11..bca1e09 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -118,7 +118,8 @@
 {
 	struct dvb_usb_adapter *adap = fe->dvb->priv;
 	u8 b[5];
-	dvb_usb_tuner_calc_regs(fe,fep,b, 5);
+
+	fe->ops.tuner_ops.calc_regs(fe, fep, b, sizeof(b));
 	if (fe->ops.i2c_gate_ctrl)
 		fe->ops.i2c_gate_ctrl(fe, 1);
 	return digitv_ctrl_msg(adap->dev, USB_WRITE_TUNER, 0, &b[1], 4, NULL, 0);
@@ -130,12 +131,14 @@
 
 static int digitv_frontend_attach(struct dvb_usb_adapter *adap)
 {
+	struct digitv_state *st = adap->dev->priv;
+
 	if ((adap->fe = dvb_attach(mt352_attach, &digitv_mt352_config, &adap->dev->i2c_adap)) != NULL) {
-		adap->fe->ops.tuner_ops.calc_regs = dvb_usb_tuner_calc_regs;
+		st->is_nxt6000 = 0;
 		return 0;
 	}
 	if ((adap->fe = dvb_attach(nxt6000_attach, &digitv_nxt6000_config, &adap->dev->i2c_adap)) != NULL) {
-		adap->fe->ops.tuner_ops.set_params = digitv_nxt6000_tuner_set_params;
+		st->is_nxt6000 = 1;
 		return 0;
 	}
 	return -EIO;
@@ -143,8 +146,14 @@
 
 static int digitv_tuner_attach(struct dvb_usb_adapter *adap)
 {
-	adap->pll_addr = 0x60;
-	adap->pll_desc = &dvb_pll_tded4;
+	struct digitv_state *st = adap->dev->priv;
+
+	if (!dvb_attach(dvb_pll_attach, adap->fe, 0x60, NULL, DVB_PLL_TDED4))
+		return -ENODEV;
+
+	if (st->is_nxt6000)
+		adap->fe->ops.tuner_ops.set_params = digitv_nxt6000_tuner_set_params;
+
 	return 0;
 }
 
@@ -273,6 +282,8 @@
 	.usb_ctrl = CYPRESS_FX2,
 	.firmware = "dvb-usb-digitv-02.fw",
 
+	.size_of_priv = sizeof(struct digitv_state),
+
 	.num_adapters = 1,
 	.adapter = {
 		{
diff --git a/drivers/media/dvb/dvb-usb/digitv.h b/drivers/media/dvb/dvb-usb/digitv.h
index 477ee42..8b43e3d 100644
--- a/drivers/media/dvb/dvb-usb/digitv.h
+++ b/drivers/media/dvb/dvb-usb/digitv.h
@@ -4,6 +4,10 @@
 #define DVB_USB_LOG_PREFIX "digitv"
 #include "dvb-usb.h"
 
+struct digitv_state {
+	int is_nxt6000;
+};
+
 extern int dvb_usb_digitv_debug;
 #define deb_rc(args...)   dprintk(dvb_usb_digitv_debug,0x01,args)
 
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c b/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
index 088b6de..23428cd 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
@@ -46,82 +46,3 @@
 	d->state &= ~DVB_USB_STATE_I2C;
 	return 0;
 }
-
-int dvb_usb_tuner_init_i2c(struct dvb_frontend *fe)
-{
-	struct dvb_usb_adapter *adap = fe->dvb->priv;
-	struct i2c_msg msg = { .addr = adap->pll_addr, .flags = 0, .buf = adap->pll_init, .len = 4 };
-	int ret = 0;
-
-	/* if pll_desc is not used */
-	if (adap->pll_desc == NULL)
-		return 0;
-
-	if (adap->tuner_pass_ctrl)
-		adap->tuner_pass_ctrl(fe, 1, adap->pll_addr);
-
-	deb_pll("pll init: %x\n",adap->pll_addr);
-	deb_pll("pll-buf: %x %x %x %x\n",adap->pll_init[0], adap->pll_init[1],
-			adap->pll_init[2], adap->pll_init[3]);
-
-	if (fe->ops.i2c_gate_ctrl)
-		fe->ops.i2c_gate_ctrl(fe, 1);
-	if (i2c_transfer (&adap->dev->i2c_adap, &msg, 1) != 1) {
-		err("tuner i2c write failed for pll_init.");
-		ret = -EREMOTEIO;
-	}
-	msleep(1);
-
-	if (adap->tuner_pass_ctrl)
-		adap->tuner_pass_ctrl(fe,0,adap->pll_addr);
-	return ret;
-}
-EXPORT_SYMBOL(dvb_usb_tuner_init_i2c);
-
-int dvb_usb_tuner_calc_regs(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep, u8 *b, int buf_len)
-{
-	struct dvb_usb_adapter *adap = fe->dvb->priv;
-
-	if (buf_len != 5)
-		return -EINVAL;
-	if (adap->pll_desc == NULL)
-		return 0;
-
-	deb_pll("pll addr: %x, freq: %d %p\n",adap->pll_addr, fep->frequency, adap->pll_desc);
-
-	b[0] = adap->pll_addr;
-	dvb_pll_configure(adap->pll_desc, &b[1], fep->frequency, fep->u.ofdm.bandwidth);
-
-	deb_pll("pll-buf: %x %x %x %x %x\n",b[0],b[1],b[2],b[3],b[4]);
-
-	return 5;
-}
-EXPORT_SYMBOL(dvb_usb_tuner_calc_regs);
-
-int dvb_usb_tuner_set_params_i2c(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
-{
-	struct dvb_usb_adapter *adap = fe->dvb->priv;
-	int ret = 0;
-	u8 b[5];
-	struct i2c_msg msg = { .addr = adap->pll_addr, .flags = 0, .buf = &b[1], .len = 4 };
-
-	dvb_usb_tuner_calc_regs(fe,fep,b,5);
-
-	if (adap->tuner_pass_ctrl)
-		adap->tuner_pass_ctrl(fe, 1, adap->pll_addr);
-
-	if (fe->ops.i2c_gate_ctrl)
-		fe->ops.i2c_gate_ctrl(fe, 1);
-
-	if (i2c_transfer(&adap->dev->i2c_adap, &msg, 1) != 1) {
-		err("tuner i2c write failed for pll_set.");
-		ret = -EREMOTEIO;
-	}
-	msleep(1);
-
-	if (adap->tuner_pass_ctrl)
-		adap->tuner_pass_ctrl(fe, 0, adap->pll_addr);
-
-	return ret;
-}
-EXPORT_SYMBOL(dvb_usb_tuner_set_params_i2c);
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 4030816..4dfab02 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -11,7 +11,9 @@
 
 /* Vendor IDs */
 #define USB_VID_ADSTECH				0x06e1
+#define USB_VID_AFATECH				0x15a4
 #define USB_VID_ALCOR_MICRO		0x058f
+#define USB_VID_ALINK				0x05e3
 #define USB_VID_ANCHOR				0x0547
 #define USB_VID_ANUBIS_ELECTRONIC		0x10fd
 #define USB_VID_AVERMEDIA			0x07ca
@@ -35,6 +37,7 @@
 #define USB_VID_MSI				0x0db0
 #define USB_VID_OPERA1				0x695c
 #define USB_VID_PINNACLE			0x2304
+#define USB_VID_TERRATEC			0x0ccd
 #define USB_VID_VISIONPLUS			0x13d3
 #define USB_VID_TWINHAN				0x1822
 #define USB_VID_ULTIMA_ELECTRONIC		0x05d8
@@ -44,6 +47,8 @@
 /* Product IDs */
 #define USB_PID_ADSTECH_USB2_COLD			0xa333
 #define USB_PID_ADSTECH_USB2_WARM			0xa334
+#define USB_PID_AFATECH_AF9005				0x9020
+#define USB_VID_ALINK_DTU				0xf170
 #define USB_PID_AVERMEDIA_DVBT_USB_COLD			0x0001
 #define USB_PID_AVERMEDIA_DVBT_USB_WARM			0x0002
 #define USB_PID_AVERMEDIA_DVBT_USB2_COLD		0xa800
@@ -69,6 +74,7 @@
 #define USB_PID_GRANDTEC_DVBT_USB_WARM			0x0fa1
 #define USB_PID_KWORLD_VSTREAM_COLD			0x17de
 #define USB_PID_KWORLD_VSTREAM_WARM			0x17df
+#define USB_PID_TERRATEC_CINERGY_T_USB_XE		0x0055
 #define USB_PID_TWINHAN_VP7041_COLD			0x3201
 #define USB_PID_TWINHAN_VP7041_WARM			0x3202
 #define USB_PID_TWINHAN_VP7020_COLD			0x3203
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 9200a30..7b9f35b 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -110,7 +110,7 @@
 	input_dev->name = "IR-receiver inside an USB DVB receiver";
 	input_dev->phys = d->rc_phys;
 	usb_to_input_id(d->udev, &input_dev->id);
-	input_dev->cdev.dev = &d->udev->dev;
+	input_dev->dev.parent = &d->udev->dev;
 
 	/* set the bits for the keys */
 	deb_rc("key map size: %d\n", d->props.rc_key_map_size);
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 6f824a5..d1b3c7b 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -297,12 +297,6 @@
 	int feedcount;
 	int pid_filtering;
 
-	/* tuner programming information */
-	u8 pll_addr;
-	u8 pll_init[4];
-	struct dvb_pll_desc *pll_desc;
-	int (*tuner_pass_ctrl) (struct dvb_frontend *, int, u8);
-
 	/* dvb */
 	struct dvb_adapter   dvb_adap;
 	struct dmxdev        dmxdev;
@@ -388,11 +382,6 @@
 /* commonly used remote control parsing */
 extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *);
 
-/* commonly used pll init and set functions */
-extern int dvb_usb_tuner_init_i2c(struct dvb_frontend *);
-extern int dvb_usb_tuner_calc_regs(struct dvb_frontend *, struct dvb_frontend_parameters *, u8 *buf, int buf_len);
-extern int dvb_usb_tuner_set_params_i2c(struct dvb_frontend *, struct dvb_frontend_parameters *);
-
 /* commonly used firmware download types and function */
 struct hexline {
 	u8 len;
diff --git a/drivers/media/dvb/dvb-usb/gl861.c b/drivers/media/dvb/dvb-usb/gl861.c
index e0587e6..f01d99c 100644
--- a/drivers/media/dvb/dvb-usb/gl861.c
+++ b/drivers/media/dvb/dvb-usb/gl861.c
@@ -157,6 +157,7 @@
 
 static struct usb_device_id gl861_table [] = {
 		{ USB_DEVICE(USB_VID_MSI, USB_PID_MSI_MEGASKY580_55801) },
+		{ USB_DEVICE(USB_VID_ALINK, USB_VID_ALINK_DTU) },
 		{ }		/* Terminating entry */
 };
 MODULE_DEVICE_TABLE (usb, gl861_table);
@@ -187,12 +188,16 @@
 	}},
 	.i2c_algo         = &gl861_i2c_algo,
 
-	.num_device_descs = 1,
+	.num_device_descs = 2,
 	.devices = {
 		{   "MSI Mega Sky 55801 DVB-T USB2.0",
 			{ &gl861_table[0], NULL },
 			{ NULL },
 		},
+		{   "A-LINK DTU DVB-T USB2.0",
+			{ &gl861_table[1], NULL },
+			{ NULL },
+		},
 	}
 };
 
diff --git a/drivers/media/dvb/dvb-usb/m920x.c b/drivers/media/dvb/dvb-usb/m920x.c
index c546dde..a956bc5 100644
--- a/drivers/media/dvb/dvb-usb/m920x.c
+++ b/drivers/media/dvb/dvb-usb/m920x.c
@@ -22,6 +22,8 @@
 module_param_named(debug,dvb_usb_m920x_debug, int, 0644);
 MODULE_PARM_DESC(debug, "set debugging level (1=rc (or-able))." DVB_USB_DEBUG_STATUS);
 
+static int m920x_set_filter(struct dvb_usb_device *d, int type, int idx, int pid);
+
 static inline int m920x_read(struct usb_device *udev, u8 request, u16 value,
 			     u16 index, void *data, int size)
 {
@@ -57,7 +59,8 @@
 
 static int m920x_init(struct dvb_usb_device *d, struct m920x_inits *rc_seq)
 {
-	int ret = 0;
+	int ret = 0, i, epi, flags = 0;
+	int adap_enabled[M9206_MAX_ADAPTERS] = { 0 };
 
 	/* Remote controller init. */
 	if (d->props.rc_query) {
@@ -76,9 +79,51 @@
 		deb("Initialising remote control success\n");
 	}
 
+	for (i = 0; i < d->props.num_adapters; i++)
+		flags |= d->adapter[i].props.caps;
+
+	/* Some devices (Dposh) might crash if we attempt to touch them at all. */
+	if (flags & DVB_USB_ADAP_HAS_PID_FILTER) {
+		for (i = 0; i < d->props.num_adapters; i++) {
+			epi = d->adapter[i].props.stream.endpoint - 0x81;
+
+			if (epi < 0 || epi >= M9206_MAX_ADAPTERS) {
+				printk(KERN_INFO "m920x: Unexpected adapter endpoint!\n");
+				return -EINVAL;
+			}
+
+			adap_enabled[epi] = 1;
+		}
+
+		for (i = 0; i < M9206_MAX_ADAPTERS; i++) {
+			if (adap_enabled[i])
+				continue;
+
+			if ((ret = m920x_set_filter(d, 0x81 + i, 0, 0x0)) != 0)
+				return ret;
+
+			if ((ret = m920x_set_filter(d, 0x81 + i, 0, 0x02f5)) != 0)
+				return ret;
+		}
+	}
+
 	return ret;
 }
 
+static int m920x_init_ep(struct usb_interface *intf)
+{
+	struct usb_device *udev = interface_to_usbdev(intf);
+	struct usb_host_interface *alt;
+
+	if ((alt = usb_altnum_to_altsetting(intf, 1)) == NULL) {
+		deb("No alt found!\n");
+		return -ENODEV;
+	}
+
+	return usb_set_interface(udev, alt->desc.bInterfaceNumber,
+				 alt->desc.bAlternateSetting);
+}
+
 static int m920x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
 {
 	struct m920x_state *m = d->priv;
@@ -211,8 +256,7 @@
 };
 
 /* pid filter */
-static int m920x_set_filter(struct dvb_usb_adapter *adap,
-			    int type, int idx, int pid)
+static int m920x_set_filter(struct dvb_usb_device *d, int type, int idx, int pid)
 {
 	int ret = 0;
 
@@ -221,10 +265,10 @@
 
 	pid |= 0x8000;
 
-	if ((ret = m920x_write(adap->dev->udev, M9206_FILTER, pid, (type << 8) | (idx * 4) )) != 0)
+	if ((ret = m920x_write(d->udev, M9206_FILTER, pid, (type << 8) | (idx * 4) )) != 0)
 		return ret;
 
-	if ((ret = m920x_write(adap->dev->udev, M9206_FILTER, 0, (type << 8) | (idx * 4) )) != 0)
+	if ((ret = m920x_write(d->udev, M9206_FILTER, 0, (type << 8) | (idx * 4) )) != 0)
 		return ret;
 
 	return ret;
@@ -233,40 +277,35 @@
 static int m920x_update_filters(struct dvb_usb_adapter *adap)
 {
 	struct m920x_state *m = adap->dev->priv;
-	int enabled = m->filtering_enabled;
+	int enabled = m->filtering_enabled[adap->id];
 	int i, ret = 0, filter = 0;
+	int ep = adap->props.stream.endpoint;
 
 	for (i = 0; i < M9206_MAX_FILTERS; i++)
-		if (m->filters[i] == 8192)
+		if (m->filters[adap->id][i] == 8192)
 			enabled = 0;
 
 	/* Disable all filters */
-	if ((ret = m920x_set_filter(adap, 0x81, 1, enabled)) != 0)
+	if ((ret = m920x_set_filter(adap->dev, ep, 1, enabled)) != 0)
 		return ret;
 
 	for (i = 0; i < M9206_MAX_FILTERS; i++)
-		if ((ret = m920x_set_filter(adap, 0x81, i + 2, 0)) != 0)
+		if ((ret = m920x_set_filter(adap->dev, ep, i + 2, 0)) != 0)
 			return ret;
 
-	if ((ret = m920x_set_filter(adap, 0x82, 0, 0x0)) != 0)
-		return ret;
-
 	/* Set */
 	if (enabled) {
 		for (i = 0; i < M9206_MAX_FILTERS; i++) {
-			if (m->filters[i] == 0)
+			if (m->filters[adap->id][i] == 0)
 				continue;
 
-			if ((ret = m920x_set_filter(adap, 0x81, filter + 2, m->filters[i])) != 0)
+			if ((ret = m920x_set_filter(adap->dev, ep, filter + 2, m->filters[adap->id][i])) != 0)
 				return ret;
 
 			filter++;
 		}
 	}
 
-	if ((ret = m920x_set_filter(adap, 0x82, 0, 0x02f5)) != 0)
-		return ret;
-
 	return ret;
 }
 
@@ -274,7 +313,7 @@
 {
 	struct m920x_state *m = adap->dev->priv;
 
-	m->filtering_enabled = onoff ? 1 : 0;
+	m->filtering_enabled[adap->id] = onoff ? 1 : 0;
 
 	return m920x_update_filters(adap);
 }
@@ -283,7 +322,7 @@
 {
 	struct m920x_state *m = adap->dev->priv;
 
-	m->filters[index] = onoff ? pid : 0;
+	m->filters[adap->id][index] = onoff ? pid : 0;
 
 	return m920x_update_filters(adap);
 }
@@ -368,6 +407,7 @@
 /* demod configurations */
 static int m920x_mt352_demod_init(struct dvb_frontend *fe)
 {
+	int ret;
 	u8 config[] = { CONFIG, 0x3d };
 	u8 clock[] = { CLOCK_CTL, 0x30 };
 	u8 reset[] = { RESET, 0x80 };
@@ -377,17 +417,25 @@
 	u8 unk1[] = { 0x93, 0x1a };
 	u8 unk2[] = { 0xb5, 0x7a };
 
-	mt352_write(fe, config, ARRAY_SIZE(config));
-	mt352_write(fe, clock, ARRAY_SIZE(clock));
-	mt352_write(fe, reset, ARRAY_SIZE(reset));
-	mt352_write(fe, adc_ctl, ARRAY_SIZE(adc_ctl));
-	mt352_write(fe, agc, ARRAY_SIZE(agc));
-	mt352_write(fe, sec_agc, ARRAY_SIZE(sec_agc));
-	mt352_write(fe, unk1, ARRAY_SIZE(unk1));
-	mt352_write(fe, unk2, ARRAY_SIZE(unk2));
-
 	deb("Demod init!\n");
 
+	if ((ret = mt352_write(fe, config, ARRAY_SIZE(config))) != 0)
+		return ret;
+	if ((ret = mt352_write(fe, clock, ARRAY_SIZE(clock))) != 0)
+		return ret;
+	if ((ret = mt352_write(fe, reset, ARRAY_SIZE(reset))) != 0)
+		return ret;
+	if ((ret = mt352_write(fe, adc_ctl, ARRAY_SIZE(adc_ctl))) != 0)
+		return ret;
+	if ((ret = mt352_write(fe, agc, ARRAY_SIZE(agc))) != 0)
+		return ret;
+	if ((ret = mt352_write(fe, sec_agc, ARRAY_SIZE(sec_agc))) != 0)
+		return ret;
+	if ((ret = mt352_write(fe, unk1, ARRAY_SIZE(unk1))) != 0)
+		return ret;
+	if ((ret = mt352_write(fe, unk2, ARRAY_SIZE(unk2))) != 0)
+		return ret;
+
 	return 0;
 }
 
@@ -558,8 +606,7 @@
 static int m920x_probe(struct usb_interface *intf,
 		       const struct usb_device_id *id)
 {
-	struct dvb_usb_device *d;
-	struct usb_host_interface *alt;
+	struct dvb_usb_device *d = NULL;
 	int ret;
 	struct m920x_inits *rc_init_seq = NULL;
 	int bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
@@ -604,23 +651,13 @@
 		 * tvwalkertwin_properties already configured both
 		 * tuners, so there is nothing for us to do here
 		 */
-
-		return -ENODEV;
 	}
 
  found:
-	alt = usb_altnum_to_altsetting(intf, 1);
-	if (alt == NULL) {
-		deb("No alt found!\n");
-		return -ENODEV;
-	}
-
-	ret = usb_set_interface(d->udev, alt->desc.bInterfaceNumber,
-				alt->desc.bAlternateSetting);
-	if (ret < 0)
+	if ((ret = m920x_init_ep(intf)) < 0)
 		return ret;
 
-	if ((ret = m920x_init(d, rc_init_seq)) != 0)
+	if (d && (ret = m920x_init(d, rc_init_seq)) != 0)
 		return ret;
 
 	return ret;
@@ -737,9 +774,9 @@
  *
  * LifeView TV Walker Twin has 1 x M9206, 2 x TDA10046, 2 x TDA8275A
  * TDA10046 #0 is located at i2c address 0x08
- * TDA10046 #1 is located at i2c address 0x0b (presently disabled - not yet working)
+ * TDA10046 #1 is located at i2c address 0x0b
  * TDA8275A #0 is located at i2c address 0x60
- * TDA8275A #1 is located at i2c address 0x61 (presently disabled - not yet working)
+ * TDA8275A #1 is located at i2c address 0x61
  */
 static struct dvb_usb_device_properties tvwalkertwin_properties = {
 	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
@@ -756,7 +793,7 @@
 	.size_of_priv     = sizeof(struct m920x_state),
 
 	.identify_state   = m920x_identify_state,
-	.num_adapters = 1,
+	.num_adapters = 2,
 	.adapter = {{
 		.caps = DVB_USB_ADAP_HAS_PID_FILTER |
 			DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
diff --git a/drivers/media/dvb/dvb-usb/m920x.h b/drivers/media/dvb/dvb-usb/m920x.h
index 2c8942d..3753289 100644
--- a/drivers/media/dvb/dvb-usb/m920x.h
+++ b/drivers/media/dvb/dvb-usb/m920x.h
@@ -18,6 +18,7 @@
 #define M9206_FW	0x30
 
 #define M9206_MAX_FILTERS 8
+#define M9206_MAX_ADAPTERS 2
 
 /*
 sequences found in logs:
@@ -60,8 +61,8 @@
 */
 
 struct m920x_state {
-	u16 filters[M9206_MAX_FILTERS];
-	int filtering_enabled;
+	u16 filters[M9206_MAX_ADAPTERS][M9206_MAX_FILTERS];
+	int filtering_enabled[M9206_MAX_ADAPTERS];
 	int rep_count;
 };
 
diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c
index 518d7ad..d7c0495 100644
--- a/drivers/media/dvb/dvb-usb/opera1.c
+++ b/drivers/media/dvb/dvb-usb/opera1.c
@@ -263,7 +263,7 @@
 {
 	dvb_attach(
 		dvb_pll_attach, adap->fe, 0xc0>>1,
-		&adap->dev->i2c_adap, &dvb_pll_opera1
+		&adap->dev->i2c_adap, DVB_PLL_OPERA1
 	);
 	return 0;
 }
@@ -435,9 +435,9 @@
 {
 	const struct firmware *fw = NULL;
 	u8 *b, *p;
-	int ret = 0, i;
+	int ret = 0, i, fpgasize = 40;
 	u8 testval;
-	info("start downloading fpga firmware");
+	info("start downloading fpga firmware %s", filename);
 
 	if ((ret = request_firmware(&fw, filename, &dev->dev)) != 0) {
 		err("did not find the firmware file. (%s) "
@@ -454,17 +454,20 @@
 			/* clear fpga ? */
 			opera1_xilinx_rw(dev, 0xbc, 0xaa, &fpga_command, 1,
 					 OPERA_WRITE_MSG);
-			for (i = 0; p[i] != 0 && i < fw->size;) {
+			for (i = 0; i < fw->size;) {
+				if ((fw->size - i) < fpgasize) {
+					fpgasize = fw->size - i;
+				}
 				b = (u8 *) p + i;
 				if (opera1_xilinx_rw
-					(dev, OPERA_WRITE_FX2, 0x0, b + 1, b[0],
-						OPERA_WRITE_MSG) != b[0]
+					(dev, OPERA_WRITE_FX2, 0x0, b, fpgasize,
+						OPERA_WRITE_MSG) != fpgasize
 					) {
 					err("error while transferring firmware");
 					ret = -EINVAL;
 					break;
 				}
-				i = i + 1 + b[0];
+				i = i + fpgasize;
 			}
 			/* restart the CPU */
 			if (ret || opera1_xilinx_rw
@@ -534,18 +537,16 @@
 static int opera1_probe(struct usb_interface *intf,
 			const struct usb_device_id *id)
 {
-	struct dvb_usb_device *d;
 	struct usb_device *udev = interface_to_usbdev(intf);
 
 	if (udev->descriptor.idProduct == USB_PID_OPERA1_WARM &&
 		udev->descriptor.idVendor == USB_VID_OPERA1 &&
-		(d == NULL
-			|| opera1_xilinx_load_firmware(udev, "dvb-usb-opera1-fpga.fw") != 0)
-		) {
+		opera1_xilinx_load_firmware(udev, "dvb-usb-opera1-fpga-01.fw") != 0
+	    ) {
 		return -EINVAL;
 	}
 
-	if (dvb_usb_device_init(intf, &opera1_properties, THIS_MODULE, &d) != 0)
+	if (dvb_usb_device_init(intf, &opera1_properties, THIS_MODULE, NULL) != 0)
 		return -EINVAL;
 	return 0;
 }
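
The rewritten FPGA loader streams the image in fixed 40-byte chunks (with a
shorter final chunk) instead of walking length-prefixed records.  Schematically,
with send_block() standing in for the opera1_xilinx_rw() transfer:

	size_t off = 0, chunk;

	while (off < fw->size) {
		chunk = min_t(size_t, 40, fw->size - off);
		if (send_block(dev, fw->data + off, chunk) != chunk)
			return -EINVAL;		/* transfer failed */
		off += chunk;
	}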
diff --git a/drivers/media/dvb/dvb-usb/umt-010.c b/drivers/media/dvb/dvb-usb/umt-010.c
index f77b48f..0dcab3d 100644
--- a/drivers/media/dvb/dvb-usb/umt-010.c
+++ b/drivers/media/dvb/dvb-usb/umt-010.c
@@ -65,9 +65,7 @@
 
 static int umt_tuner_attach (struct dvb_usb_adapter *adap)
 {
-	adap->pll_addr = 0x61;
-	adap->pll_desc = &dvb_pll_tua6034;
-	adap->fe->ops.tuner_ops.calc_regs = dvb_usb_tuner_calc_regs;
+	dvb_attach(dvb_pll_attach, adap->fe, 0x61, NULL, DVB_PLL_TUA6034);
 	return 0;
 }
 
@@ -84,8 +82,8 @@
 
 /* do not change the order of the ID table */
 static struct usb_device_id umt_table [] = {
-/* 00 */	{ USB_DEVICE(USB_VID_HANFTEK,		USB_PID_HANFTEK_UMT_010_COLD) },
-/* 01 */	{ USB_DEVICE(USB_VID_HANFTEK,		USB_PID_HANFTEK_UMT_010_WARM) },
+/* 00 */	{ USB_DEVICE(USB_VID_HANFTEK, USB_PID_HANFTEK_UMT_010_COLD) },
+/* 01 */	{ USB_DEVICE(USB_VID_HANFTEK, USB_PID_HANFTEK_UMT_010_WARM) },
 			{ }		/* Terminating entry */
 };
 MODULE_DEVICE_TABLE (usb, umt_table);
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 27f3865..156b062 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the kernel DVB frontend device drivers.
 #
 
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
 
 obj-$(CONFIG_DVB_PLL) += dvb-pll.o
 obj-$(CONFIG_DVB_STV0299) += stv0299.o
diff --git a/drivers/media/dvb/frontends/cx22702.c b/drivers/media/dvb/frontends/cx22702.c
index 335219e..1dc164d 100644
--- a/drivers/media/dvb/frontends/cx22702.c
+++ b/drivers/media/dvb/frontends/cx22702.c
@@ -32,7 +32,6 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include "dvb_frontend.h"
-#include "dvb-pll.h"
 #include "cx22702.h"
 
 
diff --git a/drivers/media/dvb/frontends/cx24123.c b/drivers/media/dvb/frontends/cx24123.c
index 732e94a..0834c06 100644
--- a/drivers/media/dvb/frontends/cx24123.c
+++ b/drivers/media/dvb/frontends/cx24123.c
@@ -917,7 +917,7 @@
 static int cx24123_tune(struct dvb_frontend* fe,
 			struct dvb_frontend_parameters* params,
 			unsigned int mode_flags,
-			int *delay,
+			unsigned int *delay,
 			fe_status_t *status)
 {
 	int retval = 0;
diff --git a/drivers/media/dvb/frontends/dvb-pll.c b/drivers/media/dvb/frontends/dvb-pll.c
index 5f96ffd..0c0b947 100644
--- a/drivers/media/dvb/frontends/dvb-pll.c
+++ b/drivers/media/dvb/frontends/dvb-pll.c
@@ -24,6 +24,23 @@
 
 #include "dvb-pll.h"
 
+struct dvb_pll_desc {
+	char *name;
+	u32  min;
+	u32  max;
+	u32  iffreq;
+	void (*set)(u8 *buf, const struct dvb_frontend_parameters *params);
+	u8   *initdata;
+	u8   *sleepdata;
+	int  count;
+	struct {
+		u32 limit;
+		u32 stepsize;
+		u8  config;
+		u8  cb;
+	} entries[12];
+};
+
 /* ----------------------------------------------------------- */
 /* descriptions                                                */
 
@@ -38,7 +55,13 @@
 	0x50 = AGC Take over point = 103 dBuV */
 static u8 tua603x_agc103[] = { 2, 0x80|0x40|0x18|0x06|0x01, 0x00|0x50 };
 
-struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
+/*	0x04 = 166.67 kHz divider
+
+	0x80 = AGC Time constant 50ms Iagc = 9 uA
+	0x20 = AGC Take over point = 112 dBuV */
+static u8 tua603x_agc112[] = { 2, 0x80|0x40|0x18|0x04|0x01, 0x80|0x20 };
+
+static struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
 	.name  = "Thomson dtt7579",
 	.min   = 177000000,
 	.max   = 858000000,
@@ -52,9 +75,8 @@
 		{  999999999, 166667, 0xf4, 0x08 },
 	},
 };
-EXPORT_SYMBOL(dvb_pll_thomson_dtt7579);
 
-struct dvb_pll_desc dvb_pll_thomson_dtt7610 = {
+static struct dvb_pll_desc dvb_pll_thomson_dtt7610 = {
 	.name  = "Thomson dtt7610",
 	.min   =  44000000,
 	.max   = 958000000,
@@ -66,19 +88,19 @@
 		{ 999999999, 62500, 0x8e, 0x3c },
 	},
 };
-EXPORT_SYMBOL(dvb_pll_thomson_dtt7610);
 
-static void thomson_dtt759x_bw(u8 *buf, u32 freq, int bandwidth)
+static void thomson_dtt759x_bw(u8 *buf,
+			       const struct dvb_frontend_parameters *params)
 {
-	if (BANDWIDTH_7_MHZ == bandwidth)
+	if (BANDWIDTH_7_MHZ == params->u.ofdm.bandwidth)
 		buf[3] |= 0x10;
 }
 
-struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
+static struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
 	.name  = "Thomson dtt759x",
 	.min   = 177000000,
 	.max   = 896000000,
-	.setbw = thomson_dtt759x_bw,
+	.set   = thomson_dtt759x_bw,
 	.iffreq= 36166667,
 	.sleepdata = (u8[]){ 2, 0x84, 0x03 },
 	.count = 5,
@@ -90,9 +112,8 @@
 		{  999999999, 166667, 0xfc, 0x08 },
 	},
 };
-EXPORT_SYMBOL(dvb_pll_thomson_dtt759x);
 
-struct dvb_pll_desc dvb_pll_lg_z201 = {
+static struct dvb_pll_desc dvb_pll_lg_z201 = {
 	.name  = "LG z201",
 	.min   = 174000000,
 	.max   = 862000000,
@@ -107,9 +128,8 @@
 		{  999999999, 166667, 0xfc, 0x04 },
 	},
 };
-EXPORT_SYMBOL(dvb_pll_lg_z201);
 
-struct dvb_pll_desc dvb_pll_microtune_4042 = {
+static struct dvb_pll_desc dvb_pll_microtune_4042 = {
 	.name  = "Microtune 4042 FI5",
 	.min   =  57000000,
 	.max   = 858000000,
@@ -121,9 +141,8 @@
 		{ 999999999, 62500, 0x8e, 0x31 },
 	},
 };
-EXPORT_SYMBOL(dvb_pll_microtune_4042);
 
-struct dvb_pll_desc dvb_pll_thomson_dtt761x = {
+static struct dvb_pll_desc dvb_pll_thomson_dtt761x = {
 	/* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */
 	.name  = "Thomson dtt761x",
 	.min   =  57000000,
@@ -137,9 +156,8 @@
 		{ 999999999, 62500, 0x8e, 0x3c },
 	},
 };
-EXPORT_SYMBOL(dvb_pll_thomson_dtt761x);
 
-struct dvb_pll_desc dvb_pll_unknown_1 = {
+static struct dvb_pll_desc dvb_pll_unknown_1 = {
 	.name  = "unknown 1", /* used by dntv live dvb-t */
 	.min   = 174000000,
 	.max   = 862000000,
@@ -157,12 +175,11 @@
 		{  999999999, 166667, 0xfc, 0x08 },
 	},
 };
-EXPORT_SYMBOL(dvb_pll_unknown_1);
 
 /* Infineon TUA6010XS
  * used in Thomson Cable Tuner
  */
-struct dvb_pll_desc dvb_pll_tua6010xs = {
+static struct dvb_pll_desc dvb_pll_tua6010xs = {
 	.name  = "Infineon TUA6010XS",
 	.min   =  44250000,
 	.max   = 858000000,
@@ -174,10 +191,9 @@
 		{  999999999, 62500, 0x8e, 0x85 },
 	},
 };
-EXPORT_SYMBOL(dvb_pll_tua6010xs);
 
 /* Panasonic env57h1xd5 (some Philips PLL ?) */
-struct dvb_pll_desc dvb_pll_env57h1xd5 = {
+static struct dvb_pll_desc dvb_pll_env57h1xd5 = {
 	.name  = "Panasonic ENV57H1XD5",
 	.min   =  44250000,
 	.max   = 858000000,
@@ -190,23 +206,23 @@
 		{  999999999, 166667, 0xc2, 0xa4 },
 	},
 };
-EXPORT_SYMBOL(dvb_pll_env57h1xd5);
 
 /* Philips TDA6650/TDA6651
  * used in Panasonic ENV77H11D5
  */
-static void tda665x_bw(u8 *buf, u32 freq, int bandwidth)
+static void tda665x_bw(u8 *buf, const struct dvb_frontend_parameters *params)
 {
-	if (bandwidth == BANDWIDTH_8_MHZ)
+	if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ)
 		buf[3] |= 0x08;
 }
 
-struct dvb_pll_desc dvb_pll_tda665x = {
+static struct dvb_pll_desc dvb_pll_tda665x = {
 	.name  = "Philips TDA6650/TDA6651",
 	.min   =  44250000,
 	.max   = 858000000,
-	.setbw = tda665x_bw,
+	.set   = tda665x_bw,
 	.iffreq= 36166667,
+	.initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab },
 	.count = 12,
 	.entries = {
 		{   93834000, 166667, 0xca, 0x61 /* 011 0 0 0  01 */ },
@@ -223,36 +239,34 @@
 		{  861000000, 166667, 0xca, 0xe4 /* 111 0 0 1  00 */ },
 	}
 };
-EXPORT_SYMBOL(dvb_pll_tda665x);
 
 /* Infineon TUA6034
  * used in LG TDTP E102P
  */
-static void tua6034_bw(u8 *buf, u32 freq, int bandwidth)
+static void tua6034_bw(u8 *buf, const struct dvb_frontend_parameters *params)
 {
-	if (BANDWIDTH_7_MHZ != bandwidth)
+	if (BANDWIDTH_7_MHZ != params->u.ofdm.bandwidth)
 		buf[3] |= 0x08;
 }
 
-struct dvb_pll_desc dvb_pll_tua6034 = {
+static struct dvb_pll_desc dvb_pll_tua6034 = {
 	.name  = "Infineon TUA6034",
 	.min   =  44250000,
 	.max   = 858000000,
 	.iffreq= 36166667,
 	.count = 3,
-	.setbw = tua6034_bw,
+	.set   = tua6034_bw,
 	.entries = {
 		{  174500000, 62500, 0xce, 0x01 },
 		{  230000000, 62500, 0xce, 0x02 },
 		{  999999999, 62500, 0xce, 0x04 },
 	},
 };
-EXPORT_SYMBOL(dvb_pll_tua6034);
 
 /* Infineon TUA6034
  * used in LG TDVS-H061F, LG TDVS-H062F and LG TDVS-H064F
  */
-struct dvb_pll_desc dvb_pll_lg_tdvs_h06xf = {
+static struct dvb_pll_desc dvb_pll_lg_tdvs_h06xf = {
 	.name  = "LG TDVS-H06xF",
 	.min   =  54000000,
 	.max   = 863000000,
@@ -265,23 +279,25 @@
 		{  999999999, 62500, 0xce, 0x04 },
 	},
 };
-EXPORT_SYMBOL(dvb_pll_lg_tdvs_h06xf);
 
 /* Philips FMD1216ME
  * used in Medion Hybrid PCMCIA card and USB Box
  */
-static void fmd1216me_bw(u8 *buf, u32 freq, int bandwidth)
+static void fmd1216me_bw(u8 *buf, const struct dvb_frontend_parameters *params)
 {
-	if (bandwidth == BANDWIDTH_8_MHZ && freq >= 158870000)
+	if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ &&
+	    params->frequency >= 158870000)
 		buf[3] |= 0x08;
 }
 
-struct dvb_pll_desc dvb_pll_fmd1216me = {
+static struct dvb_pll_desc dvb_pll_fmd1216me = {
 	.name = "Philips FMD1216ME",
 	.min = 50870000,
 	.max = 858000000,
 	.iffreq= 36125000,
-	.setbw = fmd1216me_bw,
+	.set   = fmd1216me_bw,
+	.initdata = tua603x_agc112,
+	.sleepdata = (u8[]){ 4, 0x9c, 0x60, 0x85, 0x54 },
 	.count = 7,
 	.entries = {
 		{ 143870000, 166667, 0xbc, 0x41 },
@@ -293,23 +309,22 @@
 		{ 999999999, 166667, 0xfc, 0x44 },
 	}
 };
-EXPORT_SYMBOL(dvb_pll_fmd1216me);
 
 /* ALPS TDED4
  * used in Nebula-Cards and USB boxes
  */
-static void tded4_bw(u8 *buf, u32 freq, int bandwidth)
+static void tded4_bw(u8 *buf, const struct dvb_frontend_parameters *params)
 {
-	if (bandwidth == BANDWIDTH_8_MHZ)
+	if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ)
 		buf[3] |= 0x04;
 }
 
-struct dvb_pll_desc dvb_pll_tded4 = {
+static struct dvb_pll_desc dvb_pll_tded4 = {
 	.name = "ALPS TDED4",
 	.min = 47000000,
 	.max = 863000000,
 	.iffreq= 36166667,
-	.setbw = tded4_bw,
+	.set   = tded4_bw,
 	.count = 4,
 	.entries = {
 		{ 153000000, 166667, 0x85, 0x01 },
@@ -318,12 +333,11 @@
 		{ 999999999, 166667, 0x85, 0x88 },
 	}
 };
-EXPORT_SYMBOL(dvb_pll_tded4);
 
 /* ALPS TDHU2
  * used in AverTVHD MCE A180
  */
-struct dvb_pll_desc dvb_pll_tdhu2 = {
+static struct dvb_pll_desc dvb_pll_tdhu2 = {
 	.name = "ALPS TDHU2",
 	.min = 54000000,
 	.max = 864000000,
@@ -336,16 +350,29 @@
 		{ 999999999, 62500, 0x85, 0x88 },
 	}
 };
-EXPORT_SYMBOL(dvb_pll_tdhu2);
 
 /* Philips TUV1236D
  * used in ATI HDTV Wonder
  */
-struct dvb_pll_desc dvb_pll_tuv1236d = {
+static void tuv1236d_rf(u8 *buf, const struct dvb_frontend_parameters *params)
+{
+	switch (params->u.vsb.modulation) {
+		case QAM_64:
+		case QAM_256:
+			buf[3] |= 0x08;
+			break;
+		case VSB_8:
+		default:
+			buf[3] &= ~0x08;
+	}
+}
+
+static struct dvb_pll_desc dvb_pll_tuv1236d = {
 	.name  = "Philips TUV1236D",
 	.min   =  54000000,
 	.max   = 864000000,
 	.iffreq= 44000000,
+	.set   = tuv1236d_rf,
 	.count = 3,
 	.entries = {
 		{ 157250000, 62500, 0xc6, 0x41 },
@@ -353,12 +380,11 @@
 		{ 999999999, 62500, 0xc6, 0x44 },
 	},
 };
-EXPORT_SYMBOL(dvb_pll_tuv1236d);
 
 /* Samsung TBMV30111IN / TBMV30712IN1
  * used in Air2PC ATSC - 2nd generation (nxt2002)
  */
-struct dvb_pll_desc dvb_pll_samsung_tbmv = {
+static struct dvb_pll_desc dvb_pll_samsung_tbmv = {
 	.name = "Samsung TBMV30111IN / TBMV30712IN1",
 	.min = 54000000,
 	.max = 860000000,
@@ -373,12 +399,11 @@
 		{ 999999999, 166667, 0xfc, 0x02 },
 	}
 };
-EXPORT_SYMBOL(dvb_pll_samsung_tbmv);
 
 /*
  * Philips SD1878 Tuner.
  */
-struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
+static struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
 	.name  = "Philips SD1878",
 	.min   =  950000,
 	.max   = 2150000,
@@ -391,19 +416,18 @@
 		{ 2150000, 500, 0xc4, 0xc0},
 	},
 };
-EXPORT_SYMBOL(dvb_pll_philips_sd1878_tda8261);
 
 /*
  * Philips TD1316 Tuner.
  */
-static void td1316_bw(u8 *buf, u32 freq, int bandwidth)
+static void td1316_bw(u8 *buf, const struct dvb_frontend_parameters *params)
 {
 	u8 band;
 
 	/* determine band */
-	if (freq < 161000000)
+	if (params->frequency < 161000000)
 		band = 1;
-	else if (freq < 444000000)
+	else if (params->frequency < 444000000)
 		band = 2;
 	else
 		band = 4;
@@ -411,16 +435,16 @@
 	buf[3] |= band;
 
 	/* setup PLL filter */
-	if (bandwidth == BANDWIDTH_8_MHZ)
+	if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ)
 		buf[3] |= 1 << 3;
 }
 
-struct dvb_pll_desc dvb_pll_philips_td1316 = {
+static struct dvb_pll_desc dvb_pll_philips_td1316 = {
 	.name  = "Philips TD1316",
 	.min   =  87000000,
 	.max   = 895000000,
 	.iffreq= 36166667,
-	.setbw = td1316_bw,
+	.set   = td1316_bw,
 	.count = 9,
 	.entries = {
 		{  93834000, 166667, 0xca, 0x60},
@@ -434,10 +458,9 @@
 		{ 858834000, 166667, 0xca, 0xe0},
 	},
 };
-EXPORT_SYMBOL(dvb_pll_philips_td1316);
 
 /* FE6600 used on DViCO Hybrid */
-struct dvb_pll_desc dvb_pll_thomson_fe6600 = {
+static struct dvb_pll_desc dvb_pll_thomson_fe6600 = {
 	.name = "Thomson FE6600",
 	.min =  44250000,
 	.max = 858000000,
@@ -450,19 +473,19 @@
 		{ 999999999, 166667, 0xf4, 0x18 },
 	}
 };
-EXPORT_SYMBOL(dvb_pll_thomson_fe6600);
-static void opera1_bw(u8 *buf, u32 freq, int bandwidth)
+
+static void opera1_bw(u8 *buf, const struct dvb_frontend_parameters *params)
 {
-	if (bandwidth == BANDWIDTH_8_MHZ)
+	if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ)
 		buf[2] |= 0x08;
 }
 
-struct dvb_pll_desc dvb_pll_opera1 = {
+static struct dvb_pll_desc dvb_pll_opera1 = {
 	.name  = "Opera Tuner",
 	.min   =  900000,
 	.max   = 2250000,
 	.iffreq= 0,
-	.setbw = opera1_bw,
+	.set   = opera1_bw,
 	.count = 8,
 	.entries = {
 		{ 1064000, 500, 0xe5, 0xc6 },
@@ -475,7 +498,54 @@
 		{ 2250000, 500, 0xe5, 0xc4 },
 	}
 };
-EXPORT_SYMBOL(dvb_pll_opera1);
+
+/* Philips FCV1236D
+ */
+struct dvb_pll_desc dvb_pll_fcv1236d = {
+/* Bit_0: RF Input select
+ * Bit_1: 0=digital, 1=analog
+ */
+	.name  = "Philips FCV1236D",
+	.min   =  53000000,
+	.max   = 803000000,
+	.iffreq= 44000000,
+	.count = 3,
+	.entries = {
+		{ 159000000, 62500, 0x8e, 0xa0 },
+		{ 453000000, 62500, 0x8e, 0x90 },
+		{ 999999999, 62500, 0x8e, 0x30 },
+	},
+};
+
+/* ----------------------------------------------------------- */
+
+static struct dvb_pll_desc *pll_list[] = {
+	[DVB_PLL_UNDEFINED]              = NULL,
+	[DVB_PLL_THOMSON_DTT7579]        = &dvb_pll_thomson_dtt7579,
+	[DVB_PLL_THOMSON_DTT759X]        = &dvb_pll_thomson_dtt759x,
+	[DVB_PLL_THOMSON_DTT7610]        = &dvb_pll_thomson_dtt7610,
+	[DVB_PLL_LG_Z201]                = &dvb_pll_lg_z201,
+	[DVB_PLL_MICROTUNE_4042]         = &dvb_pll_microtune_4042,
+	[DVB_PLL_THOMSON_DTT761X]        = &dvb_pll_thomson_dtt761x,
+	[DVB_PLL_UNKNOWN_1]              = &dvb_pll_unknown_1,
+	[DVB_PLL_TUA6010XS]              = &dvb_pll_tua6010xs,
+	[DVB_PLL_ENV57H1XD5]             = &dvb_pll_env57h1xd5,
+	[DVB_PLL_TUA6034]                = &dvb_pll_tua6034,
+	[DVB_PLL_LG_TDVS_H06XF]          = &dvb_pll_lg_tdvs_h06xf,
+	[DVB_PLL_TDA665X]                = &dvb_pll_tda665x,
+	[DVB_PLL_FMD1216ME]              = &dvb_pll_fmd1216me,
+	[DVB_PLL_TDED4]                  = &dvb_pll_tded4,
+	[DVB_PLL_TUV1236D]               = &dvb_pll_tuv1236d,
+	[DVB_PLL_TDHU2]                  = &dvb_pll_tdhu2,
+	[DVB_PLL_SAMSUNG_TBMV]           = &dvb_pll_samsung_tbmv,
+	[DVB_PLL_PHILIPS_SD1878_TDA8261] = &dvb_pll_philips_sd1878_tda8261,
+	[DVB_PLL_PHILIPS_TD1316]         = &dvb_pll_philips_td1316,
+	[DVB_PLL_THOMSON_FE6600]         = &dvb_pll_thomson_fe6600,
+	[DVB_PLL_OPERA1]                 = &dvb_pll_opera1,
+	[DVB_PLL_FCV1236D]               = &dvb_pll_fcv1236d,
+};
+
+/* ----------------------------------------------------------- */
 
 struct dvb_pll_priv {
 	/* i2c details */
@@ -497,35 +567,37 @@
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "enable verbose debug messages");
 
-int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
-		      u32 freq, int bandwidth)
+static int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
+			     const struct dvb_frontend_parameters *params)
 {
 	u32 div;
 	int i;
 
-	if (freq != 0 && (freq < desc->min || freq > desc->max))
-	    return -EINVAL;
+	if (params->frequency != 0 && (params->frequency < desc->min ||
+				       params->frequency > desc->max))
+		return -EINVAL;
 
 	for (i = 0; i < desc->count; i++) {
-		if (freq > desc->entries[i].limit)
+		if (params->frequency > desc->entries[i].limit)
 			continue;
 		break;
 	}
+
 	if (debug)
-		printk("pll: %s: freq=%d bw=%d | i=%d/%d\n",
-		       desc->name, freq, bandwidth, i, desc->count);
+		printk("pll: %s: freq=%d | i=%d/%d\n", desc->name,
+		       params->frequency, i, desc->count);
 	if (i == desc->count)
 		return -EINVAL;
 
-	div = (freq + desc->iffreq + desc->entries[i].stepsize/2) /
-	      desc->entries[i].stepsize;
+	div = (params->frequency + desc->iffreq +
+	       desc->entries[i].stepsize/2) / desc->entries[i].stepsize;
 	buf[0] = div >> 8;
 	buf[1] = div & 0xff;
 	buf[2] = desc->entries[i].config;
 	buf[3] = desc->entries[i].cb;
 
-	if (desc->setbw)
-		desc->setbw(buf, freq, bandwidth);
+	if (desc->set)
+		desc->set(buf, params);
 
 	if (debug)
 		printk("pll: %s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n",
@@ -534,7 +606,6 @@
 	// calculate the frequency we set it to
 	return (div * desc->entries[i].stepsize) - desc->iffreq;
 }
-EXPORT_SYMBOL(dvb_pll_configure);
 
 static int dvb_pll_release(struct dvb_frontend *fe)
 {
@@ -578,18 +649,12 @@
 		{ .addr = priv->pll_i2c_address, .flags = 0,
 		  .buf = buf, .len = sizeof(buf) };
 	int result;
-	u32 bandwidth = 0, frequency = 0;
+	u32 frequency = 0;
 
 	if (priv->i2c == NULL)
 		return -EINVAL;
 
-	// DVBT bandwidth only just now
-	if (fe->ops.info.type == FE_OFDM) {
-		bandwidth = params->u.ofdm.bandwidth;
-	}
-
-	if ((result = dvb_pll_configure(priv->pll_desc, buf,
-					params->frequency, bandwidth)) < 0)
+	if ((result = dvb_pll_configure(priv->pll_desc, buf, params)) < 0)
 		return result;
 	else
 		frequency = result;
@@ -601,7 +666,7 @@
 	}
 
 	priv->frequency = frequency;
-	priv->bandwidth = bandwidth;
+	priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
 
 	return 0;
 }
@@ -612,18 +677,12 @@
 {
 	struct dvb_pll_priv *priv = fe->tuner_priv;
 	int result;
-	u32 bandwidth = 0, frequency = 0;
+	u32 frequency = 0;
 
 	if (buf_len < 5)
 		return -EINVAL;
 
-	// DVBT bandwidth only just now
-	if (fe->ops.info.type == FE_OFDM) {
-		bandwidth = params->u.ofdm.bandwidth;
-	}
-
-	if ((result = dvb_pll_configure(priv->pll_desc, buf+1,
-					params->frequency, bandwidth)) < 0)
+	if ((result = dvb_pll_configure(priv->pll_desc, buf+1, params)) < 0)
 		return result;
 	else
 		frequency = result;
@@ -631,7 +690,7 @@
 	buf[0] = priv->pll_i2c_address;
 
 	priv->frequency = frequency;
-	priv->bandwidth = bandwidth;
+	priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
 
 	return 5;
 }
@@ -687,13 +746,18 @@
 
 struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
 				    struct i2c_adapter *i2c,
-				    struct dvb_pll_desc *desc)
+				    unsigned int pll_desc_id)
 {
 	u8 b1 [] = { 0 };
 	struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD,
 			       .buf = b1, .len = 1 };
 	struct dvb_pll_priv *priv = NULL;
 	int ret;
+	struct dvb_pll_desc *desc;
+
+	BUG_ON(pll_desc_id < 1 || pll_desc_id >= ARRAY_SIZE(pll_list));
+
+	desc = pll_list[pll_desc_id];
 
 	if (i2c != NULL) {
 		if (fe->ops.i2c_gate_ctrl)
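
As a worked example of the divider computation in dvb_pll_configure() (numbers
taken from the PLL tables above, not from a specific board): for an entry with
stepsize 166667 and iffreq 36166667, tuning to 562000000 Hz gives
div = (562000000 + 36166667 + 83333) / 166667 = 3589, hence buf[0] = 0x0e,
buf[1] = 0x05, and the function returns the frequency actually programmed,
3589 * 166667 - 36166667 = 562001196 Hz.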
diff --git a/drivers/media/dvb/frontends/dvb-pll.h b/drivers/media/dvb/frontends/dvb-pll.h
index 5209f46..e93a810 100644
--- a/drivers/media/dvb/frontends/dvb-pll.h
+++ b/drivers/media/dvb/frontends/dvb-pll.h
@@ -8,50 +8,29 @@
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
-struct dvb_pll_desc {
-	char *name;
-	u32  min;
-	u32  max;
-	u32  iffreq;
-	void (*setbw)(u8 *buf, u32 freq, int bandwidth);
-	u8   *initdata;
-	u8   *sleepdata;
-	int  count;
-	struct {
-		u32 limit;
-		u32 stepsize;
-		u8  config;
-		u8  cb;
-	} entries[12];
-};
-
-extern struct dvb_pll_desc dvb_pll_thomson_dtt7579;
-extern struct dvb_pll_desc dvb_pll_thomson_dtt759x;
-extern struct dvb_pll_desc dvb_pll_thomson_dtt7610;
-extern struct dvb_pll_desc dvb_pll_lg_z201;
-extern struct dvb_pll_desc dvb_pll_microtune_4042;
-extern struct dvb_pll_desc dvb_pll_thomson_dtt761x;
-extern struct dvb_pll_desc dvb_pll_unknown_1;
-
-extern struct dvb_pll_desc dvb_pll_tua6010xs;
-extern struct dvb_pll_desc dvb_pll_env57h1xd5;
-extern struct dvb_pll_desc dvb_pll_tua6034;
-extern struct dvb_pll_desc dvb_pll_lg_tdvs_h06xf;
-extern struct dvb_pll_desc dvb_pll_tda665x;
-extern struct dvb_pll_desc dvb_pll_fmd1216me;
-extern struct dvb_pll_desc dvb_pll_tded4;
-
-extern struct dvb_pll_desc dvb_pll_tuv1236d;
-extern struct dvb_pll_desc dvb_pll_tdhu2;
-extern struct dvb_pll_desc dvb_pll_samsung_tbmv;
-extern struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261;
-extern struct dvb_pll_desc dvb_pll_philips_td1316;
-
-extern struct dvb_pll_desc dvb_pll_thomson_fe6600;
-extern struct dvb_pll_desc dvb_pll_opera1;
-
-extern int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
-			     u32 freq, int bandwidth);
+#define DVB_PLL_UNDEFINED               0
+#define DVB_PLL_THOMSON_DTT7579         1
+#define DVB_PLL_THOMSON_DTT759X         2
+#define DVB_PLL_THOMSON_DTT7610         3
+#define DVB_PLL_LG_Z201                 4
+#define DVB_PLL_MICROTUNE_4042          5
+#define DVB_PLL_THOMSON_DTT761X         6
+#define DVB_PLL_UNKNOWN_1               7
+#define DVB_PLL_TUA6010XS               8
+#define DVB_PLL_ENV57H1XD5              9
+#define DVB_PLL_TUA6034                10
+#define DVB_PLL_LG_TDVS_H06XF          11
+#define DVB_PLL_TDA665X                12
+#define DVB_PLL_FMD1216ME              13
+#define DVB_PLL_TDED4                  14
+#define DVB_PLL_TUV1236D               15
+#define DVB_PLL_TDHU2                  16
+#define DVB_PLL_SAMSUNG_TBMV           17
+#define DVB_PLL_PHILIPS_SD1878_TDA8261 18
+#define DVB_PLL_PHILIPS_TD1316         19
+#define DVB_PLL_THOMSON_FE6600         20
+#define DVB_PLL_OPERA1                 21
+#define DVB_PLL_FCV1236D               22
 
 /**
  * Attach a dvb-pll to the supplied frontend structure.
@@ -59,19 +38,19 @@
  * @param fe Frontend to attach to.
  * @param pll_addr i2c address of the PLL (if used).
  * @param i2c i2c adapter to use (set to NULL if not used).
- * @param desc dvb_pll_desc to use.
+ * @param pll_desc_id dvb_pll_desc to use.
  * @return Frontend pointer on success, NULL on failure
  */
 #if defined(CONFIG_DVB_PLL) || (defined(CONFIG_DVB_PLL_MODULE) && defined(MODULE))
 extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe,
 					   int pll_addr,
 					   struct i2c_adapter *i2c,
-					   struct dvb_pll_desc *desc);
+					   unsigned int pll_desc_id);
 #else
 static inline struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe,
 					   int pll_addr,
 					   struct i2c_adapter *i2c,
-					   struct dvb_pll_desc *desc)
+					   unsigned int pll_desc_id)
 {
 	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __FUNCTION__);
 	return NULL;
diff --git a/drivers/media/dvb/frontends/nxt200x.c b/drivers/media/dvb/frontends/nxt200x.c
index b809f83..ddc8489 100644
--- a/drivers/media/dvb/frontends/nxt200x.c
+++ b/drivers/media/dvb/frontends/nxt200x.c
@@ -49,7 +49,6 @@
 #include <linux/string.h>
 
 #include "dvb_frontend.h"
-#include "dvb-pll.h"
 #include "nxt200x.h"
 
 struct nxt200x_state {
@@ -546,11 +545,6 @@
 		nxt200x_writebytes(state, 0x17, buf, 1);
 	}
 
-	/* get tuning information */
-	if (fe->ops.tuner_ops.calc_regs) {
-		fe->ops.tuner_ops.calc_regs(fe, p, buf, 5);
-	}
-
 	/* set additional params */
 	switch (p->u.vsb.modulation) {
 		case QAM_64:
@@ -559,27 +553,24 @@
 			/* This is just a guess since I am unable to test it */
 			if (state->config->set_ts_params)
 				state->config->set_ts_params(fe, 1);
-
-			/* set input */
-			if (state->config->set_pll_input)
-				state->config->set_pll_input(buf+1, 1);
 			break;
 		case VSB_8:
 			/* Set non-punctured clock for VSB */
 			if (state->config->set_ts_params)
 				state->config->set_ts_params(fe, 0);
-
-			/* set input */
-			if (state->config->set_pll_input)
-				state->config->set_pll_input(buf+1, 0);
 			break;
 		default:
 			return -EINVAL;
 			break;
 	}
 
-	/* write frequency information */
-	nxt200x_writetuner(state, buf);
+	if (fe->ops.tuner_ops.calc_regs) {
+		/* get tuning information */
+		fe->ops.tuner_ops.calc_regs(fe, p, buf, 5);
+
+		/* write frequency information */
+		nxt200x_writetuner(state, buf);
+	}
 
 	/* reset the agc now that tuning has been completed */
 	nxt200x_agc_reset(state);
diff --git a/drivers/media/dvb/frontends/nxt200x.h b/drivers/media/dvb/frontends/nxt200x.h
index 28bc559..bb0ef58 100644
--- a/drivers/media/dvb/frontends/nxt200x.h
+++ b/drivers/media/dvb/frontends/nxt200x.h
@@ -38,9 +38,6 @@
 	/* the demodulator's i2c address */
 	u8 demod_address;
 
-	/* used to set pll input */
-	int (*set_pll_input)(u8* buf, int input);
-
 	/* need to set device param for start_dma */
 	int (*set_ts_params)(struct dvb_frontend* fe, int is_punctured);
 };
diff --git a/drivers/media/dvb/frontends/or51132.c b/drivers/media/dvb/frontends/or51132.c
index 4e0aca7..3cc8b44 100644
--- a/drivers/media/dvb/frontends/or51132.c
+++ b/drivers/media/dvb/frontends/or51132.c
@@ -45,7 +45,6 @@
 
 #include "dvb_math.h"
 #include "dvb_frontend.h"
-#include "dvb-pll.h"
 #include "or51132.h"
 
 static int debug;
diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
index 048d7cf..f46d5a4 100644
--- a/drivers/media/dvb/frontends/or51211.c
+++ b/drivers/media/dvb/frontends/or51211.c
@@ -223,38 +223,13 @@
 				  struct dvb_frontend_parameters *param)
 {
 	struct or51211_state* state = fe->demodulator_priv;
-	u32 freq = 0;
-	u16 tunerfreq = 0;
-	u8 buf[4];
 
 	/* Change only if we are actually changing the channel */
 	if (state->current_frequency != param->frequency) {
-		freq = 44000 + (param->frequency/1000);
-		tunerfreq = freq * 16/1000;
-
-		dprintk("set_parameters frequency = %d (tunerfreq = %d)\n",
-			param->frequency,tunerfreq);
-
-		buf[0] = (tunerfreq >> 8) & 0x7F;
-		buf[1] = (tunerfreq & 0xFF);
-		buf[2] = 0x8E;
-
-		if (param->frequency < 157250000) {
-			buf[3] = 0xA0;
-			dprintk("set_parameters VHF low range\n");
-		} else if (param->frequency < 454000000) {
-			buf[3] = 0x90;
-			dprintk("set_parameters VHF high range\n");
-		} else {
-			buf[3] = 0x30;
-			dprintk("set_parameters UHF range\n");
+		if (fe->ops.tuner_ops.set_params) {
+			fe->ops.tuner_ops.set_params(fe, param);
+			if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
 		}
-		dprintk("set_parameters tuner bytes: 0x%02x 0x%02x "
-			"0x%02x 0x%02x\n",buf[0],buf[1],buf[2],buf[3]);
-
-		if (i2c_writebytes(state,0xC2>>1,buf,4))
-			printk(KERN_WARNING "or51211:set_parameters error "
-			       "writing to tuner\n");
 
 		/* Set to ATSC mode */
 		or51211_setmode(fe,0);
diff --git a/drivers/media/dvb/frontends/stv0299.c b/drivers/media/dvb/frontends/stv0299.c
index 18768d2..6c60730 100644
--- a/drivers/media/dvb/frontends/stv0299.c
+++ b/drivers/media/dvb/frontends/stv0299.c
@@ -249,7 +249,7 @@
 	dprintk ("%s\n", __FUNCTION__);
 
 	stv0299_readregs (state, 0x1f, sfr, 3);
-	stv0299_readregs (state, 0x1a, &rtf, 1);
+	stv0299_readregs (state, 0x1a, (u8 *)&rtf, 1);
 
 	srate = (sfr[0] << 8) | sfr[1];
 	srate *= Mclk;
diff --git a/drivers/media/dvb/frontends/tda10023.c b/drivers/media/dvb/frontends/tda10023.c
index da796e7..4bb06f9 100644
--- a/drivers/media/dvb/frontends/tda10023.c
+++ b/drivers/media/dvb/frontends/tda10023.c
@@ -478,7 +478,7 @@
 	state->i2c = i2c;
 	memcpy(&state->frontend.ops, &tda10023_ops, sizeof(struct dvb_frontend_ops));
 	state->pwm = pwm;
-	for (i=0; i < sizeof(tda10023_inittab)/sizeof(*tda10023_inittab);i+=3) {
+	for (i=0; i < ARRAY_SIZE(tda10023_inittab);i+=3) {
 		if (tda10023_inittab[i] == 0x00) {
 			state->reg0 = tda10023_inittab[i+2];
 			break;
diff --git a/drivers/media/dvb/pluto2/Makefile b/drivers/media/dvb/pluto2/Makefile
index ce6a9aa..7ac1287 100644
--- a/drivers/media/dvb/pluto2/Makefile
+++ b/drivers/media/dvb/pluto2/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_DVB_PLUTO2) += pluto2.o
 
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index 7751628..6d53289 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -108,7 +108,7 @@
 	tristate "Budget cards with analog video inputs"
 	depends on DVB_CORE && PCI && I2C && VIDEO_V4L1
 	select VIDEO_SAA7146_VV
-	select DVB_PLL
+	select DVB_PLL if !DVB_FE_CUSTOMISE
 	select DVB_STV0299 if !DVB_FE_CUSTOMISE
 	select DVB_TDA1004X if !DVB_FE_CUSTOMISE
 	select DVB_TDA10021 if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/dvb/ttpci/Makefile b/drivers/media/dvb/ttpci/Makefile
index aa85ecd..2c11452 100644
--- a/drivers/media/dvb/ttpci/Makefile
+++ b/drivers/media/dvb/ttpci/Makefile
@@ -11,7 +11,7 @@
 obj-$(CONFIG_DVB_BUDGET_PATCH) += budget-core.o budget-patch.o ttpci-eeprom.o
 obj-$(CONFIG_DVB_AV7110) += dvb-ttpci.o ttpci-eeprom.o
 
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
 
 hostprogs-y	:= fdump
 
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index ef1108c..2cee9e3 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -137,6 +137,15 @@
 	if (ret < 0)
 		printk("dvb-ttpci:cannot set internal volume to maximum:%d\n",ret);
 
+	ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetMonitorType,
+			    1, (u16) av7110->display_ar);
+	if (ret < 0)
+		printk("dvb-ttpci: unable to set aspect ratio\n");
+	ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetPanScanType,
+			    1, av7110->display_panscan);
+	if (ret < 0)
+		printk("dvb-ttpci: unable to set pan scan\n");
+
 	ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 2, 2, wss_cfg_4_3);
 	if (ret < 0)
 		printk("dvb-ttpci: unable to configure 4:3 wss\n");
@@ -2639,12 +2648,12 @@
 	av7110->mixer.volume_left  = volume;
 	av7110->mixer.volume_right = volume;
 
-	init_av7110_av(av7110);
-
 	ret = av7110_register(av7110);
 	if (ret < 0)
 		goto err_arm_thread_stop_10;
 
+	init_av7110_av(av7110);
+
 	/* special case DVB-C: these cards have an analog tuner
 	   plus need some special handling, so we have separate
 	   saa7146_ext_vv data for these... */
diff --git a/drivers/media/dvb/ttpci/av7110.h b/drivers/media/dvb/ttpci/av7110.h
index 115002b..0cb4395 100644
--- a/drivers/media/dvb/ttpci/av7110.h
+++ b/drivers/media/dvb/ttpci/av7110.h
@@ -194,6 +194,7 @@
 
 	int			video_blank;
 	struct video_status	videostate;
+	u16			display_panscan;
 	int			display_ar;
 	int			trickmode;
 #define TRICK_NONE   0
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index 58678c0..d75e7e4 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -391,7 +391,7 @@
  ****************************************************************************/
 
 static inline long aux_ring_buffer_write(struct dvb_ringbuffer *rbuf,
-					 const char *buf, unsigned long count)
+					 const u8 *buf, unsigned long count)
 {
 	unsigned long todo = count;
 	int free;
@@ -436,7 +436,7 @@
 #define FREE_COND (dvb_ringbuffer_free(&av7110->avout) >= 20 * 1024 && \
 		   dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024)
 
-static ssize_t dvb_play(struct av7110 *av7110, const u8 __user *buf,
+static ssize_t dvb_play(struct av7110 *av7110, const char __user *buf,
 			unsigned long count, int nonblock, int type)
 {
 	unsigned long todo = count, n;
@@ -499,7 +499,7 @@
 	return count - todo;
 }
 
-static ssize_t dvb_aplay(struct av7110 *av7110, const u8 __user *buf,
+static ssize_t dvb_aplay(struct av7110 *av7110, const char __user *buf,
 			 unsigned long count, int nonblock, int type)
 {
 	unsigned long todo = count, n;
@@ -959,7 +959,7 @@
 
 #define MIN_IFRAME 400000
 
-static int play_iframe(struct av7110 *av7110, u8 __user *buf, unsigned int len, int nonblock)
+static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len, int nonblock)
 {
 	int i, n;
 
@@ -1082,19 +1082,18 @@
 	case VIDEO_SET_DISPLAY_FORMAT:
 	{
 		video_displayformat_t format = (video_displayformat_t) arg;
-		u16 val = 0;
 
 		switch (format) {
 		case VIDEO_PAN_SCAN:
-			val = VID_PAN_SCAN_PREF;
+			av7110->display_panscan = VID_PAN_SCAN_PREF;
 			break;
 
 		case VIDEO_LETTER_BOX:
-			val = VID_VC_AND_PS_PREF;
+			av7110->display_panscan = VID_VC_AND_PS_PREF;
 			break;
 
 		case VIDEO_CENTER_CUT_OUT:
-			val = VID_CENTRE_CUT_PREF;
+			av7110->display_panscan = VID_CENTRE_CUT_PREF;
 			break;
 
 		default:
@@ -1104,7 +1103,7 @@
 			break;
 		av7110->videostate.display_format = format;
 		ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetPanScanType,
-				    1, (u16) val);
+				    1, av7110->display_panscan);
 		break;
 	}
 
@@ -1466,8 +1465,9 @@
 	av7110->videostate.play_state = VIDEO_STOPPED;
 	av7110->videostate.stream_source = VIDEO_SOURCE_DEMUX;
 	av7110->videostate.video_format = VIDEO_FORMAT_4_3;
-	av7110->videostate.display_format = VIDEO_CENTER_CUT_OUT;
+	av7110->videostate.display_format = VIDEO_LETTER_BOX;
 	av7110->display_ar = VIDEO_FORMAT_4_3;
+	av7110->display_panscan = VID_VC_AND_PS_PREF;
 
 	init_waitqueue_head(&av7110->video_events.wait_queue);
 	spin_lock_init(&av7110->video_events.lock);
diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c
index e1c1294..c58e3fc 100644
--- a/drivers/media/dvb/ttpci/av7110_ca.c
+++ b/drivers/media/dvb/ttpci/av7110_ca.c
@@ -151,7 +151,7 @@
 {
 	int free;
 	int non_blocking = file->f_flags & O_NONBLOCK;
-	char *page = (char *)__get_free_page(GFP_USER);
+	u8 *page = (u8 *)__get_free_page(GFP_USER);
 	int res;
 
 	if (!page)
@@ -208,7 +208,7 @@
 		return -EINVAL;
 	DVB_RINGBUFFER_SKIP(cibuf, 2);
 
-	return dvb_ringbuffer_read(cibuf, buf, len, 1);
+	return dvb_ringbuffer_read(cibuf, (u8 *)buf, len, 1);
 }
 
 static int dvb_ca_open(struct inode *inode, struct file *file)
diff --git a/drivers/media/dvb/ttpci/av7110_hw.c b/drivers/media/dvb/ttpci/av7110_hw.c
index 70aee4e..515e823 100644
--- a/drivers/media/dvb/ttpci/av7110_hw.c
+++ b/drivers/media/dvb/ttpci/av7110_hw.c
@@ -158,7 +158,7 @@
 		}
 		dprintk(4, "writing DRAM block %d\n", i);
 		mwdebi(av7110, DEBISWAB, bootblock,
-		       ((char*)data) + i * AV7110_BOOT_MAX_SIZE, AV7110_BOOT_MAX_SIZE);
+		       ((u8 *)data) + i * AV7110_BOOT_MAX_SIZE, AV7110_BOOT_MAX_SIZE);
 		bootblock ^= 0x1400;
 		iwdebi(av7110, DEBISWAB, AV7110_BOOT_BASE, swab32(base), 4);
 		iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, AV7110_BOOT_MAX_SIZE, 2);
@@ -173,10 +173,10 @@
 		}
 		if (rest > 4)
 			mwdebi(av7110, DEBISWAB, bootblock,
-			       ((char*)data) + i * AV7110_BOOT_MAX_SIZE, rest);
+			       ((u8 *)data) + i * AV7110_BOOT_MAX_SIZE, rest);
 		else
 			mwdebi(av7110, DEBISWAB, bootblock,
-			       ((char*)data) + i * AV7110_BOOT_MAX_SIZE - 4, rest + 4);
+			       ((u8 *)data) + i * AV7110_BOOT_MAX_SIZE - 4, rest + 4);
 
 		iwdebi(av7110, DEBISWAB, AV7110_BOOT_BASE, swab32(base), 4);
 		iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, rest, 2);
@@ -751,7 +751,7 @@
 	return 0;
 }
 
-static int WriteText(struct av7110 *av7110, u8 win, u16 x, u16 y, u8* buf)
+static int WriteText(struct av7110 *av7110, u8 win, u16 x, u16 y, char *buf)
 {
 	int i, ret;
 	unsigned long start;
diff --git a/drivers/media/dvb/ttpci/av7110_hw.h b/drivers/media/dvb/ttpci/av7110_hw.h
index 673d9b3..74d940f 100644
--- a/drivers/media/dvb/ttpci/av7110_hw.h
+++ b/drivers/media/dvb/ttpci/av7110_hw.h
@@ -393,7 +393,7 @@
 }
 
 /* buffer writes */
-static inline void mwdebi(struct av7110 *av7110, u32 config, int addr, char *val, int count)
+static inline void mwdebi(struct av7110 *av7110, u32 config, int addr, u8 *val, int count)
 {
 	memcpy(av7110->debi_virt, val, count);
 	av7110_debiwrite(av7110, config, addr, 0, count);
diff --git a/drivers/media/dvb/ttpci/av7110_ir.c b/drivers/media/dvb/ttpci/av7110_ir.c
index a97f166..6322800 100644
--- a/drivers/media/dvb/ttpci/av7110_ir.c
+++ b/drivers/media/dvb/ttpci/av7110_ir.c
@@ -356,7 +356,7 @@
 		input_dev->id.vendor = av7110->dev->pci->vendor;
 		input_dev->id.product = av7110->dev->pci->device;
 	}
-	input_dev->cdev.dev = &av7110->dev->pci->dev;
+	input_dev->dev.parent = &av7110->dev->pci->dev;
 	/* initial keymap */
 	memcpy(av7110->ir.key_map, default_key_map, sizeof av7110->ir.key_map);
 	input_register_keys(&av7110->ir);
diff --git a/drivers/media/dvb/ttpci/av7110_v4l.c b/drivers/media/dvb/ttpci/av7110_v4l.c
index fcd9994..87afaeb 100644
--- a/drivers/media/dvb/ttpci/av7110_v4l.c
+++ b/drivers/media/dvb/ttpci/av7110_v4l.c
@@ -333,7 +333,7 @@
 			return -EINVAL;
 
 		memset(t, 0, sizeof(*t));
-		strcpy(t->name, "Television");
+		strcpy((char *)t->name, "Television");
 
 		t->type = V4L2_TUNER_ANALOG_TV;
 		t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 0e817d6..0aee7a1 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -828,29 +828,6 @@
 	0xff, 0xff
 };
 
-static int philips_sd1878_tda8261_tuner_set_params(struct dvb_frontend *fe,
-						   struct dvb_frontend_parameters *params)
-{
-	u8              buf[4];
-	int             rc;
-	struct i2c_msg  tuner_msg = {.addr=0x60,.flags=0,.buf=buf,.len=sizeof(buf)};
-	struct budget *budget = (struct budget *) fe->dvb->priv;
-
-	if((params->frequency < 950000) || (params->frequency > 2150000))
-		return -EINVAL;
-
-	rc=dvb_pll_configure(&dvb_pll_philips_sd1878_tda8261, buf,
-			     params->frequency, 0);
-	if(rc < 0) return rc;
-
-	if (fe->ops.i2c_gate_ctrl)
-		fe->ops.i2c_gate_ctrl(fe, 1);
-	if(i2c_transfer(&budget->i2c_adap, &tuner_msg, 1) != 1)
-		return -EIO;
-
-    return 0;
-}
-
 static int philips_sd1878_ci_set_symbol_rate(struct dvb_frontend *fe,
 		u32 srate, u32 ratio)
 {
@@ -921,6 +898,7 @@
 #define SUBID_DVBS_TV_STAR		0x0014
 #define SUBID_DVBS_TV_STAR_CI		0x0016
 #define SUBID_DVBS_EASYWATCH_1  	0x001a
+#define SUBID_DVBS_EASYWATCH_2  	0x001b
 #define SUBID_DVBS_EASYWATCH		0x001e
 
 #define SUBID_DVBC_EASYWATCH		0x002a
@@ -982,10 +960,13 @@
 	case SUBID_DVBS_TV_STAR_CI:
 	case SUBID_DVBS_CYNERGY1200N:
 	case SUBID_DVBS_EASYWATCH:
+	case SUBID_DVBS_EASYWATCH_2:
 		fe = dvb_attach(stv0299_attach, &philips_sd1878_config,
 				&budget_av->budget.i2c_adap);
 		if (fe) {
-			fe->ops.tuner_ops.set_params = philips_sd1878_tda8261_tuner_set_params;
+			dvb_attach(dvb_pll_attach, fe, 0x60,
+				   &budget_av->budget.i2c_adap,
+				   DVB_PLL_PHILIPS_SD1878_TDA8261);
 		}
 		break;
 
@@ -1264,6 +1245,7 @@
 MAKE_BUDGET_INFO(kncxs, "KNC TV STAR DVB-S", BUDGET_TVSTAR);
 MAKE_BUDGET_INFO(satewpls, "Satelco EasyWatch DVB-S light", BUDGET_TVSTAR);
 MAKE_BUDGET_INFO(satewpls1, "Satelco EasyWatch DVB-S light", BUDGET_KNC1S);
+MAKE_BUDGET_INFO(satewps, "Satelco EasyWatch DVB-S", BUDGET_KNC1S);
 MAKE_BUDGET_INFO(satewplc, "Satelco EasyWatch DVB-C", BUDGET_KNC1CP);
 MAKE_BUDGET_INFO(satewcmk3, "Satelco EasyWatch DVB-C MK3", BUDGET_KNC1C_MK3);
 MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP);
@@ -1287,6 +1269,7 @@
 	MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0016),
 	MAKE_EXTENSION_PCI(satewpls, 0x1894, 0x001e),
 	MAKE_EXTENSION_PCI(satewpls1, 0x1894, 0x001a),
+	MAKE_EXTENSION_PCI(satewps, 0x1894, 0x001b),
 	MAKE_EXTENSION_PCI(satewplc, 0x1894, 0x002a),
 	MAKE_EXTENSION_PCI(satewcmk3, 0x1894, 0x002c),
 	MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020),
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index 9d42f88..873c3ba 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -206,7 +206,7 @@
 		input_dev->id.vendor = saa->pci->vendor;
 		input_dev->id.product = saa->pci->device;
 	}
-	input_dev->cdev.dev = &saa->pci->dev;
+	input_dev->dev.parent = &saa->pci->dev;
 
 	/* Select keymap and address */
 	switch (budget_ci->budget.dev->pci->subsystem_device) {
diff --git a/drivers/media/dvb/ttusb-budget/Makefile b/drivers/media/dvb/ttusb-budget/Makefile
index 6ab97f6..fbe2b95 100644
--- a/drivers/media/dvb/ttusb-budget/Makefile
+++ b/drivers/media/dvb/ttusb-budget/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_DVB_TTUSB_BUDGET) += dvb-ttusb-budget.o
 
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends
diff --git a/drivers/media/dvb/ttusb-dec/Makefile b/drivers/media/dvb/ttusb-dec/Makefile
index b41bf1f..2d70a82 100644
--- a/drivers/media/dvb/ttusb-dec/Makefile
+++ b/drivers/media/dvb/ttusb-dec/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_DVB_TTUSB_DEC) += ttusb_dec.o ttusbdecfe.o
 
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 194b102..f8bf9fe 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -324,8 +324,8 @@
 	  Enter the I/O port of your Zoltrix radio card.
 
 config USB_DSBR
-	tristate "D-Link USB FM radio support (EXPERIMENTAL)"
-	depends on USB && VIDEO_V4L2 && EXPERIMENTAL
+	tristate "D-Link/GemTek USB FM radio support"
+	depends on USB && VIDEO_V4L2
 	---help---
 	  Say Y here if you want to connect this type of radio to your
 	  computer's USB port. Note that the audio is not digital, and
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 5adc27c..ce940b1 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -392,7 +392,6 @@
 	.owner		= THIS_MODULE,
 	.name		= "RadioTrack radio",
 	.type		= VID_TYPE_TUNER,
-	.hardware	= 0,
 	.fops           = &rtrack_fops,
 	.vidioc_querycap    = vidioc_querycap,
 	.vidioc_g_tuner     = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index 9f1adda..9b1f7a9 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -355,7 +355,6 @@
 	.owner		    = THIS_MODULE,
 	.name		    = "Aztech radio",
 	.type		    = VID_TYPE_TUNER,
-	.hardware	    = 0,
 	.fops               = &aztech_fops,
 	.vidioc_querycap    = vidioc_querycap,
 	.vidioc_g_tuner     = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c
index 5e6f17d..4db05b2 100644
--- a/drivers/media/radio/radio-gemtek-pci.c
+++ b/drivers/media/radio/radio-gemtek-pci.c
@@ -377,7 +377,6 @@
 	.owner         = THIS_MODULE,
 	.name          = "Gemtek PCI Radio",
 	.type          = VID_TYPE_TUNER,
-	.hardware      = 0,
 	.fops          = &gemtek_pci_fops,
 	.vidioc_querycap    = vidioc_querycap,
 	.vidioc_g_tuner     = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index b04b6a7..eab8c80 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -330,7 +330,6 @@
 	.owner		= THIS_MODULE,
 	.name		= "GemTek radio",
 	.type		= VID_TYPE_TUNER,
-	.hardware	= 0,
 	.fops           = &gemtek_fops,
 	.vidioc_querycap    = vidioc_querycap,
 	.vidioc_g_tuner     = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index 9b493b3..82aedfc 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -297,7 +297,6 @@
 	.owner		= THIS_MODULE,
 	.name		= "RadioTrack II radio",
 	.type		= VID_TYPE_TUNER,
-	.hardware	= 0,
 	.fops           = &rtrack2_fops,
 	.vidioc_querycap    = vidioc_querycap,
 	.vidioc_g_tuner     = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index dc33f19..3951653 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -297,7 +297,6 @@
 	.owner		= THIS_MODULE,
 	.name		= "SF16FMx radio",
 	.type		= VID_TYPE_TUNER,
-	.hardware	= 0,
 	.fops           = &fmi_fops,
 	.vidioc_querycap    = vidioc_querycap,
 	.vidioc_g_tuner     = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index e6c125d..c432c44 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -442,7 +442,6 @@
 	.owner		= THIS_MODULE,
 	.name		= "SF16FMR2 radio",
 	. type		= VID_TYPE_TUNER,
-	.hardware	= 0,
 	.fops		= &fmr2_fops,
 	.vidioc_querycap    = vidioc_querycap,
 	.vidioc_g_tuner     = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index e43acfd..7e1911c 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -369,7 +369,6 @@
 	.owner		= THIS_MODULE,
 	.name		= "TerraTec ActiveRadio",
 	.type		= VID_TYPE_TUNER,
-	.hardware	= 0,
 	.fops           = &terratec_fops,
 	.vidioc_querycap    = vidioc_querycap,
 	.vidioc_g_tuner     = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c
index c27c629..c11981f 100644
--- a/drivers/media/radio/radio-trust.c
+++ b/drivers/media/radio/radio-trust.c
@@ -349,7 +349,6 @@
 	.owner		= THIS_MODULE,
 	.name		= "Trust FM Radio",
 	.type		= VID_TYPE_TUNER,
-	.hardware	= 0,
 	.fops           = &trust_fops,
 	.vidioc_querycap    = vidioc_querycap,
 	.vidioc_g_tuner     = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c
index 8ff5a23..1366326 100644
--- a/drivers/media/radio/radio-typhoon.c
+++ b/drivers/media/radio/radio-typhoon.c
@@ -349,7 +349,6 @@
 	.owner		= THIS_MODULE,
 	.name		= "Typhoon Radio",
 	.type		= VID_TYPE_TUNER,
-	.hardware	= 0,
 	.fops           = &typhoon_fops,
 	.vidioc_querycap    = vidioc_querycap,
 	.vidioc_g_tuner     = vidioc_g_tuner,
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 4d45a40..9dcbffd 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -489,6 +489,15 @@
 	  Say Y here to include support for Philips SAB3036 compatible tuners.
 	  If in doubt, say N.
 
+config TUNER_TEA5761
+	bool "TEA 5761 radio tuner (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on I2C
+	select VIDEO_TUNER
+	help
+	  Say Y here to include support for Philips TEA5761 radio tuner.
+	  If in doubt, say N.
+
 config VIDEO_VINO
 	tristate "SGI Vino Video For Linux (EXPERIMENTAL)"
 	depends on I2C && SGI_IP22 && EXPERIMENTAL && VIDEO_V4L2
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 9c2de50..10b4d44 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -7,6 +7,8 @@
 tuner-objs	:=	tuner-core.o tuner-types.o tuner-simple.o \
 			mt20xx.o tda8290.o tea5767.o tda9887.o
 
+tuner-$(CONFIG_TUNER_TEA5761)	+= tea5761.o
+
 msp3400-objs	:=	msp3400-driver.o msp3400-kthreads.o
 
 obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o compat_ioctl32.o
@@ -16,7 +18,7 @@
 endif
 
 obj-$(CONFIG_VIDEO_BT848) += bt8xx/
-obj-$(CONFIG_VIDEO_BT848) += ir-kbd-i2c.o
+obj-$(CONFIG_VIDEO_IR_I2C)  += ir-kbd-i2c.o
 obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
 obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
 obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
@@ -59,7 +61,7 @@
 obj-$(CONFIG_VIDEO_CPIA_PP) += cpia_pp.o
 obj-$(CONFIG_VIDEO_CPIA_USB) += cpia_usb.o
 obj-$(CONFIG_VIDEO_MEYE) += meye.o
-obj-$(CONFIG_VIDEO_SAA7134) += ir-kbd-i2c.o saa7134/
+obj-$(CONFIG_VIDEO_SAA7134) += saa7134/
 obj-$(CONFIG_VIDEO_CX88) += cx88/
 obj-$(CONFIG_VIDEO_IVTV) += ivtv/
 obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
diff --git a/drivers/media/video/adv7170.c b/drivers/media/video/adv7170.c
index 823cd6c..cbab53f 100644
--- a/drivers/media/video/adv7170.c
+++ b/drivers/media/video/adv7170.c
@@ -38,23 +38,23 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
-#include <linux/types.h>
+#include <asm/uaccess.h>
 
 #include <linux/videodev.h>
-#include <asm/uaccess.h>
+#include <linux/video_encoder.h>
 
 MODULE_DESCRIPTION("Analog Devices ADV7170 video encoder driver");
 MODULE_AUTHOR("Maxim Yevtyushkin");
 MODULE_LICENSE("GPL");
 
-#include <linux/i2c.h>
 
 #define I2C_NAME(x) (x)->name
 
-#include <linux/video_encoder.h>
 
 static int debug = 0;
 module_param(debug, int, 0);
diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c
index 05c7820..0d0c554 100644
--- a/drivers/media/video/adv7175.c
+++ b/drivers/media/video/adv7175.c
@@ -34,23 +34,23 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
-#include <linux/types.h>
+#include <asm/uaccess.h>
 
 #include <linux/videodev.h>
-#include <asm/uaccess.h>
+#include <linux/video_encoder.h>
 
 MODULE_DESCRIPTION("Analog Devices ADV7175 video encoder driver");
 MODULE_AUTHOR("Dave Perks");
 MODULE_LICENSE("GPL");
 
-#include <linux/i2c.h>
 
 #define I2C_NAME(s) (s)->name
 
-#include <linux/video_encoder.h>
 
 static int debug = 0;
 module_param(debug, int, 0);
diff --git a/drivers/media/video/bt819.c b/drivers/media/video/bt819.c
index 59a4360..12d1b92 100644
--- a/drivers/media/video/bt819.c
+++ b/drivers/media/video/bt819.c
@@ -38,23 +38,24 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
-#include <linux/types.h>
+#include <asm/uaccess.h>
 
 #include <linux/videodev.h>
-#include <asm/uaccess.h>
+#include <linux/video_decoder.h>
+
 
 MODULE_DESCRIPTION("Brooktree-819 video decoder driver");
 MODULE_AUTHOR("Mike Bernson & Dave Perks");
 MODULE_LICENSE("GPL");
 
-#include <linux/i2c.h>
 
 #define I2C_NAME(s) (s)->name
 
-#include <linux/video_decoder.h>
 
 static int debug = 0;
 module_param(debug, int, 0);
diff --git a/drivers/media/video/bt856.c b/drivers/media/video/bt856.c
index 853b1a3..e1028a7 100644
--- a/drivers/media/video/bt856.c
+++ b/drivers/media/video/bt856.c
@@ -38,23 +38,23 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/video_encoder.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
-#include <linux/types.h>
+#include <asm/uaccess.h>
 
 #include <linux/videodev.h>
-#include <asm/uaccess.h>
 
 MODULE_DESCRIPTION("Brooktree-856A video encoder driver");
 MODULE_AUTHOR("Mike Bernson & Dave Perks");
 MODULE_LICENSE("GPL");
 
-#include <linux/i2c.h>
 
 #define I2C_NAME(s) (s)->name
 
-#include <linux/video_encoder.h>
 
 static int debug = 0;
 module_param(debug, int, 0);
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 6b31e50..2aea09c 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -178,8 +178,8 @@
 	/* this seems to happen as well ... */
 	{ 0xff1211bd, BTTV_BOARD_PINNACLE,      "Pinnacle PCTV" },
 
-	{ 0x3000121a, BTTV_BOARD_VOODOOTV_FM,   "3Dfx VoodooTV FM/ VoodooTV 200" },
-	{ 0x263710b4, BTTV_BOARD_VOODOOTV_FM,   "3Dfx VoodooTV FM/ VoodooTV 200" },
+	{ 0x3000121a, BTTV_BOARD_VOODOOTV_200,  "3Dfx VoodooTV 200" },
+	{ 0x263710b4, BTTV_BOARD_VOODOOTV_FM,   "3Dfx VoodooTV FM" },
 	{ 0x3060121a, BTTV_BOARD_STB2,	  "3Dfx VoodooTV 100/ STB OEM" },
 
 	{ 0x3000144f, BTTV_BOARD_MAGICTVIEW063, "(Askey Magic/others) TView99 CPH06x" },
@@ -313,6 +313,7 @@
 	{ 0xdb1118ac, BTTV_BOARD_DVICO_DVBT_LITE,    "Ultraview DVB-T Lite" },
 	{ 0xd50018ac, BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE,    "DViCO FusionHDTV 5 Lite" },
 	{ 0x00261822, BTTV_BOARD_TWINHAN_DST,	"DNTV Live! Mini "},
+	{ 0xd200dbc0, BTTV_BOARD_DVICO_FUSIONHDTV_2,	"DViCO FusionHDTV 2" },
 
 	{ 0, -1, NULL }
 };
@@ -329,7 +330,7 @@
 		.tuner		= 0,
 		.svhs		= 2,
 		.muxsel		= { 2, 3, 1, 0 },
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -344,7 +345,7 @@
 		.gpiomux 	= { 2, 0, 0, 0 },
 		.gpiomute 	= 10,
 		.needs_tvaudio	= 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -359,7 +360,7 @@
 		.gpiomux 	= { 0, 1, 2, 3 },
 		.gpiomute 	= 4,
 		.needs_tvaudio	= 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -387,13 +388,13 @@
 		.name		= "Intel Create and Share PCI/ Smart Video Recorder III",
 		.video_inputs	= 4,
 		.audio_inputs	= 0,
-		.tuner		= -1,
+		.tuner		= UNSET,
 		.svhs		= 2,
 		.gpiomask	= 0,
 		.muxsel		= { 2, 3, 1, 1 },
 		.gpiomux 	= { 0 },
 		.needs_tvaudio	= 0,
-		.tuner_type	= 4,
+		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -408,7 +409,7 @@
 		.gpiomux 	= { 0, 1, 0, 1 },
 		.gpiomute 	= 3,
 		.needs_tvaudio	= 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -423,7 +424,7 @@
 		.gpiomux 	= { 0x0c, 0x04, 0x08, 0x04 },
 		/*                0x04 for some cards ?? */
 		.needs_tvaudio	= 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.audio_hook	= avermedia_tvphone_audio,
@@ -433,13 +434,13 @@
 		.name		= "MATRIX-Vision MV-Delta",
 		.video_inputs	= 5,
 		.audio_inputs	= 1,
-		.tuner		= -1,
+		.tuner		= UNSET,
 		.svhs		= 3,
 		.gpiomask	= 0,
 		.muxsel		= { 2, 3, 1, 0, 0 },
 		.gpiomux 	= { 0 },
 		.needs_tvaudio	= 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -457,7 +458,7 @@
 		.gpiomute 	= 0xc00,
 		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -488,7 +489,7 @@
 		.gpiomute 	= 4,
 		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -503,7 +504,7 @@
 		.gpiomux 	= { 0x20001,0x10001, 0, 0 },
 		.gpiomute 	= 10,
 		.needs_tvaudio	= 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -519,7 +520,7 @@
 		.muxsel		= { 2, 3, 1, 1 },
 		.gpiomux 	= { 13, 14, 11, 7 },
 		.needs_tvaudio	= 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -553,7 +554,7 @@
 		.gpiomute 	= 4,
 		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -568,7 +569,7 @@
 		.gpiomux 	= { 0, 0, 1, 0 },
 		.gpiomute 	= 10,
 		.needs_tvaudio	= 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -587,7 +588,7 @@
 		.gpiomute 	= 0x002000,
 		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 	},
 	[BTTV_BOARD_WINVIEW_601] = {
 		.name		= "Leadtek WinView 601",
@@ -600,7 +601,7 @@
 		.gpiomux 	= { 0x4fa007,0xcfa007,0xcfa007,0xcfa007 },
 		.gpiomute 	= 0xcfa007,
 		.needs_tvaudio	= 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.audio_hook	= winview_audio,
@@ -616,7 +617,7 @@
 		.muxsel		= { 2, 3, 1, 1 },
 		.gpiomux 	= { 1, 0, 0, 0 },
 		.needs_tvaudio	= 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -624,13 +625,13 @@
 		.name		= "Lifeview FlyVideo II EZ /FlyKit LR38 Bt848 (capture only)",
 		.video_inputs	= 4,
 		.audio_inputs	= 1,
-		.tuner		= -1,
-		.svhs		= -1,
+		.tuner		= UNSET,
+		.svhs		= UNSET,
 		.gpiomask	= 0x8dff00,
 		.muxsel		= { 2, 3, 1, 1 },
 		.gpiomux 	= { 0 },
 		.no_msp34xx	= 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -643,7 +644,7 @@
 		.tuner		= 0,
 		.svhs		= 2,
 		.muxsel		= { 2, 3, 1, 1 },
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -674,7 +675,7 @@
 		.gpiomute 	= 0xc00,
 		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -683,7 +684,7 @@
 		.video_inputs	= 3,
 		.audio_inputs	= 1,
 		.tuner		= 0,
-		.svhs		= -1,
+		.svhs		= UNSET,
 		.gpiomask	= 7,
 		.muxsel		= { 2, 3, -1 },
 		.digital_mode   = DIGITAL_MODE_CAMERA,
@@ -708,7 +709,7 @@
 		.gpiomute 	= 0xc00,
 		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.has_remote     = 1,
@@ -740,7 +741,7 @@
 		.gpiomux 	= { 0, 1, 2, 3 },
 		.gpiomute 	= 4,
 		.needs_tvaudio	= 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -813,13 +814,13 @@
 		.name		= "Imagenation PXC200",
 		.video_inputs	= 5,
 		.audio_inputs	= 1,
-		.tuner		= -1,
+		.tuner		= UNSET,
 		.svhs		= 1, /* was: 4 */
 		.gpiomask	= 0,
 		.muxsel		= { 2, 3, 1, 0, 0},
 		.gpiomux 	= { 0 },
 		.needs_tvaudio	= 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.muxsel_hook    = PXC200_muxsel,
@@ -836,7 +837,7 @@
 		.gpiomux 	= { 0, 0x0800, 0x1000, 0x1000 },
 		.gpiomute 	= 0x1800,
 		.pll            = PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -860,13 +861,13 @@
 		.name		= "Intel Create and Share PCI/ Smart Video Recorder III",
 		.video_inputs	= 4,
 		.audio_inputs	= 0,
-		.tuner		= -1,
+		.tuner		= UNSET,
 		.svhs		= 2,
 		.gpiomask	= 0,
 		.muxsel		= { 2, 3, 1, 1 },
 		.gpiomux 	= { 0 },
 		.needs_tvaudio	= 0,
-		.tuner_type	= 4,
+		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -911,7 +912,7 @@
 		.needs_tvaudio	= 0,
 		.pll		= PLL_28,
 		.has_radio	= 1,
-		.tuner_type	= 5, /* default for now, gpio reads BFFF06 for Pal bg+dk */
+		.tuner_type	= TUNER_PHILIPS_PAL, /* default for now, gpio reads BFFF06 for Pal bg+dk */
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.audio_hook	= winfast2000_audio,
@@ -928,7 +929,7 @@
 		.gpiomux 	= { 0, 0x800, 0x1000, 0x1000 },
 		.gpiomute 	= 0x1800,
 		.pll		= PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -945,7 +946,7 @@
 		.gpiomux 	= { 0, 0x800, 0x1000, 0x1000 },
 		.gpiomute 	= 0x1800,
 		.pll		= PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.has_radio	= 1,
@@ -962,7 +963,7 @@
 		.gpiomute 	= 0x29,
 		.no_msp34xx	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -978,7 +979,7 @@
 		.gpiomute 	= 0x551c00,
 		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= 1,
+		.tuner_type	= TUNER_PHILIPS_PAL_I,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.has_remote     = 1,
@@ -995,7 +996,7 @@
 		.gpiomute 	= 1,
 		.needs_tvaudio	= 0,
 		.pll		= PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1030,7 +1031,7 @@
 		.gpiomux 	= { 13, 4, 11, 7 },
 		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.has_radio	= 1,
@@ -1048,7 +1049,7 @@
 		.needs_tvaudio	= 1,
 		.no_msp34xx	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= 1,
+		.tuner_type	= TUNER_PHILIPS_PAL_I,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1063,7 +1064,7 @@
 		.gpiomux 	= { 0xff9ff6, 0xff9ff6, 0xff1ff7, 0 },
 		.gpiomute 	= 0xff3ffc,
 		.no_msp34xx	= 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1074,14 +1075,14 @@
 		.video_inputs	= 2,
 		.audio_inputs	= 1,
 		.tuner		= 0,
-		.svhs		= -1,
+		.svhs		= UNSET,
 		.gpiomask	= 3,
 		.muxsel		= { 2, 3, 1, 1 },
 		.gpiomux 	= { 1, 1, 0, 2 },
 		.gpiomute 	= 3,
 		.no_msp34xx	= 1,
 		.pll		= PLL_NONE,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1089,14 +1090,14 @@
 		.name		= "MATRIX-Vision MV-Delta 2",
 		.video_inputs	= 5,
 		.audio_inputs	= 1,
-		.tuner		= -1,
+		.tuner		= UNSET,
 		.svhs		= 3,
 		.gpiomask	= 0,
 		.muxsel		= { 2, 3, 1, 0, 0 },
 		.gpiomux 	= { 0 },
 		.no_msp34xx	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1112,7 +1113,7 @@
 		.gpiomute 	= 0xbcb03f,
 		.no_msp34xx	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= 21,
+		.tuner_type	= TUNER_TEMIC_4039FR5_NTSC,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1129,7 +1130,7 @@
 		.needs_tvaudio	= 1,
 		.no_msp34xx	= 1,
 		.pll		= PLL_35,
-		.tuner_type	= 1,
+		.tuner_type	= TUNER_PHILIPS_PAL_I,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.has_radio	= 1,
@@ -1148,7 +1149,7 @@
 		.gpiomute 	= 1,
 		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1206,7 +1207,7 @@
 		.gpiomux        = { 0, 1, 2, 3 },
 		.gpiomute 	= 4,
 		.pll            = PLL_28,
-		.tuner_type     = -1 /* TUNER_ALPS_TMDH2_NTSC */,
+		.tuner_type     = UNSET /* TUNER_ALPS_TMDH2_NTSC */,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1234,7 +1235,7 @@
 					1= FM stereo Radio from Tuner */
 		.needs_tvaudio  = 0,
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1277,7 +1278,7 @@
 				0x0080: Tuner A2 SAP (second audio program = Zweikanalton)
 				0x0880: Tuner A2 stereo */
 		.pll		= PLL_28,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1313,7 +1314,7 @@
 		.gpiomux        = { 0, 0x800, 0x1000, 0x1000 },
 		.gpiomute 	= 0x1800,
 		.pll            = PLL_28,
-		.tuner_type     = 5,
+		.tuner_type     = TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1324,7 +1325,7 @@
 		.name           = "GrandTec 'Grand Video Capture' (Bt848)",
 		.video_inputs   = 2,
 		.audio_inputs   = 0,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.svhs           = 1,
 		.gpiomask       = 0,
 		.muxsel         = { 3, 1 },
@@ -1332,7 +1333,7 @@
 		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
 		.pll            = PLL_35,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1365,7 +1366,7 @@
 		.gpiomux        = { 2, 0, 0, 0 },
 		.gpiomute 	= 1,
 		.pll            = PLL_28,
-		.tuner_type	= 0,
+		.tuner_type	= TUNER_TEMIC_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1377,7 +1378,7 @@
 		.video_inputs   = 2,
 		.audio_inputs   = 2,
 		.tuner		= 0,
-		.svhs		= -1,
+		.svhs		= UNSET,
 		.gpiomask       = 11,
 		.muxsel         = { 2, 3, 1, 1 },
 		.gpiomux        = { 2, 0, 0, 1 },
@@ -1392,7 +1393,7 @@
 		.name		= "AG Electronics GMV1",
 		.video_inputs   = 2,
 		.audio_inputs   = 0,
-		.tuner		= -1,
+		.tuner		= UNSET,
 		.svhs		= 1,
 		.gpiomask       = 0xF,
 		.muxsel		= { 2, 2 },
@@ -1400,7 +1401,7 @@
 		.no_msp34xx     = 1,
 		.needs_tvaudio  = 0,
 		.pll		= PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1447,7 +1448,7 @@
 		.video_inputs	= 2,
 		.audio_inputs	= 1,
 		.tuner		= 0,
-		.svhs		= -1,
+		.svhs		= UNSET,
 		.gpiomask	= 1,
 		.muxsel		= { 2, 3, 0, 1 },
 		.gpiomux 	= { 0, 0, 1, 0 },
@@ -1476,7 +1477,7 @@
 		.no_tda9875	= 1,
 		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
-		.tuner_type     = 5,
+		.tuner_type     = TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1517,13 +1518,35 @@
 
 	/* ---- card 0x44 ---------------------------------- */
 	[BTTV_BOARD_VOODOOTV_FM] = {
-		.name           = "3Dfx VoodooTV FM (Euro), VoodooTV 200 (USA)",
+		.name           = "3Dfx VoodooTV FM (Euro)",
 		/* try "insmod msp3400 simple=0" if you have
 		* sound problems with this card. */
 		.video_inputs   = 4,
 		.audio_inputs   = 1,
 		.tuner          = 0,
-		.svhs           = -1,
+		.svhs           = UNSET,
+		.gpiomask       = 0x4f8a00,
+		/* 0x100000: 1=MSP enabled (0=disable again)
+		* 0x010000: Connected to "S0" on tda9880 (0=Pal/BG, 1=NTSC) */
+		.gpiomux        = {0x947fff, 0x987fff,0x947fff,0x947fff },
+		.gpiomute 	= 0x947fff,
+		/* tvtuner, radio,   external,internal, mute,  stereo
+		* tuner, Composite, SVid, Composite-on-SVid-adapter */
+		.muxsel         = { 2, 3 ,0 ,1 },
+		.tuner_type     = TUNER_MT2032,
+		.tuner_addr	= ADDR_UNSET,
+		.radio_addr     = ADDR_UNSET,
+		.pll		= PLL_28,
+		.has_radio	= 1,
+	},
+	[BTTV_BOARD_VOODOOTV_200] = {
+		.name           = "VoodooTV 200 (USA)",
+		/* try "insmod msp3400 simple=0" if you have
+		* sound problems with this card. */
+		.video_inputs   = 4,
+		.audio_inputs   = 1,
+		.tuner          = 0,
+		.svhs           = UNSET,
 		.gpiomask       = 0x4f8a00,
 		/* 0x100000: 1=MSP enabled (0=disable again)
 		* 0x010000: Connected to "S0" on tda9880 (0=Pal/BG, 1=NTSC) */
@@ -1543,8 +1566,8 @@
 		.name           = "Active Imaging AIMMS",
 		.video_inputs   = 1,
 		.audio_inputs   = 0,
-		.tuner          = -1,
-		.tuner_type     = -1,
+		.tuner          = UNSET,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.pll            = PLL_28,
@@ -1564,7 +1587,7 @@
 		.gpiomute 	= 13,
 		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
-		.tuner_type     = 25,
+		.tuner_type     = TUNER_LG_PAL_I_FM,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.has_remote     = 1,
@@ -1580,7 +1603,7 @@
 		.name		= "Lifeview FlyVideo 98EZ (capture only) LR51",
 		.video_inputs	= 4,
 		.audio_inputs   = 0,
-		.tuner		= -1,
+		.tuner		= UNSET,
 		.svhs		= 2,
 		.muxsel		= { 2, 3, 1, 1 }, /* AV1, AV2, SVHS, CVid adapter on SVHS */
 		.pll		= PLL_28,
@@ -1606,7 +1629,7 @@
 		.no_msp34xx	= 1,
 		.no_tda9875	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= 5,
+		.tuner_type	= TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.audio_hook	= pvbt878p9b_audio, /* Note: not all cards have stereo */
@@ -1626,13 +1649,13 @@
 		.name           = "Sensoray 311",
 		.video_inputs   = 5,
 		.audio_inputs   = 0,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.svhs           = 4,
 		.gpiomask       = 0,
 		.muxsel         = { 2, 3, 1, 0, 0 },
 		.gpiomux        = { 0 },
 		.needs_tvaudio  = 0,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1641,15 +1664,15 @@
 		.name           = "RemoteVision MX (RV605)",
 		.video_inputs   = 16,
 		.audio_inputs   = 0,
-		.tuner          = -1,
-		.svhs           = -1,
+		.tuner          = UNSET,
+		.svhs           = UNSET,
 		.gpiomask       = 0x00,
 		.gpiomask2      = 0x07ff,
 		.muxsel         = { 0x33, 0x13, 0x23, 0x43, 0xf3, 0x73, 0xe3, 0x03,
 				0xd3, 0xb3, 0xc3, 0x63, 0x93, 0x53, 0x83, 0xa3 },
 		.no_msp34xx     = 1,
 		.no_tda9875     = 1,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.muxsel_hook    = rv605_muxsel,
@@ -1693,15 +1716,15 @@
 		.name           = "GrandTec Multi Capture Card (Bt878)",
 		.video_inputs   = 4,
 		.audio_inputs   = 0,
-		.tuner          = -1,
-		.svhs           = -1,
+		.tuner          = UNSET,
+		.svhs           = UNSET,
 		.gpiomask       = 0,
 		.muxsel         = { 2, 3, 1, 0 },
 		.gpiomux        = { 0 },
 		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1724,7 +1747,7 @@
 		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
 		.pll            = PLL_28,
-		.tuner_type     = 5,
+		.tuner_type     = TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		/* Samsung TCPA9095PC27A (BG+DK), philips compatible, w/FM, stereo and
@@ -1744,10 +1767,10 @@
 		/* Arthur Tetzlaff-Deas, DSP Design Ltd <software@dspdesign.com> */
 		.name           = "DSP Design TCVIDEO",
 		.video_inputs   = 4,
-		.svhs           = -1,
+		.svhs           = UNSET,
 		.muxsel         = { 2, 3, 1, 0 },
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -1762,7 +1785,7 @@
 		.muxsel         = { 2, 0, 1, 1 },
 		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 
@@ -1791,11 +1814,11 @@
 		.name           = "Osprey 100/150 (878)", /* 0x1(2|3)-45C6-C1 */
 		.video_inputs   = 4,                  /* id-inputs-clock */
 		.audio_inputs   = 0,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.svhs           = 3,
 		.muxsel         = { 3, 2, 0, 1 },
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.no_msp34xx     = 1,
@@ -1806,11 +1829,11 @@
 		.name           = "Osprey 100/150 (848)", /* 0x04-54C0-C1 & older boards */
 		.video_inputs   = 3,
 		.audio_inputs   = 0,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.svhs           = 2,
 		.muxsel         = { 2, 3, 1 },
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.no_msp34xx     = 1,
@@ -1823,11 +1846,11 @@
 		.name           = "Osprey 101 (848)", /* 0x05-40C0-C1 */
 		.video_inputs   = 2,
 		.audio_inputs   = 0,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.svhs           = 1,
 		.muxsel         = { 3, 1 },
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.no_msp34xx     = 1,
@@ -1838,11 +1861,11 @@
 		.name           = "Osprey 101/151",       /* 0x1(4|5)-0004-C4 */
 		.video_inputs   = 1,
 		.audio_inputs   = 0,
-		.tuner          = -1,
-		.svhs           = -1,
+		.tuner          = UNSET,
+		.svhs           = UNSET,
 		.muxsel         = { 0 },
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.no_msp34xx     = 1,
@@ -1853,11 +1876,11 @@
 		.name           = "Osprey 101/151 w/ svid",  /* 0x(16|17|20)-00C4-C1 */
 		.video_inputs   = 2,
 		.audio_inputs   = 0,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.svhs           = 1,
 		.muxsel         = { 0, 1 },
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.no_msp34xx     = 1,
@@ -1868,8 +1891,8 @@
 		.name           = "Osprey 200/201/250/251",  /* 0x1(8|9|E|F)-0004-C4 */
 		.video_inputs   = 1,
 		.audio_inputs   = 1,
-		.tuner          = -1,
-		.svhs           = -1,
+		.tuner          = UNSET,
+		.svhs           = UNSET,
 		.muxsel         = { 0 },
 		.pll            = PLL_28,
 		.tuner_type	= UNSET,
@@ -1885,7 +1908,7 @@
 		.name           = "Osprey 200/250",   /* 0x1(A|B)-00C4-C1 */
 		.video_inputs   = 2,
 		.audio_inputs   = 1,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.svhs           = 1,
 		.muxsel         = { 0, 1 },
 		.pll            = PLL_28,
@@ -1900,7 +1923,7 @@
 		.name           = "Osprey 210/220/230",   /* 0x1(A|B)-04C0-C1 */
 		.video_inputs   = 2,
 		.audio_inputs   = 1,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.svhs           = 1,
 		.muxsel         = { 2, 3 },
 		.pll            = PLL_28,
@@ -1915,11 +1938,11 @@
 		.name           = "Osprey 500",   /* 500 */
 		.video_inputs   = 2,
 		.audio_inputs   = 1,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.svhs           = 1,
 		.muxsel         = { 2, 3 },
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.no_msp34xx     = 1,
@@ -1930,9 +1953,9 @@
 		.name           = "Osprey 540",   /* 540 */
 		.video_inputs   = 4,
 		.audio_inputs   = 1,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.no_msp34xx     = 1,
@@ -1945,7 +1968,7 @@
 		.name           = "Osprey 2000",  /* 2000 */
 		.video_inputs   = 2,
 		.audio_inputs   = 1,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.svhs           = 1,
 		.muxsel         = { 2, 3 },
 		.pll            = PLL_28,
@@ -1961,11 +1984,11 @@
 		.name           = "IDS Eagle",
 		.video_inputs   = 4,
 		.audio_inputs   = 0,
-		.tuner          = -1,
-		.tuner_type     = -1,
+		.tuner          = UNSET,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
-		.svhs           = -1,
+		.svhs           = UNSET,
 		.gpiomask       = 0,
 		.muxsel         = { 0, 1, 2, 3 },
 		.muxsel_hook    = eagle_muxsel,
@@ -1978,8 +2001,8 @@
 		.video_inputs   = 2,
 		.audio_inputs   = 0,
 		.svhs           = 1,
-		.tuner          = -1,
-		.tuner_type     = -1,
+		.tuner          = UNSET,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.no_msp34xx     = 1,
@@ -2020,13 +2043,13 @@
 		.video_inputs   = 3,
 		.audio_inputs   = 1,
 		.tuner          = 0,
-		.svhs           = -1,
+		.svhs           = UNSET,
 		.gpiomask       = 7,
 		.muxsel         = { 2, 3, 1, 1},
 		.gpiomux        = { 0, 1, 2, 3},
 		.gpiomute 	= 4,
 		.needs_tvaudio  = 1,
-		.tuner_type     = 5,
+		.tuner_type     = TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.pll            = PLL_28,
@@ -2035,7 +2058,7 @@
 		.name           = "Euresys Picolo",
 		.video_inputs   = 3,
 		.audio_inputs   = 0,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.svhs           = 2,
 		.gpiomask       = 0,
 		.no_msp34xx     = 1,
@@ -2052,8 +2075,8 @@
 		.name           = "ProVideo PV150", /* 0x4f */
 		.video_inputs   = 2,
 		.audio_inputs   = 0,
-		.tuner          = -1,
-		.svhs           = -1,
+		.tuner          = UNSET,
+		.svhs           = UNSET,
 		.gpiomask       = 0,
 		.muxsel         = { 2, 3 },
 		.gpiomux        = { 0 },
@@ -2080,7 +2103,7 @@
 		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
 		.pll            = PLL_28,
-		.tuner_type     = 2,
+		.tuner_type     = TUNER_PHILIPS_NTSC,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.audio_hook	= adtvk503_audio,
@@ -2098,7 +2121,7 @@
 		.needs_tvaudio  = 1,
 		.no_msp34xx     = 1,
 		.pll            = PLL_28,
-		.tuner_type     = 5,
+		.tuner_type     = TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		/* Notes:
@@ -2121,7 +2144,7 @@
 		.gpiomask       = 0,
 		.no_tda9875     = 1,
 		.no_tda7432     = 1,
-		.tuner_type     = 1,
+		.tuner_type     = TUNER_PHILIPS_PAL_I,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.has_radio      = 1,
@@ -2138,11 +2161,11 @@
 		.name           = "IVC-200",
 		.video_inputs   = 1,
 		.audio_inputs   = 0,
-		.tuner          = -1,
-		.tuner_type     = -1,
+		.tuner          = UNSET,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
-		.svhs           = -1,
+		.svhs           = UNSET,
 		.gpiomask       = 0xdf,
 		.muxsel         = { 2 },
 		.pll            = PLL_28,
@@ -2151,9 +2174,9 @@
 		.name           = "Grand X-Guard / Trust 814PCI",
 		.video_inputs   = 16,
 		.audio_inputs   = 0,
-		.tuner          = -1,
-		.svhs           = -1,
-		.tuner_type     = 4,
+		.tuner          = UNSET,
+		.svhs           = UNSET,
+		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.gpiomask2      = 0xff,
@@ -2169,14 +2192,14 @@
 	[BTTV_BOARD_NEBULA_DIGITV] = {
 		.name           = "Nebula Electronics DigiTV",
 		.video_inputs   = 1,
-		.tuner          = -1,
-		.svhs           = -1,
+		.tuner          = UNSET,
+		.svhs           = UNSET,
 		.muxsel         = { 2, 3, 1, 0 },
 		.no_msp34xx     = 1,
 		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.has_dvb        = 1,
@@ -2189,15 +2212,15 @@
 		.name           = "ProVideo PV143",
 		.video_inputs   = 4,
 		.audio_inputs   = 0,
-		.tuner          = -1,
-		.svhs           = -1,
+		.tuner          = UNSET,
+		.svhs           = UNSET,
 		.gpiomask       = 0,
 		.muxsel         = { 2, 3, 1, 0 },
 		.gpiomux        = { 0 },
 		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -2206,14 +2229,14 @@
 		.name           = "PHYTEC VD-009-X1 MiniDIN (bt878)",
 		.video_inputs   = 4,
 		.audio_inputs   = 0,
-		.tuner          = -1, /* card has no tuner */
+		.tuner          = UNSET, /* card has no tuner */
 		.svhs           = 3,
 		.gpiomask       = 0x00,
 		.muxsel         = { 2, 3, 1, 0 },
 		.gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
 		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -2221,14 +2244,14 @@
 		.name           = "PHYTEC VD-009-X1 Combi (bt878)",
 		.video_inputs   = 4,
 		.audio_inputs   = 0,
-		.tuner          = -1, /* card has no tuner */
+		.tuner          = UNSET, /* card has no tuner */
 		.svhs           = 3,
 		.gpiomask       = 0x00,
 		.muxsel         = { 2, 3, 1, 1 },
 		.gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
 		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -2238,7 +2261,7 @@
 		.name           = "PHYTEC VD-009 MiniDIN (bt878)",
 		.video_inputs   = 10,
 		.audio_inputs   = 0,
-		.tuner          = -1, /* card has no tuner */
+		.tuner          = UNSET, /* card has no tuner */
 		.svhs           = 9,
 		.gpiomask       = 0x00,
 		.gpiomask2      = 0x03, /* gpiomask2 defines the bits used to switch audio
@@ -2248,7 +2271,7 @@
 		.gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
 		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -2256,7 +2279,7 @@
 		.name           = "PHYTEC VD-009 Combi (bt878)",
 		.video_inputs   = 10,
 		.audio_inputs   = 0,
-		.tuner          = -1, /* card has no tuner */
+		.tuner          = UNSET, /* card has no tuner */
 		.svhs           = 9,
 		.gpiomask       = 0x00,
 		.gpiomask2      = 0x03, /* gpiomask2 defines the bits used to switch audio
@@ -2266,7 +2289,7 @@
 		.gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
 		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -2274,11 +2297,11 @@
 		.name           = "IVC-100",
 		.video_inputs   = 4,
 		.audio_inputs   = 0,
-		.tuner          = -1,
-		.tuner_type     = -1,
+		.tuner          = UNSET,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
-		.svhs           = -1,
+		.svhs           = UNSET,
 		.gpiomask       = 0xdf,
 		.muxsel         = { 2, 3, 1, 0 },
 		.pll            = PLL_28,
@@ -2288,11 +2311,11 @@
 		.name           = "IVC-120G",
 		.video_inputs   = 16,
 		.audio_inputs   = 0,    /* card has no audio */
-		.tuner          = -1,   /* card has no tuner */
-		.tuner_type     = -1,
+		.tuner          = UNSET,   /* card has no tuner */
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
-		.svhs           = -1,   /* card has no svhs */
+		.svhs           = UNSET,   /* card has no svhs */
 		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
 		.no_tda9875     = 1,
@@ -2333,7 +2356,7 @@
 		.video_inputs   = 3,
 		.audio_inputs   = 0,
 		.svhs           = 1,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.muxsel         = { 3, 1, 1, 3 }, /* Vid In, SVid In, Vid over SVid in connector */
 		.no_msp34xx     = 1,
 		.no_tda9875     = 1,
@@ -2364,9 +2387,9 @@
 		.name           = "SIMUS GVC1100",
 		.video_inputs   = 4,
 		.audio_inputs   = 0,
-		.tuner          = -1,
-		.svhs           = -1,
-		.tuner_type     = -1,
+		.tuner          = UNSET,
+		.svhs           = UNSET,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.pll            = PLL_28,
@@ -2395,14 +2418,14 @@
 		.name           = "LMLBT4",
 		.video_inputs   = 4, /* IN1,IN2,IN3,IN4 */
 		.audio_inputs   = 0,
-		.tuner          = -1,
-		.svhs           = -1,
+		.tuner          = UNSET,
+		.svhs           = UNSET,
 		.muxsel         = { 2, 3, 1, 0 },
 		.no_msp34xx     = 1,
 		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.needs_tvaudio  = 0,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -2452,8 +2475,8 @@
 		.name           = "Euresys Picolo Tetra",
 		.video_inputs   = 4,
 		.audio_inputs   = 0,
-		.tuner          = -1,
-		.svhs           = -1,
+		.tuner          = UNSET,
+		.svhs           = UNSET,
 		.gpiomask       = 0,
 		.gpiomask2      = 0x3C<<16,/*Set the GPIO[18]->GPIO[21] as output pin.==> drive the video inputs through analog multiplexers*/
 		.no_msp34xx     = 1,
@@ -2464,7 +2487,7 @@
 		.pll            = PLL_28,
 		.needs_tvaudio  = 0,
 		.muxsel_hook    = picolo_tetra_muxsel,/*Required as it doesn't follow the classic input selection policy*/
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -2490,7 +2513,7 @@
 		.name           = "AVerMedia AVerTV DVB-T 771",
 		.video_inputs   = 2,
 		.svhs           = 1,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
@@ -2509,14 +2532,14 @@
 		/* Based on the Nebula card data - added remote and new card number - BTTV_BOARD_AVDVBT_761, see also ir-kbd-gpio.c */
 		.name           = "AverMedia AverTV DVB-T 761",
 		.video_inputs   = 2,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.svhs           = 1,
 		.muxsel         = { 3, 1, 2, 0 }, /* Comp0, S-Video, ?, ? */
 		.no_msp34xx     = 1,
 		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll            = PLL_28,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.has_dvb        = 1,
@@ -2528,8 +2551,8 @@
 		.name             = "MATRIX Vision Sigma-SQ",
 		.video_inputs     = 16,
 		.audio_inputs     = 0,
-		.tuner            = -1,
-		.svhs             = -1,
+		.tuner            = UNSET,
+		.svhs             = UNSET,
 		.gpiomask         = 0x0,
 		.muxsel           = { 2, 2, 2, 2, 2, 2, 2, 2,
 				3, 3, 3, 3, 3, 3, 3, 3 },
@@ -2537,7 +2560,7 @@
 		.gpiomux          = { 0 },
 		.no_msp34xx       = 1,
 		.pll              = PLL_28,
-		.tuner_type       = -1,
+		.tuner_type       = UNSET,
 		.tuner_addr	  = ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -2546,15 +2569,15 @@
 		.name             = "MATRIX Vision Sigma-SLC",
 		.video_inputs     = 4,
 		.audio_inputs     = 0,
-		.tuner            = -1,
-		.svhs             = -1,
+		.tuner            = UNSET,
+		.svhs             = UNSET,
 		.gpiomask         = 0x0,
 		.muxsel           = { 2, 2, 2, 2 },
 		.muxsel_hook      = sigmaSLC_muxsel,
 		.gpiomux          = { 0 },
 		.no_msp34xx       = 1,
 		.pll              = PLL_28,
-		.tuner_type       = -1,
+		.tuner_type       = UNSET,
 		.tuner_addr	  = ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -2566,7 +2589,7 @@
 		.video_inputs   = 2,
 		.audio_inputs   = 1,
 		.tuner          = 0,
-		.svhs           = -1,
+		.svhs           = UNSET,
 		.gpiomask       = 0xFF,
 		.muxsel         = { 2, 3, 1, 1 },
 		.gpiomux        = { 2, 0, 0, 0 },
@@ -2584,14 +2607,14 @@
 	[BTTV_BOARD_DVICO_DVBT_LITE] = {
 		/* Chris Pascoe <c.pascoe@itee.uq.edu.au> */
 		.name           = "DViCO FusionHDTV DVB-T Lite",
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.no_msp34xx     = 1,
 		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll            = PLL_28,
 		.no_video       = 1,
 		.has_dvb        = 1,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -2634,14 +2657,14 @@
 		.name           = "Tibet Systems 'Progress DVR' CS16",
 		.video_inputs   = 16,
 		.audio_inputs   = 0,
-		.tuner          = -1,
-		.svhs           = -1,
+		.tuner          = UNSET,
+		.svhs           = UNSET,
 		.muxsel         = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 },
 		.pll		= PLL_28,
 		.no_msp34xx     = 1,
 		.no_tda9875     = 1,
 		.no_tda7432	= 1,
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.muxsel_hook    = tibetCS16_muxsel,
@@ -2661,11 +2684,11 @@
 		.name           = "Kodicom 4400R (master)",
 		.video_inputs	= 16,
 		.audio_inputs	= 0,
-		.tuner		= -1,
-		.tuner_type	= -1,
+		.tuner		= UNSET,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
-		.svhs		= -1,
+		.svhs		= UNSET,
 		/* GPIO bits 0-9 used for analog switch:
 		*   00 - 03:	camera selector
 		*   04 - 06:	channel (controller) selector
@@ -2693,11 +2716,11 @@
 		.name		= "Kodicom 4400R (slave)",
 		.video_inputs	= 16,
 		.audio_inputs	= 0,
-		.tuner		= -1,
-		.tuner_type	= -1,
+		.tuner		= UNSET,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
-		.svhs		= -1,
+		.svhs		= UNSET,
 		.gpiomask	= 0x010000,
 		.no_gpioirq     = 1,
 		.muxsel		= { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
@@ -2717,7 +2740,7 @@
 		.tuner          = 0,
 		.svhs           = 2,
 		.muxsel         = { 2, 3, 1, 0 },
-		.tuner_type     = -1,
+		.tuner_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 		.pll            = PLL_28,
@@ -2824,7 +2847,7 @@
 		.name           = "Osprey 440",
 		.video_inputs   = 1,
 		.audio_inputs   = 1,
-		.tuner          = -1,
+		.tuner          = UNSET,
 		.svhs           = 1,
 		.muxsel         = { 2 },
 		.pll            = PLL_28,
@@ -2848,7 +2871,7 @@
 		.gpiomute 	= 1,
 		.needs_tvaudio	= 1,
 		.pll		= PLL_28,
-		.tuner_type	= 2,
+		.tuner_type	= TUNER_PHILIPS_NTSC,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -2875,14 +2898,14 @@
 		.name		= "Hauppauge ImpactVCB (bt878)",
 		.video_inputs	= 4,
 		.audio_inputs	= 0,
-		.tuner		= -1,
-		.svhs		= -1,
+		.tuner		= UNSET,
+		.svhs		= UNSET,
 		.gpiomask	= 0x0f, /* old: 7 */
 		.muxsel		= { 0, 1, 3, 2 }, /* Composite 0-3 */
 		.no_msp34xx	= 1,
 		.no_tda9875     = 1,
 		.no_tda7432     = 1,
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -2914,10 +2937,10 @@
 		.name		= "SSAI Security Video Interface",
 		.video_inputs	= 4,
 		.audio_inputs	= 0,
-		.tuner		= -1,
-		.svhs		= -1,
+		.tuner		= UNSET,
+		.svhs		= UNSET,
 		.muxsel		= { 0, 1, 2, 3 },
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
@@ -2925,13 +2948,31 @@
 		.name		= "SSAI Ultrasound Video Interface",
 		.video_inputs	= 2,
 		.audio_inputs	= 0,
-		.tuner		= -1,
+		.tuner		= UNSET,
 		.svhs		= 1,
 		.muxsel		= { 2, 0, 1, 3 },
-		.tuner_type	= -1,
+		.tuner_type	= UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr     = ADDR_UNSET,
 	},
+	/* ---- card 0x94 --------------------------------- */
+	[BTTV_BOARD_DVICO_FUSIONHDTV_2] = {
+		.name           = "DViCO FusionHDTV 2",
+		.tuner          = 0,
+		.tuner_type     = TUNER_PHILIPS_ATSC, /* FCV1236D */
+		.tuner_addr	= ADDR_UNSET,
+		.radio_addr     = ADDR_UNSET,
+		.video_inputs   = 3,
+		.audio_inputs   = 1,
+		.svhs           = 2,
+		.muxsel		= { 2, 3, 1 },
+		.gpiomask       = 0x00e00007,
+		.gpiomux        = { 0x00400005, 0, 0x00000001, 0 },
+		.gpiomute 	= 0x00c00007,
+		.no_msp34xx     = 1,
+		.no_tda9875     = 1,
+		.no_tda7432     = 1,
+	},
 };
 
 static const unsigned int bttv_num_tvcards = ARRAY_SIZE(bttv_tvcards);
@@ -3040,7 +3081,7 @@
 static void flyvideo_gpio(struct bttv *btv)
 {
 	int gpio,has_remote,has_radio,is_capture_only,is_lr90,has_tda9820_tda9821;
-	int tuner=-1,ttype;
+	int tuner=UNSET,ttype;
 
 	gpio_inout(0xffffff, 0);
 	udelay(8);  /* without this we would see the 0x1800 mask */
@@ -3085,7 +3126,7 @@
 	 * gpio & 0x001000    output bit for audio routing */
 
 	if(is_capture_only)
-		tuner=4; /* No tuner present */
+		tuner = TUNER_ABSENT; /* No tuner present */
 
 	printk(KERN_INFO "bttv%d: FlyVideo Radio=%s RemoteControl=%s Tuner=%d gpio=0x%06x\n",
 	       btv->c.nr, has_radio? "yes":"no ", has_remote? "yes":"no ", tuner, gpio);
@@ -3093,7 +3134,7 @@
 		btv->c.nr, is_lr90?"yes":"no ", has_tda9820_tda9821?"yes":"no ",
 		is_capture_only?"yes":"no ");
 
-	if(tuner!= -1) /* only set if known tuner autodetected, else let insmod option through */
+	if (tuner != UNSET) /* only set if known tuner autodetected, else let insmod option through */
 		btv->tuner_type = tuner;
 	btv->has_radio = has_radio;
 
@@ -3302,6 +3343,7 @@
 	case BTTV_BOARD_HAUPPAUGE878:
 		boot_msp34xx(btv,5);
 		break;
+	case BTTV_BOARD_VOODOOTV_200:
 	case BTTV_BOARD_VOODOOTV_FM:
 		boot_msp34xx(btv,20);
 		break;
@@ -3328,10 +3370,9 @@
 /* initialization part two -- after registering i2c bus */
 void __devinit bttv_init_card2(struct bttv *btv)
 {
-	int tda9887;
 	int addr=ADDR_UNSET;
 
-	btv->tuner_type = -1;
+	btv->tuner_type = UNSET;
 
 	if (BTTV_BOARD_UNKNOWN == btv->c.type) {
 		bttv_readee(btv,eeprom_data,0xa0);
@@ -3479,7 +3520,15 @@
 			btv->tuner_type = bttv_tvcards[btv->c.type].tuner_type;
 	if (UNSET != tuner[btv->c.nr])
 		btv->tuner_type = tuner[btv->c.nr];
-	printk("bttv%d: using tuner=%d\n",btv->c.nr,btv->tuner_type);
+
+	if (btv->tuner_type == TUNER_ABSENT ||
+	    bttv_tvcards[btv->c.type].tuner == UNSET)
+		printk(KERN_INFO "bttv%d: tuner absent\n", btv->c.nr);
+	else if(btv->tuner_type == UNSET)
+		printk(KERN_WARNING "bttv%d: tuner type unset\n", btv->c.nr);
+	else
+		printk(KERN_INFO "bttv%d: tuner type=%d\n", btv->c.nr,
+		       btv->tuner_type);
 
 	if (btv->tuner_type != UNSET) {
 		struct tuner_setup tun_setup;
@@ -3521,6 +3570,9 @@
 	if (!autoload)
 		return;
 
+	if (bttv_tvcards[btv->c.type].tuner == UNSET)
+		return;  /* no tuner or related drivers to load */
+
 	/* try to detect audio/fader chips */
 	if (!bttv_tvcards[btv->c.type].no_msp34xx &&
 	    bttv_I2CRead(btv, I2C_ADDR_MSP3400, "MSP34xx") >=0)
@@ -3541,17 +3593,7 @@
 	if (bttv_tvcards[btv->c.type].needs_tvaudio)
 		request_module("tvaudio");
 
-	/* tuner modules */
-	tda9887 = 0;
-	if (btv->tda9887_conf)
-		tda9887 = 1;
-	if (0 == tda9887 && 0 == bttv_tvcards[btv->c.type].has_dvb &&
-	    bttv_I2CRead(btv, I2C_ADDR_TDA9887, "TDA9887") >=0)
-		tda9887 = 1;
-	/* Hybrid DVB card, DOES have a tda9887 */
-	if (btv->c.type == BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE)
-		tda9887 = 1;
-	if (btv->tuner_type != UNSET)
+	if (btv->tuner_type != UNSET && btv->tuner_type != TUNER_ABSENT)
 		request_module("tuner");
 }
 
@@ -3865,11 +3907,15 @@
 	if(norm==VIDEO_MODE_NTSC) {
 		bttv_tvcards[BTTV_BOARD_VOODOOTV_FM].gpiomux[TVAUDIO_INPUT_TUNER]=0x957fff;
 		bttv_tvcards[BTTV_BOARD_VOODOOTV_FM].gpiomute=0x957fff;
+		bttv_tvcards[BTTV_BOARD_VOODOOTV_200].gpiomux[TVAUDIO_INPUT_TUNER]=0x957fff;
+		bttv_tvcards[BTTV_BOARD_VOODOOTV_200].gpiomute=0x957fff;
 		dprintk("bttv_tda9880_setnorm to NTSC\n");
 	}
 	else {
 		bttv_tvcards[BTTV_BOARD_VOODOOTV_FM].gpiomux[TVAUDIO_INPUT_TUNER]=0x947fff;
 		bttv_tvcards[BTTV_BOARD_VOODOOTV_FM].gpiomute=0x947fff;
+		bttv_tvcards[BTTV_BOARD_VOODOOTV_200].gpiomux[TVAUDIO_INPUT_TUNER]=0x947fff;
+		bttv_tvcards[BTTV_BOARD_VOODOOTV_200].gpiomute=0x947fff;
 		dprintk("bttv_tda9880_setnorm to PAL\n");
 	}
 	/* set GPIO according */
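The bttv-cards.c hunks above replace bare -1 sentinels with the named constants UNSET and TUNER_ABSENT, making the two distinct "no tuner" cases explicit. A minimal sketch of the convention, assuming the definitions from this tree's tuner headers and mirroring the precedence used in bttv_init_card2() above (illustrative helper, not driver code):

	#define UNSET        (-1U)	/* per media/tuner.h: nothing configured/detected yet */
	#define TUNER_ABSENT 4		/* per media/tuner.h: card verified to have no tuner  */

	static unsigned int example_pick_tuner(unsigned int card_default,
					       unsigned int insmod_override)
	{
		unsigned int tuner_type = UNSET;

		if (card_default != UNSET)
			tuner_type = card_default;	/* from bttv_tvcards[]     */
		if (insmod_override != UNSET)
			tuner_type = insmod_override;	/* tuner= module parameter */

		/* UNSET        -> warn "tuner type unset", user may still override
		 * TUNER_ABSENT -> report "tuner absent", skip the tuner module    */
		return tuner_type;
	}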
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index b1fedb0..cb555f2 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -1218,7 +1218,14 @@
 			break;
 		case TVAUDIO_INPUT_TUNER:
 		default:
-			route.input = MSP_INPUT_DEFAULT;
+			/* This is the only card that uses TUNER2 and, as far
+			   as we know, the only difference between the
+			   VOODOOTV_FM and the VOODOOTV_200. */
+			if (btv->c.type == BTTV_BOARD_VOODOOTV_200)
+				route.input = MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER2, \
+					MSP_DSP_IN_TUNER, MSP_DSP_IN_TUNER);
+			else
+				route.input = MSP_INPUT_DEFAULT;
 			break;
 		}
 		route.output = MSP_OUTPUT_DEFAULT;
@@ -1253,7 +1260,7 @@
 	v4l2_std_id std = bttv_tvnorms[btv->tvnorm].v4l2_id;
 
 	bttv_call_i2c_clients(btv, VIDIOC_S_STD, &std);
-	if (btv->c.type == BTTV_BOARD_VOODOOTV_FM)
+	if (btv->c.type == BTTV_BOARD_VOODOOTV_FM || btv->c.type == BTTV_BOARD_VOODOOTV_200)
 		bttv_tda9880_setnorm(btv,btv->tvnorm);
 }
 
@@ -1323,6 +1330,7 @@
 
 	switch (btv->c.type) {
 	case BTTV_BOARD_VOODOOTV_FM:
+	case BTTV_BOARD_VOODOOTV_200:
 		bttv_tda9880_setnorm(btv,norm);
 		break;
 	}
@@ -2251,6 +2259,24 @@
 		printk(KERN_INFO "bttv%d: ==================  END STATUS CARD #%d  ==================\n", btv->c.nr, btv->c.nr);
 		return 0;
 	}
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	case VIDIOC_DBG_G_REGISTER:
+	case VIDIOC_DBG_S_REGISTER:
+	{
+		struct v4l2_register *reg = arg;
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
+			return -EINVAL;
+		/* bt848 has a 12-bit register space */
+		reg->reg &= 0xfff;
+		if (cmd == VIDIOC_DBG_G_REGISTER)
+			reg->val = btread(reg->reg);
+		else
+			btwrite(reg->val, reg->reg);
+		return 0;
+	}
+#endif
 
 	default:
 		return -ENOIOCTLCMD;
@@ -3561,6 +3587,8 @@
 	case VIDIOC_G_FREQUENCY:
 	case VIDIOC_S_FREQUENCY:
 	case VIDIOC_LOG_STATUS:
+	case VIDIOC_DBG_G_REGISTER:
+	case VIDIOC_DBG_S_REGISTER:
 		return bttv_common_ioctls(btv,cmd,arg);
 
 	default:
@@ -3943,6 +3971,8 @@
 	case VIDIOCGAUDIO:
 	case VIDIOCSAUDIO:
 	case VIDIOC_LOG_STATUS:
+	case VIDIOC_DBG_G_REGISTER:
+	case VIDIOC_DBG_S_REGISTER:
 		return bttv_common_ioctls(btv,cmd,arg);
 
 	default:
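The new VIDIOC_DBG_G_REGISTER/VIDIOC_DBG_S_REGISTER cases give privileged userspace raw access to the bt848 register window for debugging. A hypothetical userspace sketch, assuming the ioctls and struct v4l2_register are visible from <linux/videodev2.h> on a kernel built with CONFIG_VIDEO_ADV_DEBUG, and that V4L2_CHIP_MATCH_HOST selects the bridge chip:

	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int bt848_peek(int fd, unsigned long reg, unsigned long long *val)
	{
		struct v4l2_register r = {
			.match_type = V4L2_CHIP_MATCH_HOST,
			.match_chip = 0,
			.reg        = reg,	/* driver masks this to 12 bits */
		};

		if (ioctl(fd, VIDIOC_DBG_G_REGISTER, &r) < 0)
			return -1;		/* needs CAP_SYS_ADMIN */
		*val = r.val;
		return 0;
	}

VIDIOC_DBG_S_REGISTER works the same way, with .val filled in before the call.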
diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c
index 6f74c80..94a13d0 100644
--- a/drivers/media/video/bt8xx/bttv-input.c
+++ b/drivers/media/video/bt8xx/bttv-input.c
@@ -313,7 +313,7 @@
 		input_dev->id.vendor  = btv->c.pci->vendor;
 		input_dev->id.product = btv->c.pci->device;
 	}
-	input_dev->cdev.dev = &btv->c.pci->dev;
+	input_dev->dev.parent = &btv->c.pci->dev;
 
 	btv->remote = ir;
 	bttv_ir_start(btv, ir);
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index f821ba6..dcc847d 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -170,6 +170,8 @@
 #define BTTV_BOARD_MACHTV_MAGICTV          0x90
 #define BTTV_BOARD_SSAI_SECURITY	   0x91
 #define BTTV_BOARD_SSAI_ULTRASOUND	   0x92
+#define BTTV_BOARD_VOODOOTV_200		   0x93
+#define BTTV_BOARD_DVICO_FUSIONHDTV_2	   0x94
 
 /* more card-specific defines */
 #define PT2254_L_CHANNEL 0x10
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index 8f44f02..bd85f6d 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -33,12 +33,12 @@
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/videodev.h>
-#include <media/v4l2-common.h>
 #include <linux/pci.h>
 #include <linux/input.h>
 #include <linux/mutex.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
+#include <media/v4l2-common.h>
 
 #include <linux/device.h>
 #include <media/video-buf.h>
diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
index fd771c7..a76bd78 100644
--- a/drivers/media/video/cpia2/cpia2_core.c
+++ b/drivers/media/video/cpia2/cpia2_core.c
@@ -663,15 +663,13 @@
 		cpia2_send_command(cam, &cmd);
 	}
 
-	current->state = TASK_INTERRUPTIBLE;
-	schedule_timeout(100 * HZ / 1000);	/* wait for 100 msecs */
+	schedule_timeout_interruptible(msecs_to_jiffies(100));
 
 	if (cam->params.pnp_id.device_type == DEVICE_STV_672)
 		retval = apply_vp_patch(cam);
 
 	/* wait for vp to go to sleep */
-	current->state = TASK_INTERRUPTIBLE;
-	schedule_timeout(100 * HZ / 1000);	/* wait for 100 msecs */
+	schedule_timeout_interruptible(msecs_to_jiffies(100));
 
 	/***
 	 * If this is a 676, apply VP5 fixes before we start streaming
@@ -720,8 +718,7 @@
 	set_default_user_mode(cam);
 
 	/* Give VP time to wake up */
-	current->state = TASK_INTERRUPTIBLE;
-	schedule_timeout(100 * HZ / 1000);	/* wait for 100 msecs */
+	schedule_timeout_interruptible(msecs_to_jiffies(100));
 
 	set_all_properties(cam);
 
@@ -2227,15 +2224,13 @@
 {
 	struct camera_data *cam;
 
-	cam = kmalloc(sizeof(*cam), GFP_KERNEL);
+	cam = kzalloc(sizeof(*cam), GFP_KERNEL);
 
 	if (!cam) {
 		ERR("couldn't kmalloc cpia2 struct\n");
 		return NULL;
 	}
 
-	/* Default everything to 0 */
-	memset(cam, 0, sizeof(struct camera_data));
 
 	cam->present = 1;
 	mutex_init(&cam->busy_lock);
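Two idioms are substituted in cpia2_core.c above: the open-coded "current->state = TASK_INTERRUPTIBLE; schedule_timeout(100 * HZ / 1000)" pair becomes schedule_timeout_interruptible(msecs_to_jiffies(100)), and kmalloc()+memset(0) becomes kzalloc(). A minimal kernel-context sketch of both (illustrative helper, not driver code):

	#include <linux/jiffies.h>
	#include <linux/sched.h>
	#include <linux/slab.h>

	static void *example_alloc_and_settle(size_t size)
	{
		/* kzalloc() = kmalloc() + zeroing, in one call */
		void *p = kzalloc(size, GFP_KERNEL);

		if (!p)
			return NULL;

		/* sleep ~100 ms, interruptible; HZ never appears explicitly */
		schedule_timeout_interruptible(msecs_to_jiffies(100));
		return p;
	}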
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 1bda7ad..92778cd 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -105,7 +105,7 @@
 	{ CPIA2_VP_FRAMERATE_25,   "25 fps"   },
 	{ CPIA2_VP_FRAMERATE_30,   "30 fps"   },
 };
-#define NUM_FRAMERATE_CONTROLS (sizeof(framerate_controls)/sizeof(framerate_controls[0]))
+#define NUM_FRAMERATE_CONTROLS (ARRAY_SIZE(framerate_controls))
 
 static struct control_menu_info flicker_controls[] =
 {
@@ -113,7 +113,7 @@
 	{ FLICKER_50,    "50 Hz" },
 	{ FLICKER_60,    "60 Hz"  },
 };
-#define NUM_FLICKER_CONTROLS (sizeof(flicker_controls)/sizeof(flicker_controls[0]))
+#define NUM_FLICKER_CONTROLS (ARRAY_SIZE(flicker_controls))
 
 static struct control_menu_info lights_controls[] =
 {
@@ -122,7 +122,7 @@
 	{ 128, "Bottom"  },
 	{ 192, "Both"  },
 };
-#define NUM_LIGHTS_CONTROLS (sizeof(lights_controls)/sizeof(lights_controls[0]))
+#define NUM_LIGHTS_CONTROLS (ARRAY_SIZE(lights_controls))
 #define GPIO_LIGHTS_MASK 192
 
 static struct v4l2_queryctrl controls[] = {
@@ -235,7 +235,7 @@
 		.default_value = 0,
 	},
 };
-#define NUM_CONTROLS (sizeof(controls)/sizeof(controls[0]))
+#define NUM_CONTROLS (ARRAY_SIZE(controls))
 
 
 /******************************************************************************
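The NUM_* macros above now use ARRAY_SIZE() from <linux/kernel.h> rather than spelling out the sizeof division. A simplified sketch of what it expands to (the in-kernel definition may carry extra type checking on top of this):

	#define EXAMPLE_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

	static const char *example_rates[] = { "25 fps", "30 fps" };
	/* EXAMPLE_ARRAY_SIZE(example_rates) == 2, computed at compile time and
	 * kept in sync automatically when entries are added or removed. */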
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 0f9d969..f750a54 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -47,7 +47,7 @@
 	tristate "DVB/ATSC Support for cx2388x based TV cards"
 	depends on VIDEO_CX88 && DVB_CORE
 	select VIDEO_BUF_DVB
-	select DVB_PLL
+	select DVB_PLL if !DVB_FE_CUSTOMISE
 	select DVB_MT352 if !DVB_FE_CUSTOMISE
 	select DVB_ZL10353 if !DVB_FE_CUSTOMISE
 	select DVB_OR51132 if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index a80b1cb..f2fcdb9 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -56,8 +56,7 @@
 
 /* ------------------------------------------------------------------ */
 
-#define OLD_BLACKBIRD_FIRM_IMAGE_SIZE 262144
-#define     BLACKBIRD_FIRM_IMAGE_SIZE 376836
+#define BLACKBIRD_FIRM_IMAGE_SIZE 376836
 
 /* defines below are from ivtv-driver.h */
 
@@ -405,7 +404,7 @@
 	u32 value;
 	int i;
 
-	for (i = 0; i < dev->fw_size; i++) {
+	for (i = 0; i < BLACKBIRD_FIRM_IMAGE_SIZE; i++) {
 		memory_read(dev->core, i, &value);
 		if (value == signature[signaturecnt])
 			signaturecnt++;
@@ -453,15 +452,12 @@
 		return -1;
 	}
 
-	if ((firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) &&
-	    (firmware->size != OLD_BLACKBIRD_FIRM_IMAGE_SIZE)) {
-		dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d or %d)\n",
-			firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE,
-			OLD_BLACKBIRD_FIRM_IMAGE_SIZE);
+	if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) {
+		dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d)\n",
+			firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE);
 		release_firmware(firmware);
 		return -1;
 	}
-	dev->fw_size = firmware->size;
 
 	if (0 != memcmp(firmware->data, magic, 8)) {
 		dprintk(0, "ERROR: Firmware magic mismatch, wrong file?\n");
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index e61102d..6a136dd 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1335,6 +1335,26 @@
 		/* fixme: Add radio support */
 		.mpeg           = CX88_MPEG_DVB | CX88_MPEG_BLACKBIRD,
 	},
+	[CX88_BOARD_ADSTECH_PTV_390] = {
+		.name           = "ADS Tech Instant Video PCI",
+		.tuner_type     = TUNER_ABSENT,
+		.radio_type     = UNSET,
+		.tuner_addr     = ADDR_UNSET,
+		.radio_addr     = ADDR_UNSET,
+		.input          = {{
+			.type   = CX88_VMUX_DEBUG,
+			.vmux   = 3,
+			.gpio0  = 0x04ff,
+		},{
+			.type   = CX88_VMUX_COMPOSITE1,
+			.vmux   = 1,
+			.gpio0  = 0x07fa,
+		},{
+			.type   = CX88_VMUX_SVIDEO,
+			.vmux   = 2,
+			.gpio0  = 0x07fa,
+		}},
+	},
 };
 const unsigned int cx88_bcount = ARRAY_SIZE(cx88_boards);
 
@@ -1641,6 +1661,10 @@
 		.subvendor = 0x1421,
 		.subdevice = 0x0341, /* ADS Tech InstantTV DVB-S */
 		.card      = CX88_BOARD_KWORLD_DVBS_100,
+	},{
+		.subvendor = 0x1421,
+		.subdevice = 0x0390,
+		.card      = CX88_BOARD_ADSTECH_PTV_390,
 	},
 };
 const unsigned int cx88_idcount = ARRAY_SIZE(cx88_subids);
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index dbfe4dc..1773b40 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -35,9 +35,7 @@
 
 #include "mt352.h"
 #include "mt352_priv.h"
-#if defined(CONFIG_VIDEO_CX88_VP3054) || defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
-# include "cx88-vp3054-i2c.h"
-#endif
+#include "cx88-vp3054-i2c.h"
 #include "zl10353.h"
 #include "cx22702.h"
 #include "or51132.h"
@@ -199,7 +197,7 @@
 	.demod_init    = dvico_dual_demod_init,
 };
 
-#if defined(CONFIG_VIDEO_CX88_VP3054) || defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
+#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
 static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe)
 {
 	static u8 clock_config []  = { 0x89, 0x38, 0x38 };
@@ -223,64 +221,6 @@
 	return 0;
 }
 
-static int philips_fmd1216_pll_init(struct dvb_frontend *fe)
-{
-	struct cx8802_dev *dev= fe->dvb->priv;
-
-	/* this message is to set up ATC and ALC */
-	static u8 fmd1216_init[] = { 0x0b, 0xdc, 0x9c, 0xa0 };
-	struct i2c_msg msg =
-		{ .addr = dev->core->pll_addr, .flags = 0,
-		  .buf = fmd1216_init, .len = sizeof(fmd1216_init) };
-	int err;
-
-	if (fe->ops.i2c_gate_ctrl)
-		fe->ops.i2c_gate_ctrl(fe, 1);
-	if ((err = i2c_transfer(&dev->core->i2c_adap, &msg, 1)) != 1) {
-		if (err < 0)
-			return err;
-		else
-			return -EREMOTEIO;
-	}
-
-	return 0;
-}
-
-static int dntv_live_dvbt_pro_tuner_set_params(struct dvb_frontend* fe,
-					       struct dvb_frontend_parameters* params)
-{
-	struct cx8802_dev *dev= fe->dvb->priv;
-	u8 buf[4];
-	struct i2c_msg msg =
-		{ .addr = dev->core->pll_addr, .flags = 0,
-		  .buf = buf, .len = 4 };
-	int err;
-
-	/* Switch PLL to DVB mode */
-	err = philips_fmd1216_pll_init(fe);
-	if (err)
-		return err;
-
-	/* Tune PLL */
-	dvb_pll_configure(dev->core->pll_desc, buf,
-			  params->frequency,
-			  params->u.ofdm.bandwidth);
-	if (fe->ops.i2c_gate_ctrl)
-		fe->ops.i2c_gate_ctrl(fe, 1);
-	if ((err = i2c_transfer(&dev->core->i2c_adap, &msg, 1)) != 1) {
-
-		printk(KERN_WARNING "cx88-dvb: %s error "
-		       "(addr %02x <- %02x, err = %i)\n",
-		       __FUNCTION__, dev->core->pll_addr, buf[0], err);
-		if (err < 0)
-			return err;
-		else
-			return -EREMOTEIO;
-	}
-
-	return 0;
-}
-
 static struct mt352_config dntv_live_dvbt_pro_config = {
 	.demod_address = 0x0f,
 	.no_tuner      = 1,
@@ -370,18 +310,8 @@
 	return 0;
 }
 
-static int nxt200x_set_pll_input(u8* buf, int input)
-{
-	if (input)
-		buf[3] |= 0x08;
-	else
-		buf[3] &= ~0x08;
-	return 0;
-}
-
 static struct nxt200x_config ati_hdtvwonder = {
 	.demod_address = 0x0a,
-	.set_pll_input = nxt200x_set_pll_input,
 	.set_ts_params = nxt200x_set_ts_param,
 };
 
@@ -456,7 +386,7 @@
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
 				   &dev->core->i2c_adap,
-				   &dvb_pll_thomson_dtt759x);
+				   DVB_PLL_THOMSON_DTT759X);
 		}
 		break;
 	case CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1:
@@ -469,7 +399,7 @@
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x60,
 				   &dev->core->i2c_adap,
-				   &dvb_pll_thomson_dtt7579);
+				   DVB_PLL_THOMSON_DTT7579);
 		}
 		break;
 	case CX88_BOARD_WINFAST_DTV2000H:
@@ -482,7 +412,7 @@
 					       &dev->core->i2c_adap);
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
-				   &dev->core->i2c_adap, &dvb_pll_fmd1216me);
+				   &dev->core->i2c_adap, DVB_PLL_FMD1216ME);
 		}
 		break;
 	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS:
@@ -491,7 +421,7 @@
 					       &dev->core->i2c_adap);
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x60,
-				   NULL, &dvb_pll_thomson_dtt7579);
+				   NULL, DVB_PLL_THOMSON_DTT7579);
 			break;
 		}
 		/* ZL10353 replaces MT352 on later cards */
@@ -500,7 +430,7 @@
 					       &dev->core->i2c_adap);
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x60,
-				   NULL, &dvb_pll_thomson_dtt7579);
+				   NULL, DVB_PLL_THOMSON_DTT7579);
 		}
 		break;
 	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
@@ -511,7 +441,7 @@
 					       &dev->core->i2c_adap);
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
-				   NULL, &dvb_pll_thomson_dtt7579);
+				   NULL, DVB_PLL_THOMSON_DTT7579);
 			break;
 		}
 		/* ZL10353 replaces MT352 on later cards */
@@ -520,7 +450,7 @@
 					       &dev->core->i2c_adap);
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
-				   NULL, &dvb_pll_thomson_dtt7579);
+				   NULL, DVB_PLL_THOMSON_DTT7579);
 		}
 		break;
 	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1:
@@ -529,7 +459,7 @@
 					       &dev->core->i2c_adap);
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
-				   NULL, &dvb_pll_lg_z201);
+				   NULL, DVB_PLL_LG_Z201);
 		}
 		break;
 	case CX88_BOARD_KWORLD_DVB_T:
@@ -540,17 +470,16 @@
 					       &dev->core->i2c_adap);
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
-				   NULL, &dvb_pll_unknown_1);
+				   NULL, DVB_PLL_UNKNOWN_1);
 		}
 		break;
 	case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
-#if defined(CONFIG_VIDEO_CX88_VP3054) || defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
-		dev->core->pll_addr = 0x61;
-		dev->core->pll_desc = &dvb_pll_fmd1216me;
+#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
 		dev->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config,
 			&((struct vp3054_i2c_state *)dev->card_priv)->adap);
 		if (dev->dvb.frontend != NULL) {
-			dev->dvb.frontend->ops.tuner_ops.set_params = dntv_live_dvbt_pro_tuner_set_params;
+			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
+				   &dev->core->i2c_adap, DVB_PLL_FMD1216ME);
 		}
 #else
 		printk("%s: built without vp3054 support\n", dev->core->name);
@@ -563,7 +492,7 @@
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
 				   &dev->core->i2c_adap,
-				   &dvb_pll_thomson_fe6600);
+				   DVB_PLL_THOMSON_FE6600);
 		}
 		break;
 	case CX88_BOARD_PCHDTV_HD3000:
@@ -572,7 +501,7 @@
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
 				   &dev->core->i2c_adap,
-				   &dvb_pll_thomson_dtt761x);
+				   DVB_PLL_THOMSON_DTT761X);
 		}
 		break;
 	case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q:
@@ -594,7 +523,7 @@
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
 				   &dev->core->i2c_adap,
-				   &dvb_pll_microtune_4042);
+				   DVB_PLL_MICROTUNE_4042);
 		}
 		}
 		break;
@@ -614,7 +543,7 @@
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
 				   &dev->core->i2c_adap,
-				   &dvb_pll_thomson_dtt761x);
+				   DVB_PLL_THOMSON_DTT761X);
 		}
 		}
 		break;
@@ -634,7 +563,7 @@
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
 				   &dev->core->i2c_adap,
-				   &dvb_pll_lg_tdvs_h06xf);
+				   DVB_PLL_LG_TDVS_H06XF);
 		}
 		}
 		break;
@@ -654,7 +583,7 @@
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
 				   &dev->core->i2c_adap,
-				   &dvb_pll_lg_tdvs_h06xf);
+				   DVB_PLL_LG_TDVS_H06XF);
 		}
 		}
 		break;
@@ -664,7 +593,7 @@
 					       &dev->core->i2c_adap);
 		if (dev->dvb.frontend != NULL) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
-				   NULL, &dvb_pll_tuv1236d);
+				   NULL, DVB_PLL_TUV1236D);
 		}
 		break;
 	case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
@@ -705,10 +634,6 @@
 		return -1;
 	}
 
-	if (dev->core->pll_desc) {
-		dev->dvb.frontend->ops.info.frequency_min = dev->core->pll_desc->min;
-		dev->dvb.frontend->ops.info.frequency_max = dev->core->pll_desc->max;
-	}
 	/* Ensure all frontends negotiate bus access */
 	dev->dvb.frontend->ops.ts_bus_ctrl = cx88_dvb_bus_ctrl;
 
@@ -778,11 +703,10 @@
 	if (!(cx88_boards[core->board].mpeg & CX88_MPEG_DVB))
 		goto fail_core;
 
-#if defined(CONFIG_VIDEO_CX88_VP3054) || defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
+	/* If vp3054 isn't enabled, a stub will just return 0 */
 	err = vp3054_i2c_probe(dev);
 	if (0 != err)
 		goto fail_core;
-#endif
 
 	/* dvb stuff */
 	printk("%s/2: cx2388x based dvb card\n", core->name);
@@ -807,9 +731,7 @@
 	/* dvb */
 	videobuf_dvb_unregister(&dev->dvb);
 
-#if defined(CONFIG_VIDEO_CX88_VP3054) || defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
 	vp3054_i2c_remove(dev);
-#endif
 
 	return 0;
 }
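Throughout cx88-dvb.c the PLL is now selected by a DVB_PLL_* id rather than by passing a struct dvb_pll_desc pointer, which is also why cx88_core no longer carries pll_addr/pll_desc and the hand-copied frequency_min/max lines above could go. A sketch of the resulting attach shape, mirroring the calls in the hunks (not a new code path):

	static int example_attach_pll(struct cx8802_dev *dev)
	{
		if (dev->dvb.frontend == NULL)
			return -1;

		dvb_attach(dvb_pll_attach, dev->dvb.frontend,
			   0x61,			/* tuner i2c address        */
			   &dev->core->i2c_adap,	/* or NULL if behind a gate */
			   DVB_PLL_FMD1216ME);		/* descriptor chosen by id  */
		return 0;
	}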
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 7919a1f..78bbcfa 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -160,7 +160,7 @@
 		i2c_clients_command(&core->i2c_adap, cmd, arg);
 }
 
-static struct i2c_algo_bit_data cx8800_i2c_algo_template = {
+static const struct i2c_algo_bit_data cx8800_i2c_algo_template = {
 	.setsda  = cx8800_bit_setsda,
 	.setscl  = cx8800_bit_setscl,
 	.getsda  = cx8800_bit_getsda,
@@ -171,18 +171,6 @@
 
 /* ----------------------------------------------------------------------- */
 
-static struct i2c_adapter cx8800_i2c_adap_template = {
-	.name              = "cx2388x",
-	.owner             = THIS_MODULE,
-	.id                = I2C_HW_B_CX2388x,
-	.client_register   = attach_inform,
-	.client_unregister = detach_inform,
-};
-
-static struct i2c_client cx8800_i2c_client_template = {
-	.name	= "cx88xx internal",
-};
-
 static char *i2c_devs[128] = {
 	[ 0x1c >> 1 ] = "lgdt330x",
 	[ 0x86 >> 1 ] = "tda9887/cx22702",
@@ -212,14 +200,9 @@
 	/* Prevents usage of invalid delay values */
 	if (i2c_udelay<5)
 		i2c_udelay=5;
-	cx8800_i2c_algo_template.udelay=i2c_udelay;
 
-	memcpy(&core->i2c_adap, &cx8800_i2c_adap_template,
-	       sizeof(core->i2c_adap));
 	memcpy(&core->i2c_algo, &cx8800_i2c_algo_template,
 	       sizeof(core->i2c_algo));
-	memcpy(&core->i2c_client, &cx8800_i2c_client_template,
-	       sizeof(core->i2c_client));
 
 	if (core->tuner_type != TUNER_ABSENT)
 		core->i2c_adap.class |= I2C_CLASS_TV_ANALOG;
@@ -228,10 +211,16 @@
 
 	core->i2c_adap.dev.parent = &pci->dev;
 	strlcpy(core->i2c_adap.name,core->name,sizeof(core->i2c_adap.name));
+	core->i2c_adap.owner = THIS_MODULE;
+	core->i2c_adap.id = I2C_HW_B_CX2388x;
+	core->i2c_adap.client_register = attach_inform;
+	core->i2c_adap.client_unregister = detach_inform;
+	core->i2c_algo.udelay = i2c_udelay;
 	core->i2c_algo.data = core;
 	i2c_set_adapdata(&core->i2c_adap,core);
 	core->i2c_adap.algo_data = &core->i2c_algo;
 	core->i2c_client.adapter = &core->i2c_adap;
+	strlcpy(core->i2c_client.name, "cx88xx internal", I2C_NAME_SIZE);
 
 	cx8800_bit_setscl(core,1);
 	cx8800_bit_setsda(core,1);
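The adapter and client templates are dropped and the algo template becomes const: the old code patched udelay into the shared file-scope template before copying it, whereas setting it on the per-core copy lets the template stay read-only. The same before/after shape in generic form (mydev and the probe names are placeholders):

	#include <linux/i2c-algo-bit.h>
	#include <linux/string.h>

	struct mydev {
		struct i2c_algo_bit_data algo;
	};

	/* before: writable template, mutated at probe time */
	static struct i2c_algo_bit_data algo_template_rw;

	static void probe_before(struct mydev *dev, int udelay)
	{
		algo_template_rw.udelay = udelay;	/* writes shared data for a per-device value */
		memcpy(&dev->algo, &algo_template_rw, sizeof(dev->algo));
	}

	/* after: const template, per-device fields set on the copy */
	static const struct i2c_algo_bit_data algo_template_ro;

	static void probe_after(struct mydev *dev, int udelay)
	{
		memcpy(&dev->algo, &algo_template_ro, sizeof(dev->algo));
		dev->algo.udelay = udelay;		/* stays per-device */
	}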
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 8136673..f5d4a56 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -74,7 +74,8 @@
 
 	/* read gpio value */
 	gpio = cx_read(ir->gpio_addr);
-	if (core->board == CX88_BOARD_NPGTECH_REALTV_TOP10FM) {
+	switch (core->board) {
+	case CX88_BOARD_NPGTECH_REALTV_TOP10FM:
 		/* This board apparently uses a combination of 2 GPIO
 		   to represent the keys. Additionally, the second GPIO
 		   can be used for parity.
@@ -90,9 +91,14 @@
 		auxgpio = cx_read(MO_GP1_IO);
 		/* Take out the parity part */
 		gpio=(gpio & 0x7fd) + (auxgpio & 0xef);
-	} else
+		break;
+	case CX88_BOARD_WINFAST_DTV1000:
+		gpio = (gpio & 0x6ff) | ((cx_read(MO_GP1_IO) << 8) & 0x900);
 		auxgpio = gpio;
-
+		break;
+	default:
+		auxgpio = gpio;
+	}
 	if (ir->polling) {
 		if (ir->last_gpio == auxgpio)
 			return;
@@ -148,20 +154,16 @@
 static void cx88_ir_work(struct work_struct *work)
 {
 	struct cx88_IR *ir = container_of(work, struct cx88_IR, work);
-	unsigned long timeout;
 
 	cx88_ir_handle_key(ir);
-	timeout = jiffies + (ir->polling * HZ / 1000);
-	mod_timer(&ir->timer, timeout);
+	mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
 }
 
 static void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir)
 {
 	if (ir->polling) {
+		setup_timer(&ir->timer, ir_timer, (unsigned long)ir);
 		INIT_WORK(&ir->work, cx88_ir_work);
-		init_timer(&ir->timer);
-		ir->timer.function = ir_timer;
-		ir->timer.data = (unsigned long)ir;
 		schedule_work(&ir->work);
 	}
 	if (ir->sampling) {
@@ -222,7 +224,6 @@
 	case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
 	case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
 	case CX88_BOARD_HAUPPAUGE_HVR1100:
-	case CX88_BOARD_HAUPPAUGE_HVR1300:
 	case CX88_BOARD_HAUPPAUGE_HVR3000:
 		ir_codes = ir_codes_hauppauge_new;
 		ir_type = IR_TYPE_RC5;
@@ -236,6 +237,7 @@
 		ir->polling = 50; /* ms */
 		break;
 	case CX88_BOARD_WINFAST2000XP_EXPERT:
+	case CX88_BOARD_WINFAST_DTV1000:
 		ir_codes = ir_codes_winfast;
 		ir->gpio_addr = MO_GP0_IO;
 		ir->mask_keycode = 0x8f8;
@@ -328,7 +330,7 @@
 		input_dev->id.vendor = pci->vendor;
 		input_dev->id.product = pci->device;
 	}
-	input_dev->cdev.dev = &pci->dev;
+	input_dev->dev.parent = &pci->dev;
 	/* record handles to ourself */
 	ir->core = core;
 	core->ir = ir;
@@ -442,7 +444,6 @@
 	case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
 	case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
 	case CX88_BOARD_HAUPPAUGE_HVR1100:
-	case CX88_BOARD_HAUPPAUGE_HVR1300:
 	case CX88_BOARD_HAUPPAUGE_HVR3000:
 		ircode = ir_decode_biphase(ir->samples, ir->scount, 5, 7);
 		ir_dprintk("biphase decoded: %x\n", ircode);
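Two related cleanups in cx88-input.c above: setup_timer() collapses the init_timer()/.function/.data triple into one call, and the poll interval is expressed with msecs_to_jiffies() instead of HZ arithmetic. A minimal sketch with hypothetical names:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list example_timer;

	static void example_poll(unsigned long data)
	{
		/* handle the poll, then typically rearm via mod_timer() */
	}

	static void example_start_polling(unsigned long cookie, unsigned int poll_ms)
	{
		setup_timer(&example_timer, example_poll, cookie);
		mod_timer(&example_timer, jiffies + msecs_to_jiffies(poll_ms));
	}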
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 543b05e..317a2a3 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -336,7 +336,7 @@
 {
 	struct cx8802_dev *dev = (struct cx8802_dev*)data;
 
-	dprintk(0, "%s\n",__FUNCTION__);
+	dprintk(1, "%s\n",__FUNCTION__);
 
 	if (debug)
 		cx88_sram_channel_dump(dev->core, &cx88_sram_channels[SRAM_CH28]);
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index 259ea08..1cc2d28 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -906,6 +906,7 @@
 	u32 mode = 0;
 
 	dprintk("cx88: tvaudio thread started\n");
+	set_freezable();
 	for (;;) {
 		msleep_interruptible(1000);
 		if (kthread_should_stop())
diff --git a/drivers/media/video/cx88/cx88-vp3054-i2c.c b/drivers/media/video/cx88/cx88-vp3054-i2c.c
index 82bc3a2..cd08776 100644
--- a/drivers/media/video/cx88/cx88-vp3054-i2c.c
+++ b/drivers/media/video/cx88/cx88-vp3054-i2c.c
@@ -94,7 +94,7 @@
 
 /* ----------------------------------------------------------------------- */
 
-static struct i2c_algo_bit_data vp3054_i2c_algo_template = {
+static const struct i2c_algo_bit_data vp3054_i2c_algo_template = {
 	.setsda  = vp3054_bit_setsda,
 	.setscl  = vp3054_bit_setscl,
 	.getsda  = vp3054_bit_getsda,
@@ -105,12 +105,6 @@
 
 /* ----------------------------------------------------------------------- */
 
-static struct i2c_adapter vp3054_i2c_adap_template = {
-	.name              = "cx2388x",
-	.owner             = THIS_MODULE,
-	.id                = I2C_HW_B_CX2388x,
-};
-
 int vp3054_i2c_probe(struct cx8802_dev *dev)
 {
 	struct cx88_core *core = dev->core;
@@ -125,8 +119,6 @@
 		return -ENOMEM;
 	vp3054_i2c = dev->card_priv;
 
-	memcpy(&vp3054_i2c->adap, &vp3054_i2c_adap_template,
-	       sizeof(vp3054_i2c->adap));
 	memcpy(&vp3054_i2c->algo, &vp3054_i2c_algo_template,
 	       sizeof(vp3054_i2c->algo));
 
@@ -135,6 +127,8 @@
 	vp3054_i2c->adap.dev.parent = &dev->pci->dev;
 	strlcpy(vp3054_i2c->adap.name, core->name,
 		sizeof(vp3054_i2c->adap.name));
+	vp3054_i2c->adap.owner = THIS_MODULE;
+	vp3054_i2c->adap.id = I2C_HW_B_CX2388x;
 	vp3054_i2c->algo.data = dev;
 	i2c_set_adapdata(&vp3054_i2c->adap, dev);
 	vp3054_i2c->adap.algo_data = &vp3054_i2c->algo;
diff --git a/drivers/media/video/cx88/cx88-vp3054-i2c.h b/drivers/media/video/cx88/cx88-vp3054-i2c.h
index 637a7d2..be99c93 100644
--- a/drivers/media/video/cx88/cx88-vp3054-i2c.h
+++ b/drivers/media/video/cx88/cx88-vp3054-i2c.h
@@ -30,5 +30,12 @@
 };
 
 /* ----------------------------------------------------------------------- */
+#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
 int  vp3054_i2c_probe(struct cx8802_dev *dev);
 void vp3054_i2c_remove(struct cx8802_dev *dev);
+#else
+static inline int  vp3054_i2c_probe(struct cx8802_dev *dev)
+{ return 0; }
+static inline void vp3054_i2c_remove(struct cx8802_dev *dev)
+{ }
+#endif
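With these stubs the callers in cx88-dvb.c above can drop their #ifdef blocks entirely. The same idiom in generic form (CONFIG_FOO and the names are placeholders):

	#if defined(CONFIG_FOO) || (defined(CONFIG_FOO_MODULE) && defined(MODULE))
	int  foo_probe(struct bar *dev);
	void foo_remove(struct bar *dev);
	#else
	static inline int  foo_probe(struct bar *dev)  { return 0; }
	static inline void foo_remove(struct bar *dev) { }
	#endif

The extra "&& defined(MODULE)" term matters when the helper is modular but the caller is built in: the built-in caller then gets the inline stub instead of an unresolvable call into a module.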
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 738d4f2..c4f656e 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -209,6 +209,7 @@
 #define CX88_BOARD_NORWOOD_MICRO           54
 #define CX88_BOARD_TE_DTV_250_OEM_SWANN    55
 #define CX88_BOARD_HAUPPAUGE_HVR1300       56
+#define CX88_BOARD_ADSTECH_PTV_390         57
 
 enum cx88_itype {
 	CX88_VMUX_COMPOSITE1 = 1,
@@ -316,8 +317,6 @@
 
 	/* config info -- dvb */
 #if defined(CONFIG_VIDEO_BUF_DVB) || defined(CONFIG_VIDEO_BUF_DVB_MODULE)
-	struct dvb_pll_desc        *pll_desc;
-	unsigned int               pll_addr;
 	int 			   (*prev_set_voltage)(struct dvb_frontend* fe, fe_sec_voltage_t voltage);
 #endif
 
@@ -463,13 +462,10 @@
 	u32                        mailbox;
 	int                        width;
 	int                        height;
-	int                        fw_size;
 
 #if defined(CONFIG_VIDEO_BUF_DVB) || defined(CONFIG_VIDEO_BUF_DVB_MODULE)
 	/* for dvb only */
 	struct videobuf_dvb        dvb;
-	void*                      fe_handle;
-	int                        (*fe_release)(void *handle);
 
 	void			   *card_priv;
 #endif
diff --git a/drivers/media/video/et61x251/Kconfig b/drivers/media/video/et61x251/Kconfig
index 664676f..dcc1a03 100644
--- a/drivers/media/video/et61x251/Kconfig
+++ b/drivers/media/video/et61x251/Kconfig
@@ -1,6 +1,6 @@
 config USB_ET61X251
 	tristate "USB ET61X[12]51 PC Camera Controller support"
-	depends on VIDEO_V4L1
+	depends on VIDEO_V4L2
 	---help---
 	  Say Y here if you want support for cameras based on Etoms ET61X151
 	  or ET61X251 PC Camera Controllers.
diff --git a/drivers/media/video/et61x251/et61x251.h b/drivers/media/video/et61x251/et61x251.h
index 262f98e..02c741d 100644
--- a/drivers/media/video/et61x251/et61x251.h
+++ b/drivers/media/video/et61x251/et61x251.h
@@ -36,6 +36,7 @@
 #include <linux/mutex.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
+#include <linux/kref.h>
 
 #include "et61x251_sensor.h"
 
@@ -134,7 +135,7 @@
 };
 
 static DEFINE_MUTEX(et61x251_sysfs_lock);
-static DECLARE_RWSEM(et61x251_disconnect);
+static DECLARE_RWSEM(et61x251_dev_lock);
 
 struct et61x251_device {
 	struct video_device* v4ldev;
@@ -158,12 +159,14 @@
 	struct et61x251_sysfs_attr sysfs;
 	struct et61x251_module_param module_param;
 
+	struct kref kref;
 	enum et61x251_dev_state state;
 	u8 users;
 
-	struct mutex dev_mutex, fileop_mutex;
+	struct completion probe;
+	struct mutex open_mutex, fileop_mutex;
 	spinlock_t queue_lock;
-	wait_queue_head_t open, wait_frame, wait_stream;
+	wait_queue_head_t wait_open, wait_frame, wait_stream;
 };
 
 /*****************************************************************************/
@@ -177,7 +180,7 @@
 
 void
 et61x251_attach_sensor(struct et61x251_device* cam,
-		       struct et61x251_sensor* sensor)
+		       const struct et61x251_sensor* sensor)
 {
 	memcpy(&cam->sensor, sensor, sizeof(struct et61x251_sensor));
 }
@@ -195,8 +198,8 @@
 		else if ((level) == 2)                                        \
 			dev_info(&cam->usbdev->dev, fmt "\n", ## args);       \
 		else if ((level) >= 3)                                        \
-			dev_info(&cam->usbdev->dev, "[%s:%d] " fmt "\n",      \
-				 __FUNCTION__, __LINE__ , ## args);           \
+			dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n",   \
+				 __FILE__, __FUNCTION__, __LINE__ , ## args); \
 	}                                                                     \
 } while (0)
 #	define KDBG(level, fmt, args...)                                      \
@@ -205,8 +208,8 @@
 		if ((level) == 1 || (level) == 2)                             \
 			pr_info("et61x251: " fmt "\n", ## args);              \
 		else if ((level) == 3)                                        \
-			pr_debug("et61x251: [%s:%d] " fmt "\n", __FUNCTION__, \
-				 __LINE__ , ## args);                         \
+			pr_debug("sn9c102: [%s:%s:%d] " fmt "\n", __FILE__,   \
+				 __FUNCTION__, __LINE__ , ## args);           \
 	}                                                                     \
 } while (0)
 #	define V4LDBG(level, name, cmd)                                       \
@@ -222,8 +225,8 @@
 
 #undef PDBG
 #define PDBG(fmt, args...)                                                    \
-dev_info(&cam->usbdev->dev, "[%s:%d] " fmt "\n",                              \
-	 __FUNCTION__, __LINE__ , ## args)
+dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", __FILE__, __FUNCTION__,   \
+	 __LINE__ , ## args)
 
 #undef PDBGG
 #define PDBGG(fmt, args...) do {;} while(0) /* placeholder */
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index a652551..585bd1f 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -45,11 +45,11 @@
 
 #define ET61X251_MODULE_NAME    "V4L2 driver for ET61X[12]51 "                \
 				"PC Camera Controllers"
-#define ET61X251_MODULE_AUTHOR  "(C) 2006 Luca Risolia"
+#define ET61X251_MODULE_AUTHOR  "(C) 2006-2007 Luca Risolia"
 #define ET61X251_AUTHOR_EMAIL   "<luca.risolia@studio.unibo.it>"
 #define ET61X251_MODULE_LICENSE "GPL"
-#define ET61X251_MODULE_VERSION "1:1.04"
-#define ET61X251_MODULE_VERSION_CODE  KERNEL_VERSION(1, 1, 4)
+#define ET61X251_MODULE_VERSION "1:1.09"
+#define ET61X251_MODULE_VERSION_CODE  KERNEL_VERSION(1, 1, 9)
 
 /*****************************************************************************/
 
@@ -245,7 +245,8 @@
 
 
 static int
-et61x251_i2c_wait(struct et61x251_device* cam, struct et61x251_sensor* sensor)
+et61x251_i2c_wait(struct et61x251_device* cam,
+		  const struct et61x251_sensor* sensor)
 {
 	int i, r;
 
@@ -270,7 +271,7 @@
 
 int
 et61x251_i2c_try_read(struct et61x251_device* cam,
-		      struct et61x251_sensor* sensor, u8 address)
+		      const struct et61x251_sensor* sensor, u8 address)
 {
 	struct usb_device* udev = cam->usbdev;
 	u8* data = cam->control_buffer;
@@ -303,7 +304,8 @@
 
 int
 et61x251_i2c_try_write(struct et61x251_device* cam,
-		       struct et61x251_sensor* sensor, u8 address, u8 value)
+		       const struct et61x251_sensor* sensor, u8 address,
+		       u8 value)
 {
 	struct usb_device* udev = cam->usbdev;
 	u8* data = cam->control_buffer;
@@ -615,7 +617,7 @@
 	return 0;
 
 free_urbs:
-	for (i = 0; (i < ET61X251_URBS) &&  cam->urb[i]; i++)
+	for (i = 0; (i < ET61X251_URBS) && cam->urb[i]; i++)
 		usb_free_urb(cam->urb[i]);
 
 free_buffers:
@@ -682,7 +684,7 @@
 
 	if (len < 4) {
 		strncpy(str, buff, len);
-		str[len+1] = '\0';
+		str[len] = '\0';
 	} else {
 		strncpy(str, buff, 4);
 		str[4] = '\0';
@@ -977,30 +979,30 @@
 
 static int et61x251_create_sysfs(struct et61x251_device* cam)
 {
-	struct video_device *v4ldev = cam->v4ldev;
+	struct class_device *classdev = &(cam->v4ldev->class_dev);
 	int err = 0;
 
-	if ((err = video_device_create_file(v4ldev, &class_device_attr_reg)))
+	if ((err = class_device_create_file(classdev, &class_device_attr_reg)))
 		goto err_out;
-	if ((err = video_device_create_file(v4ldev, &class_device_attr_val)))
+	if ((err = class_device_create_file(classdev, &class_device_attr_val)))
 		goto err_reg;
 
 	if (cam->sensor.sysfs_ops) {
-		if ((err = video_device_create_file(v4ldev,
+		if ((err = class_device_create_file(classdev,
 						  &class_device_attr_i2c_reg)))
 			goto err_val;
-		if ((err = video_device_create_file(v4ldev,
+		if ((err = class_device_create_file(classdev,
 						  &class_device_attr_i2c_val)))
 			goto err_i2c_reg;
 	}
 
 err_i2c_reg:
 	if (cam->sensor.sysfs_ops)
-	video_device_remove_file(v4ldev, &class_device_attr_i2c_reg);
+		class_device_remove_file(classdev, &class_device_attr_i2c_reg);
 err_val:
-	video_device_remove_file(v4ldev, &class_device_attr_val);
+	class_device_remove_file(classdev, &class_device_attr_val);
 err_reg:
-	video_device_remove_file(v4ldev, &class_device_attr_reg);
+	class_device_remove_file(classdev, &class_device_attr_reg);
 err_out:
 	return err;
 }
@@ -1103,7 +1105,8 @@
 	int err = 0;
 
 	if (!(cam->state & DEV_INITIALIZED)) {
-		init_waitqueue_head(&cam->open);
+		mutex_init(&cam->open_mutex);
+		init_waitqueue_head(&cam->wait_open);
 		qctrl = s->qctrl;
 		rect = &(s->cropcap.defrect);
 		cam->compression.quality = ET61X251_COMPRESSION_QUALITY;
@@ -1177,64 +1180,80 @@
 	return 0;
 }
 
+/*****************************************************************************/
 
-static void et61x251_release_resources(struct et61x251_device* cam)
+static void et61x251_release_resources(struct kref *kref)
 {
+	struct et61x251_device *cam;
+
 	mutex_lock(&et61x251_sysfs_lock);
 
+	cam = container_of(kref, struct et61x251_device, kref);
+
 	DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor);
 	video_set_drvdata(cam->v4ldev, NULL);
 	video_unregister_device(cam->v4ldev);
+	usb_put_dev(cam->usbdev);
+	kfree(cam->control_buffer);
+	kfree(cam);
 
 	mutex_unlock(&et61x251_sysfs_lock);
-
-	kfree(cam->control_buffer);
 }
 
-/*****************************************************************************/
 
 static int et61x251_open(struct inode* inode, struct file* filp)
 {
 	struct et61x251_device* cam;
 	int err = 0;
 
-	/*
-	   This is the only safe way to prevent race conditions with
-	   disconnect
-	*/
-	if (!down_read_trylock(&et61x251_disconnect))
+	if (!down_read_trylock(&et61x251_dev_lock))
 		return -ERESTARTSYS;
 
 	cam = video_get_drvdata(video_devdata(filp));
 
-	if (mutex_lock_interruptible(&cam->dev_mutex)) {
-		up_read(&et61x251_disconnect);
+	if (wait_for_completion_interruptible(&cam->probe)) {
+		up_read(&et61x251_dev_lock);
 		return -ERESTARTSYS;
 	}
 
+	kref_get(&cam->kref);
+
+	if (mutex_lock_interruptible(&cam->open_mutex)) {
+		kref_put(&cam->kref, et61x251_release_resources);
+		up_read(&et61x251_dev_lock);
+		return -ERESTARTSYS;
+	}
+
+	if (cam->state & DEV_DISCONNECTED) {
+		DBG(1, "Device not present");
+		err = -ENODEV;
+		goto out;
+	}
+
 	if (cam->users) {
-		DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->minor);
+		DBG(2, "Device /dev/video%d is already in use",
+		       cam->v4ldev->minor);
+		DBG(3, "Simultaneous opens are not supported");
 		if ((filp->f_flags & O_NONBLOCK) ||
 		    (filp->f_flags & O_NDELAY)) {
 			err = -EWOULDBLOCK;
 			goto out;
 		}
-		mutex_unlock(&cam->dev_mutex);
-		err = wait_event_interruptible_exclusive(cam->open,
-						  cam->state & DEV_DISCONNECTED
+		DBG(2, "A blocking open() has been requested. Waiting for "
+		       "the device to be released...");
+		up_read(&et61x251_dev_lock);
+		err = wait_event_interruptible_exclusive(cam->wait_open,
+						(cam->state & DEV_DISCONNECTED)
 							 || !cam->users);
-		if (err) {
-			up_read(&et61x251_disconnect);
-			return err;
-		}
+		down_read(&et61x251_dev_lock);
+		if (err)
+			goto out;
 		if (cam->state & DEV_DISCONNECTED) {
-			up_read(&et61x251_disconnect);
-			return -ENODEV;
+			err = -ENODEV;
+			goto out;
 		}
-		mutex_lock(&cam->dev_mutex);
 	}
 
-
 	if (cam->state & DEV_MISCONFIGURED) {
 		err = et61x251_init(cam);
 		if (err) {
@@ -1259,36 +1278,32 @@
 	DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor);
 
 out:
-	mutex_unlock(&cam->dev_mutex);
-	up_read(&et61x251_disconnect);
+	mutex_unlock(&cam->open_mutex);
+	if (err)
+		kref_put(&cam->kref, et61x251_release_resources);
+	up_read(&et61x251_dev_lock);
 	return err;
 }
 
 
 static int et61x251_release(struct inode* inode, struct file* filp)
 {
-	struct et61x251_device* cam = video_get_drvdata(video_devdata(filp));
+	struct et61x251_device* cam;
 
-	mutex_lock(&cam->dev_mutex); /* prevent disconnect() to be called */
+	down_write(&et61x251_dev_lock);
+
+	cam = video_get_drvdata(video_devdata(filp));
 
 	et61x251_stop_transfer(cam);
-
 	et61x251_release_buffers(cam);
-
-	if (cam->state & DEV_DISCONNECTED) {
-		et61x251_release_resources(cam);
-		usb_put_dev(cam->usbdev);
-		mutex_unlock(&cam->dev_mutex);
-		kfree(cam);
-		return 0;
-	}
-
 	cam->users--;
-	wake_up_interruptible_nr(&cam->open, 1);
+	wake_up_interruptible_nr(&cam->wait_open, 1);
 
 	DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor);
 
-	mutex_unlock(&cam->dev_mutex);
+	kref_put(&cam->kref, et61x251_release_resources);
+
+	up_write(&et61x251_dev_lock);
 
 	return 0;
 }
@@ -1324,7 +1339,7 @@
 		DBG(3, "Close and open the device again to choose the read "
 		       "method");
 		mutex_unlock(&cam->fileop_mutex);
-		return -EINVAL;
+		return -EBUSY;
 	}
 
 	if (cam->io == IO_NONE) {
@@ -1504,7 +1519,12 @@
 		return -EIO;
 	}
 
-	if (cam->io != IO_MMAP || !(vma->vm_flags & VM_WRITE) ||
+	if (!(vma->vm_flags & (VM_WRITE | VM_READ))) {
+		mutex_unlock(&cam->fileop_mutex);
+		return -EACCES;
+	}
+
+	if (cam->io != IO_MMAP ||
 	    size != PAGE_ALIGN(cam->frame[0].buf.length)) {
 		mutex_unlock(&cam->fileop_mutex);
 		return -EINVAL;
@@ -1535,7 +1555,6 @@
 
 	vma->vm_ops = &et61x251_vm_ops;
 	vma->vm_private_data = &cam->frame[i];
-
 	et61x251_vm_open(vma);
 
 	mutex_unlock(&cam->fileop_mutex);
@@ -1764,7 +1783,7 @@
 			if (cam->frame[i].vma_use_count) {
 				DBG(3, "VIDIOC_S_CROP failed. "
 				       "Unmap the buffers first.");
-				return -EINVAL;
+				return -EBUSY;
 			}
 
 	/* Preserve R,G or B origin */
@@ -1921,6 +1940,8 @@
 	if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
 		return -EINVAL;
 
+	pfmt->colorspace = (pfmt->pixelformat == V4L2_PIX_FMT_ET61X251) ?
+			   0 : V4L2_COLORSPACE_SRGB;
 	pfmt->bytesperline = (pfmt->pixelformat==V4L2_PIX_FMT_ET61X251)
 			     ? 0 : (pfmt->width * pfmt->priv) / 8;
 	pfmt->sizeimage = pfmt->height * ((pfmt->width*pfmt->priv)/8);
@@ -1996,6 +2017,8 @@
 	    pix->pixelformat != V4L2_PIX_FMT_SBGGR8)
 		pix->pixelformat = pfmt->pixelformat;
 	pix->priv = pfmt->priv; /* bpp */
+	pix->colorspace = (pix->pixelformat == V4L2_PIX_FMT_ET61X251) ?
+			  0 : V4L2_COLORSPACE_SRGB;
 	pix->colorspace = pfmt->colorspace;
 	pix->bytesperline = (pix->pixelformat == V4L2_PIX_FMT_ET61X251)
 			    ? 0 : (pix->width * pix->priv) / 8;
@@ -2013,7 +2036,7 @@
 			if (cam->frame[i].vma_use_count) {
 				DBG(3, "VIDIOC_S_FMT failed. "
 				       "Unmap the buffers first.");
-				return -EINVAL;
+				return -EBUSY;
 			}
 
 	if (cam->stream == STREAM_ON)
@@ -2129,14 +2152,14 @@
 	if (cam->io == IO_READ) {
 		DBG(3, "Close and open the device again to choose the mmap "
 		       "I/O method");
-		return -EINVAL;
+		return -EBUSY;
 	}
 
 	for (i = 0; i < cam->nbuffers; i++)
 		if (cam->frame[i].vma_use_count) {
 			DBG(3, "VIDIOC_REQBUFS failed. "
 			       "Previous buffers are still mapped.");
-			return -EINVAL;
+			return -EBUSY;
 		}
 
 	if (cam->stream == STREAM_ON)
@@ -2284,9 +2307,6 @@
 	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP)
 		return -EINVAL;
 
-	if (list_empty(&cam->inqueue))
-		return -EINVAL;
-
 	cam->stream = STREAM_ON;
 
 	DBG(3, "Stream on");
@@ -2535,8 +2555,6 @@
 		goto fail;
 	}
 
-	mutex_init(&cam->dev_mutex);
-
 	DBG(2, "ET61X[12]51 PC Camera Controller detected "
 	       "(vid/pid 0x%04X:0x%04X)",id->idVendor, id->idProduct);
 
@@ -2568,7 +2586,7 @@
 	cam->v4ldev->release = video_device_release;
 	video_set_drvdata(cam->v4ldev, cam);
 
-	mutex_lock(&cam->dev_mutex);
+	init_completion(&cam->probe);
 
 	err = video_register_device(cam->v4ldev, VFL_TYPE_GRABBER,
 				    video_nr[dev_nr]);
@@ -2578,7 +2596,7 @@
 			DBG(1, "Free /dev/videoX node not found");
 		video_nr[dev_nr] = -1;
 		dev_nr = (dev_nr < ET61X251_MAX_DEVICES-1) ? dev_nr+1 : 0;
-		mutex_unlock(&cam->dev_mutex);
+		complete_all(&cam->probe);
 		goto fail;
 	}
 
@@ -2599,11 +2617,15 @@
 		       "device controlling. Error #%d", err);
 #else
 	DBG(2, "Optional device control through 'sysfs' interface disabled");
+	DBG(3, "Compile the kernel with the 'CONFIG_VIDEO_ADV_DEBUG' "
+	       "configuration option to enable it.");
 #endif
 
 	usb_set_intfdata(intf, cam);
+	kref_init(&cam->kref);
+	usb_get_dev(cam->usbdev);
 
-	mutex_unlock(&cam->dev_mutex);
+	complete_all(&cam->probe);
 
 	return 0;
 
@@ -2620,40 +2642,31 @@
 
 static void et61x251_usb_disconnect(struct usb_interface* intf)
 {
-	struct et61x251_device* cam = usb_get_intfdata(intf);
+	struct et61x251_device* cam;
 
-	if (!cam)
-		return;
+	down_write(&et61x251_dev_lock);
 
-	down_write(&et61x251_disconnect);
-
-	mutex_lock(&cam->dev_mutex);
+	cam = usb_get_intfdata(intf);
 
 	DBG(2, "Disconnecting %s...", cam->v4ldev->name);
 
-	wake_up_interruptible_all(&cam->open);
-
 	if (cam->users) {
 		DBG(2, "Device /dev/video%d is open! Deregistration and "
-		       "memory deallocation are deferred on close.",
+		       "memory deallocation are deferred.",
 		    cam->v4ldev->minor);
 		cam->state |= DEV_MISCONFIGURED;
 		et61x251_stop_transfer(cam);
 		cam->state |= DEV_DISCONNECTED;
 		wake_up_interruptible(&cam->wait_frame);
 		wake_up(&cam->wait_stream);
-		usb_get_dev(cam->usbdev);
-	} else {
+	} else
 		cam->state |= DEV_DISCONNECTED;
-		et61x251_release_resources(cam);
-	}
 
-	mutex_unlock(&cam->dev_mutex);
+	wake_up_interruptible_all(&cam->wait_open);
 
-	if (!cam->users)
-		kfree(cam);
+	kref_put(&cam->kref, et61x251_release_resources);
 
-	up_write(&et61x251_disconnect);
+	up_write(&et61x251_dev_lock);
 }
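The et61x251 changes above move the device lifetime to a kref: probe holds the initial reference, each open() takes another, and release()/disconnect() drop theirs, so et61x251_release_resources() runs exactly once, whichever path is last. Stripped to the bare pattern (illustrative names, not the driver's structs):

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct cam_dev {
		struct kref kref;
		/* ... video_device, usb_device, buffers ... */
	};

	static void cam_release_resources(struct kref *kref)
	{
		struct cam_dev *cam = container_of(kref, struct cam_dev, kref);

		/* unregister and free everything exactly once */
		kfree(cam);
	}

	static void cam_probe_init(struct cam_dev *cam)
	{
		kref_init(&cam->kref);		/* refcount = 1, dropped by disconnect */
	}

	static void cam_open_get(struct cam_dev *cam)
	{
		kref_get(&cam->kref);		/* one reference per open file */
	}

	static void cam_put(struct cam_dev *cam)
	{
		kref_put(&cam->kref, cam_release_resources);
	}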
 
 
diff --git a/drivers/media/video/et61x251/et61x251_sensor.h b/drivers/media/video/et61x251/et61x251_sensor.h
index 5fadb5d..e145863 100644
--- a/drivers/media/video/et61x251/et61x251_sensor.h
+++ b/drivers/media/video/et61x251/et61x251_sensor.h
@@ -22,7 +22,7 @@
 #define _ET61X251_SENSOR_H_
 
 #include <linux/usb.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
 #include <linux/device.h>
 #include <linux/stddef.h>
 #include <linux/errno.h>
@@ -47,7 +47,7 @@
 
 extern void
 et61x251_attach_sensor(struct et61x251_device* cam,
-		       struct et61x251_sensor* sensor);
+		       const struct et61x251_sensor* sensor);
 
 /*****************************************************************************/
 
@@ -56,10 +56,10 @@
 extern int et61x251_i2c_write(struct et61x251_device*, u8 address, u8 value);
 extern int et61x251_i2c_read(struct et61x251_device*, u8 address);
 extern int et61x251_i2c_try_write(struct et61x251_device*,
-				  struct et61x251_sensor*, u8 address,
+				  const struct et61x251_sensor*, u8 address,
 				  u8 value);
 extern int et61x251_i2c_try_read(struct et61x251_device*,
-				 struct et61x251_sensor*, u8 address);
+				 const struct et61x251_sensor*, u8 address);
 extern int et61x251_i2c_raw_write(struct et61x251_device*, u8 n, u8 data1,
 				  u8 data2, u8 data3, u8 data4, u8 data5,
 				  u8 data6, u8 data7, u8 data8, u8 address);
diff --git a/drivers/media/video/et61x251/et61x251_tas5130d1b.c b/drivers/media/video/et61x251/et61x251_tas5130d1b.c
index b066434..04b7fbb 100644
--- a/drivers/media/video/et61x251/et61x251_tas5130d1b.c
+++ b/drivers/media/video/et61x251/et61x251_tas5130d1b.c
@@ -69,7 +69,7 @@
 }
 
 
-static struct et61x251_sensor tas5130d1b = {
+static const struct et61x251_sensor tas5130d1b = {
 	.name = "TAS5130D1B",
 	.interface = ET61X251_I2C_3WIRES,
 	.rsta = ET61X251_I2C_RSTA_STOP,
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index ed92b6f..2d709e0 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -37,6 +37,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/i2c-id.h>
 #include <linux/workqueue.h>
 #include <asm/semaphore.h>
 
@@ -60,21 +61,22 @@
 
 /* ----------------------------------------------------------------------- */
 
-static int get_key_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+static int get_key_haup_common(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw,
+			       int size, int offset)
 {
-	unsigned char buf[3];
+	unsigned char buf[6];
 	int start, range, toggle, dev, code;
 
 	/* poll IR chip */
-	if (3 != i2c_master_recv(&ir->c,buf,3))
+	if (size != i2c_master_recv(&ir->c,buf,size))
 		return -EIO;
 
 	/* split rc5 data block ... */
-	start  = (buf[0] >> 7) &    1;
-	range  = (buf[0] >> 6) &    1;
-	toggle = (buf[0] >> 5) &    1;
-	dev    =  buf[0]       & 0x1f;
-	code   = (buf[1] >> 2) & 0x3f;
+	start  = (buf[offset] >> 7) &    1;
+	range  = (buf[offset] >> 6) &    1;
+	toggle = (buf[offset] >> 5) &    1;
+	dev    =  buf[offset]       & 0x1f;
+	code   = (buf[offset+1] >> 2) & 0x3f;
 
 	/* rc5 has two start bits
 	 * the first bit must be one
@@ -96,6 +98,16 @@
 	return 1;
 }
 
+static inline int get_key_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+{
+	return get_key_haup_common(ir, ir_key, ir_raw, 3, 0);
+}
+
+static inline int get_key_haup_xvr(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+{
+	return get_key_haup_common(ir, ir_key, ir_raw, 6, 3);
+}
+
 static int get_key_pixelview(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
 {
 	unsigned char b;
@@ -270,8 +282,9 @@
 static void ir_work(struct work_struct *work)
 {
 	struct IR_i2c *ir = container_of(work, struct IR_i2c, work);
+
 	ir_key_poll(ir);
-	mod_timer(&ir->timer, jiffies+HZ/10);
+	mod_timer(&ir->timer, jiffies + msecs_to_jiffies(100));
 }
 
 /* ----------------------------------------------------------------------- */
@@ -354,9 +367,21 @@
 	case 0x7a:
 	case 0x47:
 	case 0x71:
-		/* Handled by saa7134-input */
-		name        = "SAA713x remote";
-		ir_type     = IR_TYPE_OTHER;
+		if (adap->id == I2C_HW_B_CX2388x) {
+			/* Handled by cx88-input */
+			name        = "CX2388x remote";
+			ir_type     = IR_TYPE_RC5;
+			ir->get_key = get_key_haup_xvr;
+			if (hauppauge == 1) {
+				ir_codes    = ir_codes_hauppauge_new;
+			} else {
+				ir_codes    = ir_codes_rc5_tv;
+			}
+		} else {
+			/* Handled by saa7134-input */
+			name        = "SAA713x remote";
+			ir_type     = IR_TYPE_OTHER;
+		}
 		break;
 	default:
 		/* shouldn't happen */
@@ -450,6 +475,7 @@
 	static const int probe_bttv[] = { 0x1a, 0x18, 0x4b, 0x64, 0x30, -1};
 	static const int probe_saa7134[] = { 0x7a, 0x47, 0x71, -1 };
 	static const int probe_em28XX[] = { 0x30, 0x47, -1 };
+	static const int probe_cx88[] = { 0x18, 0x71, -1 };
 	const int *probe = NULL;
 	struct i2c_client c;
 	unsigned char buf;
@@ -468,6 +494,9 @@
 	case I2C_HW_B_EM28XX:
 		probe = probe_em28XX;
 		break;
+	case I2C_HW_B_CX2388x:
+		probe = probe_cx88;
+		break;
 	}
 	if (NULL == probe)
 		return 0;
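get_key_haup_common() above factors the RC5 unpacking so the same code serves the classic 3-byte Hauppauge reply and the 6-byte reply whose RC5 bytes start at offset 3. The byte layout it relies on, as a stand-alone sketch:

	/* buf[offset]   : bit7 start, bit6 range, bit5 toggle, bits 4-0 device
	 * buf[offset+1] : bits 7-2 command                                     */
	static int example_rc5_unpack(const unsigned char *buf, int offset,
				      unsigned int *dev, unsigned int *code)
	{
		if (!((buf[offset] >> 7) & 1))
			return -1;			/* first start bit must be 1 */

		*dev  =  buf[offset]           & 0x1f;	/* 5-bit device address */
		*code = (buf[offset + 1] >> 2) & 0x3f;	/* 6-bit command        */
		return 0;
	}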
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index efc6635..4c93466 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -181,7 +181,7 @@
 MODULE_PARM_DESC(ntsc, "Set NTSC standard: M, J, K");
 MODULE_PARM_DESC(debug,
 		 "Debug level (bitmask). Default: errors only\n"
-		 "\t\t\t(debug = 511 gives full debugging)");
+		 "\t\t\t(debug = 1023 gives full debugging)");
 MODULE_PARM_DESC(ivtv_pci_latency,
 		 "Change the PCI latency to 64 if lower: 0 = No, 1 = Yes,\n"
 		 "\t\t\tDefault: Yes");
@@ -339,6 +339,7 @@
 		/* In a few cases the PCI subsystem IDs do not correctly
 		   identify the card. A better method is to check the
 		   model number from the eeprom instead. */
+		case 30012 ... 30039:  /* Low profile PVR250 */
 		case 32000 ... 32999:
 		case 48000 ... 48099:  /* 48??? range are PVR250s with a cx23415 */
 		case 48400 ... 48599:
@@ -622,6 +623,7 @@
 	itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */
 	itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */
 
+	mutex_init(&itv->serialize_lock);
 	mutex_init(&itv->i2c_bus_lock);
 	mutex_init(&itv->udma.lock);
 
@@ -1288,10 +1290,7 @@
 
 	IVTV_DEBUG_INFO(" Releasing irq.\n");
 	free_irq(itv->dev->irq, (void *)itv);
-
-	if (itv->dev) {
-		ivtv_iounmap(itv);
-	}
+	ivtv_iounmap(itv);
 
 	IVTV_DEBUG_INFO(" Releasing mem.\n");
 	release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
@@ -1326,9 +1325,9 @@
 		return -1;
 	}
 
-	if (ivtv_debug < 0 || ivtv_debug > 511) {
+	if (ivtv_debug < 0 || ivtv_debug > 1023) {
 		ivtv_debug = 0;
-		printk(KERN_INFO "ivtv:  debug value must be >= 0 and <= 511!\n");
+		printk(KERN_INFO "ivtv:  debug value must be >= 0 and <= 1023!\n");
 	}
 
 	if (pci_register_driver(&ivtv_pci_driver)) {
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index e6e56f1..6c1a85f 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -268,6 +268,8 @@
 #define IVTV_DBGFLG_IRQ   (1 << 6)
 #define IVTV_DBGFLG_DEC   (1 << 7)
 #define IVTV_DBGFLG_YUV   (1 << 8)
+/* Flag to turn on high volume debugging */
+#define IVTV_DBGFLG_HIGHVOL (1 << 9)
 
 /* NOTE: extra space before comma in 'itv->num , ## args' is required for
    gcc-2.95, otherwise it won't compile. */
@@ -286,6 +288,21 @@
 #define IVTV_DEBUG_DEC(fmt, args...)   IVTV_DEBUG(IVTV_DBGFLG_DEC, "dec", fmt , ## args)
 #define IVTV_DEBUG_YUV(fmt, args...)   IVTV_DEBUG(IVTV_DBGFLG_YUV, "yuv", fmt , ## args)
 
+#define IVTV_DEBUG_HIGH_VOL(x, type, fmt, args...) \
+	do { \
+		if (((x) & ivtv_debug) && (ivtv_debug & IVTV_DBGFLG_HIGHVOL)) \
+			printk(KERN_INFO "ivtv%d " type ": " fmt, itv->num , ## args); \
+	} while (0)
+#define IVTV_DEBUG_HI_WARN(fmt, args...)  IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_WARN, "warning", fmt , ## args)
+#define IVTV_DEBUG_HI_INFO(fmt, args...)  IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_INFO, "info",fmt , ## args)
+#define IVTV_DEBUG_HI_API(fmt, args...)   IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_API, "api", fmt , ## args)
+#define IVTV_DEBUG_HI_DMA(fmt, args...)   IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_DMA, "dma", fmt , ## args)
+#define IVTV_DEBUG_HI_IOCTL(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_IOCTL, "ioctl", fmt , ## args)
+#define IVTV_DEBUG_HI_I2C(fmt, args...)   IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_I2C, "i2c", fmt , ## args)
+#define IVTV_DEBUG_HI_IRQ(fmt, args...)   IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_IRQ, "irq", fmt , ## args)
+#define IVTV_DEBUG_HI_DEC(fmt, args...)   IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_DEC, "dec", fmt , ## args)
+#define IVTV_DEBUG_HI_YUV(fmt, args...)   IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_YUV, "yuv", fmt , ## args)
+
 #define IVTV_FB_DEBUG(x, type, fmt, args...) \
 	do { \
 		if ((x) & ivtv_debug) \
@@ -650,7 +667,6 @@
 	/* convenience pointer to sliced struct in vbi_in union */
 	struct v4l2_sliced_vbi_format *sliced_in;
 	u32 service_set_in;
-	u32 service_set_out;
 	int insert_mpeg;
 
 	/* Buffer for the maximum of 2 * 18 * packet_size sliced VBI lines.
@@ -723,6 +739,7 @@
 	int search_pack_header;
 
 	spinlock_t dma_reg_lock; /* lock access to DMA engine registers */
+	struct mutex serialize_lock;  /* lock used to serialize starting streams */
 
 	/* User based DMA for OSD */
 	struct ivtv_user_dma udma;
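
The IVTV_DEBUG_HI_* variants only print when the relevant subsystem flag and IVTV_DBGFLG_HIGHVOL (bit 9) are both set in ivtv_debug, which is why "full debugging" moves from 511 to 1023 in ivtv-driver.c. A hedged usage sketch (hypothetical function; assumes ivtv-driver.h is included and the conventional local names itv and s):

	/* hot-path example: routine messages stay on the normal macro,
	   per-buffer chatter moves to the high-volume variant */
	static void example_dma_step(struct ivtv *itv, struct ivtv_stream *s)
	{
		/* printed whenever the DMA debug flag is set */
		IVTV_DEBUG_DMA("starting DMA for %s\n", s->name);

		/* printed only when the DMA flag *and* IVTV_DBGFLG_HIGHVOL are
		   set, e.g. after loading the module with debug=1023 */
		IVTV_DEBUG_HI_DMA("%s queue holds %d bytes\n",
				  s->name, s->q_dma.bytesused);
	}
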
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index 555d5e6..ee7e884 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -406,7 +406,7 @@
 	ssize_t rc = count ? ivtv_read(s, ubuf, count, non_block) : 0;
 	struct ivtv *itv = s->itv;
 
-	IVTV_DEBUG_INFO("read %zd from %s, got %zd\n", count, s->name, rc);
+	IVTV_DEBUG_HI_INFO("read %zd from %s, got %zd\n", count, s->name, rc);
 	if (rc > 0)
 		pos += rc;
 	return rc;
@@ -497,7 +497,7 @@
 	struct ivtv_stream *s = &itv->streams[id->type];
 	int rc;
 
-	IVTV_DEBUG_IOCTL("read %zd bytes from %s\n", count, s->name);
+	IVTV_DEBUG_HI_IOCTL("read %zd bytes from %s\n", count, s->name);
 
 	rc = ivtv_start_capture(id);
 	if (rc)
@@ -535,7 +535,7 @@
 	int rc;
 	DEFINE_WAIT(wait);
 
-	IVTV_DEBUG_IOCTL("write %zd bytes to %s\n", count, s->name);
+	IVTV_DEBUG_HI_IOCTL("write %zd bytes to %s\n", count, s->name);
 
 	if (s->type != IVTV_DEC_STREAM_TYPE_MPG &&
 	    s->type != IVTV_DEC_STREAM_TYPE_YUV &&
@@ -643,7 +643,7 @@
 	   to transfer the rest. */
 	if (count && !(filp->f_flags & O_NONBLOCK))
 		goto retry;
-	IVTV_DEBUG_INFO("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
+	IVTV_DEBUG_HI_INFO("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
 	return bytes_written;
 }
 
diff --git a/drivers/media/video/ivtv/ivtv-firmware.c b/drivers/media/video/ivtv/ivtv-firmware.c
index d4c910b..2b6208a 100644
--- a/drivers/media/video/ivtv/ivtv-firmware.c
+++ b/drivers/media/video/ivtv/ivtv-firmware.c
@@ -56,9 +56,7 @@
 		volatile u32 __iomem *dst = (volatile u32 __iomem *)mem;
 		const u32 *src = (const u32 *)fw->data;
 
-		/* temporarily allow 256 KB encoding firmwares as well for
-		   compatibility with blackbird cards */
-		if (fw->size != size && fw->size != 256 * 1024) {
+		if (fw->size != size) {
 			/* Due to race conditions in firmware loading (esp. with udev <0.95)
 			   the wrong file was sometimes loaded. So we check filesizes to
 			   see if at least the right-sized file was loaded. If not, then we
diff --git a/drivers/media/video/ivtv/ivtv-gpio.c b/drivers/media/video/ivtv/ivtv-gpio.c
index bc8f8ca..676418c 100644
--- a/drivers/media/video/ivtv/ivtv-gpio.c
+++ b/drivers/media/video/ivtv/ivtv-gpio.c
@@ -115,8 +115,7 @@
 	curout = (curout & ~0xF) | 1;
 	write_reg(curout, IVTV_REG_GPIO_OUT);
 	/* We could use something else for smaller time */
-	current->state = TASK_INTERRUPTIBLE;
-	schedule_timeout(1);
+	schedule_timeout_interruptible(msecs_to_jiffies(1));
 	curout |= 2;
 	write_reg(curout, IVTV_REG_GPIO_OUT);
 	curdir &= ~0x80;
@@ -138,13 +137,11 @@
 
 	curout &= ~(1 << 12);
 	write_reg(curout, IVTV_REG_GPIO_OUT);
-	current->state = TASK_INTERRUPTIBLE;
-	schedule_timeout(1);
+	schedule_timeout_interruptible(msecs_to_jiffies(1));
 
 	curout |= (1 << 12);
 	write_reg(curout, IVTV_REG_GPIO_OUT);
-	current->state = TASK_INTERRUPTIBLE;
-	schedule_timeout(1);
+	schedule_timeout_interruptible(msecs_to_jiffies(1));
 
 	return 0;
 }
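
schedule_timeout_interruptible() sets the task state and sleeps in one call, which is exactly what the removed two-line sequence did by hand. A rough equivalence, for illustration only:

	#include <linux/sched.h>
	#include <linux/jiffies.h>

	/* illustrative helper: sleep for roughly one millisecond */
	static void example_short_sleep(void)
	{
		/* replaces:
		 *	current->state = TASK_INTERRUPTIBLE;
		 *	schedule_timeout(msecs_to_jiffies(1));
		 */
		schedule_timeout_interruptible(msecs_to_jiffies(1));
	}
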
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 57af176..4773453 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1159,7 +1159,7 @@
 
 		memset(fb, 0, sizeof(*fb));
 		if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
-			break;
+			return -EINVAL;
 		fb->capability = V4L2_FBUF_CAP_EXTERNOVERLAY | V4L2_FBUF_CAP_CHROMAKEY |
 			V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_GLOBAL_ALPHA;
 		fb->fmt.pixelformat = itv->osd_pixelformat;
@@ -1179,7 +1179,7 @@
 		struct v4l2_framebuffer *fb = arg;
 
 		if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
-			break;
+			return -EINVAL;
 		itv->osd_global_alpha_state = (fb->flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) != 0;
 		itv->osd_local_alpha_state = (fb->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) != 0;
 		itv->osd_color_key_state = (fb->flags & V4L2_FBUF_FLAG_CHROMAKEY) != 0;
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index ba98bf0..1a3ee46 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -48,7 +48,7 @@
 	struct list_head *p;
 	int i = 0;
 
-	IVTV_DEBUG_DMA("ivtv_pio_work_handler\n");
+	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
 	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
 			s->v4l2dev == NULL || !ivtv_use_pio(s)) {
 		itv->cur_pio_stream = -1;
@@ -56,7 +56,7 @@
 		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
 		return;
 	}
-	IVTV_DEBUG_DMA("Process PIO %s\n", s->name);
+	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
 	buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
 	list_for_each(p, &s->q_dma.list) {
 		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
@@ -187,7 +187,7 @@
 		bytes_needed += UVsize;
 	}
 
-	IVTV_DEBUG_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
+	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
 		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);
 
 	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
@@ -242,7 +242,7 @@
 	u32 *u32buf;
 	int x = 0;
 
-	IVTV_DEBUG_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
+	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
 			s->name, s->dma_offset);
 	list_for_each(p, &s->q_dma.list) {
 		buf = list_entry(p, struct ivtv_buffer, list);
@@ -321,7 +321,7 @@
 	unsigned long flags = 0;
 	int idx = 0;
 
-	IVTV_DEBUG_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
+	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
 	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
 	list_for_each(p, &s->q_predma.list) {
 		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
@@ -368,7 +368,7 @@
 	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
 	int i;
 
-	IVTV_DEBUG_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);
+	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);
 
 	if (s->q_predma.bytesused)
 		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
@@ -397,7 +397,7 @@
 		itv->vbi.dma_offset = s_vbi->dma_offset;
 		s_vbi->SG_length = 0;
 		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
-		IVTV_DEBUG_DMA("include DMA for %s\n", s->name);
+		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
 	}
 
 	/* Mark last buffer size for Interrupt flag */
@@ -431,7 +431,7 @@
 
 	if (s->q_predma.bytesused)
 		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
-	IVTV_DEBUG_DMA("start DMA for %s\n", s->name);
+	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
 	/* put SG Handle into register 0x0c */
 	write_reg(s->SG_handle, IVTV_REG_DECDMAADDR);
 	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
@@ -447,7 +447,7 @@
 	struct ivtv_buffer *buf;
 	int hw_stream_type;
 
-	IVTV_DEBUG_IRQ("DEC DMA READ\n");
+	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
 	del_timer(&itv->dma_timer);
 	if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
 		IVTV_DEBUG_WARN("DEC DMA ERROR %x\n", read_reg(IVTV_REG_DMASTATUS));
@@ -462,7 +462,7 @@
 			s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
 			hw_stream_type = 0;
 		}
-		IVTV_DEBUG_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);
+		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);
 
 		ivtv_stream_sync_for_cpu(s);
 
@@ -495,7 +495,7 @@
 
 	del_timer(&itv->dma_timer);
 	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
-	IVTV_DEBUG_IRQ("ENC DMA COMPLETE %x %d\n", data[0], data[1]);
+	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d\n", data[0], data[1]);
 	if (test_and_clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags))
 		data[1] = 3;
 	else if (data[1] > 2)
@@ -532,7 +532,7 @@
 		return;
 	}
 	s = &itv->streams[itv->cur_pio_stream];
-	IVTV_DEBUG_IRQ("ENC PIO COMPLETE %s\n", s->name);
+	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
 	s->SG_length = 0;
 	clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
 	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
@@ -590,7 +590,7 @@
 
 	/* Get DMA destination and size arguments from card */
 	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
-	IVTV_DEBUG_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);
+	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);
 
 	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
 		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
@@ -610,7 +610,7 @@
 	u32 data[CX2341X_MBOX_MAX_DATA];
 	struct ivtv_stream *s;
 
-	IVTV_DEBUG_IRQ("ENC START VBI CAP\n");
+	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
 	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
 
 	/* If more than two VBI buffers are pending, then
@@ -644,7 +644,7 @@
 	u32 data[CX2341X_MBOX_MAX_DATA];
 	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];
 
-	IVTV_DEBUG_IRQ("DEC VBI REINSERT\n");
+	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
 	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
 			!stream_enc_dma_append(s, data)) {
 		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
@@ -669,7 +669,7 @@
 		itv->dma_data_req_offset = data[1];
 		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
 	}
-	IVTV_DEBUG_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
+	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
 		       itv->dma_data_req_offset, itv->dma_data_req_size);
 	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
 		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
@@ -791,10 +791,10 @@
 	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
 	   these messages */
 	if (combo & ~0xff6d0400)
-		IVTV_DEBUG_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);
+		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);
 
 	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
-		IVTV_DEBUG_IRQ("DEC DMA COMPLETE\n");
+		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
 	}
 
 	if (combo & IVTV_IRQ_DMA_READ) {
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 6af88ae..2871171 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -446,6 +446,9 @@
 	if (s->v4l2dev == NULL)
 		return -EINVAL;
 
+	/* Big serialization lock to ensure no two streams are started
+	   simultaneously: that can give all sorts of weird results. */
+	mutex_lock(&itv->serialize_lock);
 	IVTV_DEBUG_INFO("Start encoder stream %s\n", s->name);
 
 	switch (s->type) {
@@ -487,6 +490,7 @@
 			0, sizeof(itv->vbi.sliced_mpeg_size));
 		break;
 	default:
+		mutex_unlock(&itv->serialize_lock);
 		return -EINVAL;
 	}
 	s->subtype = subtype;
@@ -568,6 +572,7 @@
 	if (ivtv_vapi(itv, CX2341X_ENC_START_CAPTURE, 2, captype, subtype))
 	{
 		IVTV_DEBUG_WARN( "Error starting capture!\n");
+		mutex_unlock(&itv->serialize_lock);
 		return -EINVAL;
 	}
 
@@ -583,6 +588,7 @@
 
 	/* you're live! sit back and await interrupts :) */
 	atomic_inc(&itv->capturing);
+	mutex_unlock(&itv->serialize_lock);
 	return 0;
 }
 
@@ -762,17 +768,6 @@
 	/* when: 0 =  end of GOP  1 = NOW!, type: 0 = mpeg, subtype: 3 = video+audio */
 	ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, stopmode, cap_type, s->subtype);
 
-	/* only run these if we're shutting down the last cap */
-	if (atomic_read(&itv->capturing) - 1 == 0) {
-		/* event notification (off) */
-		if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
-			/* type: 0 = refresh */
-			/* on/off: 0 = off, intr: 0x10000000, mbox_id: -1: none */
-			ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1);
-			ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
-		}
-	}
-
 	then = jiffies;
 
 	if (!test_bit(IVTV_F_S_PASSTHROUGH, &s->s_flags)) {
@@ -812,7 +807,6 @@
 		then = jiffies;
 		/* Make sure DMA is complete */
 		add_wait_queue(&s->waitq, &wait);
-		set_current_state(TASK_INTERRUPTIBLE);
 		do {
 			/* check if DMA is pending */
 			if ((s->type == IVTV_ENC_STREAM_TYPE_MPG) &&	/* MPG Only */
@@ -827,9 +821,7 @@
 			} else if (read_reg(IVTV_REG_DMASTATUS) & 0x02) {
 				break;
 			}
-
-			ivtv_sleep_timeout(HZ / 100, 1);
-		} while (then + HZ * 2 > jiffies);
+		} while (!ivtv_sleep_timeout(HZ / 100, 1) && then + HZ * 2 > jiffies);
 
 		set_current_state(TASK_RUNNING);
 		remove_wait_queue(&s->waitq, &wait);
@@ -840,17 +832,30 @@
 	/* Clear capture and no-read bits */
 	clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
 
+	/* ensure these global cleanup actions are done only once */
+	mutex_lock(&itv->serialize_lock);
+
 	if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
 		ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP);
 
 	if (atomic_read(&itv->capturing) > 0) {
+		mutex_unlock(&itv->serialize_lock);
 		return 0;
 	}
 
 	/* Set the following Interrupt mask bits for capture */
 	ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
 
+	/* event notification (off) */
+	if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
+		/* type: 0 = refresh */
+		/* on/off: 0 = off, intr: 0x10000000, mbox_id: -1: none */
+		ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1);
+		ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
+	}
+
 	wake_up(&s->waitq);
+	mutex_unlock(&itv->serialize_lock);
 
 	return 0;
 }
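
Because the new serialize_lock is taken at the top of the start/stop paths, every return in between has to drop it again, which is why each error case above gains a mutex_unlock(). The locking shape, condensed into an illustrative function (the failure condition is hypothetical):

	#include <linux/mutex.h>

	static int example_start_stream(struct ivtv *itv, int setup_failed)
	{
		int ret = 0;

		/* only one stream may be started at a time */
		mutex_lock(&itv->serialize_lock);

		if (setup_failed) {
			ret = -EINVAL;
			goto out;		/* error paths must unlock too */
		}
		atomic_inc(&itv->capturing);
	out:
		mutex_unlock(&itv->serialize_lock);
		return ret;
	}
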
diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c
index 3ba46e0..a7282a9 100644
--- a/drivers/media/video/ivtv/ivtv-vbi.c
+++ b/drivers/media/video/ivtv/ivtv-vbi.c
@@ -219,31 +219,23 @@
 	int found_cc = 0;
 	int cc_pos = itv->vbi.cc_pos;
 
-	if (itv->vbi.service_set_out == 0)
-		return -EPERM;
-
 	while (count >= sizeof(struct v4l2_sliced_vbi_data)) {
 		switch (p->id) {
 		case V4L2_SLICED_CAPTION_525:
-			if (p->id == V4L2_SLICED_CAPTION_525 &&
-			    p->line == 21 &&
-			    (itv->vbi.service_set_out &
-				V4L2_SLICED_CAPTION_525) == 0) {
-				break;
-			}
-			found_cc = 1;
-			if (p->field) {
-				cc[2] = p->data[0];
-				cc[3] = p->data[1];
-			} else {
-				cc[0] = p->data[0];
-				cc[1] = p->data[1];
+			if (p->line == 21) {
+				found_cc = 1;
+				if (p->field) {
+					cc[2] = p->data[0];
+					cc[3] = p->data[1];
+				} else {
+					cc[0] = p->data[0];
+					cc[1] = p->data[1];
+				}
 			}
 			break;
 
 		case V4L2_SLICED_VPS:
-			if (p->line == 16 && p->field == 0 &&
-			    (itv->vbi.service_set_out & V4L2_SLICED_VPS)) {
+			if (p->line == 16 && p->field == 0) {
 				itv->vbi.vps[0] = p->data[2];
 				itv->vbi.vps[1] = p->data[8];
 				itv->vbi.vps[2] = p->data[9];
@@ -255,8 +247,7 @@
 			break;
 
 		case V4L2_SLICED_WSS_625:
-			if (p->line == 23 && p->field == 0 &&
-			    (itv->vbi.service_set_out & V4L2_SLICED_WSS_625)) {
+			if (p->line == 23 && p->field == 0) {
 				/* No lock needed for WSS */
 				itv->vbi.wss = p->data[0] | (p->data[1] << 8);
 				itv->vbi.wss_found = 1;
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index 3bb7d66..11cfcf1 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -157,8 +157,7 @@
 			break;
 		v4l_warn(client, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
 		       dev, addr);
-		current->state = TASK_INTERRUPTIBLE;
-		schedule_timeout(msecs_to_jiffies(10));
+		schedule_timeout_interruptible(msecs_to_jiffies(10));
 	}
 	if (err == 3) {
 		v4l_warn(client, "giving up, resetting chip. Sound will go off, sorry folks :-|\n");
@@ -197,8 +196,7 @@
 			break;
 		v4l_warn(client, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
 		       dev, addr);
-		current->state = TASK_INTERRUPTIBLE;
-		schedule_timeout(msecs_to_jiffies(10));
+		schedule_timeout_interruptible(msecs_to_jiffies(10));
 	}
 	if (err == 3) {
 		v4l_warn(client, "giving up, resetting chip. Sound will go off, sorry folks :-|\n");
@@ -814,10 +812,9 @@
 	int msp_product, msp_prod_hi, msp_prod_lo;
 	int msp_rom;
 
-	client = kmalloc(sizeof(*client), GFP_KERNEL);
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
 	if (client == NULL)
 		return -ENOMEM;
-	memset(client, 0, sizeof(*client));
 	client->addr = address;
 	client->adapter = adapter;
 	client->driver = &i2c_driver;
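
kzalloc() allocates and zeroes in one step, so the kmalloc()/memset() pair collapses into a single call. Minimal sketch of the idiom (generic structure, not the msp3400 client):

	#include <linux/slab.h>

	struct example_state {
		int mode;
		void *buf;
	};

	static struct example_state *example_alloc(void)
	{
		/* was: p = kmalloc(sizeof(*p), GFP_KERNEL); memset(p, 0, sizeof(*p)); */
		return kzalloc(sizeof(struct example_state), GFP_KERNEL);
	}
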
diff --git a/drivers/media/video/msp3400-kthreads.c b/drivers/media/video/msp3400-kthreads.c
index e1821eb..d5ee262 100644
--- a/drivers/media/video/msp3400-kthreads.c
+++ b/drivers/media/video/msp3400-kthreads.c
@@ -23,6 +23,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/freezer.h>
 #include <linux/videodev.h>
 #include <linux/videodev2.h>
 #include <media/v4l2-common.h>
@@ -468,6 +469,7 @@
 
 
 	v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n");
+	set_freezable();
 	for (;;) {
 		v4l_dbg(2, msp_debug, client, "msp3400 thread: sleep\n");
 		msp_sleep(state, -1);
@@ -646,7 +648,7 @@
 	int val, i, std, count;
 
 	v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n");
-
+	set_freezable();
 	for (;;) {
 		v4l_dbg(2, msp_debug, client, "msp3410 thread: sleep\n");
 		msp_sleep(state,-1);
@@ -940,7 +942,7 @@
 	int val, i;
 
 	v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n");
-
+	set_freezable();
 	for (;;) {
 		v4l_dbg(2, msp_debug, client, "msp34xxg thread: sleep\n");
 		msp_sleep(state, -1);
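
set_freezable() (from <linux/freezer.h>) marks the thread as freezable now that kernel threads are no longer frozen by default during suspend. The usual loop shape, sketched with a hypothetical worker:

	#include <linux/kthread.h>
	#include <linux/freezer.h>
	#include <linux/sched.h>

	static int example_thread(void *data)
	{
		set_freezable();		/* let the freezer park us on suspend */

		while (!kthread_should_stop()) {
			try_to_freeze();	/* blocks here during suspend/resume */
			/* ... do one unit of work ... */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}
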
diff --git a/drivers/media/video/mt20xx.c b/drivers/media/video/mt20xx.c
index c7c9f3f8..7549114 100644
--- a/drivers/media/video/mt20xx.c
+++ b/drivers/media/video/mt20xx.c
@@ -7,7 +7,7 @@
 #include <linux/i2c.h>
 #include <linux/videodev.h>
 #include <linux/moduleparam.h>
-#include <media/tuner.h>
+#include "tuner-driver.h"
 
 /* ---------------------------------------------------------------------- */
 
@@ -37,6 +37,19 @@
 	[ MT2050 ] = "MT2050",
 };
 
+struct microtune_priv {
+	unsigned int xogc;
+	unsigned int radio_if2;
+};
+
+static void microtune_release(struct i2c_client *c)
+{
+	struct tuner *t = i2c_get_clientdata(c);
+
+	kfree(t->priv);
+	t->priv = NULL;
+}
+
 // IsSpurInBand()?
 static int mt2032_spurcheck(struct i2c_client *c,
 			    int f1, int f2, int spectrum_from,int spectrum_to)
@@ -218,6 +231,7 @@
 	unsigned char buf[21];
 	int lint_try,ret,sel,lock=0;
 	struct tuner *t = i2c_get_clientdata(c);
+	struct microtune_priv *priv = t->priv;
 
 	tuner_dbg("mt2032_set_if_freq rfin=%d if1=%d if2=%d from=%d to=%d\n",
 		  rfin,if1,if2,from,to);
@@ -227,7 +241,7 @@
 	i2c_master_recv(c,buf,21);
 
 	buf[0]=0;
-	ret=mt2032_compute_freq(c,rfin,if1,if2,from,to,&buf[1],&sel,t->xogc);
+	ret=mt2032_compute_freq(c,rfin,if1,if2,from,to,&buf[1],&sel,priv->xogc);
 	if (ret<0)
 		return;
 
@@ -251,10 +265,10 @@
 
 		tuner_dbg("mt2032: re-init PLLs by LINT\n");
 		buf[0]=7;
-		buf[1]=0x80 +8+t->xogc; // set LINT to re-init PLLs
+		buf[1]=0x80 +8+priv->xogc; // set LINT to re-init PLLs
 		i2c_master_send(c,buf,2);
 		mdelay(10);
-		buf[1]=8+t->xogc;
+		buf[1]=8+priv->xogc;
 		i2c_master_send(c,buf,2);
 	}
 
@@ -294,17 +308,25 @@
 static void mt2032_set_radio_freq(struct i2c_client *c, unsigned int freq)
 {
 	struct tuner *t = i2c_get_clientdata(c);
-	int if2 = t->radio_if2;
+	struct microtune_priv *priv = t->priv;
+	int if2 = priv->radio_if2;
 
 	// per Manual for FM tuning: first if center freq. 1085 MHz
 	mt2032_set_if_freq(c, freq * 1000 / 16,
 			      1085*1000*1000,if2,if2,if2);
 }
 
+static struct tuner_operations mt2032_tuner_ops = {
+	.set_tv_freq    = mt2032_set_tv_freq,
+	.set_radio_freq = mt2032_set_radio_freq,
+	.release        = microtune_release,
+};
+
 // Initalization as described in "MT203x Programming Procedures", Rev 1.2, Feb.2001
 static int mt2032_init(struct i2c_client *c)
 {
 	struct tuner *t = i2c_get_clientdata(c);
+	struct microtune_priv *priv = t->priv;
 	unsigned char buf[21];
 	int ret,xogc,xok=0;
 
@@ -351,23 +373,23 @@
 		if (ret!=2)
 			tuner_warn("i2c i/o error: rc == %d (should be 2)\n",ret);
 	} while (xok != 1 );
-	t->xogc=xogc;
+	priv->xogc=xogc;
 
-	t->set_tv_freq    = mt2032_set_tv_freq;
-	t->set_radio_freq = mt2032_set_radio_freq;
+	memcpy(&t->ops, &mt2032_tuner_ops, sizeof(struct tuner_operations));
+
 	return(1);
 }
 
 static void mt2050_set_antenna(struct i2c_client *c, unsigned char antenna)
 {
 	struct tuner *t = i2c_get_clientdata(c);
-       unsigned char buf[2];
-       int ret;
+	unsigned char buf[2];
+	int ret;
 
-       buf[0] = 6;
-       buf[1] = antenna ? 0x11 : 0x10;
-       ret=i2c_master_send(c,buf,2);
-       tuner_dbg("mt2050: enabled antenna connector %d\n", antenna);
+	buf[0] = 6;
+	buf[1] = antenna ? 0x11 : 0x10;
+	ret=i2c_master_send(c,buf,2);
+	tuner_dbg("mt2050: enabled antenna connector %d\n", antenna);
 }
 
 static void mt2050_set_if_freq(struct i2c_client *c,unsigned int freq, unsigned int if2)
@@ -456,12 +478,19 @@
 static void mt2050_set_radio_freq(struct i2c_client *c, unsigned int freq)
 {
 	struct tuner *t = i2c_get_clientdata(c);
-	int if2 = t->radio_if2;
+	struct microtune_priv *priv = t->priv;
+	int if2 = priv->radio_if2;
 
 	mt2050_set_if_freq(c, freq * 1000 / 16, if2);
 	mt2050_set_antenna(c, radio_antenna);
 }
 
+static struct tuner_operations mt2050_tuner_ops = {
+	.set_tv_freq    = mt2050_set_tv_freq,
+	.set_radio_freq = mt2050_set_radio_freq,
+	.release        = microtune_release,
+};
+
 static int mt2050_init(struct i2c_client *c)
 {
 	struct tuner *t = i2c_get_clientdata(c);
@@ -481,28 +510,35 @@
 	i2c_master_recv(c,buf,1);
 
 	tuner_dbg("mt2050: sro is %x\n",buf[0]);
-	t->set_tv_freq    = mt2050_set_tv_freq;
-	t->set_radio_freq = mt2050_set_radio_freq;
+
+	memcpy(&t->ops, &mt2050_tuner_ops, sizeof(struct tuner_operations));
+
 	return 0;
 }
 
 int microtune_init(struct i2c_client *c)
 {
+	struct microtune_priv *priv = NULL;
 	struct tuner *t = i2c_get_clientdata(c);
 	char *name;
 	unsigned char buf[21];
 	int company_code;
 
+	priv = kzalloc(sizeof(struct microtune_priv), GFP_KERNEL);
+	if (priv == NULL)
+		return -ENOMEM;
+	t->priv = priv;
+
+	priv->radio_if2 = 10700 * 1000;	/* 10.7MHz - FM radio */
+
 	memset(buf,0,sizeof(buf));
-	t->set_tv_freq    = NULL;
-	t->set_radio_freq = NULL;
-	t->standby    = NULL;
+
 	if (t->std & V4L2_STD_525_60) {
 		tuner_dbg("pinnacle ntsc\n");
-		t->radio_if2 = 41300 * 1000;
+		priv->radio_if2 = 41300 * 1000;
 	} else {
 		tuner_dbg("pinnacle pal\n");
-		t->radio_if2 = 33300 * 1000;
+		priv->radio_if2 = 33300 * 1000;
 	}
 	name = "unknown";
 
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index 3ceb8a6..f8f21dd 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -617,7 +617,7 @@
 	},
 };
 
-#define N_WIN_SIZES (sizeof(ov7670_win_sizes)/sizeof(ov7670_win_sizes[0]))
+#define N_WIN_SIZES (ARRAY_SIZE(ov7670_win_sizes))
 
 
 /*
@@ -1183,7 +1183,7 @@
 		.query = ov7670_q_hflip,
 	},
 };
-#define N_CONTROLS (sizeof(ov7670_controls)/sizeof(ov7670_controls[0]))
+#define N_CONTROLS (ARRAY_SIZE(ov7670_controls))
 
 static struct ov7670_control *ov7670_find_control(__u32 id)
 {
diff --git a/drivers/media/video/planb.c b/drivers/media/video/planb.c
index 1455a8f..4ab1af7 100644
--- a/drivers/media/video/planb.c
+++ b/drivers/media/video/planb.c
@@ -353,9 +353,8 @@
 		* PLANB_DUMMY)*sizeof(struct dbdma_cmd)
 		+(PLANB_MAXLINES*((PLANB_MAXPIXELS+7)& ~7))/8
 		+MAX_GBUFFERS*sizeof(unsigned int);
-	if ((pb->priv_space = kmalloc (size, GFP_KERNEL)) == 0)
+	if ((pb->priv_space = kzalloc (size, GFP_KERNEL)) == 0)
 		return -ENOMEM;
-	memset ((void *) pb->priv_space, 0, size);
 	pb->overlay_last1 = pb->ch1_cmd = (volatile struct dbdma_cmd *)
 						DBDMA_ALIGN (pb->priv_space);
 	pb->overlay_last2 = pb->ch2_cmd = pb->ch1_cmd + pb->tab_size;
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 085332a..9c0e8d1 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -1099,7 +1099,7 @@
 		return -EBUSY;
 	}
 
-	down(&pdev->modlock);
+	mutex_lock(&pdev->modlock);
 	if (!pdev->usb_init) {
 		PWC_DEBUG_OPEN("Doing first time initialization.\n");
 		pdev->usb_init = 1;
@@ -1131,7 +1131,7 @@
 	if (i < 0) {
 		PWC_DEBUG_OPEN("Failed to allocate buffers memory.\n");
 		pwc_free_buffers(pdev);
-		up(&pdev->modlock);
+		mutex_unlock(&pdev->modlock);
 		return i;
 	}
 
@@ -1172,7 +1172,7 @@
 	if (i) {
 		PWC_DEBUG_OPEN("Second attempt at set_video_mode failed.\n");
 		pwc_free_buffers(pdev);
-		up(&pdev->modlock);
+		mutex_unlock(&pdev->modlock);
 		return i;
 	}
 
@@ -1181,7 +1181,7 @@
 		PWC_DEBUG_OPEN("Failed to init ISOC stuff = %d.\n", i);
 		pwc_isoc_cleanup(pdev);
 		pwc_free_buffers(pdev);
-		up(&pdev->modlock);
+		mutex_unlock(&pdev->modlock);
 		return i;
 	}
 
@@ -1191,7 +1191,7 @@
 
 	pdev->vopen++;
 	file->private_data = vdev;
-	up(&pdev->modlock);
+	mutex_unlock(&pdev->modlock);
 	PWC_DEBUG_OPEN("<< video_open() returns 0.\n");
 	return 0;
 }
@@ -1685,7 +1685,7 @@
 		pdev->angle_range.tilt_max =  2500;
 	}
 
-	init_MUTEX(&pdev->modlock);
+	mutex_init(&pdev->modlock);
 	spin_lock_init(&pdev->ptrlock);
 
 	pdev->udev = udev;
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index acbb931..910a04f 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -31,7 +31,7 @@
 #include <linux/wait.h>
 #include <linux/smp_lock.h>
 #include <linux/version.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 #include <asm/errno.h>
 #include <linux/videodev.h>
 #include <media/v4l2-common.h>
@@ -244,7 +244,7 @@
    int image_read_pos;			/* In case we read data in pieces, keep track of were we are in the imagebuffer */
    int image_used[MAX_IMAGES];		/* For MCAPTURE and SYNC */
 
-   struct semaphore modlock;		/* to prevent races in video_open(), etc */
+   struct mutex modlock;		/* to prevent races in video_open(), etc */
    spinlock_t ptrlock;			/* for manipulating the buffer pointers */
 
    /*** motorized pan/tilt feature */
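
The modlock semaphore becomes a struct mutex, so init_MUTEX()/down()/up() map onto mutex_init()/mutex_lock()/mutex_unlock(). The mapping as a small sketch (generic device structure, not the pwc one):

	#include <linux/mutex.h>

	struct example_dev {
		struct mutex modlock;		/* was: struct semaphore modlock */
	};

	static void example_probe(struct example_dev *pdev)
	{
		mutex_init(&pdev->modlock);	/* was: init_MUTEX(&pdev->modlock) */
	}

	static void example_open(struct example_dev *pdev)
	{
		mutex_lock(&pdev->modlock);	/* was: down(&pdev->modlock) */
		/* ... state shared with the other open()/ioctl() paths ... */
		mutex_unlock(&pdev->modlock);	/* was: up(&pdev->modlock) */
	}
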
diff --git a/drivers/media/video/saa7111.c b/drivers/media/video/saa7111.c
index c1a392e..7ae2d64 100644
--- a/drivers/media/video/saa7111.c
+++ b/drivers/media/video/saa7111.c
@@ -37,23 +37,23 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
-#include <linux/types.h>
+#include <asm/uaccess.h>
 
 #include <linux/videodev.h>
-#include <asm/uaccess.h>
+#include <linux/video_decoder.h>
 
 MODULE_DESCRIPTION("Philips SAA7111 video decoder driver");
 MODULE_AUTHOR("Dave Perks");
 MODULE_LICENSE("GPL");
 
-#include <linux/i2c.h>
 
 #define I2C_NAME(s) (s)->name
 
-#include <linux/video_decoder.h>
 
 static int debug = 0;
 module_param(debug, int, 0644);
diff --git a/drivers/media/video/saa7114.c b/drivers/media/video/saa7114.c
index 87c3144..677df51 100644
--- a/drivers/media/video/saa7114.c
+++ b/drivers/media/video/saa7114.c
@@ -35,28 +35,26 @@
 #include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/major.h>
-
 #include <linux/slab.h>
-
 #include <linux/mm.h>
 #include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
-#include <linux/types.h>
+#include <asm/uaccess.h>
 
 #include <linux/videodev.h>
-#include <asm/uaccess.h>
+#include <linux/video_decoder.h>
 
 MODULE_DESCRIPTION("Philips SAA7114H video decoder driver");
 MODULE_AUTHOR("Maxim Yevtyushkin");
 MODULE_LICENSE("GPL");
 
-#include <linux/i2c.h>
 
 #define I2C_NAME(x) (x)->name
 
-#include <linux/video_decoder.h>
 
 static int debug = 0;
 module_param(debug, int, 0);
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index 309dca3..9f1417a 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -40,7 +40,7 @@
 	depends on VIDEO_SAA7134 && DVB_CORE
 	select VIDEO_BUF_DVB
 	select FW_LOADER
-	select DVB_PLL
+	select DVB_PLL if !DVB_FE_CUSTOMISE
 	select DVB_MT352 if !DVB_FE_CUSTOMISE
 	select DVB_TDA1004X if !DVB_FE_CUSTOMISE
 	select DVB_NXT200X if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/video/saa7134/saa7134-alsa.c b/drivers/media/video/saa7134/saa7134-alsa.c
index ffb0f64..3c0fc90 100644
--- a/drivers/media/video/saa7134/saa7134-alsa.c
+++ b/drivers/media/video/saa7134/saa7134-alsa.c
@@ -75,7 +75,8 @@
 	struct saa7134_dev *dev;
 
 	unsigned long iobase;
-	int irq;
+	s16 irq;
+	u16 mute_was_on;
 
 	spinlock_t lock;
 } snd_card_saa7134_t;
@@ -589,8 +590,10 @@
 	snd_card_saa7134_t *saa7134 = snd_pcm_substream_chip(substream);
 	struct saa7134_dev *dev = saa7134->dev;
 
-	dev->ctl_mute = 1;
-	saa7134_tvaudio_setmute(dev);
+	if (saa7134->mute_was_on) {
+		dev->ctl_mute = 1;
+		saa7134_tvaudio_setmute(dev);
+	}
 	return 0;
 }
 
@@ -637,8 +640,11 @@
 	runtime->private_free = snd_card_saa7134_runtime_free;
 	runtime->hw = snd_card_saa7134_capture;
 
-	dev->ctl_mute = 0;
-	saa7134_tvaudio_setmute(dev);
+	if (dev->ctl_mute != 0) {
+		saa7134->mute_was_on = 1;
+		dev->ctl_mute = 0;
+		saa7134_tvaudio_setmute(dev);
+	}
 
 	if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
 		return err;
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 50f15ad..8ec83bd 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -400,7 +400,7 @@
 		.inputs		= {{
 			.name = name_tv,
 			.vmux = 1,
-			.amux = LINE2,
+			.amux = TV,
 			.tv   = 1,
 			.gpio = 0x20000,
 		},{
@@ -3502,6 +3502,38 @@
 			.amux = TV,
 		},
 	},
+	[SAA7134_BOARD_10MOONSTVMASTER3] = {
+		/* Tony Wan <aloha_cn@hotmail.com> */
+		.name           = "10MOONS TM300 TV Card",
+		.audio_clock    = 0x00200000,
+		.tuner_type     = TUNER_LG_PAL_NEW_TAPC,
+		.radio_type     = UNSET,
+		.tuner_addr     = ADDR_UNSET,
+		.radio_addr     = ADDR_UNSET,
+		.gpiomask       = 0x7000,
+		.inputs         = {{
+			.name = name_tv,
+			.vmux = 1,
+			.amux = LINE2,
+			.gpio = 0x0000,
+			.tv   = 1,
+		},{
+			.name = name_comp1,
+			.vmux = 3,
+			.amux = LINE1,
+			.gpio = 0x2000,
+		},{
+			.name = name_svideo,
+			.vmux = 8,
+			.amux = LINE1,
+			.gpio = 0x2000,
+		}},
+		.mute = {
+			.name = name_mute,
+			.amux = LINE2,
+			.gpio = 0x3000,
+		},
+	},
 };
 
 const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -4219,6 +4251,12 @@
 		.subdevice    = 0x2003, /* OEM cardbus */
 		.driver_data  = SAA7134_BOARD_SABRENT_TV_PCB05,
 	},{
+		.vendor       = PCI_VENDOR_ID_PHILIPS,
+		.device       = PCI_DEVICE_ID_PHILIPS_SAA7130,
+		.subvendor    = PCI_VENDOR_ID_PHILIPS,
+		.subdevice    = 0x2304,
+		.driver_data  = SAA7134_BOARD_10MOONSTVMASTER3,
+	},{
 		/* --- boards without eeprom + subsystem ID --- */
 		.vendor       = PCI_VENDOR_ID_PHILIPS,
 		.device       = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -4330,6 +4368,7 @@
 	case SAA7134_BOARD_AVERMEDIA_A16AR:
 	case SAA7134_BOARD_ENCORE_ENLTV:
 	case SAA7134_BOARD_ENCORE_ENLTV_FM:
+	case SAA7134_BOARD_10MOONSTVMASTER3:
 		dev->has_remote = SAA7134_REMOTE_GPIO;
 		break;
 	case SAA7134_BOARD_FLYDVBS_LR300:
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index e0eec80..1f6bd33 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -175,18 +175,6 @@
 	return mt352_pinnacle_init(fe);
 }
 
-static int mt352_aver777_tuner_calc_regs(struct dvb_frontend *fe, struct dvb_frontend_parameters *params, u8* pllbuf, int buf_len)
-{
-	if (buf_len < 5)
-		return -EINVAL;
-
-	pllbuf[0] = 0x61;
-	dvb_pll_configure(&dvb_pll_philips_td1316, pllbuf+1,
-			  params->frequency,
-			  params->u.ofdm.bandwidth);
-	return 5;
-}
-
 static struct mt352_config pinnacle_300i = {
 	.demod_address = 0x3c >> 1,
 	.adc_clock     = 20333,
@@ -444,135 +432,6 @@
 
 /* ------------------------------------------------------------------ */
 
-static int philips_fmd1216_tuner_init(struct dvb_frontend *fe)
-{
-	struct saa7134_dev *dev = fe->dvb->priv;
-	struct tda1004x_state *state = fe->demodulator_priv;
-	u8 addr = state->config->tuner_address;
-	/* this message is to set up ATC and ALC */
-	static u8 fmd1216_init[] = { 0x0b, 0xdc, 0x9c, 0xa0 };
-	struct i2c_msg tuner_msg = {.addr = addr,.flags = 0,.buf = fmd1216_init,.len = sizeof(fmd1216_init) };
-
-	if (fe->ops.i2c_gate_ctrl)
-		fe->ops.i2c_gate_ctrl(fe, 1);
-	if (i2c_transfer(&dev->i2c_adap, &tuner_msg, 1) != 1)
-		return -EIO;
-	msleep(1);
-
-	return 0;
-}
-
-static int philips_fmd1216_tuner_sleep(struct dvb_frontend *fe)
-{
-	struct saa7134_dev *dev = fe->dvb->priv;
-	struct tda1004x_state *state = fe->demodulator_priv;
-	u8 addr = state->config->tuner_address;
-	/* this message actually turns the tuner back to analog mode */
-	u8 fmd1216_init[] = { 0x0b, 0xdc, 0x9c, 0x60 };
-	struct i2c_msg tuner_msg = {.addr = addr,.flags = 0,.buf = fmd1216_init,.len = sizeof(fmd1216_init) };
-
-	if (fe->ops.i2c_gate_ctrl)
-		fe->ops.i2c_gate_ctrl(fe, 1);
-	i2c_transfer(&dev->i2c_adap, &tuner_msg, 1);
-	msleep(1);
-	fmd1216_init[2] = 0x86;
-	fmd1216_init[3] = 0x54;
-	if (fe->ops.i2c_gate_ctrl)
-		fe->ops.i2c_gate_ctrl(fe, 1);
-	i2c_transfer(&dev->i2c_adap, &tuner_msg, 1);
-	msleep(1);
-	return 0;
-}
-
-static int philips_fmd1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
-{
-	struct saa7134_dev *dev = fe->dvb->priv;
-	struct tda1004x_state *state = fe->demodulator_priv;
-	u8 addr = state->config->tuner_address;
-	u8 tuner_buf[4];
-	struct i2c_msg tuner_msg = {.addr = addr,.flags = 0,.buf = tuner_buf,.len =
-			sizeof(tuner_buf) };
-	int tuner_frequency = 0;
-	int divider = 0;
-	u8 band, mode, cp;
-
-	/* determine charge pump */
-	tuner_frequency = params->frequency + 36130000;
-	if (tuner_frequency < 87000000)
-		return -EINVAL;
-	/* low band */
-	else if (tuner_frequency < 180000000) {
-		band = 1;
-		mode = 7;
-		cp   = 0;
-	} else if (tuner_frequency < 195000000) {
-		band = 1;
-		mode = 6;
-		cp   = 1;
-	/* mid band	*/
-	} else if (tuner_frequency < 366000000) {
-		if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ) {
-			band = 10;
-		} else {
-			band = 2;
-		}
-		mode = 7;
-		cp   = 0;
-	} else if (tuner_frequency < 478000000) {
-		if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ) {
-			band = 10;
-		} else {
-			band = 2;
-		}
-		mode = 6;
-		cp   = 1;
-	/* high band */
-	} else if (tuner_frequency < 662000000) {
-		if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ) {
-			band = 12;
-		} else {
-			band = 4;
-		}
-		mode = 7;
-		cp   = 0;
-	} else if (tuner_frequency < 840000000) {
-		if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ) {
-			band = 12;
-		} else {
-			band = 4;
-		}
-		mode = 6;
-		cp   = 1;
-	} else {
-		if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ) {
-			band = 12;
-		} else {
-			band = 4;
-		}
-		mode = 7;
-		cp   = 1;
-
-	}
-	/* calculate divisor */
-	/* ((36166000 + Finput) / 166666) rounded! */
-	divider = (tuner_frequency + 83333) / 166667;
-
-	/* setup tuner buffer */
-	tuner_buf[0] = (divider >> 8) & 0x7f;
-	tuner_buf[1] = divider & 0xff;
-	tuner_buf[2] = 0x80 | (cp << 6) | (mode  << 3) | 4;
-	tuner_buf[3] = 0x40 | band;
-
-	if (fe->ops.i2c_gate_ctrl)
-		fe->ops.i2c_gate_ctrl(fe, 1);
-	if (i2c_transfer(&dev->i2c_adap, &tuner_msg, 1) != 1) {
-		wprintk("could not write to tuner at addr: 0x%02x\n",
-			addr << 1);
-		return -EIO;
-	}
-	return 0;
-}
-
 static struct tda1004x_config medion_cardbus = {
 	.demod_address = 0x08,
 	.invert        = 1,
@@ -958,18 +817,8 @@
 	.demod_address    = 0x0a,
 };
 
-static int nxt200x_set_pll_input(u8 *buf, int input)
-{
-	if (input)
-		buf[3] |= 0x08;
-	else
-		buf[3] &= ~0x08;
-	return 0;
-}
-
 static struct nxt200x_config kworldatsc110 = {
 	.demod_address    = 0x0a,
-	.set_pll_input    = nxt200x_set_pll_input,
 };
 
 /* ==================================================================
@@ -1005,7 +854,8 @@
 		dev->dvb.frontend = dvb_attach(mt352_attach, &avermedia_777,
 					       &dev->i2c_adap);
 		if (dev->dvb.frontend) {
-			dev->dvb.frontend->ops.tuner_ops.calc_regs = mt352_aver777_tuner_calc_regs;
+			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
+				   NULL, DVB_PLL_PHILIPS_TD1316);
 		}
 		break;
 	case SAA7134_BOARD_MD7134:
@@ -1013,9 +863,8 @@
 					       &medion_cardbus,
 					       &dev->i2c_adap);
 		if (dev->dvb.frontend) {
-			dev->dvb.frontend->ops.tuner_ops.init = philips_fmd1216_tuner_init;
-			dev->dvb.frontend->ops.tuner_ops.sleep = philips_fmd1216_tuner_sleep;
-			dev->dvb.frontend->ops.tuner_ops.set_params = philips_fmd1216_tuner_set_params;
+			dvb_attach(dvb_pll_attach, dev->dvb.frontend, medion_cardbus.tuner_address,
+				   &dev->i2c_adap, DVB_PLL_FMD1216ME);
 		}
 		break;
 	case SAA7134_BOARD_PHILIPS_TOUGH:
@@ -1113,7 +962,7 @@
 					       &dev->i2c_adap);
 		if (dev->dvb.frontend) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
-				   NULL, &dvb_pll_tdhu2);
+				   NULL, DVB_PLL_TDHU2);
 		}
 		break;
 	case SAA7134_BOARD_KWORLD_ATSC110:
@@ -1121,7 +970,7 @@
 					       &dev->i2c_adap);
 		if (dev->dvb.frontend) {
 			dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
-				   NULL, &dvb_pll_tuv1236d);
+				   NULL, DVB_PLL_TUV1236D);
 		}
 		break;
 	case SAA7134_BOARD_FLYDVBS_LR300:
@@ -1144,9 +993,9 @@
 		if (dev->dvb.frontend) {
 			dev->original_demod_sleep = dev->dvb.frontend->ops.sleep;
 			dev->dvb.frontend->ops.sleep = philips_europa_demod_sleep;
-			dev->dvb.frontend->ops.tuner_ops.init = philips_fmd1216_tuner_init;
-			dev->dvb.frontend->ops.tuner_ops.sleep = philips_fmd1216_tuner_sleep;
-			dev->dvb.frontend->ops.tuner_ops.set_params = philips_fmd1216_tuner_set_params;
+
+			dvb_attach(dvb_pll_attach, dev->dvb.frontend, medion_cardbus.tuner_address,
+				   &dev->i2c_adap, DVB_PLL_FMD1216ME);
 		}
 		break;
 	case SAA7134_BOARD_VIDEOMATE_DVBT_200A:
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index f521603..fc260ec 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -96,6 +96,10 @@
 	if (dev->empress_users)
 		goto done_up;
 
+	/* Unmute audio */
+	saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
+		saa_readb(SAA7134_AUDIO_MUTE_CTRL) & ~(1 << 6));
+
 	dev->empress_users++;
 	file->private_data = dev;
 	err = 0;
@@ -121,6 +125,10 @@
 	/* stop the encoder */
 	ts_reset_encoder(dev);
 
+	/* Mute audio */
+	saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
+		saa_readb(SAA7134_AUDIO_MUTE_CTRL) | (1 << 6));
+
 	mutex_unlock(&dev->empress_tsq.lock);
 	return 0;
 }
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index c0de37e..1b6dfd8 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -153,21 +153,18 @@
 
 static void saa7134_input_timer(unsigned long data)
 {
-	struct saa7134_dev *dev = (struct saa7134_dev*)data;
+	struct saa7134_dev *dev = (struct saa7134_dev *)data;
 	struct card_ir *ir = dev->remote;
-	unsigned long timeout;
 
 	build_key(dev);
-	timeout = jiffies + (ir->polling * HZ / 1000);
-	mod_timer(&ir->timer, timeout);
+	mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
 }
 
 static void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir)
 {
 	if (ir->polling) {
-		init_timer(&ir->timer);
-		ir->timer.function = saa7134_input_timer;
-		ir->timer.data     = (unsigned long)dev;
+		setup_timer(&ir->timer, saa7134_input_timer,
+			    (unsigned long)dev);
 		ir->timer.expires  = jiffies + HZ;
 		add_timer(&ir->timer);
 	} else if (ir->rc5_gpio) {
@@ -314,6 +311,7 @@
 		mask_keycode = 0x003F00;
 		mask_keyup   = 0x040000;
 		break;
+	case SAA7134_BOARD_FLYDVBS_LR300:
 	case SAA7134_BOARD_FLYDVBT_LR301:
 	case SAA7134_BOARD_FLYDVBTDUO:
 		ir_codes     = ir_codes_flydvb;
@@ -333,6 +331,12 @@
 		mask_keyup   = 0x040000;
 		polling      = 50; // ms
 		break;
+	case SAA7134_BOARD_10MOONSTVMASTER3:
+		ir_codes     = ir_codes_encore_enltv;
+		mask_keycode = 0x5f80000;
+		mask_keyup   = 0x8000000;
+		polling      = 50; //ms
+		break;
 	}
 	if (NULL == ir_codes) {
 		printk("%s: Oops: IR config error [card=%d]\n",
@@ -374,7 +378,7 @@
 		input_dev->id.vendor  = dev->pci->vendor;
 		input_dev->id.product = dev->pci->device;
 	}
-	input_dev->cdev.dev = &dev->pci->dev;
+	input_dev->dev.parent = &dev->pci->dev;
 
 	dev->remote = ir;
 	saa7134_ir_start(dev, ir);
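
setup_timer() folds the init_timer() call and the function/data assignments into one helper. Illustrative sketch (hypothetical polling callback; timer callbacks take an unsigned long argument in this kernel generation):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static void example_poll(unsigned long data)
	{
		/* ... read the hardware and report keys ... */
	}

	static void example_start_polling(struct timer_list *timer,
					  unsigned long dev, unsigned int poll_ms)
	{
		/* replaces: init_timer(timer); timer->function = example_poll;
		   timer->data = dev; */
		setup_timer(timer, example_poll, dev);
		mod_timer(timer, jiffies + msecs_to_jiffies(poll_ms));
	}
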
diff --git a/drivers/media/video/saa7134/saa7134-tvaudio.c b/drivers/media/video/saa7134/saa7134-tvaudio.c
index 30395d6..18b4817 100644
--- a/drivers/media/video/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/video/saa7134/saa7134-tvaudio.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <asm/div64.h>
@@ -341,10 +342,8 @@
 
 static int tvaudio_sleep(struct saa7134_dev *dev, int timeout)
 {
-	DECLARE_WAITQUEUE(wait, current);
-
-	add_wait_queue(&dev->thread.wq, &wait);
-	if (dev->thread.scan1 == dev->thread.scan2 && !dev->thread.shutdown) {
+	if (dev->thread.scan1 == dev->thread.scan2 &&
+	    !kthread_should_stop()) {
 		if (timeout < 0) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			schedule();
@@ -353,7 +352,6 @@
 						(msecs_to_jiffies(timeout));
 		}
 	}
-	remove_wait_queue(&dev->thread.wq, &wait);
 	return dev->thread.scan1 != dev->thread.scan2;
 }
 
@@ -505,11 +503,10 @@
 	unsigned int i, audio, nscan;
 	int max1,max2,carrier,rx,mode,lastmode,default_carrier;
 
-	daemonize("%s", dev->name);
 	allow_signal(SIGTERM);
 	for (;;) {
 		tvaudio_sleep(dev,-1);
-		if (dev->thread.shutdown || signal_pending(current))
+		if (kthread_should_stop() || signal_pending(current))
 			goto done;
 
 	restart:
@@ -618,7 +615,7 @@
 		for (;;) {
 			if (tvaudio_sleep(dev,5000))
 				goto restart;
-			if (dev->thread.shutdown || signal_pending(current))
+			if (kthread_should_stop() || signal_pending(current))
 				break;
 			if (UNSET == dev->thread.mode) {
 				rx = tvaudio_getstereo(dev,&tvaudio[i]);
@@ -634,7 +631,6 @@
 	}
 
  done:
-	complete_and_exit(&dev->thread.exit, 0);
 	return 0;
 }
 
@@ -782,7 +778,6 @@
 	struct saa7134_dev *dev = data;
 	u32 value, norms, clock;
 
-	daemonize("%s", dev->name);
 	allow_signal(SIGTERM);
 
 	clock = saa7134_boards[dev->board].audio_clock;
@@ -796,7 +791,7 @@
 
 	for (;;) {
 		tvaudio_sleep(dev,-1);
-		if (dev->thread.shutdown || signal_pending(current))
+		if (kthread_should_stop() || signal_pending(current))
 			goto done;
 
 	restart:
@@ -876,7 +871,6 @@
 	}
 
  done:
-	complete_and_exit(&dev->thread.exit, 0);
 	return 0;
 }
 
@@ -973,7 +967,6 @@
 
 int saa7134_tvaudio_init2(struct saa7134_dev *dev)
 {
-	DECLARE_MUTEX_LOCKED(sem);
 	int (*my_thread)(void *data) = NULL;
 
 	switch (dev->pci->device) {
@@ -986,15 +979,15 @@
 		break;
 	}
 
-	dev->thread.pid = -1;
+	dev->thread.thread = NULL;
 	if (my_thread) {
 		/* start tvaudio thread */
-		init_waitqueue_head(&dev->thread.wq);
-		init_completion(&dev->thread.exit);
-		dev->thread.pid = kernel_thread(my_thread,dev,0);
-		if (dev->thread.pid < 0)
+		dev->thread.thread = kthread_run(my_thread, dev, "%s", dev->name);
+		if (IS_ERR(dev->thread.thread)) {
 			printk(KERN_WARNING "%s: kernel_thread() failed\n",
 			       dev->name);
+			/* XXX: missing error handling here */
+		}
 		saa7134_tvaudio_do_scan(dev);
 	}
 
@@ -1005,11 +998,9 @@
 int saa7134_tvaudio_fini(struct saa7134_dev *dev)
 {
 	/* shutdown tvaudio thread */
-	if (dev->thread.pid > 0) {
-		dev->thread.shutdown = 1;
-		wake_up_interruptible(&dev->thread.wq);
-		wait_for_completion(&dev->thread.exit);
-	}
+	if (dev->thread.thread)
+		kthread_stop(dev->thread.thread);
+
 	saa_andorb(SAA7134_ANALOG_IO_SELECT, 0x07, 0x00); /* LINE1 */
 	return 0;
 }
@@ -1020,10 +1011,10 @@
 		dprintk("sound IF not in use, skipping scan\n");
 		dev->automute = 0;
 		saa7134_tvaudio_setmute(dev);
-	} else if (dev->thread.pid >= 0) {
+	} else if (dev->thread.thread) {
 		dev->thread.mode = UNSET;
 		dev->thread.scan2++;
-		wake_up_interruptible(&dev->thread.wq);
+		wake_up_process(dev->thread.thread);
 	} else {
 		dev->automute = 0;
 		saa7134_tvaudio_setmute(dev);
@@ -1040,4 +1031,3 @@
  * c-basic-offset: 8
  * End:
  */
-
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 15623b2..d32a856 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -238,6 +238,7 @@
 #define SAA7134_BOARD_ECS_TVP3XP_4CB6  113
 #define SAA7134_BOARD_KWORLD_DVBT_210 114
 #define SAA7134_BOARD_SABRENT_TV_PCB05     115
+#define SAA7134_BOARD_10MOONSTVMASTER3     116
 
 #define SAA7134_MAXBOARDS 8
 #define SAA7134_INPUT_MAX 8
@@ -327,10 +328,7 @@
 
 /* tvaudio thread status */
 struct saa7134_thread {
-	pid_t                      pid;
-	struct completion          exit;
-	wait_queue_head_t          wq;
-	unsigned int               shutdown;
+	struct task_struct         *thread;
 	unsigned int               scan1;
 	unsigned int               scan2;
 	unsigned int               mode;
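
With the kthread API the per-driver pid, wait queue, shutdown flag and exit completion collapse into one task_struct pointer: kthread_run() starts the worker, kthread_should_stop() replaces the shutdown flag, and kthread_stop() wakes the thread and waits for it to return. A condensed sketch of that lifecycle (names generic, not the saa7134 code):

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/err.h>

	static int example_worker(void *data)
	{
		while (!kthread_should_stop()) {
			/* ... scan, then sleep until woken or stopped ... */
			schedule_timeout_interruptible(HZ);
		}
		return 0;	/* no complete_and_exit() needed */
	}

	static struct task_struct *example_start(void *dev, const char *name)
	{
		struct task_struct *task = kthread_run(example_worker, dev, "%s", name);

		return IS_ERR(task) ? NULL : task;
	}

	static void example_stop(struct task_struct *task)
	{
		if (task)
			kthread_stop(task);
	}
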
diff --git a/drivers/media/video/saa7185.c b/drivers/media/video/saa7185.c
index 339592e..66cc92c 100644
--- a/drivers/media/video/saa7185.c
+++ b/drivers/media/video/saa7185.c
@@ -34,23 +34,23 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
-#include <linux/types.h>
+#include <asm/uaccess.h>
 
 #include <linux/videodev.h>
-#include <asm/uaccess.h>
+#include <linux/video_encoder.h>
 
 MODULE_DESCRIPTION("Philips SAA7185 video encoder driver");
 MODULE_AUTHOR("Dave Perks");
 MODULE_LICENSE("GPL");
 
-#include <linux/i2c.h>
 
 #define I2C_NAME(s) (s)->name
 
-#include <linux/video_encoder.h>
 
 static int debug = 0;
 module_param(debug, int, 0);
diff --git a/drivers/media/video/sn9c102/sn9c102.h b/drivers/media/video/sn9c102/sn9c102.h
index 11fcb49..2e3c3de 100644
--- a/drivers/media/video/sn9c102/sn9c102.h
+++ b/drivers/media/video/sn9c102/sn9c102.h
@@ -36,6 +36,7 @@
 #include <linux/mutex.h>
 #include <linux/string.h>
 #include <linux/stddef.h>
+#include <linux/kref.h>
 
 #include "sn9c102_config.h"
 #include "sn9c102_sensor.h"
@@ -94,7 +95,7 @@
 };
 
 static DEFINE_MUTEX(sn9c102_sysfs_lock);
-static DECLARE_RWSEM(sn9c102_disconnect);
+static DECLARE_RWSEM(sn9c102_dev_lock);
 
 struct sn9c102_device {
 	struct video_device* v4ldev;
@@ -122,12 +123,14 @@
 
 	struct sn9c102_module_param module_param;
 
+	struct kref kref;
 	enum sn9c102_dev_state state;
 	u8 users;
 
-	struct mutex dev_mutex, fileop_mutex;
+	struct completion probe;
+	struct mutex open_mutex, fileop_mutex;
 	spinlock_t queue_lock;
-	wait_queue_head_t open, wait_frame, wait_stream;
+	wait_queue_head_t wait_open, wait_frame, wait_stream;
 };
 
 /*****************************************************************************/
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 74a204f..36d8a455 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -48,8 +48,8 @@
 #define SN9C102_MODULE_AUTHOR   "(C) 2004-2007 Luca Risolia"
 #define SN9C102_AUTHOR_EMAIL    "<luca.risolia@studio.unibo.it>"
 #define SN9C102_MODULE_LICENSE  "GPL"
-#define SN9C102_MODULE_VERSION  "1:1.44"
-#define SN9C102_MODULE_VERSION_CODE  KERNEL_VERSION(1, 1, 44)
+#define SN9C102_MODULE_VERSION  "1:1.47"
+#define SN9C102_MODULE_VERSION_CODE  KERNEL_VERSION(1, 1, 47)
 
 /*****************************************************************************/
 
@@ -64,9 +64,10 @@
 static short video_nr[] = {[0 ... SN9C102_MAX_DEVICES-1] = -1};
 module_param_array(video_nr, short, NULL, 0444);
 MODULE_PARM_DESC(video_nr,
-		 "\n<-1|n[,...]> Specify V4L2 minor mode number."
-		 "\n -1 = use next available (default)"
-		 "\n  n = use minor number n (integer >= 0)"
+		 " <-1|n[,...]>"
+		 "\nSpecify V4L2 minor mode number."
+		 "\n-1 = use next available (default)"
+		 "\n n = use minor number n (integer >= 0)"
 		 "\nYou can specify up to "__MODULE_STRING(SN9C102_MAX_DEVICES)
 		 " cameras this way."
 		 "\nFor example:"
@@ -79,13 +80,14 @@
 			       SN9C102_FORCE_MUNMAP};
 module_param_array(force_munmap, bool, NULL, 0444);
 MODULE_PARM_DESC(force_munmap,
-		 "\n<0|1[,...]> Force the application to unmap previously"
+		 " <0|1[,...]>"
+		 "\nForce the application to unmap previously"
 		 "\nmapped buffer memory before calling any VIDIOC_S_CROP or"
 		 "\nVIDIOC_S_FMT ioctl's. Not all the applications support"
 		 "\nthis feature. This parameter is specific for each"
 		 "\ndetected camera."
-		 "\n 0 = do not force memory unmapping"
-		 "\n 1 = force memory unmapping (save memory)"
+		 "\n0 = do not force memory unmapping"
+		 "\n1 = force memory unmapping (save memory)"
 		 "\nDefault value is "__MODULE_STRING(SN9C102_FORCE_MUNMAP)"."
 		 "\n");
 
@@ -93,7 +95,8 @@
 				       SN9C102_FRAME_TIMEOUT};
 module_param_array(frame_timeout, uint, NULL, 0644);
 MODULE_PARM_DESC(frame_timeout,
-		 "\n<0|n[,...]> Timeout for a video frame in seconds before"
+		 " <0|n[,...]>"
+		 "\nTimeout for a video frame in seconds before"
 		 "\nreturning an I/O error; 0 for infinity."
 		 "\nThis parameter is specific for each detected camera."
 		 "\nDefault value is "__MODULE_STRING(SN9C102_FRAME_TIMEOUT)"."
@@ -103,7 +106,8 @@
 static unsigned short debug = SN9C102_DEBUG_LEVEL;
 module_param(debug, ushort, 0644);
 MODULE_PARM_DESC(debug,
-		 "\n<n> Debugging information level, from 0 to 3:"
+		 " <n>"
+		 "\nDebugging information level, from 0 to 3:"
 		 "\n0 = none (use carefully)"
 		 "\n1 = critical errors"
 		 "\n2 = significant informations"
@@ -1616,7 +1620,8 @@
 	int err = 0;
 
 	if (!(cam->state & DEV_INITIALIZED)) {
-		init_waitqueue_head(&cam->open);
+		mutex_init(&cam->open_mutex);
+		init_waitqueue_head(&cam->wait_open);
 		qctrl = s->qctrl;
 		rect = &(s->cropcap.defrect);
 	} else { /* use current values */
@@ -1706,21 +1711,27 @@
 	return 0;
 }
 
+/*****************************************************************************/
 
-static void sn9c102_release_resources(struct sn9c102_device* cam)
+static void sn9c102_release_resources(struct kref *kref)
 {
+	struct sn9c102_device *cam;
+
 	mutex_lock(&sn9c102_sysfs_lock);
 
+	cam = container_of(kref, struct sn9c102_device, kref);
+
 	DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor);
 	video_set_drvdata(cam->v4ldev, NULL);
 	video_unregister_device(cam->v4ldev);
+	usb_put_dev(cam->usbdev);
+	kfree(cam->control_buffer);
+	kfree(cam);
 
 	mutex_unlock(&sn9c102_sysfs_lock);
 
-	kfree(cam->control_buffer);
 }
 
-/*****************************************************************************/
 
 static int sn9c102_open(struct inode* inode, struct file* filp)
 {
@@ -1728,43 +1739,78 @@
 	int err = 0;
 
 	/*
-	   This is the only safe way to prevent race conditions with
-	   disconnect
+	   A read_trylock() in open() is the only safe way to prevent race
+	   conditions with disconnect(), one close() and multiple (not
+	   necessarily simultaneous) attempts to open(). For example, it
+	   prevents from waiting for a second access, while the device
+	   structure is being deallocated, after a possible disconnect() and
+	   during a following close() holding the write lock: given that, after
+	   this deallocation, no access will be possible anymore, using the
+	   non-trylock version would have let open() gain the access to the
+	   device structure improperly.
+	   For this reason the lock must also not be per-device.
 	*/
-	if (!down_read_trylock(&sn9c102_disconnect))
+	if (!down_read_trylock(&sn9c102_dev_lock))
 		return -ERESTARTSYS;
 
 	cam = video_get_drvdata(video_devdata(filp));
 
-	if (mutex_lock_interruptible(&cam->dev_mutex)) {
-		up_read(&sn9c102_disconnect);
+	if (wait_for_completion_interruptible(&cam->probe)) {
+		up_read(&sn9c102_dev_lock);
 		return -ERESTARTSYS;
 	}
 
+	kref_get(&cam->kref);
+
+	/*
+	    Make sure to isolate all the simultaneous opens.
+	*/
+	if (mutex_lock_interruptible(&cam->open_mutex)) {
+		kref_put(&cam->kref, sn9c102_release_resources);
+		up_read(&sn9c102_dev_lock);
+		return -ERESTARTSYS;
+	}
+
+	if (cam->state & DEV_DISCONNECTED) {
+		DBG(1, "Device not present");
+		err = -ENODEV;
+		goto out;
+	}
+
 	if (cam->users) {
-		DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->minor);
+		DBG(2, "Device /dev/video%d is already in use",
+		       cam->v4ldev->minor);
 		DBG(3, "Simultaneous opens are not supported");
+		/*
+		   open() must follow the open flags and should block
+		   eventually while the device is in use.
+		*/
 		if ((filp->f_flags & O_NONBLOCK) ||
 		    (filp->f_flags & O_NDELAY)) {
 			err = -EWOULDBLOCK;
 			goto out;
 		}
-		mutex_unlock(&cam->dev_mutex);
-		err = wait_event_interruptible_exclusive(cam->open,
-						  cam->state & DEV_DISCONNECTED
+		DBG(2, "A blocking open() has been requested. Wait for the "
+		       "device to be released...");
+		up_read(&sn9c102_dev_lock);
+		/*
+		   We will not release the "open_mutex" lock, so that only one
+		   will be sleeping while holding the lock, without losing its
+		   will be sleeping while holding the lock, without loosing its
+		   priority after any wake_up().
+		*/
+		err = wait_event_interruptible_exclusive(cam->wait_open,
+						(cam->state & DEV_DISCONNECTED)
 							 || !cam->users);
-		if (err) {
-			up_read(&sn9c102_disconnect);
-			return err;
-		}
+		down_read(&sn9c102_dev_lock);
+		if (err)
+			goto out;
 		if (cam->state & DEV_DISCONNECTED) {
-			up_read(&sn9c102_disconnect);
-			return -ENODEV;
+			err = -ENODEV;
+			goto out;
 		}
-		mutex_lock(&cam->dev_mutex);
 	}
 
-
 	if (cam->state & DEV_MISCONFIGURED) {
 		err = sn9c102_init(cam);
 		if (err) {
@@ -1789,36 +1835,33 @@
 	DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor);
 
 out:
-	mutex_unlock(&cam->dev_mutex);
-	up_read(&sn9c102_disconnect);
+	mutex_unlock(&cam->open_mutex);
+	if (err)
+		kref_put(&cam->kref, sn9c102_release_resources);
+
+	up_read(&sn9c102_dev_lock);
 	return err;
 }
 
 
 static int sn9c102_release(struct inode* inode, struct file* filp)
 {
-	struct sn9c102_device* cam = video_get_drvdata(video_devdata(filp));
+	struct sn9c102_device* cam;
 
-	mutex_lock(&cam->dev_mutex); /* prevent disconnect() to be called */
+	down_write(&sn9c102_dev_lock);
+
+	cam = video_get_drvdata(video_devdata(filp));
 
 	sn9c102_stop_transfer(cam);
-
 	sn9c102_release_buffers(cam);
-
-	if (cam->state & DEV_DISCONNECTED) {
-		sn9c102_release_resources(cam);
-		usb_put_dev(cam->usbdev);
-		mutex_unlock(&cam->dev_mutex);
-		kfree(cam);
-		return 0;
-	}
-
 	cam->users--;
-	wake_up_interruptible_nr(&cam->open, 1);
+	wake_up_interruptible_nr(&cam->wait_open, 1);
 
 	DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor);
 
-	mutex_unlock(&cam->dev_mutex);
+	kref_put(&cam->kref, sn9c102_release_resources);
+
+	up_write(&sn9c102_dev_lock);
 
 	return 0;
 }
@@ -2085,7 +2128,6 @@
 
 	vma->vm_ops = &sn9c102_vm_ops;
 	vma->vm_private_data = &cam->frame[i];
-
 	sn9c102_vm_open(vma);
 
 	mutex_unlock(&cam->fileop_mutex);
@@ -3215,8 +3257,6 @@
 		goto fail;
 	}
 
-	mutex_init(&cam->dev_mutex);
-
 	r = sn9c102_read_reg(cam, 0x00);
 	if (r < 0 || (r != 0x10 && r != 0x11 && r != 0x12)) {
 		DBG(1, "Sorry, this is not a SN9C1xx-based camera "
@@ -3282,7 +3322,7 @@
 	cam->v4ldev->release = video_device_release;
 	video_set_drvdata(cam->v4ldev, cam);
 
-	mutex_lock(&cam->dev_mutex);
+	init_completion(&cam->probe);
 
 	err = video_register_device(cam->v4ldev, VFL_TYPE_GRABBER,
 				    video_nr[dev_nr]);
@@ -3292,7 +3332,7 @@
 			DBG(1, "Free /dev/videoX node not found");
 		video_nr[dev_nr] = -1;
 		dev_nr = (dev_nr < SN9C102_MAX_DEVICES-1) ? dev_nr+1 : 0;
-		mutex_unlock(&cam->dev_mutex);
+		complete_all(&cam->probe);
 		goto fail;
 	}
 
@@ -3318,8 +3358,10 @@
 #endif
 
 	usb_set_intfdata(intf, cam);
+	kref_init(&cam->kref);
+	usb_get_dev(cam->usbdev);
 
-	mutex_unlock(&cam->dev_mutex);
+	complete_all(&cam->probe);
 
 	return 0;
 
@@ -3336,40 +3378,31 @@
 
 static void sn9c102_usb_disconnect(struct usb_interface* intf)
 {
-	struct sn9c102_device* cam = usb_get_intfdata(intf);
+	struct sn9c102_device* cam;
 
-	if (!cam)
-		return;
+	down_write(&sn9c102_dev_lock);
 
-	down_write(&sn9c102_disconnect);
-
-	mutex_lock(&cam->dev_mutex);
+	cam = usb_get_intfdata(intf);
 
 	DBG(2, "Disconnecting %s...", cam->v4ldev->name);
 
-	wake_up_interruptible_all(&cam->open);
-
 	if (cam->users) {
 		DBG(2, "Device /dev/video%d is open! Deregistration and "
-		       "memory deallocation are deferred on close.",
+		       "memory deallocation are deferred.",
 		    cam->v4ldev->minor);
 		cam->state |= DEV_MISCONFIGURED;
 		sn9c102_stop_transfer(cam);
 		cam->state |= DEV_DISCONNECTED;
 		wake_up_interruptible(&cam->wait_frame);
 		wake_up(&cam->wait_stream);
-		usb_get_dev(cam->usbdev);
-	} else {
+	} else
 		cam->state |= DEV_DISCONNECTED;
-		sn9c102_release_resources(cam);
-	}
 
-	mutex_unlock(&cam->dev_mutex);
+	wake_up_interruptible_all(&cam->wait_open);
 
-	if (!cam->users)
-		kfree(cam);
+	kref_put(&cam->kref, sn9c102_release_resources);
 
-	up_write(&sn9c102_disconnect);
+	up_write(&sn9c102_dev_lock);
 }
 
 
diff --git a/drivers/media/video/sn9c102/sn9c102_ov7630.c b/drivers/media/video/sn9c102/sn9c102_ov7630.c
index e683234..e4856fd 100644
--- a/drivers/media/video/sn9c102/sn9c102_ov7630.c
+++ b/drivers/media/video/sn9c102/sn9c102_ov7630.c
@@ -104,6 +104,145 @@
 		err += sn9c102_i2c_write(cam, 0x74, 0x21);
 		err += sn9c102_i2c_write(cam, 0x7d, 0xf7);
 		break;
+	case BRIDGE_SN9C105:
+	case BRIDGE_SN9C120:
+	err = sn9c102_write_const_regs(cam, {0x40, 0x02}, {0x00, 0x03},
+				       {0x1a, 0x04}, {0x03, 0x10},
+				       {0x0a, 0x14}, {0xe2, 0x17},
+				       {0x0b, 0x18}, {0x00, 0x19},
+				       {0x1d, 0x1a}, {0x10, 0x1b},
+				       {0x02, 0x1c}, {0x03, 0x1d},
+				       {0x0f, 0x1e}, {0x0c, 0x1f},
+				       {0x00, 0x20}, {0x24, 0x21},
+				       {0x3b, 0x22}, {0x47, 0x23},
+				       {0x60, 0x24}, {0x71, 0x25},
+				       {0x80, 0x26}, {0x8f, 0x27},
+				       {0x9d, 0x28}, {0xaa, 0x29},
+				       {0xb8, 0x2a}, {0xc4, 0x2b},
+				       {0xd1, 0x2c}, {0xdd, 0x2d},
+				       {0xe8, 0x2e}, {0xf4, 0x2f},
+				       {0xff, 0x30}, {0x00, 0x3f},
+				       {0xc7, 0x40}, {0x01, 0x41},
+				       {0x44, 0x42}, {0x00, 0x43},
+				       {0x44, 0x44}, {0x00, 0x45},
+				       {0x44, 0x46}, {0x00, 0x47},
+				       {0xc7, 0x48}, {0x01, 0x49},
+				       {0xc7, 0x4a}, {0x01, 0x4b},
+				       {0xc7, 0x4c}, {0x01, 0x4d},
+				       {0x44, 0x4e}, {0x00, 0x4f},
+				       {0x44, 0x50}, {0x00, 0x51},
+				       {0x44, 0x52}, {0x00, 0x53},
+				       {0xc7, 0x54}, {0x01, 0x55},
+				       {0xc7, 0x56}, {0x01, 0x57},
+				       {0xc7, 0x58}, {0x01, 0x59},
+				       {0x44, 0x5a}, {0x00, 0x5b},
+				       {0x44, 0x5c}, {0x00, 0x5d},
+				       {0x44, 0x5e}, {0x00, 0x5f},
+				       {0xc7, 0x60}, {0x01, 0x61},
+				       {0xc7, 0x62}, {0x01, 0x63},
+				       {0xc7, 0x64}, {0x01, 0x65},
+				       {0x44, 0x66}, {0x00, 0x67},
+				       {0x44, 0x68}, {0x00, 0x69},
+				       {0x44, 0x6a}, {0x00, 0x6b},
+				       {0xc7, 0x6c}, {0x01, 0x6d},
+				       {0xc7, 0x6e}, {0x01, 0x6f},
+				       {0xc7, 0x70}, {0x01, 0x71},
+				       {0x44, 0x72}, {0x00, 0x73},
+				       {0x44, 0x74}, {0x00, 0x75},
+				       {0x44, 0x76}, {0x00, 0x77},
+				       {0xc7, 0x78}, {0x01, 0x79},
+				       {0xc7, 0x7a}, {0x01, 0x7b},
+				       {0xc7, 0x7c}, {0x01, 0x7d},
+				       {0x44, 0x7e}, {0x00, 0x7f},
+				       {0x17, 0x84}, {0x00, 0x85},
+				       {0x2e, 0x86}, {0x00, 0x87},
+				       {0x09, 0x88}, {0x00, 0x89},
+				       {0xe8, 0x8a}, {0x0f, 0x8b},
+				       {0xda, 0x8c}, {0x0f, 0x8d},
+				       {0x40, 0x8e}, {0x00, 0x8f},
+				       {0x37, 0x90}, {0x00, 0x91},
+				       {0xcf, 0x92}, {0x0f, 0x93},
+				       {0xfa, 0x94}, {0x0f, 0x95},
+				       {0x00, 0x96}, {0x00, 0x97},
+				       {0x00, 0x98}, {0x66, 0x99},
+				       {0x00, 0x9a}, {0x40, 0x9b},
+				       {0x20, 0x9c}, {0x00, 0x9d},
+				       {0x00, 0x9e}, {0x00, 0x9f},
+				       {0x2d, 0xc0}, {0x2d, 0xc1},
+				       {0x3a, 0xc2}, {0x00, 0xc3},
+				       {0x04, 0xc4}, {0x3f, 0xc5},
+				       {0x00, 0xc6}, {0x00, 0xc7},
+				       {0x50, 0xc8}, {0x3c, 0xc9},
+				       {0x28, 0xca}, {0xd8, 0xcb},
+				       {0x14, 0xcc}, {0xec, 0xcd},
+				       {0x32, 0xce}, {0xdd, 0xcf},
+				       {0x32, 0xd0}, {0xdd, 0xd1},
+				       {0x6a, 0xd2}, {0x50, 0xd3},
+				       {0x60, 0xd4}, {0x00, 0xd5},
+				       {0x00, 0xd6});
+
+		err += sn9c102_i2c_write(cam, 0x12, 0x80);
+		err += sn9c102_i2c_write(cam, 0x12, 0x48);
+		err += sn9c102_i2c_write(cam, 0x01, 0x80);
+		err += sn9c102_i2c_write(cam, 0x02, 0x80);
+		err += sn9c102_i2c_write(cam, 0x03, 0x80);
+		err += sn9c102_i2c_write(cam, 0x04, 0x10);
+		err += sn9c102_i2c_write(cam, 0x05, 0x20);
+		err += sn9c102_i2c_write(cam, 0x06, 0x80);
+		err += sn9c102_i2c_write(cam, 0x11, 0x00);
+		err += sn9c102_i2c_write(cam, 0x0c, 0x20);
+		err += sn9c102_i2c_write(cam, 0x0d, 0x20);
+		err += sn9c102_i2c_write(cam, 0x15, 0x80);
+		err += sn9c102_i2c_write(cam, 0x16, 0x03);
+		err += sn9c102_i2c_write(cam, 0x17, 0x1b);
+		err += sn9c102_i2c_write(cam, 0x18, 0xbd);
+		err += sn9c102_i2c_write(cam, 0x19, 0x05);
+		err += sn9c102_i2c_write(cam, 0x1a, 0xf6);
+		err += sn9c102_i2c_write(cam, 0x1b, 0x04);
+		err += sn9c102_i2c_write(cam, 0x21, 0x1b);
+		err += sn9c102_i2c_write(cam, 0x22, 0x00);
+		err += sn9c102_i2c_write(cam, 0x23, 0xde);
+		err += sn9c102_i2c_write(cam, 0x24, 0x10);
+		err += sn9c102_i2c_write(cam, 0x25, 0x8a);
+		err += sn9c102_i2c_write(cam, 0x26, 0xa0);
+		err += sn9c102_i2c_write(cam, 0x27, 0xca);
+		err += sn9c102_i2c_write(cam, 0x28, 0xa2);
+		err += sn9c102_i2c_write(cam, 0x29, 0x74);
+		err += sn9c102_i2c_write(cam, 0x2a, 0x88);
+		err += sn9c102_i2c_write(cam, 0x2b, 0x34);
+		err += sn9c102_i2c_write(cam, 0x2c, 0x88);
+		err += sn9c102_i2c_write(cam, 0x2e, 0x00);
+		err += sn9c102_i2c_write(cam, 0x2f, 0x00);
+		err += sn9c102_i2c_write(cam, 0x30, 0x00);
+		err += sn9c102_i2c_write(cam, 0x32, 0xc2);
+		err += sn9c102_i2c_write(cam, 0x33, 0x08);
+		err += sn9c102_i2c_write(cam, 0x4c, 0x40);
+		err += sn9c102_i2c_write(cam, 0x4d, 0xf3);
+		err += sn9c102_i2c_write(cam, 0x60, 0x05);
+		err += sn9c102_i2c_write(cam, 0x61, 0x40);
+		err += sn9c102_i2c_write(cam, 0x62, 0x12);
+		err += sn9c102_i2c_write(cam, 0x63, 0x57);
+		err += sn9c102_i2c_write(cam, 0x64, 0x73);
+		err += sn9c102_i2c_write(cam, 0x65, 0x00);
+		err += sn9c102_i2c_write(cam, 0x66, 0x55);
+		err += sn9c102_i2c_write(cam, 0x67, 0x01);
+		err += sn9c102_i2c_write(cam, 0x68, 0xac);
+		err += sn9c102_i2c_write(cam, 0x69, 0x38);
+		err += sn9c102_i2c_write(cam, 0x6f, 0x1f);
+		err += sn9c102_i2c_write(cam, 0x70, 0x01);
+		err += sn9c102_i2c_write(cam, 0x71, 0x00);
+		err += sn9c102_i2c_write(cam, 0x72, 0x10);
+		err += sn9c102_i2c_write(cam, 0x73, 0x50);
+		err += sn9c102_i2c_write(cam, 0x74, 0x20);
+		err += sn9c102_i2c_write(cam, 0x76, 0x01);
+		err += sn9c102_i2c_write(cam, 0x77, 0xf3);
+		err += sn9c102_i2c_write(cam, 0x78, 0x90);
+		err += sn9c102_i2c_write(cam, 0x79, 0x98);
+		err += sn9c102_i2c_write(cam, 0x7a, 0x98);
+		err += sn9c102_i2c_write(cam, 0x7b, 0x00);
+		err += sn9c102_i2c_write(cam, 0x7c, 0x38);
+		err += sn9c102_i2c_write(cam, 0x7d, 0xff);
+		break;
 	default:
 		break;
 	}
@@ -115,6 +254,7 @@
 static int ov7630_get_ctrl(struct sn9c102_device* cam,
 			   struct v4l2_control* ctrl)
 {
+	enum sn9c102_bridge bridge = sn9c102_get_bridge(cam);
 	int err = 0;
 
 	switch (ctrl->id) {
@@ -123,13 +263,20 @@
 			return -EIO;
 		break;
 	case V4L2_CID_RED_BALANCE:
-		ctrl->value = sn9c102_pread_reg(cam, 0x07);
+		if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120)
+			ctrl->value = sn9c102_pread_reg(cam, 0x05);
+		else
+			ctrl->value = sn9c102_pread_reg(cam, 0x07);
 		break;
 	case V4L2_CID_BLUE_BALANCE:
 		ctrl->value = sn9c102_pread_reg(cam, 0x06);
 		break;
 	case SN9C102_V4L2_CID_GREEN_BALANCE:
-		ctrl->value = sn9c102_pread_reg(cam, 0x05);
+		if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120)
+			ctrl->value = sn9c102_pread_reg(cam, 0x07);
+		else
+			ctrl->value = sn9c102_pread_reg(cam, 0x05);
 		break;
 	case V4L2_CID_GAIN:
 		if ((ctrl->value = sn9c102_i2c_read(cam, 0x00)) < 0)
@@ -177,6 +324,7 @@
 static int ov7630_set_ctrl(struct sn9c102_device* cam,
 			   const struct v4l2_control* ctrl)
 {
+	enum sn9c102_bridge bridge = sn9c102_get_bridge(cam);
 	int err = 0;
 
 	switch (ctrl->id) {
@@ -184,13 +332,19 @@
 		err += sn9c102_i2c_write(cam, 0x10, ctrl->value);
 		break;
 	case V4L2_CID_RED_BALANCE:
-		err += sn9c102_write_reg(cam, ctrl->value, 0x07);
+		if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120)
+			err += sn9c102_write_reg(cam, ctrl->value, 0x05);
+		else
+			err += sn9c102_write_reg(cam, ctrl->value, 0x07);
 		break;
 	case V4L2_CID_BLUE_BALANCE:
 		err += sn9c102_write_reg(cam, ctrl->value, 0x06);
 		break;
 	case SN9C102_V4L2_CID_GREEN_BALANCE:
-		err += sn9c102_write_reg(cam, ctrl->value, 0x05);
+		if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120)
+			err += sn9c102_write_reg(cam, ctrl->value, 0x07);
+		else
+			err += sn9c102_write_reg(cam, ctrl->value, 0x05);
 		break;
 	case V4L2_CID_GAIN:
 		err += sn9c102_i2c_write(cam, 0x00, ctrl->value);
@@ -227,8 +381,21 @@
 {
 	struct sn9c102_sensor* s = sn9c102_get_sensor(cam);
 	int err = 0;
-	u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 1,
-	   v_start = (u8)(rect->top - s->cropcap.bounds.top) + 1;
+	u8 h_start = 0, v_start = (u8)(rect->top - s->cropcap.bounds.top) + 1;
+
+	switch (sn9c102_get_bridge(cam)) {
+	case BRIDGE_SN9C101:
+	case BRIDGE_SN9C102:
+	case BRIDGE_SN9C103:
+		h_start = (u8)(rect->left - s->cropcap.bounds.left) + 1;
+		break;
+	case BRIDGE_SN9C105:
+	case BRIDGE_SN9C120:
+		h_start = (u8)(rect->left - s->cropcap.bounds.left) + 4;
+		break;
+	default:
+		break;
+	}
 
 	err += sn9c102_write_reg(cam, h_start, 0x12);
 	err += sn9c102_write_reg(cam, v_start, 0x13);
@@ -242,10 +409,28 @@
 {
 	int err = 0;
 
-	if (pix->pixelformat == V4L2_PIX_FMT_SN9C10X)
-		err += sn9c102_write_reg(cam, 0x20, 0x19);
-	else
-		err += sn9c102_write_reg(cam, 0x50, 0x19);
+	switch (sn9c102_get_bridge(cam)) {
+	case BRIDGE_SN9C101:
+	case BRIDGE_SN9C102:
+	case BRIDGE_SN9C103:
+		if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8)
+			err += sn9c102_write_reg(cam, 0x50, 0x19);
+		else
+			err += sn9c102_write_reg(cam, 0x20, 0x19);
+		break;
+	case BRIDGE_SN9C105:
+	case BRIDGE_SN9C120:
+		if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8) {
+			err += sn9c102_write_reg(cam, 0xe5, 0x17);
+			err += sn9c102_i2c_write(cam, 0x11, 0x04);
+		} else {
+			err += sn9c102_write_reg(cam, 0xe2, 0x17);
+			err += sn9c102_i2c_write(cam, 0x11, 0x02);
+		}
+		break;
+	default:
+		break;
+	}
 
 	return err;
 }
@@ -254,7 +439,8 @@
 static const struct sn9c102_sensor ov7630 = {
 	.name = "OV7630",
 	.maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>",
-	.supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102 | BRIDGE_SN9C103,
+	.supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102 | BRIDGE_SN9C103 |
+			    BRIDGE_SN9C105 | BRIDGE_SN9C120,
 	.sysfs_ops = SN9C102_I2C_READ | SN9C102_I2C_WRITE,
 	.frequency = SN9C102_I2C_100KHZ,
 	.interface = SN9C102_I2C_2WIRES,
@@ -417,6 +603,12 @@
 			err += sn9c102_write_const_regs(cam, {0x01, 0x01},
 							{0x00, 0x01});
 		break;
+	case BRIDGE_SN9C105:
+	case BRIDGE_SN9C120:
+		err = sn9c102_write_const_regs(cam, {0x01, 0xf1}, {0x00, 0xf1},
+					       {0x29, 0x01}, {0x74, 0x02},
+					       {0x0e, 0x01}, {0x44, 0x01});
+		break;
 	default:
 		break;
 	}
diff --git a/drivers/media/video/sn9c102/sn9c102_ov7660.c b/drivers/media/video/sn9c102/sn9c102_ov7660.c
index 4b64740..8aae416 100644
--- a/drivers/media/video/sn9c102/sn9c102_ov7660.c
+++ b/drivers/media/video/sn9c102/sn9c102_ov7660.c
@@ -41,65 +41,65 @@
 				       {0xbb, 0x2a}, {0xc7, 0x2b},
 				       {0xd3, 0x2c}, {0xde, 0x2d},
 				       {0xea, 0x2e}, {0xf4, 0x2f},
-				       {0xff, 0x30}, {0x00, 0x3F},
-				       {0xC7, 0x40}, {0x01, 0x41},
+				       {0xff, 0x30}, {0x00, 0x3f},
+				       {0xc7, 0x40}, {0x01, 0x41},
 				       {0x44, 0x42}, {0x00, 0x43},
 				       {0x44, 0x44}, {0x00, 0x45},
 				       {0x44, 0x46}, {0x00, 0x47},
-				       {0xC7, 0x48}, {0x01, 0x49},
-				       {0xC7, 0x4A}, {0x01, 0x4B},
-				       {0xC7, 0x4C}, {0x01, 0x4D},
-				       {0x44, 0x4E}, {0x00, 0x4F},
+				       {0xc7, 0x48}, {0x01, 0x49},
+				       {0xc7, 0x4a}, {0x01, 0x4b},
+				       {0xc7, 0x4c}, {0x01, 0x4d},
+				       {0x44, 0x4e}, {0x00, 0x4f},
 				       {0x44, 0x50}, {0x00, 0x51},
 				       {0x44, 0x52}, {0x00, 0x53},
-				       {0xC7, 0x54}, {0x01, 0x55},
-				       {0xC7, 0x56}, {0x01, 0x57},
-				       {0xC7, 0x58}, {0x01, 0x59},
-				       {0x44, 0x5A}, {0x00, 0x5B},
-				       {0x44, 0x5C}, {0x00, 0x5D},
-				       {0x44, 0x5E}, {0x00, 0x5F},
-				       {0xC7, 0x60}, {0x01, 0x61},
-				       {0xC7, 0x62}, {0x01, 0x63},
-				       {0xC7, 0x64}, {0x01, 0x65},
+				       {0xc7, 0x54}, {0x01, 0x55},
+				       {0xc7, 0x56}, {0x01, 0x57},
+				       {0xc7, 0x58}, {0x01, 0x59},
+				       {0x44, 0x5a}, {0x00, 0x5b},
+				       {0x44, 0x5c}, {0x00, 0x5d},
+				       {0x44, 0x5e}, {0x00, 0x5f},
+				       {0xc7, 0x60}, {0x01, 0x61},
+				       {0xc7, 0x62}, {0x01, 0x63},
+				       {0xc7, 0x64}, {0x01, 0x65},
 				       {0x44, 0x66}, {0x00, 0x67},
 				       {0x44, 0x68}, {0x00, 0x69},
-				       {0x44, 0x6A}, {0x00, 0x6B},
-				       {0xC7, 0x6C}, {0x01, 0x6D},
-				       {0xC7, 0x6E}, {0x01, 0x6F},
-				       {0xC7, 0x70}, {0x01, 0x71},
+				       {0x44, 0x6a}, {0x00, 0x6b},
+				       {0xc7, 0x6c}, {0x01, 0x6d},
+				       {0xc7, 0x6e}, {0x01, 0x6f},
+				       {0xc7, 0x70}, {0x01, 0x71},
 				       {0x44, 0x72}, {0x00, 0x73},
 				       {0x44, 0x74}, {0x00, 0x75},
 				       {0x44, 0x76}, {0x00, 0x77},
-				       {0xC7, 0x78}, {0x01, 0x79},
-				       {0xC7, 0x7A}, {0x01, 0x7B},
-				       {0xC7, 0x7C}, {0x01, 0x7D},
-				       {0x44, 0x7E}, {0x00, 0x7F},
+				       {0xc7, 0x78}, {0x01, 0x79},
+				       {0xc7, 0x7a}, {0x01, 0x7b},
+				       {0xc7, 0x7c}, {0x01, 0x7d},
+				       {0x44, 0x7e}, {0x00, 0x7f},
 				       {0x14, 0x84}, {0x00, 0x85},
 				       {0x27, 0x86}, {0x00, 0x87},
 				       {0x07, 0x88}, {0x00, 0x89},
-				       {0xEC, 0x8A}, {0x0f, 0x8B},
-				       {0xD8, 0x8C}, {0x0f, 0x8D},
-				       {0x3D, 0x8E}, {0x00, 0x8F},
-				       {0x3D, 0x90}, {0x00, 0x91},
-				       {0xCD, 0x92}, {0x0f, 0x93},
+				       {0xec, 0x8a}, {0x0f, 0x8b},
+				       {0xd8, 0x8c}, {0x0f, 0x8d},
+				       {0x3d, 0x8e}, {0x00, 0x8f},
+				       {0x3d, 0x90}, {0x00, 0x91},
+				       {0xcd, 0x92}, {0x0f, 0x93},
 				       {0xf7, 0x94}, {0x0f, 0x95},
-				       {0x0C, 0x96}, {0x00, 0x97},
+				       {0x0c, 0x96}, {0x00, 0x97},
 				       {0x00, 0x98}, {0x66, 0x99},
-				       {0x05, 0x9A}, {0x00, 0x9B},
-				       {0x04, 0x9C}, {0x00, 0x9D},
-				       {0x08, 0x9E}, {0x00, 0x9F},
-				       {0x2D, 0xC0}, {0x2D, 0xC1},
-				       {0x3A, 0xC2}, {0x05, 0xC3},
-				       {0x04, 0xC4}, {0x3F, 0xC5},
-				       {0x00, 0xC6}, {0x00, 0xC7},
-				       {0x50, 0xC8}, {0x3C, 0xC9},
-				       {0x28, 0xCA}, {0xD8, 0xCB},
-				       {0x14, 0xCC}, {0xEC, 0xCD},
-				       {0x32, 0xCE}, {0xDD, 0xCF},
-				       {0x32, 0xD0}, {0xDD, 0xD1},
-				       {0x6A, 0xD2}, {0x50, 0xD3},
-				       {0x00, 0xD4}, {0x00, 0xD5},
-				       {0x00, 0xD6});
+				       {0x05, 0x9a}, {0x00, 0x9b},
+				       {0x04, 0x9c}, {0x00, 0x9d},
+				       {0x08, 0x9e}, {0x00, 0x9f},
+				       {0x2d, 0xc0}, {0x2d, 0xc1},
+				       {0x3a, 0xc2}, {0x05, 0xc3},
+				       {0x04, 0xc4}, {0x3f, 0xc5},
+				       {0x00, 0xc6}, {0x00, 0xc7},
+				       {0x50, 0xc8}, {0x3c, 0xc9},
+				       {0x28, 0xca}, {0xd8, 0xcb},
+				       {0x14, 0xcc}, {0xec, 0xcd},
+				       {0x32, 0xce}, {0xdd, 0xcf},
+				       {0x32, 0xd0}, {0xdd, 0xd1},
+				       {0x6a, 0xd2}, {0x50, 0xd3},
+				       {0x00, 0xd4}, {0x00, 0xd5},
+				       {0x00, 0xd6});
 
 	err += sn9c102_i2c_write(cam, 0x12, 0x80);
 	err += sn9c102_i2c_write(cam, 0x11, 0x09);
diff --git a/drivers/media/video/stradis.c b/drivers/media/video/stradis.c
index 3e736be..eb22046 100644
--- a/drivers/media/video/stradis.c
+++ b/drivers/media/video/stradis.c
@@ -1321,7 +1321,7 @@
 			u32 format;
 			if (copy_from_user(&p, arg, sizeof(p)))
 				return -EFAULT;
-			if (p.palette < sizeof(palette2fmt) / sizeof(u32)) {
+			if (p.palette < ARRAY_SIZE(palette2fmt)) {
 				format = palette2fmt[p.palette];
 				saa->win.color_fmt = format;
 				saawrite(format | 0x60,
diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c
index bf3aa8d..4dc5bc7 100644
--- a/drivers/media/video/stv680.c
+++ b/drivers/media/video/stv680.c
@@ -715,8 +715,11 @@
 				   stv680_video_irq, stv680);
 		stv680->urb[i] = urb;
 		err = usb_submit_urb (stv680->urb[i], GFP_KERNEL);
-		if (err)
-			PDEBUG (0, "STV(e): urb burned down in start stream");
+		if (err) {
+			PDEBUG (0, "STV(e): urb burned down with err "
+				   "%d in start stream %d", err, i);
+			goto nomem_err;
+		}
 	}			/* i STV680_NUMSBUF */
 
 	stv680->framecount = 0;
diff --git a/drivers/media/video/tda8290.c b/drivers/media/video/tda8290.c
index 1a1bef0..59cff5a 100644
--- a/drivers/media/video/tda8290.c
+++ b/drivers/media/video/tda8290.c
@@ -21,7 +21,17 @@
 #include <linux/i2c.h>
 #include <linux/videodev.h>
 #include <linux/delay.h>
-#include <media/tuner.h>
+#include "tuner-driver.h"
+
+/* ---------------------------------------------------------------------- */
+
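+/* Driver-private tuner state, kept behind struct tuner's priv pointer. */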
+struct tda8290_priv {
+	unsigned char tda8290_easy_mode;
+	unsigned char tda827x_lpsel;
+	unsigned char tda827x_addr;
+	unsigned char tda827x_ver;
+	unsigned int sgIF;
+};
 
 /* ---------------------------------------------------------------------- */
 
@@ -76,7 +86,8 @@
 	u32 N;
 	int i;
 	struct tuner *t = i2c_get_clientdata(c);
-	struct i2c_msg msg = {.addr = t->tda827x_addr, .flags = 0};
+	struct tda8290_priv *priv = t->priv;
+	struct i2c_msg msg = {.addr = priv->tda827x_addr, .flags = 0};
 
 	if (t->mode == V4L2_TUNER_RADIO)
 		freq = freq / 1000;
@@ -95,7 +106,7 @@
 	tuner_reg[1] = (unsigned char)(N>>8);
 	tuner_reg[2] = (unsigned char) N;
 	tuner_reg[3] = 0x40;
-	tuner_reg[4] = 0x52 + (t->tda827x_lpsel << 5);
+	tuner_reg[4] = 0x52 + (priv->tda827x_lpsel << 5);
 	tuner_reg[5] = (tda827x_analog[i].spd   << 6) + (tda827x_analog[i].div1p5 <<5) +
 		       (tda827x_analog[i].bs     <<3) +  tda827x_analog[i].bp;
 	tuner_reg[6] = 0x8f + (tda827x_analog[i].gc3 << 4);
@@ -146,8 +157,9 @@
 static void tda827x_agcf(struct i2c_client *c)
 {
 	struct tuner *t = i2c_get_clientdata(c);
+	struct tda8290_priv *priv = t->priv;
 	unsigned char data[] = {0x80, 0x0c};
-	struct i2c_msg msg = {.addr = t->tda827x_addr, .buf = data,
+	struct i2c_msg msg = {.addr = priv->tda827x_addr, .buf = data,
 			      .flags = 0, .len = 2};
 	i2c_transfer(c->adapter, &msg, 1);
 }
@@ -234,7 +246,8 @@
 	u32 N;
 	int i;
 	struct tuner *t = i2c_get_clientdata(c);
-	struct i2c_msg msg = {.addr = t->tda827x_addr, .flags = 0, .buf = tuner_reg};
+	struct tda8290_priv *priv = t->priv;
+	struct i2c_msg msg = {.addr = priv->tda827x_addr, .flags = 0, .buf = tuner_reg};
 
 	tda827xa_lna_gain( c, 1);
 	msleep(10);
@@ -271,7 +284,7 @@
 	tuner_reg[1] = 0xff;
 	tuner_reg[2] = 0xe0;
 	tuner_reg[3] = 0;
-	tuner_reg[4] = 0x99 + (t->tda827x_lpsel << 1);
+	tuner_reg[4] = 0x99 + (priv->tda827x_lpsel << 1);
 	msg.len = 5;
 	i2c_transfer(c->adapter, &msg, 1);
 
@@ -311,15 +324,16 @@
 	i2c_transfer(c->adapter, &msg, 1);
 
 	tuner_reg[0] = 0xc0;
-	tuner_reg[1] = 0x19 + (t->tda827x_lpsel << 1);
+	tuner_reg[1] = 0x19 + (priv->tda827x_lpsel << 1);
 	i2c_transfer(c->adapter, &msg, 1);
 }
 
 static void tda827xa_agcf(struct i2c_client *c)
 {
 	struct tuner *t = i2c_get_clientdata(c);
+	struct tda8290_priv *priv = t->priv;
 	unsigned char data[] = {0x80, 0x2c};
-	struct i2c_msg msg = {.addr = t->tda827x_addr, .buf = data,
+	struct i2c_msg msg = {.addr = priv->tda827x_addr, .buf = data,
 			      .flags = 0, .len = 2};
 	i2c_transfer(c->adapter, &msg, 1);
 }
@@ -347,8 +361,9 @@
 static int tda8290_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
 {
 	struct tuner *t = i2c_get_clientdata(c);
+	struct tda8290_priv *priv = t->priv;
 	unsigned char soft_reset[]  = { 0x00, 0x00 };
-	unsigned char easy_mode[]   = { 0x01, t->tda8290_easy_mode };
+	unsigned char easy_mode[]   = { 0x01, priv->tda8290_easy_mode };
 	unsigned char expert_mode[] = { 0x01, 0x80 };
 	unsigned char agc_out_on[]  = { 0x02, 0x00 };
 	unsigned char gainset_off[] = { 0x28, 0x14 };
@@ -375,18 +390,18 @@
 	i2c_master_send(c, soft_reset, 2);
 	msleep(1);
 
-	expert_mode[1] = t->tda8290_easy_mode + 0x80;
+	expert_mode[1] = priv->tda8290_easy_mode + 0x80;
 	i2c_master_send(c, expert_mode, 2);
 	i2c_master_send(c, gainset_off, 2);
 	i2c_master_send(c, if_agc_spd, 2);
-	if (t->tda8290_easy_mode & 0x60)
+	if (priv->tda8290_easy_mode & 0x60)
 		i2c_master_send(c, adc_head_9, 2);
 	else
 		i2c_master_send(c, adc_head_6, 2);
 	i2c_master_send(c, pll_bw_nom, 2);
 
 	tda8290_i2c_bridge(c, 1);
-	if (t->tda827x_ver != 0)
+	if (priv->tda827x_ver != 0)
 		tda827xa_tune(c, ifc, freq);
 	else
 		tda827x_tune(c, ifc, freq);
@@ -418,7 +433,7 @@
 		if ((agc_stat > 115) || !(pll_stat & 0x80)) {
 			tuner_dbg("adjust gain, step 2. Agc: %d, lock: %d\n",
 				   agc_stat, pll_stat & 0x80);
-			if (t->tda827x_ver != 0)
+			if (priv->tda827x_ver != 0)
 				tda827xa_agcf(c);
 			else
 				tda827x_agcf(c);
@@ -437,7 +452,7 @@
 	}
 
 	/* l/ l' deadlock? */
-	if(t->tda8290_easy_mode & 0x60) {
+	if(priv->tda8290_easy_mode & 0x60) {
 		i2c_master_send(c, &addr_adc_sat, 1);
 		i2c_master_recv(c, &adc_sat, 1);
 		i2c_master_send(c, &addr_pll_stat, 1);
@@ -459,41 +474,42 @@
 
 static void set_audio(struct tuner *t)
 {
+	struct tda8290_priv *priv = t->priv;
 	char* mode;
 
-	t->tda827x_lpsel = 0;
+	priv->tda827x_lpsel = 0;
 	if (t->std & V4L2_STD_MN) {
-		t->sgIF = 92;
-		t->tda8290_easy_mode = 0x01;
-		t->tda827x_lpsel = 1;
+		priv->sgIF = 92;
+		priv->tda8290_easy_mode = 0x01;
+		priv->tda827x_lpsel = 1;
 		mode = "MN";
 	} else if (t->std & V4L2_STD_B) {
-		t->sgIF = 108;
-		t->tda8290_easy_mode = 0x02;
+		priv->sgIF = 108;
+		priv->tda8290_easy_mode = 0x02;
 		mode = "B";
 	} else if (t->std & V4L2_STD_GH) {
-		t->sgIF = 124;
-		t->tda8290_easy_mode = 0x04;
+		priv->sgIF = 124;
+		priv->tda8290_easy_mode = 0x04;
 		mode = "GH";
 	} else if (t->std & V4L2_STD_PAL_I) {
-		t->sgIF = 124;
-		t->tda8290_easy_mode = 0x08;
+		priv->sgIF = 124;
+		priv->tda8290_easy_mode = 0x08;
 		mode = "I";
 	} else if (t->std & V4L2_STD_DK) {
-		t->sgIF = 124;
-		t->tda8290_easy_mode = 0x10;
+		priv->sgIF = 124;
+		priv->tda8290_easy_mode = 0x10;
 		mode = "DK";
 	} else if (t->std & V4L2_STD_SECAM_L) {
-		t->sgIF = 124;
-		t->tda8290_easy_mode = 0x20;
+		priv->sgIF = 124;
+		priv->tda8290_easy_mode = 0x20;
 		mode = "L";
 	} else if (t->std & V4L2_STD_SECAM_LC) {
-		t->sgIF = 20;
-		t->tda8290_easy_mode = 0x40;
+		priv->sgIF = 20;
+		priv->tda8290_easy_mode = 0x40;
 		mode = "LC";
 	} else {
-		t->sgIF = 124;
-		t->tda8290_easy_mode = 0x10;
+		priv->sgIF = 124;
+		priv->tda8290_easy_mode = 0x10;
 		mode = "xx";
 	}
 	tuner_dbg("setting tda8290 to system %s\n", mode);
@@ -502,9 +518,10 @@
 static void set_tv_freq(struct i2c_client *c, unsigned int freq)
 {
 	struct tuner *t = i2c_get_clientdata(c);
+	struct tda8290_priv *priv = t->priv;
 
 	set_audio(t);
-	tda8290_tune(c, t->sgIF, freq);
+	tda8290_tune(c, priv->sgIF, freq);
 }
 
 static void set_radio_freq(struct i2c_client *c, unsigned int freq)
@@ -528,13 +545,14 @@
 static void standby(struct i2c_client *c)
 {
 	struct tuner *t = i2c_get_clientdata(c);
+	struct tda8290_priv *priv = t->priv;
 	unsigned char cb1[] = { 0x30, 0xD0 };
 	unsigned char tda8290_standby[] = { 0x00, 0x02 };
 	unsigned char tda8290_agc_tri[] = { 0x02, 0x20 };
-	struct i2c_msg msg = {.addr = t->tda827x_addr, .flags=0, .buf=cb1, .len = 2};
+	struct i2c_msg msg = {.addr = priv->tda827x_addr, .flags=0, .buf=cb1, .len = 2};
 
 	tda8290_i2c_bridge(c, 1);
-	if (t->tda827x_ver != 0)
+	if (priv->tda827x_ver != 0)
 		cb1[1] = 0x90;
 	i2c_transfer(c->adapter, &msg, 1);
 	tda8290_i2c_bridge(c, 0);
@@ -560,13 +578,14 @@
 static void tda8290_init_tuner(struct i2c_client *c)
 {
 	struct tuner *t = i2c_get_clientdata(c);
+	struct tda8290_priv *priv = t->priv;
 	unsigned char tda8275_init[]  = { 0x00, 0x00, 0x00, 0x40, 0xdC, 0x04, 0xAf,
 					  0x3F, 0x2A, 0x04, 0xFF, 0x00, 0x00, 0x40 };
 	unsigned char tda8275a_init[] = { 0x00, 0x00, 0x00, 0x00, 0xdC, 0x05, 0x8b,
 					  0x0c, 0x04, 0x20, 0xFF, 0x00, 0x00, 0x4b };
-	struct i2c_msg msg = {.addr = t->tda827x_addr, .flags=0,
+	struct i2c_msg msg = {.addr = priv->tda827x_addr, .flags=0,
 			      .buf=tda8275_init, .len = 14};
-	if (t->tda827x_ver != 0)
+	if (priv->tda827x_ver != 0)
 		msg.buf = tda8275a_init;
 
 	tda8290_i2c_bridge(c, 1);
@@ -576,14 +595,36 @@
 
 /*---------------------------------------------------------------------*/
 
+static void tda8290_release(struct i2c_client *c)
+{
+	struct tuner *t = i2c_get_clientdata(c);
+
+	kfree(t->priv);
+	t->priv = NULL;
+}
+
+static struct tuner_operations tda8290_tuner_ops = {
+	.set_tv_freq    = set_tv_freq,
+	.set_radio_freq = set_radio_freq,
+	.has_signal     = has_signal,
+	.standby        = standby,
+	.release        = tda8290_release,
+};
+
 int tda8290_init(struct i2c_client *c)
 {
+	struct tda8290_priv *priv = NULL;
 	struct tuner *t = i2c_get_clientdata(c);
 	u8 data;
 	int i, ret, tuners_found;
 	u32 tuner_addrs;
 	struct i2c_msg msg = {.flags=I2C_M_RD, .buf=&data, .len = 1};
 
+	priv = kzalloc(sizeof(struct tda8290_priv), GFP_KERNEL);
+	if (priv == NULL)
+		return -ENOMEM;
+	t->priv = priv;
+
 	tda8290_i2c_bridge(c, 1);
 	/* probe for tuner chip */
 	tuners_found = 0;
@@ -618,7 +659,7 @@
 		tuner_addrs = tuner_addrs & 0xff;
 		tuner_info ("setting tuner address to %x\n", tuner_addrs);
 	}
-	t->tda827x_addr = tuner_addrs;
+	priv->tda827x_addr = tuner_addrs;
 	msg.addr = tuner_addrs;
 
 	tda8290_i2c_bridge(c, 1);
@@ -627,18 +668,16 @@
 		tuner_warn ("TDA827x access failed!\n");
 	if ((data & 0x3c) == 0) {
 		strlcpy(c->name, "tda8290+75", sizeof(c->name));
-		t->tda827x_ver = 0;
+		priv->tda827x_ver = 0;
 	} else {
 		strlcpy(c->name, "tda8290+75a", sizeof(c->name));
-		t->tda827x_ver = 2;
+		priv->tda827x_ver = 2;
 	}
 	tuner_info("type set to %s\n", c->name);
 
-	t->set_tv_freq    = set_tv_freq;
-	t->set_radio_freq = set_radio_freq;
-	t->has_signal = has_signal;
-	t->standby = standby;
-	t->tda827x_lpsel = 0;
+	memcpy(&t->ops, &tda8290_tuner_ops, sizeof(struct tuner_operations));
+
+	priv->tda827x_lpsel = 0;
 	t->mode = V4L2_TUNER_ANALOG_TV;
 
 	tda8290_init_tuner(c);
diff --git a/drivers/media/video/tda9887.c b/drivers/media/video/tda9887.c
index fde576f..a8f7732 100644
--- a/drivers/media/video/tda9887.c
+++ b/drivers/media/video/tda9887.c
@@ -11,6 +11,7 @@
 
 #include <media/v4l2-common.h>
 #include <media/tuner.h>
+#include "tuner-driver.h"
 
 
 /* Chips:
@@ -29,6 +30,9 @@
 		printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.name, \
 			i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
 
+struct tda9887_priv {
+	unsigned char	   data[4];
+};
 
 /* ---------------------------------------------------------------------- */
 
@@ -508,10 +512,11 @@
 static void tda9887_configure(struct i2c_client *client)
 {
 	struct tuner *t = i2c_get_clientdata(client);
+	struct tda9887_priv *priv = t->priv;
 	int rc;
 
-	memset(t->tda9887_data,0,sizeof(t->tda9887_data));
-	tda9887_set_tvnorm(t,t->tda9887_data);
+	memset(priv->data,0,sizeof(priv->data));
+	tda9887_set_tvnorm(t,priv->data);
 
 	/* A note on the port settings:
 	   These settings tend to depend on the specifics of the board.
@@ -526,22 +531,22 @@
 	   the ports should be set to active (0), but, again, that may
 	   differ depending on the precise hardware configuration.
 	 */
-	t->tda9887_data[1] |= cOutputPort1Inactive;
-	t->tda9887_data[1] |= cOutputPort2Inactive;
+	priv->data[1] |= cOutputPort1Inactive;
+	priv->data[1] |= cOutputPort2Inactive;
 
-	tda9887_set_config(t,t->tda9887_data);
-	tda9887_set_insmod(t,t->tda9887_data);
+	tda9887_set_config(t,priv->data);
+	tda9887_set_insmod(t,priv->data);
 
 	if (t->mode == T_STANDBY) {
-		t->tda9887_data[1] |= cForcedMuteAudioON;
+		priv->data[1] |= cForcedMuteAudioON;
 	}
 
 	tda9887_dbg("writing: b=0x%02x c=0x%02x e=0x%02x\n",
-		t->tda9887_data[1],t->tda9887_data[2],t->tda9887_data[3]);
+		priv->data[1],priv->data[2],priv->data[3]);
 	if (tuner_debug > 1)
-		dump_write_message(t, t->tda9887_data);
+		dump_write_message(t, priv->data);
 
-	if (4 != (rc = i2c_master_send(&t->i2c,t->tda9887_data,4)))
+	if (4 != (rc = i2c_master_send(&t->i2c,priv->data,4)))
 		tda9887_info("i2c i/o error: rc == %d (should be 4)\n",rc);
 
 	if (tuner_debug > 2) {
@@ -555,7 +560,8 @@
 static void tda9887_tuner_status(struct i2c_client *client)
 {
 	struct tuner *t = i2c_get_clientdata(client);
-	tda9887_info("Data bytes: b=0x%02x c=0x%02x e=0x%02x\n", t->tda9887_data[1], t->tda9887_data[2], t->tda9887_data[3]);
+	struct tda9887_priv *priv = t->priv;
+	tda9887_info("Data bytes: b=0x%02x c=0x%02x e=0x%02x\n", priv->data[1], priv->data[2], priv->data[3]);
 }
 
 static int tda9887_get_afc(struct i2c_client *client)
@@ -586,20 +592,39 @@
 	tda9887_configure(client);
 }
 
-int tda9887_tuner_init(struct i2c_client *c)
+static void tda9887_release(struct i2c_client *c)
 {
 	struct tuner *t = i2c_get_clientdata(c);
 
+	kfree(t->priv);
+	t->priv = NULL;
+}
+
+static struct tuner_operations tda9887_tuner_ops = {
+	.set_tv_freq    = tda9887_set_freq,
+	.set_radio_freq = tda9887_set_freq,
+	.standby        = tda9887_standby,
+	.tuner_status   = tda9887_tuner_status,
+	.get_afc        = tda9887_get_afc,
+	.release        = tda9887_release,
+};
+
+int tda9887_tuner_init(struct i2c_client *c)
+{
+	struct tda9887_priv *priv = NULL;
+	struct tuner *t = i2c_get_clientdata(c);
+
+	priv = kzalloc(sizeof(struct tda9887_priv), GFP_KERNEL);
+	if (priv == NULL)
+		return -ENOMEM;
+	t->priv = priv;
+
 	strlcpy(c->name, "tda9887", sizeof(c->name));
 
 	tda9887_info("tda988[5/6/7] found @ 0x%x (%s)\n", t->i2c.addr,
 						t->i2c.driver->driver.name);
 
-	t->set_tv_freq = tda9887_set_freq;
-	t->set_radio_freq = tda9887_set_freq;
-	t->standby = tda9887_standby;
-	t->tuner_status = tda9887_tuner_status;
-	t->get_afc = tda9887_get_afc;
+	memcpy(&t->ops, &tda9887_tuner_ops, sizeof(struct tuner_operations));
 
 	return 0;
 }
diff --git a/drivers/media/video/tea5761.c b/drivers/media/video/tea5761.c
new file mode 100644
index 0000000..ae105c2
--- /dev/null
+++ b/drivers/media/video/tea5761.c
@@ -0,0 +1,243 @@
+/*
+ * For Philips TEA5761 FM Chip
+ * The I2C address is always 0x20 (0x10 in 7-bit mode).
+ *
+ * Copyright (c) 2005-2007 Mauro Carvalho Chehab (mchehab@infradead.org)
+ * This code is placed under the terms of the GNU General Public License v2
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/videodev.h>
+#include <linux/delay.h>
+#include <media/tuner.h>
+#include "tuner-driver.h"
+
+#define PREFIX "TEA5761 "
+
+/* from tuner-core.c */
+extern int tuner_debug;
+
+/*****************************************************************************/
+
+/***************************
+ * TEA5761HN I2C registers *
+ ***************************/
+
+/* INTREG - Read: bytes 0 and 1 / Write: byte 0 */
+
+	/* first byte for reading */
+#define TEA5761_INTREG_IFFLAG		0x10
+#define TEA5761_INTREG_LEVFLAG		0x8
+#define TEA5761_INTREG_FRRFLAG		0x2
+#define TEA5761_INTREG_BLFLAG		0x1
+
+	/* second byte for reading / byte for writing */
+#define TEA5761_INTREG_IFMSK		0x10
+#define TEA5761_INTREG_LEVMSK		0x8
+#define TEA5761_INTREG_FRMSK		0x2
+#define TEA5761_INTREG_BLMSK		0x1
+
+/* FRQSET - Read: bytes 2 and 3 / Write: byte 1 and 2 */
+
+	/* First byte */
+#define TEA5761_FRQSET_SEARCH_UP 0x80		/* 1=Station search from bottom up */
+#define TEA5761_FRQSET_SEARCH_MODE 0x40		/* 1=Search mode */
+
+	/* Bits 0-5 for divider MSB */
+
+	/* Second byte */
+	/* Bits 0-7 for divider LSB */
+
+/* TNCTRL - Read: bytes 4 and 5 / Write: Bytes 3 and 4 */
+
+	/* first byte */
+
+#define TEA5761_TNCTRL_PUPD_0	0x40	/* Power UP/Power Down MSB */
+#define TEA5761_TNCTRL_BLIM	0x20	/* 1= Japan frequencies, 0= European frequencies */
+#define TEA5761_TNCTRL_SWPM	0x10	/* 1= software port is FRRFLAG */
+#define TEA5761_TNCTRL_IFCTC	0x08	/* 1= IF count time 15.02 ms, 0= IF count time 2.02 ms */
+#define TEA5761_TNCTRL_AFM	0x04
+#define TEA5761_TNCTRL_SMUTE	0x02	/* 1= Soft mute */
+#define TEA5761_TNCTRL_SNC	0x01
+
+	/* second byte */
+
+#define TEA5761_TNCTRL_MU	0x80	/* 1=Hard mute */
+#define TEA5761_TNCTRL_SSL_1	0x40
+#define TEA5761_TNCTRL_SSL_0	0x20
+#define TEA5761_TNCTRL_HLSI	0x10
+#define TEA5761_TNCTRL_MST	0x08	/* 1 = mono */
+#define TEA5761_TNCTRL_SWP	0x04
+#define TEA5761_TNCTRL_DTC	0x02	/* 1 = deemphasis 50 us, 0 = deemphasis 75 us */
+#define TEA5761_TNCTRL_AHLSI	0x01
+
+/* FRQCHECK - Read: bytes 6 and 7  */
+	/* First byte */
+
+	/* Bits 0-5 for divider MSB */
+
+	/* Second byte */
+	/* Bits 0-7 for divider LSB */
+
+/* TUNCHECK - Read: bytes 8 and 9  */
+
+	/* First byte */
+#define TEA5761_TUNCHECK_IF_MASK	0x7e	/* IF count */
+#define TEA5761_TUNCHECK_TUNTO		0x01
+
+	/* Second byte */
+#define TEA5761_TUNCHECK_LEV_MASK	0xf0	/* Level Count */
+#define TEA5761_TUNCHECK_LD		0x08
+#define TEA5761_TUNCHECK_STEREO		0x04
+
+/* TESTREG - Read: bytes 10 and 11 / Write: bytes 5 and 6 */
+
+	/* All zero = no test mode */
+
+/* MANID - Read: bytes 12 and 13 */
+
+	/* First byte - should be 0x10 */
+#define TEA5767_MANID_VERSION_MASK	0xf0	/* Version = 1 */
+#define TEA5767_MANID_ID_MSB_MASK	0x0f	/* Manufacturer ID - should be 0 */
+
+	/* Second byte - Should be 0x2b */
+
+#define TEA5767_MANID_ID_LSB_MASK	0xfe	/* Manufacturer ID - should be 0x15 */
+#define TEA5767_MANID_IDAV		0x01	/* 1 = Chip has ID, 0 = Chip has no ID */
+
+/* Chip ID - Read: bytes 14 and 15 */
+
+	/* First byte - should be 0x57 */
+
+	/* Second byte - should be 0x61 */
+
+/*****************************************************************************/
+
+static void set_tv_freq(struct i2c_client *c, unsigned int freq)
+{
+	struct tuner *t = i2c_get_clientdata(c);
+
+	tuner_warn("This tuner doesn't support TV freq.\n");
+}
+
+#define FREQ_OFFSET 0 /* on the TEA5767 this would be 700 to give the right frequency */
+static void tea5761_status_dump(unsigned char *buffer)
+{
+	unsigned int div, frq;
+
+	div = ((buffer[2] & 0x3f) << 8) | buffer[3];
+
+	frq = 1000 * (div * 32768 / 1000 + FREQ_OFFSET + 225) / 4;	/* Freq in kHz */
+
+	printk(PREFIX "Frequency %d.%03d kHz (divider = 0x%04x)\n",
+	       frq / 1000, frq % 1000, div);
+}
+
+/* Freq should be specified in units of 62.5 Hz */
+static void set_radio_freq(struct i2c_client *c, unsigned int frq)
+{
+	struct tuner *t = i2c_get_clientdata(c);
+	unsigned char buffer[7] = {0, 0, 0, 0, 0, 0, 0 };
+	unsigned div;
+	int rc;
+
+	tuner_dbg (PREFIX "radio freq counter %d\n", frq);
+
+	if (t->mode == T_STANDBY) {
+		tuner_dbg("TEA5761 set to standby mode\n");
+		buffer[5] |= TEA5761_TNCTRL_MU;
+	} else {
+		buffer[4] |= TEA5761_TNCTRL_PUPD_0;
+	}
+
+
+	if (t->audmode == V4L2_TUNER_MODE_MONO) {
+		tuner_dbg("TEA5761 set to mono\n");
+		buffer[5] |= TEA5761_TNCTRL_MST;
+	} else {
+		tuner_dbg("TEA5761 set to stereo\n");
+	}
+
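+	/*
+	 * Note on the arithmetic below: frq arrives in 62.5 Hz steps (see
+	 * the comment above) and appears to follow the usual PLL formula,
+	 * folding the 225 kHz IF offset (plus the extra 700 used here)
+	 * against the 32.768 kHz reference before being split into the
+	 * two divider bytes buffer[1] and buffer[2].
+	 */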
+	div = (1000 * (frq * 4 / 16 + 700 + 225)) >> 15;
+	buffer[1] = (div >> 8) & 0x3f;
+	buffer[2] = div & 0xff;
+
+	if (tuner_debug)
+		tea5761_status_dump(buffer);
+
+	if (7 != (rc = i2c_master_send(c, buffer, 7)))
+		tuner_warn("i2c i/o error: rc == %d (should be 7)\n", rc);
+}
+
+static int tea5761_signal(struct i2c_client *c)
+{
+	unsigned char buffer[16];
+	int rc;
+	struct tuner *t = i2c_get_clientdata(c);
+
+	memset(buffer, 0, sizeof(buffer));
+	if (16 != (rc = i2c_master_recv(c, buffer, 16)))
+		tuner_warn("i2c i/o error: rc == %d (should be 16)\n", rc);
+
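+	/* The 4-bit level counter sits in the upper nibble of byte 9;
+	   shift it up so has_signal() reports a wider-range value. */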
+	return ((buffer[9] & TEA5761_TUNCHECK_LEV_MASK) << (13 - 4));
+}
+
+static int tea5761_stereo(struct i2c_client *c)
+{
+	unsigned char buffer[16];
+	int rc;
+	struct tuner *t = i2c_get_clientdata(c);
+
+	memset(buffer, 0, sizeof(buffer));
+	if (16 != (rc = i2c_master_recv(c, buffer, 16)))
+		tuner_warn("i2c i/o error: rc == %d (should be 16)\n", rc);
+
+	rc = buffer[9] & TEA5761_TUNCHECK_STEREO;
+
+	tuner_dbg("TEA5761 radio ST GET = %02x\n", rc);
+
+	return (rc ? V4L2_TUNER_SUB_STEREO : 0);
+}
+
+int tea5761_autodetection(struct i2c_client *c)
+{
+	unsigned char buffer[16];
+	int rc;
+	struct tuner *t = i2c_get_clientdata(c);
+
+	if (16 != (rc = i2c_master_recv(c, buffer, 16))) {
+		tuner_warn("It is not a TEA5761. Received %i chars.\n", rc);
+		return EINVAL;
+	}
+
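+	/* Per the register map above, a genuine TEA5761 reads back 0x2b in
+	   the second MANID byte and 0x57, 0x61 as the chip ID bytes. */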
+	if ((buffer[13] != 0x2b) || (buffer[14] != 0x57) || (buffer[15] != 0x61)) {
+		tuner_warn("Manufacturer ID = 0x%02x, Chip ID = %02x%02x. "
+			   "It is not a TEA5761\n",
+			   buffer[13], buffer[14], buffer[15]);
+		return EINVAL;
+	}
+	tuner_warn("TEA5761 detected.\n");
+	return 0;
+}
+
+static struct tuner_operations tea5761_tuner_ops = {
+	.set_tv_freq    = set_tv_freq,
+	.set_radio_freq = set_radio_freq,
+	.has_signal     = tea5761_signal,
+	.is_stereo      = tea5761_stereo,
+};
+
+int tea5761_tuner_init(struct i2c_client *c)
+{
+	struct tuner *t = i2c_get_clientdata(c);
+
+	if (tea5761_autodetection(c) == EINVAL)
+		return EINVAL;
+
+	tuner_info("type set to %d (%s)\n", t->type, "Philips TEA5761HN FM Radio");
+	strlcpy(c->name, "tea5761", sizeof(c->name));
+
+	memcpy(&t->ops, &tea5761_tuner_ops, sizeof(struct tuner_operations));
+
+	return (0);
+}
diff --git a/drivers/media/video/tea5767.c b/drivers/media/video/tea5767.c
index d1c4178..4985d47 100644
--- a/drivers/media/video/tea5767.c
+++ b/drivers/media/video/tea5767.c
@@ -13,7 +13,7 @@
 #include <linux/i2c.h>
 #include <linux/videodev.h>
 #include <linux/delay.h>
-#include <media/tuner.h>
+#include "tuner-driver.h"
 
 #define PREFIX "TEA5767 "
 
@@ -343,6 +343,14 @@
 	return 0;
 }
 
+static struct tuner_operations tea5767_tuner_ops = {
+	.set_tv_freq    = set_tv_freq,
+	.set_radio_freq = set_radio_freq,
+	.has_signal     = tea5767_signal,
+	.is_stereo      = tea5767_stereo,
+	.standby        = tea5767_standby,
+};
+
 int tea5767_tuner_init(struct i2c_client *c)
 {
 	struct tuner *t = i2c_get_clientdata(c);
@@ -350,11 +358,7 @@
 	tuner_info("type set to %d (%s)\n", t->type, "Philips TEA5767HN FM Radio");
 	strlcpy(c->name, "tea5767", sizeof(c->name));
 
-	t->set_tv_freq = set_tv_freq;
-	t->set_radio_freq = set_radio_freq;
-	t->has_signal = tea5767_signal;
-	t->is_stereo = tea5767_stereo;
-	t->standby = tea5767_standby;
+	memcpy(&t->ops, &tea5767_tuner_ops, sizeof(struct tuner_operations));
 
 	return (0);
 }
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 505591a..e646465 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -20,11 +20,15 @@
 
 #include <media/tuner.h>
 #include <media/v4l2-common.h>
+#include "tuner-driver.h"
 
 #define UNSET (-1U)
 
 /* standard i2c insmod options */
 static unsigned short normal_i2c[] = {
+#ifdef CONFIG_TUNER_TEA5761
+	0x10,
+#endif
 	0x42, 0x43, 0x4a, 0x4b,			/* tda8290 */
 	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
 	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
@@ -77,7 +81,7 @@
 		tuner_warn ("tuner type not set\n");
 		return;
 	}
-	if (NULL == t->set_tv_freq) {
+	if (NULL == t->ops.set_tv_freq) {
 		tuner_warn ("Tuner has no way to set tv freq\n");
 		return;
 	}
@@ -92,7 +96,7 @@
 		else
 			freq = tv_range[1] * 16;
 	}
-	t->set_tv_freq(c, freq);
+	t->ops.set_tv_freq(c, freq);
 }
 
 static void set_radio_freq(struct i2c_client *c, unsigned int freq)
@@ -103,7 +107,7 @@
 		tuner_warn ("tuner type not set\n");
 		return;
 	}
-	if (NULL == t->set_radio_freq) {
+	if (NULL == t->ops.set_radio_freq) {
 		tuner_warn ("tuner has no way to set radio frequency\n");
 		return;
 	}
@@ -119,7 +123,7 @@
 			freq = radio_range[1] * 16000;
 	}
 
-	t->set_radio_freq(c, freq);
+	t->ops.set_radio_freq(c, freq);
 }
 
 static void set_freq(struct i2c_client *c, unsigned long freq)
@@ -174,6 +178,14 @@
 		return;
 	}
 
+	/* discard private data, in case set_type() was previously called */
+	if (t->ops.release)
+		t->ops.release(c);
+	else {
+		kfree(t->priv);
+		t->priv = NULL;
+	}
+
 	switch (t->type) {
 	case TUNER_MT2032:
 		microtune_init(c);
@@ -189,6 +201,16 @@
 		}
 		t->mode_mask = T_RADIO;
 		break;
+#ifdef CONFIG_TUNER_TEA5761
+	case TUNER_TEA5761:
+		if (tea5761_tuner_init(c) == EINVAL) {
+			t->type = TUNER_ABSENT;
+			t->mode_mask = T_UNINITIALIZED;
+			return;
+		}
+		t->mode_mask = T_RADIO;
+		break;
+#endif
 	case TUNER_PHILIPS_FMD1216ME_MK3:
 		buffer[0] = 0x0b;
 		buffer[1] = 0xdc;
@@ -408,11 +430,11 @@
 	tuner_info("Standard:        0x%08lx\n", (unsigned long)t->std);
 	if (t->mode != V4L2_TUNER_RADIO)
 	       return;
-	if (t->has_signal) {
-		tuner_info("Signal strength: %d\n", t->has_signal(client));
+	if (t->ops.has_signal) {
+		tuner_info("Signal strength: %d\n", t->ops.has_signal(client));
 	}
-	if (t->is_stereo) {
-		tuner_info("Stereo:          %s\n", t->is_stereo(client) ? "yes" : "no");
+	if (t->ops.is_stereo) {
+		tuner_info("Stereo:          %s\n", t->ops.is_stereo(client) ? "yes" : "no");
 	}
 }
 
@@ -437,10 +459,9 @@
 	memcpy(&t->i2c, &client_template, sizeof(struct i2c_client));
 	i2c_set_clientdata(&t->i2c, t);
 	t->type = UNSET;
-	t->radio_if2 = 10700 * 1000;	/* 10.7MHz - FM radio */
 	t->audmode = V4L2_TUNER_MODE_STEREO;
 	t->mode_mask = T_UNINITIALIZED;
-	t->tuner_status = tuner_status;
+	t->ops.tuner_status = tuner_status;
 
 	if (show_i2c) {
 		unsigned char buffer[16];
@@ -460,6 +481,19 @@
 	/* autodetection code based on the i2c addr */
 	if (!no_autodetect) {
 		switch (addr) {
+#ifdef CONFIG_TUNER_TEA5761
+		case 0x10:
+			if (tea5761_autodetection(&t->i2c) != EINVAL) {
+				t->type = TUNER_TEA5761;
+				t->mode_mask = T_RADIO;
+				t->mode = T_STANDBY;
+				t->radio_freq = 87.5 * 16000; /* Sets freq to FM range */
+				default_mode_mask &= ~T_RADIO;
+
+				goto register_client;
+			}
+			break;
+#endif
 		case 0x42:
 		case 0x43:
 		case 0x4a:
@@ -533,6 +567,11 @@
 		return err;
 	}
 
+	if (t->ops.release)
+		t->ops.release(client);
+	else {
+		kfree(t->priv);
+	}
 	kfree(t);
 	return 0;
 }
@@ -553,8 +592,8 @@
 
 	if (check_mode(t, cmd) == EINVAL) {
 		t->mode = T_STANDBY;
-		if (t->standby)
-			t->standby (client);
+		if (t->ops.standby)
+			t->ops.standby (client);
 		return EINVAL;
 	}
 	return 0;
@@ -602,8 +641,8 @@
 		if (check_mode(t, "TUNER_SET_STANDBY") == EINVAL)
 			return 0;
 		t->mode = T_STANDBY;
-		if (t->standby)
-			t->standby (client);
+		if (t->ops.standby)
+			t->ops.standby (client);
 		break;
 #ifdef CONFIG_VIDEO_V4L1
 	case VIDIOCSAUDIO:
@@ -662,10 +701,10 @@
 				return 0;
 
 			if (V4L2_TUNER_RADIO == t->mode) {
-				if (t->has_signal)
-					vt->signal = t->has_signal(client);
-				if (t->is_stereo) {
-					if (t->is_stereo(client))
+				if (t->ops.has_signal)
+					vt->signal = t->ops.has_signal(client);
+				if (t->ops.is_stereo) {
+					if (t->ops.is_stereo(client))
 						vt->flags |=
 						    VIDEO_TUNER_STEREO_ON;
 					else
@@ -693,8 +732,8 @@
 			if (check_v4l2(t) == EINVAL)
 				return 0;
 
-			if (V4L2_TUNER_RADIO == t->mode && t->is_stereo)
-				va->mode = t->is_stereo(client)
+			if (V4L2_TUNER_RADIO == t->mode && t->ops.is_stereo)
+				va->mode = t->ops.is_stereo(client)
 				    ? VIDEO_SOUND_STEREO : VIDEO_SOUND_MONO;
 			return 0;
 		}
@@ -759,8 +798,8 @@
 			switch_v4l2();
 
 			tuner->type = t->mode;
-			if (t->get_afc)
-				tuner->afc=t->get_afc(client);
+			if (t->ops.get_afc)
+				tuner->afc=t->ops.get_afc(client);
 			if (t->mode == V4L2_TUNER_ANALOG_TV)
 				tuner->capability |= V4L2_TUNER_CAP_NORM;
 			if (t->mode != V4L2_TUNER_RADIO) {
@@ -770,13 +809,13 @@
 			}
 
 			/* radio mode */
-			if (t->has_signal)
-				tuner->signal = t->has_signal(client);
+			if (t->ops.has_signal)
+				tuner->signal = t->ops.has_signal(client);
 
 			tuner->rxsubchans =
 				V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
-			if (t->is_stereo) {
-				tuner->rxsubchans = t->is_stereo(client) ?
+			if (t->ops.is_stereo) {
+				tuner->rxsubchans = t->ops.is_stereo(client) ?
 					V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO;
 			}
 
@@ -804,8 +843,8 @@
 			break;
 		}
 	case VIDIOC_LOG_STATUS:
-		if (t->tuner_status)
-			t->tuner_status(client);
+		if (t->ops.tuner_status)
+			t->ops.tuner_status(client);
 		break;
 	}
 
diff --git a/drivers/media/video/tuner-driver.h b/drivers/media/video/tuner-driver.h
new file mode 100644
index 0000000..0334a91
--- /dev/null
+++ b/drivers/media/video/tuner-driver.h
@@ -0,0 +1,107 @@
+/*
+    tuner-driver.h - interface for different tuners
+
+    Copyright (C) 1997 Markus Schroeder (schroedm@uni-duesseldorf.de)
+    minor modifications by Ralph Metzler (rjkm@thp.uni-koeln.de)
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __TUNER_HW_H__
+#define __TUNER_HW_H__
+
+#include <linux/videodev2.h>
+#include <linux/i2c.h>
+
+extern unsigned const int tuner_count;
+
+struct tuner_operations {
+	void (*set_tv_freq)(struct i2c_client *c, unsigned int freq);
+	void (*set_radio_freq)(struct i2c_client *c, unsigned int freq);
+	int  (*has_signal)(struct i2c_client *c);
+	int  (*is_stereo)(struct i2c_client *c);
+	int  (*get_afc)(struct i2c_client *c);
+	void (*tuner_status)(struct i2c_client *c);
+	void (*standby)(struct i2c_client *c);
+	void (*release)(struct i2c_client *c);
+};
+
+struct tuner {
+	/* device */
+	struct i2c_client i2c;
+
+	unsigned int type;	/* chip type */
+
+	unsigned int mode;
+	unsigned int mode_mask;	/* Combination of allowable modes */
+
+	unsigned int tv_freq;	/* keep track of the current settings */
+	unsigned int radio_freq;
+	u16 	     last_div;
+	unsigned int audmode;
+	v4l2_std_id  std;
+
+	int          using_v4l2;
+	void *priv;
+
+	/* used by tda9887 */
+	unsigned int       tda9887_config;
+
+	unsigned int config;
+	int (*tuner_callback) (void *dev, int command,int arg);
+
+	struct tuner_operations ops;
+};
+
+/* ------------------------------------------------------------------------ */
+
+extern int default_tuner_init(struct i2c_client *c);
+
+extern int tda9887_tuner_init(struct i2c_client *c);
+
+extern int microtune_init(struct i2c_client *c);
+
+extern int tda8290_init(struct i2c_client *c);
+extern int tda8290_probe(struct i2c_client *c);
+
+extern int tea5761_tuner_init(struct i2c_client *c);
+extern int tea5761_autodetection(struct i2c_client *c);
+
+extern int tea5767_autodetection(struct i2c_client *c);
+extern int tea5767_tuner_init(struct i2c_client *c);
+
+/* ------------------------------------------------------------------------ */
+
+#define tuner_warn(fmt, arg...) do {\
+	printk(KERN_WARNING "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \
+			i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
+#define tuner_info(fmt, arg...) do {\
+	printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \
+			i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
+#define tuner_dbg(fmt, arg...) do {\
+	extern int tuner_debug; \
+	if (tuner_debug) \
+		printk(KERN_DEBUG "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \
+			i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
+
+#endif /* __TUNER_HW_H__ */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/video/tuner-simple.c b/drivers/media/video/tuner-simple.c
index c40b92c..2d57e8b 100644
--- a/drivers/media/video/tuner-simple.c
+++ b/drivers/media/video/tuner-simple.c
@@ -8,6 +8,8 @@
 #include <linux/videodev.h>
 #include <media/tuner.h>
 #include <media/v4l2-common.h>
+#include <media/tuner-types.h>
+#include "tuner-driver.h"
 
 static int offset = 0;
 module_param(offset, int, 0664);
@@ -54,9 +56,9 @@
     sound 2          33.16  -      -
     NICAM            33.05  33.05  39.80
  */
-#define PHILIPS_MF_SET_BG	0x01 /* Bit 2 must be zero, Bit 3 is system output */
-#define PHILIPS_MF_SET_PAL_L	0x03 // France
-#define PHILIPS_MF_SET_PAL_L2	0x02 // L'
+#define PHILIPS_MF_SET_STD_BG	0x01 /* Bit 2 must be zero, Bit 3 is system output */
+#define PHILIPS_MF_SET_STD_L	0x03 /* Used on Secam France */
+#define PHILIPS_MF_SET_STD_LC	0x02 /* Used on SECAM L' */
 
 /* Control byte */
 
@@ -207,11 +209,11 @@
 		/* 0x04 -> ??? PAL others / SECAM others ??? */
 		cb &= ~0x03;
 		if (t->std & V4L2_STD_SECAM_L) //also valid for V4L2_STD_SECAM
-			cb |= PHILIPS_MF_SET_PAL_L;
+			cb |= PHILIPS_MF_SET_STD_L;
 		else if (t->std & V4L2_STD_SECAM_LC)
-			cb |= PHILIPS_MF_SET_PAL_L2;
+			cb |= PHILIPS_MF_SET_STD_LC;
 		else /* V4L2_STD_B|V4L2_STD_GH */
-			cb |= PHILIPS_MF_SET_BG;
+			cb |= PHILIPS_MF_SET_STD_BG;
 		break;
 
 	case TUNER_TEMIC_4046FM5:
@@ -479,6 +481,13 @@
 		tuner_warn("i2c i/o error: rc == %d (should be 4)\n",rc);
 }
 
+static struct tuner_operations simple_tuner_ops = {
+	.set_tv_freq    = default_set_tv_freq,
+	.set_radio_freq = default_set_radio_freq,
+	.has_signal     = tuner_signal,
+	.is_stereo      = tuner_stereo,
+};
+
 int default_tuner_init(struct i2c_client *c)
 {
 	struct tuner *t = i2c_get_clientdata(c);
@@ -487,11 +496,7 @@
 		   t->type, tuners[t->type].name);
 	strlcpy(c->name, tuners[t->type].name, sizeof(c->name));
 
-	t->set_tv_freq = default_set_tv_freq;
-	t->set_radio_freq = default_set_radio_freq;
-	t->has_signal = tuner_signal;
-	t->is_stereo = tuner_stereo;
-	t->standby = NULL;
+	memcpy(&t->ops, &simple_tuner_ops, sizeof(struct tuner_operations));
 
 	return 0;
 }
diff --git a/drivers/media/video/tuner-types.c b/drivers/media/video/tuner-types.c
index 74c3e6f..417f642b 100644
--- a/drivers/media/video/tuner-types.c
+++ b/drivers/media/video/tuner-types.c
@@ -594,19 +594,19 @@
 	},
 };
 
-/* ------------ TUNER_PHILIPS_ATSC - Philips ATSC ------------ */
+/* ---- TUNER_PHILIPS_ATSC - Philips FCV1236D (ATSC/NTSC) ---- */
 
-static struct tuner_range tuner_philips_atsc_ranges[] = {
+static struct tuner_range tuner_philips_fcv1236d_ranges[] = {
 	{ 16 * 157.25 /*MHz*/, 0x8e, 0xa0, },
-	{ 16 * 454.00 /*MHz*/, 0x8e, 0x90, },
+	{ 16 * 451.25 /*MHz*/, 0x8e, 0x90, },
 	{ 16 * 999.99        , 0x8e, 0x30, },
 };
 
-static struct tuner_params tuner_philips_atsc_params[] = {
+static struct tuner_params tuner_philips_fcv1236d_params[] = {
 	{
 		.type   = TUNER_PARAM_TYPE_NTSC,
-		.ranges = tuner_philips_atsc_ranges,
-		.count  = ARRAY_SIZE(tuner_philips_atsc_ranges),
+		.ranges = tuner_philips_fcv1236d_ranges,
+		.count  = ARRAY_SIZE(tuner_philips_fcv1236d_ranges),
 	},
 };
 
@@ -1296,9 +1296,9 @@
 		.count  = ARRAY_SIZE(tuner_philips_pal_mk_params),
 	},
 	[TUNER_PHILIPS_ATSC] = { /* Philips ATSC */
-		.name   = "Philips 1236D ATSC/NTSC dual in",
-		.params = tuner_philips_atsc_params,
-		.count  = ARRAY_SIZE(tuner_philips_atsc_params),
+		.name   = "Philips FCV1236D ATSC/NTSC dual in",
+		.params = tuner_philips_fcv1236d_params,
+		.count  = ARRAY_SIZE(tuner_philips_fcv1236d_params),
 	},
 	[TUNER_PHILIPS_FM1236_MK3] = { /* Philips NTSC */
 		.name   = "Philips NTSC MK3 (FM1236MK3 or FM1236/F)",
@@ -1463,6 +1463,10 @@
 		.name   = "Philips TDA988[5,6,7] IF PLL Demodulator",
 		/* see tda9887.c for details */
 	},
+	[TUNER_TEA5761] = { /* Philips RADIO */
+		.name   = "Philips TEA5761 FM Radio",
+		/* see tea5761.c for details */
+	},
 };
 
 unsigned const int tuner_count = ARRAY_SIZE(tuners);
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index c9bf9db..9da338d 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -271,7 +271,7 @@
 	struct CHIPDESC  *desc = chiplist + chip->type;
 
 	v4l_dbg(1, debug, &chip->c, "%s: thread started\n", chip->c.name);
-
+	set_freezable();
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!kthread_should_stop())
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index a1136da..fdc3def 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -183,7 +183,7 @@
 	{ TUNER_ABSENT,        "Silicon TDA8275C1 8290 FM"},
 	{ TUNER_ABSENT,        "Thompson DTT757"},
 	/* 80-89 */
-	{ TUNER_ABSENT,        "Philips FQ1216LME MK3"},
+	{ TUNER_PHILIPS_FM1216ME_MK3, "Philips FQ1216LME MK3"},
 	{ TUNER_LG_PAL_NEW_TAPC, "LG TAPC G701D"},
 	{ TUNER_LG_NTSC_NEW_TAPC, "LG TAPC H791F"},
 	{ TUNER_LG_PAL_NEW_TAPC, "TCL 2002MB 3"},
@@ -490,7 +490,7 @@
 			to indicate 4052 mux was removed in favor of using MSP
 			inputs directly. */
 			audioic = eeprom_data[i+2] & 0x7f;
-			if (audioic < sizeof(audioIC)/sizeof(*audioIC))
+			if (audioic < ARRAY_SIZE(audioIC))
 				tvee->audio_processor = audioIC[audioic].id;
 			else
 				tvee->audio_processor = AUDIO_CHIP_UNKNOWN;
@@ -523,7 +523,7 @@
 			to indicate 4052 mux was removed in favor of using MSP
 			inputs directly. */
 			audioic = eeprom_data[i+1] & 0x7f;
-			if (audioic < sizeof(audioIC)/sizeof(*audioIC))
+			if (audioic < ARRAY_SIZE(audioIC))
 				tvee->audio_processor = audioIC[audioic].id;
 			else
 				tvee->audio_processor = AUDIO_CHIP_UNKNOWN;
@@ -678,7 +678,7 @@
 		tveeprom_info("audio processor is unknown (no idx)\n");
 		tvee->audio_processor=AUDIO_CHIP_UNKNOWN;
 	} else {
-		if (audioic < sizeof(audioIC)/sizeof(*audioIC))
+		if (audioic < ARRAY_SIZE(audioIC))
 			tveeprom_info("audio processor is %s (idx %d)\n",
 					audioIC[audioic].name,audioic);
 		else
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index d5ec05f..e2f1c97 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -1006,7 +1006,7 @@
 		{
 			struct v4l2_control *ctrl = arg;
 			u8 i, n;
-			n = sizeof(tvp5150_qctrl) / sizeof(tvp5150_qctrl[0]);
+			n = ARRAY_SIZE(tvp5150_qctrl);
 			for (i = 0; i < n; i++)
 				if (ctrl->id == tvp5150_qctrl[i].id) {
 					if (ctrl->value <
diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
index abe2146..491505d 100644
--- a/drivers/media/video/usbvideo/konicawc.c
+++ b/drivers/media/video/usbvideo/konicawc.c
@@ -236,7 +236,7 @@
 	input_dev->name = "Konicawc snapshot button";
 	input_dev->phys = cam->input_physname;
 	usb_to_input_id(dev, &input_dev->id);
-	input_dev->cdev.dev = &dev->dev;
+	input_dev->dev.parent = &dev->dev;
 
 	input_dev->evbit[0] = BIT(EV_KEY);
 	input_dev->keybit[LONG(BTN_0)] = BIT(BTN_0);
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index ec0ff22..dd1a6d6 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -100,7 +100,7 @@
 	input_dev->name = "QCM button";
 	input_dev->phys = cam->input_physname;
 	usb_to_input_id(dev, &input_dev->id);
-	input_dev->cdev.dev = &dev->dev;
+	input_dev->dev.parent = &dev->dev;
 
 	input_dev->evbit[0] = BIT(EV_KEY);
 	input_dev->keybit[LONG(BTN_0)] = BIT(BTN_0);
@@ -439,7 +439,7 @@
 	int ret;
 	int i;
 
-	for (i=0; i < sizeof(regval_table)/sizeof(regval_table[0]) ; i++) {
+	for (i=0; i < ARRAY_SIZE(regval_table) ; i++) {
 		CHECK_RET(ret, qcm_stv_setb(uvd->dev,
 					regval_table[i].reg,
 					regval_table[i].val));
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 982b115..ff55512 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -42,7 +42,6 @@
 #include <linux/usb.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
-#include <linux/proc_fs.h>
 #include <linux/mutex.h>
 #include "usbvideo.h"
 
@@ -417,11 +416,6 @@
 	u8 open_count;
 	u8 bulkEndpoint;
 	int needsDummyRead;
-
-#if defined(CONFIG_VIDEO_PROC_FS)
-	struct proc_dir_entry *proc_dir;
-#endif
-
 };
 
 static int vicam_probe( struct usb_interface *intf, const struct usb_device_id *id);
@@ -1065,175 +1059,6 @@
 	return 0;
 }
 
-#if defined(CONFIG_VIDEO_PROC_FS)
-
-static struct proc_dir_entry *vicam_proc_root = NULL;
-
-static int vicam_read_helper(char *page, char **start, off_t off,
-				int count, int *eof, int value)
-{
-	char *out = page;
-	int len;
-
-	out += sprintf(out, "%d",value);
-
-	len = out - page;
-	len -= off;
-	if (len < count) {
-		*eof = 1;
-		if (len <= 0)
-			return 0;
-	} else
-		len = count;
-
-	*start = page + off;
-	return len;
-}
-
-static int vicam_read_proc_shutter(char *page, char **start, off_t off,
-				int count, int *eof, void *data)
-{
-	return vicam_read_helper(page,start,off,count,eof,
-				((struct vicam_camera *)data)->shutter_speed);
-}
-
-static int vicam_read_proc_gain(char *page, char **start, off_t off,
-				int count, int *eof, void *data)
-{
-	return vicam_read_helper(page,start,off,count,eof,
-				((struct vicam_camera *)data)->gain);
-}
-
-static int
-vicam_write_proc_shutter(struct file *file, const char *buffer,
-			 unsigned long count, void *data)
-{
-	u16 stmp;
-	char kbuf[8];
-	struct vicam_camera *cam = (struct vicam_camera *) data;
-
-	if (count > 6)
-		return -EINVAL;
-
-	if (copy_from_user(kbuf, buffer, count))
-		return -EFAULT;
-
-	stmp = (u16) simple_strtoul(kbuf, NULL, 10);
-	if (stmp < 4 || stmp > 32000)
-		return -EINVAL;
-
-	cam->shutter_speed = stmp;
-
-	return count;
-}
-
-static int
-vicam_write_proc_gain(struct file *file, const char *buffer,
-		      unsigned long count, void *data)
-{
-	u16 gtmp;
-	char kbuf[8];
-
-	struct vicam_camera *cam = (struct vicam_camera *) data;
-
-	if (count > 4)
-		return -EINVAL;
-
-	if (copy_from_user(kbuf, buffer, count))
-		return -EFAULT;
-
-	gtmp = (u16) simple_strtoul(kbuf, NULL, 10);
-	if (gtmp > 255)
-		return -EINVAL;
-	cam->gain = gtmp;
-
-	return count;
-}
-
-static void
-vicam_create_proc_root(void)
-{
-	vicam_proc_root = proc_mkdir("video/vicam", NULL);
-
-	if (vicam_proc_root)
-		vicam_proc_root->owner = THIS_MODULE;
-	else
-		printk(KERN_ERR
-		       "could not create /proc entry for vicam!");
-}
-
-static void
-vicam_destroy_proc_root(void)
-{
-	if (vicam_proc_root)
-		remove_proc_entry("video/vicam", 0);
-}
-
-static void
-vicam_create_proc_entry(struct vicam_camera *cam)
-{
-	char name[64];
-	struct proc_dir_entry *ent;
-
-	DBG(KERN_INFO "vicam: creating proc entry\n");
-
-	if (!vicam_proc_root || !cam) {
-		printk(KERN_INFO
-		       "vicam: could not create proc entry, %s pointer is null.\n",
-		       (!cam ? "camera" : "root"));
-		return;
-	}
-
-	sprintf(name, "video%d", cam->vdev.minor);
-
-	cam->proc_dir = proc_mkdir(name, vicam_proc_root);
-
-	if ( !cam->proc_dir )
-		return; // FIXME: We should probably return an error here
-
-	ent = create_proc_entry("shutter", S_IFREG | S_IRUGO | S_IWUSR,
-				cam->proc_dir);
-	if (ent) {
-		ent->data = cam;
-		ent->read_proc = vicam_read_proc_shutter;
-		ent->write_proc = vicam_write_proc_shutter;
-		ent->size = 64;
-	}
-
-	ent = create_proc_entry("gain", S_IFREG | S_IRUGO | S_IWUSR,
-				cam->proc_dir);
-	if (ent) {
-		ent->data = cam;
-		ent->read_proc = vicam_read_proc_gain;
-		ent->write_proc = vicam_write_proc_gain;
-		ent->size = 64;
-	}
-}
-
-static void
-vicam_destroy_proc_entry(void *ptr)
-{
-	struct vicam_camera *cam = (struct vicam_camera *) ptr;
-	char name[16];
-
-	if ( !cam->proc_dir )
-		return;
-
-	sprintf(name, "video%d", cam->vdev.minor);
-	remove_proc_entry("shutter", cam->proc_dir);
-	remove_proc_entry("gain", cam->proc_dir);
-	remove_proc_entry(name,vicam_proc_root);
-	cam->proc_dir = NULL;
-
-}
-
-#else
-static inline void vicam_create_proc_root(void) { }
-static inline void vicam_destroy_proc_root(void) { }
-static inline void vicam_create_proc_entry(struct vicam_camera *cam) { }
-static inline void vicam_destroy_proc_entry(void *ptr) { }
-#endif
-
 static const struct file_operations vicam_fops = {
 	.owner		= THIS_MODULE,
 	.open		= vicam_open,
@@ -1305,13 +1130,12 @@
 	}
 
 	if ((cam =
-	     kmalloc(sizeof (struct vicam_camera), GFP_KERNEL)) == NULL) {
+	     kzalloc(sizeof (struct vicam_camera), GFP_KERNEL)) == NULL) {
 		printk(KERN_WARNING
 		       "could not allocate kernel memory for vicam_camera struct\n");
 		return -ENOMEM;
 	}
 
-	memset(cam, 0, sizeof (struct vicam_camera));
 
 	cam->shutter_speed = 15;
 
@@ -1330,8 +1154,6 @@
 		return -EIO;
 	}
 
-	vicam_create_proc_entry(cam);
-
 	printk(KERN_INFO "ViCam webcam driver now controlling video device %d\n",cam->vdev.minor);
 
 	usb_set_intfdata (intf, cam);
@@ -1363,8 +1185,6 @@
 
 	cam->udev = NULL;
 
-	vicam_destroy_proc_entry(cam);
-
 	/* the only thing left to do is synchronize with
 	 * our close/release function on who should release
 	 * the camera memory. if there are any users using the
@@ -1390,7 +1210,6 @@
 {
 	int retval;
 	DBG(KERN_INFO "ViCam-based WebCam driver startup\n");
-	vicam_create_proc_root();
 	retval = usb_register(&vicam_driver);
 	if (retval)
 		printk(KERN_WARNING "usb_register failed!\n");
@@ -1404,7 +1223,6 @@
 	       "ViCam-based WebCam driver shutdown\n");
 
 	usb_deregister(&vicam_driver);
-	vicam_destroy_proc_root();
 }
 
 module_init(usb_vicam_init);
diff --git a/drivers/media/video/usbvision/usbvision-cards.c b/drivers/media/video/usbvision/usbvision-cards.c
index 51ab265..380564c 100644
--- a/drivers/media/video/usbvision/usbvision-cards.c
+++ b/drivers/media/video/usbvision/usbvision-cards.c
@@ -79,7 +79,7 @@
 		.Interface     = -1,
 		.Codec         = CODEC_SAA7113,
 		.VideoChannels = 2,
-		.VideoNorm     = V4L2_STD_PAL,
+		.VideoNorm     = V4L2_STD_NTSC,
 		.AudioChannels = 1,
 		.Radio         = 0,
 		.vbi           = 1,
@@ -311,8 +311,8 @@
 		.vbi           = 1,
 		.Tuner         = 1,
 		.TunerType     = TUNER_PHILIPS_SECAM,
-		.X_Offset      = -1,
-		.Y_Offset      = -1,
+		.X_Offset      = 0x80,
+		.Y_Offset      = 0x16,
 		.ModelString   = "Hauppauge WinTV USB (PAL/SECAM L)",
 	},
 	[HPG_WINTV_PAL_D_K] = {
@@ -586,7 +586,7 @@
 		.Radio         = 0,
 		.vbi           = 1,
 		.Tuner         = 1,
-		.TunerType     = TUNER_PHILIPS_PAL,
+		.TunerType     = TUNER_LG_PAL_NEW_TAPC,
 		.X_Offset      = 0,
 		.Y_Offset      = 3,
 		.Dvi_yuv_override = 1,
diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
index 7df071eb..5b1e346 100644
--- a/drivers/media/video/usbvision/usbvision-core.c
+++ b/drivers/media/video/usbvision/usbvision-core.c
@@ -1742,7 +1742,7 @@
 		format = ISOC_MODE_YUV420;
 	}
 	value[0] = 0x0A;  //TODO: See the effect of the filter
-	value[1] = format;
+	value[1] = format; // Sets the VO_MODE register which follows FILT_CONT
 	rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
 			     USBVISION_OP_CODE,
 			     USB_DIR_OUT | USB_TYPE_VENDOR |
@@ -1831,10 +1831,10 @@
 		frameRate = FRAMERATE_MAX;
 	}
 
-	if (usbvision->tvnorm->id & V4L2_STD_625_50) {
+	if (usbvision->tvnormId & V4L2_STD_625_50) {
 		frameDrop = frameRate * 32 / 25 - 1;
 	}
-	else if (usbvision->tvnorm->id & V4L2_STD_525_60) {
+	else if (usbvision->tvnormId & V4L2_STD_525_60) {
 		frameDrop = frameRate * 32 / 30 - 1;
 	}
 
@@ -2067,7 +2067,7 @@
 	}
 
 
-	if (usbvision->tvnorm->id & V4L2_STD_PAL) {
+	if (usbvision->tvnormId & V4L2_STD_PAL) {
 		value[0] = 0xC0;
 		value[1] = 0x02;	//0x02C0 -> 704 Input video line length
 		value[2] = 0x20;
@@ -2076,7 +2076,7 @@
 		value[5] = 0x00;	//0x0060 -> 96 Input video h offset
 		value[6] = 0x16;
 		value[7] = 0x00;	//0x0016 -> 22 Input video v offset
-	} else if (usbvision->tvnorm->id & V4L2_STD_SECAM) {
+	} else if (usbvision->tvnormId & V4L2_STD_SECAM) {
 		value[0] = 0xC0;
 		value[1] = 0x02;	//0x02C0 -> 704 Input video line length
 		value[2] = 0x20;
@@ -2537,7 +2537,9 @@
 
 int usbvision_muxsel(struct usb_usbvision *usbvision, int channel)
 {
-	int mode[4];
+	/* inputs #0 and #3 are constant for every SAA711x. */
+	/* inputs #1 and #2 are variable for SAA7111 and SAA7113 */
+	int mode[4]= {SAA7115_COMPOSITE0, 0, 0, SAA7115_COMPOSITE3};
 	int audio[]= {1, 0, 0, 0};
 	struct v4l2_routing route;
 	//channel 0 is TV with audiochannel 1 (tuner mono)
@@ -2547,10 +2549,6 @@
 
 	RESTRICT_TO_RANGE(channel, 0, usbvision->video_inputs);
 	usbvision->ctl_input = channel;
-	  route.input = SAA7115_COMPOSITE1;
-	  route.output = 0;
-	  call_i2c_clients(usbvision, VIDIOC_INT_S_VIDEO_ROUTING,&route);
-	  call_i2c_clients(usbvision, VIDIOC_S_INPUT, &usbvision->ctl_input);
 
 	// set the new channel
 	// Regular USB TV Tuners -> channel: 0 = Television, 1 = Composite, 2 = S-Video
@@ -2558,28 +2556,27 @@
 
 	switch (usbvision_device_data[usbvision->DevModel].Codec) {
 		case CODEC_SAA7113:
-			if (SwitchSVideoInput) { // To handle problems with S-Video Input for some devices.  Use SwitchSVideoInput parameter when loading the module.
-				mode[2] = 1;
+			mode[1] = SAA7115_COMPOSITE2;
+			if (SwitchSVideoInput) {
+				/* To handle problems with S-Video Input for
+				 * some devices.  Use SwitchSVideoInput
+				 * parameter when loading the module.*/
+				mode[2] = SAA7115_COMPOSITE1;
 			}
 			else {
-				mode[2] = 7;
-			}
-			if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
-				mode[0] = 0; mode[1] = 2; mode[3] = 3;  // Special for four input devices
-			}
-			else {
-				mode[0] = 0; mode[1] = 2; //modes for regular saa7113 devices
+				mode[2] = SAA7115_SVIDEO1;
 			}
 			break;
 		case CODEC_SAA7111:
-			mode[0] = 0; mode[1] = 1; mode[2] = 7; //modes for saa7111
-			break;
 		default:
-			mode[0] = 0; mode[1] = 1; mode[2] = 7; //default modes
+			/* modes for saa7111 */
+			mode[1] = SAA7115_COMPOSITE1;
+			mode[2] = SAA7115_SVIDEO1;
+			break;
 	}
 	route.input = mode[channel];
+	route.output = 0;
 	call_i2c_clients(usbvision, VIDIOC_INT_S_VIDEO_ROUTING,&route);
-	usbvision->channel = channel;
 	usbvision_set_audio(usbvision, audio[channel]);
 	return 0;
 }
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index aa3258b..868b688 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -36,7 +36,8 @@
  *     - use submit_urb for all setup packets
  *     - Fix memory settings for nt1004. It is 4 times as big as the
  *       nt1003 memory.
- *     - Add audio on endpoint 3 for nt1004 chip.  Seems impossible, needs a codec interface.  Which one?
+ *     - Add audio on endpoint 3 for nt1004 chip.
+ *         Seems impossible, needs a codec interface.  Which one?
  *     - Clean up the driver.
  *     - optimization for performance.
  *     - Add Videotext capability (VBI).  Working on it.....
@@ -77,7 +78,8 @@
 #include "usbvision.h"
 #include "usbvision-cards.h"
 
-#define DRIVER_AUTHOR "Joerg Heckenbach <joerg@heckenbach-aw.de>, Dwaine Garden <DwaineGarden@rogers.com>"
+#define DRIVER_AUTHOR "Joerg Heckenbach <joerg@heckenbach-aw.de>,\
+ Dwaine Garden <DwaineGarden@rogers.com>"
 #define DRIVER_NAME "usbvision"
 #define DRIVER_ALIAS "USBVision"
 #define DRIVER_DESC "USBVision USB Video Device Driver for Linux"
@@ -85,20 +87,25 @@
 #define USBVISION_DRIVER_VERSION_MAJOR 0
 #define USBVISION_DRIVER_VERSION_MINOR 9
 #define USBVISION_DRIVER_VERSION_PATCHLEVEL 9
-#define USBVISION_DRIVER_VERSION KERNEL_VERSION(USBVISION_DRIVER_VERSION_MAJOR,USBVISION_DRIVER_VERSION_MINOR,USBVISION_DRIVER_VERSION_PATCHLEVEL)
-#define USBVISION_VERSION_STRING __stringify(USBVISION_DRIVER_VERSION_MAJOR) "." __stringify(USBVISION_DRIVER_VERSION_MINOR) "." __stringify(USBVISION_DRIVER_VERSION_PATCHLEVEL)
+#define USBVISION_DRIVER_VERSION KERNEL_VERSION(USBVISION_DRIVER_VERSION_MAJOR,\
+USBVISION_DRIVER_VERSION_MINOR,\
+USBVISION_DRIVER_VERSION_PATCHLEVEL)
+#define USBVISION_VERSION_STRING __stringify(USBVISION_DRIVER_VERSION_MAJOR)\
+ "." __stringify(USBVISION_DRIVER_VERSION_MINOR)\
+ "." __stringify(USBVISION_DRIVER_VERSION_PATCHLEVEL)
 
 #define	ENABLE_HEXDUMP	0	/* Enable if you need it */
 
 
 #ifdef USBVISION_DEBUG
 	#define PDEBUG(level, fmt, args...) \
-		if (video_debug & (level)) info("[%s:%d] " fmt, __PRETTY_FUNCTION__, __LINE__ , ## args)
+		if (video_debug & (level)) \
+			info("[%s:%d] " fmt, __PRETTY_FUNCTION__, __LINE__ ,\
+				## args)
 #else
 	#define PDEBUG(level, fmt, args...) do {} while(0)
 #endif
 
-#define DBG_IOCTL	1<<0
 #define DBG_IO		1<<1
 #define DBG_PROBE	1<<2
 #define DBG_MMAP	1<<3
@@ -108,7 +115,8 @@
 #define goto2next(str)	while(*str!=' ') str++; while(*str==' ') str++;
 
 
-static int usbvision_nr = 0;			// sequential number of usbvision device
+/* sequential number of usbvision device */
+static int usbvision_nr = 0;
 
 static struct usbvision_v4l2_format_st usbvision_v4l2_format[] = {
 	{ 1, 1,  8, V4L2_PIX_FMT_GREY    , "GREY" },
@@ -121,55 +129,32 @@
 	{ 1, 2, 16, V4L2_PIX_FMT_YUV422P , "YUV422P" }
 };
 
-/* supported tv norms */
-static struct usbvision_tvnorm tvnorms[] = {
-	{
-		.name = "PAL",
-		.id = V4L2_STD_PAL,
-	}, {
-		.name = "NTSC",
-		.id = V4L2_STD_NTSC,
-	}, {
-		 .name = "SECAM",
-		 .id = V4L2_STD_SECAM,
-	}, {
-		.name = "PAL-M",
-		.id = V4L2_STD_PAL_M,
-	}
-};
-
-#define TVNORMS ARRAY_SIZE(tvnorms)
-
-// Function prototypes
+/* Function prototypes */
 static void usbvision_release(struct usb_usbvision *usbvision);
 
-// Default initalization of device driver parameters
-static int isocMode = ISOC_MODE_COMPRESS;		// Set the default format for ISOC endpoint
-static int video_debug = 0;				// Set the default Debug Mode of the device driver
-static int PowerOnAtOpen = 1;				// Set the default device to power on at startup
-static int video_nr = -1;				// Sequential Number of Video Device
-static int radio_nr = -1;				// Sequential Number of Radio Device
-static int vbi_nr = -1;					// Sequential Number of VBI Device
+/* Default initialization of device driver parameters */
+/* Set the default format for ISOC endpoint */
+static int isocMode = ISOC_MODE_COMPRESS;
+/* Set the default Debug Mode of the device driver */
+static int video_debug = 0;
+/* Set the default device to power on at startup */
+static int PowerOnAtOpen = 1;
+/* Sequential Number of Video Device */
+static int video_nr = -1;
+/* Sequential Number of Radio Device */
+static int radio_nr = -1;
+/* Sequential Number of VBI Device */
+static int vbi_nr = -1;
 
-// Grab parameters for the device driver
+/* Grab parameters for the device driver */
 
-#if defined(module_param)                               // Showing parameters under SYSFS
+/* Showing parameters under SYSFS */
 module_param(isocMode, int, 0444);
 module_param(video_debug, int, 0444);
 module_param(PowerOnAtOpen, int, 0444);
 module_param(video_nr, int, 0444);
 module_param(radio_nr, int, 0444);
 module_param(vbi_nr, int, 0444);
-#else							// Old Style
-MODULE_PARAM(isocMode, "i");
-MODULE_PARM(video_debug, "i");				// Grab the Debug Mode of the device driver
-MODULE_PARM(adjustCompression, "i");			// Grab the compression to be adaptive
-MODULE_PARM(PowerOnAtOpen, "i");			// Grab the device to power on at startup
-MODULE_PARM(SwitchSVideoInput, "i");			// To help people with Black and White output with using s-video input.  Some cables and input device are wired differently.
-MODULE_PARM(video_nr, "i");				// video_nr option allows to specify a certain /dev/videoX device (like /dev/video0 or /dev/video1 ...)
-MODULE_PARM(radio_nr, "i");				// radio_nr option allows to specify a certain /dev/radioX device (like /dev/radio0 or /dev/radio1 ...)
-MODULE_PARM(vbi_nr, "i");				// vbi_nr option allows to specify a certain /dev/vbiX device (like /dev/vbi0 or /dev/vbi1 ...)
-#endif
 
 MODULE_PARM_DESC(isocMode, " Set the default format for ISOC endpoint.  Default: 0x60 (Compression On)");
 MODULE_PARM_DESC(video_debug, " Set the default Debug Mode of the device driver.  Default: 0 (Off)");
@@ -187,19 +172,21 @@
 MODULE_ALIAS(DRIVER_ALIAS);
 
 
-/****************************************************************************************/
-/* SYSFS Code - Copied from the stv680.c usb module.					*/
-/* Device information is located at /sys/class/video4linux/video0			*/
-/* Device parameters information is located at /sys/module/usbvision                    */
-/* Device USB Information is located at /sys/bus/usb/drivers/USBVision Video Grabber    */
-/****************************************************************************************/
+/*****************************************************************************/
+/* SYSFS Code - Copied from the stv680.c usb module.			     */
+/* Device information is located at /sys/class/video4linux/video0            */
+/* Device parameters information is located at /sys/module/usbvision         */
+/* Device USB Information is located at                                      */
+/*   /sys/bus/usb/drivers/USBVision Video Grabber                            */
+/*****************************************************************************/
 
 
 #define YES_NO(x) ((x) ? "Yes" : "No")
 
 static inline struct usb_usbvision *cd_to_usbvision(struct class_device *cd)
 {
-	struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+	struct video_device *vdev =
+		container_of(cd, struct video_device, class_dev);
 	return video_get_drvdata(vdev);
 }
 
@@ -211,15 +198,18 @@
 
 static ssize_t show_model(struct class_device *cd, char *buf)
 {
-	struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+	struct video_device *vdev =
+		container_of(cd, struct video_device, class_dev);
 	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
-	return sprintf(buf, "%s\n", usbvision_device_data[usbvision->DevModel].ModelString);
+	return sprintf(buf, "%s\n",
+		       usbvision_device_data[usbvision->DevModel].ModelString);
 }
 static CLASS_DEVICE_ATTR(model, S_IRUGO, show_model, NULL);
 
 static ssize_t show_hue(struct class_device *cd, char *buf)
 {
-	struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+	struct video_device *vdev =
+		container_of(cd, struct video_device, class_dev);
 	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
 	struct v4l2_control ctrl;
 	ctrl.id = V4L2_CID_HUE;
@@ -232,7 +222,8 @@
 
 static ssize_t show_contrast(struct class_device *cd, char *buf)
 {
-	struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+	struct video_device *vdev =
+		container_of(cd, struct video_device, class_dev);
 	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
 	struct v4l2_control ctrl;
 	ctrl.id = V4L2_CID_CONTRAST;
@@ -245,7 +236,8 @@
 
 static ssize_t show_brightness(struct class_device *cd, char *buf)
 {
-	struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+	struct video_device *vdev =
+		container_of(cd, struct video_device, class_dev);
 	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
 	struct v4l2_control ctrl;
 	ctrl.id = V4L2_CID_BRIGHTNESS;
@@ -258,7 +250,8 @@
 
 static ssize_t show_saturation(struct class_device *cd, char *buf)
 {
-	struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+	struct video_device *vdev =
+		container_of(cd, struct video_device, class_dev);
 	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
 	struct v4l2_control ctrl;
 	ctrl.id = V4L2_CID_SATURATION;
@@ -271,23 +264,28 @@
 
 static ssize_t show_streaming(struct class_device *cd, char *buf)
 {
-	struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+	struct video_device *vdev =
+		container_of(cd, struct video_device, class_dev);
 	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
-	return sprintf(buf, "%s\n", YES_NO(usbvision->streaming==Stream_On?1:0));
+	return sprintf(buf, "%s\n",
+		       YES_NO(usbvision->streaming==Stream_On?1:0));
 }
 static CLASS_DEVICE_ATTR(streaming, S_IRUGO, show_streaming, NULL);
 
 static ssize_t show_compression(struct class_device *cd, char *buf)
 {
-	struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+	struct video_device *vdev =
+		container_of(cd, struct video_device, class_dev);
 	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
-	return sprintf(buf, "%s\n", YES_NO(usbvision->isocMode==ISOC_MODE_COMPRESS));
+	return sprintf(buf, "%s\n",
+		       YES_NO(usbvision->isocMode==ISOC_MODE_COMPRESS));
 }
 static CLASS_DEVICE_ATTR(compression, S_IRUGO, show_compression, NULL);
 
 static ssize_t show_device_bridge(struct class_device *cd, char *buf)
 {
-	struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+	struct video_device *vdev =
+		container_of(cd, struct video_device, class_dev);
 	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
 	return sprintf(buf, "%d\n", usbvision->bridgeType);
 }
@@ -376,7 +374,8 @@
 static int usbvision_v4l2_open(struct inode *inode, struct file *file)
 {
 	struct video_device *dev = video_devdata(file);
-	struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
 	int errCode = 0;
 
 	PDEBUG(DBG_IO, "open");
@@ -390,7 +389,8 @@
 		/* Allocate memory for the scratch ring buffer */
 		errCode = usbvision_scratch_alloc(usbvision);
 		if (isocMode==ISOC_MODE_COMPRESS) {
-			/* Allocate intermediate decompression buffers only if needed */
+			/* Allocate intermediate decompression buffers
+			   only if needed */
 			errCode = usbvision_decompress_alloc(usbvision);
 		}
 		if (errCode) {
@@ -421,11 +421,10 @@
 		if (!errCode) {
 			usbvision_begin_streaming(usbvision);
 			errCode = usbvision_init_isoc(usbvision);
-			/* device needs to be initialized before isoc transfer */
+			/* device must be initialized before isoc transfer */
 			usbvision_muxsel(usbvision,0);
 			usbvision->user++;
-		}
-		else {
+		} else {
 			if (PowerOnAtOpen) {
 				usbvision_i2c_unregister(usbvision);
 				usbvision_power_off(usbvision);
@@ -456,7 +455,8 @@
 static int usbvision_v4l2_close(struct inode *inode, struct file *file)
 {
 	struct video_device *dev = video_devdata(file);
-	struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
 
 	PDEBUG(DBG_IO, "close");
 	down(&usbvision->lock);
@@ -473,7 +473,8 @@
 	usbvision->user--;
 
 	if (PowerOnAtOpen) {
-		/* power off in a little while to avoid off/on every close/open short sequences */
+		/* power off in a little while
+		   to avoid off/on every close/open short sequences */
 		usbvision_set_powerOffTimer(usbvision);
 		usbvision->initialized = 0;
 	}
@@ -498,583 +499,612 @@
  * This is part of Video 4 Linux API. The procedure handles ioctl() calls.
  *
  */
-static int usbvision_v4l2_do_ioctl(struct inode *inode, struct file *file,
-				 unsigned int cmd, void *arg)
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int vidioc_g_register (struct file *file, void *priv,
+				struct v4l2_register *reg)
 {
 	struct video_device *dev = video_devdata(file);
-	struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	int errCode;
 
-	if (!USBVISION_IS_OPERATIONAL(usbvision))
-		return -EFAULT;
-
-	switch (cmd) {
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-		/* ioctls to allow direct acces to the NT100x registers */
-		case VIDIOC_DBG_G_REGISTER:
-		case VIDIOC_DBG_S_REGISTER:
-		{
-			struct v4l2_register *reg = arg;
-			int errCode;
-
-			if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
-				return -EINVAL;
-			if (!capable(CAP_SYS_ADMIN))
-				return -EPERM;
-			/* NT100x has a 8-bit register space */
-			if (cmd == VIDIOC_DBG_G_REGISTER)
-				errCode = usbvision_read_reg(usbvision, reg->reg&0xff);
-			else
-				errCode = usbvision_write_reg(usbvision, reg->reg&0xff, reg->val);
-			if (errCode < 0) {
-				err("%s: VIDIOC_DBG_%c_REGISTER failed: error %d", __FUNCTION__,
-				    cmd == VIDIOC_DBG_G_REGISTER ? 'G' : 'S', errCode);
-				return errCode;
-			}
-			if (cmd == VIDIOC_DBG_S_REGISTER)
-				reg->val = (u8)errCode;
-
-			PDEBUG(DBG_IOCTL, "VIDIOC_DBG_%c_REGISTER reg=0x%02X, value=0x%02X",
-			       cmd == VIDIOC_DBG_G_REGISTER ? 'G' : 'S',
-			       (unsigned int)reg->reg, (unsigned int)reg->val);
-			return 0;
-		}
-#endif
-		case VIDIOC_QUERYCAP:
-		{
-			struct v4l2_capability *vc=arg;
-
-			memset(vc, 0, sizeof(*vc));
-			strlcpy(vc->driver, "USBVision", sizeof(vc->driver));
-			strlcpy(vc->card, usbvision_device_data[usbvision->DevModel].ModelString,
-				sizeof(vc->card));
-			strlcpy(vc->bus_info, usbvision->dev->dev.bus_id,
-				sizeof(vc->bus_info));
-			vc->version = USBVISION_DRIVER_VERSION;
-			vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
-				V4L2_CAP_AUDIO |
-				V4L2_CAP_READWRITE |
-				V4L2_CAP_STREAMING |
-				(usbvision->have_tuner ? V4L2_CAP_TUNER : 0);
-			PDEBUG(DBG_IOCTL, "VIDIOC_QUERYCAP");
-			return 0;
-		}
-		case VIDIOC_ENUMINPUT:
-		{
-			struct v4l2_input *vi = arg;
-			int chan;
-
-			if ((vi->index >= usbvision->video_inputs) || (vi->index < 0) )
-				return -EINVAL;
-			if (usbvision->have_tuner) {
-				chan = vi->index;
-			}
-			else {
-				chan = vi->index + 1; //skip Television string
-			}
-			switch(chan) {
-				case 0:
-					if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
-						strcpy(vi->name, "White Video Input");
-					}
-					else {
-						strcpy(vi->name, "Television");
-						vi->type = V4L2_INPUT_TYPE_TUNER;
-						vi->audioset = 1;
-						vi->tuner = chan;
-						vi->std = V4L2_STD_PAL | V4L2_STD_NTSC | V4L2_STD_SECAM;
-					}
-					break;
-				case 1:
-					vi->type = V4L2_INPUT_TYPE_CAMERA;
-					if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
-						strcpy(vi->name, "Green Video Input");
-					}
-					else {
-						strcpy(vi->name, "Composite Video Input");
-					}
-					vi->std = V4L2_STD_PAL;
-					break;
-				case 2:
-					vi->type = V4L2_INPUT_TYPE_CAMERA;
-					if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
-						strcpy(vi->name, "Yellow Video Input");
-					}
-					else {
-					strcpy(vi->name, "S-Video Input");
-					}
-					vi->std = V4L2_STD_PAL;
-					break;
-				case 3:
-					vi->type = V4L2_INPUT_TYPE_CAMERA;
-					strcpy(vi->name, "Red Video Input");
-					vi->std = V4L2_STD_PAL;
-					break;
-			}
-			PDEBUG(DBG_IOCTL, "VIDIOC_ENUMINPUT name=%s:%d tuners=%d type=%d norm=%x",
-			       vi->name, vi->index, vi->tuner,vi->type,(int)vi->std);
-			return 0;
-		}
-		case VIDIOC_ENUMSTD:
-		{
-			struct v4l2_standard *e = arg;
-			unsigned int i;
-			int ret;
-
-			i = e->index;
-			if (i >= TVNORMS)
-				return -EINVAL;
-			ret = v4l2_video_std_construct(e, tvnorms[e->index].id,
-						       tvnorms[e->index].name);
-			e->index = i;
-			if (ret < 0)
-				return ret;
-			return 0;
-		}
-		case VIDIOC_G_INPUT:
-		{
-			int *input = arg;
-			*input = usbvision->ctl_input;
-			return 0;
-		}
-		case VIDIOC_S_INPUT:
-		{
-			int *input = arg;
-			if ((*input >= usbvision->video_inputs) || (*input < 0) )
-				return -EINVAL;
-			usbvision->ctl_input = *input;
-
-			down(&usbvision->lock);
-			usbvision_muxsel(usbvision, usbvision->ctl_input);
-			usbvision_set_input(usbvision);
-			usbvision_set_output(usbvision, usbvision->curwidth, usbvision->curheight);
-			up(&usbvision->lock);
-			return 0;
-		}
-		case VIDIOC_G_STD:
-		{
-			v4l2_std_id *id = arg;
-
-			*id = usbvision->tvnorm->id;
-
-			PDEBUG(DBG_IOCTL, "VIDIOC_G_STD std_id=%s", usbvision->tvnorm->name);
-			return 0;
-		}
-		case VIDIOC_S_STD:
-		{
-			v4l2_std_id *id = arg;
-			unsigned int i;
-
-			for (i = 0; i < TVNORMS; i++)
-				if (*id == tvnorms[i].id)
-					break;
-			if (i == TVNORMS)
-				for (i = 0; i < TVNORMS; i++)
-					if (*id & tvnorms[i].id)
-						break;
-			if (i == TVNORMS)
-				return -EINVAL;
-
-			down(&usbvision->lock);
-			usbvision->tvnorm = &tvnorms[i];
-
-			call_i2c_clients(usbvision, VIDIOC_S_STD,
-					 &usbvision->tvnorm->id);
-
-			up(&usbvision->lock);
-
-			PDEBUG(DBG_IOCTL, "VIDIOC_S_STD std_id=%s", usbvision->tvnorm->name);
-			return 0;
-		}
-		case VIDIOC_G_TUNER:
-		{
-			struct v4l2_tuner *vt = arg;
-
-			if (!usbvision->have_tuner || vt->index)	// Only tuner 0
-				return -EINVAL;
-			strcpy(vt->name, "Television");
-			/* Let clients fill in the remainder of this struct */
-			call_i2c_clients(usbvision,VIDIOC_G_TUNER,vt);
-
-			PDEBUG(DBG_IOCTL, "VIDIOC_G_TUNER signal=%x, afc=%x",vt->signal,vt->afc);
-			return 0;
-		}
-		case VIDIOC_S_TUNER:
-		{
-			struct v4l2_tuner *vt = arg;
-
-			// Only no or one tuner for now
-			if (!usbvision->have_tuner || vt->index)
-				return -EINVAL;
-			/* let clients handle this */
-			call_i2c_clients(usbvision,VIDIOC_S_TUNER,vt);
-
-			PDEBUG(DBG_IOCTL, "VIDIOC_S_TUNER");
-			return 0;
-		}
-		case VIDIOC_G_FREQUENCY:
-		{
-			struct v4l2_frequency *freq = arg;
-
-			freq->tuner = 0; // Only one tuner
-			freq->type = V4L2_TUNER_ANALOG_TV;
-			freq->frequency = usbvision->freq;
-			PDEBUG(DBG_IOCTL, "VIDIOC_G_FREQUENCY freq=0x%X", (unsigned)freq->frequency);
-			return 0;
-		}
-		case VIDIOC_S_FREQUENCY:
-		{
-			struct v4l2_frequency *freq = arg;
-
-			// Only no or one tuner for now
-			if (!usbvision->have_tuner || freq->tuner)
-				return -EINVAL;
-
-			usbvision->freq = freq->frequency;
-			call_i2c_clients(usbvision, cmd, freq);
-			PDEBUG(DBG_IOCTL, "VIDIOC_S_FREQUENCY freq=0x%X", (unsigned)freq->frequency);
-			return 0;
-		}
-		case VIDIOC_G_AUDIO:
-		{
-			struct v4l2_audio *v = arg;
-			memset(v,0, sizeof(v));
-			strcpy(v->name, "TV");
-			PDEBUG(DBG_IOCTL, "VIDIOC_G_AUDIO");
-			return 0;
-		}
-		case VIDIOC_S_AUDIO:
-		{
-			struct v4l2_audio *v = arg;
-			if(v->index) {
-				return -EINVAL;
-			}
-			PDEBUG(DBG_IOCTL, "VIDIOC_S_AUDIO");
-			return 0;
-		}
-		case VIDIOC_QUERYCTRL:
-		{
-			struct v4l2_queryctrl *ctrl = arg;
-			int id=ctrl->id;
-
-			memset(ctrl,0,sizeof(*ctrl));
-			ctrl->id=id;
-
-			call_i2c_clients(usbvision, cmd, arg);
-
-			if (ctrl->type)
-				return 0;
-			else
-				return -EINVAL;
-
-			PDEBUG(DBG_IOCTL,"VIDIOC_QUERYCTRL id=%x value=%x",ctrl->id,ctrl->type);
-		}
-		case VIDIOC_G_CTRL:
-		{
-			struct v4l2_control *ctrl = arg;
-			call_i2c_clients(usbvision, VIDIOC_G_CTRL, ctrl);
-			PDEBUG(DBG_IOCTL,"VIDIOC_G_CTRL id=%x value=%x",ctrl->id,ctrl->value);
-			return 0;
-		}
-		case VIDIOC_S_CTRL:
-		{
-			struct v4l2_control *ctrl = arg;
-
-			PDEBUG(DBG_IOCTL, "VIDIOC_S_CTRL id=%x value=%x",ctrl->id,ctrl->value);
-			call_i2c_clients(usbvision, VIDIOC_S_CTRL, ctrl);
-			return 0;
-		}
-		case VIDIOC_REQBUFS:
-		{
-			struct v4l2_requestbuffers *vr = arg;
-			int ret;
-
-			RESTRICT_TO_RANGE(vr->count,1,USBVISION_NUMFRAMES);
-
-			// Check input validity : the user must do a VIDEO CAPTURE and MMAP method.
-			if((vr->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
-			   (vr->memory != V4L2_MEMORY_MMAP))
-				return -EINVAL;
-
-			if(usbvision->streaming == Stream_On) {
-				if ((ret = usbvision_stream_interrupt(usbvision)))
-				    return ret;
-			}
-
-			usbvision_frames_free(usbvision);
-			usbvision_empty_framequeues(usbvision);
-			vr->count = usbvision_frames_alloc(usbvision,vr->count);
-
-			usbvision->curFrame = NULL;
-
-			PDEBUG(DBG_IOCTL, "VIDIOC_REQBUFS count=%d",vr->count);
-			return 0;
-		}
-		case VIDIOC_QUERYBUF:
-		{
-			struct v4l2_buffer *vb = arg;
-			struct usbvision_frame *frame;
-
-			// FIXME : must control that buffers are mapped (VIDIOC_REQBUFS has been called)
-
-			if(vb->type != V4L2_CAP_VIDEO_CAPTURE) {
-				return -EINVAL;
-			}
-			if(vb->index>=usbvision->num_frames)  {
-				return -EINVAL;
-			}
-			// Updating the corresponding frame state
-			vb->flags = 0;
-			frame = &usbvision->frame[vb->index];
-			if(frame->grabstate >= FrameState_Ready)
-				vb->flags |= V4L2_BUF_FLAG_QUEUED;
-			if(frame->grabstate >= FrameState_Done)
-				vb->flags |= V4L2_BUF_FLAG_DONE;
-			if(frame->grabstate == FrameState_Unused)
-				vb->flags |= V4L2_BUF_FLAG_MAPPED;
-			vb->memory = V4L2_MEMORY_MMAP;
-
-			vb->m.offset = vb->index*PAGE_ALIGN(usbvision->max_frame_size);
-
-			vb->memory = V4L2_MEMORY_MMAP;
-			vb->field = V4L2_FIELD_NONE;
-			vb->length = usbvision->curwidth*usbvision->curheight*usbvision->palette.bytes_per_pixel;
-			vb->timestamp = usbvision->frame[vb->index].timestamp;
-			vb->sequence = usbvision->frame[vb->index].sequence;
-			return 0;
-		}
-		case VIDIOC_QBUF:
-		{
-			struct v4l2_buffer *vb = arg;
-			struct usbvision_frame *frame;
-			unsigned long lock_flags;
-
-			// FIXME : works only on VIDEO_CAPTURE MODE, MMAP.
-			if(vb->type != V4L2_CAP_VIDEO_CAPTURE) {
-				return -EINVAL;
-			}
-			if(vb->index>=usbvision->num_frames)  {
-				return -EINVAL;
-			}
-
-			frame = &usbvision->frame[vb->index];
-
-			if (frame->grabstate != FrameState_Unused) {
-				return -EAGAIN;
-			}
-
-			/* Mark it as ready and enqueue frame */
-			frame->grabstate = FrameState_Ready;
-			frame->scanstate = ScanState_Scanning;
-			frame->scanlength = 0;	/* Accumulated in usbvision_parse_data() */
-
-			vb->flags &= ~V4L2_BUF_FLAG_DONE;
-
-			/* set v4l2_format index */
-			frame->v4l2_format = usbvision->palette;
-
-			spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
-			list_add_tail(&usbvision->frame[vb->index].frame, &usbvision->inqueue);
-			spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
-
-			PDEBUG(DBG_IOCTL, "VIDIOC_QBUF frame #%d",vb->index);
-			return 0;
-		}
-		case VIDIOC_DQBUF:
-		{
-			struct v4l2_buffer *vb = arg;
-			int ret;
-			struct usbvision_frame *f;
-			unsigned long lock_flags;
-
-			if (vb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
-				return -EINVAL;
-
-			if (list_empty(&(usbvision->outqueue))) {
-				if (usbvision->streaming == Stream_Idle)
-					return -EINVAL;
-				ret = wait_event_interruptible
-					(usbvision->wait_frame,
-					 !list_empty(&(usbvision->outqueue)));
-				if (ret)
-					return ret;
-			}
-
-			spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
-			f = list_entry(usbvision->outqueue.next,
-				       struct usbvision_frame, frame);
-			list_del(usbvision->outqueue.next);
-			spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
-
-			f->grabstate = FrameState_Unused;
-
-			vb->memory = V4L2_MEMORY_MMAP;
-			vb->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE;
-			vb->index = f->index;
-			vb->sequence = f->sequence;
-			vb->timestamp = f->timestamp;
-			vb->field = V4L2_FIELD_NONE;
-			vb->bytesused = f->scanlength;
-
-			return 0;
-		}
-		case VIDIOC_STREAMON:
-		{
-			int b=V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-			usbvision->streaming = Stream_On;
-
-			call_i2c_clients(usbvision,VIDIOC_STREAMON , &b);
-
-			PDEBUG(DBG_IOCTL, "VIDIOC_STREAMON");
-
-			return 0;
-		}
-		case VIDIOC_STREAMOFF:
-		{
-			int *type = arg;
-			int b=V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-			if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
-				return -EINVAL;
-
-			if(usbvision->streaming == Stream_On) {
-				usbvision_stream_interrupt(usbvision);
-				// Stop all video streamings
-				call_i2c_clients(usbvision,VIDIOC_STREAMOFF , &b);
-			}
-			usbvision_empty_framequeues(usbvision);
-
-			PDEBUG(DBG_IOCTL, "VIDIOC_STREAMOFF");
-			return 0;
-		}
-		case VIDIOC_ENUM_FMT:
-		{
-			struct v4l2_fmtdesc *vfd = arg;
-
-			if(vfd->index>=USBVISION_SUPPORTED_PALETTES-1) {
-				return -EINVAL;
-			}
-			vfd->flags = 0;
-			vfd->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-			strcpy(vfd->description,usbvision_v4l2_format[vfd->index].desc);
-			vfd->pixelformat = usbvision_v4l2_format[vfd->index].format;
-			memset(vfd->reserved, 0, sizeof(vfd->reserved));
-			return 0;
-		}
-		case VIDIOC_G_FMT:
-		{
-			struct v4l2_format *vf = arg;
-
-			switch (vf->type) {
-				case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-				{
-					vf->fmt.pix.width = usbvision->curwidth;
-					vf->fmt.pix.height = usbvision->curheight;
-					vf->fmt.pix.pixelformat = usbvision->palette.format;
-					vf->fmt.pix.bytesperline =  usbvision->curwidth*usbvision->palette.bytes_per_pixel;
-					vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline*usbvision->curheight;
-					vf->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
-					vf->fmt.pix.field = V4L2_FIELD_NONE; /* Always progressive image */
-					PDEBUG(DBG_IOCTL, "VIDIOC_G_FMT w=%d, h=%d, format=%s",
-					       vf->fmt.pix.width, vf->fmt.pix.height,usbvision->palette.desc);
-					return 0;
-				}
-				default:
-					PDEBUG(DBG_IOCTL, "VIDIOC_G_FMT invalid type %d",vf->type);
-					return -EINVAL;
-			}
-			return 0;
-		}
-		case VIDIOC_TRY_FMT:
-		case VIDIOC_S_FMT:
-		{
-			struct v4l2_format *vf = arg;
-			int formatIdx,ret;
-
-			switch(vf->type) {
-				case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-				{
-					/* Find requested format in available ones */
-					for(formatIdx=0;formatIdx<USBVISION_SUPPORTED_PALETTES;formatIdx++) {
-						if(vf->fmt.pix.pixelformat == usbvision_v4l2_format[formatIdx].format) {
-							usbvision->palette = usbvision_v4l2_format[formatIdx];
-							break;
-						}
-					}
-					/* robustness */
-					if(formatIdx == USBVISION_SUPPORTED_PALETTES) {
-						return -EINVAL;
-					}
-					RESTRICT_TO_RANGE(vf->fmt.pix.width, MIN_FRAME_WIDTH, MAX_FRAME_WIDTH);
-					RESTRICT_TO_RANGE(vf->fmt.pix.height, MIN_FRAME_HEIGHT, MAX_FRAME_HEIGHT);
-
-					vf->fmt.pix.bytesperline = vf->fmt.pix.width*usbvision->palette.bytes_per_pixel;
-					vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline*vf->fmt.pix.height;
-
-					if(cmd == VIDIOC_TRY_FMT) {
-						PDEBUG(DBG_IOCTL, "VIDIOC_TRY_FMT grabdisplay w=%d, h=%d, format=%s",
-					       vf->fmt.pix.width, vf->fmt.pix.height,usbvision->palette.desc);
-						return 0;
-					}
-
-					/* stop io in case it is already in progress */
-					if(usbvision->streaming == Stream_On) {
-						if ((ret = usbvision_stream_interrupt(usbvision)))
-							return ret;
-					}
-					usbvision_frames_free(usbvision);
-					usbvision_empty_framequeues(usbvision);
-
-					usbvision->curFrame = NULL;
-
-					// by now we are committed to the new data...
-					down(&usbvision->lock);
-					usbvision_set_output(usbvision, vf->fmt.pix.width, vf->fmt.pix.height);
-					up(&usbvision->lock);
-
-					PDEBUG(DBG_IOCTL, "VIDIOC_S_FMT grabdisplay w=%d, h=%d, format=%s",
-					       vf->fmt.pix.width, vf->fmt.pix.height,usbvision->palette.desc);
-					return 0;
-				}
-				default:
-					return -EINVAL;
-			}
-		}
-		default:
-			return -ENOIOCTLCMD;
+	if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
+		return -EINVAL;
+	/* NT100x has a 8-bit register space */
+	errCode = usbvision_read_reg(usbvision, reg->reg&0xff);
+	if (errCode < 0) {
+		err("%s: VIDIOC_DBG_G_REGISTER failed: error %d",
+		    __FUNCTION__, errCode);
+		return errCode;
 	}
 	return 0;
 }
 
-static int usbvision_v4l2_ioctl(struct inode *inode, struct file *file,
-		       unsigned int cmd, unsigned long arg)
+static int vidioc_s_register (struct file *file, void *priv,
+				struct v4l2_register *reg)
 {
-	return video_usercopy(inode, file, cmd, arg, usbvision_v4l2_do_ioctl);
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	int errCode;
+
+	if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
+		return -EINVAL;
+	/* NT100x has a 8-bit register space */
+	errCode = usbvision_write_reg(usbvision, reg->reg&0xff, reg->val);
+	if (errCode < 0) {
+		err("%s: VIDIOC_DBG_S_REGISTER failed: error %d",
+		    __FUNCTION__, errCode);
+		return errCode;
+	}
+	return 0;
+}
+#endif
+
+static int vidioc_querycap (struct file *file, void  *priv,
+					struct v4l2_capability *vc)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+
+	strlcpy(vc->driver, "USBVision", sizeof(vc->driver));
+	strlcpy(vc->card,
+		usbvision_device_data[usbvision->DevModel].ModelString,
+		sizeof(vc->card));
+	strlcpy(vc->bus_info, usbvision->dev->dev.bus_id,
+		sizeof(vc->bus_info));
+	vc->version = USBVISION_DRIVER_VERSION;
+	vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+		V4L2_CAP_AUDIO |
+		V4L2_CAP_READWRITE |
+		V4L2_CAP_STREAMING |
+		(usbvision->have_tuner ? V4L2_CAP_TUNER : 0);
+	return 0;
 }
 
+static int vidioc_enum_input (struct file *file, void *priv,
+				struct v4l2_input *vi)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	int chan;
+
+	if ((vi->index >= usbvision->video_inputs) || (vi->index < 0) )
+		return -EINVAL;
+	if (usbvision->have_tuner) {
+		chan = vi->index;
+	} else {
+		chan = vi->index + 1; /*skip Television string*/
+	}
+	/* Determine the requested input characteristics
+	   specific for each usbvision card model */
+	switch(chan) {
+	case 0:
+		if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
+			strcpy(vi->name, "White Video Input");
+		} else {
+			strcpy(vi->name, "Television");
+			vi->type = V4L2_INPUT_TYPE_TUNER;
+			vi->audioset = 1;
+			vi->tuner = chan;
+			vi->std = USBVISION_NORMS;
+		}
+		break;
+	case 1:
+		vi->type = V4L2_INPUT_TYPE_CAMERA;
+		if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
+			strcpy(vi->name, "Green Video Input");
+		} else {
+			strcpy(vi->name, "Composite Video Input");
+		}
+		vi->std = V4L2_STD_PAL;
+		break;
+	case 2:
+		vi->type = V4L2_INPUT_TYPE_CAMERA;
+		if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
+			strcpy(vi->name, "Yellow Video Input");
+		} else {
+			strcpy(vi->name, "S-Video Input");
+		}
+		vi->std = V4L2_STD_PAL;
+		break;
+	case 3:
+		vi->type = V4L2_INPUT_TYPE_CAMERA;
+		strcpy(vi->name, "Red Video Input");
+		vi->std = V4L2_STD_PAL;
+		break;
+	}
+	return 0;
+}
+
+static int vidioc_g_input (struct file *file, void *priv, unsigned int *input)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+
+	*input = usbvision->ctl_input;
+	return 0;
+}
+
+static int vidioc_s_input (struct file *file, void *priv, unsigned int input)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+
+	if ((input >= usbvision->video_inputs) || (input < 0) )
+		return -EINVAL;
+
+	down(&usbvision->lock);
+	usbvision_muxsel(usbvision, input);
+	usbvision_set_input(usbvision);
+	usbvision_set_output(usbvision,
+			     usbvision->curwidth,
+			     usbvision->curheight);
+	up(&usbvision->lock);
+	return 0;
+}
+
+static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *id)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	usbvision->tvnormId=*id;
+
+	down(&usbvision->lock);
+	call_i2c_clients(usbvision, VIDIOC_S_STD,
+			 &usbvision->tvnormId);
+	up(&usbvision->lock);
+	/* propagate the change to the decoder */
+	usbvision_muxsel(usbvision, usbvision->ctl_input);
+
+	return 0;
+}
+
+static int vidioc_g_tuner (struct file *file, void *priv,
+				struct v4l2_tuner *vt)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+
+	if (!usbvision->have_tuner || vt->index)	// Only tuner 0
+		return -EINVAL;
+	if(usbvision->radio) {
+		strcpy(vt->name, "Radio");
+		vt->type = V4L2_TUNER_RADIO;
+	} else {
+		strcpy(vt->name, "Television");
+	}
+	/* Let clients fill in the remainder of this struct */
+	call_i2c_clients(usbvision,VIDIOC_G_TUNER,vt);
+
+	return 0;
+}
+
+static int vidioc_s_tuner (struct file *file, void *priv,
+				struct v4l2_tuner *vt)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+
+	// Only no or one tuner for now
+	if (!usbvision->have_tuner || vt->index)
+		return -EINVAL;
+	/* let clients handle this */
+	call_i2c_clients(usbvision,VIDIOC_S_TUNER,vt);
+
+	return 0;
+}
+
+static int vidioc_g_frequency (struct file *file, void *priv,
+				struct v4l2_frequency *freq)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+
+	freq->tuner = 0; // Only one tuner
+	if(usbvision->radio) {
+		freq->type = V4L2_TUNER_RADIO;
+	} else {
+		freq->type = V4L2_TUNER_ANALOG_TV;
+	}
+	freq->frequency = usbvision->freq;
+
+	return 0;
+}
+
+static int vidioc_s_frequency (struct file *file, void *priv,
+				struct v4l2_frequency *freq)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+
+	// Only no or one tuner for now
+	if (!usbvision->have_tuner || freq->tuner)
+		return -EINVAL;
+
+	usbvision->freq = freq->frequency;
+	call_i2c_clients(usbvision, VIDIOC_S_FREQUENCY, freq);
+
+	return 0;
+}
+
+static int vidioc_g_audio (struct file *file, void *priv, struct v4l2_audio *a)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+
+	memset(a,0,sizeof(*a));
+	if(usbvision->radio) {
+		strcpy(a->name,"Radio");
+	} else {
+		strcpy(a->name, "TV");
+	}
+
+	return 0;
+}
+
+static int vidioc_s_audio (struct file *file, void *fh,
+			  struct v4l2_audio *a)
+{
+	if(a->index) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vidioc_queryctrl (struct file *file, void *priv,
+			    struct v4l2_queryctrl *ctrl)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	int id=ctrl->id;
+
+	memset(ctrl,0,sizeof(*ctrl));
+	ctrl->id=id;
+
+	call_i2c_clients(usbvision, VIDIOC_QUERYCTRL, ctrl);
+
+	if (!ctrl->type)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int vidioc_g_ctrl (struct file *file, void *priv,
+				struct v4l2_control *ctrl)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	call_i2c_clients(usbvision, VIDIOC_G_CTRL, ctrl);
+
+	return 0;
+}
+
+static int vidioc_s_ctrl (struct file *file, void *priv,
+				struct v4l2_control *ctrl)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	call_i2c_clients(usbvision, VIDIOC_S_CTRL, ctrl);
+
+	return 0;
+}
+
+static int vidioc_reqbufs (struct file *file,
+			   void *priv, struct v4l2_requestbuffers *vr)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	int ret;
+
+	RESTRICT_TO_RANGE(vr->count,1,USBVISION_NUMFRAMES);
+
+	/* Check input validity:
+	   the user must do a VIDEO CAPTURE and MMAP method. */
+	if((vr->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
+	   (vr->memory != V4L2_MEMORY_MMAP))
+		return -EINVAL;
+
+	if(usbvision->streaming == Stream_On) {
+		if ((ret = usbvision_stream_interrupt(usbvision)))
+			return ret;
+	}
+
+	usbvision_frames_free(usbvision);
+	usbvision_empty_framequeues(usbvision);
+	vr->count = usbvision_frames_alloc(usbvision,vr->count);
+
+	usbvision->curFrame = NULL;
+
+	return 0;
+}
+
+static int vidioc_querybuf (struct file *file,
+			    void *priv, struct v4l2_buffer *vb)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	struct usbvision_frame *frame;
+
+	/* FIXME : must control
+	   that buffers are mapped (VIDIOC_REQBUFS has been called) */
+	if(vb->type != V4L2_CAP_VIDEO_CAPTURE) {
+		return -EINVAL;
+	}
+	if(vb->index>=usbvision->num_frames)  {
+		return -EINVAL;
+	}
+	/* Updating the corresponding frame state */
+	vb->flags = 0;
+	frame = &usbvision->frame[vb->index];
+	if(frame->grabstate >= FrameState_Ready)
+		vb->flags |= V4L2_BUF_FLAG_QUEUED;
+	if(frame->grabstate >= FrameState_Done)
+		vb->flags |= V4L2_BUF_FLAG_DONE;
+	if(frame->grabstate == FrameState_Unused)
+		vb->flags |= V4L2_BUF_FLAG_MAPPED;
+	vb->memory = V4L2_MEMORY_MMAP;
+
+	vb->m.offset = vb->index*PAGE_ALIGN(usbvision->max_frame_size);
+
+	vb->memory = V4L2_MEMORY_MMAP;
+	vb->field = V4L2_FIELD_NONE;
+	vb->length = usbvision->curwidth*
+		usbvision->curheight*
+		usbvision->palette.bytes_per_pixel;
+	vb->timestamp = usbvision->frame[vb->index].timestamp;
+	vb->sequence = usbvision->frame[vb->index].sequence;
+	return 0;
+}
+
+static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *vb)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	struct usbvision_frame *frame;
+	unsigned long lock_flags;
+
+	/* FIXME : works only on VIDEO_CAPTURE MODE, MMAP. */
+	if(vb->type != V4L2_CAP_VIDEO_CAPTURE) {
+		return -EINVAL;
+	}
+	if(vb->index>=usbvision->num_frames)  {
+		return -EINVAL;
+	}
+
+	frame = &usbvision->frame[vb->index];
+
+	if (frame->grabstate != FrameState_Unused) {
+		return -EAGAIN;
+	}
+
+	/* Mark it as ready and enqueue frame */
+	frame->grabstate = FrameState_Ready;
+	frame->scanstate = ScanState_Scanning;
+	frame->scanlength = 0;	/* Accumulated in usbvision_parse_data() */
+
+	vb->flags &= ~V4L2_BUF_FLAG_DONE;
+
+	/* set v4l2_format index */
+	frame->v4l2_format = usbvision->palette;
+
+	spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
+	list_add_tail(&usbvision->frame[vb->index].frame, &usbvision->inqueue);
+	spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
+
+	return 0;
+}
+
+static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *vb)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	int ret;
+	struct usbvision_frame *f;
+	unsigned long lock_flags;
+
+	if (vb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	if (list_empty(&(usbvision->outqueue))) {
+		if (usbvision->streaming == Stream_Idle)
+			return -EINVAL;
+		ret = wait_event_interruptible
+			(usbvision->wait_frame,
+			 !list_empty(&(usbvision->outqueue)));
+		if (ret)
+			return ret;
+	}
+
+	spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
+	f = list_entry(usbvision->outqueue.next,
+		       struct usbvision_frame, frame);
+	list_del(usbvision->outqueue.next);
+	spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
+
+	f->grabstate = FrameState_Unused;
+
+	vb->memory = V4L2_MEMORY_MMAP;
+	vb->flags = V4L2_BUF_FLAG_MAPPED |
+		V4L2_BUF_FLAG_QUEUED |
+		V4L2_BUF_FLAG_DONE;
+	vb->index = f->index;
+	vb->sequence = f->sequence;
+	vb->timestamp = f->timestamp;
+	vb->field = V4L2_FIELD_NONE;
+	vb->bytesused = f->scanlength;
+
+	return 0;
+}
+
+static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	int b=V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+	usbvision->streaming = Stream_On;
+	call_i2c_clients(usbvision,VIDIOC_STREAMON , &b);
+
+	return 0;
+}
+
+static int vidioc_streamoff(struct file *file,
+			    void *priv, enum v4l2_buf_type type)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	int b=V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	if(usbvision->streaming == Stream_On) {
+		usbvision_stream_interrupt(usbvision);
+		/* Stop all video streamings */
+		call_i2c_clients(usbvision,VIDIOC_STREAMOFF , &b);
+	}
+	usbvision_empty_framequeues(usbvision);
+
+	return 0;
+}
+
+static int vidioc_enum_fmt_cap (struct file *file, void  *priv,
+					struct v4l2_fmtdesc *vfd)
+{
+	if(vfd->index>=USBVISION_SUPPORTED_PALETTES-1) {
+		return -EINVAL;
+	}
+	vfd->flags = 0;
+	vfd->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	strcpy(vfd->description,usbvision_v4l2_format[vfd->index].desc);
+	vfd->pixelformat = usbvision_v4l2_format[vfd->index].format;
+	memset(vfd->reserved, 0, sizeof(vfd->reserved));
+	return 0;
+}
+
+static int vidioc_g_fmt_cap (struct file *file, void *priv,
+					struct v4l2_format *vf)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	vf->fmt.pix.width = usbvision->curwidth;
+	vf->fmt.pix.height = usbvision->curheight;
+	vf->fmt.pix.pixelformat = usbvision->palette.format;
+	vf->fmt.pix.bytesperline =
+		usbvision->curwidth*usbvision->palette.bytes_per_pixel;
+	vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline*usbvision->curheight;
+	vf->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+	vf->fmt.pix.field = V4L2_FIELD_NONE; /* Always progressive image */
+
+	return 0;
+}
+
+static int vidioc_try_fmt_cap (struct file *file, void *priv,
+			       struct v4l2_format *vf)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	int formatIdx;
+
+	/* Find requested format in available ones */
+	for(formatIdx=0;formatIdx<USBVISION_SUPPORTED_PALETTES;formatIdx++) {
+		if(vf->fmt.pix.pixelformat ==
+		   usbvision_v4l2_format[formatIdx].format) {
+			usbvision->palette = usbvision_v4l2_format[formatIdx];
+			break;
+		}
+	}
+	/* robustness */
+	if(formatIdx == USBVISION_SUPPORTED_PALETTES) {
+		return -EINVAL;
+	}
+	RESTRICT_TO_RANGE(vf->fmt.pix.width, MIN_FRAME_WIDTH, MAX_FRAME_WIDTH);
+	RESTRICT_TO_RANGE(vf->fmt.pix.height, MIN_FRAME_HEIGHT, MAX_FRAME_HEIGHT);
+
+	vf->fmt.pix.bytesperline = vf->fmt.pix.width*
+		usbvision->palette.bytes_per_pixel;
+	vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline*vf->fmt.pix.height;
+
+	return 0;
+}
+
+static int vidioc_s_fmt_cap(struct file *file, void *priv,
+			       struct v4l2_format *vf)
+{
+	struct video_device *dev = video_devdata(file);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
+	int ret;
+
+	if( 0 != (ret=vidioc_try_fmt_cap (file, priv, vf)) ) {
+		return ret;
+	}
+
+	/* stop io in case it is already in progress */
+	if(usbvision->streaming == Stream_On) {
+		if ((ret = usbvision_stream_interrupt(usbvision)))
+			return ret;
+	}
+	usbvision_frames_free(usbvision);
+	usbvision_empty_framequeues(usbvision);
+
+	usbvision->curFrame = NULL;
+
+	/* by now we are committed to the new data... */
+	down(&usbvision->lock);
+	usbvision_set_output(usbvision, vf->fmt.pix.width, vf->fmt.pix.height);
+	up(&usbvision->lock);
+
+	return 0;
+}
 
 static ssize_t usbvision_v4l2_read(struct file *file, char __user *buf,
 		      size_t count, loff_t *ppos)
 {
 	struct video_device *dev = video_devdata(file);
-	struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
 	int noblock = file->f_flags & O_NONBLOCK;
 	unsigned long lock_flags;
 
 	int ret,i;
 	struct usbvision_frame *frame;
 
-	PDEBUG(DBG_IO, "%s: %ld bytes, noblock=%d", __FUNCTION__, (unsigned long)count, noblock);
+	PDEBUG(DBG_IO, "%s: %ld bytes, noblock=%d", __FUNCTION__,
+	       (unsigned long)count, noblock);
 
 	if (!USBVISION_IS_OPERATIONAL(usbvision) || (buf == NULL))
 		return -EFAULT;
 
-	/* This entry point is compatible with the mmap routines so that a user can do either
-	   VIDIOC_QBUF/VIDIOC_DQBUF to get frames or call read on the device. */
+	/* This entry point is compatible with the mmap routines
+	   so that a user can do either VIDIOC_QBUF/VIDIOC_DQBUF
+	   to get frames or call read on the device. */
 	if(!usbvision->num_frames) {
-		/* First, allocate some frames to work with if this has not been done with
-		 VIDIOC_REQBUF */
+		/* First, allocate some frames to work with
+		   if this has not been done with VIDIOC_REQBUF */
 		usbvision_frames_free(usbvision);
 		usbvision_empty_framequeues(usbvision);
 		usbvision_frames_alloc(usbvision,USBVISION_NUMFRAMES);
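The vidioc_try_fmt_cap()/vidioc_s_fmt_cap() pair added above follows the
usual V4L2 negotiation contract: TRY_FMT reports what the driver would
accept (clamping width and height to the supported range), while S_FMT
commits the format and restarts I/O.  A minimal user-space sketch might
look as follows; the device node, the requested size and the pixel format
are illustrative only and must match what the driver actually advertises.

	/* needs <fcntl.h>, <string.h>, <sys/ioctl.h>, <linux/videodev2.h> */
	struct v4l2_format vf;
	int fd = open("/dev/video0", O_RDWR);	/* illustrative node */

	memset(&vf, 0, sizeof(vf));
	vf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vf.fmt.pix.width  = 320;	/* clamped by RESTRICT_TO_RANGE() above */
	vf.fmt.pix.height = 240;
	vf.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;	/* one of the driver's formats */
	if (ioctl(fd, VIDIOC_TRY_FMT, &vf) == 0)
		ioctl(fd, VIDIOC_S_FMT, &vf);	/* lands in vidioc_s_fmt_cap() */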
@@ -1086,21 +1116,24 @@
 		call_i2c_clients(usbvision,VIDIOC_STREAMON , NULL);
 	}
 
-	/* Then, enqueue as many frames as possible (like a user of VIDIOC_QBUF would do) */
+	/* Then, enqueue as many frames as possible
+	   (like a user of VIDIOC_QBUF would do) */
 	for(i=0;i<usbvision->num_frames;i++) {
 		frame = &usbvision->frame[i];
 		if(frame->grabstate == FrameState_Unused) {
 			/* Mark it as ready and enqueue frame */
 			frame->grabstate = FrameState_Ready;
 			frame->scanstate = ScanState_Scanning;
-			frame->scanlength = 0;	/* Accumulated in usbvision_parse_data() */
+			/* Accumulated in usbvision_parse_data() */
+			frame->scanlength = 0;
 
 			/* set v4l2_format index */
 			frame->v4l2_format = usbvision->palette;
 
 			spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
 			list_add_tail(&frame->frame, &usbvision->inqueue);
-			spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
+			spin_unlock_irqrestore(&usbvision->queue_lock,
+					       lock_flags);
 		}
 	}
 
@@ -1128,8 +1161,9 @@
 		return 0;
 	}
 
-	PDEBUG(DBG_IO, "%s: frmx=%d, bytes_read=%ld, scanlength=%ld", __FUNCTION__,
-		       frame->index, frame->bytes_read, frame->scanlength);
+	PDEBUG(DBG_IO, "%s: frmx=%d, bytes_read=%ld, scanlength=%ld",
+	       __FUNCTION__,
+	       frame->index, frame->bytes_read, frame->scanlength);
 
 	/* copy bytes to user space; we allow for partials reads */
 	if ((count + frame->bytes_read) > (unsigned long)frame->scanlength)
@@ -1140,10 +1174,11 @@
 	}
 
 	frame->bytes_read += count;
-	PDEBUG(DBG_IO, "%s: {copy} count used=%ld, new bytes_read=%ld", __FUNCTION__,
-		       (unsigned long)count, frame->bytes_read);
+	PDEBUG(DBG_IO, "%s: {copy} count used=%ld, new bytes_read=%ld",
+	       __FUNCTION__,
+	       (unsigned long)count, frame->bytes_read);
 
-	// For now, forget the frame if it has not been read in one shot.
+	/* For now, forget the frame if it has not been read in one shot. */
 /* 	if (frame->bytes_read >= frame->scanlength) {// All data has been read */
 		frame->bytes_read = 0;
 
@@ -1162,7 +1197,8 @@
 	u32 i;
 
 	struct video_device *dev = video_devdata(file);
-	struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
 
 	PDEBUG(DBG_MMAP, "mmap");
 
@@ -1180,11 +1216,13 @@
 	}
 
 	for (i = 0; i < usbvision->num_frames; i++) {
-		if (((PAGE_ALIGN(usbvision->max_frame_size)*i) >> PAGE_SHIFT) == vma->vm_pgoff)
+		if (((PAGE_ALIGN(usbvision->max_frame_size)*i) >> PAGE_SHIFT) ==
+		    vma->vm_pgoff)
 			break;
 	}
 	if (i == usbvision->num_frames) {
-		PDEBUG(DBG_MMAP, "mmap: user supplied mapping address is out of range");
+		PDEBUG(DBG_MMAP,
+		       "mmap: user supplied mapping address is out of range");
 		up(&usbvision->lock);
 		return -EINVAL;
 	}
@@ -1218,8 +1256,8 @@
 static int usbvision_radio_open(struct inode *inode, struct file *file)
 {
 	struct video_device *dev = video_devdata(file);
-	struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
-	struct v4l2_frequency freq;
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
 	int errCode = 0;
 
 	PDEBUG(DBG_IO, "%s:", __FUNCTION__);
@@ -1249,8 +1287,6 @@
 		// If so far no errors then we shall start the radio
 		usbvision->radio = 1;
 		call_i2c_clients(usbvision,AUDC_SET_RADIO,&usbvision->tuner_type);
-		freq.frequency = 1517; //SWR3 @ 94.8MHz
-		call_i2c_clients(usbvision, VIDIOC_S_FREQUENCY, &freq);
 		usbvision_set_audio(usbvision, USBVISION_AUDIO_RADIO);
 		usbvision->user++;
 	}
@@ -1270,7 +1306,8 @@
 static int usbvision_radio_close(struct inode *inode, struct file *file)
 {
 	struct video_device *dev = video_devdata(file);
-	struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
+	struct usb_usbvision *usbvision =
+		(struct usb_usbvision *) video_get_drvdata(dev);
 	int errCode = 0;
 
 	PDEBUG(DBG_IO, "");
@@ -1304,149 +1341,6 @@
 	return errCode;
 }
 
-static int usbvision_do_radio_ioctl(struct inode *inode, struct file *file,
-				 unsigned int cmd, void *arg)
-{
-	struct video_device *dev = video_devdata(file);
-	struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
-
-	if (!USBVISION_IS_OPERATIONAL(usbvision))
-		return -EIO;
-
-	switch (cmd) {
-		case VIDIOC_QUERYCAP:
-		{
-			struct v4l2_capability *vc=arg;
-
-			memset(vc, 0, sizeof(*vc));
-			strlcpy(vc->driver, "USBVision", sizeof(vc->driver));
-			strlcpy(vc->card, usbvision_device_data[usbvision->DevModel].ModelString,
-				sizeof(vc->card));
-			strlcpy(vc->bus_info, usbvision->dev->dev.bus_id,
-				sizeof(vc->bus_info));
-			vc->version = USBVISION_DRIVER_VERSION;
-			vc->capabilities = (usbvision->have_tuner ? V4L2_CAP_TUNER : 0);
-			PDEBUG(DBG_IO, "VIDIOC_QUERYCAP");
-			return 0;
-		}
-		case VIDIOC_QUERYCTRL:
-		{
-			struct v4l2_queryctrl *ctrl = arg;
-			int id=ctrl->id;
-
-			memset(ctrl,0,sizeof(*ctrl));
-			ctrl->id=id;
-
-			call_i2c_clients(usbvision, cmd, arg);
-			PDEBUG(DBG_IO,"VIDIOC_QUERYCTRL id=%x value=%x",ctrl->id,ctrl->type);
-
-			if (ctrl->type)
-				return 0;
-			else
-				return -EINVAL;
-
-		}
-		case VIDIOC_G_CTRL:
-		{
-			struct v4l2_control *ctrl = arg;
-
-			call_i2c_clients(usbvision, VIDIOC_G_CTRL, ctrl);
-			PDEBUG(DBG_IO,"VIDIOC_G_CTRL id=%x value=%x",ctrl->id,ctrl->value);
-			return 0;
-		}
-		case VIDIOC_S_CTRL:
-		{
-			struct v4l2_control *ctrl = arg;
-
-			call_i2c_clients(usbvision, VIDIOC_S_CTRL, ctrl);
-			PDEBUG(DBG_IO, "VIDIOC_S_CTRL id=%x value=%x",ctrl->id,ctrl->value);
-			return 0;
-		}
-		case VIDIOC_G_TUNER:
-		{
-			struct v4l2_tuner *t = arg;
-
-			if (t->index > 0)
-				return -EINVAL;
-
-			memset(t,0,sizeof(*t));
-			strcpy(t->name, "Radio");
-			t->type = V4L2_TUNER_RADIO;
-
-			/* Let clients fill in the remainder of this struct */
-			call_i2c_clients(usbvision,VIDIOC_G_TUNER,t);
-			PDEBUG(DBG_IO, "VIDIOC_G_TUNER signal=%x, afc=%x",t->signal,t->afc);
-			return 0;
-		}
-		case VIDIOC_S_TUNER:
-		{
-			struct v4l2_tuner *vt = arg;
-
-			// Only no or one tuner for now
-			if (!usbvision->have_tuner || vt->index)
-				return -EINVAL;
-			/* let clients handle this */
-			call_i2c_clients(usbvision,VIDIOC_S_TUNER,vt);
-
-			PDEBUG(DBG_IO, "VIDIOC_S_TUNER");
-			return 0;
-		}
-		case VIDIOC_G_AUDIO:
-		{
-			struct v4l2_audio *a = arg;
-
-			memset(a,0,sizeof(*a));
-			strcpy(a->name,"Radio");
-			PDEBUG(DBG_IO, "VIDIOC_G_AUDIO");
-			return 0;
-		}
-		case VIDIOC_S_AUDIO:
-		case VIDIOC_S_INPUT:
-		case VIDIOC_S_STD:
-		return 0;
-
-		case VIDIOC_G_FREQUENCY:
-		{
-			struct v4l2_frequency *f = arg;
-
-			memset(f,0,sizeof(*f));
-
-			f->type = V4L2_TUNER_RADIO;
-			f->frequency = usbvision->freq;
-			call_i2c_clients(usbvision, cmd, f);
-			PDEBUG(DBG_IO, "VIDIOC_G_FREQUENCY freq=0x%X", (unsigned)f->frequency);
-
-			return 0;
-		}
-		case VIDIOC_S_FREQUENCY:
-		{
-			struct v4l2_frequency *f = arg;
-
-			if (f->tuner != 0)
-				return -EINVAL;
-			usbvision->freq = f->frequency;
-			call_i2c_clients(usbvision, cmd, f);
-			PDEBUG(DBG_IO, "VIDIOC_S_FREQUENCY freq=0x%X", (unsigned)f->frequency);
-
-			return 0;
-		}
-		default:
-		{
-			PDEBUG(DBG_IO, "%s: Unknown command %x", __FUNCTION__, cmd);
-			return -ENOIOCTLCMD;
-		}
-	}
-	return 0;
-}
-
-
-static int usbvision_radio_ioctl(struct inode *inode, struct file *file,
-		       unsigned int cmd, unsigned long arg)
-{
-	return video_usercopy(inode, file, cmd, arg, usbvision_do_radio_ioctl);
-}
-
-
 /*
  * Here comes the stuff for vbi on usbvision based devices
  *
@@ -1454,21 +1348,21 @@
 static int usbvision_vbi_open(struct inode *inode, struct file *file)
 {
 	/* TODO */
-	return -EINVAL;
+	return -ENODEV;
 
 }
 
 static int usbvision_vbi_close(struct inode *inode, struct file *file)
 {
 	/* TODO */
-	return -EINVAL;
+	return -ENODEV;
 }
 
 static int usbvision_do_vbi_ioctl(struct inode *inode, struct file *file,
 				 unsigned int cmd, void *arg)
 {
 	/* TODO */
-	return -EINVAL;
+	return -ENOIOCTLCMD;
 }
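For the still-unimplemented VBI node these return codes describe the
situation more precisely than a blanket -EINVAL: -ENODEV says the node
exists but no device function is behind it yet, and -ENOIOCTLCMD tells
the ioctl layer that the command is simply not handled here.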
 
 static int usbvision_vbi_ioctl(struct inode *inode, struct file *file,
@@ -1489,8 +1383,11 @@
 	.release	= usbvision_v4l2_close,
 	.read		= usbvision_v4l2_read,
 	.mmap		= usbvision_v4l2_mmap,
-	.ioctl		= usbvision_v4l2_ioctl,
+	.ioctl		= video_ioctl2,
 	.llseek		= no_llseek,
+/* 	.poll          = video_poll, */
+	.compat_ioctl  = v4l_compat_ioctl32,
 };
 static struct video_device usbvision_video_template = {
 	.owner             = THIS_MODULE,
@@ -1500,6 +1397,39 @@
 	.name           = "usbvision-video",
 	.release	= video_device_release,
 	.minor		= -1,
+	.vidioc_querycap      = vidioc_querycap,
+	.vidioc_enum_fmt_cap  = vidioc_enum_fmt_cap,
+	.vidioc_g_fmt_cap     = vidioc_g_fmt_cap,
+	.vidioc_try_fmt_cap   = vidioc_try_fmt_cap,
+	.vidioc_s_fmt_cap     = vidioc_s_fmt_cap,
+	.vidioc_reqbufs       = vidioc_reqbufs,
+	.vidioc_querybuf      = vidioc_querybuf,
+	.vidioc_qbuf          = vidioc_qbuf,
+	.vidioc_dqbuf         = vidioc_dqbuf,
+	.vidioc_s_std         = vidioc_s_std,
+	.vidioc_enum_input    = vidioc_enum_input,
+	.vidioc_g_input       = vidioc_g_input,
+	.vidioc_s_input       = vidioc_s_input,
+	.vidioc_queryctrl     = vidioc_queryctrl,
+	.vidioc_g_audio       = vidioc_g_audio,
+	.vidioc_s_audio       = vidioc_s_audio,
+	.vidioc_g_ctrl        = vidioc_g_ctrl,
+	.vidioc_s_ctrl        = vidioc_s_ctrl,
+	.vidioc_streamon      = vidioc_streamon,
+	.vidioc_streamoff     = vidioc_streamoff,
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+/*  	.vidiocgmbuf          = vidiocgmbuf, */
+#endif
+	.vidioc_g_tuner       = vidioc_g_tuner,
+	.vidioc_s_tuner       = vidioc_s_tuner,
+	.vidioc_g_frequency   = vidioc_g_frequency,
+	.vidioc_s_frequency   = vidioc_s_frequency,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.vidioc_g_register    = vidioc_g_register,
+	.vidioc_s_register    = vidioc_s_register,
+#endif
+	.tvnorms              = USBVISION_NORMS,
+	.current_norm         = V4L2_STD_PAL
 };
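With .ioctl pointing at video_ioctl2 in the file operations above, the
v4l2 core decodes each ioctl and dispatches it to the matching vidioc_*
handler filled in here (VIDIOC_S_FMT, for instance, ends up in
vidioc_s_fmt_cap()).  This is what lets the hand-rolled ioctl switches,
such as usbvision_do_radio_ioctl() removed earlier in this patch, go away.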
 
 
@@ -1508,8 +1438,9 @@
 	.owner             = THIS_MODULE,
 	.open		= usbvision_radio_open,
 	.release	= usbvision_radio_close,
-	.ioctl		= usbvision_radio_ioctl,
+	.ioctl		= video_ioctl2,
 	.llseek		= no_llseek,
+	.compat_ioctl  = v4l_compat_ioctl32,
 };
 
 static struct video_device usbvision_radio_template=
@@ -1518,11 +1449,26 @@
 	.type		= VID_TYPE_TUNER,
 	.hardware	= VID_HARDWARE_USBVISION,
 	.fops		= &usbvision_radio_fops,
-	.release	= video_device_release,
 	.name           = "usbvision-radio",
+	.release	= video_device_release,
 	.minor		= -1,
-};
+	.vidioc_querycap      = vidioc_querycap,
+	.vidioc_enum_input    = vidioc_enum_input,
+	.vidioc_g_input       = vidioc_g_input,
+	.vidioc_s_input       = vidioc_s_input,
+	.vidioc_queryctrl     = vidioc_queryctrl,
+	.vidioc_g_audio       = vidioc_g_audio,
+	.vidioc_s_audio       = vidioc_s_audio,
+	.vidioc_g_ctrl        = vidioc_g_ctrl,
+	.vidioc_s_ctrl        = vidioc_s_ctrl,
+	.vidioc_g_tuner       = vidioc_g_tuner,
+	.vidioc_s_tuner       = vidioc_s_tuner,
+	.vidioc_g_frequency   = vidioc_g_frequency,
+	.vidioc_s_frequency   = vidioc_s_frequency,
 
+	.tvnorms              = USBVISION_NORMS,
+	.current_norm         = V4L2_STD_PAL
+};
 
 // vbi template
 static const struct file_operations usbvision_vbi_fops = {
@@ -1531,6 +1477,7 @@
 	.release	= usbvision_vbi_close,
 	.ioctl		= usbvision_vbi_ioctl,
 	.llseek		= no_llseek,
+	.compat_ioctl  = v4l_compat_ioctl32,
 };
 
 static struct video_device usbvision_vbi_template=
@@ -1574,11 +1521,11 @@
 {
 	// vbi Device:
 	if (usbvision->vbi) {
-		PDEBUG(DBG_PROBE, "unregister /dev/vbi%d [v4l2]", usbvision->vbi->minor & 0x1f);
+		PDEBUG(DBG_PROBE, "unregister /dev/vbi%d [v4l2]",
+		       usbvision->vbi->minor & 0x1f);
 		if (usbvision->vbi->minor != -1) {
 			video_unregister_device(usbvision->vbi);
-		}
-		else {
+		} else {
 			video_device_release(usbvision->vbi);
 		}
 		usbvision->vbi = NULL;
@@ -1586,11 +1533,11 @@
 
 	// Radio Device:
 	if (usbvision->rdev) {
-		PDEBUG(DBG_PROBE, "unregister /dev/radio%d [v4l2]", usbvision->rdev->minor & 0x1f);
+		PDEBUG(DBG_PROBE, "unregister /dev/radio%d [v4l2]",
+		       usbvision->rdev->minor & 0x1f);
 		if (usbvision->rdev->minor != -1) {
 			video_unregister_device(usbvision->rdev);
-		}
-		else {
+		} else {
 			video_device_release(usbvision->rdev);
 		}
 		usbvision->rdev = NULL;
@@ -1598,11 +1545,11 @@
 
 	// Video Device:
 	if (usbvision->vdev) {
-		PDEBUG(DBG_PROBE, "unregister /dev/video%d [v4l2]", usbvision->vdev->minor & 0x1f);
+		PDEBUG(DBG_PROBE, "unregister /dev/video%d [v4l2]",
+		       usbvision->vdev->minor & 0x1f);
 		if (usbvision->vdev->minor != -1) {
 			video_unregister_device(usbvision->vdev);
-		}
-		else {
+		} else {
 			video_device_release(usbvision->vdev);
 		}
 		usbvision->vdev = NULL;
@@ -1613,37 +1560,52 @@
 static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
 {
 	// Video Device:
-	usbvision->vdev = usbvision_vdev_init(usbvision, &usbvision_video_template, "USBVision Video");
+	usbvision->vdev = usbvision_vdev_init(usbvision,
+					      &usbvision_video_template,
+					      "USBVision Video");
 	if (usbvision->vdev == NULL) {
 		goto err_exit;
 	}
-	if (video_register_device(usbvision->vdev, VFL_TYPE_GRABBER, video_nr)<0) {
+	if (video_register_device(usbvision->vdev,
+				  VFL_TYPE_GRABBER,
+				  video_nr)<0) {
 		goto err_exit;
 	}
-	printk(KERN_INFO "USBVision[%d]: registered USBVision Video device /dev/video%d [v4l2]\n", usbvision->nr,usbvision->vdev->minor & 0x1f);
+	printk(KERN_INFO "USBVision[%d]: registered USBVision Video device /dev/video%d [v4l2]\n",
+	       usbvision->nr,usbvision->vdev->minor & 0x1f);
 
 	// Radio Device:
 	if (usbvision_device_data[usbvision->DevModel].Radio) {
 		// usbvision has radio
-		usbvision->rdev = usbvision_vdev_init(usbvision, &usbvision_radio_template, "USBVision Radio");
+		usbvision->rdev = usbvision_vdev_init(usbvision,
+						      &usbvision_radio_template,
+						      "USBVision Radio");
 		if (usbvision->rdev == NULL) {
 			goto err_exit;
 		}
-		if (video_register_device(usbvision->rdev, VFL_TYPE_RADIO, radio_nr)<0) {
+		if (video_register_device(usbvision->rdev,
+					  VFL_TYPE_RADIO,
+					  radio_nr)<0) {
 			goto err_exit;
 		}
-		printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device /dev/radio%d [v4l2]\n", usbvision->nr, usbvision->rdev->minor & 0x1f);
+		printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device /dev/radio%d [v4l2]\n",
+		       usbvision->nr, usbvision->rdev->minor & 0x1f);
 	}
 	// vbi Device:
 	if (usbvision_device_data[usbvision->DevModel].vbi) {
-		usbvision->vbi = usbvision_vdev_init(usbvision, &usbvision_vbi_template, "USBVision VBI");
+		usbvision->vbi = usbvision_vdev_init(usbvision,
+						     &usbvision_vbi_template,
+						     "USBVision VBI");
 		if (usbvision->vdev == NULL) {
 			goto err_exit;
 		}
-		if (video_register_device(usbvision->vbi, VFL_TYPE_VBI, vbi_nr)<0) {
+		if (video_register_device(usbvision->vbi,
+					  VFL_TYPE_VBI,
+					  vbi_nr)<0) {
 			goto err_exit;
 		}
-		printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device /dev/vbi%d [v4l2] (Not Working Yet!)\n", usbvision->nr,usbvision->vbi->minor & 0x1f);
+		printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device /dev/vbi%d [v4l2] (Not Working Yet!)\n",
+		       usbvision->nr,usbvision->vbi->minor & 0x1f);
 	}
 	// all done
 	return 0;
@@ -1657,7 +1619,8 @@
 /*
  * usbvision_alloc()
  *
- * This code allocates the struct usb_usbvision. It is filled with default values.
+ * This code allocates the struct usb_usbvision.
+ * It is filled with default values.
  *
  * Returns NULL on error, a pointer to usb_usbvision else.
  *
@@ -1666,7 +1629,8 @@
 {
 	struct usb_usbvision *usbvision;
 
-	if ((usbvision = kzalloc(sizeof(struct usb_usbvision), GFP_KERNEL)) == NULL) {
+	if ((usbvision = kzalloc(sizeof(struct usb_usbvision), GFP_KERNEL)) ==
+	    NULL) {
 		goto err_exit;
 	}
 
@@ -1728,11 +1692,11 @@
 }
 
 
-/******************************** usb interface *****************************************/
+/*********************** usb interface **********************************/
 
 static void usbvision_configure_video(struct usb_usbvision *usbvision)
 {
-	int model,i;
+	int model;
 
 	if (usbvision == NULL)
 		return;
@@ -1741,25 +1705,23 @@
 	usbvision->palette = usbvision_v4l2_format[2]; // V4L2_PIX_FMT_RGB24;
 
 	if (usbvision_device_data[usbvision->DevModel].Vin_Reg2_override) {
-		usbvision->Vin_Reg2_Preset = usbvision_device_data[usbvision->DevModel].Vin_Reg2;
+		usbvision->Vin_Reg2_Preset =
+			usbvision_device_data[usbvision->DevModel].Vin_Reg2;
 	} else {
 		usbvision->Vin_Reg2_Preset = 0;
 	}
 
-	for (i = 0; i < TVNORMS; i++)
-		if (usbvision_device_data[model].VideoNorm == tvnorms[i].mode)
-			break;
-	if (i == TVNORMS)
-		i = 0;
-	usbvision->tvnorm = &tvnorms[i];        /* set default norm */
+	usbvision->tvnormId = usbvision_device_data[model].VideoNorm;
 
 	usbvision->video_inputs = usbvision_device_data[model].VideoChannels;
 	usbvision->ctl_input = 0;
 
 	/* This should be here so that i2c clients are able to register */
-	usbvision_audio_off(usbvision);	//first switch off audio
+	/* first switch off audio */
+	usbvision_audio_off(usbvision);
 	if (!PowerOnAtOpen) {
-		usbvision_power_on(usbvision);	//and then power up the noisy tuner
+		/* and then power up the noisy tuner */
+		usbvision_power_on(usbvision);
 		usbvision_i2c_register(usbvision);
 	}
 }
@@ -1796,18 +1758,22 @@
 
 	if (usbvision_device_data[model].Interface >= 0) {
 		interface = &dev->actconfig->interface[usbvision_device_data[model].Interface]->altsetting[0];
-	}
-	else {
+	} else {
 		interface = &dev->actconfig->interface[ifnum]->altsetting[0];
 	}
 	endpoint = &interface->endpoint[1].desc;
-	if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_ISOC) {
-		err("%s: interface %d. has non-ISO endpoint!", __FUNCTION__, ifnum);
-		err("%s: Endpoint attributes %d", __FUNCTION__, endpoint->bmAttributes);
+	if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
+	    USB_ENDPOINT_XFER_ISOC) {
+		err("%s: interface %d. has non-ISO endpoint!",
+		    __FUNCTION__, ifnum);
+		err("%s: Endpoint attributes %d",
+		    __FUNCTION__, endpoint->bmAttributes);
 		return -ENODEV;
 	}
-	if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT) {
-		err("%s: interface %d. has ISO OUT endpoint!", __FUNCTION__, ifnum);
+	if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ==
+	    USB_DIR_OUT) {
+		err("%s: interface %d. has ISO OUT endpoint!",
+		    __FUNCTION__, ifnum);
 		return -ENODEV;
 	}
 
@@ -1818,11 +1784,9 @@
 
 	if (dev->descriptor.bNumConfigurations > 1) {
 		usbvision->bridgeType = BRIDGE_NT1004;
-	}
-	else if (model == DAZZLE_DVC_90_REV_1_SECAM) {
+	} else if (model == DAZZLE_DVC_90_REV_1_SECAM) {
 		usbvision->bridgeType = BRIDGE_NT1005;
-	}
-	else {
+	} else {
 		usbvision->bridgeType = BRIDGE_NT1003;
 	}
 	PDEBUG(DBG_PROBE, "bridgeType %d", usbvision->bridgeType);
@@ -1919,11 +1883,11 @@
 	up(&usbvision->lock);
 
 	if (usbvision->user) {
-		printk(KERN_INFO "%s: In use, disconnect pending\n", __FUNCTION__);
+		printk(KERN_INFO "%s: In use, disconnect pending\n",
+		       __FUNCTION__);
 		wake_up_interruptible(&usbvision->wait_frame);
 		wake_up_interruptible(&usbvision->wait_stream);
-	}
-	else {
+	} else {
 		usbvision_release(usbvision);
 	}
 
@@ -1950,7 +1914,6 @@
 
 	PDEBUG(DBG_PROBE, "");
 
-	PDEBUG(DBG_IOCTL, "IOCTL   debugging is enabled [video]");
 	PDEBUG(DBG_IO,  "IO      debugging is enabled [video]");
 	PDEBUG(DBG_PROBE, "PROBE   debugging is enabled [video]");
 	PDEBUG(DBG_MMAP, "MMAP    debugging is enabled [video]");
diff --git a/drivers/media/video/usbvision/usbvision.h b/drivers/media/video/usbvision/usbvision.h
index c759d00..c5b6c50 100644
--- a/drivers/media/video/usbvision/usbvision.h
+++ b/drivers/media/video/usbvision/usbvision.h
@@ -221,6 +221,8 @@
 
 #define I2C_USB_ADAP_MAX	16
 
+#define USBVISION_NORMS (V4L2_STD_PAL | V4L2_STD_NTSC | V4L2_STD_SECAM | V4L2_STD_PAL_M)
+
 /* ----------------------------------------------------------------- */
 /* usbvision video structures                                        */
 /* ----------------------------------------------------------------- */
@@ -301,14 +303,6 @@
 	__u16 frameHeight;				/* 10 - 11 after endian correction*/
 };
 
-/* tvnorms */
-struct usbvision_tvnorm {
-	char *name;
-	v4l2_std_id id;
-	/* mode for saa7113h */
-	int mode;
-};
-
 struct usbvision_frame {
 	char *data;					/* Frame buffer */
 	struct usbvision_frame_header isocHeader;	/* Header from stream */
@@ -386,7 +380,6 @@
 	int tuner_type;
 	int tuner_addr;
 	int bridgeType;							// NT1003, NT1004, NT1005
-	int channel;
 	int radio;
 	int video_inputs;						// # of inputs
 	unsigned long freq;
@@ -441,7 +434,7 @@
 
 	struct v4l2_capability vcap;					/* Video capabilities */
 	unsigned int ctl_input;						/* selected input */
-	struct usbvision_tvnorm *tvnorm;				/* selected tv norm */
+	v4l2_std_id tvnormId;						/* selected tv norm */
 	unsigned char video_endp;					/* 0x82 for USBVISION devices based */
 
 	// Decompression stuff:
diff --git a/drivers/media/video/video-buf-dvb.c b/drivers/media/video/video-buf-dvb.c
index fcc5467e..e617925 100644
--- a/drivers/media/video/video-buf-dvb.c
+++ b/drivers/media/video/video-buf-dvb.c
@@ -47,6 +47,7 @@
 	int err;
 
 	dprintk("dvb thread started\n");
+	set_freezable();
 	videobuf_read_start(&dvb->dvbq);
 
 	for (;;) {
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 0c658b7..e94a9a6 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -2077,12 +2077,10 @@
 	init_waitqueue_entry(&wait, current);
 	/* add ourselves into wait queue */
 	add_wait_queue(&vcs->fb_queue.frame_wait_queue, &wait);
-	/* and set current state */
-	set_current_state(TASK_INTERRUPTIBLE);
 
 	/* to ensure that schedule_timeout will return immediately
-	 * if VINO interrupt was triggred meanwhile */
-	schedule_timeout(HZ / 10);
+	 * if VINO interrupt was triggered meanwhile */
+	schedule_timeout_interruptible(HZ / 10);
 
 	if (signal_pending(current))
 		err = -EINTR;
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index f7e1d19..f6d3a94 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -25,6 +25,7 @@
 #include <linux/pci.h>
 #include <linux/random.h>
 #include <linux/version.h>
+#include <linux/mutex.h>
 #include <linux/videodev2.h>
 #include <linux/dma-mapping.h>
 #ifdef CONFIG_VIDEO_V4L1_COMPAT
@@ -145,9 +146,6 @@
 
 	struct vivi_fmt        *fmt;
 
-#ifdef CONFIG_VIVI_SCATTER
-	struct sg_to_addr      *to_addr;
-#endif
 };
 
 struct vivi_dmaqueue {
@@ -168,7 +166,7 @@
 struct vivi_dev {
 	struct list_head           vivi_devlist;
 
-	struct semaphore           lock;
+	struct mutex               lock;
 
 	int                        users;
 
@@ -232,68 +230,13 @@
 #define TSTAMP_MAX_Y TSTAMP_MIN_Y+15
 #define TSTAMP_MIN_X 64
 
-#ifdef CONFIG_VIVI_SCATTER
-static void prep_to_addr(struct sg_to_addr to_addr[],
-			 struct videobuf_buffer *vb)
-{
-	int i, pos=0;
 
-	for (i=0;i<vb->dma.nr_pages;i++) {
-		to_addr[i].sg=&vb->dma.sglist[i];
-		to_addr[i].pos=pos;
-		pos += vb->dma.sglist[i].length;
-	}
-}
-
-static int get_addr_pos(int pos, int pages, struct sg_to_addr to_addr[])
-{
-	int p1=0,p2=pages-1,p3=pages/2;
-
-	/* Sanity test */
-	BUG_ON (pos>=to_addr[p2].pos+to_addr[p2].sg->length);
-
-	while (p1+1<p2) {
-		if (pos < to_addr[p3].pos) {
-			p2=p3;
-		} else {
-			p1=p3;
-		}
-		p3=(p1+p2)/2;
-	}
-	if (pos >= to_addr[p2].pos)
-		p1=p2;
-
-	return (p1);
-}
-#endif
-
-#ifdef CONFIG_VIVI_SCATTER
-static void gen_line(struct sg_to_addr to_addr[],int inipos,int pages,int wmax,
-		     int hmax, int line, char *timestr)
-#else
 static void gen_line(char *basep,int inipos,int wmax,
 		     int hmax, int line, char *timestr)
-#endif
 {
 	int  w,i,j,pos=inipos,y;
 	char *p,*s;
 	u8   chr,r,g,b,color;
-#ifdef CONFIG_VIVI_SCATTER
-	int pgpos,oldpg;
-	char *basep;
-	struct page *pg;
-
-	unsigned long flags;
-	spinlock_t spinlock;
-
-	spin_lock_init(&spinlock);
-
-	/* Get first addr pointed to pixel position */
-	oldpg=get_addr_pos(pos,pages,to_addr);
-	pg=pfn_to_page(sg_dma_address(to_addr[oldpg].sg) >> PAGE_SHIFT);
-	spin_lock_irqsave(&spinlock,flags);
-	basep = kmap_atomic(pg, KM_BOUNCE_READ)+to_addr[oldpg].sg->offset;
-#endif
 
 	/* We will just duplicate the second pixel at the packet */
 	wmax/=2;
@@ -305,18 +248,7 @@
 		b=bars[w*7/wmax][2];
 
 		for (color=0;color<4;color++) {
-#ifdef CONFIG_VIVI_SCATTER
-			pgpos=get_addr_pos(pos,pages,to_addr);
-			if (pgpos!=oldpg) {
-				pg=pfn_to_page(sg_dma_address(to_addr[pgpos].sg) >> PAGE_SHIFT);
-				kunmap_atomic(basep, KM_BOUNCE_READ);
-				basep= kmap_atomic(pg, KM_BOUNCE_READ)+to_addr[pgpos].sg->offset;
-				oldpg=pgpos;
-			}
-			p=basep+pos-to_addr[pgpos].pos;
-#else
 			p=basep+pos;
-#endif
 
 			switch (color) {
 				case 0:
@@ -361,23 +293,7 @@
 
 				pos=inipos+j*2;
 				for (color=0;color<4;color++) {
-#ifdef CONFIG_VIVI_SCATTER
-					pgpos=get_addr_pos(pos,pages,to_addr);
-					if (pgpos!=oldpg) {
-						pg=pfn_to_page(sg_dma_address(
-								to_addr[pgpos].sg)
-								>> PAGE_SHIFT);
-						kunmap_atomic(basep,
-								KM_BOUNCE_READ);
-						basep= kmap_atomic(pg,
-							KM_BOUNCE_READ)+
-							to_addr[pgpos].sg->offset;
-						oldpg=pgpos;
-					}
-					p=basep+pos-to_addr[pgpos].pos;
-#else
 					p=basep+pos;
-#endif
 
 					y=TO_Y(r,g,b);
 
@@ -402,12 +318,7 @@
 
 
 end:
-#ifdef CONFIG_VIVI_SCATTER
-	kunmap_atomic(basep, KM_BOUNCE_READ);
-	spin_unlock_irqrestore(&spinlock,flags);
-#else
 	return;
-#endif
 }
 static void vivi_fillbuff(struct vivi_dev *dev,struct vivi_buffer *buf)
 {
@@ -415,35 +326,16 @@
 	int hmax  = buf->vb.height;
 	int wmax  = buf->vb.width;
 	struct timeval ts;
-#ifdef CONFIG_VIVI_SCATTER
-	struct sg_to_addr *to_addr=buf->to_addr;
-	struct videobuf_buffer *vb=&buf->vb;
-#else
 	char *tmpbuf;
-#endif
 
-#ifdef CONFIG_VIVI_SCATTER
-	/* Test if DMA mapping is ready */
-	if (!sg_dma_address(&vb->dma.sglist[0]))
-		return;
-
-	prep_to_addr(to_addr,vb);
-
-	/* Check if there is enough memory */
-	BUG_ON(buf->vb.dma.nr_pages << PAGE_SHIFT < (buf->vb.width*buf->vb.height)*2);
-#else
 	if (buf->vb.dma.varea) {
 		tmpbuf=kmalloc (wmax*2, GFP_KERNEL);
 	} else {
 		tmpbuf=buf->vb.dma.vmalloc;
 	}
 
-#endif
 
 	for (h=0;h<hmax;h++) {
-#ifdef CONFIG_VIVI_SCATTER
-		gen_line(to_addr,pos,vb->dma.nr_pages,wmax,hmax,h,dev->timestr);
-#else
 		if (buf->vb.dma.varea) {
 			gen_line(tmpbuf,0,wmax,hmax,h,dev->timestr);
 			/* FIXME: replace with __copy_to_user */
@@ -452,7 +344,6 @@
 		} else {
 			gen_line(tmpbuf,pos,wmax,hmax,h,dev->timestr);
 		}
-#endif
 		pos += wmax*2;
 	}
 
@@ -573,6 +464,7 @@
 	dprintk(1,"thread started\n");
 
 	mod_timer(&dma_q->timeout, jiffies+BUFFER_TIMEOUT);
+	set_freezable();
 
 	for (;;) {
 		vivi_sleep(dma_q);
@@ -717,11 +609,6 @@
 	if (in_interrupt())
 		BUG();
 
-#ifdef CONFIG_VIVI_SCATTER
-	/*FIXME: Maybe a spinlock is required here */
-	kfree(buf->to_addr);
-	buf->to_addr=NULL;
-#endif
 
 	videobuf_waiton(&buf->vb,0,0);
 	videobuf_dma_unmap(vq, &buf->vb.dma);
@@ -767,12 +654,6 @@
 
 	buf->vb.state = STATE_PREPARED;
 
-#ifdef CONFIG_VIVI_SCATTER
-	if (NULL == (buf->to_addr = kmalloc(sizeof(*buf->to_addr) * vb->dma.nr_pages,GFP_KERNEL))) {
-		rc=-ENOMEM;
-		goto fail;
-	}
-#endif
 	return 0;
 
 fail:
@@ -837,40 +718,6 @@
 	free_buffer(vq,buf);
 }
 
-#ifdef CONFIG_VIVI_SCATTER
-static int vivi_map_sg(void *dev, struct scatterlist *sg, int nents,
-		       int direction)
-{
-	int i;
-
-	dprintk(1,"%s, number of pages=%d\n",__FUNCTION__,nents);
-	BUG_ON(direction == DMA_NONE);
-
-	for (i = 0; i < nents; i++ ) {
-		BUG_ON(!sg[i].page);
-
-		sg_dma_address(&sg[i]) = page_to_phys(sg[i].page) + sg[i].offset;
-	}
-
-	return nents;
-}
-
-static int vivi_unmap_sg(void *dev,struct scatterlist *sglist,int nr_pages,
-			 int direction)
-{
-	dprintk(1,"%s\n",__FUNCTION__);
-	return 0;
-}
-
-static int vivi_dma_sync_sg(void *dev,struct scatterlist *sglist, int nr_pages,
-			    int direction)
-{
-//	dprintk(1,"%s\n",__FUNCTION__);
-
-//	flush_write_buffers();
-	return 0;
-}
-#endif
 
 static struct videobuf_queue_ops vivi_video_qops = {
 	.buf_setup      = buffer_setup,
@@ -892,16 +739,16 @@
 static int res_get(struct vivi_dev *dev, struct vivi_fh *fh)
 {
 	/* is it free? */
-	down(&dev->lock);
+	mutex_lock(&dev->lock);
 	if (dev->resources) {
 		/* no, someone else uses it */
-		up(&dev->lock);
+		mutex_unlock(&dev->lock);
 		return 0;
 	}
 	/* it's free, grab it */
 	dev->resources =1;
 	dprintk(1,"res: get\n");
-	up(&dev->lock);
+	mutex_unlock(&dev->lock);
 	return 1;
 }
 
@@ -912,10 +759,10 @@
 
 static void res_free(struct vivi_dev *dev, struct vivi_fh *fh)
 {
-	down(&dev->lock);
+	mutex_lock(&dev->lock);
 	dev->resources = 0;
 	dprintk(1,"res: put\n");
-	up(&dev->lock);
+	mutex_lock(&dev->lock);
 }
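res_get()/res_free() above implement a simple single-owner claim on the
capture path: res_get() returns 1 when the caller obtained the resource
and 0 when another file handle already holds it.  A typical caller,
sketched here for illustration only (the real call sites are the
streaming ioctls), would be:

	if (!res_get(dev, fh))
		return -EBUSY;	/* capture already claimed elsewhere */
	/* ... start the DMA queue / stream ... */
	res_free(dev, fh);	/* give it back on streamoff or close */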
 
 /* ------------------------------------------------------------------
@@ -1259,19 +1106,11 @@
 	sprintf(dev->timestr,"%02d:%02d:%02d:%03d",
 			dev->h,dev->m,dev->s,(dev->us+500)/1000);
 
-#ifdef CONFIG_VIVI_SCATTER
-	videobuf_queue_init(&fh->vb_vidq,VIDEOBUF_DMA_SCATTER, &vivi_video_qops,
-			NULL, NULL,
-			fh->type,
-			V4L2_FIELD_INTERLACED,
-			sizeof(struct vivi_buffer),fh);
-#else
 	videobuf_queue_init(&fh->vb_vidq, &vivi_video_qops,
 			NULL, NULL,
 			fh->type,
 			V4L2_FIELD_INTERLACED,
 			sizeof(struct vivi_buffer),fh);
-#endif
 
 	return 0;
 }
@@ -1422,7 +1261,7 @@
 	init_waitqueue_head(&dev->vidq.wq);
 
 	/* initialize locks */
-	init_MUTEX(&dev->lock);
+	mutex_init(&dev->lock);
 
 	dev->vidq.timeout.function = vivi_vid_timeout;
 	dev->vidq.timeout.data     = (unsigned long)dev;
diff --git a/drivers/media/video/zc0301/Kconfig b/drivers/media/video/zc0301/Kconfig
index 47cd93f..edb0029 100644
--- a/drivers/media/video/zc0301/Kconfig
+++ b/drivers/media/video/zc0301/Kconfig
@@ -1,6 +1,6 @@
 config USB_ZC0301
 	tristate "USB ZC0301[P] Image Processor and Control Chip support"
-	depends on VIDEO_V4L1
+	depends on VIDEO_V4L2
 	---help---
 	  Say Y here if you want support for cameras based on the ZC0301 or
 	  ZC0301P Image Processors and Control Chips.
diff --git a/drivers/media/video/zc0301/zc0301.h b/drivers/media/video/zc0301/zc0301.h
index 710f12e..a2de50e 100644
--- a/drivers/media/video/zc0301/zc0301.h
+++ b/drivers/media/video/zc0301/zc0301.h
@@ -36,6 +36,7 @@
 #include <linux/rwsem.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
+#include <linux/kref.h>
 
 #include "zc0301_sensor.h"
 
@@ -98,7 +99,7 @@
 	u16 frame_timeout;
 };
 
-static DECLARE_RWSEM(zc0301_disconnect);
+static DECLARE_RWSEM(zc0301_dev_lock);
 
 struct zc0301_device {
 	struct video_device* v4ldev;
@@ -121,12 +122,14 @@
 
 	struct zc0301_module_param module_param;
 
+	struct kref kref;
 	enum zc0301_dev_state state;
 	u8 users;
 
-	struct mutex dev_mutex, fileop_mutex;
+	struct completion probe;
+	struct mutex open_mutex, fileop_mutex;
 	spinlock_t queue_lock;
-	wait_queue_head_t open, wait_frame, wait_stream;
+	wait_queue_head_t wait_open, wait_frame, wait_stream;
 };
 
 /*****************************************************************************/
@@ -156,8 +159,8 @@
 		else if ((level) == 2)                                        \
 			dev_info(&cam->usbdev->dev, fmt "\n", ## args);       \
 		else if ((level) >= 3)                                        \
-			dev_info(&cam->usbdev->dev, "[%s:%d] " fmt "\n",      \
-				 __FUNCTION__, __LINE__ , ## args);           \
+			dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n",   \
+				 __FILE__, __FUNCTION__, __LINE__ , ## args); \
 	}                                                                     \
 } while (0)
 #	define KDBG(level, fmt, args...)                                      \
@@ -166,8 +169,8 @@
 		if ((level) == 1 || (level) == 2)                             \
 			pr_info("zc0301: " fmt "\n", ## args);                \
 		else if ((level) == 3)                                        \
-			pr_debug("zc0301: [%s:%d] " fmt "\n", __FUNCTION__,   \
-				 __LINE__ , ## args);                         \
+			pr_debug("sn9c102: [%s:%s:%d] " fmt "\n", __FILE__,   \
+				 __FUNCTION__, __LINE__ , ## args);           \
 	}                                                                     \
 } while (0)
 #	define V4LDBG(level, name, cmd)                                       \
@@ -183,8 +186,8 @@
 
 #undef PDBG
 #define PDBG(fmt, args...)                                                    \
-dev_info(&cam->usbdev->dev, "[%s:%d] " fmt "\n",                              \
-	 __FUNCTION__, __LINE__ , ## args)
+dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", __FILE__, __FUNCTION__,   \
+	 __LINE__ , ## args)
 
 #undef PDBGG
 #define PDBGG(fmt, args...) do {;} while(0) /* placeholder */
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index f112055..703b741 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -49,11 +49,11 @@
 
 #define ZC0301_MODULE_NAME    "V4L2 driver for ZC0301[P] "                    \
 			      "Image Processor and Control Chip"
-#define ZC0301_MODULE_AUTHOR  "(C) 2006 Luca Risolia"
+#define ZC0301_MODULE_AUTHOR  "(C) 2006-2007 Luca Risolia"
 #define ZC0301_AUTHOR_EMAIL   "<luca.risolia@studio.unibo.it>"
 #define ZC0301_MODULE_LICENSE "GPL"
-#define ZC0301_MODULE_VERSION "1:1.07"
-#define ZC0301_MODULE_VERSION_CODE  KERNEL_VERSION(1, 1, 7)
+#define ZC0301_MODULE_VERSION "1:1.10"
+#define ZC0301_MODULE_VERSION_CODE  KERNEL_VERSION(1, 1, 10)
 
 /*****************************************************************************/
 
@@ -573,7 +573,8 @@
 	int err = 0;
 
 	if (!(cam->state & DEV_INITIALIZED)) {
-		init_waitqueue_head(&cam->open);
+		mutex_init(&cam->open_mutex);
+		init_waitqueue_head(&cam->wait_open);
 		qctrl = s->qctrl;
 		rect = &(s->cropcap.defrect);
 		cam->compression.quality = ZC0301_COMPRESSION_QUALITY;
@@ -634,59 +635,73 @@
 	return 0;
 }
 
+/*****************************************************************************/
 
-static void zc0301_release_resources(struct zc0301_device* cam)
+static void zc0301_release_resources(struct kref *kref)
 {
+	struct zc0301_device *cam = container_of(kref, struct zc0301_device,
+						 kref);
 	DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor);
 	video_set_drvdata(cam->v4ldev, NULL);
 	video_unregister_device(cam->v4ldev);
+	usb_put_dev(cam->usbdev);
 	kfree(cam->control_buffer);
+	kfree(cam);
 }
 
-/*****************************************************************************/
 
 static int zc0301_open(struct inode* inode, struct file* filp)
 {
 	struct zc0301_device* cam;
 	int err = 0;
 
-	/*
-	   This is the only safe way to prevent race conditions with
-	   disconnect
-	*/
-	if (!down_read_trylock(&zc0301_disconnect))
+	if (!down_read_trylock(&zc0301_dev_lock))
 		return -ERESTARTSYS;
 
 	cam = video_get_drvdata(video_devdata(filp));
 
-	if (mutex_lock_interruptible(&cam->dev_mutex)) {
-		up_read(&zc0301_disconnect);
+	if (wait_for_completion_interruptible(&cam->probe)) {
+		up_read(&zc0301_dev_lock);
 		return -ERESTARTSYS;
 	}
 
+	kref_get(&cam->kref);
+
+	if (mutex_lock_interruptible(&cam->open_mutex)) {
+		kref_put(&cam->kref, zc0301_release_resources);
+		up_read(&zc0301_dev_lock);
+		return -ERESTARTSYS;
+	}
+
+	if (cam->state & DEV_DISCONNECTED) {
+		DBG(1, "Device not present");
+		err = -ENODEV;
+		goto out;
+	}
+
 	if (cam->users) {
 		DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->minor);
+		DBG(3, "Simultaneous opens are not supported");
 		if ((filp->f_flags & O_NONBLOCK) ||
 		    (filp->f_flags & O_NDELAY)) {
 			err = -EWOULDBLOCK;
 			goto out;
 		}
-		mutex_unlock(&cam->dev_mutex);
-		err = wait_event_interruptible_exclusive(cam->open,
-						  cam->state & DEV_DISCONNECTED
+		DBG(2, "A blocking open() has been requested. Wait for the "
+		       "device to be released...");
+		up_read(&zc0301_dev_lock);
+		err = wait_event_interruptible_exclusive(cam->wait_open,
+						(cam->state & DEV_DISCONNECTED)
 							 || !cam->users);
-		if (err) {
-			up_read(&zc0301_disconnect);
-			return err;
-		}
+		down_read(&zc0301_dev_lock);
+		if (err)
+			goto out;
 		if (cam->state & DEV_DISCONNECTED) {
-			up_read(&zc0301_disconnect);
-			return -ENODEV;
+			err = -ENODEV;
+			goto out;
 		}
-		mutex_lock(&cam->dev_mutex);
 	}
 
-
 	if (cam->state & DEV_MISCONFIGURED) {
 		err = zc0301_init(cam);
 		if (err) {
@@ -711,36 +726,32 @@
 	DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor);
 
 out:
-	mutex_unlock(&cam->dev_mutex);
-	up_read(&zc0301_disconnect);
+	mutex_unlock(&cam->open_mutex);
+	if (err)
+		kref_put(&cam->kref, zc0301_release_resources);
+	up_read(&zc0301_dev_lock);
 	return err;
 }
 
 
 static int zc0301_release(struct inode* inode, struct file* filp)
 {
-	struct zc0301_device* cam = video_get_drvdata(video_devdata(filp));
+	struct zc0301_device* cam;
 
-	mutex_lock(&cam->dev_mutex); /* prevent disconnect() to be called */
+	down_write(&zc0301_dev_lock);
+
+	cam = video_get_drvdata(video_devdata(filp));
 
 	zc0301_stop_transfer(cam);
-
 	zc0301_release_buffers(cam);
-
-	if (cam->state & DEV_DISCONNECTED) {
-		zc0301_release_resources(cam);
-		usb_put_dev(cam->usbdev);
-		mutex_unlock(&cam->dev_mutex);
-		kfree(cam);
-		return 0;
-	}
-
 	cam->users--;
-	wake_up_interruptible_nr(&cam->open, 1);
+	wake_up_interruptible_nr(&cam->wait_open, 1);
 
 	DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor);
 
-	mutex_unlock(&cam->dev_mutex);
+	kref_put(&cam->kref, zc0301_release_resources);
+
+	up_write(&zc0301_dev_lock);
 
 	return 0;
 }
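Taken together, probe, open(), release() and disconnect() now follow the
standard kref lifetime pattern: kref_init() at probe time, kref_get() for
every open(), and kref_put() on each release() and on disconnect();
whichever put turns out to be the last runs zc0301_release_resources(),
so the video device and the camera structure are freed exactly once
whether the device is closed before or after it is unplugged.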
@@ -775,7 +786,7 @@
 		DBG(3, "Close and open the device again to choose the read "
 		       "method");
 		mutex_unlock(&cam->fileop_mutex);
-		return -EINVAL;
+		return -EBUSY;
 	}
 
 	if (cam->io == IO_NONE) {
@@ -953,7 +964,12 @@
 		return -EIO;
 	}
 
-	if (cam->io != IO_MMAP || !(vma->vm_flags & VM_WRITE) ||
+	if (!(vma->vm_flags & (VM_WRITE | VM_READ))) {
+		mutex_unlock(&cam->fileop_mutex);
+		return -EACCES;
+	}
+
+	if (cam->io != IO_MMAP ||
 	    size != PAGE_ALIGN(cam->frame[0].buf.length)) {
 		mutex_unlock(&cam->fileop_mutex);
 		return -EINVAL;
@@ -984,7 +1000,6 @@
 
 	vma->vm_ops = &zc0301_vm_ops;
 	vma->vm_private_data = &cam->frame[i];
-
 	zc0301_vm_open(vma);
 
 	mutex_unlock(&cam->fileop_mutex);
@@ -1211,7 +1226,7 @@
 			if (cam->frame[i].vma_use_count) {
 				DBG(3, "VIDIOC_S_CROP failed. "
 				       "Unmap the buffers first.");
-				return -EINVAL;
+				return -EBUSY;
 			}
 
 	if (!s->set_crop) {
@@ -1434,7 +1449,7 @@
 			if (cam->frame[i].vma_use_count) {
 				DBG(3, "VIDIOC_S_FMT failed. "
 				       "Unmap the buffers first.");
-				return -EINVAL;
+				return -EBUSY;
 			}
 
 	if (cam->stream == STREAM_ON)
@@ -1544,14 +1559,14 @@
 	if (cam->io == IO_READ) {
 		DBG(3, "Close and open the device again to choose the mmap "
 		       "I/O method");
-		return -EINVAL;
+		return -EBUSY;
 	}
 
 	for (i = 0; i < cam->nbuffers; i++)
 		if (cam->frame[i].vma_use_count) {
 			DBG(3, "VIDIOC_REQBUFS failed. "
 			       "Previous buffers are still mapped.");
-			return -EINVAL;
+			return -EBUSY;
 		}
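Switching these paths from -EINVAL to -EBUSY gives user space a sharper
diagnosis: the request itself is well formed, it just cannot be honoured
while earlier buffers are still mapped or a conflicting I/O method is
active.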
 
 	if (cam->stream == STREAM_ON)
@@ -1699,9 +1714,6 @@
 	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP)
 		return -EINVAL;
 
-	if (list_empty(&cam->inqueue))
-		return -EINVAL;
-
 	cam->stream = STREAM_ON;
 
 	DBG(3, "Stream on");
@@ -1949,8 +1961,6 @@
 		goto fail;
 	}
 
-	mutex_init(&cam->dev_mutex);
-
 	DBG(2, "ZC0301[P] Image Processor and Control Chip detected "
 	       "(vid/pid 0x%04X:0x%04X)",id->idVendor, id->idProduct);
 
@@ -1982,7 +1992,7 @@
 	cam->v4ldev->release = video_device_release;
 	video_set_drvdata(cam->v4ldev, cam);
 
-	mutex_lock(&cam->dev_mutex);
+	init_completion(&cam->probe);
 
 	err = video_register_device(cam->v4ldev, VFL_TYPE_GRABBER,
 				    video_nr[dev_nr]);
@@ -1992,7 +2002,7 @@
 			DBG(1, "Free /dev/videoX node not found");
 		video_nr[dev_nr] = -1;
 		dev_nr = (dev_nr < ZC0301_MAX_DEVICES-1) ? dev_nr+1 : 0;
-		mutex_unlock(&cam->dev_mutex);
+		complete_all(&cam->probe);
 		goto fail;
 	}
 
@@ -2004,8 +2014,10 @@
 	dev_nr = (dev_nr < ZC0301_MAX_DEVICES-1) ? dev_nr+1 : 0;
 
 	usb_set_intfdata(intf, cam);
+	kref_init(&cam->kref);
+	usb_get_dev(cam->usbdev);
 
-	mutex_unlock(&cam->dev_mutex);
+	complete_all(&cam->probe);
 
 	return 0;
 
@@ -2022,40 +2034,31 @@
 
 static void zc0301_usb_disconnect(struct usb_interface* intf)
 {
-	struct zc0301_device* cam = usb_get_intfdata(intf);
+	struct zc0301_device* cam;
 
-	if (!cam)
-		return;
+	down_write(&zc0301_dev_lock);
 
-	down_write(&zc0301_disconnect);
-
-	mutex_lock(&cam->dev_mutex);
+	cam = usb_get_intfdata(intf);
 
 	DBG(2, "Disconnecting %s...", cam->v4ldev->name);
 
-	wake_up_interruptible_all(&cam->open);
-
 	if (cam->users) {
 		DBG(2, "Device /dev/video%d is open! Deregistration and "
-		       "memory deallocation are deferred on close.",
+		       "memory deallocation are deferred.",
 		    cam->v4ldev->minor);
 		cam->state |= DEV_MISCONFIGURED;
 		zc0301_stop_transfer(cam);
 		cam->state |= DEV_DISCONNECTED;
 		wake_up_interruptible(&cam->wait_frame);
 		wake_up(&cam->wait_stream);
-		usb_get_dev(cam->usbdev);
-	} else {
+	} else
 		cam->state |= DEV_DISCONNECTED;
-		zc0301_release_resources(cam);
-	}
 
-	mutex_unlock(&cam->dev_mutex);
+	wake_up_interruptible_all(&cam->wait_open);
 
-	if (!cam->users)
-		kfree(cam);
+	kref_put(&cam->kref, zc0301_release_resources);
 
-	up_write(&zc0301_disconnect);
+	up_write(&zc0301_dev_lock);
 }
 
 
diff --git a/drivers/media/video/zc0301/zc0301_pas202bcb.c b/drivers/media/video/zc0301/zc0301_pas202bcb.c
index 3efb92a..24b0dfb 100644
--- a/drivers/media/video/zc0301/zc0301_pas202bcb.c
+++ b/drivers/media/video/zc0301/zc0301_pas202bcb.c
@@ -327,6 +327,7 @@
 		.height = 480,
 		.pixelformat = V4L2_PIX_FMT_JPEG,
 		.priv = 8,
+		.colorspace = V4L2_COLORSPACE_JPEG,
 	},
 };
 
diff --git a/drivers/media/video/zc0301/zc0301_pb0330.c b/drivers/media/video/zc0301/zc0301_pb0330.c
index 5784b1d..9519aba 100644
--- a/drivers/media/video/zc0301/zc0301_pb0330.c
+++ b/drivers/media/video/zc0301/zc0301_pb0330.c
@@ -157,6 +157,7 @@
 		.height = 480,
 		.pixelformat = V4L2_PIX_FMT_JPEG,
 		.priv = 8,
+		.colorspace = V4L2_COLORSPACE_JPEG,
 	},
 };
 
diff --git a/drivers/media/video/zc0301/zc0301_sensor.h b/drivers/media/video/zc0301/zc0301_sensor.h
index 44e82cf..70fe6fc 100644
--- a/drivers/media/video/zc0301/zc0301_sensor.h
+++ b/drivers/media/video/zc0301/zc0301_sensor.h
@@ -23,7 +23,7 @@
 #define _ZC0301_SENSOR_H_
 
 #include <linux/usb.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
 #include <linux/device.h>
 #include <linux/stddef.h>
 #include <linux/errno.h>
diff --git a/drivers/media/video/zoran_driver.c b/drivers/media/video/zoran_driver.c
index cf0ed6c..17118a4 100644
--- a/drivers/media/video/zoran_driver.c
+++ b/drivers/media/video/zoran_driver.c
@@ -183,14 +183,7 @@
     (sizeof(zoran_formats) / sizeof(struct zoran_format));
 
 // RJ: Test only - want to test BUZ_USE_HIMEM even when CONFIG_BIGPHYS_AREA is defined
-#if !defined(CONFIG_BIGPHYS_AREA)
-//#undef CONFIG_BIGPHYS_AREA
-#define BUZ_USE_HIMEM
-#endif
 
-#if defined(CONFIG_BIGPHYS_AREA)
-#   include <linux/bigphysarea.h>
-#endif
 
 extern int *zr_debug;
 
@@ -250,7 +243,6 @@
  *   Linux with the necessary memory left over).
  */
 
-#if defined(BUZ_USE_HIMEM) && !defined(CONFIG_BIGPHYS_AREA)
 static unsigned long
 get_high_mem (unsigned long size)
 {
@@ -314,7 +306,6 @@
 
 	return hi_mem_ph;
 }
-#endif
 
 static int
 v4l_fbuffer_alloc (struct file *file)
@@ -323,9 +314,7 @@
 	struct zoran *zr = fh->zr;
 	int i, off;
 	unsigned char *mem;
-#if defined(BUZ_USE_HIMEM) && !defined(CONFIG_BIGPHYS_AREA)
 	unsigned long pmem = 0;
-#endif
 
 	/* we might have old buffers lying around... */
 	if (fh->v4l_buffers.ready_to_be_freed) {
@@ -369,39 +358,6 @@
 				ZR_DEVNAME(zr), i, (unsigned long) mem,
 				virt_to_bus(mem));
 		} else {
-#if defined(CONFIG_BIGPHYS_AREA)
-			/* Use bigphysarea_alloc_pages */
-
-			int n =
-			    (fh->v4l_buffers.buffer_size + PAGE_SIZE -
-			     1) / PAGE_SIZE;
-
-			mem =
-			    (unsigned char *) bigphysarea_alloc_pages(n, 0,
-								      GFP_KERNEL);
-			if (mem == 0) {
-				dprintk(1,
-					KERN_ERR
-					"%s: v4l_fbuffer_alloc() - bigphysarea_alloc_pages for V4L buf %d failed\n",
-					ZR_DEVNAME(zr), i);
-				v4l_fbuffer_free(file);
-				return -ENOBUFS;
-			}
-			fh->v4l_buffers.buffer[i].fbuffer = mem;
-			fh->v4l_buffers.buffer[i].fbuffer_phys =
-			    virt_to_phys(mem);
-			fh->v4l_buffers.buffer[i].fbuffer_bus =
-			    virt_to_bus(mem);
-			dprintk(4,
-				KERN_INFO
-				"%s: Bigphysarea frame %d mem 0x%x (bus: 0x%x)\n",
-				ZR_DEVNAME(zr), i, (unsigned) mem,
-				(unsigned) virt_to_bus(mem));
-
-			/* Zero out the allocated memory */
-			memset(fh->v4l_buffers.buffer[i].fbuffer, 0,
-			       fh->v4l_buffers.buffer_size);
-#elif defined(BUZ_USE_HIMEM)
 
 			/* Use high memory which has been left at boot time */
 
@@ -441,20 +397,6 @@
 				fh->v4l_buffers.buffer[i].fbuffer_bus =
 				    pmem + i * fh->v4l_buffers.buffer_size;
 			}
-#else
-			/* No bigphysarea present, usage of high memory disabled,
-			 * but user wants buffers of more than MAX_KMALLOC_MEM */
-			dprintk(1,
-				KERN_ERR
-				"%s: v4l_fbuffer_alloc() - no bigphysarea_patch present, usage of high memory disabled,\n",
-				ZR_DEVNAME(zr));
-			dprintk(1,
-				KERN_ERR
-				"%s: v4l_fbuffer_alloc() - sorry, could not allocate %d V4L buffers of size %d KB.\n",
-				ZR_DEVNAME(zr), fh->v4l_buffers.num_buffers,
-				fh->v4l_buffers.buffer_size >> 10);
-			return -ENOBUFS;
-#endif
 		}
 	}
 
@@ -485,11 +427,6 @@
 				ClearPageReserved(MAP_NR(mem + off));
 			kfree((void *) fh->v4l_buffers.buffer[i].fbuffer);
 		}
-#if defined(CONFIG_BIGPHYS_AREA)
-		else
-			bigphysarea_free_pages((void *) fh->v4l_buffers.
-					       buffer[i].fbuffer);
-#endif
 		fh->v4l_buffers.buffer[i].fbuffer = NULL;
 	}
 
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index b5d3364..6f18925 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -92,6 +92,7 @@
 	{USB_DEVICE(0x0784, 0x0040), .driver_info = METHOD1 },
 	{USB_DEVICE(0x06d6, 0x0034), .driver_info = METHOD0 },
 	{USB_DEVICE(0x0a17, 0x0062), .driver_info = METHOD2 },
+	{USB_DEVICE(0x06d6, 0x003b), .driver_info = METHOD0 },
 	{}			/* Terminating entry */
 };
 
@@ -792,6 +793,7 @@
 {
 	struct usb_device *udev = interface_to_usbdev(intf);
 	struct zr364xx_camera *cam = NULL;
+	int err;
 
 	DBG("probing...");
 
@@ -799,12 +801,11 @@
 	info("model %04x:%04x detected", udev->descriptor.idVendor,
 	     udev->descriptor.idProduct);
 
-	if ((cam =
-	     kmalloc(sizeof(struct zr364xx_camera), GFP_KERNEL)) == NULL) {
+	cam = kzalloc(sizeof(struct zr364xx_camera), GFP_KERNEL);
+	if (cam == NULL) {
 		info("cam: out of memory !");
-		return -ENODEV;
+		return -ENOMEM;
 	}
-	memset(cam, 0x00, sizeof(struct zr364xx_camera));
 	/* save the init method used by this camera */
 	cam->method = id->driver_info;
 
@@ -812,7 +813,7 @@
 	if (cam->vdev == NULL) {
 		info("cam->vdev: out of memory !");
 		kfree(cam);
-		return -ENODEV;
+		return -ENOMEM;
 	}
 	memcpy(cam->vdev, &zr364xx_template, sizeof(zr364xx_template));
 	video_set_drvdata(cam->vdev, cam);
@@ -858,12 +859,13 @@
 	cam->brightness = 64;
 	mutex_init(&cam->lock);
 
-	if (video_register_device(cam->vdev, VFL_TYPE_GRABBER, -1) == -1) {
+	err = video_register_device(cam->vdev, VFL_TYPE_GRABBER, -1);
+	if (err) {
 		info("video_register_device failed");
 		video_device_release(cam->vdev);
 		kfree(cam->buffer);
 		kfree(cam);
-		return -ENODEV;
+		return err;
 	}
 
 	usb_set_intfdata(intf, cam);
@@ -905,7 +907,7 @@
 static int __init zr364xx_init(void)
 {
 	int retval;
-	retval = usb_register(&zr364xx_driver) < 0;
+	retval = usb_register(&zr364xx_driver);
 	if (retval)
 		info("usb_register failed!");
 	else
diff --git a/drivers/message/i2o/debug.c b/drivers/message/i2o/debug.c
index 8abe45e..ce62d8b 100644
--- a/drivers/message/i2o/debug.c
+++ b/drivers/message/i2o/debug.c
@@ -24,7 +24,7 @@
 	if (cmd == I2O_CMD_UTIL_EVT_REGISTER)
 		return;		// No status in this reply
 
-	printk(KERN_DEBUG "%s%s: ", severity, str);
+	printk("%s%s: ", severity, str);
 
 	if (cmd < 0x1F)		// Utility cmd
 		i2o_report_util_cmd(cmd);
@@ -32,7 +32,7 @@
 	else if (cmd >= 0xA0 && cmd <= 0xEF)	// Executive cmd
 		i2o_report_exec_cmd(cmd);
 	else
-		printk(KERN_DEBUG "Cmd = %0#2x, ", cmd);	// Other cmds
+		printk("Cmd = %0#2x, ", cmd);	// Other cmds
 
 	if (msg[0] & MSG_FAIL) {
 		i2o_report_fail_status(req_status, msg);
@@ -44,7 +44,7 @@
 	if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
 		i2o_report_common_dsc(detailed_status);
 	else
-		printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n",
+		printk(" / DetailedStatus = %0#4x.\n",
 		       detailed_status);
 }
 
@@ -89,10 +89,10 @@
 	};
 
 	if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
-		printk(KERN_DEBUG "TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n",
+		printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n",
 		       req_status);
 	else
-		printk(KERN_DEBUG "TRANSPORT_%s.\n",
+		printk("TRANSPORT_%s.\n",
 		       FAIL_STATUS[req_status & 0x0F]);
 
 	/* Dump some details */
@@ -104,7 +104,7 @@
 	printk(KERN_ERR "  FailingHostUnit = 0x%04X,  FailingIOP = 0x%03X\n",
 	       msg[5] >> 16, msg[5] & 0xFFF);
 
-	printk(KERN_ERR "  Severity:  0x%02X ", (msg[4] >> 16) & 0xFF);
+	printk(KERN_ERR "  Severity:  0x%02X\n", (msg[4] >> 16) & 0xFF);
 	if (msg[4] & (1 << 16))
 		printk(KERN_DEBUG "(FormatError), "
 		       "this msg can never be delivered/processed.\n");
@@ -142,9 +142,9 @@
 	};
 
 	if (req_status >= ARRAY_SIZE(REPLY_STATUS))
-		printk(KERN_DEBUG "RequestStatus = %0#2x", req_status);
+		printk("RequestStatus = %0#2x", req_status);
 	else
-		printk(KERN_DEBUG "%s", REPLY_STATUS[req_status]);
+		printk("%s", REPLY_STATUS[req_status]);
 }
 
 /*
@@ -187,10 +187,10 @@
 	};
 
 	if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
-		printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n",
+		printk(" / DetailedStatus = %0#4x.\n",
 		       detailed_status);
 	else
-		printk(KERN_DEBUG " / %s.\n", COMMON_DSC[detailed_status]);
+		printk(" / %s.\n", COMMON_DSC[detailed_status]);
 }
 
 /*
@@ -200,49 +200,49 @@
 {
 	switch (cmd) {
 	case I2O_CMD_UTIL_NOP:
-		printk(KERN_DEBUG "UTIL_NOP, ");
+		printk("UTIL_NOP, ");
 		break;
 	case I2O_CMD_UTIL_ABORT:
-		printk(KERN_DEBUG "UTIL_ABORT, ");
+		printk("UTIL_ABORT, ");
 		break;
 	case I2O_CMD_UTIL_CLAIM:
-		printk(KERN_DEBUG "UTIL_CLAIM, ");
+		printk("UTIL_CLAIM, ");
 		break;
 	case I2O_CMD_UTIL_RELEASE:
-		printk(KERN_DEBUG "UTIL_CLAIM_RELEASE, ");
+		printk("UTIL_CLAIM_RELEASE, ");
 		break;
 	case I2O_CMD_UTIL_CONFIG_DIALOG:
-		printk(KERN_DEBUG "UTIL_CONFIG_DIALOG, ");
+		printk("UTIL_CONFIG_DIALOG, ");
 		break;
 	case I2O_CMD_UTIL_DEVICE_RESERVE:
-		printk(KERN_DEBUG "UTIL_DEVICE_RESERVE, ");
+		printk("UTIL_DEVICE_RESERVE, ");
 		break;
 	case I2O_CMD_UTIL_DEVICE_RELEASE:
-		printk(KERN_DEBUG "UTIL_DEVICE_RELEASE, ");
+		printk("UTIL_DEVICE_RELEASE, ");
 		break;
 	case I2O_CMD_UTIL_EVT_ACK:
-		printk(KERN_DEBUG "UTIL_EVENT_ACKNOWLEDGE, ");
+		printk("UTIL_EVENT_ACKNOWLEDGE, ");
 		break;
 	case I2O_CMD_UTIL_EVT_REGISTER:
-		printk(KERN_DEBUG "UTIL_EVENT_REGISTER, ");
+		printk("UTIL_EVENT_REGISTER, ");
 		break;
 	case I2O_CMD_UTIL_LOCK:
-		printk(KERN_DEBUG "UTIL_LOCK, ");
+		printk("UTIL_LOCK, ");
 		break;
 	case I2O_CMD_UTIL_LOCK_RELEASE:
-		printk(KERN_DEBUG "UTIL_LOCK_RELEASE, ");
+		printk("UTIL_LOCK_RELEASE, ");
 		break;
 	case I2O_CMD_UTIL_PARAMS_GET:
-		printk(KERN_DEBUG "UTIL_PARAMS_GET, ");
+		printk("UTIL_PARAMS_GET, ");
 		break;
 	case I2O_CMD_UTIL_PARAMS_SET:
-		printk(KERN_DEBUG "UTIL_PARAMS_SET, ");
+		printk("UTIL_PARAMS_SET, ");
 		break;
 	case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
-		printk(KERN_DEBUG "UTIL_REPLY_FAULT_NOTIFY, ");
+		printk("UTIL_REPLY_FAULT_NOTIFY, ");
 		break;
 	default:
-		printk(KERN_DEBUG "Cmd = %0#2x, ", cmd);
+		printk("Cmd = %0#2x, ", cmd);
 	}
 }
 
@@ -253,106 +253,106 @@
 {
 	switch (cmd) {
 	case I2O_CMD_ADAPTER_ASSIGN:
-		printk(KERN_DEBUG "EXEC_ADAPTER_ASSIGN, ");
+		printk("EXEC_ADAPTER_ASSIGN, ");
 		break;
 	case I2O_CMD_ADAPTER_READ:
-		printk(KERN_DEBUG "EXEC_ADAPTER_READ, ");
+		printk("EXEC_ADAPTER_READ, ");
 		break;
 	case I2O_CMD_ADAPTER_RELEASE:
-		printk(KERN_DEBUG "EXEC_ADAPTER_RELEASE, ");
+		printk("EXEC_ADAPTER_RELEASE, ");
 		break;
 	case I2O_CMD_BIOS_INFO_SET:
-		printk(KERN_DEBUG "EXEC_BIOS_INFO_SET, ");
+		printk("EXEC_BIOS_INFO_SET, ");
 		break;
 	case I2O_CMD_BOOT_DEVICE_SET:
-		printk(KERN_DEBUG "EXEC_BOOT_DEVICE_SET, ");
+		printk("EXEC_BOOT_DEVICE_SET, ");
 		break;
 	case I2O_CMD_CONFIG_VALIDATE:
-		printk(KERN_DEBUG "EXEC_CONFIG_VALIDATE, ");
+		printk("EXEC_CONFIG_VALIDATE, ");
 		break;
 	case I2O_CMD_CONN_SETUP:
-		printk(KERN_DEBUG "EXEC_CONN_SETUP, ");
+		printk("EXEC_CONN_SETUP, ");
 		break;
 	case I2O_CMD_DDM_DESTROY:
-		printk(KERN_DEBUG "EXEC_DDM_DESTROY, ");
+		printk("EXEC_DDM_DESTROY, ");
 		break;
 	case I2O_CMD_DDM_ENABLE:
-		printk(KERN_DEBUG "EXEC_DDM_ENABLE, ");
+		printk("EXEC_DDM_ENABLE, ");
 		break;
 	case I2O_CMD_DDM_QUIESCE:
-		printk(KERN_DEBUG "EXEC_DDM_QUIESCE, ");
+		printk("EXEC_DDM_QUIESCE, ");
 		break;
 	case I2O_CMD_DDM_RESET:
-		printk(KERN_DEBUG "EXEC_DDM_RESET, ");
+		printk("EXEC_DDM_RESET, ");
 		break;
 	case I2O_CMD_DDM_SUSPEND:
-		printk(KERN_DEBUG "EXEC_DDM_SUSPEND, ");
+		printk("EXEC_DDM_SUSPEND, ");
 		break;
 	case I2O_CMD_DEVICE_ASSIGN:
-		printk(KERN_DEBUG "EXEC_DEVICE_ASSIGN, ");
+		printk("EXEC_DEVICE_ASSIGN, ");
 		break;
 	case I2O_CMD_DEVICE_RELEASE:
-		printk(KERN_DEBUG "EXEC_DEVICE_RELEASE, ");
+		printk("EXEC_DEVICE_RELEASE, ");
 		break;
 	case I2O_CMD_HRT_GET:
-		printk(KERN_DEBUG "EXEC_HRT_GET, ");
+		printk("EXEC_HRT_GET, ");
 		break;
 	case I2O_CMD_ADAPTER_CLEAR:
-		printk(KERN_DEBUG "EXEC_IOP_CLEAR, ");
+		printk("EXEC_IOP_CLEAR, ");
 		break;
 	case I2O_CMD_ADAPTER_CONNECT:
-		printk(KERN_DEBUG "EXEC_IOP_CONNECT, ");
+		printk("EXEC_IOP_CONNECT, ");
 		break;
 	case I2O_CMD_ADAPTER_RESET:
-		printk(KERN_DEBUG "EXEC_IOP_RESET, ");
+		printk("EXEC_IOP_RESET, ");
 		break;
 	case I2O_CMD_LCT_NOTIFY:
-		printk(KERN_DEBUG "EXEC_LCT_NOTIFY, ");
+		printk("EXEC_LCT_NOTIFY, ");
 		break;
 	case I2O_CMD_OUTBOUND_INIT:
-		printk(KERN_DEBUG "EXEC_OUTBOUND_INIT, ");
+		printk("EXEC_OUTBOUND_INIT, ");
 		break;
 	case I2O_CMD_PATH_ENABLE:
-		printk(KERN_DEBUG "EXEC_PATH_ENABLE, ");
+		printk("EXEC_PATH_ENABLE, ");
 		break;
 	case I2O_CMD_PATH_QUIESCE:
-		printk(KERN_DEBUG "EXEC_PATH_QUIESCE, ");
+		printk("EXEC_PATH_QUIESCE, ");
 		break;
 	case I2O_CMD_PATH_RESET:
-		printk(KERN_DEBUG "EXEC_PATH_RESET, ");
+		printk("EXEC_PATH_RESET, ");
 		break;
 	case I2O_CMD_STATIC_MF_CREATE:
-		printk(KERN_DEBUG "EXEC_STATIC_MF_CREATE, ");
+		printk("EXEC_STATIC_MF_CREATE, ");
 		break;
 	case I2O_CMD_STATIC_MF_RELEASE:
-		printk(KERN_DEBUG "EXEC_STATIC_MF_RELEASE, ");
+		printk("EXEC_STATIC_MF_RELEASE, ");
 		break;
 	case I2O_CMD_STATUS_GET:
-		printk(KERN_DEBUG "EXEC_STATUS_GET, ");
+		printk("EXEC_STATUS_GET, ");
 		break;
 	case I2O_CMD_SW_DOWNLOAD:
-		printk(KERN_DEBUG "EXEC_SW_DOWNLOAD, ");
+		printk("EXEC_SW_DOWNLOAD, ");
 		break;
 	case I2O_CMD_SW_UPLOAD:
-		printk(KERN_DEBUG "EXEC_SW_UPLOAD, ");
+		printk("EXEC_SW_UPLOAD, ");
 		break;
 	case I2O_CMD_SW_REMOVE:
-		printk(KERN_DEBUG "EXEC_SW_REMOVE, ");
+		printk("EXEC_SW_REMOVE, ");
 		break;
 	case I2O_CMD_SYS_ENABLE:
-		printk(KERN_DEBUG "EXEC_SYS_ENABLE, ");
+		printk("EXEC_SYS_ENABLE, ");
 		break;
 	case I2O_CMD_SYS_MODIFY:
-		printk(KERN_DEBUG "EXEC_SYS_MODIFY, ");
+		printk("EXEC_SYS_MODIFY, ");
 		break;
 	case I2O_CMD_SYS_QUIESCE:
-		printk(KERN_DEBUG "EXEC_SYS_QUIESCE, ");
+		printk("EXEC_SYS_QUIESCE, ");
 		break;
 	case I2O_CMD_SYS_TAB_SET:
-		printk(KERN_DEBUG "EXEC_SYS_TAB_SET, ");
+		printk("EXEC_SYS_TAB_SET, ");
 		break;
 	default:
-		printk(KERN_DEBUG "Cmd = %#02x, ", cmd);
+		printk("Cmd = %#02x, ", cmd);
 	}
 }
 
@@ -361,28 +361,28 @@
 	printk(KERN_INFO "%s: State = ", c->name);
 	switch (((i2o_status_block *) c->status_block.virt)->iop_state) {
 	case 0x01:
-		printk(KERN_DEBUG "INIT\n");
+		printk("INIT\n");
 		break;
 	case 0x02:
-		printk(KERN_DEBUG "RESET\n");
+		printk("RESET\n");
 		break;
 	case 0x04:
-		printk(KERN_DEBUG "HOLD\n");
+		printk("HOLD\n");
 		break;
 	case 0x05:
-		printk(KERN_DEBUG "READY\n");
+		printk("READY\n");
 		break;
 	case 0x08:
-		printk(KERN_DEBUG "OPERATIONAL\n");
+		printk("OPERATIONAL\n");
 		break;
 	case 0x10:
-		printk(KERN_DEBUG "FAILED\n");
+		printk("FAILED\n");
 		break;
 	case 0x11:
-		printk(KERN_DEBUG "FAULTED\n");
+		printk("FAULTED\n");
 		break;
 	default:
-		printk(KERN_DEBUG "%x (unknown !!)\n",
+		printk("%x (unknown !!)\n",
 		       ((i2o_status_block *) c->status_block.virt)->iop_state);
 	}
 };
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index c13b932..8c83ee3 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -131,8 +131,10 @@
 	int rc = 0;
 
 	wait = i2o_exec_wait_alloc();
-	if (!wait)
+	if (!wait) {
+		i2o_msg_nop(c, msg);
 		return -ENOMEM;
+	}
 
 	if (tcntxt == 0xffffffff)
 		tcntxt = 0x80000000;
@@ -337,6 +339,8 @@
 	rc = device_create_file(dev, &dev_attr_product_id);
 	if (rc) goto err_vid;
 
+	i2o_dev->iop->exec = i2o_dev;
+
 	return 0;
 
 err_vid:
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index b17c4b2..64a52bd 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -215,7 +215,7 @@
 	struct i2o_message *msg;
 
 	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
-	if (IS_ERR(msg) == I2O_QUEUE_EMPTY)
+	if (IS_ERR(msg))
 		return PTR_ERR(msg);
 
 	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 8ba275a..84e046e 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -554,8 +554,6 @@
 		return -ENXIO;
 	}
 
-	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
-
 	sb = c->status_block.virt;
 
 	if (get_user(size, &user_msg[0])) {
@@ -573,24 +571,30 @@
 
 	size <<= 2;		// Convert to bytes
 
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
+
+	rcode = -EFAULT;
 	/* Copy in the user's I2O command */
 	if (copy_from_user(msg, user_msg, size)) {
 		osm_warn("unable to copy user message\n");
-		return -EFAULT;
+		goto out;
 	}
 	i2o_dump_message(msg);
 
 	if (get_user(reply_size, &user_reply[0]) < 0)
-		return -EFAULT;
+		goto out;
 
 	reply_size >>= 16;
 	reply_size <<= 2;
 
+	rcode = -ENOMEM;
 	reply = kzalloc(reply_size, GFP_KERNEL);
 	if (!reply) {
 		printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
 		       c->name);
-		return -ENOMEM;
+		goto out;
 	}
 
 	sg_offset = (msg->u.head[0] >> 4) & 0x0f;
@@ -661,13 +665,14 @@
 	}
 
 	rcode = i2o_msg_post_wait(c, msg, 60);
+	msg = NULL;
 	if (rcode) {
 		reply[4] = ((u32) rcode) << 24;
 		goto sg_list_cleanup;
 	}
 
 	if (sg_offset) {
-		u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE];
+		u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
 		/* Copy back the Scatter Gather buffers back to user space */
 		u32 j;
 		// TODO 64bit fix
@@ -675,7 +680,7 @@
 		int sg_size;
 
 		// re-acquire the original message to handle correctly the sg copy operation
-		memset(&msg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
+		memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
 		// get user msg size in u32s
 		if (get_user(size, &user_msg[0])) {
 			rcode = -EFAULT;
@@ -684,7 +689,7 @@
 		size = size >> 16;
 		size *= 4;
 		/* Copy in the user's I2O command */
-		if (copy_from_user(msg, user_msg, size)) {
+		if (copy_from_user(rmsg, user_msg, size)) {
 			rcode = -EFAULT;
 			goto sg_list_cleanup;
 		}
@@ -692,7 +697,7 @@
 		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);
 
 		// TODO 64bit fix
-		sg = (struct sg_simple_element *)(msg + sg_offset);
+		sg = (struct sg_simple_element *)(rmsg + sg_offset);
 		for (j = 0; j < sg_count; j++) {
 			/* Copy out the SG list to user's buffer if necessary */
 			if (!
@@ -714,7 +719,7 @@
 		}
 	}
 
-      sg_list_cleanup:
+sg_list_cleanup:
 	/* Copy back the reply to user space */
 	if (reply_size) {
 		// we wrote our own values for context - now restore the user supplied ones
@@ -723,7 +728,6 @@
 			       "%s: Could not copy message context FROM user\n",
 			       c->name);
 			rcode = -EFAULT;
-			goto sg_list_cleanup;
 		}
 		if (copy_to_user(user_reply, reply, reply_size)) {
 			printk(KERN_WARNING
@@ -731,12 +735,14 @@
 			rcode = -EFAULT;
 		}
 	}
-
 	for (i = 0; i < sg_index; i++)
 		i2o_dma_free(&c->pdev->dev, &sg_list[i]);
 
-      cleanup:
+cleanup:
 	kfree(reply);
+out:
+	if (msg)
+		i2o_msg_nop(c, msg);
 	return rcode;
 }
 
@@ -793,8 +799,6 @@
 		return -ENXIO;
 	}
 
-	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
-
 	sb = c->status_block.virt;
 
 	if (get_user(size, &user_msg[0]))
@@ -810,12 +814,17 @@
 
 	size <<= 2;		// Convert to bytes
 
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
+
+	rcode = -EFAULT;
 	/* Copy in the user's I2O command */
 	if (copy_from_user(msg, user_msg, size))
-		return -EFAULT;
+		goto out;
 
 	if (get_user(reply_size, &user_reply[0]) < 0)
-		return -EFAULT;
+		goto out;
 
 	reply_size >>= 16;
 	reply_size <<= 2;
@@ -824,7 +833,8 @@
 	if (!reply) {
 		printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
 		       c->name);
-		return -ENOMEM;
+		rcode = -ENOMEM;
+		goto out;
 	}
 
 	sg_offset = (msg->u.head[0] >> 4) & 0x0f;
@@ -891,13 +901,14 @@
 	}
 
 	rcode = i2o_msg_post_wait(c, msg, 60);
+	msg = NULL;
 	if (rcode) {
 		reply[4] = ((u32) rcode) << 24;
 		goto sg_list_cleanup;
 	}
 
 	if (sg_offset) {
-		u32 msg[128];
+		u32 rmsg[128];
 		/* Copy back the Scatter Gather buffers back to user space */
 		u32 j;
 		// TODO 64bit fix
@@ -905,7 +916,7 @@
 		int sg_size;
 
 		// re-acquire the original message to handle correctly the sg copy operation
-		memset(&msg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
+		memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
 		// get user msg size in u32s
 		if (get_user(size, &user_msg[0])) {
 			rcode = -EFAULT;
@@ -914,7 +925,7 @@
 		size = size >> 16;
 		size *= 4;
 		/* Copy in the user's I2O command */
-		if (copy_from_user(msg, user_msg, size)) {
+		if (copy_from_user(rmsg, user_msg, size)) {
 			rcode = -EFAULT;
 			goto sg_list_cleanup;
 		}
@@ -922,7 +933,7 @@
 		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);
 
 		// TODO 64bit fix
-		sg = (struct sg_simple_element *)(msg + sg_offset);
+		sg = (struct sg_simple_element *)(rmsg + sg_offset);
 		for (j = 0; j < sg_count; j++) {
 			/* Copy out the SG list to user's buffer if necessary */
 			if (!
@@ -944,7 +955,7 @@
 		}
 	}
 
-      sg_list_cleanup:
+sg_list_cleanup:
 	/* Copy back the reply to user space */
 	if (reply_size) {
 		// we wrote our own values for context - now restore the user supplied ones
@@ -964,8 +975,11 @@
 	for (i = 0; i < sg_index; i++)
 		kfree(sg_list[i]);
 
-      cleanup:
+cleanup:
 	kfree(reply);
+out:
+	if (msg)
+		i2o_msg_nop(c, msg);
 	return rcode;
 }
 #endif
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c
index 75f401d..b4ed57e 100644
--- a/drivers/mfd/mcp-core.c
+++ b/drivers/mfd/mcp-core.c
@@ -200,9 +200,8 @@
 {
 	struct mcp *mcp;
 
-	mcp = kmalloc(sizeof(struct mcp) + size, GFP_KERNEL);
+	mcp = kzalloc(sizeof(struct mcp) + size, GFP_KERNEL);
 	if (mcp) {
-		memset(mcp, 0, sizeof(struct mcp) + size);
 		spin_lock_init(&mcp->lock);
 		mcp->attached_device.parent = parent;
 		mcp->attached_device.bus = &mcp_bus_type;
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 149810a..e03f1bc 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -484,12 +484,11 @@
 		goto err_disable;
 	}
 
-	ucb = kmalloc(sizeof(struct ucb1x00), GFP_KERNEL);
+	ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL);
 	ret = -ENOMEM;
 	if (!ucb)
 		goto err_disable;
 
-	memset(ucb, 0, sizeof(struct ucb1x00));
 
 	ucb->cdev.class = &ucb1x00_class;
 	ucb->cdev.dev = &mcp->attached_device;
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 38e815a..fdbaa77 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -209,6 +209,7 @@
 	DECLARE_WAITQUEUE(wait, tsk);
 	int valid = 0;
 
+	set_freezable();
 	add_wait_queue(&ts->irq_wait, &wait);
 	while (!kthread_should_stop()) {
 		unsigned int x, y, p;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a92b872..1d516f2 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -10,7 +10,7 @@
 
 config IBM_ASM
 	tristate "Device driver for IBM RSA service processor"
-	depends on X86 && PCI && EXPERIMENTAL
+	depends on X86 && PCI && INPUT && EXPERIMENTAL
 	---help---
 	  This option enables device driver support for in-band access to the
 	  IBM RSA (Condor) service processor in eServer xSeries systems.
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
index 7798f590e..f753060 100644
--- a/drivers/misc/asus-laptop.c
+++ b/drivers/misc/asus-laptop.c
@@ -979,10 +979,9 @@
 	printk(ASUS_NOTICE "Asus Laptop Support version %s\n",
 	       ASUS_LAPTOP_VERSION);
 
-	hotk = kmalloc(sizeof(struct asus_hotk), GFP_KERNEL);
+	hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
 	if (!hotk)
 		return -ENOMEM;
-	memset(hotk, 0, sizeof(struct asus_hotk));
 
 	hotk->handle = device->handle;
 	strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME);
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 07a085c..6497872 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -18,7 +18,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com> 
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
@@ -41,18 +41,16 @@
 	if (buffer_size > IBMASM_CMD_MAX_BUFFER_SIZE)
 		return NULL;
 
-	cmd = kmalloc(sizeof(struct command), GFP_KERNEL);
+	cmd = kzalloc(sizeof(struct command), GFP_KERNEL);
 	if (cmd == NULL)
 		return NULL;
 
-	memset(cmd, 0, sizeof(*cmd));
 
-	cmd->buffer = kmalloc(buffer_size, GFP_KERNEL);
+	cmd->buffer = kzalloc(buffer_size, GFP_KERNEL);
 	if (cmd->buffer == NULL) {
 		kfree(cmd);
 		return NULL;
 	}
-	memset(cmd->buffer, 0, buffer_size);
 	cmd->buffer_size = buffer_size;
 
 	kobject_init(&cmd->kobj);
@@ -72,7 +70,7 @@
 static void free_command(struct kobject *kobj)
 {
 	struct command *cmd = to_command(kobj);
- 
+
 	list_del(&cmd->queue_node);
 	atomic_dec(&command_count);
 	dbg("command count: %d\n", atomic_read(&command_count));
@@ -113,14 +111,14 @@
 		exec_next_command(sp);
 	}
 }
-	
+
 /**
  * exec_command
  * send a command to a service processor
  * Commands are executed sequentially. One command (sp->current_command)
  * is sent to the service processor. Once the interrupt handler gets a
  * message of type command_response, the message is copied into
- * the current commands buffer, 
+ * the current commands buffer,
  */
 void ibmasm_exec_command(struct service_processor *sp, struct command *cmd)
 {
@@ -160,7 +158,7 @@
 	}
 }
 
-/** 
+/**
  * Sleep until a command has failed or a response has been received
  * and the command status been updated by the interrupt handler.
  * (see receive_response).
@@ -182,8 +180,8 @@
 {
 	struct command *cmd = sp->current_command;
 
-	if (!sp->current_command) 
-		return; 
+	if (!sp->current_command)
+		return;
 
 	memcpy_fromio(cmd->buffer, response, min(size, cmd->buffer_size));
 	cmd->status = IBMASM_CMD_COMPLETE;
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
index 13c52f8..3dd2dfb 100644
--- a/drivers/misc/ibmasm/dot_command.c
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -17,7 +17,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com> 
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
@@ -44,11 +44,11 @@
 		size = message_size;
 
 	switch (header->type) {
-	case sp_event: 
+	case sp_event:
 		ibmasm_receive_event(sp, message, size);
 		break;
 	case sp_command_response:
-		ibmasm_receive_command_response(sp, message, size); 
+		ibmasm_receive_command_response(sp, message, size);
 		break;
 	case sp_heartbeat:
 		ibmasm_receive_heartbeat(sp, message, size);
@@ -95,7 +95,7 @@
 	strcat(vpd_data, IBMASM_DRIVER_VPD);
 	vpd_data[10] = 0;
 	vpd_data[15] = 0;
-	
+
 	ibmasm_exec_command(sp, command);
 	ibmasm_wait_for_response(command, IBMASM_CMD_TIMEOUT_NORMAL);
 
@@ -118,7 +118,7 @@
  * During driver init this function is called with os state "up".
  * This causes the service processor to start sending heartbeats the
  * driver.
- * During driver exit the function is called with os state "down", 
+ * During driver exit the function is called with os state "down",
  * causing the service processor to stop the heartbeats.
  */
 int ibmasm_send_os_state(struct service_processor *sp, int os_state)
diff --git a/drivers/misc/ibmasm/dot_command.h b/drivers/misc/ibmasm/dot_command.h
index 2d21c27..6cbba1a 100644
--- a/drivers/misc/ibmasm/dot_command.h
+++ b/drivers/misc/ibmasm/dot_command.h
@@ -17,7 +17,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com> 
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index fe1e819..fda6a4d 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -18,7 +18,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com> 
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
@@ -51,7 +51,7 @@
  * event readers.
  * There is no reader marker in the buffer, therefore readers are
  * responsible for keeping up with the writer, or they will loose events.
- */ 
+ */
 void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size)
 {
 	struct event_buffer *buffer = sp->event_buffer;
@@ -77,13 +77,13 @@
 
 static inline int event_available(struct event_buffer *b, struct event_reader *r)
 {
-	return 	(r->next_serial_number < b->next_serial_number);
+	return (r->next_serial_number < b->next_serial_number);
 }
 
 /**
  * get_next_event
  * Called by event readers (initiated from user space through the file
- * system). 
+ * system).
  * Sleeps until a new event is available.
  */
 int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader)
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
index 7fd7a43..3036e78 100644
--- a/drivers/misc/ibmasm/heartbeat.c
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -18,7 +18,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com> 
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/ibmasm/i2o.h b/drivers/misc/ibmasm/i2o.h
index 958c957..bf2c738 100644
--- a/drivers/misc/ibmasm/i2o.h
+++ b/drivers/misc/ibmasm/i2o.h
@@ -17,7 +17,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com> 
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
@@ -26,9 +26,9 @@
 	u8	version;
 	u8	message_flags;
 	u16	message_size;
-	u8	target;           
+	u8	target;
 	u8	initiator_and_target;
-	u8	initiator;        
+	u8	initiator;
 	u8	function;
 	u32	initiator_context;
 };
@@ -64,12 +64,12 @@
 	size = sizeof(struct i2o_header) + data_size;
 
 	i2o_size = size / sizeof(u32);
-	
+
 	if (size % sizeof(u32))
 	       i2o_size++;
 
 	return i2o_size;
-}	
+}
 
 static inline u32 incoming_data_size(struct i2o_message *i2o_message)
 {
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h
index 48d5abe..de860bc 100644
--- a/drivers/misc/ibmasm/ibmasm.h
+++ b/drivers/misc/ibmasm/ibmasm.h
@@ -18,7 +18,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com> 
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
@@ -58,8 +58,8 @@
 	return buf;
 }
 
-#define IBMASM_CMD_PENDING	0	
-#define IBMASM_CMD_COMPLETE	1	
+#define IBMASM_CMD_PENDING	0
+#define IBMASM_CMD_COMPLETE	1
 #define IBMASM_CMD_FAILED	2
 
 #define IBMASM_CMD_TIMEOUT_NORMAL	45
@@ -163,55 +163,55 @@
 };
 
 /* command processing */
-extern struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size);
-extern void ibmasm_exec_command(struct service_processor *sp, struct command *cmd);
-extern void ibmasm_wait_for_response(struct command *cmd, int timeout);
-extern void ibmasm_receive_command_response(struct service_processor *sp, void *response,  size_t size);
+struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size);
+void ibmasm_exec_command(struct service_processor *sp, struct command *cmd);
+void ibmasm_wait_for_response(struct command *cmd, int timeout);
+void ibmasm_receive_command_response(struct service_processor *sp, void *response,  size_t size);
 
 /* event processing */
-extern int ibmasm_event_buffer_init(struct service_processor *sp);
-extern void ibmasm_event_buffer_exit(struct service_processor *sp);
-extern void ibmasm_receive_event(struct service_processor *sp, void *data,  unsigned int data_size);
-extern void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader);
-extern void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader);
-extern int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader);
-extern void ibmasm_cancel_next_event(struct event_reader *reader);
+int ibmasm_event_buffer_init(struct service_processor *sp);
+void ibmasm_event_buffer_exit(struct service_processor *sp);
+void ibmasm_receive_event(struct service_processor *sp, void *data,  unsigned int data_size);
+void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader);
+void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader);
+int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader);
+void ibmasm_cancel_next_event(struct event_reader *reader);
 
 /* heartbeat - from SP to OS */
-extern void ibmasm_register_panic_notifier(void);
-extern void ibmasm_unregister_panic_notifier(void);
-extern int ibmasm_heartbeat_init(struct service_processor *sp);
-extern void ibmasm_heartbeat_exit(struct service_processor *sp);
-extern void ibmasm_receive_heartbeat(struct service_processor *sp,  void *message, size_t size);
+void ibmasm_register_panic_notifier(void);
+void ibmasm_unregister_panic_notifier(void);
+int ibmasm_heartbeat_init(struct service_processor *sp);
+void ibmasm_heartbeat_exit(struct service_processor *sp);
+void ibmasm_receive_heartbeat(struct service_processor *sp,  void *message, size_t size);
 
 /* reverse heartbeat - from OS to SP */
-extern void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
-extern int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
-extern void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb);
+void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
+int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
+void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb);
 
 /* dot commands */
-extern void ibmasm_receive_message(struct service_processor *sp, void *data, int data_size);
-extern int ibmasm_send_driver_vpd(struct service_processor *sp);
-extern int ibmasm_send_os_state(struct service_processor *sp, int os_state);
+void ibmasm_receive_message(struct service_processor *sp, void *data, int data_size);
+int ibmasm_send_driver_vpd(struct service_processor *sp);
+int ibmasm_send_os_state(struct service_processor *sp, int os_state);
 
 /* low level message processing */
-extern int ibmasm_send_i2o_message(struct service_processor *sp);
-extern irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id);
+int ibmasm_send_i2o_message(struct service_processor *sp);
+irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id);
 
 /* remote console */
-extern void ibmasm_handle_mouse_interrupt(struct service_processor *sp);
-extern int ibmasm_init_remote_input_dev(struct service_processor *sp);
-extern void ibmasm_free_remote_input_dev(struct service_processor *sp);
+void ibmasm_handle_mouse_interrupt(struct service_processor *sp);
+int ibmasm_init_remote_input_dev(struct service_processor *sp);
+void ibmasm_free_remote_input_dev(struct service_processor *sp);
 
 /* file system */
-extern int ibmasmfs_register(void);
-extern void ibmasmfs_unregister(void);
-extern void ibmasmfs_add_sp(struct service_processor *sp);
+int ibmasmfs_register(void);
+void ibmasmfs_unregister(void);
+void ibmasmfs_add_sp(struct service_processor *sp);
 
 /* uart */
 #ifdef CONFIG_SERIAL_8250
-extern void ibmasm_register_uart(struct service_processor *sp);
-extern void ibmasm_unregister_uart(struct service_processor *sp);
+void ibmasm_register_uart(struct service_processor *sp);
+void ibmasm_unregister_uart(struct service_processor *sp);
 #else
 #define ibmasm_register_uart(sp)	do { } while(0)
 #define ibmasm_unregister_uart(sp)	do { } while(0)
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index c436d3d..22a7e8b 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -17,12 +17,12 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com> 
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
 /*
- * Parts of this code are based on an article by Jonathan Corbet 
+ * Parts of this code are based on an article by Jonathan Corbet
  * that appeared in Linux Weekly News.
  */
 
@@ -55,22 +55,22 @@
  * For each service processor the following files are created:
  *
  * command: execute dot commands
- * 	write: execute a dot command on the service processor
- * 	read: return the result of a previously executed dot command
+ *	write: execute a dot command on the service processor
+ *	read: return the result of a previously executed dot command
  *
  * events: listen for service processor events
- * 	read: sleep (interruptible) until an event occurs
+ *	read: sleep (interruptible) until an event occurs
  *      write: wakeup sleeping event listener
  *
  * reverse_heartbeat: send a heartbeat to the service processor
- * 	read: sleep (interruptible) until the reverse heartbeat fails
+ *	read: sleep (interruptible) until the reverse heartbeat fails
  *      write: wakeup sleeping heartbeat listener
  *
  * remote_video/width
  * remote_video/height
  * remote_video/width: control remote display settings
- * 	write: set value
- * 	read: read value
+ *	write: set value
+ *	read: read value
  */
 
 #include <linux/fs.h>
@@ -155,7 +155,7 @@
 
 static struct dentry *ibmasmfs_create_file (struct super_block *sb,
 			struct dentry *parent,
-		       	const char *name,
+			const char *name,
 			const struct file_operations *fops,
 			void *data,
 			int mode)
@@ -261,7 +261,7 @@
 	struct ibmasmfs_command_data *command_data = file->private_data;
 
 	if (command_data->command)
-		command_put(command_data->command);	
+		command_put(command_data->command);
 
 	kfree(command_data);
 	return 0;
@@ -348,7 +348,7 @@
 static int event_file_open(struct inode *inode, struct file *file)
 {
 	struct ibmasmfs_event_data *event_data;
-	struct service_processor *sp; 
+	struct service_processor *sp;
 
 	if (!inode->i_private)
 		return -ENODEV;
@@ -563,17 +563,16 @@
 	if (*offset != 0)
 		return 0;
 
-	buff = kmalloc (count + 1, GFP_KERNEL);
+	buff = kzalloc (count + 1, GFP_KERNEL);
 	if (!buff)
 		return -ENOMEM;
 
-	memset(buff, 0x0, count + 1);
 
 	if (copy_from_user(buff, ubuff, count)) {
 		kfree(buff);
 		return -EFAULT;
 	}
-	
+
 	value = simple_strtoul(buff, NULL, 10);
 	writel(value, address);
 	kfree(buff);
diff --git a/drivers/misc/ibmasm/lowlevel.c b/drivers/misc/ibmasm/lowlevel.c
index a3c589b..4b2398e 100644
--- a/drivers/misc/ibmasm/lowlevel.c
+++ b/drivers/misc/ibmasm/lowlevel.c
@@ -17,7 +17,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com> 
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/ibmasm/lowlevel.h b/drivers/misc/ibmasm/lowlevel.h
index e5ed59c..7667665 100644
--- a/drivers/misc/ibmasm/lowlevel.h
+++ b/drivers/misc/ibmasm/lowlevel.h
@@ -17,7 +17,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com> 
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
@@ -48,9 +48,9 @@
 #define INTR_CONTROL_REGISTER  0x13A4
 
 #define SCOUT_COM_A_BASE         0x0000
-#define SCOUT_COM_B_BASE         0x0100   
-#define SCOUT_COM_C_BASE         0x0200   
-#define SCOUT_COM_D_BASE         0x0300   
+#define SCOUT_COM_B_BASE         0x0100
+#define SCOUT_COM_C_BASE         0x0200
+#define SCOUT_COM_D_BASE         0x0300
 
 static inline int sp_interrupt_pending(void __iomem *base_address)
 {
@@ -86,12 +86,12 @@
 
 static inline void enable_uart_interrupts(void __iomem *base_address)
 {
-	ibmasm_enable_interrupts(base_address, UART_INTR_MASK); 
+	ibmasm_enable_interrupts(base_address, UART_INTR_MASK);
 }
 
 static inline void disable_uart_interrupts(void __iomem *base_address)
 {
-	ibmasm_disable_interrupts(base_address, UART_INTR_MASK); 
+	ibmasm_disable_interrupts(base_address, UART_INTR_MASK);
 }
 
 #define valid_mfa(mfa)	( (mfa) != NO_MFAS_AVAILABLE )
@@ -111,7 +111,7 @@
 
 static inline void set_mfa_outbound(void __iomem *base_address, u32 mfa)
 {
-   	writel(mfa, base_address + OUTBOUND_QUEUE_PORT);
+	writel(mfa, base_address + OUTBOUND_QUEUE_PORT);
 }
 
 static inline u32 get_mfa_inbound(void __iomem *base_address)
@@ -126,7 +126,7 @@
 
 static inline void set_mfa_inbound(void __iomem *base_address, u32 mfa)
 {
-   	writel(mfa, base_address + INBOUND_QUEUE_PORT);
+	writel(mfa, base_address + INBOUND_QUEUE_PORT);
 }
 
 static inline struct i2o_message *get_i2o_message(void __iomem *base_address, u32 mfa)
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 2f3bddf..4f9d4a9 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -18,9 +18,9 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com> 
+ * Author: Max Asböck <amax@us.ibm.com>
  *
- * This driver is based on code originally written by Pete Reynolds 
+ * This driver is based on code originally written by Pete Reynolds
  * and others.
  *
  */
@@ -30,13 +30,13 @@
  *
  * 1) When loaded it sends a message to the service processor,
  * indicating that an OS is * running. This causes the service processor
- * to send periodic heartbeats to the OS. 
+ * to send periodic heartbeats to the OS.
  *
  * 2) Answers the periodic heartbeats sent by the service processor.
  * Failure to do so would result in system reboot.
  *
  * 3) Acts as a pass through for dot commands sent from user applications.
- * The interface for this is the ibmasmfs file system. 
+ * The interface for this is the ibmasmfs file system.
  *
  * 4) Allows user applications to register for event notification. Events
  * are sent to the driver through interrupts. They can be read from user
@@ -77,13 +77,12 @@
 	/* vnc client won't work without bus-mastering */
 	pci_set_master(pdev);
 
-	sp = kmalloc(sizeof(struct service_processor), GFP_KERNEL);
+	sp = kzalloc(sizeof(struct service_processor), GFP_KERNEL);
 	if (sp == NULL) {
 		dev_err(&pdev->dev, "Failed to allocate memory\n");
 		result = -ENOMEM;
 		goto error_kmalloc;
 	}
-	memset(sp, 0, sizeof(struct service_processor));
 
 	spin_lock_init(&sp->lock);
 	INIT_LIST_HEAD(&sp->command_queue);
@@ -105,7 +104,7 @@
 	}
 
 	sp->irq = pdev->irq;
-	sp->base_address = ioremap(pci_resource_start(pdev, 0), 
+	sp->base_address = ioremap(pci_resource_start(pdev, 0),
 					pci_resource_len(pdev, 0));
 	if (sp->base_address == 0) {
 		dev_err(sp->dev, "Failed to ioremap pci memory\n");
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
index f8fdb2d..bec9e2c 100644
--- a/drivers/misc/ibmasm/r_heartbeat.c
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -16,7 +16,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com> 
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
@@ -36,10 +36,10 @@
 	unsigned char			command[3];
 } rhb_dot_cmd = {
 	.header = {
-		.type = 	sp_read,
+		.type =		sp_read,
 		.command_size = 3,
 		.data_size =	0,
-		.status = 	0
+		.status =	0
 	},
 	.command = { 4, 3, 6 }
 };
@@ -76,9 +76,9 @@
 		if (cmd->status != IBMASM_CMD_COMPLETE)
 			times_failed++;
 
-		wait_event_interruptible_timeout(rhb->wait, 
+		wait_event_interruptible_timeout(rhb->wait,
 			rhb->stopped,
-			REVERSE_HEARTBEAT_TIMEOUT * HZ); 	
+			REVERSE_HEARTBEAT_TIMEOUT * HZ);
 
 		if (signal_pending(current) || rhb->stopped) {
 			result = -EINTR;
diff --git a/drivers/misc/ibmasm/remote.c b/drivers/misc/ibmasm/remote.c
index a40fda6..0550ce0 100644
--- a/drivers/misc/ibmasm/remote.c
+++ b/drivers/misc/ibmasm/remote.c
@@ -28,11 +28,10 @@
 #include "ibmasm.h"
 #include "remote.h"
 
-static int xmax = 1600;
-static int ymax = 1200;
+#define MOUSE_X_MAX	1600
+#define MOUSE_Y_MAX	1200
 
-
-static unsigned short xlate_high[XLATE_SIZE] = {
+static const unsigned short xlate_high[XLATE_SIZE] = {
 	[KEY_SYM_ENTER & 0xff] = KEY_ENTER,
 	[KEY_SYM_KPSLASH & 0xff] = KEY_KPSLASH,
 	[KEY_SYM_KPSTAR & 0xff] = KEY_KPASTERISK,
@@ -81,7 +80,8 @@
 	[KEY_SYM_NUM_LOCK & 0xff] = KEY_NUMLOCK,
 	[KEY_SYM_SCR_LOCK & 0xff] = KEY_SCROLLLOCK,
 };
-static unsigned short xlate[XLATE_SIZE] = {
+
+static const unsigned short xlate[XLATE_SIZE] = {
 	[NO_KEYCODE] = KEY_RESERVED,
 	[KEY_SYM_SPACE] = KEY_SPACE,
 	[KEY_SYM_TILDE] = KEY_GRAVE,        [KEY_SYM_BKTIC] = KEY_GRAVE,
@@ -133,19 +133,16 @@
 	[KEY_SYM_Z] = KEY_Z,                [KEY_SYM_z] = KEY_Z,
 };
 
-static char remote_mouse_name[] = "ibmasm RSA I remote mouse";
-static char remote_keybd_name[] = "ibmasm RSA I remote keyboard";
-
 static void print_input(struct remote_input *input)
 {
 	if (input->type == INPUT_TYPE_MOUSE) {
 		unsigned char buttons = input->mouse_buttons;
 		dbg("remote mouse movement: (x,y)=(%d,%d)%s%s%s%s\n",
 			input->data.mouse.x, input->data.mouse.y,
-			(buttons)?" -- buttons:":"",
-			(buttons & REMOTE_BUTTON_LEFT)?"left ":"",
-			(buttons & REMOTE_BUTTON_MIDDLE)?"middle ":"",
-			(buttons & REMOTE_BUTTON_RIGHT)?"right":""
+			(buttons) ? " -- buttons:" : "",
+			(buttons & REMOTE_BUTTON_LEFT) ? "left " : "",
+			(buttons & REMOTE_BUTTON_MIDDLE) ? "middle " : "",
+			(buttons & REMOTE_BUTTON_RIGHT) ? "right" : ""
 		      );
 	} else {
 		dbg("remote keypress (code, flag, down):"
@@ -180,7 +177,7 @@
 		key = xlate_high[code & 0xff];
 	else
 		key = xlate[code];
-	input_report_key(dev, key, (input->data.keyboard.key_down) ? 1 : 0);
+	input_report_key(dev, key, input->data.keyboard.key_down);
 	input_sync(dev);
 }
 
@@ -228,20 +225,22 @@
 	mouse_dev->id.vendor = pdev->vendor;
 	mouse_dev->id.product = pdev->device;
 	mouse_dev->id.version = 1;
+	mouse_dev->dev.parent = sp->dev;
 	mouse_dev->evbit[0]  = BIT(EV_KEY) | BIT(EV_ABS);
 	mouse_dev->keybit[LONG(BTN_MOUSE)] = BIT(BTN_LEFT) |
 		BIT(BTN_RIGHT) | BIT(BTN_MIDDLE);
 	set_bit(BTN_TOUCH, mouse_dev->keybit);
-	mouse_dev->name = remote_mouse_name;
-	input_set_abs_params(mouse_dev, ABS_X, 0, xmax, 0, 0);
-	input_set_abs_params(mouse_dev, ABS_Y, 0, ymax, 0, 0);
+	mouse_dev->name = "ibmasm RSA I remote mouse";
+	input_set_abs_params(mouse_dev, ABS_X, 0, MOUSE_X_MAX, 0, 0);
+	input_set_abs_params(mouse_dev, ABS_Y, 0, MOUSE_Y_MAX, 0, 0);
 
-	mouse_dev->id.bustype = BUS_PCI;
+	keybd_dev->id.bustype = BUS_PCI;
 	keybd_dev->id.vendor = pdev->vendor;
 	keybd_dev->id.product = pdev->device;
-	mouse_dev->id.version = 2;
+	keybd_dev->id.version = 2;
+	keybd_dev->dev.parent = sp->dev;
 	keybd_dev->evbit[0]  = BIT(EV_KEY);
-	keybd_dev->name = remote_keybd_name;
+	keybd_dev->name = "ibmasm RSA I remote keyboard";
 
 	for (i = 0; i < XLATE_SIZE; i++) {
 		if (xlate_high[i])
diff --git a/drivers/misc/ibmasm/remote.h b/drivers/misc/ibmasm/remote.h
index b7076a8..72acf5a 100644
--- a/drivers/misc/ibmasm/remote.h
+++ b/drivers/misc/ibmasm/remote.h
@@ -18,7 +18,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com> 
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  * Orignally written by Pete Reynolds
  */
@@ -73,7 +73,7 @@
 
 
 
-struct remote_input { 
+struct remote_input {
 	union {
 		struct mouse_input	mouse;
 		struct keyboard_input	keyboard;
@@ -85,7 +85,7 @@
 	unsigned char	pad3;
 };
 
-#define mouse_addr(sp) 		(sp->base_address + CONDOR_MOUSE_DATA)
+#define mouse_addr(sp)		(sp->base_address + CONDOR_MOUSE_DATA)
 #define display_width(sp)	(mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESX)
 #define display_height(sp)	(mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESY)
 #define display_depth(sp)	(mouse_addr(sp) + CONDOR_INPUT_DISPLAY_BITS)
@@ -93,7 +93,7 @@
 #define vnc_status(sp)		(mouse_addr(sp) + CONDOR_OUTPUT_VNC_STATUS)
 #define isr_control(sp)		(mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
 
-#define mouse_interrupt_pending(sp) 	readl(mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
+#define mouse_interrupt_pending(sp)	readl(mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
 #define clear_mouse_interrupt(sp)	writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
 #define enable_mouse_interrupts(sp)	writel(1, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
 #define disable_mouse_interrupts(sp)	writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c
index 9783caf..93baa35 100644
--- a/drivers/misc/ibmasm/uart.c
+++ b/drivers/misc/ibmasm/uart.c
@@ -18,7 +18,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com> 
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index cbd4b6e..93fe2e5 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -414,13 +414,12 @@
 		return ERR_PTR(-ENOSPC);
 	__set_bit(devidx, dev_use);
 
-	md = kmalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
+	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
 	if (!md) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	memset(md, 0, sizeof(struct mmc_blk_data));
 
 	/*
 	 * Set the read-only status based on the supported commands
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 4fb2089..b53dac8 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -11,6 +11,7 @@
  */
 #include <linux/module.h>
 #include <linux/blkdev.h>
+#include <linux/freezer.h>
 #include <linux/kthread.h>
 
 #include <linux/mmc/card.h>
@@ -44,11 +45,7 @@
 	struct mmc_queue *mq = d;
 	struct request_queue *q = mq->queue;
 
-	/*
-	 * Set iothread to ensure that we aren't put to sleep by
-	 * the process freezing.  We handle suspension ourselves.
-	 */
-	current->flags |= PF_MEMALLOC|PF_NOFREEZE;
+	current->flags |= PF_MEMALLOC;
 
 	down(&mq->thread_sem);
 	do {
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 51bc7e2..ef89780 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -16,6 +16,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/blkdev.h>
 #include <linux/blkpg.h>
+#include <linux/freezer.h>
 #include <linux/spinlock.h>
 #include <linux/hdreg.h>
 #include <linux/init.h>
@@ -80,7 +81,7 @@
 	struct request_queue *rq = tr->blkcore_priv->rq;
 
 	/* we might get involved when memory gets low, so use PF_MEMALLOC */
-	current->flags |= PF_MEMALLOC | PF_NOFREEZE;
+	current->flags |= PF_MEMALLOC;
 
 	spin_lock_irq(rq->queue_lock);
 	while (!kthread_should_stop()) {
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 555d594..1cb22bf 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -33,6 +33,7 @@
 #include <linux/moduleparam.h>
 #include <linux/stringify.h>
 #include <linux/stat.h>
+#include <linux/log2.h>
 #include "ubi.h"
 
 /* Maximum length of the 'mtd=' parameter */
@@ -369,7 +370,7 @@
 out_wl:
 	ubi_wl_close(ubi);
 out_vtbl:
-	kfree(ubi->vtbl);
+	vfree(ubi->vtbl);
 out_si:
 	ubi_scan_destroy_si(si);
 	return err;
@@ -422,8 +423,7 @@
 	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
 
 	/* Make sure minimal I/O unit is power of 2 */
-	if (ubi->min_io_size == 0 ||
-	    (ubi->min_io_size & (ubi->min_io_size - 1))) {
+	if (!is_power_of_2(ubi->min_io_size)) {
 		ubi_err("bad min. I/O unit");
 		return -EINVAL;
 	}
@@ -593,8 +593,6 @@
 	if (err)
 		goto out_detach;
 
-	ubi_devices_cnt += 1;
-
 	ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, ubi_devices_cnt);
 	ubi_msg("MTD device name:            \"%s\"", ubi->mtd->name);
 	ubi_msg("MTD device size:            %llu MiB", ubi->flash_size >> 20);
@@ -624,12 +622,13 @@
 		wake_up_process(ubi->bgt_thread);
 	}
 
+	ubi_devices_cnt += 1;
 	return 0;
 
 out_detach:
 	ubi_eba_close(ubi);
 	ubi_wl_close(ubi);
-	kfree(ubi->vtbl);
+	vfree(ubi->vtbl);
 out_free:
 	kfree(ubi);
 out_mtd:
@@ -650,7 +649,7 @@
 	uif_close(ubi);
 	ubi_eba_close(ubi);
 	ubi_wl_close(ubi);
-	kfree(ubi->vtbl);
+	vfree(ubi->vtbl);
 	put_mtd_device(ubi->mtd);
 	kfree(ubi_devices[ubi_num]);
 	ubi_devices[ubi_num] = NULL;
@@ -686,13 +685,6 @@
 		struct mtd_dev_param *p = &mtd_dev_param[i];
 
 		cond_resched();
-
-		if (!p->name) {
-			dbg_err("empty name");
-			err = -EINVAL;
-			goto out_detach;
-		}
-
 		err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs);
 		if (err)
 			goto out_detach;
@@ -799,7 +791,7 @@
 
 	/* Get rid of the final newline */
 	if (buf[len - 1] == '\n')
-		buf[len - 1] = 0;
+		buf[len - 1] = '\0';
 
 	for (i = 0; i < 3; i++)
 		tokens[i] = strsep(&pbuf, ",");
@@ -809,9 +801,6 @@
 		return -EINVAL;
 	}
 
-	if (tokens[0] == '\0')
-		return -EINVAL;
-
 	p = &mtd_dev_param[mtd_devs];
 	strcpy(&p->name[0], tokens[0]);
 
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 6612eb7..fe4da1e 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -64,6 +64,7 @@
 		if (ubi_devices[i] && ubi_devices[i]->major == major)
 			return ubi_devices[i];
 	BUG();
+	return NULL;
 }
 
 /**
@@ -153,7 +154,7 @@
 		ubi_warn("update of volume %d not finished, volume is damaged",
 			 vol->vol_id);
 		vol->updating = 0;
-		kfree(vol->upd_buf);
+		vfree(vol->upd_buf);
 	}
 
 	ubi_close_volume(desc);
@@ -232,7 +233,7 @@
 	tbuf_size = vol->usable_leb_size;
 	if (count < tbuf_size)
 		tbuf_size = ALIGN(count, ubi->min_io_size);
-	tbuf = kmalloc(tbuf_size, GFP_KERNEL);
+	tbuf = vmalloc(tbuf_size);
 	if (!tbuf)
 		return -ENOMEM;
 
@@ -271,7 +272,7 @@
 		len = count > tbuf_size ? tbuf_size : count;
 	} while (count);
 
-	kfree(tbuf);
+	vfree(tbuf);
 	return err ? err : count_save - count;
 }
 
@@ -320,7 +321,7 @@
 	tbuf_size = vol->usable_leb_size;
 	if (count < tbuf_size)
 		tbuf_size = ALIGN(count, ubi->min_io_size);
-	tbuf = kmalloc(tbuf_size, GFP_KERNEL);
+	tbuf = vmalloc(tbuf_size);
 	if (!tbuf)
 		return -ENOMEM;
 
@@ -355,7 +356,7 @@
 		len = count > tbuf_size ? tbuf_size : count;
 	}
 
-	kfree(tbuf);
+	vfree(tbuf);
 	return err ? err : count_save - count;
 }
 
@@ -397,6 +398,7 @@
 			vol->corrupted = 1;
 		}
 		vol->checked = 1;
+		ubi_gluebi_updated(vol);
 		revoke_exclusive(desc, UBI_READWRITE);
 	}
 
@@ -413,19 +415,7 @@
 	struct ubi_device *ubi = vol->ubi;
 	void __user *argp = (void __user *)arg;
 
-	if (_IOC_NR(cmd) > VOL_CDEV_IOC_MAX_SEQ ||
-	    _IOC_TYPE(cmd) != UBI_VOL_IOC_MAGIC)
-		return -ENOTTY;
-
-	if (_IOC_DIR(cmd) && _IOC_READ)
-		err = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
-	else if (_IOC_DIR(cmd) && _IOC_WRITE)
-		err = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
-	if (err)
-		return -EFAULT;
-
 	switch (cmd) {
-
 	/* Volume update command */
 	case UBI_IOCVOLUP:
 	{
@@ -471,7 +461,7 @@
 	{
 		int32_t lnum;
 
-		err = __get_user(lnum, (__user int32_t *)argp);
+		err = get_user(lnum, (__user int32_t *)argp);
 		if (err) {
 			err = -EFAULT;
 			break;
@@ -587,17 +577,6 @@
 	struct ubi_volume_desc *desc;
 	void __user *argp = (void __user *)arg;
 
-	if (_IOC_NR(cmd) > UBI_CDEV_IOC_MAX_SEQ ||
-	    _IOC_TYPE(cmd) != UBI_IOC_MAGIC)
-		return -ENOTTY;
-
-	if (_IOC_DIR(cmd) && _IOC_READ)
-		err = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
-	else if (_IOC_DIR(cmd) && _IOC_WRITE)
-		err = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
-	if (err)
-		return -EFAULT;
-
 	if (!capable(CAP_SYS_RESOURCE))
 		return -EPERM;
 
@@ -612,7 +591,7 @@
 		struct ubi_mkvol_req req;
 
 		dbg_msg("create volume");
-		err = __copy_from_user(&req, argp,
+		err = copy_from_user(&req, argp,
 				       sizeof(struct ubi_mkvol_req));
 		if (err) {
 			err = -EFAULT;
@@ -629,7 +608,7 @@
 		if (err)
 			break;
 
-		err = __put_user(req.vol_id, (__user int32_t *)argp);
+		err = put_user(req.vol_id, (__user int32_t *)argp);
 		if (err)
 			err = -EFAULT;
 
@@ -642,7 +621,7 @@
 		int vol_id;
 
 		dbg_msg("remove volume");
-		err = __get_user(vol_id, (__user int32_t *)argp);
+		err = get_user(vol_id, (__user int32_t *)argp);
 		if (err) {
 			err = -EFAULT;
 			break;
@@ -669,7 +648,7 @@
 		struct ubi_rsvol_req req;
 
 		dbg_msg("re-size volume");
-		err = __copy_from_user(&req, argp,
+		err = copy_from_user(&req, argp,
 				       sizeof(struct ubi_rsvol_req));
 		if (err) {
 			err = -EFAULT;
@@ -707,7 +686,7 @@
 struct file_operations ubi_cdev_operations = {
 	.owner = THIS_MODULE,
 	.ioctl = ubi_cdev_ioctl,
-	.llseek = no_llseek
+	.llseek = no_llseek,
 };
 
 /* UBI volume character device operations */
@@ -718,5 +697,5 @@
 	.llseek  = vol_cdev_llseek,
 	.read    = vol_cdev_read,
 	.write   = vol_cdev_write,
-	.ioctl   = vol_cdev_ioctl
+	.ioctl   = vol_cdev_ioctl,
 };
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 8636422..310341e 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -35,12 +35,12 @@
 void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
 {
 	dbg_msg("erase counter header dump:");
-	dbg_msg("magic          %#08x", ubi32_to_cpu(ec_hdr->magic));
+	dbg_msg("magic          %#08x", be32_to_cpu(ec_hdr->magic));
 	dbg_msg("version        %d",    (int)ec_hdr->version);
-	dbg_msg("ec             %llu",  (long long)ubi64_to_cpu(ec_hdr->ec));
-	dbg_msg("vid_hdr_offset %d",    ubi32_to_cpu(ec_hdr->vid_hdr_offset));
-	dbg_msg("data_offset    %d",    ubi32_to_cpu(ec_hdr->data_offset));
-	dbg_msg("hdr_crc        %#08x", ubi32_to_cpu(ec_hdr->hdr_crc));
+	dbg_msg("ec             %llu",  (long long)be64_to_cpu(ec_hdr->ec));
+	dbg_msg("vid_hdr_offset %d",    be32_to_cpu(ec_hdr->vid_hdr_offset));
+	dbg_msg("data_offset    %d",    be32_to_cpu(ec_hdr->data_offset));
+	dbg_msg("hdr_crc        %#08x", be32_to_cpu(ec_hdr->hdr_crc));
 	dbg_msg("erase counter header hexdump:");
 	ubi_dbg_hexdump(ec_hdr, UBI_EC_HDR_SIZE);
 }
@@ -52,20 +52,20 @@
 void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
 {
 	dbg_msg("volume identifier header dump:");
-	dbg_msg("magic     %08x", ubi32_to_cpu(vid_hdr->magic));
+	dbg_msg("magic     %08x", be32_to_cpu(vid_hdr->magic));
 	dbg_msg("version   %d",   (int)vid_hdr->version);
 	dbg_msg("vol_type  %d",   (int)vid_hdr->vol_type);
 	dbg_msg("copy_flag %d",   (int)vid_hdr->copy_flag);
 	dbg_msg("compat    %d",   (int)vid_hdr->compat);
-	dbg_msg("vol_id    %d",   ubi32_to_cpu(vid_hdr->vol_id));
-	dbg_msg("lnum      %d",   ubi32_to_cpu(vid_hdr->lnum));
-	dbg_msg("leb_ver   %u",   ubi32_to_cpu(vid_hdr->leb_ver));
-	dbg_msg("data_size %d",   ubi32_to_cpu(vid_hdr->data_size));
-	dbg_msg("used_ebs  %d",   ubi32_to_cpu(vid_hdr->used_ebs));
-	dbg_msg("data_pad  %d",   ubi32_to_cpu(vid_hdr->data_pad));
+	dbg_msg("vol_id    %d",   be32_to_cpu(vid_hdr->vol_id));
+	dbg_msg("lnum      %d",   be32_to_cpu(vid_hdr->lnum));
+	dbg_msg("leb_ver   %u",   be32_to_cpu(vid_hdr->leb_ver));
+	dbg_msg("data_size %d",   be32_to_cpu(vid_hdr->data_size));
+	dbg_msg("used_ebs  %d",   be32_to_cpu(vid_hdr->used_ebs));
+	dbg_msg("data_pad  %d",   be32_to_cpu(vid_hdr->data_pad));
 	dbg_msg("sqnum     %llu",
-		(unsigned long long)ubi64_to_cpu(vid_hdr->sqnum));
-	dbg_msg("hdr_crc   %08x", ubi32_to_cpu(vid_hdr->hdr_crc));
+		(unsigned long long)be64_to_cpu(vid_hdr->sqnum));
+	dbg_msg("hdr_crc   %08x", be32_to_cpu(vid_hdr->hdr_crc));
 	dbg_msg("volume identifier header hexdump:");
 }
 
@@ -91,7 +91,7 @@
 
 	if (vol->name_len <= UBI_VOL_NAME_MAX &&
 	    strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
-		dbg_msg("name          %s", vol->name);
+		dbg_msg("name            %s", vol->name);
 	} else {
 		dbg_msg("the 1st 5 characters of the name: %c%c%c%c%c",
 			vol->name[0], vol->name[1], vol->name[2],
@@ -106,30 +106,30 @@
  */
 void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
 {
-	int name_len = ubi16_to_cpu(r->name_len);
+	int name_len = be16_to_cpu(r->name_len);
 
 	dbg_msg("volume table record %d dump:", idx);
-	dbg_msg("reserved_pebs   %d", ubi32_to_cpu(r->reserved_pebs));
-	dbg_msg("alignment       %d", ubi32_to_cpu(r->alignment));
-	dbg_msg("data_pad        %d", ubi32_to_cpu(r->data_pad));
+	dbg_msg("reserved_pebs   %d", be32_to_cpu(r->reserved_pebs));
+	dbg_msg("alignment       %d", be32_to_cpu(r->alignment));
+	dbg_msg("data_pad        %d", be32_to_cpu(r->data_pad));
 	dbg_msg("vol_type        %d", (int)r->vol_type);
 	dbg_msg("upd_marker      %d", (int)r->upd_marker);
 	dbg_msg("name_len        %d", name_len);
 
 	if (r->name[0] == '\0') {
-		dbg_msg("name          NULL");
+		dbg_msg("name            NULL");
 		return;
 	}
 
 	if (name_len <= UBI_VOL_NAME_MAX &&
 	    strnlen(&r->name[0], name_len + 1) == name_len) {
-		dbg_msg("name          %s", &r->name[0]);
+		dbg_msg("name            %s", &r->name[0]);
 	} else {
 		dbg_msg("1st 5 characters of the name: %c%c%c%c%c",
 			r->name[0], r->name[1], r->name[2], r->name[3],
 			r->name[4]);
 	}
-	dbg_msg("crc             %#08x", ubi32_to_cpu(r->crc));
+	dbg_msg("crc             %#08x", be32_to_cpu(r->crc));
 }
 
 /**
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index f816ad9..ff8f395 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -52,7 +52,6 @@
 struct ubi_scan_leb;
 struct ubi_mkvol_req;
 
-void ubi_dbg_print(int type, const char *func, const char *fmt, ...);
 void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
 void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
 void ubi_dbg_dump_vol_info(const struct ubi_volume *vol);
@@ -66,7 +65,6 @@
 
 #define dbg_msg(fmt, ...)    ({})
 #define ubi_dbg_dump_stack() ({})
-#define ubi_dbg_print(func, fmt, ...)    ({})
 #define ubi_dbg_dump_ec_hdr(ec_hdr)      ({})
 #define ubi_dbg_dump_vid_hdr(vid_hdr)    ({})
 #define ubi_dbg_dump_vol_info(vol)       ({})
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 74002945..8aff938 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -368,7 +368,7 @@
 	int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id);
 	struct ubi_vid_hdr *vid_hdr;
 	struct ubi_volume *vol = ubi->volumes[idx];
-	uint32_t crc, crc1;
+	uint32_t uninitialized_var(crc);
 
 	err = leb_read_lock(ubi, vol_id, lnum);
 	if (err)
@@ -425,10 +425,10 @@
 		} else if (err == UBI_IO_BITFLIPS)
 			scrub = 1;
 
-		ubi_assert(lnum < ubi32_to_cpu(vid_hdr->used_ebs));
-		ubi_assert(len == ubi32_to_cpu(vid_hdr->data_size));
+		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
+		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
 
-		crc = ubi32_to_cpu(vid_hdr->data_crc);
+		crc = be32_to_cpu(vid_hdr->data_crc);
 		ubi_free_vid_hdr(ubi, vid_hdr);
 	}
 
@@ -451,7 +451,7 @@
 	}
 
 	if (check) {
-		crc1 = crc32(UBI_CRC32_INIT, buf, len);
+		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
 		if (crc1 != crc) {
 			ubi_warn("CRC error: calculated %#08x, must be %#08x",
 				 crc1, crc);
@@ -518,13 +518,13 @@
 		goto out_put;
 	}
 
-	vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
 	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
 	if (err)
 		goto write_error;
 
 	data_size = offset + len;
-	new_buf = kmalloc(data_size, GFP_KERNEL);
+	new_buf = vmalloc(data_size);
 	if (!new_buf) {
 		err = -ENOMEM;
 		goto out_put;
@@ -535,7 +535,7 @@
 	if (offset > 0) {
 		err = ubi_io_read_data(ubi, new_buf, pnum, 0, offset);
 		if (err && err != UBI_IO_BITFLIPS) {
-			kfree(new_buf);
+			vfree(new_buf);
 			goto out_put;
 		}
 	}
@@ -544,11 +544,11 @@
 
 	err = ubi_io_write_data(ubi, new_buf, new_pnum, 0, data_size);
 	if (err) {
-		kfree(new_buf);
+		vfree(new_buf);
 		goto write_error;
 	}
 
-	kfree(new_buf);
+	vfree(new_buf);
 	ubi_free_vid_hdr(ubi, vid_hdr);
 
 	vol->eba_tbl[lnum] = new_pnum;
@@ -634,11 +634,11 @@
 	}
 
 	vid_hdr->vol_type = UBI_VID_DYNAMIC;
-	vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
-	vid_hdr->vol_id = cpu_to_ubi32(vol_id);
-	vid_hdr->lnum = cpu_to_ubi32(lnum);
+	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->vol_id = cpu_to_be32(vol_id);
+	vid_hdr->lnum = cpu_to_be32(lnum);
 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
-	vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad);
+	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
 
 retry:
 	pnum = ubi_wl_get_peb(ubi, dtype);
@@ -692,7 +692,7 @@
 		return err;
 	}
 
-	vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
 	ubi_msg("try another PEB");
 	goto retry;
 }
@@ -748,17 +748,17 @@
 		return err;
 	}
 
-	vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
-	vid_hdr->vol_id = cpu_to_ubi32(vol_id);
-	vid_hdr->lnum = cpu_to_ubi32(lnum);
+	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->vol_id = cpu_to_be32(vol_id);
+	vid_hdr->lnum = cpu_to_be32(lnum);
 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
-	vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad);
+	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
 
 	crc = crc32(UBI_CRC32_INIT, buf, data_size);
 	vid_hdr->vol_type = UBI_VID_STATIC;
-	vid_hdr->data_size = cpu_to_ubi32(data_size);
-	vid_hdr->used_ebs = cpu_to_ubi32(used_ebs);
-	vid_hdr->data_crc = cpu_to_ubi32(crc);
+	vid_hdr->data_size = cpu_to_be32(data_size);
+	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
+	vid_hdr->data_crc = cpu_to_be32(crc);
 
 retry:
 	pnum = ubi_wl_get_peb(ubi, dtype);
@@ -813,7 +813,7 @@
 		return err;
 	}
 
-	vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
 	ubi_msg("try another PEB");
 	goto retry;
 }
@@ -854,17 +854,17 @@
 		return err;
 	}
 
-	vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
-	vid_hdr->vol_id = cpu_to_ubi32(vol_id);
-	vid_hdr->lnum = cpu_to_ubi32(lnum);
+	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->vol_id = cpu_to_be32(vol_id);
+	vid_hdr->lnum = cpu_to_be32(lnum);
 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
-	vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad);
+	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
 
 	crc = crc32(UBI_CRC32_INIT, buf, len);
-	vid_hdr->vol_type = UBI_VID_STATIC;
-	vid_hdr->data_size = cpu_to_ubi32(len);
+	vid_hdr->vol_type = UBI_VID_DYNAMIC;
+	vid_hdr->data_size = cpu_to_be32(len);
 	vid_hdr->copy_flag = 1;
-	vid_hdr->data_crc = cpu_to_ubi32(crc);
+	vid_hdr->data_crc = cpu_to_be32(crc);
 
 retry:
 	pnum = ubi_wl_get_peb(ubi, dtype);
@@ -891,11 +891,13 @@
 		goto write_error;
 	}
 
-	err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
-	if (err) {
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		leb_write_unlock(ubi, vol_id, lnum);
-		return err;
+	if (vol->eba_tbl[lnum] >= 0) {
+		err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
+		if (err) {
+			ubi_free_vid_hdr(ubi, vid_hdr);
+			leb_write_unlock(ubi, vol_id, lnum);
+			return err;
+		}
 	}
 
 	vol->eba_tbl[lnum] = pnum;
@@ -924,7 +926,7 @@
 		return err;
 	}
 
-	vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
 	ubi_msg("try another PEB");
 	goto retry;
 }
@@ -965,19 +967,19 @@
 	uint32_t crc;
 	void *buf, *buf1 = NULL;
 
-	vol_id = ubi32_to_cpu(vid_hdr->vol_id);
-	lnum = ubi32_to_cpu(vid_hdr->lnum);
+	vol_id = be32_to_cpu(vid_hdr->vol_id);
+	lnum = be32_to_cpu(vid_hdr->lnum);
 
 	dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
 
 	if (vid_hdr->vol_type == UBI_VID_STATIC) {
-		data_size = ubi32_to_cpu(vid_hdr->data_size);
+		data_size = be32_to_cpu(vid_hdr->data_size);
 		aldata_size = ALIGN(data_size, ubi->min_io_size);
 	} else
 		data_size = aldata_size =
-			    ubi->leb_size - ubi32_to_cpu(vid_hdr->data_pad);
+			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
 
-	buf = kmalloc(aldata_size, GFP_KERNEL);
+	buf = vmalloc(aldata_size);
 	if (!buf)
 		return -ENOMEM;
 
@@ -987,7 +989,7 @@
 	 */
 	err = leb_write_lock(ubi, vol_id, lnum);
 	if (err) {
-		kfree(buf);
+		vfree(buf);
 		return err;
 	}
 
@@ -1054,10 +1056,10 @@
 	 */
 	if (data_size > 0) {
 		vid_hdr->copy_flag = 1;
-		vid_hdr->data_size = cpu_to_ubi32(data_size);
-		vid_hdr->data_crc = cpu_to_ubi32(crc);
+		vid_hdr->data_size = cpu_to_be32(data_size);
+		vid_hdr->data_crc = cpu_to_be32(crc);
 	}
-	vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
 
 	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
 	if (err)
@@ -1082,7 +1084,7 @@
 		 * We've written the data and are going to read it back to make
 		 * sure it was written correctly.
 		 */
-		buf1 = kmalloc(aldata_size, GFP_KERNEL);
+		buf1 = vmalloc(aldata_size);
 		if (!buf1) {
 			err = -ENOMEM;
 			goto out_unlock;
@@ -1111,15 +1113,15 @@
 	vol->eba_tbl[lnum] = to;
 
 	leb_write_unlock(ubi, vol_id, lnum);
-	kfree(buf);
-	kfree(buf1);
+	vfree(buf);
+	vfree(buf1);
 
 	return 0;
 
 out_unlock:
 	leb_write_unlock(ubi, vol_id, lnum);
-	kfree(buf);
-	kfree(buf1);
+	vfree(buf);
+	vfree(buf1);
 	return err;
 }
 
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index fc9478d..41ff74c 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -282,7 +282,6 @@
 		mtd->flags = MTD_WRITEABLE;
 	mtd->writesize  = ubi->min_io_size;
 	mtd->owner      = THIS_MODULE;
-	mtd->size       = vol->usable_leb_size * vol->reserved_pebs;
 	mtd->erasesize  = vol->usable_leb_size;
 	mtd->read       = gluebi_read;
 	mtd->write      = gluebi_write;
@@ -290,6 +289,15 @@
 	mtd->get_device = gluebi_get_device;
 	mtd->put_device = gluebi_put_device;
 
+	/*
+	 * In case of dynamic volume, MTD device size is just volume size. In
+	 * case of a static volume the size is equivalent to the amount of data
+	 * bytes, which is zero at this moment and will be changed after volume
+	 * update.
+	 */
+	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+		mtd->size = vol->usable_leb_size * vol->reserved_pebs;
+
 	if (add_mtd_device(mtd)) {
 		ubi_err("cannot not add MTD device\n");
 		kfree(mtd->name);
@@ -321,3 +329,20 @@
 	kfree(mtd->name);
 	return 0;
 }
+
+/**
+ * ubi_gluebi_updated - UBI volume was updated notifier.
+ * @vol: volume description object
+ *
+ * This function is called every time an UBI volume is updated. This function
+ * does nothing if volume @vol is dynamic, and changes MTD device size if the
+ * volume is static. This is needed because static volumes cannot be read past
+ * data they contain.
+ */
+void ubi_gluebi_updated(struct ubi_volume *vol)
+{
+	struct mtd_info *mtd = &vol->gluebi_mtd;
+
+	if (vol->vol_type == UBI_STATIC_VOLUME)
+		mtd->size = vol->used_bytes;
+}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 438914d..b0d8f4c 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -125,9 +125,9 @@
  * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
  *   correctable bit-flips were detected; this is harmless but may indicate
  *   that this eraseblock may become bad soon (but do not have to);
- * o %-EBADMSG if the MTD subsystem reported about data data integrity
- *   problems, for example it can me an ECC error in case of NAND; this most
- *   probably means that the data is corrupted;
+ * o %-EBADMSG if the MTD subsystem reported about data integrity problems, for
+ *   example it can be an ECC error in case of NAND; this most probably means
+ *   that the data is corrupted;
  * o %-EIO if some I/O error occurred;
  * o other negative error codes in case of other errors.
  */
@@ -298,7 +298,7 @@
 	memset(&ei, 0, sizeof(struct erase_info));
 
 	ei.mtd      = ubi->mtd;
-	ei.addr     = pnum * ubi->peb_size;
+	ei.addr     = (loff_t)pnum * ubi->peb_size;
 	ei.len      = ubi->peb_size;
 	ei.callback = erase_callback;
 	ei.priv     = (unsigned long)&wq;
@@ -382,7 +382,7 @@
 	void *buf;
 	int err, i, patt_count;
 
-	buf = kmalloc(ubi->peb_size, GFP_KERNEL);
+	buf = vmalloc(ubi->peb_size);
 	if (!buf)
 		return -ENOMEM;
 
@@ -437,7 +437,7 @@
 		 * physical eraseblock which means something is wrong with it.
 		 */
 		err = -EIO;
-	kfree(buf);
+	vfree(buf);
 	return err;
 }
 
@@ -557,9 +557,9 @@
 	long long ec;
 	int vid_hdr_offset, leb_start;
 
-	ec = ubi64_to_cpu(ec_hdr->ec);
-	vid_hdr_offset = ubi32_to_cpu(ec_hdr->vid_hdr_offset);
-	leb_start = ubi32_to_cpu(ec_hdr->data_offset);
+	ec = be64_to_cpu(ec_hdr->ec);
+	vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
+	leb_start = be32_to_cpu(ec_hdr->data_offset);
 
 	if (ec_hdr->version != UBI_VERSION) {
 		ubi_err("node with incompatible UBI version found: "
@@ -640,7 +640,7 @@
 		read_err = err;
 	}
 
-	magic = ubi32_to_cpu(ec_hdr->magic);
+	magic = be32_to_cpu(ec_hdr->magic);
 	if (magic != UBI_EC_HDR_MAGIC) {
 		/*
 		 * The magic field is wrong. Let's check if we have read all
@@ -684,7 +684,7 @@
 	}
 
 	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
-	hdr_crc = ubi32_to_cpu(ec_hdr->hdr_crc);
+	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
 
 	if (hdr_crc != crc) {
 		if (verbose) {
@@ -729,12 +729,12 @@
 	dbg_io("write EC header to PEB %d", pnum);
 	ubi_assert(pnum >= 0 &&  pnum < ubi->peb_count);
 
-	ec_hdr->magic = cpu_to_ubi32(UBI_EC_HDR_MAGIC);
+	ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
 	ec_hdr->version = UBI_VERSION;
-	ec_hdr->vid_hdr_offset = cpu_to_ubi32(ubi->vid_hdr_offset);
-	ec_hdr->data_offset = cpu_to_ubi32(ubi->leb_start);
+	ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
+	ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
 	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
-	ec_hdr->hdr_crc = cpu_to_ubi32(crc);
+	ec_hdr->hdr_crc = cpu_to_be32(crc);
 
 	err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
 	if (err)
@@ -757,13 +757,13 @@
 {
 	int vol_type = vid_hdr->vol_type;
 	int copy_flag = vid_hdr->copy_flag;
-	int vol_id = ubi32_to_cpu(vid_hdr->vol_id);
-	int lnum = ubi32_to_cpu(vid_hdr->lnum);
+	int vol_id = be32_to_cpu(vid_hdr->vol_id);
+	int lnum = be32_to_cpu(vid_hdr->lnum);
 	int compat = vid_hdr->compat;
-	int data_size = ubi32_to_cpu(vid_hdr->data_size);
-	int used_ebs = ubi32_to_cpu(vid_hdr->used_ebs);
-	int data_pad = ubi32_to_cpu(vid_hdr->data_pad);
-	int data_crc = ubi32_to_cpu(vid_hdr->data_crc);
+	int data_size = be32_to_cpu(vid_hdr->data_size);
+	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+	int data_pad = be32_to_cpu(vid_hdr->data_pad);
+	int data_crc = be32_to_cpu(vid_hdr->data_crc);
 	int usable_leb_size = ubi->leb_size - data_pad;
 
 	if (copy_flag != 0 && copy_flag != 1) {
@@ -914,7 +914,7 @@
 		read_err = err;
 	}
 
-	magic = ubi32_to_cpu(vid_hdr->magic);
+	magic = be32_to_cpu(vid_hdr->magic);
 	if (magic != UBI_VID_HDR_MAGIC) {
 		/*
 		 * If we have read all 0xFF bytes, the VID header probably does
@@ -957,7 +957,7 @@
 	}
 
 	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
-	hdr_crc = ubi32_to_cpu(vid_hdr->hdr_crc);
+	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
 
 	if (hdr_crc != crc) {
 		if (verbose) {
@@ -1007,10 +1007,10 @@
 	if (err)
 		return err > 0 ? -EINVAL: err;
 
-	vid_hdr->magic = cpu_to_ubi32(UBI_VID_HDR_MAGIC);
+	vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
 	vid_hdr->version = UBI_VERSION;
 	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
-	vid_hdr->hdr_crc = cpu_to_ubi32(crc);
+	vid_hdr->hdr_crc = cpu_to_be32(crc);
 
 	err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
 	if (err)
@@ -1060,7 +1060,7 @@
 	int err;
 	uint32_t magic;
 
-	magic = ubi32_to_cpu(ec_hdr->magic);
+	magic = be32_to_cpu(ec_hdr->magic);
 	if (magic != UBI_EC_HDR_MAGIC) {
 		ubi_err("bad magic %#08x, must be %#08x",
 			magic, UBI_EC_HDR_MAGIC);
@@ -1105,7 +1105,7 @@
 		goto exit;
 
 	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
-	hdr_crc = ubi32_to_cpu(ec_hdr->hdr_crc);
+	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
 	if (hdr_crc != crc) {
 		ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
 		ubi_err("paranoid check failed for PEB %d", pnum);
@@ -1137,7 +1137,7 @@
 	int err;
 	uint32_t magic;
 
-	magic = ubi32_to_cpu(vid_hdr->magic);
+	magic = be32_to_cpu(vid_hdr->magic);
 	if (magic != UBI_VID_HDR_MAGIC) {
 		ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
 			magic, pnum, UBI_VID_HDR_MAGIC);
@@ -1187,7 +1187,7 @@
 		goto exit;
 
 	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
-	hdr_crc = ubi32_to_cpu(vid_hdr->hdr_crc);
+	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
 	if (hdr_crc != crc) {
 		ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
 			"read %#08x", pnum, crc, hdr_crc);
@@ -1224,9 +1224,10 @@
 	void *buf;
 	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
 
-	buf = kzalloc(len, GFP_KERNEL);
+	buf = vmalloc(len);
 	if (!buf)
 		return -ENOMEM;
+	memset(buf, 0, len);
 
 	err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
 	if (err && err != -EUCLEAN) {
@@ -1242,7 +1243,7 @@
 		goto fail;
 	}
 
-	kfree(buf);
+	vfree(buf);
 	return 0;
 
 fail:
@@ -1252,7 +1253,7 @@
 	err = 1;
 error:
 	ubi_dbg_dump_stack();
-	kfree(buf);
+	vfree(buf);
 	return err;
 }
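
The return codes documented at the top of this hunk determine how callers react
to flash reads; a minimal caller sketch, assuming the ubi_io_read() prototype
from this file (read_leb_checked and the scrub flag are hypothetical):

static int read_leb_checked(const struct ubi_device *ubi, void *buf, int pnum,
			    int offset, int len, int *scrub)
{
	int err = ubi_io_read(ubi, buf, pnum, offset, len);

	if (err == UBI_IO_BITFLIPS) {
		/* data is intact, but schedule this PEB for scrubbing */
		*scrub = 1;
		return 0;
	}
	if (err == -EBADMSG)
		/* ECC failure: the data itself must be treated as corrupted */
		return -EBADMSG;
	return err;	/* 0 on success, -EIO or another error otherwise */
}
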
 
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index d352c45..4a458e8 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -37,14 +37,9 @@
 {
 	const struct ubi_device *ubi;
 
-	if (!try_module_get(THIS_MODULE))
-		return -ENODEV;
-
 	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES ||
-	    !ubi_devices[ubi_num]) {
-		module_put(THIS_MODULE);
+	    !ubi_devices[ubi_num])
 		return -ENODEV;
-	}
 
 	ubi = ubi_devices[ubi_num];
 	di->ubi_num = ubi->ubi_num;
@@ -52,7 +47,6 @@
 	di->min_io_size = ubi->min_io_size;
 	di->ro_mode = ubi->ro_mode;
 	di->cdev = MKDEV(ubi->major, 0);
-	module_put(THIS_MODULE);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ubi_get_device_info);
@@ -319,9 +313,14 @@
 	    offset + len > vol->usable_leb_size)
 		return -EINVAL;
 
-	if (vol->vol_type == UBI_STATIC_VOLUME && lnum == vol->used_ebs - 1 &&
-	    offset + len > vol->last_eb_bytes)
-		return -EINVAL;
+	if (vol->vol_type == UBI_STATIC_VOLUME) {
+		if (vol->used_ebs == 0)
+			/* Empty static UBI volume */
+			return 0;
+		if (lnum == vol->used_ebs - 1 &&
+		    offset + len > vol->last_eb_bytes)
+			return -EINVAL;
+	}
 
 	if (vol->upd_marker)
 		return -EBADF;
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 38d4e67..9e2338c 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -67,7 +67,7 @@
 	if (vol->vol_type != UBI_STATIC_VOLUME)
 		return 0;
 
-	buf = kmalloc(vol->usable_leb_size, GFP_KERNEL);
+	buf = vmalloc(vol->usable_leb_size);
 	if (!buf)
 		return -ENOMEM;
 
@@ -87,7 +87,7 @@
 		}
 	}
 
-	kfree(buf);
+	vfree(buf);
 	return err;
 }
 
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 473f320..94ee549 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -24,7 +24,7 @@
  * This unit is responsible for scanning the flash media, checking UBI
  * headers and providing complete information about the UBI flash image.
  *
- * The scanning information is reoresented by a &struct ubi_scan_info' object.
+ * The scanning information is represented by a &struct ubi_scan_info object.
  * Information about found volumes is represented by &struct ubi_scan_volume
  * objects which are kept in volume RB-tree with root at the @volumes field.
  * The RB-tree is indexed by the volume ID.
@@ -55,8 +55,19 @@
 static struct ubi_ec_hdr *ech;
 static struct ubi_vid_hdr *vidh;
 
-int ubi_scan_add_to_list(struct ubi_scan_info *si, int pnum, int ec,
-			 struct list_head *list)
+/**
+ * add_to_list - add physical eraseblock to a list.
+ * @si: scanning information
+ * @pnum: physical eraseblock number to add
+ * @ec: erase counter of the physical eraseblock
+ * @list: the list to add to
+ *
+ * This function adds physical eraseblock @pnum to free, erase, corrupted or
+ * alien lists. Returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int add_to_list(struct ubi_scan_info *si, int pnum, int ec,
+		       struct list_head *list)
 {
 	struct ubi_scan_leb *seb;
 
@@ -121,9 +132,9 @@
 			    const struct ubi_scan_volume *sv, int pnum)
 {
 	int vol_type = vid_hdr->vol_type;
-	int vol_id = ubi32_to_cpu(vid_hdr->vol_id);
-	int used_ebs = ubi32_to_cpu(vid_hdr->used_ebs);
-	int data_pad = ubi32_to_cpu(vid_hdr->data_pad);
+	int vol_id = be32_to_cpu(vid_hdr->vol_id);
+	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+	int data_pad = be32_to_cpu(vid_hdr->data_pad);
 
 	if (sv->leb_count != 0) {
 		int sv_vol_type;
@@ -189,7 +200,7 @@
 	struct ubi_scan_volume *sv;
 	struct rb_node **p = &si->volumes.rb_node, *parent = NULL;
 
-	ubi_assert(vol_id == ubi32_to_cpu(vid_hdr->vol_id));
+	ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
 
 	/* Walk the volume RB-tree to look if this volume is already present */
 	while (*p) {
@@ -211,11 +222,10 @@
 		return ERR_PTR(-ENOMEM);
 
 	sv->highest_lnum = sv->leb_count = 0;
-	si->max_sqnum = 0;
 	sv->vol_id = vol_id;
 	sv->root = RB_ROOT;
-	sv->used_ebs = ubi32_to_cpu(vid_hdr->used_ebs);
-	sv->data_pad = ubi32_to_cpu(vid_hdr->data_pad);
+	sv->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+	sv->data_pad = be32_to_cpu(vid_hdr->data_pad);
 	sv->compat = vid_hdr->compat;
 	sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
 							    : UBI_STATIC_VOLUME;
@@ -257,10 +267,10 @@
 	int len, err, second_is_newer, bitflips = 0, corrupted = 0;
 	uint32_t data_crc, crc;
 	struct ubi_vid_hdr *vidh = NULL;
-	unsigned long long sqnum2 = ubi64_to_cpu(vid_hdr->sqnum);
+	unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
 
 	if (seb->sqnum == 0 && sqnum2 == 0) {
-		long long abs, v1 = seb->leb_ver, v2 = ubi32_to_cpu(vid_hdr->leb_ver);
+		long long abs, v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver);
 
 		/*
 		 * UBI constantly increases the logical eraseblock version
@@ -344,8 +354,8 @@
 
 	/* Read the data of the copy and check the CRC */
 
-	len = ubi32_to_cpu(vid_hdr->data_size);
-	buf = kmalloc(len, GFP_KERNEL);
+	len = be32_to_cpu(vid_hdr->data_size);
+	buf = vmalloc(len);
 	if (!buf) {
 		err = -ENOMEM;
 		goto out_free_vidh;
@@ -355,7 +365,7 @@
 	if (err && err != UBI_IO_BITFLIPS)
 		goto out_free_buf;
 
-	data_crc = ubi32_to_cpu(vid_hdr->data_crc);
+	data_crc = be32_to_cpu(vid_hdr->data_crc);
 	crc = crc32(UBI_CRC32_INIT, buf, len);
 	if (crc != data_crc) {
 		dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
@@ -368,7 +378,7 @@
 		bitflips = !!err;
 	}
 
-	kfree(buf);
+	vfree(buf);
 	ubi_free_vid_hdr(ubi, vidh);
 
 	if (second_is_newer)
@@ -379,7 +389,7 @@
 	return second_is_newer | (bitflips << 1) | (corrupted << 2);
 
 out_free_buf:
-	kfree(buf);
+	vfree(buf);
 out_free_vidh:
 	ubi_free_vid_hdr(ubi, vidh);
 	ubi_assert(err < 0);
@@ -396,8 +406,12 @@
  * @vid_hdr: the volume identifier header
  * @bitflips: if bit-flips were detected when this physical eraseblock was read
  *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
+ * This function adds information about a used physical eraseblock to the
+ * 'used' tree of the corresponding volume. The function is rather complex
+ * because it has to handle cases when this is not the first physical
+ * eraseblock belonging to the same logical eraseblock, and the newer one has
+ * to be picked, while the older one has to be dropped. This function returns
+ * zero in case of success and a negative error code in case of failure.
  */
 int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
 		      int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
@@ -410,10 +424,10 @@
 	struct ubi_scan_leb *seb;
 	struct rb_node **p, *parent = NULL;
 
-	vol_id = ubi32_to_cpu(vid_hdr->vol_id);
-	lnum = ubi32_to_cpu(vid_hdr->lnum);
-	sqnum = ubi64_to_cpu(vid_hdr->sqnum);
-	leb_ver = ubi32_to_cpu(vid_hdr->leb_ver);
+	vol_id = be32_to_cpu(vid_hdr->vol_id);
+	lnum = be32_to_cpu(vid_hdr->lnum);
+	sqnum = be64_to_cpu(vid_hdr->sqnum);
+	leb_ver = be32_to_cpu(vid_hdr->leb_ver);
 
 	dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, ver %u, bitflips %d",
 		pnum, vol_id, lnum, ec, sqnum, leb_ver, bitflips);
@@ -422,6 +436,9 @@
 	if (IS_ERR(sv) < 0)
 		return PTR_ERR(sv);
 
+	if (si->max_sqnum < sqnum)
+		si->max_sqnum = sqnum;
+
 	/*
 	 * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
 	 * if this is the first instance of this logical eraseblock or not.
@@ -492,11 +509,11 @@
 				return err;
 
 			if (cmp_res & 4)
-				err = ubi_scan_add_to_list(si, seb->pnum,
-							   seb->ec, &si->corr);
+				err = add_to_list(si, seb->pnum, seb->ec,
+						  &si->corr);
 			else
-				err = ubi_scan_add_to_list(si, seb->pnum,
-							   seb->ec, &si->erase);
+				err = add_to_list(si, seb->pnum, seb->ec,
+						  &si->erase);
 			if (err)
 				return err;
 
@@ -508,7 +525,7 @@
 
 			if (sv->highest_lnum == lnum)
 				sv->last_data_size =
-					ubi32_to_cpu(vid_hdr->data_size);
+					be32_to_cpu(vid_hdr->data_size);
 
 			return 0;
 		} else {
@@ -517,11 +534,9 @@
 			 * previously.
 			 */
 			if (cmp_res & 4)
-				return ubi_scan_add_to_list(si, pnum, ec,
-							    &si->corr);
+				return add_to_list(si, pnum, ec, &si->corr);
 			else
-				return ubi_scan_add_to_list(si, pnum, ec,
-							    &si->erase);
+				return add_to_list(si, pnum, ec, &si->erase);
 		}
 	}
 
@@ -547,12 +562,9 @@
 
 	if (sv->highest_lnum <= lnum) {
 		sv->highest_lnum = lnum;
-		sv->last_data_size = ubi32_to_cpu(vid_hdr->data_size);
+		sv->last_data_size = be32_to_cpu(vid_hdr->data_size);
 	}
 
-	if (si->max_sqnum < sqnum)
-		si->max_sqnum = sqnum;
-
 	sv->leb_count += 1;
 	rb_link_node(&seb->u.rb, parent, p);
 	rb_insert_color(&seb->u.rb, &sv->root);
@@ -674,7 +686,7 @@
 		return -EINVAL;
 	}
 
-	ec_hdr->ec = cpu_to_ubi64(ec);
+	ec_hdr->ec = cpu_to_be64(ec);
 
 	err = ubi_io_sync_erase(ubi, pnum, 0);
 	if (err < 0)
@@ -754,7 +766,7 @@
  * @si: scanning information
  * @pnum: the physical eraseblock number
  *
- * This function returns a zero if the physical eraseblock was succesfully
+ * This function returns zero if the physical eraseblock was successfully
  * handled and a negative error code in case of failure.
  */
 static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
@@ -783,8 +795,7 @@
 	else if (err == UBI_IO_BITFLIPS)
 		bitflips = 1;
 	else if (err == UBI_IO_PEB_EMPTY)
-		return ubi_scan_add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC,
-					    &si->erase);
+		return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, &si->erase);
 	else if (err == UBI_IO_BAD_EC_HDR) {
 		/*
 		 * We have to also look at the VID header, possibly it is not
@@ -806,7 +817,7 @@
 			return -EINVAL;
 		}
 
-		ec = ubi64_to_cpu(ech->ec);
+		ec = be64_to_cpu(ech->ec);
 		if (ec > UBI_MAX_ERASECOUNTER) {
 			/*
 			 * Erase counter overflow. The EC headers have 64 bits
@@ -832,28 +843,28 @@
 	else if (err == UBI_IO_BAD_VID_HDR ||
 		 (err == UBI_IO_PEB_FREE && ec_corr)) {
 		/* VID header is corrupted */
-		err = ubi_scan_add_to_list(si, pnum, ec, &si->corr);
+		err = add_to_list(si, pnum, ec, &si->corr);
 		if (err)
 			return err;
 		goto adjust_mean_ec;
 	} else if (err == UBI_IO_PEB_FREE) {
 		/* No VID header - the physical eraseblock is free */
-		err = ubi_scan_add_to_list(si, pnum, ec, &si->free);
+		err = add_to_list(si, pnum, ec, &si->free);
 		if (err)
 			return err;
 		goto adjust_mean_ec;
 	}
 
-	vol_id = ubi32_to_cpu(vidh->vol_id);
+	vol_id = be32_to_cpu(vidh->vol_id);
 	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOL_ID) {
-		int lnum = ubi32_to_cpu(vidh->lnum);
+		int lnum = be32_to_cpu(vidh->lnum);
 
 		/* Unsupported internal volume */
 		switch (vidh->compat) {
 		case UBI_COMPAT_DELETE:
 			ubi_msg("\"delete\" compatible internal volume %d:%d"
 				" found, remove it", vol_id, lnum);
-			err = ubi_scan_add_to_list(si, pnum, ec, &si->corr);
+			err = add_to_list(si, pnum, ec, &si->corr);
 			if (err)
 				return err;
 			break;
@@ -868,7 +879,7 @@
 		case UBI_COMPAT_PRESERVE:
 			ubi_msg("\"preserve\" compatible internal volume %d:%d"
 				" found", vol_id, lnum);
-			err = ubi_scan_add_to_list(si, pnum, ec, &si->alien);
+			err = add_to_list(si, pnum, ec, &si->alien);
 			if (err)
 				return err;
 			si->alien_peb_count += 1;
@@ -1109,7 +1120,7 @@
 	uint8_t *buf;
 
 	/*
-	 * At first, check that scanning information is ok.
+	 * At first, check that scanning information is OK.
 	 */
 	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
 		int leb_count = 0;
@@ -1249,12 +1260,12 @@
 				goto bad_vid_hdr;
 			}
 
-			if (seb->sqnum != ubi64_to_cpu(vidh->sqnum)) {
+			if (seb->sqnum != be64_to_cpu(vidh->sqnum)) {
 				ubi_err("bad sqnum %llu", seb->sqnum);
 				goto bad_vid_hdr;
 			}
 
-			if (sv->vol_id != ubi32_to_cpu(vidh->vol_id)) {
+			if (sv->vol_id != be32_to_cpu(vidh->vol_id)) {
 				ubi_err("bad vol_id %d", sv->vol_id);
 				goto bad_vid_hdr;
 			}
@@ -1264,22 +1275,22 @@
 				goto bad_vid_hdr;
 			}
 
-			if (seb->lnum != ubi32_to_cpu(vidh->lnum)) {
+			if (seb->lnum != be32_to_cpu(vidh->lnum)) {
 				ubi_err("bad lnum %d", seb->lnum);
 				goto bad_vid_hdr;
 			}
 
-			if (sv->used_ebs != ubi32_to_cpu(vidh->used_ebs)) {
+			if (sv->used_ebs != be32_to_cpu(vidh->used_ebs)) {
 				ubi_err("bad used_ebs %d", sv->used_ebs);
 				goto bad_vid_hdr;
 			}
 
-			if (sv->data_pad != ubi32_to_cpu(vidh->data_pad)) {
+			if (sv->data_pad != be32_to_cpu(vidh->data_pad)) {
 				ubi_err("bad data_pad %d", sv->data_pad);
 				goto bad_vid_hdr;
 			}
 
-			if (seb->leb_ver != ubi32_to_cpu(vidh->leb_ver)) {
+			if (seb->leb_ver != be32_to_cpu(vidh->leb_ver)) {
 				ubi_err("bad leb_ver %u", seb->leb_ver);
 				goto bad_vid_hdr;
 			}
@@ -1288,12 +1299,12 @@
 		if (!last_seb)
 			continue;
 
-		if (sv->highest_lnum != ubi32_to_cpu(vidh->lnum)) {
+		if (sv->highest_lnum != be32_to_cpu(vidh->lnum)) {
 			ubi_err("bad highest_lnum %d", sv->highest_lnum);
 			goto bad_vid_hdr;
 		}
 
-		if (sv->last_data_size != ubi32_to_cpu(vidh->data_size)) {
+		if (sv->last_data_size != be32_to_cpu(vidh->data_size)) {
 			ubi_err("bad last_data_size %d", sv->last_data_size);
 			goto bad_vid_hdr;
 		}
@@ -1310,8 +1321,10 @@
 	memset(buf, 1, ubi->peb_count);
 	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
 		err = ubi_io_is_bad(ubi, pnum);
-		if (err < 0)
+		if (err < 0) {
+			kfree(buf);
 			return err;
+		}
 		else if (err)
 			buf[pnum] = 0;
 	}
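
The expanded ubi_scan_add_used() comment above describes picking the newer of
two copies of the same logical eraseblock; the core of that decision is a
sequence-number comparison, sketched here with a hypothetical helper (the
sqnum field comes from struct ubi_vid_hdr):

static int second_copy_is_newer(const struct ubi_vid_hdr *first,
				const struct ubi_vid_hdr *second)
{
	unsigned long long sq1 = be64_to_cpu(first->sqnum);
	unsigned long long sq2 = be64_to_cpu(second->sqnum);

	/* a larger sequence number means the copy was written later */
	return sq2 > sq1;
}
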
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index 3949f61..140e82e 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -147,8 +147,6 @@
 		list_add_tail(&seb->u.list, list);
 }
 
-int ubi_scan_add_to_list(struct ubi_scan_info *si, int pnum, int ec,
-			 struct list_head *list);
 int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
 		      int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
 		      int bitflips);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index feb647f..5959f91 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -35,6 +35,7 @@
 #include <linux/cdev.h>
 #include <linux/device.h>
 #include <linux/string.h>
+#include <linux/vmalloc.h>
 #include <linux/mtd/mtd.h>
 
 #include <mtd/ubi-header.h>
@@ -374,9 +375,11 @@
 #ifdef CONFIG_MTD_UBI_GLUEBI
 int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol);
 int ubi_destroy_gluebi(struct ubi_volume *vol);
+void ubi_gluebi_updated(struct ubi_volume *vol);
 #else
 #define ubi_create_gluebi(ubi, vol) 0
 #define ubi_destroy_gluebi(vol) 0
+#define ubi_gluebi_updated(vol)
 #endif
 
 /* eba.c */
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 8925b97..0efc586 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -150,7 +150,7 @@
 			vol->updating = 0;
 	}
 
-	vol->upd_buf = kmalloc(ubi->leb_size, GFP_KERNEL);
+	vol->upd_buf = vmalloc(ubi->leb_size);
 	if (!vol->upd_buf)
 		return -ENOMEM;
 
@@ -339,7 +339,7 @@
 		err = ubi_wl_flush(ubi);
 		if (err == 0) {
 			err = to_write;
-			kfree(vol->upd_buf);
+			vfree(vol->upd_buf);
 			vol->updating = 0;
 		}
 	}
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 622d0d1..ea0d5c8 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -228,7 +228,7 @@
 	for (i = 0; i < ubi->vtbl_slots; i++)
 		if (ubi->volumes[i] &&
 		    ubi->volumes[i]->name_len == req->name_len &&
-		    strcmp(ubi->volumes[i]->name, req->name) == 0) {
+		    !strcmp(ubi->volumes[i]->name, req->name)) {
 			dbg_err("volume \"%s\" exists (ID %d)", req->name, i);
 			goto out_unlock;
 		}
@@ -243,7 +243,6 @@
 	/* Reserve physical eraseblocks */
 	if (vol->reserved_pebs > ubi->avail_pebs) {
 		dbg_err("not enough PEBs, only %d available", ubi->avail_pebs);
-		spin_unlock(&ubi->volumes_lock);
 		err = -ENOSPC;
 		goto out_unlock;
 	}
@@ -281,7 +280,8 @@
 	if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
 		vol->used_ebs = vol->reserved_pebs;
 		vol->last_eb_bytes = vol->usable_leb_size;
-		vol->used_bytes = vol->used_ebs * vol->usable_leb_size;
+		vol->used_bytes =
+			(long long)vol->used_ebs * vol->usable_leb_size;
 	} else {
 		bytes = vol->used_bytes;
 		vol->last_eb_bytes = do_div(bytes, vol->usable_leb_size);
@@ -320,10 +320,10 @@
 
 	/* Fill volume table record */
 	memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record));
-	vtbl_rec.reserved_pebs = cpu_to_ubi32(vol->reserved_pebs);
-	vtbl_rec.alignment     = cpu_to_ubi32(vol->alignment);
-	vtbl_rec.data_pad      = cpu_to_ubi32(vol->data_pad);
-	vtbl_rec.name_len      = cpu_to_ubi16(vol->name_len);
+	vtbl_rec.reserved_pebs = cpu_to_be32(vol->reserved_pebs);
+	vtbl_rec.alignment     = cpu_to_be32(vol->alignment);
+	vtbl_rec.data_pad      = cpu_to_be32(vol->data_pad);
+	vtbl_rec.name_len      = cpu_to_be16(vol->name_len);
 	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
 		vtbl_rec.vol_type = UBI_VID_DYNAMIC;
 	else
@@ -352,6 +352,7 @@
 	spin_lock(&ubi->volumes_lock);
 	ubi->rsvd_pebs -= vol->reserved_pebs;
 	ubi->avail_pebs += vol->reserved_pebs;
+	ubi->volumes[vol_id] = NULL;
 out_unlock:
 	spin_unlock(&ubi->volumes_lock);
 	kfree(vol);
@@ -368,6 +369,7 @@
 	spin_lock(&ubi->volumes_lock);
 	ubi->rsvd_pebs -= vol->reserved_pebs;
 	ubi->avail_pebs += vol->reserved_pebs;
+	ubi->volumes[vol_id] = NULL;
 	spin_unlock(&ubi->volumes_lock);
 	volume_sysfs_close(vol);
 	return err;
@@ -503,7 +505,7 @@
 
 	/* Change volume table record */
 	memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
-	vtbl_rec.reserved_pebs = cpu_to_ubi32(reserved_pebs);
+	vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
 	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
 	if (err)
 		goto out_acc;
@@ -537,7 +539,8 @@
 	if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
 		vol->used_ebs = reserved_pebs;
 		vol->last_eb_bytes = vol->usable_leb_size;
-		vol->used_bytes = vol->used_ebs * vol->usable_leb_size;
+		vol->used_bytes =
+			(long long)vol->used_ebs * vol->usable_leb_size;
 	}
 
 	paranoid_check_volumes(ubi);
@@ -643,21 +646,33 @@
  * @ubi: UBI device description object
  * @vol_id: volume ID
  */
-static void paranoid_check_volume(const struct ubi_device *ubi, int vol_id)
+static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
 {
 	int idx = vol_id2idx(ubi, vol_id);
 	int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
-	const struct ubi_volume *vol = ubi->volumes[idx];
+	const struct ubi_volume *vol;
 	long long n;
 	const char *name;
 
-	reserved_pebs = ubi32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
+	spin_lock(&ubi->volumes_lock);
+	reserved_pebs = be32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
+	vol = ubi->volumes[idx];
 
 	if (!vol) {
 		if (reserved_pebs) {
 			ubi_err("no volume info, but volume exists");
 			goto fail;
 		}
+		spin_unlock(&ubi->volumes_lock);
+		return;
+	}
+
+	if (vol->exclusive) {
+		/*
+		 * The volume may be being created at the moment, do not check
+		 * it (e.g., it may be in the middle of ubi_create_volume()).
+		 */
+		spin_unlock(&ubi->volumes_lock);
 		return;
 	}
 
@@ -726,7 +741,7 @@
 		goto fail;
 	}
 
-	n = vol->used_ebs * vol->usable_leb_size;
+	n = (long long)vol->used_ebs * vol->usable_leb_size;
 	if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
 		if (vol->corrupted != 0) {
 			ubi_err("corrupted dynamic volume");
@@ -765,9 +780,9 @@
 		}
 	}
 
-	alignment  = ubi32_to_cpu(ubi->vtbl[vol_id].alignment);
-	data_pad   = ubi32_to_cpu(ubi->vtbl[vol_id].data_pad);
-	name_len   = ubi16_to_cpu(ubi->vtbl[vol_id].name_len);
+	alignment  = be32_to_cpu(ubi->vtbl[vol_id].alignment);
+	data_pad   = be32_to_cpu(ubi->vtbl[vol_id].data_pad);
+	name_len   = be16_to_cpu(ubi->vtbl[vol_id].name_len);
 	upd_marker = ubi->vtbl[vol_id].upd_marker;
 	name       = &ubi->vtbl[vol_id].name[0];
 	if (ubi->vtbl[vol_id].vol_type == UBI_VID_DYNAMIC)
@@ -782,12 +797,14 @@
 		goto fail;
 	}
 
+	spin_unlock(&ubi->volumes_lock);
 	return;
 
 fail:
-	ubi_err("paranoid check failed");
+	ubi_err("paranoid check failed for volume %d", vol_id);
 	ubi_dbg_dump_vol_info(vol);
 	ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+	spin_unlock(&ubi->volumes_lock);
 	BUG();
 }
 
@@ -800,10 +817,8 @@
 	int i;
 
 	mutex_lock(&ubi->vtbl_mutex);
-	spin_lock(&ubi->volumes_lock);
 	for (i = 0; i < ubi->vtbl_slots; i++)
 		paranoid_check_volume(ubi, i);
-	spin_unlock(&ubi->volumes_lock);
 	mutex_unlock(&ubi->vtbl_mutex);
 }
 #endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index b6fd6bb..bc5df50 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -93,12 +93,9 @@
 		vtbl_rec = &empty_vtbl_record;
 	else {
 		crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC);
-		vtbl_rec->crc = cpu_to_ubi32(crc);
+		vtbl_rec->crc = cpu_to_be32(crc);
 	}
 
-	dbg_msg("change record %d", idx);
-	ubi_dbg_dump_vtbl_record(vtbl_rec, idx);
-
 	mutex_lock(&ubi->vtbl_mutex);
 	memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
 	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
@@ -141,18 +138,18 @@
 	for (i = 0; i < ubi->vtbl_slots; i++) {
 		cond_resched();
 
-		reserved_pebs = ubi32_to_cpu(vtbl[i].reserved_pebs);
-		alignment = ubi32_to_cpu(vtbl[i].alignment);
-		data_pad = ubi32_to_cpu(vtbl[i].data_pad);
+		reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+		alignment = be32_to_cpu(vtbl[i].alignment);
+		data_pad = be32_to_cpu(vtbl[i].data_pad);
 		upd_marker = vtbl[i].upd_marker;
 		vol_type = vtbl[i].vol_type;
-		name_len = ubi16_to_cpu(vtbl[i].name_len);
+		name_len = be16_to_cpu(vtbl[i].name_len);
 		name = &vtbl[i].name[0];
 
 		crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
-		if (ubi32_to_cpu(vtbl[i].crc) != crc) {
+		if (be32_to_cpu(vtbl[i].crc) != crc) {
 			ubi_err("bad CRC at record %u: %#08x, not %#08x",
-				 i, crc, ubi32_to_cpu(vtbl[i].crc));
+				 i, crc, be32_to_cpu(vtbl[i].crc));
 			ubi_dbg_dump_vtbl_record(&vtbl[i], i);
 			return 1;
 		}
@@ -225,8 +222,8 @@
 	/* Checks that all names are unique */
 	for (i = 0; i < ubi->vtbl_slots - 1; i++) {
 		for (n = i + 1; n < ubi->vtbl_slots; n++) {
-			int len1 = ubi16_to_cpu(vtbl[i].name_len);
-			int len2 = ubi16_to_cpu(vtbl[n].name_len);
+			int len1 = be16_to_cpu(vtbl[i].name_len);
+			int len2 = be16_to_cpu(vtbl[n].name_len);
 
 			if (len1 > 0 && len1 == len2 &&
 			    !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
@@ -288,13 +285,13 @@
 	}
 
 	vid_hdr->vol_type = UBI_VID_DYNAMIC;
-	vid_hdr->vol_id = cpu_to_ubi32(UBI_LAYOUT_VOL_ID);
+	vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOL_ID);
 	vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
 	vid_hdr->data_size = vid_hdr->used_ebs =
-			     vid_hdr->data_pad = cpu_to_ubi32(0);
-	vid_hdr->lnum = cpu_to_ubi32(copy);
-	vid_hdr->sqnum = cpu_to_ubi64(++si->max_sqnum);
-	vid_hdr->leb_ver = cpu_to_ubi32(old_seb ? old_seb->leb_ver + 1: 0);
+			     vid_hdr->data_pad = cpu_to_be32(0);
+	vid_hdr->lnum = cpu_to_be32(copy);
+	vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum);
+	vid_hdr->leb_ver = cpu_to_be32(old_seb ? old_seb->leb_ver + 1: 0);
 
 	/* The EC header is already there, write the VID header */
 	err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
@@ -317,14 +314,15 @@
 	return err;
 
 write_error:
-	kfree(new_seb);
-	/* May be this physical eraseblock went bad, try to pick another one */
-	if (++tries <= 5) {
-		err = ubi_scan_add_to_list(si, new_seb->pnum, new_seb->ec,
-					   &si->corr);
-		if (!err)
-			goto retry;
+	if (err == -EIO && ++tries <= 5) {
+		/*
+		 * Probably this physical eraseblock went bad, try to pick
+		 * another one.
+		 */
+		list_add_tail(&new_seb->u.list, &si->corr);
+		goto retry;
 	}
+	kfree(new_seb);
 out_free:
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	return err;
@@ -380,11 +378,12 @@
 
 	/* Read both LEB 0 and LEB 1 into memory */
 	ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
-		leb[seb->lnum] = kzalloc(ubi->vtbl_size, GFP_KERNEL);
+		leb[seb->lnum] = vmalloc(ubi->vtbl_size);
 		if (!leb[seb->lnum]) {
 			err = -ENOMEM;
 			goto out_free;
 		}
+		memset(leb[seb->lnum], 0, ubi->vtbl_size);
 
 		err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
 				       ubi->vtbl_size);
@@ -415,7 +414,7 @@
 		}
 
 		/* Both LEB 1 and LEB 2 are OK and consistent */
-		kfree(leb[1]);
+		vfree(leb[1]);
 		return leb[0];
 	} else {
 		/* LEB 0 is corrupted or does not exist */
@@ -436,13 +435,13 @@
 			goto out_free;
 		ubi_msg("volume table was restored");
 
-		kfree(leb[0]);
+		vfree(leb[0]);
 		return leb[1];
 	}
 
 out_free:
-	kfree(leb[0]);
-	kfree(leb[1]);
+	vfree(leb[0]);
+	vfree(leb[1]);
 	return ERR_PTR(err);
 }
 
@@ -460,9 +459,10 @@
 	int i;
 	struct ubi_vtbl_record *vtbl;
 
-	vtbl = kzalloc(ubi->vtbl_size, GFP_KERNEL);
+	vtbl = vmalloc(ubi->vtbl_size);
 	if (!vtbl)
 		return ERR_PTR(-ENOMEM);
+	memset(vtbl, 0, ubi->vtbl_size);
 
 	for (i = 0; i < ubi->vtbl_slots; i++)
 		memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
@@ -472,7 +472,7 @@
 
 		err = create_vtbl(ubi, si, i, vtbl);
 		if (err) {
-			kfree(vtbl);
+			vfree(vtbl);
 			return ERR_PTR(err);
 		}
 	}
@@ -500,19 +500,19 @@
 	for (i = 0; i < ubi->vtbl_slots; i++) {
 		cond_resched();
 
-		if (ubi32_to_cpu(vtbl[i].reserved_pebs) == 0)
+		if (be32_to_cpu(vtbl[i].reserved_pebs) == 0)
 			continue; /* Empty record */
 
 		vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
 		if (!vol)
 			return -ENOMEM;
 
-		vol->reserved_pebs = ubi32_to_cpu(vtbl[i].reserved_pebs);
-		vol->alignment = ubi32_to_cpu(vtbl[i].alignment);
-		vol->data_pad = ubi32_to_cpu(vtbl[i].data_pad);
+		vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+		vol->alignment = be32_to_cpu(vtbl[i].alignment);
+		vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
 		vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
 					UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
-		vol->name_len = ubi16_to_cpu(vtbl[i].name_len);
+		vol->name_len = be16_to_cpu(vtbl[i].name_len);
 		vol->usable_leb_size = ubi->leb_size - vol->data_pad;
 		memcpy(vol->name, vtbl[i].name, vol->name_len);
 		vol->name[vol->name_len] = '\0';
@@ -531,7 +531,8 @@
 		if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
 			vol->used_ebs = vol->reserved_pebs;
 			vol->last_eb_bytes = vol->usable_leb_size;
-			vol->used_bytes = vol->used_ebs * vol->usable_leb_size;
+			vol->used_bytes =
+				(long long)vol->used_ebs * vol->usable_leb_size;
 			continue;
 		}
 
@@ -561,7 +562,8 @@
 		}
 
 		vol->used_ebs = sv->used_ebs;
-		vol->used_bytes = (vol->used_ebs - 1) * vol->usable_leb_size;
+		vol->used_bytes =
+			(long long)(vol->used_ebs - 1) * vol->usable_leb_size;
 		vol->used_bytes += sv->last_data_size;
 		vol->last_eb_bytes = sv->last_data_size;
 	}
@@ -578,7 +580,8 @@
 	vol->usable_leb_size = ubi->leb_size;
 	vol->used_ebs = vol->reserved_pebs;
 	vol->last_eb_bytes = vol->reserved_pebs;
-	vol->used_bytes = vol->used_ebs * (ubi->leb_size - vol->data_pad);
+	vol->used_bytes =
+		(long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
 	vol->vol_id = UBI_LAYOUT_VOL_ID;
 
 	ubi_assert(!ubi->volumes[i]);
@@ -718,7 +721,7 @@
 	int i, err;
 	struct ubi_scan_volume *sv;
 
-	empty_vtbl_record.crc = cpu_to_ubi32(0xf116c36b);
+	empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
 
 	/*
 	 * The number of supported volumes is limited by the eraseblock size
@@ -783,7 +786,7 @@
 	return 0;
 
 out_free:
-	kfree(ubi->vtbl);
+	vfree(ubi->vtbl);
 	for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++)
 		if (ubi->volumes[i]) {
 			kfree(ubi->volumes[i]);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 9ecaf77..9de9537 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -667,7 +667,7 @@
 
 	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
 
-	ec_hdr->ec = cpu_to_ubi64(ec);
+	ec_hdr->ec = cpu_to_be64(ec);
 
 	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
 	if (err)
@@ -1060,9 +1060,8 @@
 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 			int cancel)
 {
-	int err;
 	struct ubi_wl_entry *e = wl_wrk->e;
-	int pnum = e->pnum;
+	int pnum = e->pnum, err, need;
 
 	if (cancel) {
 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1097,62 +1096,70 @@
 	kfree(wl_wrk);
 	kmem_cache_free(wl_entries_slab, e);
 
-	if (err != -EIO) {
+	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
+	    err == -EBUSY) {
+		int err1;
+
+		/* Re-schedule the PEB for erasure */
+		err1 = schedule_erase(ubi, e, 0);
+		if (err1) {
+			err = err1;
+			goto out_ro;
+		}
+		return err;
+	} else if (err != -EIO) {
 		/*
 		 * If this is not %-EIO, we have no idea what to do. Scheduling
 		 * this physical eraseblock for erasure again would cause
 		 * errors again and again. Well, lets switch to RO mode.
 		 */
-		ubi_ro_mode(ubi);
-		return err;
+		goto out_ro;
 	}
 
 	/* It is %-EIO, the PEB went bad */
 
 	if (!ubi->bad_allowed) {
 		ubi_err("bad physical eraseblock %d detected", pnum);
-		ubi_ro_mode(ubi);
-		err = -EIO;
-	} else {
-		int need;
-
-		spin_lock(&ubi->volumes_lock);
-		need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
-		if (need > 0) {
-			need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
-			ubi->avail_pebs -= need;
-			ubi->rsvd_pebs += need;
-			ubi->beb_rsvd_pebs += need;
-			if (need > 0)
-				ubi_msg("reserve more %d PEBs", need);
-		}
-
-		if (ubi->beb_rsvd_pebs == 0) {
-			spin_unlock(&ubi->volumes_lock);
-			ubi_err("no reserved physical eraseblocks");
-			ubi_ro_mode(ubi);
-			return -EIO;
-		}
-
-		spin_unlock(&ubi->volumes_lock);
-		ubi_msg("mark PEB %d as bad", pnum);
-
-		err = ubi_io_mark_bad(ubi, pnum);
-		if (err) {
-			ubi_ro_mode(ubi);
-			return err;
-		}
-
-		spin_lock(&ubi->volumes_lock);
-		ubi->beb_rsvd_pebs -= 1;
-		ubi->bad_peb_count += 1;
-		ubi->good_peb_count -= 1;
-		ubi_calculate_reserved(ubi);
-		if (ubi->beb_rsvd_pebs == 0)
-			ubi_warn("last PEB from the reserved pool was used");
-		spin_unlock(&ubi->volumes_lock);
+		goto out_ro;
 	}
 
+	spin_lock(&ubi->volumes_lock);
+	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
+	if (need > 0) {
+		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
+		ubi->avail_pebs -= need;
+		ubi->rsvd_pebs += need;
+		ubi->beb_rsvd_pebs += need;
+		if (need > 0)
+			ubi_msg("reserve more %d PEBs", need);
+	}
+
+	if (ubi->beb_rsvd_pebs == 0) {
+		spin_unlock(&ubi->volumes_lock);
+		ubi_err("no reserved physical eraseblocks");
+		goto out_ro;
+	}
+
+	spin_unlock(&ubi->volumes_lock);
+	ubi_msg("mark PEB %d as bad", pnum);
+
+	err = ubi_io_mark_bad(ubi, pnum);
+	if (err)
+		goto out_ro;
+
+	spin_lock(&ubi->volumes_lock);
+	ubi->beb_rsvd_pebs -= 1;
+	ubi->bad_peb_count += 1;
+	ubi->good_peb_count -= 1;
+	ubi_calculate_reserved(ubi);
+	if (ubi->beb_rsvd_pebs == 0)
+		ubi_warn("last PEB from the reserved pool was used");
+	spin_unlock(&ubi->volumes_lock);
+
+	return err;
+
+out_ro:
+	ubi_ro_mode(ubi);
 	return err;
 }
 
@@ -1346,6 +1353,7 @@
 	ubi_msg("background thread \"%s\" started, PID %d",
 		ubi->bgt_name, current->pid);
 
+	set_freezable();
 	for (;;) {
 		int err;
 
@@ -1633,7 +1641,7 @@
 		goto out_free;
 	}
 
-	read_ec = ubi64_to_cpu(ec_hdr->ec);
+	read_ec = be64_to_cpu(ec_hdr->ec);
 	if (ec != read_ec) {
 		ubi_err("paranoid check failed for PEB %d", pnum);
 		ubi_err("read EC is %lld, should be %d", read_ec, ec);
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 807e699..e970e64 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -26,7 +26,6 @@
 
 	TODO:
 	* Test Tx checksumming thoroughly
-	* Implement dev->tx_timeout
 
 	Low priority TODO:
 	* Complete reset on PciErr
@@ -1218,6 +1217,30 @@
 	return 0;
 }
 
+static void cp_tx_timeout(struct net_device *dev)
+{
+	struct cp_private *cp = netdev_priv(dev);
+	unsigned long flags;
+	int rc;
+
+	printk(KERN_WARNING "%s: Transmit timeout, status %2x %4x %4x %4x\n",
+	       dev->name, cpr8(Cmd), cpr16(CpCmd),
+	       cpr16(IntrStatus), cpr16(IntrMask));
+
+	spin_lock_irqsave(&cp->lock, flags);
+
+	cp_stop_hw(cp);
+	cp_clean_rings(cp);
+	rc = cp_init_rings(cp);
+	cp_start_hw(cp);
+
+	netif_wake_queue(dev);
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	return;
+}
+
 #ifdef BROKEN
 static int cp_change_mtu(struct net_device *dev, int new_mtu)
 {
@@ -1920,10 +1943,8 @@
 	dev->change_mtu = cp_change_mtu;
 #endif
 	dev->ethtool_ops = &cp_ethtool_ops;
-#if 0
 	dev->tx_timeout = cp_tx_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;
-#endif
 
 #if CP_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 713ab05..3073f67 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -205,7 +205,7 @@
 config MACB
 	tristate "Atmel MACB support"
 	depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263
-	select MII
+	select PHYLIB
 	help
 	  The Atmel MACB ethernet interface is found on many AT32 and AT91
 	  parts. Say Y to include support for the MACB chip.
@@ -838,6 +838,50 @@
 	  <file:Documentation/networking/net-modules.txt>. The module
 	  will be called smc-ultra32.
 
+config BFIN_MAC
+	tristate "Blackfin 536/537 on-chip MAC support"
+	depends on NET_ETHERNET && (BF537 || BF536) && (!BF537_PORT_H)
+	select CRC32
+	select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
+	help
+	  This is the driver for the Blackfin on-chip MAC device. Say Y if you
+	  want it compiled into the kernel. This driver is also available as a
+	  module ( = code which can be inserted in and removed from the running
+	  kernel whenever you want). The module will be called bfin_mac.
+
+config BFIN_MAC_USE_L1
+	bool "Use L1 memory for rx/tx packets"
+	depends on BFIN_MAC && BF537
+	default y
+	help
+	  To get maximum network performance, you should use L1 memory as rx/tx buffers.
+	  Say N here if you want to reserve L1 memory for other uses.
+
+config BFIN_TX_DESC_NUM
+	int "Number of transmit buffer packets"
+	depends on BFIN_MAC
+	range 6 10 if BFIN_MAC_USE_L1
+	range 10 100
+	default "10"
+	help
+	  Set the number of buffer packets used in driver.
+
+config BFIN_RX_DESC_NUM
+	int "Number of receive buffer packets"
+	depends on BFIN_MAC
+	range 20 100 if BFIN_MAC_USE_L1
+	range 20 800
+	default "20"
+	help
+	  Set the number of buffer packets used in driver.
+
+config BFIN_MAC_RMII
+	bool "RMII PHY Interface (EXPERIMENTAL)"
+	depends on BFIN_MAC && EXPERIMENTAL
+	default n
+	help
+	  Use the Reduced MII (RMII) PHY interface.
+
 config SMC9194
 	tristate "SMC 9194 support"
 	depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN)
@@ -2486,6 +2530,18 @@
 
 source "drivers/s390/net/Kconfig"
 
+config XEN_NETDEV_FRONTEND
+	tristate "Xen network device frontend driver"
+	depends on XEN
+	default y
+	help
+	  The network device frontend driver allows the kernel to
+	  access network devices exported by a virtual
+	  machine containing a physical network device driver. The
+	  frontend driver is intended for unprivileged guest domains;
+	  if you are compiling a kernel for a Xen guest, you almost
+	  certainly want to enable this.
+
 config ISERIES_VETH
 	tristate "iSeries Virtual Ethernet driver support"
 	depends on PPC_ISERIES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index eb41676..336af06 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -127,6 +127,8 @@
 obj-$(CONFIG_SLIP) += slip.o
 obj-$(CONFIG_SLHC) += slhc.o
 
+obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
+
 obj-$(CONFIG_DUMMY) += dummy.o
 obj-$(CONFIG_IFB) += ifb.o
 obj-$(CONFIG_MACVLAN) += macvlan.o
@@ -175,6 +177,7 @@
 obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
 obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
 obj-$(CONFIG_EQUALIZER) += eql.o
+obj-$(CONFIG_LGUEST_GUEST) += lguest_net.o
 obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
 obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
 obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
@@ -198,6 +201,7 @@
 obj-$(CONFIG_MYRI10GE) += myri10ge/
 obj-$(CONFIG_SMC91X) += smc91x.o
 obj-$(CONFIG_SMC911X) += smc911x.o
+obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_FEC_8XX) += fec_8xx/
 obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index 5bf2d33..f9cc2b6 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -43,6 +43,7 @@
 config EP93XX_ETH
 	tristate "EP93xx Ethernet support"
 	depends on ARM && ARCH_EP93XX
+	select MII
 	help
 	  This is a driver for the ethernet hardware included in EP93xx CPUs.
 	  Say Y if you are building a kernel for EP93xx based devices.
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index da71350..a7cac69 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -464,7 +464,7 @@
 	if (dev->flags & IFF_PROMISC) {
 		/* promiscuous mode */
 		priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
-	} else if (dev->flags & IFF_ALLMULTI) {
+	} else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
 		priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
 	} else
 		priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
diff --git a/drivers/net/atl1/atl1.h b/drivers/net/atl1/atl1.h
index df4c1a0..ff4765f6 100644
--- a/drivers/net/atl1/atl1.h
+++ b/drivers/net/atl1/atl1.h
@@ -43,6 +43,7 @@
 struct atl1_adapter;
 
 #define ATL1_MAX_INTR		3
+#define ATL1_MAX_TX_BUF_LEN	0x3000	/* 12288 bytes */
 
 #define ATL1_DEFAULT_TPD	256
 #define ATL1_MAX_TPD		1024
@@ -57,29 +58,45 @@
 #define ATL1_RRD_DESC(R, i)	ATL1_GET_DESC(R, i, struct rx_return_desc)
 
 /*
+ * This detached comment is preserved for documentation purposes only.
+ * It was originally attached to some code that got deleted, but seems
+ * important enough to keep around...
+ *
+ * <begin detached comment>
  * Some workarounds require millisecond delays and are run during interrupt
  * context.  Most notably, when establishing link, the phy may need tweaking
  * but cannot process phy register reads/writes faster than millisecond
  * intervals...and we establish link due to a "link status change" interrupt.
+ * <end detached comment>
  */
 
 /*
- * wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer
+ * atl1_ring_header represents a single, contiguous block of DMA space
+ * mapped for the three descriptor rings (tpd, rfd, rrd) and the two
+ * message blocks (cmb, smb) described below
+ */
+struct atl1_ring_header {
+	void *desc;		/* virtual address */
+	dma_addr_t dma;		/* physical address */
+	unsigned int size;	/* length in bytes */
+};
+
+/*
+ * atl1_buffer is wrapper around a pointer to a socket buffer
+ * so a DMA handle can be stored along with the skb
  */
 struct atl1_buffer {
-	struct sk_buff *skb;
-	u16 length;
-	u16 alloced;
+	struct sk_buff *skb;	/* socket buffer */
+	u16 length;		/* rx buffer length */
+	u16 alloced;		/* 1 if skb allocated */
 	dma_addr_t dma;
 };
 
-#define MAX_TX_BUF_LEN		0x3000	/* 12KB */
-
+/* transmit packet descriptor (tpd) ring */
 struct atl1_tpd_ring {
-	void *desc;		/* pointer to the descriptor ring memory */
-	dma_addr_t dma;		/* physical adress of the descriptor ring */
-	u16 size;		/* length of descriptor ring in bytes */
+	void *desc;		/* descriptor ring virtual address */
+	dma_addr_t dma;		/* descriptor ring physical address */
+	u16 size;		/* descriptor ring length in bytes */
 	u16 count;		/* number of descriptors in the ring */
 	u16 hw_idx;		/* hardware index */
 	atomic_t next_to_clean;
@@ -87,36 +104,34 @@
 	struct atl1_buffer *buffer_info;
 };
 
+/* receive free descriptor (rfd) ring */
 struct atl1_rfd_ring {
-	void *desc;
-	dma_addr_t dma;
-	u16 size;
-	u16 count;
+	void *desc;		/* descriptor ring virtual address */
+	dma_addr_t dma;		/* descriptor ring physical address */
+	u16 size;		/* descriptor ring length in bytes */
+	u16 count;		/* number of descriptors in the ring */
 	atomic_t next_to_use;
 	u16 next_to_clean;
 	struct atl1_buffer *buffer_info;
 };
 
+/* receive return descriptor (rrd) ring */
 struct atl1_rrd_ring {
-	void *desc;
-	dma_addr_t dma;
-	unsigned int size;
-	u16 count;
+	void *desc;		/* descriptor ring virtual address */
+	dma_addr_t dma;		/* descriptor ring physical address */
+	unsigned int size;	/* descriptor ring length in bytes */
+	u16 count;		/* number of descriptors in the ring */
 	u16 next_to_use;
 	atomic_t next_to_clean;
 };
 
-struct atl1_ring_header {
-	void *desc;		/* pointer to the descriptor ring memory */
-	dma_addr_t dma;		/* physical adress of the descriptor ring */
-	unsigned int size;	/* length of descriptor ring in bytes */
-};
-
+/* coalescing message block (cmb) */
 struct atl1_cmb {
 	struct coals_msg_block *cmb;
 	dma_addr_t dma;
 };
 
+/* statistics message block (smb) */
 struct atl1_smb {
 	struct stats_msg_block *smb;
 	dma_addr_t dma;
@@ -141,24 +156,26 @@
 	u64 tx_aborted_errors;
 	u64 tx_window_errors;
 	u64 tx_carrier_errors;
-
-	u64 tx_pause;		/* num Pause packet transmitted. */
-	u64 excecol;		/* num tx packets aborted due to excessive collisions. */
-	u64 deffer;		/* num deferred tx packets */
-	u64 scc;		/* num packets subsequently transmitted successfully w/ single prior collision. */
-	u64 mcc;		/* num packets subsequently transmitted successfully w/ multiple prior collisions. */
+	u64 tx_pause;		/* num pause packets transmitted. */
+	u64 excecol;		/* num tx packets w/ excessive collisions. */
+	u64 deffer;		/* num tx packets deferred */
+	u64 scc;		/* num packets subsequently transmitted
+				 * successfully w/ single prior collision. */
+	u64 mcc;		/* num packets subsequently transmitted
+				 * successfully w/ multiple prior collisions. */
 	u64 latecol;		/* num tx packets  w/ late collisions. */
-	u64 tx_underun;		/* num tx packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */
-	u64 tx_trunc;		/* num tx packets truncated due to size exceeding MTU, regardless whether truncated by Selene or not. (The name doesn't really reflect the meaning in this case.) */
+	u64 tx_underun;		/* num tx packets aborted due to transmit
+				 * FIFO underrun, or TRD FIFO underrun */
+	u64 tx_trunc;		/* num tx packets truncated due to size
+				 * exceeding MTU, regardless of whether truncated
+				 * by the chip or not. (The name doesn't really
+				 * reflect the meaning in this case.) */
 	u64 rx_pause;		/* num Pause packets received. */
 	u64 rx_rrd_ov;
 	u64 rx_trunc;
 };
 
-/* board specific private data structure */
-#define ATL1_REGS_LEN	8
-
-/* Structure containing variables used by the shared code */
+/* hardware structure */
 struct atl1_hw {
 	u8 __iomem *hw_addr;
 	struct atl1_adapter *back;
@@ -167,24 +184,35 @@
 	enum atl1_dma_req_block dmar_block;
 	enum atl1_dma_req_block dmaw_block;
 	u8 preamble_len;
-	u8 max_retry;		/* Retransmission maximum, after which the packet will be discarded */
-	u8 jam_ipg;		/* IPG to start JAM for collision based flow control in half-duplex mode. In units of 8-bit time */
-	u8 ipgt;		/* Desired back to back inter-packet gap. The default is 96-bit time */
-	u8 min_ifg;		/* Minimum number of IFG to enforce in between RX frames. Frame gap below such IFP is dropped */
+	u8 max_retry;		/* Retransmission maximum, after which the
+				 * packet will be discarded */
+	u8 jam_ipg;		/* IPG to start JAM for collision based flow
+				 * control in half-duplex mode. In units of
+				 * 8-bit time */
+	u8 ipgt;		/* Desired back to back inter-packet gap.
+				 * The default is 96-bit time */
+	u8 min_ifg;		/* Minimum number of IFG to enforce in between
+				 * receive frames. Frame gap below such IFG
+				 * is dropped */
 	u8 ipgr1;		/* 64bit Carrier-Sense window */
 	u8 ipgr2;		/* 96-bit IPG window */
-	u8 tpd_burst;		/* Number of TPD to prefetch in cache-aligned burst. Each TPD is 16 bytes long */
-	u8 rfd_burst;		/* Number of RFD to prefetch in cache-aligned burst. Each RFD is 12 bytes long */
+	u8 tpd_burst;		/* Number of TPD to prefetch in cache-aligned
+				 * burst. Each TPD is 16 bytes long */
+	u8 rfd_burst;		/* Number of RFD to prefetch in cache-aligned
+				 * burst. Each RFD is 12 bytes long */
 	u8 rfd_fetch_gap;
-	u8 rrd_burst;		/* Threshold number of RRDs that can be retired in a burst. Each RRD is 16 bytes long */
+	u8 rrd_burst;		/* Threshold number of RRDs that can be retired
+				 * in a burst. Each RRD is 16 bytes long */
 	u8 tpd_fetch_th;
 	u8 tpd_fetch_gap;
 	u16 tx_jumbo_task_th;
-	u16 txf_burst;		/* Number of data bytes to read in a cache-aligned burst. Each SRAM entry is
-				   8 bytes long */
-	u16 rx_jumbo_th;	/* Jumbo packet size for non-VLAN packet. VLAN packets should add 4 bytes */
+	u16 txf_burst;		/* Number of data bytes to read in a cache-
+				 * aligned burst. Each SRAM entry is 8 bytes */
+	u16 rx_jumbo_th;	/* Jumbo packet size for non-VLAN packet. VLAN
+				 * packets should add 4 bytes */
 	u16 rx_jumbo_lkah;
-	u16 rrd_ret_timer;	/* RRD retirement timer. Decrement by 1 after every 512ns passes. */
+	u16 rrd_ret_timer;	/* RRD retirement timer. Decrement by 1 after
+				 * every 512ns passes. */
 	u16 lcol;		/* Collision Window */
 
 	u16 cmb_tpd;
@@ -194,48 +222,35 @@
 	u32 smb_timer;
 	u16 media_type;
 	u16 autoneg_advertised;
-	u16 pci_cmd_word;
 
 	u16 mii_autoneg_adv_reg;
 	u16 mii_1000t_ctrl_reg;
 
-	u32 mem_rang;
-	u32 txcw;
 	u32 max_frame_size;
 	u32 min_frame_size;
-	u32 mc_filter_type;
-	u32 num_mc_addrs;
-	u32 collision_delta;
-	u32 tx_packet_delta;
-	u16 phy_spd_default;
 
 	u16 dev_rev;
 
 	/* spi flash */
 	u8 flash_vendor;
 
-	u8 dma_fairness;
 	u8 mac_addr[ETH_ALEN];
 	u8 perm_mac_addr[ETH_ALEN];
 
-	/* bool phy_preamble_sup; */
 	bool phy_configured;
 };
 
 struct atl1_adapter {
-	/* OS defined structs */
 	struct net_device *netdev;
 	struct pci_dev *pdev;
 	struct net_device_stats net_stats;
 	struct atl1_sft_stats soft_stats;
-
 	struct vlan_group *vlgrp;
 	u32 rx_buffer_len;
 	u32 wol;
 	u16 link_speed;
 	u16 link_duplex;
 	spinlock_t lock;
-	atomic_t irq_sem;
 	struct work_struct tx_timeout_task;
 	struct work_struct link_chg_task;
 	struct work_struct pcie_dma_to_rst_task;
@@ -243,9 +258,7 @@
 	struct timer_list phy_config_timer;
 	bool phy_timer_pending;
 
-	bool mac_disabled;
-
-	/* All descriptor rings' memory */
+	/* all descriptor rings' memory */
 	struct atl1_ring_header ring_header;
 
 	/* TX */
@@ -258,25 +271,16 @@
 	u64 hw_csum_err;
 	u64 hw_csum_good;
 
-	u32 gorcl;
-	u64 gorcl_old;
-
-	/* Interrupt Moderator timer ( 2us resolution) */
-	u16 imt;
-	/* Interrupt Clear timer (2us resolution) */
-	u16 ict;
-
-	/* MII interface info */
-	struct mii_if_info mii;
+	u16 imt;	/* interrupt moderator timer (2us resolution) */
+	u16 ict;	/* interrupt clear timer (2us resolution) */
+	struct mii_if_info mii;		/* MII interface info */
 
 	/* structs defined in atl1_hw.h */
-	u32 bd_number;		/* board number */
+	u32 bd_number;			/* board number */
 	bool pci_using_64;
 	struct atl1_hw hw;
 	struct atl1_smb smb;
 	struct atl1_cmb cmb;
-
-	u32 pci_state[16];
 };
 
 #endif	/* _ATL1_H_ */
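
The new atl1_ring_header comment above describes one contiguous DMA block
shared by the three descriptor rings and the two message blocks; each piece is
placed at the next 8-byte-aligned offset, which is why the allocation in
atl1_setup_mem_resources() (next file) is padded by 40 bytes. A minimal sketch
of that alignment step (atl1_align_pad is a hypothetical helper):

static unsigned int atl1_align_pad(dma_addr_t dma)
{
	/* number of bytes needed to reach the next 8-byte boundary */
	return (dma & 0x7) ? (8 - (dma & 0x7)) : 0;
}
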
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index 501919e..fd1e156 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -38,7 +38,7 @@
  * TODO:
  * Fix TSO; tx performance is horrible with TSO enabled.
  * Wake on LAN.
- * Add more ethtool functions, including set ring parameters.
+ * Add more ethtool functions.
  * Fix abstruse irq enable/disable condition described here:
  *	http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
  *
@@ -75,6 +75,7 @@
 #include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/mii.h>
+#include <linux/interrupt.h>
 #include <net/checksum.h>
 
 #include <asm/atomic.h>
@@ -158,13 +159,70 @@
 	hw->cmb_tx_timer = 1;	/* about 2us */
 	hw->smb_timer = 100000;	/* about 200ms */
 
-	atomic_set(&adapter->irq_sem, 0);
 	spin_lock_init(&adapter->lock);
 	spin_lock_init(&adapter->mb_lock);
 
 	return 0;
 }
 
+static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	u16 result;
+
+	atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
+
+	return result;
+}
+
+static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
+	int val)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+
+	atl1_write_phy_reg(&adapter->hw, reg_num, val);
+}
+
+/*
+ * atl1_mii_ioctl - MII ioctl handler
+ * @netdev: network interface device structure
+ * @ifr: pointer to the interface request structure
+ * @cmd: ioctl command
+ */
+static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
+	int retval;
+
+	if (!netif_running(netdev))
+		return -EINVAL;
+
+	spin_lock_irqsave(&adapter->lock, flags);
+	retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
+	spin_unlock_irqrestore(&adapter->lock, flags);
+
+	return retval;
+}
+
+/*
+ * atl1_ioctl - generic ioctl entry point
+ * @netdev: network interface device structure
+ * @ifr: pointer to the interface request structure
+ * @cmd: ioctl command
+ */
+static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return atl1_mii_ioctl(netdev, ifr, cmd);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 /*
  * atl1_setup_mem_resources - allocate Tx / RX descriptor resources
  * @adapter: board private structure
@@ -188,19 +246,22 @@
 		goto err_nomem;
 	}
 	rfd_ring->buffer_info =
-	    (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
+		(struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
 
-	/* real ring DMA buffer */
-	ring_header->size = size = sizeof(struct tx_packet_desc) *
-					tpd_ring->count
-	    + sizeof(struct rx_free_desc) * rfd_ring->count
-	    + sizeof(struct rx_return_desc) * rrd_ring->count
-	    + sizeof(struct coals_msg_block)
-	    + sizeof(struct stats_msg_block)
-	    + 40;		/* "40: for 8 bytes align" huh? -- CHS */
+	/* real ring DMA buffer
+	 * each ring/block may need up to 8 bytes for alignment, hence the
+	 * additional 40 bytes tacked onto the end.
+	 */
+	ring_header->size = size =
+		sizeof(struct tx_packet_desc) * tpd_ring->count
+		+ sizeof(struct rx_free_desc) * rfd_ring->count
+		+ sizeof(struct rx_return_desc) * rrd_ring->count
+		+ sizeof(struct coals_msg_block)
+		+ sizeof(struct stats_msg_block)
+		+ 40;
 
 	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
-						&ring_header->dma);
+		&ring_header->dma);
 	if (unlikely(!ring_header->desc)) {
 		dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
 		goto err_nomem;
@@ -214,8 +275,6 @@
 	tpd_ring->dma += offset;
 	tpd_ring->desc = (u8 *) ring_header->desc + offset;
 	tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;
-	atomic_set(&tpd_ring->next_to_use, 0);
-	atomic_set(&tpd_ring->next_to_clean, 0);
 
 	/* init RFD ring */
 	rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
@@ -223,9 +282,7 @@
 	rfd_ring->dma += offset;
 	rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
 	rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;
-	rfd_ring->next_to_clean = 0;
-	/* rfd_ring->next_to_use = rfd_ring->count - 1; */
-	atomic_set(&rfd_ring->next_to_use, 0);
+
 
 	/* init RRD ring */
 	rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
@@ -233,23 +290,22 @@
 	rrd_ring->dma += offset;
 	rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
 	rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;
-	rrd_ring->next_to_use = 0;
-	atomic_set(&rrd_ring->next_to_clean, 0);
+
 
 	/* init CMB */
 	adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
 	offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
 	adapter->cmb.dma += offset;
-	adapter->cmb.cmb =
-	    (struct coals_msg_block *) ((u8 *) rrd_ring->desc +
-				   (rrd_ring->size + offset));
+	adapter->cmb.cmb = (struct coals_msg_block *)
+		((u8 *) rrd_ring->desc + (rrd_ring->size + offset));
 
 	/* init SMB */
 	adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
 	offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
 	adapter->smb.dma += offset;
 	adapter->smb.smb = (struct stats_msg_block *)
-	    ((u8 *) adapter->cmb.cmb + (sizeof(struct coals_msg_block) + offset));
+		((u8 *) adapter->cmb.cmb +
+		(sizeof(struct coals_msg_block) + offset));
 
 	return ATL1_SUCCESS;
 
@@ -258,14 +314,682 @@
 	return -ENOMEM;
 }
 
+void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
+{
+	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
+
+	atomic_set(&tpd_ring->next_to_use, 0);
+	atomic_set(&tpd_ring->next_to_clean, 0);
+
+	rfd_ring->next_to_clean = 0;
+	atomic_set(&rfd_ring->next_to_use, 0);
+
+	rrd_ring->next_to_use = 0;
+	atomic_set(&rrd_ring->next_to_clean, 0);
+}
+
+/*
+ * atl1_clean_rx_ring - Free RFD Buffers
+ * @adapter: board private structure
+ */
+static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
+{
+	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
+	struct atl1_buffer *buffer_info;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rfd_ring->count; i++) {
+		buffer_info = &rfd_ring->buffer_info[i];
+		if (buffer_info->dma) {
+			pci_unmap_page(pdev, buffer_info->dma,
+				buffer_info->length, PCI_DMA_FROMDEVICE);
+			buffer_info->dma = 0;
+		}
+		if (buffer_info->skb) {
+			dev_kfree_skb(buffer_info->skb);
+			buffer_info->skb = NULL;
+		}
+	}
+
+	size = sizeof(struct atl1_buffer) * rfd_ring->count;
+	memset(rfd_ring->buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(rfd_ring->desc, 0, rfd_ring->size);
+
+	rfd_ring->next_to_clean = 0;
+	atomic_set(&rfd_ring->next_to_use, 0);
+
+	rrd_ring->next_to_use = 0;
+	atomic_set(&rrd_ring->next_to_clean, 0);
+}
+
+/*
+ * atl1_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ */
+static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
+{
+	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+	struct atl1_buffer *buffer_info;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Tx ring sk_buffs */
+	for (i = 0; i < tpd_ring->count; i++) {
+		buffer_info = &tpd_ring->buffer_info[i];
+		if (buffer_info->dma) {
+			pci_unmap_page(pdev, buffer_info->dma,
+				buffer_info->length, PCI_DMA_TODEVICE);
+			buffer_info->dma = 0;
+		}
+	}
+
+	for (i = 0; i < tpd_ring->count; i++) {
+		buffer_info = &tpd_ring->buffer_info[i];
+		if (buffer_info->skb) {
+			dev_kfree_skb_any(buffer_info->skb);
+			buffer_info->skb = NULL;
+		}
+	}
+
+	size = sizeof(struct atl1_buffer) * tpd_ring->count;
+	memset(tpd_ring->buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(tpd_ring->desc, 0, tpd_ring->size);
+
+	atomic_set(&tpd_ring->next_to_use, 0);
+	atomic_set(&tpd_ring->next_to_clean, 0);
+}
+
+/*
+ * atl1_free_ring_resources - Free Tx / RX descriptor Resources
+ * @adapter: board private structure
+ *
+ * Free all transmit and receive software resources
+ */
+void atl1_free_ring_resources(struct atl1_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
+	struct atl1_ring_header *ring_header = &adapter->ring_header;
+
+	atl1_clean_tx_ring(adapter);
+	atl1_clean_rx_ring(adapter);
+
+	kfree(tpd_ring->buffer_info);
+	pci_free_consistent(pdev, ring_header->size, ring_header->desc,
+		ring_header->dma);
+
+	tpd_ring->buffer_info = NULL;
+	tpd_ring->desc = NULL;
+	tpd_ring->dma = 0;
+
+	rfd_ring->buffer_info = NULL;
+	rfd_ring->desc = NULL;
+	rfd_ring->dma = 0;
+
+	rrd_ring->desc = NULL;
+	rrd_ring->dma = 0;
+}
+
+static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
+{
+	u32 value;
+	struct atl1_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	/* Config MAC CTRL Register */
+	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
+	/* duplex */
+	if (FULL_DUPLEX == adapter->link_duplex)
+		value |= MAC_CTRL_DUPLX;
+	/* speed */
+	value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
+			 MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
+		  MAC_CTRL_SPEED_SHIFT);
+	/* flow control */
+	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
+	/* PAD & CRC */
+	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
+	/* preamble length */
+	value |= (((u32) adapter->hw.preamble_len
+		   & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
+	/* vlan */
+	if (adapter->vlgrp)
+		value |= MAC_CTRL_RMV_VLAN;
+	/* rx checksum
+	   if (adapter->rx_csum)
+	   value |= MAC_CTRL_RX_CHKSUM_EN;
+	 */
+	/* filter mode */
+	value |= MAC_CTRL_BC_EN;
+	if (netdev->flags & IFF_PROMISC)
+		value |= MAC_CTRL_PROMIS_EN;
+	else if (netdev->flags & IFF_ALLMULTI)
+		value |= MAC_CTRL_MC_ALL_EN;
+	/* value |= MAC_CTRL_LOOPBACK; */
+	iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
+}
+
+/*
+ * atl1_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1_set_mac(struct net_device *netdev, void *p)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+
+	atl1_set_mac_addr(&adapter->hw);
+	return 0;
+}
+
+static u32 atl1_check_link(struct atl1_adapter *adapter)
+{
+	struct atl1_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	u32 ret_val;
+	u16 speed, duplex, phy_data;
+	int reconfig = 0;
+
+	/* MII_BMSR must be read twice */
+	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
+	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
+	if (!(phy_data & BMSR_LSTATUS)) {	/* link down */
+		if (netif_carrier_ok(netdev)) {	/* old link state: Up */
+			dev_info(&adapter->pdev->dev, "link is down\n");
+			adapter->link_speed = SPEED_0;
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+		}
+		return ATL1_SUCCESS;
+	}
+
+	/* Link Up */
+	ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
+	if (ret_val)
+		return ret_val;
+
+	switch (hw->media_type) {
+	case MEDIA_TYPE_1000M_FULL:
+		if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
+			reconfig = 1;
+		break;
+	case MEDIA_TYPE_100M_FULL:
+		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
+			reconfig = 1;
+		break;
+	case MEDIA_TYPE_100M_HALF:
+		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
+			reconfig = 1;
+		break;
+	case MEDIA_TYPE_10M_FULL:
+		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
+			reconfig = 1;
+		break;
+	case MEDIA_TYPE_10M_HALF:
+		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
+			reconfig = 1;
+		break;
+	}
+
+	/* negotiated link result matches our setting */
+	if (!reconfig) {
+		if (adapter->link_speed != speed
+		    || adapter->link_duplex != duplex) {
+			adapter->link_speed = speed;
+			adapter->link_duplex = duplex;
+			atl1_setup_mac_ctrl(adapter);
+			dev_info(&adapter->pdev->dev,
+				"%s link is up %d Mbps %s\n",
+				netdev->name, adapter->link_speed,
+				adapter->link_duplex == FULL_DUPLEX ?
+				"full duplex" : "half duplex");
+		}
+		if (!netif_carrier_ok(netdev)) {	/* Link down -> Up */
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+		}
+		return ATL1_SUCCESS;
+	}
+
+	/* change original link status */
+	if (netif_carrier_ok(netdev)) {
+		adapter->link_speed = SPEED_0;
+		netif_carrier_off(netdev);
+		netif_stop_queue(netdev);
+	}
+
+	if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
+	    hw->media_type != MEDIA_TYPE_1000M_FULL) {
+		switch (hw->media_type) {
+		case MEDIA_TYPE_100M_FULL:
+			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
+			           MII_CR_RESET;
+			break;
+		case MEDIA_TYPE_100M_HALF:
+			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
+			break;
+		case MEDIA_TYPE_10M_FULL:
+			phy_data =
+			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
+			break;
+		default:	/* MEDIA_TYPE_10M_HALF: */
+			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
+			break;
+		}
+		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
+		return ATL1_SUCCESS;
+	}
+
+	/* auto-neg, insert timer to re-config phy */
+	if (!adapter->phy_timer_pending) {
+		adapter->phy_timer_pending = true;
+		mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
+	}
+
+	return ATL1_SUCCESS;
+}
+
+static void atl1_check_for_link(struct atl1_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u16 phy_data = 0;
+
+	spin_lock(&adapter->lock);
+	adapter->phy_timer_pending = false;
+	atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+	atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+	spin_unlock(&adapter->lock);
+
+	/* notify upper layer link down ASAP */
+	if (!(phy_data & BMSR_LSTATUS)) {	/* Link Down */
+		if (netif_carrier_ok(netdev)) {	/* old link state: Up */
+			dev_info(&adapter->pdev->dev, "%s link is down\n",
+				netdev->name);
+			adapter->link_speed = SPEED_0;
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+		}
+	}
+	schedule_work(&adapter->link_chg_task);
+}
+
+/*
+ * atl1_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void atl1_set_multi(struct net_device *netdev)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	struct atl1_hw *hw = &adapter->hw;
+	struct dev_mc_list *mc_ptr;
+	u32 rctl;
+	u32 hash_value;
+
+	/* Check for Promiscuous and All Multicast modes */
+	rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
+	if (netdev->flags & IFF_PROMISC)
+		rctl |= MAC_CTRL_PROMIS_EN;
+	else if (netdev->flags & IFF_ALLMULTI) {
+		rctl |= MAC_CTRL_MC_ALL_EN;
+		rctl &= ~MAC_CTRL_PROMIS_EN;
+	} else
+		rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
+
+	iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);
+
+	/* clear the old settings from the multicast hash table */
+	iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
+	iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
+
+	/* compute mc addresses' hash value, and put it into hash table */
+	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+		hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr);
+		atl1_hash_set(hw, hash_value);
+	}
+}
+
+/*
+ * atl1_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	int old_mtu = netdev->mtu;
+	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+
+	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
+	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+		dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
+		return -EINVAL;
+	}
+
+	adapter->hw.max_frame_size = max_frame;
+	adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
+	adapter->rx_buffer_len = (max_frame + 7) & ~7;
+	adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
+
+	netdev->mtu = new_mtu;
+	if ((old_mtu != new_mtu) && netif_running(netdev)) {
+		atl1_down(adapter);
+		atl1_up(adapter);
+	}
+
+	return 0;
+}
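
For a concrete feel of the derived thresholds, assume ENET_HEADER_SIZE is 14 and
ETHERNET_FCS_SIZE is 4 (as elsewhere in this driver) and a standard 1500-byte
MTU.  A worked example, not part of the patch:

    /* mtu_thresholds.c - worked example of atl1_change_mtu()'s arithmetic;
     * assumes ENET_HEADER_SIZE == 14 and ETHERNET_FCS_SIZE == 4. */
    #include <stdio.h>

    int main(void)
    {
        int new_mtu = 1500;
        int max_frame = new_mtu + 14 + 4;              /* 1518 */
        int tx_jumbo_task_th = (max_frame + 7) >> 3;   /* 190  */
        int rx_buffer_len = (max_frame + 7) & ~7;      /* 1520, multiple of 8 */
        int rx_jumbo_th = rx_buffer_len / 8;           /* 190  */

        printf("%d %d %d %d\n", max_frame, tx_jumbo_task_th,
               rx_buffer_len, rx_jumbo_th);
        return 0;
    }
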
+
+static void set_flow_ctrl_old(struct atl1_adapter *adapter)
+{
+	u32 hi, lo, value;
+
+	/* RFD Flow Control */
+	value = adapter->rfd_ring.count;
+	hi = value / 16;
+	if (hi < 2)
+		hi = 2;
+	lo = value * 7 / 8;
+
+	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
+		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
+	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
+
+	/* RRD Flow Control */
+	value = adapter->rrd_ring.count;
+	lo = value / 16;
+	hi = value * 7 / 8;
+	if (lo < 2)
+		lo = 2;
+	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
+		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
+	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
+}
+
+static void set_flow_ctrl_new(struct atl1_hw *hw)
+{
+	u32 hi, lo, value;
+
+	/* RXF Flow Control */
+	value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
+	lo = value / 16;
+	if (lo < 192)
+		lo = 192;
+	hi = value * 7 / 8;
+	if (hi < lo)
+		hi = lo + 16;
+	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
+		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
+	iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
+
+	/* RRD Flow Control */
+	value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
+	lo = value / 8;
+	hi = value * 7 / 8;
+	if (lo < 2)
+		lo = 2;
+	if (hi < lo)
+		hi = lo + 3;
+	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
+		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
+	iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
+}
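
The two paths above compute the same kind of high/low watermark pair, the old one
from the software ring sizes and the new one from the SRAM FIFO lengths read back
from the chip.  For a hypothetical ring of 512 descriptors (not necessarily the
driver default), the legacy arithmetic works out as follows; illustration only:

    /* flow_watermarks.c - worked example for set_flow_ctrl_old();
     * 512 is a hypothetical ring size. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int rfd_count = 512;
        unsigned int rfd_hi = rfd_count / 16;       /* 32  (clamped to >= 2) */
        unsigned int rfd_lo = rfd_count * 7 / 8;    /* 448 */

        unsigned int rrd_count = 512;
        unsigned int rrd_lo = rrd_count / 16;       /* 32  (clamped to >= 2) */
        unsigned int rrd_hi = rrd_count * 7 / 8;    /* 448 */

        printf("RFD hi/lo %u/%u, RRD hi/lo %u/%u\n",
               rfd_hi, rfd_lo, rrd_hi, rrd_lo);
        return 0;
    }
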
+
+/*
+ * atl1_configure - Configure Transmit & Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx/Rx unit of the MAC after a reset.
+ */
+static u32 atl1_configure(struct atl1_adapter *adapter)
+{
+	struct atl1_hw *hw = &adapter->hw;
+	u32 value;
+
+	/* clear interrupt status */
+	iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);
+
+	/* set MAC Address */
+	value = (((u32) hw->mac_addr[2]) << 24) |
+		(((u32) hw->mac_addr[3]) << 16) |
+		(((u32) hw->mac_addr[4]) << 8) |
+		(((u32) hw->mac_addr[5]));
+	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
+	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
+	iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));
+
+	/* tx / rx ring */
+
+	/* HI base address */
+	iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
+		hw->hw_addr + REG_DESC_BASE_ADDR_HI);
+	/* LO base address */
+	iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
+		hw->hw_addr + REG_DESC_RFD_ADDR_LO);
+	iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
+		hw->hw_addr + REG_DESC_RRD_ADDR_LO);
+	iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
+		hw->hw_addr + REG_DESC_TPD_ADDR_LO);
+	iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
+		hw->hw_addr + REG_DESC_CMB_ADDR_LO);
+	iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
+		hw->hw_addr + REG_DESC_SMB_ADDR_LO);
+
+	/* element count */
+	value = adapter->rrd_ring.count;
+	value <<= 16;
+	value += adapter->rfd_ring.count;
+	iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
+	iowrite32(adapter->tpd_ring.count, hw->hw_addr +
+		REG_DESC_TPD_RING_SIZE);
+
+	/* Load Ptr */
+	iowrite32(1, hw->hw_addr + REG_LOAD_PTR);
+
+	/* config Mailbox */
+	value = ((atomic_read(&adapter->tpd_ring.next_to_use)
+		  & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
+		((atomic_read(&adapter->rrd_ring.next_to_clean)
+		& MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
+		((atomic_read(&adapter->rfd_ring.next_to_use)
+		& MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
+	iowrite32(value, hw->hw_addr + REG_MAILBOX);
+
+	/* config IPG/IFG */
+	value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
+		 << MAC_IPG_IFG_IPGT_SHIFT) |
+		(((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
+		<< MAC_IPG_IFG_MIFG_SHIFT) |
+		(((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
+		<< MAC_IPG_IFG_IPGR1_SHIFT) |
+		(((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
+		<< MAC_IPG_IFG_IPGR2_SHIFT);
+	iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);
+
+	/* config  Half-Duplex Control */
+	value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
+		(((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
+		<< MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
+		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
+		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
+		(((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
+		<< MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
+	iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);
+
+	/* set Interrupt Moderator Timer */
+	iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
+	iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);
+
+	/* set Interrupt Clear Timer */
+	iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);
+
+	/* set MTU; add 4 bytes for the VLAN tag */
+	iowrite32(hw->max_frame_size + 4, hw->hw_addr + REG_MTU);
+
+	/* jumbo size & rrd retirement timer */
+	value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
+		 << RXQ_JMBOSZ_TH_SHIFT) |
+		(((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
+		<< RXQ_JMBO_LKAH_SHIFT) |
+		(((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
+		<< RXQ_RRD_TIMER_SHIFT);
+	iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);
+
+	/* Flow Control */
+	switch (hw->dev_rev) {
+	case 0x8001:
+	case 0x9001:
+	case 0x9002:
+	case 0x9003:
+		set_flow_ctrl_old(adapter);
+		break;
+	default:
+		set_flow_ctrl_new(hw);
+		break;
+	}
+
+	/* config TXQ */
+	value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
+		 << TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
+		(((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
+		<< TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
+		(((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
+		<< TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
+		TXQ_CTRL_EN;
+	iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);
+
+	/* min tpd fetch gap & tx jumbo packet size threshold for task offload */
+	value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
+		<< TX_JUMBO_TASK_TH_SHIFT) |
+		(((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
+		<< TX_TPD_MIN_IPG_SHIFT);
+	iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);
+
+	/* config RXQ */
+	value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
+		<< RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
+		(((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
+		<< RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
+		(((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
+		<< RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
+		RXQ_CTRL_EN;
+	iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);
+
+	/* config DMA Engine */
+	value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
+		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
+		((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
+		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
+		DMA_CTRL_DMAW_EN;
+	value |= (u32) hw->dma_ord;
+	if (atl1_rcb_128 == hw->rcb_value)
+		value |= DMA_CTRL_RCB_VALUE;
+	iowrite32(value, hw->hw_addr + REG_DMA_CTRL);
+
+	/* config CMB / SMB */
+	value = hw->cmb_rrd | ((u32) hw->cmb_tpd << 16);
+	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
+	value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
+	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
+	iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);
+
+	/* --- enable CMB / SMB */
+	value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
+	iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);
+
+	value = ioread32(adapter->hw.hw_addr + REG_ISR);
+	if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
+		value = 1;	/* config failed */
+	else
+		value = 0;
+
+	/* clear all interrupt status */
+	iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
+	iowrite32(0, adapter->hw.hw_addr + REG_ISR);
+	return value;
+}
+
+/*
+ * atl1_pcie_patch - Patch for PCIE module
+ */
+static void atl1_pcie_patch(struct atl1_adapter *adapter)
+{
+	u32 value;
+
+	/* much vendor magic here */
+	value = 0x6500;
+	iowrite32(value, adapter->hw.hw_addr + 0x12FC);
+	/* pcie flow control mode change */
+	value = ioread32(adapter->hw.hw_addr + 0x1008);
+	value |= 0x8000;
+	iowrite32(value, adapter->hw.hw_addr + 0x1008);
+}
+
+/*
+ * When resuming from ACPI sleep on some VIA motherboards, the Interrupt
+ * Disable bit (0x400) in the PCI Command register is set.
+ * This function clears that bit to re-enable INTx interrupts.
+ * Brackett, 2006/03/15
+ */
+static void atl1_via_workaround(struct atl1_adapter *adapter)
+{
+	unsigned long value;
+
+	value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
+	if (value & PCI_COMMAND_INTX_DISABLE)
+		value &= ~PCI_COMMAND_INTX_DISABLE;
+	iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
+}
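
Note that the workaround above reaches the PCI Command register through the
chip's memory-mapped mirror of its own configuration space, and that it reads 16
bits but writes back 32.  For comparison, the generic way to clear INTX_DISABLE
from a driver would go through the PCI config accessors; a sketch only, not what
this patch does:

    /* Sketch, not part of the patch: clearing INTx Disable via the
     * standard PCI configuration-space accessors. */
    #include <linux/pci.h>

    static void clear_intx_disable(struct pci_dev *pdev)
    {
        u16 cmd;

        pci_read_config_word(pdev, PCI_COMMAND, &cmd);
        if (cmd & PCI_COMMAND_INTX_DISABLE)
            pci_write_config_word(pdev, PCI_COMMAND,
                                  cmd & ~PCI_COMMAND_INTX_DISABLE);
    }
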
+
 /*
  * atl1_irq_enable - Enable default interrupt generation settings
  * @adapter: board private structure
  */
 static void atl1_irq_enable(struct atl1_adapter *adapter)
 {
-	if (likely(!atomic_dec_and_test(&adapter->irq_sem)))
-		iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
+	iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
+	ioread32(adapter->hw.hw_addr + REG_IMR);
+}
+
+/*
+ * atl1_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static void atl1_irq_disable(struct atl1_adapter *adapter)
+{
+	iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+	ioread32(adapter->hw.hw_addr + REG_IMR);
+	synchronize_irq(adapter->pdev->irq);
 }
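
In both helpers the ioread32() that follows the iowrite32() is presumably there
to flush the posted MMIO write, so the new interrupt mask has actually reached
the NIC before the caller continues; in the disable path that ordering matters
because synchronize_irq() only waits for handlers that are already running.  The
idiom, restated as a sketch (hw and pdev as in the driver):

    iowrite32(0, hw->hw_addr + REG_IMR);    /* mask all sources (posted write) */
    ioread32(hw->hw_addr + REG_IMR);        /* read back to flush the write    */
    synchronize_irq(pdev->irq);             /* then wait out running handlers  */
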
 
 static void atl1_clear_phy_int(struct atl1_adapter *adapter)
@@ -288,25 +1012,19 @@
 	adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
 	adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
 	adapter->soft_stats.multicast += smb->rx_mcast;
-	adapter->soft_stats.collisions += (smb->tx_1_col +
-					   smb->tx_2_col * 2 +
-					   smb->tx_late_col +
-					   smb->tx_abort_col *
-					   adapter->hw.max_retry);
+	adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
+		smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);
 
 	/* Rx Errors */
-	adapter->soft_stats.rx_errors += (smb->rx_frag +
-					  smb->rx_fcs_err +
-					  smb->rx_len_err +
-					  smb->rx_sz_ov +
-					  smb->rx_rxf_ov +
-					  smb->rx_rrd_ov + smb->rx_align_err);
+	adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
+		smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
+		smb->rx_rrd_ov + smb->rx_align_err);
 	adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
 	adapter->soft_stats.rx_length_errors += smb->rx_len_err;
 	adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
 	adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
 	adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
-						 smb->rx_rxf_ov);
+		smb->rx_rxf_ov);
 
 	adapter->soft_stats.rx_pause += smb->rx_pause;
 	adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
@@ -314,8 +1032,7 @@
 
 	/* Tx Errors */
 	adapter->soft_stats.tx_errors += (smb->tx_late_col +
-					  smb->tx_abort_col +
-					  smb->tx_underrun + smb->tx_trunc);
+		smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
 	adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
 	adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
 	adapter->soft_stats.tx_window_errors += smb->tx_late_col;
@@ -337,36 +1054,101 @@
 	adapter->net_stats.collisions = adapter->soft_stats.collisions;
 	adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
 	adapter->net_stats.rx_over_errors =
-	    adapter->soft_stats.rx_missed_errors;
+		adapter->soft_stats.rx_missed_errors;
 	adapter->net_stats.rx_length_errors =
-	    adapter->soft_stats.rx_length_errors;
+		adapter->soft_stats.rx_length_errors;
 	adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
 	adapter->net_stats.rx_frame_errors =
-	    adapter->soft_stats.rx_frame_errors;
+		adapter->soft_stats.rx_frame_errors;
 	adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
 	adapter->net_stats.rx_missed_errors =
-	    adapter->soft_stats.rx_missed_errors;
+		adapter->soft_stats.rx_missed_errors;
 	adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
 	adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
 	adapter->net_stats.tx_aborted_errors =
-	    adapter->soft_stats.tx_aborted_errors;
+		adapter->soft_stats.tx_aborted_errors;
 	adapter->net_stats.tx_window_errors =
-	    adapter->soft_stats.tx_window_errors;
+		adapter->soft_stats.tx_window_errors;
 	adapter->net_stats.tx_carrier_errors =
-	    adapter->soft_stats.tx_carrier_errors;
+		adapter->soft_stats.tx_carrier_errors;
+}
+
+/*
+ * atl1_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ */
+static struct net_device_stats *atl1_get_stats(struct net_device *netdev)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	return &adapter->net_stats;
+}
+
+static void atl1_update_mailbox(struct atl1_adapter *adapter)
+{
+	unsigned long flags;
+	u32 tpd_next_to_use;
+	u32 rfd_next_to_use;
+	u32 rrd_next_to_clean;
+	u32 value;
+
+	spin_lock_irqsave(&adapter->mb_lock, flags);
+
+	tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
+	rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
+	rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
+
+	value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
+		MB_RFD_PROD_INDX_SHIFT) |
+		((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
+		MB_RRD_CONS_INDX_SHIFT) |
+		((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
+		MB_TPD_PROD_INDX_SHIFT);
+	iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
+
+	spin_unlock_irqrestore(&adapter->mb_lock, flags);
+}
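
atl1_update_mailbox() publishes all three ring positions to the hardware in a
single 32-bit doorbell write, with mb_lock keeping the three index reads and the
write together.  The bit layout below is purely illustrative; the real field
positions are the MB_*_SHIFT/MB_*_MASK constants from the driver header, which
this patch does not quote:

    /* mailbox_pack.c - illustrative packing of three indices into one
     * 32-bit word; the shifts and masks here are hypothetical. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t rfd_prod = 100, rrd_cons = 7, tpd_prod = 42;
        uint32_t doorbell = ((rfd_prod & 0x7ff) << 0)  |
                            ((rrd_cons & 0x7ff) << 11) |
                            ((tpd_prod & 0x3ff) << 22);

        printf("doorbell = 0x%08x\n", doorbell);
        return 0;
    }
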
+
+static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
+	struct rx_return_desc *rrd, u16 offset)
+{
+	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+
+	while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
+		rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
+		if (++rfd_ring->next_to_clean == rfd_ring->count) {
+			rfd_ring->next_to_clean = 0;
+		}
+	}
+}
+
+static void atl1_update_rfd_index(struct atl1_adapter *adapter,
+	struct rx_return_desc *rrd)
+{
+	u16 num_buf;
+
+	num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
+		adapter->rx_buffer_len;
+	if (rrd->num_buf == num_buf)
+		/* clean alloc flag for bad rrd */
+		atl1_clean_alloc_flag(adapter, rrd, num_buf);
 }
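
atl1_update_rfd_index() recomputes how many RFD buffers the returned packet must
have occupied, a plain ceiling division of the packet size by the per-buffer
length, and only advances the alloc flags when that count agrees with the
(suspect) RRD.  A worked example, reusing the 1520-byte rx_buffer_len from the
MTU example above and an arbitrary 3000-byte packet:

    /* rfd_count.c - worked example of the ceiling division above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int pkt_size = 3000;       /* arbitrary packet size */
        unsigned int rx_buffer_len = 1520;  /* from the MTU example  */
        unsigned int num_buf =
            (pkt_size + rx_buffer_len - 1) / rx_buffer_len;

        printf("num_buf = %u\n", num_buf);  /* 2 */
        return 0;
    }
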
 
 static void atl1_rx_checksum(struct atl1_adapter *adapter,
-					struct rx_return_desc *rrd,
-					struct sk_buff *skb)
+	struct rx_return_desc *rrd, struct sk_buff *skb)
 {
+	struct pci_dev *pdev = adapter->pdev;
+
 	skb->ip_summed = CHECKSUM_NONE;
 
 	if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
 		if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
 					ERR_FLAG_CODE | ERR_FLAG_OV)) {
 			adapter->hw_csum_err++;
-			dev_dbg(&adapter->pdev->dev, "rx checksum error\n");
+			dev_printk(KERN_DEBUG, &pdev->dev,
+				"rx checksum error\n");
 			return;
 		}
 	}
@@ -385,7 +1167,7 @@
 	}
 
 	/* IPv4, but hardware thinks its checksum is wrong */
-	dev_dbg(&adapter->pdev->dev,
+	dev_printk(KERN_DEBUG, &pdev->dev,
 		"hw csum wrong, pkt_flag:%x, err_flag:%x\n",
 		rrd->pkt_flg, rrd->err_flg);
 	skb->ip_summed = CHECKSUM_COMPLETE;
@@ -500,34 +1282,17 @@
 			/* rrd seems to be bad */
 			if (unlikely(i-- > 0)) {
 				/* rrd may not be DMAed completely */
-				dev_dbg(&adapter->pdev->dev,
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
 					"incomplete RRD DMA transfer\n");
 				udelay(1);
 				goto chk_rrd;
 			}
 			/* bad rrd */
-			dev_dbg(&adapter->pdev->dev, "bad RRD\n");
+			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+				"bad RRD\n");
 			/* see if update RFD index */
-			if (rrd->num_buf > 1) {
-				u16 num_buf;
-				num_buf =
-				    (rrd->xsz.xsum_sz.pkt_size +
-				     adapter->rx_buffer_len -
-				     1) / adapter->rx_buffer_len;
-				if (rrd->num_buf == num_buf) {
-					/* clean alloc flag for bad rrd */
-					while (rfd_ring->next_to_clean !=
-					       (rrd->buf_indx + num_buf)) {
-						rfd_ring->buffer_info[rfd_ring->
-								      next_to_clean].alloced = 0;
-						if (++rfd_ring->next_to_clean ==
-						    rfd_ring->count) {
-							rfd_ring->
-							    next_to_clean = 0;
-						}
-					}
-				}
-			}
+			if (rrd->num_buf > 1)
+				atl1_update_rfd_index(adapter, rrd);
 
 			/* update rrd */
 			rrd->xsz.valid = 0;
@@ -541,12 +1306,7 @@
 		}
 rrd_ok:
 		/* clean alloc flag for bad rrd */
-		while (rfd_ring->next_to_clean != rrd->buf_indx) {
-			rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced =
-			    0;
-			if (++rfd_ring->next_to_clean == rfd_ring->count)
-				rfd_ring->next_to_clean = 0;
-		}
+		atl1_clean_alloc_flag(adapter, rrd, 0);
 
 		buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
 		if (++rfd_ring->next_to_clean == rfd_ring->count)
@@ -662,580 +1422,13 @@
 		netif_wake_queue(adapter->netdev);
 }
 
-static void atl1_check_for_link(struct atl1_adapter *adapter)
-{
-	struct net_device *netdev = adapter->netdev;
-	u16 phy_data = 0;
-
-	spin_lock(&adapter->lock);
-	adapter->phy_timer_pending = false;
-	atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
-	atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
-	spin_unlock(&adapter->lock);
-
-	/* notify upper layer link down ASAP */
-	if (!(phy_data & BMSR_LSTATUS)) {	/* Link Down */
-		if (netif_carrier_ok(netdev)) {	/* old link state: Up */
-			dev_info(&adapter->pdev->dev, "%s link is down\n",
-				netdev->name);
-			adapter->link_speed = SPEED_0;
-			netif_carrier_off(netdev);
-			netif_stop_queue(netdev);
-		}
-	}
-	schedule_work(&adapter->link_chg_task);
-}
-
-/*
- * atl1_intr - Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
- */
-static irqreturn_t atl1_intr(int irq, void *data)
-{
-	/*struct atl1_adapter *adapter = ((struct net_device *)data)->priv;*/
-	struct atl1_adapter *adapter = netdev_priv(data);
-	u32 status;
-	u8 update_rx;
-	int max_ints = 10;
-
-	status = adapter->cmb.cmb->int_stats;
-	if (!status)
-		return IRQ_NONE;
-
-	update_rx = 0;
-
-	do {
-		/* clear CMB interrupt status at once */
-		adapter->cmb.cmb->int_stats = 0;
-
-		if (status & ISR_GPHY)	/* clear phy status */
-			atl1_clear_phy_int(adapter);
-
-		/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
-		iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
-
-		/* check if SMB intr */
-		if (status & ISR_SMB)
-			atl1_inc_smb(adapter);
-
-		/* check if PCIE PHY Link down */
-		if (status & ISR_PHY_LINKDOWN) {
-			dev_dbg(&adapter->pdev->dev, "pcie phy link down %x\n",
-				status);
-			if (netif_running(adapter->netdev)) {	/* reset MAC */
-				iowrite32(0, adapter->hw.hw_addr + REG_IMR);
-				schedule_work(&adapter->pcie_dma_to_rst_task);
-				return IRQ_HANDLED;
-			}
-		}
-
-		/* check if DMA read/write error ? */
-		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
-			dev_dbg(&adapter->pdev->dev,
-				"pcie DMA r/w error (status = 0x%x)\n",
-				status);
-			iowrite32(0, adapter->hw.hw_addr + REG_IMR);
-			schedule_work(&adapter->pcie_dma_to_rst_task);
-			return IRQ_HANDLED;
-		}
-
-		/* link event */
-		if (status & ISR_GPHY) {
-			adapter->soft_stats.tx_carrier_errors++;
-			atl1_check_for_link(adapter);
-		}
-
-		/* transmit event */
-		if (status & ISR_CMB_TX)
-			atl1_intr_tx(adapter);
-
-		/* rx exception */
-		if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
-			ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
-			ISR_HOST_RRD_OV | ISR_CMB_RX))) {
-			if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
-				ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
-				ISR_HOST_RRD_OV))
-				dev_dbg(&adapter->pdev->dev,
-					"rx exception, ISR = 0x%x\n", status);
-			atl1_intr_rx(adapter);
-		}
-
-		if (--max_ints < 0)
-			break;
-
-	} while ((status = adapter->cmb.cmb->int_stats));
-
-	/* re-enable Interrupt */
-	iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
-	return IRQ_HANDLED;
-}
-
-/*
- * atl1_set_multi - Multicast and Promiscuous mode set
- * @netdev: network interface device structure
- *
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated.  This routine is
- * responsible for configuring the hardware for proper multicast,
- * promiscuous mode, and all-multi behavior.
- */
-static void atl1_set_multi(struct net_device *netdev)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	struct atl1_hw *hw = &adapter->hw;
-	struct dev_mc_list *mc_ptr;
-	u32 rctl;
-	u32 hash_value;
-
-	/* Check for Promiscuous and All Multicast modes */
-	rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
-	if (netdev->flags & IFF_PROMISC)
-		rctl |= MAC_CTRL_PROMIS_EN;
-	else if (netdev->flags & IFF_ALLMULTI) {
-		rctl |= MAC_CTRL_MC_ALL_EN;
-		rctl &= ~MAC_CTRL_PROMIS_EN;
-	} else
-		rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
-
-	iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);
-
-	/* clear the old settings from the multicast hash table */
-	iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
-	iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
-
-	/* compute mc addresses' hash value ,and put it into hash table */
-	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
-		hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr);
-		atl1_hash_set(hw, hash_value);
-	}
-}
-
-static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
-{
-	u32 value;
-	struct atl1_hw *hw = &adapter->hw;
-	struct net_device *netdev = adapter->netdev;
-	/* Config MAC CTRL Register */
-	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
-	/* duplex */
-	if (FULL_DUPLEX == adapter->link_duplex)
-		value |= MAC_CTRL_DUPLX;
-	/* speed */
-	value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
-			 MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
-		  MAC_CTRL_SPEED_SHIFT);
-	/* flow control */
-	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
-	/* PAD & CRC */
-	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
-	/* preamble length */
-	value |= (((u32) adapter->hw.preamble_len
-		   & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
-	/* vlan */
-	if (adapter->vlgrp)
-		value |= MAC_CTRL_RMV_VLAN;
-	/* rx checksum
-	   if (adapter->rx_csum)
-	   value |= MAC_CTRL_RX_CHKSUM_EN;
-	 */
-	/* filter mode */
-	value |= MAC_CTRL_BC_EN;
-	if (netdev->flags & IFF_PROMISC)
-		value |= MAC_CTRL_PROMIS_EN;
-	else if (netdev->flags & IFF_ALLMULTI)
-		value |= MAC_CTRL_MC_ALL_EN;
-	/* value |= MAC_CTRL_LOOPBACK; */
-	iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
-}
-
-static u32 atl1_check_link(struct atl1_adapter *adapter)
-{
-	struct atl1_hw *hw = &adapter->hw;
-	struct net_device *netdev = adapter->netdev;
-	u32 ret_val;
-	u16 speed, duplex, phy_data;
-	int reconfig = 0;
-
-	/* MII_BMSR must read twice */
-	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
-	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
-	if (!(phy_data & BMSR_LSTATUS)) {	/* link down */
-		if (netif_carrier_ok(netdev)) {	/* old link state: Up */
-			dev_info(&adapter->pdev->dev, "link is down\n");
-			adapter->link_speed = SPEED_0;
-			netif_carrier_off(netdev);
-			netif_stop_queue(netdev);
-		}
-		return ATL1_SUCCESS;
-	}
-
-	/* Link Up */
-	ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
-	if (ret_val)
-		return ret_val;
-
-	switch (hw->media_type) {
-	case MEDIA_TYPE_1000M_FULL:
-		if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
-			reconfig = 1;
-		break;
-	case MEDIA_TYPE_100M_FULL:
-		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
-			reconfig = 1;
-		break;
-	case MEDIA_TYPE_100M_HALF:
-		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
-			reconfig = 1;
-		break;
-	case MEDIA_TYPE_10M_FULL:
-		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
-			reconfig = 1;
-		break;
-	case MEDIA_TYPE_10M_HALF:
-		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
-			reconfig = 1;
-		break;
-	}
-
-	/* link result is our setting */
-	if (!reconfig) {
-		if (adapter->link_speed != speed
-		    || adapter->link_duplex != duplex) {
-			adapter->link_speed = speed;
-			adapter->link_duplex = duplex;
-			atl1_setup_mac_ctrl(adapter);
-			dev_info(&adapter->pdev->dev,
-				"%s link is up %d Mbps %s\n",
-				netdev->name, adapter->link_speed,
-				adapter->link_duplex == FULL_DUPLEX ?
-				"full duplex" : "half duplex");
-		}
-		if (!netif_carrier_ok(netdev)) {	/* Link down -> Up */
-			netif_carrier_on(netdev);
-			netif_wake_queue(netdev);
-		}
-		return ATL1_SUCCESS;
-	}
-
-	/* change orignal link status */
-	if (netif_carrier_ok(netdev)) {
-		adapter->link_speed = SPEED_0;
-		netif_carrier_off(netdev);
-		netif_stop_queue(netdev);
-	}
-
-	if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
-	    hw->media_type != MEDIA_TYPE_1000M_FULL) {
-		switch (hw->media_type) {
-		case MEDIA_TYPE_100M_FULL:
-			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
-			           MII_CR_RESET;
-			break;
-		case MEDIA_TYPE_100M_HALF:
-			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
-			break;
-		case MEDIA_TYPE_10M_FULL:
-			phy_data =
-			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
-			break;
-		default:	/* MEDIA_TYPE_10M_HALF: */
-			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
-			break;
-		}
-		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
-		return ATL1_SUCCESS;
-	}
-
-	/* auto-neg, insert timer to re-config phy */
-	if (!adapter->phy_timer_pending) {
-		adapter->phy_timer_pending = true;
-		mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
-	}
-
-	return ATL1_SUCCESS;
-}
-
-static void set_flow_ctrl_old(struct atl1_adapter *adapter)
-{
-	u32 hi, lo, value;
-
-	/* RFD Flow Control */
-	value = adapter->rfd_ring.count;
-	hi = value / 16;
-	if (hi < 2)
-		hi = 2;
-	lo = value * 7 / 8;
-
-	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
-	    ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
-	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
-
-	/* RRD Flow Control */
-	value = adapter->rrd_ring.count;
-	lo = value / 16;
-	hi = value * 7 / 8;
-	if (lo < 2)
-		lo = 2;
-	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
-	    ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
-	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
-}
-
-static void set_flow_ctrl_new(struct atl1_hw *hw)
-{
-	u32 hi, lo, value;
-
-	/* RXF Flow Control */
-	value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
-	lo = value / 16;
-	if (lo < 192)
-		lo = 192;
-	hi = value * 7 / 8;
-	if (hi < lo)
-		hi = lo + 16;
-	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
-	    ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
-	iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
-
-	/* RRD Flow Control */
-	value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
-	lo = value / 8;
-	hi = value * 7 / 8;
-	if (lo < 2)
-		lo = 2;
-	if (hi < lo)
-		hi = lo + 3;
-	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
-	    ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
-	iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
-}
-
-/*
- * atl1_configure - Configure Transmit&Receive Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Tx /Rx unit of the MAC after a reset.
- */
-static u32 atl1_configure(struct atl1_adapter *adapter)
-{
-	struct atl1_hw *hw = &adapter->hw;
-	u32 value;
-
-	/* clear interrupt status */
-	iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);
-
-	/* set MAC Address */
-	value = (((u32) hw->mac_addr[2]) << 24) |
-		(((u32) hw->mac_addr[3]) << 16) |
-		(((u32) hw->mac_addr[4]) << 8) |
-		(((u32) hw->mac_addr[5]));
-	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
-	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
-	iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));
-
-	/* tx / rx ring */
-
-	/* HI base address */
-	iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
-		hw->hw_addr + REG_DESC_BASE_ADDR_HI);
-	/* LO base address */
-	iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
-		hw->hw_addr + REG_DESC_RFD_ADDR_LO);
-	iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
-		hw->hw_addr + REG_DESC_RRD_ADDR_LO);
-	iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
-		hw->hw_addr + REG_DESC_TPD_ADDR_LO);
-	iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
-		hw->hw_addr + REG_DESC_CMB_ADDR_LO);
-	iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
-		hw->hw_addr + REG_DESC_SMB_ADDR_LO);
-
-	/* element count */
-	value = adapter->rrd_ring.count;
-	value <<= 16;
-	value += adapter->rfd_ring.count;
-	iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
-	iowrite32(adapter->tpd_ring.count, hw->hw_addr + REG_DESC_TPD_RING_SIZE);
-
-	/* Load Ptr */
-	iowrite32(1, hw->hw_addr + REG_LOAD_PTR);
-
-	/* config Mailbox */
-	value = ((atomic_read(&adapter->tpd_ring.next_to_use)
-		  & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
-	    ((atomic_read(&adapter->rrd_ring.next_to_clean)
-	      & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
-	    ((atomic_read(&adapter->rfd_ring.next_to_use)
-	      & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
-	iowrite32(value, hw->hw_addr + REG_MAILBOX);
-
-	/* config IPG/IFG */
-	value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
-		 << MAC_IPG_IFG_IPGT_SHIFT) |
-	    (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
-	     << MAC_IPG_IFG_MIFG_SHIFT) |
-	    (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
-	     << MAC_IPG_IFG_IPGR1_SHIFT) |
-	    (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
-	     << MAC_IPG_IFG_IPGR2_SHIFT);
-	iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);
-
-	/* config  Half-Duplex Control */
-	value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
-	    (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
-	     << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
-	    MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
-	    (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
-	    (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
-	     << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
-	iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);
-
-	/* set Interrupt Moderator Timer */
-	iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
-	iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);
-
-	/* set Interrupt Clear Timer */
-	iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);
-
-	/* set MTU, 4 : VLAN */
-	iowrite32(hw->max_frame_size + 4, hw->hw_addr + REG_MTU);
-
-	/* jumbo size & rrd retirement timer */
-	value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
-		 << RXQ_JMBOSZ_TH_SHIFT) |
-	    (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
-	     << RXQ_JMBO_LKAH_SHIFT) |
-	    (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
-	     << RXQ_RRD_TIMER_SHIFT);
-	iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);
-
-	/* Flow Control */
-	switch (hw->dev_rev) {
-	case 0x8001:
-	case 0x9001:
-	case 0x9002:
-	case 0x9003:
-		set_flow_ctrl_old(adapter);
-		break;
-	default:
-		set_flow_ctrl_new(hw);
-		break;
-	}
-
-	/* config TXQ */
-	value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
-		 << TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
-	    (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
-	     << TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
-	    (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
-	     << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN;
-	iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);
-
-	/* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
-	value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
-		 << TX_JUMBO_TASK_TH_SHIFT) |
-	    (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
-	     << TX_TPD_MIN_IPG_SHIFT);
-	iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);
-
-	/* config RXQ */
-	value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
-		 << RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
-	    (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
-	     << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
-	    (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
-	     << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) |
-	    RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
-	iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);
-
-	/* config DMA Engine */
-	value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
-		 << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
-	    ((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
-	     << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
-	    DMA_CTRL_DMAR_EN | DMA_CTRL_DMAW_EN;
-	value |= (u32) hw->dma_ord;
-	if (atl1_rcb_128 == hw->rcb_value)
-		value |= DMA_CTRL_RCB_VALUE;
-	iowrite32(value, hw->hw_addr + REG_DMA_CTRL);
-
-	/* config CMB / SMB */
-	value = hw->cmb_rrd | ((u32) hw->cmb_tpd << 16);
-	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
-	value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
-	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
-	iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);
-
-	/* --- enable CMB / SMB */
-	value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
-	iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);
-
-	value = ioread32(adapter->hw.hw_addr + REG_ISR);
-	if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
-		value = 1;	/* config failed */
-	else
-		value = 0;
-
-	/* clear all interrupt status */
-	iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
-	iowrite32(0, adapter->hw.hw_addr + REG_ISR);
-	return value;
-}
-
-/*
- * atl1_irq_disable - Mask off interrupt generation on the NIC
- * @adapter: board private structure
- */
-static void atl1_irq_disable(struct atl1_adapter *adapter)
-{
-	atomic_inc(&adapter->irq_sem);
-	iowrite32(0, adapter->hw.hw_addr + REG_IMR);
-	ioread32(adapter->hw.hw_addr + REG_IMR);
-	synchronize_irq(adapter->pdev->irq);
-}
-
-static void atl1_vlan_rx_register(struct net_device *netdev,
-				struct vlan_group *grp)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	unsigned long flags;
-	u32 ctrl;
-
-	spin_lock_irqsave(&adapter->lock, flags);
-	/* atl1_irq_disable(adapter); */
-	adapter->vlgrp = grp;
-
-	if (grp) {
-		/* enable VLAN tag insert/strip */
-		ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
-		ctrl |= MAC_CTRL_RMV_VLAN;
-		iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
-	} else {
-		/* disable VLAN tag insert/strip */
-		ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
-		ctrl &= ~MAC_CTRL_RMV_VLAN;
-		iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
-	}
-
-	/* atl1_irq_enable(adapter); */
-	spin_unlock_irqrestore(&adapter->lock, flags);
-}
-
-static void atl1_restore_vlan(struct atl1_adapter *adapter)
-{
-	atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp);
-}
-
 static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring)
 {
 	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
 	u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
-	return ((next_to_clean >
-		 next_to_use) ? next_to_clean - next_to_use -
-		1 : tpd_ring->count + next_to_clean - next_to_use - 1);
+	return ((next_to_clean > next_to_use) ?
+		next_to_clean - next_to_use - 1 :
+		tpd_ring->count + next_to_clean - next_to_use - 1);
 }
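
tpd_avail() is standard circular-ring bookkeeping: one slot is always left
unused so that next_to_use == next_to_clean can unambiguously mean "empty".  A
worked example with hypothetical indices:

    /* tpd_avail_example.c - worked example; ring size and indices are
     * hypothetical. */
    #include <stdio.h>

    static unsigned int tpd_avail(unsigned int count,
                                  unsigned int next_to_clean,
                                  unsigned int next_to_use)
    {
        return (next_to_clean > next_to_use) ?
            next_to_clean - next_to_use - 1 :
            count + next_to_clean - next_to_use - 1;
    }

    int main(void)
    {
        printf("%u\n", tpd_avail(64, 10, 50));  /* 23 free slots         */
        printf("%u\n", tpd_avail(64, 0, 0));    /* 63: empty = count - 1 */
        return 0;
    }
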
 
 static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
@@ -1258,9 +1451,7 @@
 			iph->tot_len = 0;
 			iph->check = 0;
 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-								 iph->daddr, 0,
-								 IPPROTO_TCP,
-								 0);
+				iph->daddr, 0, IPPROTO_TCP, 0);
 			ipofst = skb_network_offset(skb);
 			if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */
 				tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;
@@ -1268,7 +1459,8 @@
 			tso->tsopl |= (iph->ihl &
 				CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT;
 			tso->tsopl |= (tcp_hdrlen(skb) &
-				TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT;
+				TSO_PARAM_TCPHDRLEN_MASK) <<
+				TSO_PARAM_TCPHDRLEN_SHIFT;
 			tso->tsopl |= (skb_shinfo(skb)->gso_size &
 				TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
 			tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT;
@@ -1281,7 +1473,7 @@
 }
 
 static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
-			struct csum_param *csum)
+	struct csum_param *csum)
 {
 	u8 css, cso;
 
@@ -1289,7 +1481,7 @@
 		cso = skb_transport_offset(skb);
 		css = cso + skb->csum_offset;
 		if (unlikely(cso & 0x1)) {
-			dev_dbg(&adapter->pdev->dev,
+			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
 				"payload offset not an even number\n");
 			return -1;
 		}
@@ -1304,8 +1496,8 @@
 	return true;
 }
 
-static void atl1_tx_map(struct atl1_adapter *adapter,
-				struct sk_buff *skb, bool tcp_seg)
+static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+	bool tcp_seg)
 {
 	/* We enter this function holding a spinlock. */
 	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
@@ -1342,26 +1534,25 @@
 
 		if (first_buf_len > proto_hdr_len) {
 			len12 = first_buf_len - proto_hdr_len;
-			m = (len12 + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+			m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) /
+				ATL1_MAX_TX_BUF_LEN;
 			for (i = 0; i < m; i++) {
 				buffer_info =
 				    &tpd_ring->buffer_info[tpd_next_to_use];
 				buffer_info->skb = NULL;
 				buffer_info->length =
-				    (MAX_TX_BUF_LEN >=
-				     len12) ? MAX_TX_BUF_LEN : len12;
+				    (ATL1_MAX_TX_BUF_LEN >=
+				     len12) ? ATL1_MAX_TX_BUF_LEN : len12;
 				len12 -= buffer_info->length;
 				page = virt_to_page(skb->data +
-						 (proto_hdr_len +
-						  i * MAX_TX_BUF_LEN));
+					(proto_hdr_len +
+					i * ATL1_MAX_TX_BUF_LEN));
 				offset = (unsigned long)(skb->data +
-							(proto_hdr_len +
-							i * MAX_TX_BUF_LEN)) &
-							~PAGE_MASK;
-				buffer_info->dma =
-				    pci_map_page(adapter->pdev, page, offset,
-						 buffer_info->length,
-						 PCI_DMA_TODEVICE);
+					(proto_hdr_len +
+					i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK;
+				buffer_info->dma = pci_map_page(adapter->pdev,
+					page, offset, buffer_info->length,
+					PCI_DMA_TODEVICE);
 				if (++tpd_next_to_use == tpd_ring->count)
 					tpd_next_to_use = 0;
 			}
@@ -1372,8 +1563,7 @@
 		page = virt_to_page(skb->data);
 		offset = (unsigned long)skb->data & ~PAGE_MASK;
 		buffer_info->dma = pci_map_page(adapter->pdev, page,
-						offset, first_buf_len,
-						PCI_DMA_TODEVICE);
+			offset, first_buf_len, PCI_DMA_TODEVICE);
 		if (++tpd_next_to_use == tpd_ring->count)
 			tpd_next_to_use = 0;
 	}
@@ -1385,19 +1575,19 @@
 		frag = &skb_shinfo(skb)->frags[f];
 		lenf = frag->size;
 
-		m = (lenf + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+		m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN;
 		for (i = 0; i < m; i++) {
 			buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
 			if (unlikely(buffer_info->skb))
 				BUG();
 			buffer_info->skb = NULL;
-			buffer_info->length =
-			    (lenf > MAX_TX_BUF_LEN) ? MAX_TX_BUF_LEN : lenf;
+			buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ?
+				ATL1_MAX_TX_BUF_LEN : lenf;
 			lenf -= buffer_info->length;
-			buffer_info->dma =
-			    pci_map_page(adapter->pdev, frag->page,
-					 frag->page_offset + i * MAX_TX_BUF_LEN,
-					 buffer_info->length, PCI_DMA_TODEVICE);
+			buffer_info->dma = pci_map_page(adapter->pdev,
+				frag->page,
+				frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
+				buffer_info->length, PCI_DMA_TODEVICE);
 
 			if (++tpd_next_to_use == tpd_ring->count)
 				tpd_next_to_use = 0;
@@ -1409,7 +1599,7 @@
 }
 
 static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
-			       union tpd_descr *descr)
+	union tpd_descr *descr)
 {
 	/* We enter this function holding a spinlock. */
 	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
@@ -1453,31 +1643,6 @@
 	atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use);
 }
 
-static void atl1_update_mailbox(struct atl1_adapter *adapter)
-{
-	unsigned long flags;
-	u32 tpd_next_to_use;
-	u32 rfd_next_to_use;
-	u32 rrd_next_to_clean;
-	u32 value;
-
-	spin_lock_irqsave(&adapter->mb_lock, flags);
-
-	tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
-	rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
-	rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
-
-	value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
-		MB_RFD_PROD_INDX_SHIFT) |
-		((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
-		MB_RRD_CONS_INDX_SHIFT) |
-		((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
-		MB_TPD_PROD_INDX_SHIFT);
-	iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
-
-	spin_unlock_irqrestore(&adapter->mb_lock, flags);
-}
-
 static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct atl1_adapter *adapter = netdev_priv(netdev);
@@ -1513,8 +1678,8 @@
 	for (f = 0; f < nr_frags; f++) {
 		frag_size = skb_shinfo(skb)->frags[f].size;
 		if (frag_size)
-			count +=
-			    (frag_size + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+			count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
+				ATL1_MAX_TX_BUF_LEN;
 	}
 
 	/* mss will be nonzero if we're doing segment offload (TSO/GSO) */
@@ -1530,7 +1695,8 @@
 			/* need additional TPD ? */
 			if (proto_hdr_len != len)
 				count += (len - proto_hdr_len +
-					MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+					ATL1_MAX_TX_BUF_LEN - 1) /
+					ATL1_MAX_TX_BUF_LEN;
 		}
 	}
 
@@ -1538,7 +1704,7 @@
 	if (!spin_trylock(&adapter->lock)) {
 		/* Can't get lock - tell upper layer to requeue */
 		local_irq_restore(flags);
-		dev_dbg(&adapter->pdev->dev, "tx locked\n");
+		dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n");
 		return NETDEV_TX_LOCKED;
 	}
 
@@ -1546,7 +1712,7 @@
 		/* not enough descriptors */
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&adapter->lock, flags);
-		dev_dbg(&adapter->pdev->dev, "tx busy\n");
+		dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n");
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1588,131 +1754,208 @@
 }
 
 /*
- * atl1_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
+ * atl1_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ * @pt_regs: CPU registers structure
  */
-static struct net_device_stats *atl1_get_stats(struct net_device *netdev)
+static irqreturn_t atl1_intr(int irq, void *data)
+{
+	struct atl1_adapter *adapter = netdev_priv(data);
+	u32 status;
+	u8 update_rx;
+	int max_ints = 10;
+
+	status = adapter->cmb.cmb->int_stats;
+	if (!status)
+		return IRQ_NONE;
+
+	update_rx = 0;
+
+	do {
+		/* clear CMB interrupt status at once */
+		adapter->cmb.cmb->int_stats = 0;
+
+		if (status & ISR_GPHY)	/* clear phy status */
+			atl1_clear_phy_int(adapter);
+
+		/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
+		iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
+
+		/* check if SMB intr */
+		if (status & ISR_SMB)
+			atl1_inc_smb(adapter);
+
+		/* check if PCIE PHY Link down */
+		if (status & ISR_PHY_LINKDOWN) {
+			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+				"pcie phy link down %x\n", status);
+			if (netif_running(adapter->netdev)) {	/* reset MAC */
+				iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+				schedule_work(&adapter->pcie_dma_to_rst_task);
+				return IRQ_HANDLED;
+			}
+		}
+
+		/* check if DMA read/write error ? */
+		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
+			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+				"pcie DMA r/w error (status = 0x%x)\n",
+				status);
+			iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+			schedule_work(&adapter->pcie_dma_to_rst_task);
+			return IRQ_HANDLED;
+		}
+
+		/* link event */
+		if (status & ISR_GPHY) {
+			adapter->soft_stats.tx_carrier_errors++;
+			atl1_check_for_link(adapter);
+		}
+
+		/* transmit event */
+		if (status & ISR_CMB_TX)
+			atl1_intr_tx(adapter);
+
+		/* rx exception */
+		if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
+			ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
+			ISR_HOST_RRD_OV | ISR_CMB_RX))) {
+			if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
+				ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
+				ISR_HOST_RRD_OV))
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"rx exception, ISR = 0x%x\n", status);
+			atl1_intr_rx(adapter);
+		}
+
+		if (--max_ints < 0)
+			break;
+
+	} while ((status = adapter->cmb.cmb->int_stats));
+
+	/* re-enable Interrupt */
+	iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
+	return IRQ_HANDLED;
+}
+
+/*
+ * atl1_watchdog - Timer Call-back
+ * @data: pointer to netdev cast into an unsigned long
+ */
+static void atl1_watchdog(unsigned long data)
+{
+	struct atl1_adapter *adapter = (struct atl1_adapter *)data;
+
+	/* Reset the timer */
+	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+}
+
+/*
+ * atl1_phy_config - Timer Call-back
+ * @data: pointer to netdev cast into an unsigned long
+ */
+static void atl1_phy_config(unsigned long data)
+{
+	struct atl1_adapter *adapter = (struct atl1_adapter *)data;
+	struct atl1_hw *hw = &adapter->hw;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adapter->lock, flags);
+	adapter->phy_timer_pending = false;
+	atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
+	atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg);
+	atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
+	spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+/*
+ * atl1_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void atl1_tx_timeout(struct net_device *netdev)
 {
 	struct atl1_adapter *adapter = netdev_priv(netdev);
-	return &adapter->net_stats;
+	/* Do the reset outside of interrupt context */
+	schedule_work(&adapter->tx_timeout_task);
 }
 
 /*
- * atl1_clean_rx_ring - Free RFD Buffers
- * @adapter: board private structure
+ * Orphaned vendor comment left intact here:
+ * <vendor comment>
+ * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
+ * will assert. We do soft reset <0x1400=1> according
+ * with the SPEC. BUT, it seemes that PCIE or DMA
+ * state-machine will not be reset. DMAR_TO_INT will
+ * assert again and again.
+ * </vendor comment>
  */
-static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
+static void atl1_tx_timeout_task(struct work_struct *work)
 {
-	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
-	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
-	struct atl1_buffer *buffer_info;
-	struct pci_dev *pdev = adapter->pdev;
-	unsigned long size;
-	unsigned int i;
+	struct atl1_adapter *adapter =
+		container_of(work, struct atl1_adapter, tx_timeout_task);
+	struct net_device *netdev = adapter->netdev;
 
-	/* Free all the Rx ring sk_buffs */
-	for (i = 0; i < rfd_ring->count; i++) {
-		buffer_info = &rfd_ring->buffer_info[i];
-		if (buffer_info->dma) {
-			pci_unmap_page(pdev,
-					buffer_info->dma,
-					buffer_info->length,
-					PCI_DMA_FROMDEVICE);
-			buffer_info->dma = 0;
-		}
-		if (buffer_info->skb) {
-			dev_kfree_skb(buffer_info->skb);
-			buffer_info->skb = NULL;
-		}
-	}
-
-	size = sizeof(struct atl1_buffer) * rfd_ring->count;
-	memset(rfd_ring->buffer_info, 0, size);
-
-	/* Zero out the descriptor ring */
-	memset(rfd_ring->desc, 0, rfd_ring->size);
-
-	rfd_ring->next_to_clean = 0;
-	atomic_set(&rfd_ring->next_to_use, 0);
-
-	rrd_ring->next_to_use = 0;
-	atomic_set(&rrd_ring->next_to_clean, 0);
+	netif_device_detach(netdev);
+	atl1_down(adapter);
+	atl1_up(adapter);
+	netif_device_attach(netdev);
 }
 
 /*
- * atl1_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
+ * atl1_link_chg_task - deal with link change events out of interrupt context
  */
-static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
+static void atl1_link_chg_task(struct work_struct *work)
 {
-	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
-	struct atl1_buffer *buffer_info;
-	struct pci_dev *pdev = adapter->pdev;
-	unsigned long size;
-	unsigned int i;
+	struct atl1_adapter *adapter =
+               container_of(work, struct atl1_adapter, link_chg_task);
+	unsigned long flags;
 
-	/* Free all the Tx ring sk_buffs */
-	for (i = 0; i < tpd_ring->count; i++) {
-		buffer_info = &tpd_ring->buffer_info[i];
-		if (buffer_info->dma) {
-			pci_unmap_page(pdev, buffer_info->dma,
-				       buffer_info->length, PCI_DMA_TODEVICE);
-			buffer_info->dma = 0;
-		}
-	}
-
-	for (i = 0; i < tpd_ring->count; i++) {
-		buffer_info = &tpd_ring->buffer_info[i];
-		if (buffer_info->skb) {
-			dev_kfree_skb_any(buffer_info->skb);
-			buffer_info->skb = NULL;
-		}
-	}
-
-	size = sizeof(struct atl1_buffer) * tpd_ring->count;
-	memset(tpd_ring->buffer_info, 0, size);
-
-	/* Zero out the descriptor ring */
-	memset(tpd_ring->desc, 0, tpd_ring->size);
-
-	atomic_set(&tpd_ring->next_to_use, 0);
-	atomic_set(&tpd_ring->next_to_clean, 0);
+	spin_lock_irqsave(&adapter->lock, flags);
+	atl1_check_link(adapter);
+	spin_unlock_irqrestore(&adapter->lock, flags);
 }
 
-/*
- * atl1_free_ring_resources - Free Tx / RX descriptor Resources
- * @adapter: board private structure
- *
- * Free all transmit software resources
- */
-void atl1_free_ring_resources(struct atl1_adapter *adapter)
+static void atl1_vlan_rx_register(struct net_device *netdev,
+	struct vlan_group *grp)
 {
-	struct pci_dev *pdev = adapter->pdev;
-	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
-	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
-	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
-	struct atl1_ring_header *ring_header = &adapter->ring_header;
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
+	u32 ctrl;
 
-	atl1_clean_tx_ring(adapter);
-	atl1_clean_rx_ring(adapter);
+	spin_lock_irqsave(&adapter->lock, flags);
+	/* atl1_irq_disable(adapter); */
+	adapter->vlgrp = grp;
 
-	kfree(tpd_ring->buffer_info);
-	pci_free_consistent(pdev, ring_header->size, ring_header->desc,
-			    ring_header->dma);
+	if (grp) {
+		/* enable VLAN tag insert/strip */
+		ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
+		ctrl |= MAC_CTRL_RMV_VLAN;
+		iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
+	} else {
+		/* disable VLAN tag insert/strip */
+		ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
+		ctrl &= ~MAC_CTRL_RMV_VLAN;
+		iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
+	}
 
-	tpd_ring->buffer_info = NULL;
-	tpd_ring->desc = NULL;
-	tpd_ring->dma = 0;
+	/* atl1_irq_enable(adapter); */
+	spin_unlock_irqrestore(&adapter->lock, flags);
+}
 
-	rfd_ring->buffer_info = NULL;
-	rfd_ring->desc = NULL;
-	rfd_ring->dma = 0;
+static void atl1_restore_vlan(struct atl1_adapter *adapter)
+{
+	atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+}
 
-	rrd_ring->desc = NULL;
-	rrd_ring->dma = 0;
+int atl1_reset(struct atl1_adapter *adapter)
+{
+	int ret;
+
+	ret = atl1_reset_hw(&adapter->hw);
+	if (ret != ATL1_SUCCESS)
+		return ret;
+	return atl1_init_hw(&adapter->hw);
 }
 
 s32 atl1_up(struct atl1_adapter *adapter)
@@ -1723,6 +1966,7 @@
 
 	/* hardware has been reset, we need to reload some things */
 	atl1_set_multi(netdev);
+	atl1_init_ring_ptrs(adapter);
 	atl1_restore_vlan(adapter);
 	err = atl1_alloc_rx_buffers(adapter);
 	if (unlikely(!err))		/* no RX BUFFER allocated */
@@ -1750,11 +1994,6 @@
 	atl1_check_link(adapter);
 	return 0;
 
-	/* FIXME: unreachable code! -- CHS */
-	/* free irq disable any interrupt */
-	iowrite32(0, adapter->hw.hw_addr + REG_IMR);
-	free_irq(adapter->pdev->irq, netdev);
-
 err_up:
 	pci_disable_msi(adapter->pdev);
 	/* free rx_buffers */
@@ -1786,172 +2025,6 @@
 }
 
 /*
- * atl1_change_mtu - Change the Maximum Transfer Unit
- * @netdev: network interface device structure
- * @new_mtu: new value for maximum frame size
- *
- * Returns 0 on success, negative on failure
- */
-static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	int old_mtu = netdev->mtu;
-	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
-
-	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
-	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-		dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
-		return -EINVAL;
-	}
-
-	adapter->hw.max_frame_size = max_frame;
-	adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
-	adapter->rx_buffer_len = (max_frame + 7) & ~7;
-	adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
-
-	netdev->mtu = new_mtu;
-	if ((old_mtu != new_mtu) && netif_running(netdev)) {
-		atl1_down(adapter);
-		atl1_up(adapter);
-	}
-
-	return 0;
-}
-
-/*
- * atl1_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- */
-static int atl1_set_mac(struct net_device *netdev, void *p)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	struct sockaddr *addr = p;
-
-	if (netif_running(netdev))
-		return -EBUSY;
-
-	if (!is_valid_ether_addr(addr->sa_data))
-		return -EADDRNOTAVAIL;
-
-	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
-
-	atl1_set_mac_addr(&adapter->hw);
-	return 0;
-}
-
-/*
- * atl1_watchdog - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
- */
-static void atl1_watchdog(unsigned long data)
-{
-	struct atl1_adapter *adapter = (struct atl1_adapter *)data;
-
-	/* Reset the timer */
-	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
-}
-
-static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	u16 result;
-
-	atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
-
-	return result;
-}
-
-static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-
-	atl1_write_phy_reg(&adapter->hw, reg_num, val);
-}
-
-/*
- * atl1_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
-static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	unsigned long flags;
-	int retval;
-
-	if (!netif_running(netdev))
-		return -EINVAL;
-
-	spin_lock_irqsave(&adapter->lock, flags);
-	retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
-	spin_unlock_irqrestore(&adapter->lock, flags);
-
-	return retval;
-}
-
-/*
- * atl1_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
-static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
-	switch (cmd) {
-	case SIOCGMIIPHY:
-	case SIOCGMIIREG:
-	case SIOCSMIIREG:
-		return atl1_mii_ioctl(netdev, ifr, cmd);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-/*
- * atl1_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
- */
-static void atl1_tx_timeout(struct net_device *netdev)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	/* Do the reset outside of interrupt context */
-	schedule_work(&adapter->tx_timeout_task);
-}
-
-/*
- * atl1_phy_config - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
- */
-static void atl1_phy_config(unsigned long data)
-{
-	struct atl1_adapter *adapter = (struct atl1_adapter *)data;
-	struct atl1_hw *hw = &adapter->hw;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adapter->lock, flags);
-	adapter->phy_timer_pending = false;
-	atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
-	atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg);
-	atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
-	spin_unlock_irqrestore(&adapter->lock, flags);
-}
-
-int atl1_reset(struct atl1_adapter *adapter)
-{
-	int ret;
-
-	ret = atl1_reset_hw(&adapter->hw);
-	if (ret != ATL1_SUCCESS)
-		return ret;
-	return atl1_init_hw(&adapter->hw);
-}
-
-/*
  * atl1_open - Called when a network interface is made active
  * @netdev: network interface device structure
  *
@@ -2003,6 +2076,105 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	struct atl1_hw *hw = &adapter->hw;
+	u32 ctrl = 0;
+	u32 wufc = adapter->wol;
+
+	netif_device_detach(netdev);
+	if (netif_running(netdev))
+		atl1_down(adapter);
+
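+	/* BMSR link status is latched; read twice to get the current state */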
+	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
+	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
+	if (ctrl & BMSR_LSTATUS)
+		wufc &= ~ATL1_WUFC_LNKC;
+
+	/* reduce speed to 10/100M */
+	if (wufc) {
+		atl1_phy_enter_power_saving(hw);
+		/* on resume, let the driver re-set up the link */
+		hw->phy_configured = false;
+		atl1_set_mac_addr(hw);
+		atl1_set_multi(netdev);
+
+		ctrl = 0;
+		/* turn on magic packet wol */
+		if (wufc & ATL1_WUFC_MAG)
+			ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
+
+		/* turn on Link change WOL */
+		if (wufc & ATL1_WUFC_LNKC)
+			ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
+
+		/* turn on all-multi mode if wake on multicast is enabled */
+		ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
+		ctrl &= ~MAC_CTRL_DBG;
+		ctrl &= ~MAC_CTRL_PROMIS_EN;
+		if (wufc & ATL1_WUFC_MC)
+			ctrl |= MAC_CTRL_MC_ALL_EN;
+		else
+			ctrl &= ~MAC_CTRL_MC_ALL_EN;
+
+		/* turn on broadcast mode if wake-on-broadcast is enabled */
+		if (wufc & ATL1_WUFC_BC)
+			ctrl |= MAC_CTRL_BC_EN;
+		else
+			ctrl &= ~MAC_CTRL_BC_EN;
+
+		/* enable RX */
+		ctrl |= MAC_CTRL_RX_EN;
+		iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
+		pci_enable_wake(pdev, PCI_D3hot, 1);
+		pci_enable_wake(pdev, PCI_D3cold, 1);
+	} else {
+		iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_enable_wake(pdev, PCI_D3cold, 0);
+	}
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+static int atl1_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	u32 ret_val;
+
+	pci_set_power_state(pdev, 0);
+	pci_restore_state(pdev);
+
+	ret_val = pci_enable_device(pdev);
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
+	atl1_reset(adapter);
+
+	if (netif_running(netdev))
+		atl1_up(adapter);
+	netif_device_attach(netdev);
+
+	atl1_via_workaround(adapter);
+
+	return 0;
+}
+#else
+#define atl1_suspend NULL
+#define atl1_resume NULL
+#endif
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void atl1_poll_controller(struct net_device *netdev)
 {
@@ -2013,69 +2185,6 @@
 #endif
 
 /*
- * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
- * will assert. We do soft reset <0x1400=1> according
- * with the SPEC. BUT, it seemes that PCIE or DMA
- * state-machine will not be reset. DMAR_TO_INT will
- * assert again and again.
- */
-static void atl1_tx_timeout_task(struct work_struct *work)
-{
-	struct atl1_adapter *adapter =
-		container_of(work, struct atl1_adapter, tx_timeout_task);
-	struct net_device *netdev = adapter->netdev;
-
-	netif_device_detach(netdev);
-	atl1_down(adapter);
-	atl1_up(adapter);
-	netif_device_attach(netdev);
-}
-
-/*
- * atl1_link_chg_task - deal with link change event Out of interrupt context
- */
-static void atl1_link_chg_task(struct work_struct *work)
-{
-	struct atl1_adapter *adapter =
-               container_of(work, struct atl1_adapter, link_chg_task);
-	unsigned long flags;
-
-	spin_lock_irqsave(&adapter->lock, flags);
-	atl1_check_link(adapter);
-	spin_unlock_irqrestore(&adapter->lock, flags);
-}
-
-/*
- * atl1_pcie_patch - Patch for PCIE module
- */
-static void atl1_pcie_patch(struct atl1_adapter *adapter)
-{
-	u32 value;
-	value = 0x6500;
-	iowrite32(value, adapter->hw.hw_addr + 0x12FC);
-	/* pcie flow control mode change */
-	value = ioread32(adapter->hw.hw_addr + 0x1008);
-	value |= 0x8000;
-	iowrite32(value, adapter->hw.hw_addr + 0x1008);
-}
-
-/*
- * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400
- * on PCI Command register is disable.
- * The function enable this bit.
- * Brackett, 2006/03/15
- */
-static void atl1_via_workaround(struct atl1_adapter *adapter)
-{
-	unsigned long value;
-
-	value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
-	if (value & PCI_COMMAND_INTX_DISABLE)
-		value &= ~PCI_COMMAND_INTX_DISABLE;
-	iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
-}
-
-/*
  * atl1_probe - Device Initialization Routine
  * @pdev: PCI device information struct
  * @ent: entry in atl1_pci_tbl
@@ -2087,7 +2196,7 @@
  * and a hardware reset occur.
  */
 static int __devinit atl1_probe(struct pci_dev *pdev,
-			      const struct pci_device_id *ent)
+	const struct pci_device_id *ent)
 {
 	struct net_device *netdev;
 	struct atl1_adapter *adapter;
@@ -2141,7 +2250,7 @@
 	}
 	/* get device revision number */
 	adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
-					(REG_MASTER_CTRL + 2));
+		(REG_MASTER_CTRL + 2));
 	dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
 
 	/* set default ring resource counts */
@@ -2294,7 +2403,8 @@
 	 * address, we need to save the permanent one.
 	 */
 	if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
-		memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN);
+		memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
+			ETH_ALEN);
 		atl1_set_mac_addr(&adapter->hw);
 	}
 
@@ -2306,112 +2416,11 @@
 	pci_disable_device(pdev);
 }
 
-#ifdef CONFIG_PM
-static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	struct atl1_hw *hw = &adapter->hw;
-	u32 ctrl = 0;
-	u32 wufc = adapter->wol;
-
-	netif_device_detach(netdev);
-	if (netif_running(netdev))
-		atl1_down(adapter);
-
-	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
-	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
-	if (ctrl & BMSR_LSTATUS)
-		wufc &= ~ATL1_WUFC_LNKC;
-
-	/* reduce speed to 10/100M */
-	if (wufc) {
-		atl1_phy_enter_power_saving(hw);
-		/* if resume, let driver to re- setup link */
-		hw->phy_configured = false;
-		atl1_set_mac_addr(hw);
-		atl1_set_multi(netdev);
-
-		ctrl = 0;
-		/* turn on magic packet wol */
-		if (wufc & ATL1_WUFC_MAG)
-			ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
-
-		/* turn on Link change WOL */
-		if (wufc & ATL1_WUFC_LNKC)
-			ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
-		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
-
-		/* turn on all-multi mode if wake on multicast is enabled */
-		ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
-		ctrl &= ~MAC_CTRL_DBG;
-		ctrl &= ~MAC_CTRL_PROMIS_EN;
-		if (wufc & ATL1_WUFC_MC)
-			ctrl |= MAC_CTRL_MC_ALL_EN;
-		else
-			ctrl &= ~MAC_CTRL_MC_ALL_EN;
-
-		/* turn on broadcast mode if wake on-BC is enabled */
-		if (wufc & ATL1_WUFC_BC)
-			ctrl |= MAC_CTRL_BC_EN;
-		else
-			ctrl &= ~MAC_CTRL_BC_EN;
-
-		/* enable RX */
-		ctrl |= MAC_CTRL_RX_EN;
-		iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
-		pci_enable_wake(pdev, PCI_D3hot, 1);
-		pci_enable_wake(pdev, PCI_D3cold, 1);	/* 4 == D3 cold */
-	} else {
-		iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
-		pci_enable_wake(pdev, PCI_D3hot, 0);
-		pci_enable_wake(pdev, PCI_D3cold, 0);	/* 4 == D3 cold */
-	}
-
-	pci_save_state(pdev);
-	pci_disable_device(pdev);
-
-	pci_set_power_state(pdev, PCI_D3hot);
-
-	return 0;
-}
-
-static int atl1_resume(struct pci_dev *pdev)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	u32 ret_val;
-
-	pci_set_power_state(pdev, 0);
-	pci_restore_state(pdev);
-
-	ret_val = pci_enable_device(pdev);
-	pci_enable_wake(pdev, PCI_D3hot, 0);
-	pci_enable_wake(pdev, PCI_D3cold, 0);
-
-	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
-	atl1_reset(adapter);
-
-	if (netif_running(netdev))
-		atl1_up(adapter);
-	netif_device_attach(netdev);
-
-	atl1_via_workaround(adapter);
-
-	return 0;
-}
-#else
-#define atl1_suspend NULL
-#define atl1_resume NULL
-#endif
-
 static struct pci_driver atl1_driver = {
 	.name = atl1_driver_name,
 	.id_table = atl1_pci_tbl,
 	.probe = atl1_probe,
 	.remove = __devexit_p(atl1_remove),
-	/* Power Managment Hooks */
-	/* probably broken right now -- CHS */
 	.suspend = atl1_suspend,
 	.resume = atl1_resume
 };
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 96fb0ec..37f1b6f 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1519,14 +1519,13 @@
 	u8 *pwol_pattern;
 	u8 pwol_mask[B44_PMASK_SIZE];
 
-	pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
+	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
 	if (!pwol_pattern) {
 		printk(KERN_ERR PFX "Memory not available for WOL\n");
 		return;
 	}
 
 	/* Ipv4 magic packet pattern - pattern 0.*/
-	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
 	memset(pwol_mask, 0, B44_PMASK_SIZE);
 	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
 				  B44_ETHIPV4UDP_HLEN);
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
new file mode 100644
index 0000000..9a08d65
--- /dev/null
+++ b/drivers/net/bfin_mac.c
@@ -0,0 +1,1009 @@
+/*
+ * File:	drivers/net/bfin_mac.c
+ * Based on:
+ * Maintainer:
+ * 		Bryan Wu <bryan.wu@analog.com>
+ *
+ * Original author:
+ * 		Luke Yang <luke.yang@analog.com>
+ *
+ * Created:
+ * Description:
+ *
+ * Modified:
+ *		Copyright 2004-2006 Analog Devices Inc.
+ *
+ * Bugs:	Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software ;  you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ;  either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ;  without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program ;  see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/platform_device.h>
+
+#include <asm/dma.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/blackfin.h>
+#include <asm/cacheflush.h>
+#include <asm/portmux.h>
+
+#include "bfin_mac.h"
+
+#define DRV_NAME	"bfin_mac"
+#define DRV_VERSION	"1.1"
+#define DRV_AUTHOR	"Bryan Wu, Luke Yang"
+#define DRV_DESC	"Blackfin BF53[67] on-chip Ethernet MAC driver"
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRV_DESC);
+
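+/*
+ * Descriptor and status memory comes from on-chip L1 data SRAM when
+ * CONFIG_BFIN_MAC_USE_L1 is set, otherwise from DMA-coherent memory.
+ */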
+#if defined(CONFIG_BFIN_MAC_USE_L1)
+# define bfin_mac_alloc(dma_handle, size)  l1_data_sram_zalloc(size)
+# define bfin_mac_free(dma_handle, ptr)    l1_data_sram_free(ptr)
+#else
+# define bfin_mac_alloc(dma_handle, size) \
+	dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL)
+# define bfin_mac_free(dma_handle, ptr) \
+	dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle)
+#endif
+
+#define PKT_BUF_SZ 1580
+
+#define MAX_TIMEOUT_CNT	500
+
+/* pointers to maintain transmit list */
+static struct net_dma_desc_tx *tx_list_head;
+static struct net_dma_desc_tx *tx_list_tail;
+static struct net_dma_desc_rx *rx_list_head;
+static struct net_dma_desc_rx *rx_list_tail;
+static struct net_dma_desc_rx *current_rx_ptr;
+static struct net_dma_desc_tx *current_tx_ptr;
+static struct net_dma_desc_tx *tx_desc;
+static struct net_dma_desc_rx *rx_desc;
+
+static void desc_list_free(void)
+{
+	struct net_dma_desc_rx *r;
+	struct net_dma_desc_tx *t;
+	int i;
+#if !defined(CONFIG_BFIN_MAC_USE_L1)
+	dma_addr_t dma_handle = 0;
+#endif
+
+	if (tx_desc) {
+		t = tx_list_head;
+		for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
+			if (t) {
+				if (t->skb) {
+					dev_kfree_skb(t->skb);
+					t->skb = NULL;
+				}
+				t = t->next;
+			}
+		}
+		bfin_mac_free(dma_handle, tx_desc);
+	}
+
+	if (rx_desc) {
+		r = rx_list_head;
+		for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
+			if (r) {
+				if (r->skb) {
+					dev_kfree_skb(r->skb);
+					r->skb = NULL;
+				}
+				r = r->next;
+			}
+		}
+		bfin_mac_free(dma_handle, rx_desc);
+	}
+}
+
+static int desc_list_init(void)
+{
+	int i;
+	struct sk_buff *new_skb;
+#if !defined(CONFIG_BFIN_MAC_USE_L1)
+	/*
+	 * This dma_handle is useless in Blackfin dma_alloc_coherent().
+	 * The real DMA handle is the return value of dma_alloc_coherent().
+	 */
+	dma_addr_t dma_handle;
+#endif
+
+	tx_desc = bfin_mac_alloc(&dma_handle,
+				sizeof(struct net_dma_desc_tx) *
+				CONFIG_BFIN_TX_DESC_NUM);
+	if (tx_desc == NULL)
+		goto init_error;
+
+	rx_desc = bfin_mac_alloc(&dma_handle,
+				sizeof(struct net_dma_desc_rx) *
+				CONFIG_BFIN_RX_DESC_NUM);
+	if (rx_desc == NULL)
+		goto init_error;
+
+	/* init tx_list */
+	tx_list_head = tx_list_tail = tx_desc;
+
+	for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
+		struct net_dma_desc_tx *t = tx_desc + i;
+		struct dma_descriptor *a = &(t->desc_a);
+		struct dma_descriptor *b = &(t->desc_b);
+
+		/*
+		 * disable DMA
+		 * read from memory WNR = 0
+		 * wordsize is 32 bits
+		 * 6 half words is desc size
+		 * large desc flow
+		 */
+		a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
+		a->start_addr = (unsigned long)t->packet;
+		a->x_count = 0;
+		a->next_dma_desc = b;
+
+		/*
+		 * enabled DMA
+		 * write to memory WNR = 1
+		 * wordsize is 32 bits
+		 * disable interrupt
+		 * 6 half words is desc size
+		 * large desc flow
+		 */
+		b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
+		b->start_addr = (unsigned long)(&(t->status));
+		b->x_count = 0;
+
+		t->skb = NULL;
+		tx_list_tail->desc_b.next_dma_desc = a;
+		tx_list_tail->next = t;
+		tx_list_tail = t;
+	}
+	tx_list_tail->next = tx_list_head;	/* tx_list is a circle */
+	tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
+	current_tx_ptr = tx_list_head;
+
+	/* init rx_list */
+	rx_list_head = rx_list_tail = rx_desc;
+
+	for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
+		struct net_dma_desc_rx *r = rx_desc + i;
+		struct dma_descriptor *a = &(r->desc_a);
+		struct dma_descriptor *b = &(r->desc_b);
+
+		/* allocate a new skb for next time receive */
+		new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
+		if (!new_skb) {
+			printk(KERN_NOTICE DRV_NAME
+			       ": init: low on mem - packet dropped\n");
+			goto init_error;
+		}
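+		/* reserve 2 bytes to make room for the RXDWA padding word */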
+		skb_reserve(new_skb, 2);
+		r->skb = new_skb;
+
+		/*
+		 * enabled DMA
+		 * write to memory WNR = 1
+		 * wordsize is 32 bits
+		 * disable interrupt
+		 * 6 half words is desc size
+		 * large desc flow
+		 */
+		a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
+		/* since RXDWA is enabled */
+		a->start_addr = (unsigned long)new_skb->data - 2;
+		a->x_count = 0;
+		a->next_dma_desc = b;
+
+		/*
+		 * enabled DMA
+		 * write to memory WNR = 1
+		 * wordsize is 32 bits
+		 * enable interrupt
+		 * 6 half words is desc size
+		 * large desc flow
+		 */
+		b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
+				NDSIZE_6 | DMAFLOW_LARGE;
+		b->start_addr = (unsigned long)(&(r->status));
+		b->x_count = 0;
+
+		rx_list_tail->desc_b.next_dma_desc = a;
+		rx_list_tail->next = r;
+		rx_list_tail = r;
+	}
+	rx_list_tail->next = rx_list_head;	/* rx_list is a circle */
+	rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
+	current_rx_ptr = rx_list_head;
+
+	return 0;
+
+init_error:
+	desc_list_free();
+	printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
+	return -ENOMEM;
+}
+
+
+/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
+
+/* Set FER regs to MUX in Ethernet pins */
+static int setup_pin_mux(int action)
+{
+#if defined(CONFIG_BFIN_MAC_RMII)
+	u16 pin_req[] = P_RMII0;
+#else
+	u16 pin_req[] = P_MII0;
+#endif
+
+	if (action) {
+		if (peripheral_request_list(pin_req, DRV_NAME)) {
+			printk(KERN_ERR DRV_NAME
+			": Requesting Peripherals failed\n");
+			return -EFAULT;
+		}
+	} else
+		peripheral_free_list(pin_req);
+
+	return 0;
+}
+
+/* Wait until the previous MDC/MDIO transaction has completed */
+static void poll_mdc_done(void)
+{
+	int timeout_cnt = MAX_TIMEOUT_CNT;
+
+	/* poll the STABUSY bit */
+	while ((bfin_read_EMAC_STAADD()) & STABUSY) {
+		mdelay(10);
+		if (timeout_cnt-- < 0) {
+			printk(KERN_ERR DRV_NAME
+			": timed out waiting for MDC/MDIO transaction to complete\n");
+			break;
+		}
+	}
+}
+
+/* Read an off-chip register in a PHY through the MDC/MDIO port */
+static u16 read_phy_reg(u16 PHYAddr, u16 RegAddr)
+{
+	poll_mdc_done();
+	/* read mode */
+	bfin_write_EMAC_STAADD(SET_PHYAD(PHYAddr) |
+				SET_REGAD(RegAddr) |
+				STABUSY);
+	poll_mdc_done();
+
+	return (u16) bfin_read_EMAC_STADAT();
+}
+
+/* Write an off-chip register in a PHY through the MDC/MDIO port */
+static void raw_write_phy_reg(u16 PHYAddr, u16 RegAddr, u32 Data)
+{
+	bfin_write_EMAC_STADAT(Data);
+
+	/* write mode */
+	bfin_write_EMAC_STAADD(SET_PHYAD(PHYAddr) |
+				SET_REGAD(RegAddr) |
+				STAOP |
+				STABUSY);
+
+	poll_mdc_done();
+}
+
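+/* like raw_write_phy_reg(), but waits for any pending MDIO transaction first */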
+static void write_phy_reg(u16 PHYAddr, u16 RegAddr, u32 Data)
+{
+	poll_mdc_done();
+	raw_write_phy_reg(PHYAddr, RegAddr, Data);
+}
+
+/* set up the phy */
+static void bf537mac_setphy(struct net_device *dev)
+{
+	u16 phydat;
+	struct bf537mac_local *lp = netdev_priv(dev);
+
+	/* Program PHY registers */
+	pr_debug("start setting up phy\n");
+
+	/* issue a reset */
+	raw_write_phy_reg(lp->PhyAddr, PHYREG_MODECTL, 0x8000);
+
+	/* wait half a second */
+	msleep(500);
+
+	phydat = read_phy_reg(lp->PhyAddr, PHYREG_MODECTL);
+
+	/* advertise flow control supported */
+	phydat = read_phy_reg(lp->PhyAddr, PHYREG_ANAR);
+	phydat |= (1 << 10);
+	write_phy_reg(lp->PhyAddr, PHYREG_ANAR, phydat);
+
+	phydat = 0;
+	if (lp->Negotiate)
+		phydat |= 0x1000;	/* enable auto negotiation */
+	else {
+		if (lp->FullDuplex)
+			phydat |= (1 << 8);	/* full duplex */
+		else
+			phydat &= (~(1 << 8));	/* half duplex */
+
+		if (!lp->Port10)
+			phydat |= (1 << 13);	/* 100 Mbps */
+		else
+			phydat &= (~(1 << 13));	/* 10 Mbps */
+	}
+
+	if (lp->Loopback)
+		phydat |= (1 << 14);	/* enable TX->RX loopback */
+
+	write_phy_reg(lp->PhyAddr, PHYREG_MODECTL, phydat);
+	msleep(500);
+
+	phydat = read_phy_reg(lp->PhyAddr, PHYREG_MODECTL);
+	/* check for SMSC PHY */
+	if ((read_phy_reg(lp->PhyAddr, PHYREG_PHYID1) == 0x7) &&
+	((read_phy_reg(lp->PhyAddr, PHYREG_PHYID2) & 0xfff0) == 0xC0A0)) {
+		/*
+		 * we have an SMSC PHY, so request an interrupt
+		 * on a link-down condition
+		 */
+
+		/* enable interrupts */
+		write_phy_reg(lp->PhyAddr, 30, 0x0ff);
+	}
+}
+
+/**************************************************************************/
+void setup_system_regs(struct net_device *dev)
+{
+	int phyaddr;
+	unsigned short sysctl, phydat;
+	u32 opmode;
+	struct bf537mac_local *lp = netdev_priv(dev);
+	int count = 0;
+
+	phyaddr = lp->PhyAddr;
+
+	/* Enable PHY output */
+	if (!(bfin_read_VR_CTL() & PHYCLKOE))
+		bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
+
+	/* MDC  = 2.5 MHz */
+	sysctl = SET_MDCDIV(24);
+	/* Odd word alignment for Receive Frame DMA word */
+	/* Configure checksum support and receive frame word alignment */
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+	sysctl |= RXDWA | RXCKS;
+#else
+	sysctl |= RXDWA;
+#endif
+	bfin_write_EMAC_SYSCTL(sysctl);
+	/* auto negotiation on  */
+	/* full duplex          */
+	/* 100 Mbps             */
+	phydat = PHY_ANEG_EN | PHY_DUPLEX | PHY_SPD_SET;
+	write_phy_reg(phyaddr, PHYREG_MODECTL, phydat);
+
+	/* test if full duplex supported */
+	do {
+		msleep(100);
+		phydat = read_phy_reg(phyaddr, PHYREG_MODESTAT);
+		if (count > 30) {
+			printk(KERN_NOTICE DRV_NAME ": Link is down\n");
+			printk(KERN_NOTICE DRV_NAME
+				 ": please check your network connection\n");
+			break;
+		}
+		count++;
+	} while (!(phydat & 0x0004));
+
+	phydat = read_phy_reg(phyaddr, PHYREG_ANLPAR);
+
+	if ((phydat & 0x0100) || (phydat & 0x0040)) {
+		opmode = FDMODE;
+	} else {
+		opmode = 0;
+		printk(KERN_INFO DRV_NAME
+			": Network is set to half duplex\n");
+	}
+
+#if defined(CONFIG_BFIN_MAC_RMII)
+	opmode |= RMII; /* for now, only 100 Mbit is supported */
+#endif
+
+	bfin_write_EMAC_OPMODE(opmode);
+
+	bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
+
+	/* Initialize the TX DMA channel registers */
+	bfin_write_DMA2_X_COUNT(0);
+	bfin_write_DMA2_X_MODIFY(4);
+	bfin_write_DMA2_Y_COUNT(0);
+	bfin_write_DMA2_Y_MODIFY(0);
+
+	/* Initialize the RX DMA channel registers */
+	bfin_write_DMA1_X_COUNT(0);
+	bfin_write_DMA1_X_MODIFY(4);
+	bfin_write_DMA1_Y_COUNT(0);
+	bfin_write_DMA1_Y_MODIFY(0);
+}
+
+void setup_mac_addr(u8 * mac_addr)
+{
+	u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]);
+	u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]);
+
+	/* this depends on a little-endian machine */
+	bfin_write_EMAC_ADDRLO(addr_low);
+	bfin_write_EMAC_ADDRHI(addr_hi);
+}
+
+static void adjust_tx_list(void)
+{
+	int timeout_cnt = MAX_TIMEOUT_CNT;
+
+	if (tx_list_head->status.status_word != 0
+	    && current_tx_ptr != tx_list_head) {
+		goto adjust_head;	/* something was released, go clean up the head */
+	}
+
+	/*
+	 * if nothing released, check wait condition
+	 * current's next can not be the head,
+	 * otherwise the dma will not stop as we want
+	 */
+	if (current_tx_ptr->next->next == tx_list_head) {
+		while (tx_list_head->status.status_word == 0) {
+			mdelay(10);
+			if (tx_list_head->status.status_word != 0
+			    || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
+				goto adjust_head;
+			}
+			if (timeout_cnt-- < 0) {
+				printk(KERN_ERR DRV_NAME
+				": timed out waiting to adjust tx list head\n");
+				break;
+			}
+		}
+		if (tx_list_head->status.status_word != 0) {
+			goto adjust_head;
+		}
+	}
+
+	return;
+
+adjust_head:
+	do {
+		tx_list_head->desc_a.config &= ~DMAEN;
+		tx_list_head->status.status_word = 0;
+		if (tx_list_head->skb) {
+			dev_kfree_skb(tx_list_head->skb);
+			tx_list_head->skb = NULL;
+		} else {
+			printk(KERN_ERR DRV_NAME
+			       ": no sk_buff in a transmitted frame!\n");
+		}
+		tx_list_head = tx_list_head->next;
+	} while (tx_list_head->status.status_word != 0
+		 && current_tx_ptr != tx_list_head);
+	return;
+
+}
+
+static int bf537mac_hard_start_xmit(struct sk_buff *skb,
+				struct net_device *dev)
+{
+	struct bf537mac_local *lp = netdev_priv(dev);
+	unsigned int data;
+
+	current_tx_ptr->skb = skb;
+
+	/*
+	 * Is skb->data always 16-bit aligned?
+	 * Do we need to memcpy((char *)(tail->packet + 2), skb->data, len)?
+	 */
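+	/*
+	 * If skb->data is 2-byte but not 4-byte aligned, store the frame
+	 * length in the two bytes just before the payload and DMA the skb
+	 * buffer in place; otherwise copy the frame into the descriptor's
+	 * own packet buffer.
+	 */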
+	if ((((unsigned int)(skb->data)) & 0x02) == 2) {
+		/* move skb->data to current_tx_ptr payload */
+		data = (unsigned int)(skb->data) - 2;
+		*((unsigned short *)data) = (unsigned short)(skb->len);
+		current_tx_ptr->desc_a.start_addr = (unsigned long)data;
+		/* this is important! */
+		blackfin_dcache_flush_range(data, (data + (skb->len)) + 2);
+
+	} else {
+		*((unsigned short *)(current_tx_ptr->packet)) =
+		    (unsigned short)(skb->len);
+		memcpy((char *)(current_tx_ptr->packet + 2), skb->data,
+		       (skb->len));
+		current_tx_ptr->desc_a.start_addr =
+		    (unsigned long)current_tx_ptr->packet;
+		if (current_tx_ptr->status.status_word != 0)
+			current_tx_ptr->status.status_word = 0;
+		blackfin_dcache_flush_range((unsigned int)current_tx_ptr->
+					    packet,
+					    (unsigned int)(current_tx_ptr->
+							   packet + skb->len) +
+					    2);
+	}
+
+	/* enable this packet's dma */
+	current_tx_ptr->desc_a.config |= DMAEN;
+
+	/* tx dma is running, just return */
+	if (bfin_read_DMA2_IRQ_STATUS() & 0x08)
+		goto out;
+
+	/* tx dma is not running */
+	bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a));
+	/* dma enabled, read from memory, size is 6 */
+	bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config);
+	/* Turn on the EMAC tx */
+	bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
+
+out:
+	adjust_tx_list();
+	current_tx_ptr = current_tx_ptr->next;
+	dev->trans_start = jiffies;
+	lp->stats.tx_packets++;
+	lp->stats.tx_bytes += (skb->len);
+	return 0;
+}
+
+static void bf537mac_rx(struct net_device *dev)
+{
+	struct sk_buff *skb, *new_skb;
+	struct bf537mac_local *lp = netdev_priv(dev);
+	unsigned short len;
+
+	/* allocate a new skb for next time receive */
+	skb = current_rx_ptr->skb;
+	new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
+	if (!new_skb) {
+		printk(KERN_NOTICE DRV_NAME
+		       ": rx: low on mem - packet dropped\n");
+		lp->stats.rx_dropped++;
+		goto out;
+	}
+	/* reserve 2 bytes for RXDWA padding */
+	skb_reserve(new_skb, 2);
+	current_rx_ptr->skb = new_skb;
+	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
+
+	len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
+	skb_put(skb, len);
+	blackfin_dcache_invalidate_range((unsigned long)skb->head,
+					 (unsigned long)skb->tail);
+
+	dev->last_rx = jiffies;
+	skb->dev = dev;
+	skb->protocol = eth_type_trans(skb, dev);
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+	skb->csum = current_rx_ptr->status.ip_payload_csum;
+	skb->ip_summed = CHECKSUM_PARTIAL;
+#endif
+
+	netif_rx(skb);
+	lp->stats.rx_packets++;
+	lp->stats.rx_bytes += len;
+	current_rx_ptr->status.status_word = 0x00000000;
+	current_rx_ptr = current_rx_ptr->next;
+
+out:
+	return;
+}
+
+/* interrupt routine to handle rx and error signal */
+static irqreturn_t bf537mac_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	int number = 0;
+
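+	/* drain all completed RX descriptors, then ack DMA_DONE and DMA_ERR */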
+get_one_packet:
+	if (current_rx_ptr->status.status_word == 0) {
+		/* no more new packet received */
+		if (number == 0) {
+			if (current_rx_ptr->next->status.status_word != 0) {
+				current_rx_ptr = current_rx_ptr->next;
+				goto real_rx;
+			}
+		}
+		bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
+					   DMA_DONE | DMA_ERR);
+		return IRQ_HANDLED;
+	}
+
+real_rx:
+	bf537mac_rx(dev);
+	number++;
+	goto get_one_packet;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bf537mac_poll(struct net_device *dev)
+{
+	disable_irq(IRQ_MAC_RX);
+	bf537mac_interrupt(IRQ_MAC_RX, dev);
+	enable_irq(IRQ_MAC_RX);
+}
+#endif				/* CONFIG_NET_POLL_CONTROLLER */
+
+static void bf537mac_reset(void)
+{
+	unsigned int opmode;
+
+	opmode = bfin_read_EMAC_OPMODE();
+	opmode &= (~RE);
+	opmode &= (~TE);
+	/* Turn off the EMAC */
+	bfin_write_EMAC_OPMODE(opmode);
+}
+
+/*
+ * Enable Interrupts, Receive, and Transmit
+ */
+static int bf537mac_enable(struct net_device *dev)
+{
+	u32 opmode;
+
+	pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+	/* Set RX DMA */
+	bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
+	bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);
+
+	/* Wait MII done */
+	poll_mdc_done();
+
+	/* We enable only RX here */
+	/* ASTP   : Enable Automatic Pad Stripping
+	   PR     : Promiscuous Mode for test
+	   PSF    : Receive frames with total length less than 64 bytes.
+	   FDMODE : Full Duplex Mode
+	   LB     : Internal Loopback for test
+	   RE     : Receiver Enable */
+	opmode = bfin_read_EMAC_OPMODE();
+	if (opmode & FDMODE)
+		opmode |= PSF;
+	else
+		opmode |= DRO | DC | PSF;
+	opmode |= RE;
+
+#if defined(CONFIG_BFIN_MAC_RMII)
+	opmode |= RMII; /* for now, only 100 Mbit is supported */
+#ifdef CONFIG_BF_REV_0_2
+	opmode |= TE;
+#endif
+#endif
+	/* Turn on the EMAC rx */
+	bfin_write_EMAC_OPMODE(opmode);
+
+	return 0;
+}
+
+/* Our watchdog timed out. Called by the networking layer */
+static void bf537mac_timeout(struct net_device *dev)
+{
+	pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+	bf537mac_reset();
+
+	/* reset tx queue */
+	tx_list_tail = tx_list_head->next;
+
+	bf537mac_enable(dev);
+
+	/* We can accept TX packets again */
+	dev->trans_start = jiffies;
+	netif_wake_queue(dev);
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *bf537mac_query_statistics(struct net_device
+							  *dev)
+{
+	struct bf537mac_local *lp = netdev_priv(dev);
+
+	pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+	return &lp->stats;
+}
+
+/*
+ * This routine will, depending on the values passed to it,
+ * either make it accept multicast packets, go into
+ * promiscuous mode (for TCPDUMP and cousins) or accept
+ * a select set of multicast packets
+ */
+static void bf537mac_set_multicast_list(struct net_device *dev)
+{
+	u32 sysctl;
+
+	if (dev->flags & IFF_PROMISC) {
+		printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
+		sysctl = bfin_read_EMAC_OPMODE();
+		sysctl |= RAF;
+		bfin_write_EMAC_OPMODE(sysctl);
+	} else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+		/* accept all multicast */
+		sysctl = bfin_read_EMAC_OPMODE();
+		sysctl |= PAM;
+		bfin_write_EMAC_OPMODE(sysctl);
+	} else {
+		/* clear promisc or multicast mode */
+		sysctl = bfin_read_EMAC_OPMODE();
+		sysctl &= ~(RAF | PAM);
+		bfin_write_EMAC_OPMODE(sysctl);
+	}
+}
+
+/*
+ * this puts the device in an inactive state
+ */
+static void bf537mac_shutdown(struct net_device *dev)
+{
+	/* Turn off the EMAC */
+	bfin_write_EMAC_OPMODE(0x00000000);
+	/* Turn off the EMAC RX DMA */
+	bfin_write_DMA1_CONFIG(0x0000);
+	bfin_write_DMA2_CONFIG(0x0000);
+}
+
+/*
+ * Open and Initialize the interface
+ *
+ * Set up everything, reset the card, etc..
+ */
+static int bf537mac_open(struct net_device *dev)
+{
+	pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+	/*
+	 * Check that the address is valid.  If it's not, refuse
+	 * to bring the device up.  The user must specify an
+	 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
+	 */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n");
+		return -EINVAL;
+	}
+
+	/* initial rx and tx list */
+	desc_list_init();
+
+	bf537mac_setphy(dev);
+	setup_system_regs(dev);
+	bf537mac_reset();
+	bf537mac_enable(dev);
+
+	pr_debug("hardware init finished\n");
+	netif_start_queue(dev);
+	netif_carrier_on(dev);
+
+	return 0;
+}
+
+/*
+ *
+ * this makes the board clean up everything that it can
+ * and not talk to the outside world.   Caused by
+ * an 'ifconfig ethX down'
+ */
+static int bf537mac_close(struct net_device *dev)
+{
+	pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+	netif_stop_queue(dev);
+	netif_carrier_off(dev);
+
+	/* clear everything */
+	bf537mac_shutdown(dev);
+
+	/* free the rx/tx buffers */
+	desc_list_free();
+
+	return 0;
+}
+
+static int __init bf537mac_probe(struct net_device *dev)
+{
+	struct bf537mac_local *lp = netdev_priv(dev);
+	int retval;
+
+	/* Grab the MAC address in the MAC */
+	*(__le32 *) (&(dev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
+	*(__le16 *) (&(dev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
+
+	/* probe mac */
+	/* TODO: how to probe? which register is the revision register? */
+	bfin_write_EMAC_ADDRLO(0x12345678);
+	if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
+		pr_debug("can't detect bf537 mac!\n");
+		retval = -ENODEV;
+		goto err_out;
+	}
+
+	/* set the GPIO pins to Ethernet mode */
+	retval = setup_pin_mux(1);
+
+	if (retval)
+		return retval;
+
+	/* Is it valid? (Did the bootloader initialize it?) */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		/* Grab the MAC from the board somehow - this is done in the
+		   arch/blackfin/mach-bf537/boards/eth_mac.c */
+		get_bf537_ether_addr(dev->dev_addr);
+	}
+
+	/* If still not valid, get a random one */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		random_ether_addr(dev->dev_addr);
+	}
+
+	setup_mac_addr(dev->dev_addr);
+
+	/* Fill in the fields of the device structure with ethernet values. */
+	ether_setup(dev);
+
+	dev->open = bf537mac_open;
+	dev->stop = bf537mac_close;
+	dev->hard_start_xmit = bf537mac_hard_start_xmit;
+	dev->tx_timeout = bf537mac_timeout;
+	dev->get_stats = bf537mac_query_statistics;
+	dev->set_multicast_list = bf537mac_set_multicast_list;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = bf537mac_poll;
+#endif
+
+	/* fill in some of the fields */
+	lp->version = 1;
+	lp->PhyAddr = 0x01;
+	lp->CLKIN = 25;
+	lp->FullDuplex = 0;
+	lp->Negotiate = 1;
+	lp->FlowControl = 0;
+	spin_lock_init(&lp->lock);
+
+	/* now, enable interrupts */
+	/* register irq handler */
+	if (request_irq
+	    (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED,
+	     "BFIN537_MAC_RX", dev)) {
+		printk(KERN_WARNING DRV_NAME
+		       ": Unable to attach BlackFin MAC RX interrupt\n");
+		return -EBUSY;
+	}
+
+	/* Enable PHY output early */
+	if (!(bfin_read_VR_CTL() & PHYCLKOE))
+		bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
+
+	retval = register_netdev(dev);
+	if (retval == 0) {
+		/* now, print out the card info, in a short format.. */
+		printk(KERN_INFO "%s: Version %s, %s\n",
+			 DRV_NAME, DRV_VERSION, DRV_DESC);
+	}
+
+err_out:
+	return retval;
+}
+
+static int bfin_mac_probe(struct platform_device *pdev)
+{
+	struct net_device *ndev;
+
+	ndev = alloc_etherdev(sizeof(struct bf537mac_local));
+	if (!ndev) {
+		printk(KERN_WARNING DRV_NAME ": could not allocate device\n");
+		return -ENOMEM;
+	}
+
+	SET_MODULE_OWNER(ndev);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+
+	platform_set_drvdata(pdev, ndev);
+
+	if (bf537mac_probe(ndev) != 0) {
+		platform_set_drvdata(pdev, NULL);
+		free_netdev(ndev);
+		printk(KERN_WARNING DRV_NAME ": not found\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int bfin_mac_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	unregister_netdev(ndev);
+
+	free_irq(IRQ_MAC_RX, ndev);
+
+	free_netdev(ndev);
+
+	setup_pin_mux(0);
+
+	return 0;
+}
+
+static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	return 0;
+}
+
+static int bfin_mac_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static struct platform_driver bfin_mac_driver = {
+	.probe = bfin_mac_probe,
+	.remove = bfin_mac_remove,
+	.resume = bfin_mac_resume,
+	.suspend = bfin_mac_suspend,
+	.driver = {
+		   .name = DRV_NAME,
+		   },
+};
+
+static int __init bfin_mac_init(void)
+{
+	return platform_driver_register(&bfin_mac_driver);
+}
+
+module_init(bfin_mac_init);
+
+static void __exit bfin_mac_cleanup(void)
+{
+	platform_driver_unregister(&bfin_mac_driver);
+}
+
+module_exit(bfin_mac_cleanup);
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
new file mode 100644
index 0000000..af87189
--- /dev/null
+++ b/drivers/net/bfin_mac.h
@@ -0,0 +1,132 @@
+/*
+ * File:	drivers/net/bfin_mac.h
+ * Based on:
+ * Maintainer:
+ * 		Bryan Wu <bryan.wu@analog.com>
+ *
+ * Original author:
+ * 		Luke Yang <luke.yang@analog.com>
+ *
+ * Created:
+ * Description:
+ *
+ * Modified:
+ *		Copyright 2004-2006 Analog Devices Inc.
+ *
+ * Bugs:	Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software ;  you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ;  either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ;  without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program ;  see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * PHY REGISTER NAMES
+ */
+#define PHYREG_MODECTL		0x0000
+#define PHYREG_MODESTAT		0x0001
+#define PHYREG_PHYID1		0x0002
+#define PHYREG_PHYID2		0x0003
+#define PHYREG_ANAR		0x0004
+#define PHYREG_ANLPAR		0x0005
+#define PHYREG_ANER		0x0006
+#define PHYREG_NSR		0x0010
+#define PHYREG_LBREMR		0x0011
+#define PHYREG_REC		0x0012
+#define PHYREG_10CFG		0x0013
+#define PHYREG_PHY1_1		0x0014
+#define PHYREG_PHY1_2		0x0015
+#define PHYREG_PHY2		0x0016
+#define PHYREG_TW_1		0x0017
+#define PHYREG_TW_2		0x0018
+#define PHYREG_TEST		0x0019
+
+#define PHY_RESET		0x8000
+#define PHY_ANEG_EN		0x1000
+#define PHY_DUPLEX		0x0100
+#define PHY_SPD_SET		0x2000
+
+#define BFIN_MAC_CSUM_OFFLOAD
+
+struct dma_descriptor {
+	struct dma_descriptor *next_dma_desc;
+	unsigned long start_addr;
+	unsigned short config;
+	unsigned short x_count;
+};
+
+struct status_area_rx {
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+	unsigned short ip_hdr_csum;	/* ip header checksum */
+	/* ip payload(udp or tcp or others) checksum */
+	unsigned short ip_payload_csum;
+#endif
+	unsigned long status_word;	/* the frame status word */
+};
+
+struct status_area_tx {
+	unsigned long status_word;	/* the frame status word */
+};
+
+/* use two descriptors for a packet */
+struct net_dma_desc_rx {
+	struct net_dma_desc_rx *next;
+	struct sk_buff *skb;
+	struct dma_descriptor desc_a;
+	struct dma_descriptor desc_b;
+	struct status_area_rx status;
+};
+
+/* use two descriptors for a packet */
+struct net_dma_desc_tx {
+	struct net_dma_desc_tx *next;
+	struct sk_buff *skb;
+	struct dma_descriptor desc_a;
+	struct dma_descriptor desc_b;
+	unsigned char packet[1560];
+	struct status_area_tx status;
+};
+
+struct bf537mac_local {
+	/*
+	 * these are things that the kernel wants me to keep, so users
+	 * can find out semi-useless statistics of how well the card is
+	 * performing
+	 */
+	struct net_device_stats stats;
+
+	int version;
+
+	int FlowEnabled;	/* record if data flow is active */
+	int EtherIntIVG;	/* IVG for the ethernet interrupt */
+	int RXIVG;		/* IVG for the RX completion */
+	int TXIVG;		/* IVG for the TX completion */
+	int PhyAddr;		/* PHY address */
+	int OpMode;		/* set these bits in the OPMODE regs */
+	int Port10;		/* set port speed to 10 Mbit/s */
+	int GenChksums;		/* IP checksums to be calculated */
+	int NoRcveLnth;		/* don't insert recv length at start of buffer */
+	int StripPads;		/* remove trailing pad bytes */
+	int FullDuplex;		/* set full duplex mode */
+	int Negotiate;		/* enable auto negotiation */
+	int Loopback;		/* loopback at the PHY */
+	int Cache;		/* Buffers may be cached */
+	int FlowControl;	/* flow control active */
+	int CLKIN;		/* clock in value in MHZ */
+	unsigned short IntMask;	/* interrupt mask */
+	unsigned char Mac[6];	/* MAC address of the board */
+	spinlock_t lock;
+};
+
+extern void get_bf537_ether_addr(char *addr);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d23861c..a729da0 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -54,8 +54,8 @@
 
 #define DRV_MODULE_NAME		"bnx2"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.6.2"
-#define DRV_MODULE_RELDATE	"July 6, 2007"
+#define DRV_MODULE_VERSION	"1.6.3"
+#define DRV_MODULE_RELDATE	"July 16, 2007"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -126,91 +126,102 @@
 
 static struct flash_spec flash_table[] =
 {
+#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
+#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
 	/* Slow EEPROM */
 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
-	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
 	 "EEPROM - slow"},
 	/* Expansion entry 0001 */
 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 	 "Entry 0001"},
 	/* Saifun SA25F010 (non-buffered flash) */
 	/* strap, cfg1, & write1 need updates */
 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
 	 "Non-buffered flash (128kB)"},
 	/* Saifun SA25F020 (non-buffered flash) */
 	/* strap, cfg1, & write1 need updates */
 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
 	 "Non-buffered flash (256kB)"},
 	/* Expansion entry 0100 */
 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 	 "Entry 0100"},
 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
-	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
 	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
-	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
 	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
 	/* Saifun SA25F005 (non-buffered flash) */
 	/* strap, cfg1, & write1 need updates */
 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
 	 "Non-buffered flash (64kB)"},
 	/* Fast EEPROM */
 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
-	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
 	 "EEPROM - fast"},
 	/* Expansion entry 1001 */
 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 	 "Entry 1001"},
 	/* Expansion entry 1010 */
 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 	 "Entry 1010"},
 	/* ATMEL AT45DB011B (buffered flash) */
 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
-	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
 	 "Buffered flash (128kB)"},
 	/* Expansion entry 1100 */
 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 	 "Entry 1100"},
 	/* Expansion entry 1101 */
 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 	 "Entry 1101"},
 	/* Ateml Expansion entry 1110 */
 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
-	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
 	 "Entry 1110 (Atmel)"},
 	/* ATMEL AT45DB021B (buffered flash) */
 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
-	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
 	 "Buffered flash (256kB)"},
 };
 
+static struct flash_spec flash_5709 = {
+	.flags		= BNX2_NV_BUFFERED,
+	.page_bits	= BCM5709_FLASH_PAGE_BITS,
+	.page_size	= BCM5709_FLASH_PAGE_SIZE,
+	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
+	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
+	.name		= "5709 Buffered flash (256kB)",
+};
+
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
@@ -3289,7 +3300,7 @@
 	val = REG_RD(bp, BNX2_MISC_CFG);
 	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
 
-	if (!bp->flash_info->buffered) {
+	if (bp->flash_info->flags & BNX2_NV_WREN) {
 		int j;
 
 		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
@@ -3349,7 +3360,7 @@
 	u32 cmd;
 	int j;
 
-	if (bp->flash_info->buffered)
+	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
 		/* Buffered flash, no erase needed */
 		return 0;
 
@@ -3392,8 +3403,8 @@
 	/* Build the command word. */
 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
 
-	/* Calculate an offset of a buffered flash. */
-	if (bp->flash_info->buffered) {
+	/* Calculate an offset of a buffered flash, not needed for 5709. */
+	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
 		offset = ((offset / bp->flash_info->page_size) <<
 			   bp->flash_info->page_bits) +
 			  (offset % bp->flash_info->page_size);
@@ -3439,8 +3450,8 @@
 	/* Build the command word. */
 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
 
-	/* Calculate an offset of a buffered flash. */
-	if (bp->flash_info->buffered) {
+	/* Calculate an offset of a buffered flash, not needed for 5709. */
+	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
 		offset = ((offset / bp->flash_info->page_size) <<
 			  bp->flash_info->page_bits) +
 			 (offset % bp->flash_info->page_size);
@@ -3478,15 +3489,19 @@
 bnx2_init_nvram(struct bnx2 *bp)
 {
 	u32 val;
-	int j, entry_count, rc;
+	int j, entry_count, rc = 0;
 	struct flash_spec *flash;
 
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
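+		/* 5709 uses a fixed buffered-flash spec */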
+		bp->flash_info = &flash_5709;
+		goto get_flash_size;
+	}
+
 	/* Determine the selected interface. */
 	val = REG_RD(bp, BNX2_NVM_CFG1);
 
 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
 
-	rc = 0;
 	if (val & 0x40000000) {
 
 		/* Flash interface has been reconfigured */
@@ -3542,6 +3557,7 @@
 		return -ENODEV;
 	}
 
+get_flash_size:
 	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
 	if (val)
@@ -3706,7 +3722,7 @@
 		buf = align_buf;
 	}
 
-	if (bp->flash_info->buffered == 0) {
+	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
 		flash_buffer = kmalloc(264, GFP_KERNEL);
 		if (flash_buffer == NULL) {
 			rc = -ENOMEM;
@@ -3739,7 +3755,7 @@
 		bnx2_enable_nvram_access(bp);
 
 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
-		if (bp->flash_info->buffered == 0) {
+		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
 			int j;
 
 			/* Read the whole page into the buffer
@@ -3767,7 +3783,7 @@
 		/* Loop to write back the buffer data from page_start to
 		 * data_start */
 		i = 0;
-		if (bp->flash_info->buffered == 0) {
+		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
 			/* Erase the page */
 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
 				goto nvram_write_end;
@@ -3791,7 +3807,7 @@
 		/* Loop to write the new data from data_start to data_end */
 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
 			if ((addr == page_end - 4) ||
-				((bp->flash_info->buffered) &&
+				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
 				 (addr == data_end - 4))) {
 
 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
@@ -3808,7 +3824,7 @@
 
 		/* Loop to write back the buffer data from data_end
 		 * to page_end */
-		if (bp->flash_info->buffered == 0) {
+		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
 			for (addr = data_end; addr < page_end;
 				addr += 4, i += 4) {
 
@@ -4107,7 +4123,7 @@
 	if (CHIP_NUM(bp) == CHIP_NUM_5708)
 		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
 	else
-		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
+		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
 	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
 
 	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
@@ -4127,10 +4143,6 @@
 
 	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
 
-	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
-	    BNX2_PORT_FEATURE_ASF_ENABLED)
-		bp->flags |= ASF_ENABLE_FLAG;
-
 	/* Initialize the receive filter. */
 	bnx2_set_rx_mode(bp->dev);
 
@@ -5786,8 +5798,9 @@
 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
 			bp->stats_ticks = USEC_PER_SEC;
 	}
-	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
-	bp->stats_ticks &= 0xffff00;
+	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
+		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
 
 	if (netif_running(bp->dev)) {
 		bnx2_netif_stop(bp);
@@ -6629,6 +6642,18 @@
 		if (i != 2)
 			bp->fw_version[j++] = '.';
 	}
+	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
+	    BNX2_PORT_FEATURE_ASF_ENABLED) {
+		bp->flags |= ASF_ENABLE_FLAG;
+
+		for (i = 0; i < 30; i++) {
+			reg = REG_RD_IND(bp, bp->shmem_base +
+					     BNX2_BC_STATE_CONDITION);
+			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
+				break;
+			msleep(10);
+		}
+	}
 	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
@@ -6672,7 +6697,7 @@
 	bp->rx_ticks_int = 18;
 	bp->rx_ticks = 18;
 
-	bp->stats_ticks = 1000000 & 0xffff00;
+	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
 
 	bp->timer_interval =  HZ;
 	bp->current_interval =  HZ;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index d8cd1af..102adfe 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6433,6 +6433,11 @@
 #define ST_MICRO_FLASH_PAGE_SIZE		256
 #define ST_MICRO_FLASH_BASE_TOTAL_SIZE		65536
 
+#define BCM5709_FLASH_PAGE_BITS			8
+#define BCM5709_FLASH_PHY_PAGE_SIZE		(1 << BCM5709_FLASH_PAGE_BITS)
+#define BCM5709_FLASH_BYTE_ADDR_MASK		(BCM5709_FLASH_PHY_PAGE_SIZE-1)
+#define BCM5709_FLASH_PAGE_SIZE			256
+
 #define NVRAM_TIMEOUT_COUNT			30000
 
 
@@ -6449,7 +6454,10 @@
 	u32 config2;
 	u32 config3;
 	u32 write1;
-	u32 buffered;
+	u32 flags;
+#define BNX2_NV_BUFFERED	0x00000001
+#define BNX2_NV_TRANSLATE	0x00000002
+#define BNX2_NV_WREN		0x00000004
 	u32 page_bits;
 	u32 page_size;
 	u32 addr_mask;
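
The bnx2 changes above replace the single "buffered" word in struct
flash_spec with a flags bitmask, so that "buffered part", "page/offset
translation needed" and "explicit write-enable sequence needed" can be
tested independently (the 5709 part is buffered but uses linear
addressing).  A minimal standalone sketch of how the bits are meant to
be combined; the helper below is illustrative only and not part of the
patch:

	#include <stdint.h>

	#define BNX2_NV_BUFFERED	0x00000001
	#define BNX2_NV_TRANSLATE	0x00000002
	#define BNX2_NV_WREN		0x00000004

	/* hypothetical helper mirroring the offset translation done in
	 * the NVRAM read/write paths above */
	static uint32_t nvram_offset(uint32_t flags, uint32_t offset,
				     uint32_t page_size, uint32_t page_bits)
	{
		if (!(flags & BNX2_NV_TRANSLATE))
			return offset;	/* e.g. 5709: linear addressing */
		return ((offset / page_size) << page_bits) +
		       (offset % page_size);
	}
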
diff --git a/drivers/net/bsd_comp.c b/drivers/net/bsd_comp.c
index 7845eaf..202d4a4 100644
--- a/drivers/net/bsd_comp.c
+++ b/drivers/net/bsd_comp.c
@@ -395,14 +395,13 @@
  * Allocate the main control structure for this instance.
  */
     maxmaxcode = MAXCODE(bits);
-    db         = kmalloc(sizeof (struct bsd_db),
+    db         = kzalloc(sizeof (struct bsd_db),
 					    GFP_KERNEL);
     if (!db)
       {
 	return NULL;
       }
 
-    memset (db, 0, sizeof(struct bsd_db));
 /*
  * Allocate space for the dictionary. This may be more than one page in
  * length.
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 9afa47e..3c54014 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -2292,10 +2292,15 @@
 	struct net_device *dev = pci_get_drvdata (pdev);
 	struct speedo_private *sp = netdev_priv(dev);
 	void __iomem *ioaddr = sp->regs;
+	int rc;
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
-	pci_enable_device(pdev);
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+
 	pci_set_master(pdev);
 
 	if (!netif_running(dev))
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index f03f070..489c8b2 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,13 +39,13 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0067"
+#define DRV_VERSION	"EHEA_0071"
 
-/* EHEA capability flags */
+/* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
-#define DLPAR_MEM_ADD 2
-#define DLPAR_MEM_REM 4
-#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM)
+#define DLPAR_MEM_ADD      2
+#define DLPAR_MEM_REM      4
+#define EHEA_CAPABILITIES  (DLPAR_PORT_ADD_REM)
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
 	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -113,6 +113,8 @@
 /* Memory Regions */
 #define EHEA_MR_ACC_CTRL       0x00800000
 
+#define EHEA_BUSMAP_START      0x8000000000000000ULL
+
 #define EHEA_WATCH_DOG_TIMEOUT 10*HZ
 
 /* utility functions */
@@ -186,6 +188,12 @@
 				   set to 0 if unused */
 };
 
+struct ehea_busmap {
+	unsigned int entries;		/* total number of entries */
+	unsigned int valid_sections;	/* number of valid sections */
+	u64 *vaddr;
+};
+
 struct ehea_qp;
 struct ehea_cq;
 struct ehea_eq;
@@ -382,6 +390,8 @@
 	struct ehea_mr mr;
 	u32 pd;                    /* protection domain */
 	u64 max_mc_mac;            /* max number of multicast mac addresses */
+	int active_ports;
+	struct list_head list;
 };
 
 
@@ -431,6 +441,9 @@
 	int max_entries_rq3;
 };
 
+enum ehea_flag_bits {
+	__EHEA_STOP_XFER
+};
 
 void ehea_set_ethtool_ops(struct net_device *netdev);
 int ehea_sense_port_attr(struct ehea_port *port);
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 383144d..4c70a93 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -79,6 +79,11 @@
 MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 ");
 
 static int port_name_cnt = 0;
+static LIST_HEAD(adapter_list);
+u64 ehea_driver_flags = 0;
+struct workqueue_struct *ehea_driver_wq;
+struct work_struct ehea_rereg_mr_task;
+
 
 static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
 					const struct of_device_id *id);
@@ -238,13 +243,17 @@
 		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
 			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
 		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
-		rwqe->sg_list[0].vaddr = (u64)skb->data;
+		rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
 		rwqe->sg_list[0].len = packet_size;
 		rwqe->data_segments = 1;
 
 		index++;
 		index &= max_index_mask;
+
+		if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)))
+			goto out;
 	}
+
 	q_skba->index = index;
 
 	/* Ring doorbell */
@@ -253,7 +262,7 @@
 		ehea_update_rq2a(pr->qp, i);
 	else
 		ehea_update_rq3a(pr->qp, i);
-
+out:
 	return ret;
 }
 
@@ -457,6 +466,8 @@
 							 cqe->vlan_tag);
 			else
 				netif_receive_skb(skb);
+
+			dev->last_rx = jiffies;
 		} else {
 			pr->p_stats.poll_receive_errors++;
 			port_reset = ehea_treat_poll_error(pr, rq, cqe,
@@ -1321,7 +1332,7 @@
 			sg1entry->len = skb_data_size - headersize;
 
 			tmp_addr = (u64)(skb->data + headersize);
-			sg1entry->vaddr = tmp_addr;
+			sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
 			swqe->descriptors++;
 		}
 	} else
@@ -1352,7 +1363,7 @@
 			sg1entry->l_key = lkey;
 			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
 			tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM);
-			sg1entry->vaddr = tmp_addr;
+			sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
 			swqe->descriptors++;
 		}
 	} else {
@@ -1391,7 +1402,7 @@
 			sg1entry->len = frag->size;
 			tmp_addr =  (u64)(page_address(frag->page)
 					  + frag->page_offset);
-			sg1entry->vaddr = tmp_addr;
+			sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
 			swqe->descriptors++;
 			sg1entry_contains_frag_data = 1;
 		}
@@ -1406,7 +1417,7 @@
 
 			tmp_addr = (u64)(page_address(frag->page)
 					 + frag->page_offset);
-			sgentry->vaddr = tmp_addr;
+			sgentry->vaddr = ehea_map_vaddr(tmp_addr);
 			swqe->descriptors++;
 		}
 	}
@@ -1424,7 +1435,8 @@
 				     port->logical_port_id,
 				     reg_type, port->mac_addr, 0, hcallid);
 	if (hret != H_SUCCESS) {
-		ehea_error("reg_dereg_bcmc failed (tagged)");
+		ehea_error("%sregistering bc address failed (tagged)",
+			   hcallid == H_REG_BCMC ? "" : "de");
 		ret = -EIO;
 		goto out_herr;
 	}
@@ -1435,7 +1447,8 @@
 				     port->logical_port_id,
 				     reg_type, port->mac_addr, 0, hcallid);
 	if (hret != H_SUCCESS) {
-		ehea_error("reg_dereg_bcmc failed (vlan)");
+		ehea_error("%sregistering bc address failed (vlan)",
+			   hcallid == H_REG_BCMC ? "" : "de");
 		ret = -EIO;
 	}
 out_herr:
@@ -1878,6 +1891,9 @@
 		ehea_dump(swqe, 512, "swqe");
 	}
 
+	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)))
+		goto out;
+
 	ehea_post_swqe(pr->qp, swqe);
 	pr->tx_packets++;
 
@@ -1892,7 +1908,7 @@
 	}
 	dev->trans_start = jiffies;
 	spin_unlock(&pr->xmit_lock);
-
+out:
 	return NETDEV_TX_OK;
 }
 
@@ -2158,7 +2174,6 @@
 {
 	int ret, i;
 	struct ehea_port *port = netdev_priv(dev);
-	u64 mac_addr = 0;
 
 	if (port->state == EHEA_PORT_UP)
 		return 0;
@@ -2177,18 +2192,10 @@
 		goto out_clean_pr;
 	}
 
-	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
-	if (ret) {
-		ret = -EIO;
-		ehea_error("out_clean_pr");
-		goto out_clean_pr;
-	}
-	mac_addr = (*(u64*)dev->dev_addr) >> 16;
-
 	ret = ehea_reg_interrupts(dev);
 	if (ret) {
-		ehea_error("out_dereg_bc");
-		goto out_dereg_bc;
+		ehea_error("reg_interrupts failed. ret:%d", ret);
+		goto out_clean_pr;
 	}
 
 	for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
@@ -2214,12 +2221,12 @@
 out_free_irqs:
 	ehea_free_interrupts(dev);
 
-out_dereg_bc:
-	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
-
 out_clean_pr:
 	ehea_clean_all_portres(port);
 out:
+	if (ret)
+		ehea_info("Failed starting %s. ret=%i", dev->name, ret);
+
 	return ret;
 }
 
@@ -2258,9 +2265,13 @@
 				&port->port_res[i].d_netdev->state))
 			msleep(1);
 
-	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
-	ret = ehea_clean_all_portres(port);
 	port->state = EHEA_PORT_DOWN;
+
+	ret = ehea_clean_all_portres(port);
+	if (ret)
+		ehea_info("Failed freeing resources for %s. ret=%i",
+			  dev->name, ret);
+
 	return ret;
 }
 
@@ -2292,15 +2303,11 @@
 	netif_stop_queue(dev);
 	netif_poll_disable(dev);
 
-	ret = ehea_down(dev);
-	if (ret)
-		ehea_error("ehea_down failed. not all resources are freed");
+	ehea_down(dev);
 
 	ret = ehea_up(dev);
-	if (ret) {
-		ehea_error("Reset device %s failed: ret=%d", dev->name, ret);
+	if (ret)
 		goto out;
-	}
 
 	if (netif_msg_timer(port))
 		ehea_info("Device %s resetted successfully", dev->name);
@@ -2312,6 +2319,88 @@
 	return;
 }
 
+static void ehea_rereg_mrs(struct work_struct *work)
+{
+	int ret, i;
+	struct ehea_adapter *adapter;
+
+	ehea_info("LPAR memory enlarged - re-initializing driver");
+
+	list_for_each_entry(adapter, &adapter_list, list)
+		if (adapter->active_ports) {
+			/* Shutdown all ports */
+			for (i = 0; i < EHEA_MAX_PORTS; i++) {
+				struct ehea_port *port = adapter->port[i];
+
+				if (port) {
+					struct net_device *dev = port->netdev;
+
+					if (dev->flags & IFF_UP) {
+						ehea_info("stopping %s",
+							  dev->name);
+						down(&port->port_lock);
+						netif_stop_queue(dev);
+						netif_poll_disable(dev);
+						ehea_down(dev);
+						up(&port->port_lock);
+					}
+				}
+			}
+
+			/* Unregister old memory region */
+			ret = ehea_rem_mr(&adapter->mr);
+			if (ret) {
+				ehea_error("unregister MR failed - driver"
+					   " inoperable!");
+				goto out;
+			}
+		}
+
+	ehea_destroy_busmap();
+
+	ret = ehea_create_busmap();
+	if (ret)
+		goto out;
+
+	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+
+	list_for_each_entry(adapter, &adapter_list, list)
+		if (adapter->active_ports) {
+			/* Register new memory region */
+			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
+			if (ret) {
+				ehea_error("register MR failed - driver"
+					   " inoperable!");
+				goto out;
+			}
+
+			/* Restart all ports */
+			for (i = 0; i < EHEA_MAX_PORTS; i++) {
+				struct ehea_port *port = adapter->port[i];
+
+				if (port) {
+					struct net_device *dev = port->netdev;
+
+					if (dev->flags & IFF_UP) {
+						ehea_info("restarting %s",
+							  dev->name);
+						down(&port->port_lock);
+
+						ret = ehea_up(dev);
+						if (!ret) {
+							netif_poll_enable(dev);
+							netif_wake_queue(dev);
+						}
+
+						up(&port->port_lock);
+					}
+				}
+			}
+		}
+out:
+	return;
+}
+
 static void ehea_tx_watchdog(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
@@ -2557,12 +2646,18 @@
 
 	INIT_WORK(&port->reset_task, ehea_reset_port);
 
+	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
+	if (ret) {
+		ret = -EIO;
+		goto out_unreg_port;
+	}
+
 	ehea_set_ethtool_ops(dev);
 
 	ret = register_netdev(dev);
 	if (ret) {
 		ehea_error("register_netdev failed. ret=%d", ret);
-		goto out_unreg_port;
+		goto out_dereg_bc;
 	}
 
 	ret = ehea_get_jumboframe_status(port, &jumbo);
@@ -2573,8 +2668,13 @@
 	ehea_info("%s: Jumbo frames are %sabled", dev->name,
 		  jumbo == 1 ? "en" : "dis");
 
+	adapter->active_ports++;
+
 	return port;
 
+out_dereg_bc:
+	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
+
 out_unreg_port:
 	ehea_unregister_port(port);
 
@@ -2594,8 +2694,10 @@
 {
 	unregister_netdev(port->netdev);
 	ehea_unregister_port(port);
+	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
 	kfree(port->mc_list);
 	free_netdev(port->netdev);
+	port->adapter->active_ports--;
 }
 
 static int ehea_setup_ports(struct ehea_adapter *adapter)
@@ -2788,6 +2890,8 @@
 		goto out;
 	}
 
+	list_add(&adapter->list, &adapter_list);
+
 	adapter->ebus_dev = dev;
 
 	adapter_handle = of_get_property(dev->ofdev.node, "ibm,hea-handle",
@@ -2891,7 +2995,10 @@
 
 	ehea_destroy_eq(adapter->neq);
 	ehea_remove_adapter_mr(adapter);
+	list_del(&adapter->list);
+
 	kfree(adapter);
+
 	return 0;
 }
 
@@ -2939,9 +3046,18 @@
 	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
 	       DRV_VERSION);
 
+	ehea_driver_wq = create_workqueue("ehea_driver_wq");
+
+	INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
+
 	ret = check_module_parm();
 	if (ret)
 		goto out;
+
+	ret = ehea_create_busmap();
+	if (ret)
+		goto out;
+
 	ret = ibmebus_register_driver(&ehea_driver);
 	if (ret) {
 		ehea_error("failed registering eHEA device driver on ebus");
@@ -2965,6 +3081,7 @@
 {
 	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
 	ibmebus_unregister_driver(&ehea_driver);
+	ehea_destroy_busmap();
 }
 
 module_init(ehea_module_init);
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h
index d17a45a..89b6353 100644
--- a/drivers/net/ehea/ehea_phyp.h
+++ b/drivers/net/ehea/ehea_phyp.h
@@ -60,6 +60,9 @@
 	}
 }
 
+/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
+#define EHEA_MAX_RPAGE 512
+
 /* Notification Event Queue (NEQ) Entry bit masks */
 #define NEQE_EVENT_CODE		EHEA_BMASK_IBM(2, 7)
 #define NEQE_PORTNUM  		EHEA_BMASK_IBM(32, 47)
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 29eaa46..a36fa6c2 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -31,6 +31,13 @@
 #include "ehea_phyp.h"
 #include "ehea_qmr.h"
 
+
+struct ehea_busmap ehea_bmap = { 0, 0, NULL };
+extern u64 ehea_driver_flags;
+extern struct workqueue_struct *ehea_driver_wq;
+extern struct work_struct ehea_rereg_mr_task;
+
+
 static void *hw_qpageit_get_inc(struct hw_queue *queue)
 {
 	void *retvalue = hw_qeit_get(queue);
@@ -547,18 +554,84 @@
 	return 0;
 }
 
+int ehea_create_busmap(void)
+{
+	u64 vaddr = EHEA_BUSMAP_START;
+	unsigned long abs_max_pfn = 0;
+	unsigned long sec_max_pfn;
+	int i;
+
+	/*
+	 * Sections are not in ascending order -> Loop over all sections and
+	 * find the highest PFN to compute the required map size.
+	 */
+	ehea_bmap.valid_sections = 0;
+
+	for (i = 0; i < NR_MEM_SECTIONS; i++)
+		if (valid_section_nr(i)) {
+			sec_max_pfn = section_nr_to_pfn(i);
+			if (sec_max_pfn > abs_max_pfn)
+				abs_max_pfn = sec_max_pfn;
+			ehea_bmap.valid_sections++;
+		}
+
+	ehea_bmap.entries = abs_max_pfn / EHEA_PAGES_PER_SECTION + 1;
+	ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
+
+	if (!ehea_bmap.vaddr)
+		return -ENOMEM;
+
+	for (i = 0 ; i < ehea_bmap.entries; i++) {
+		unsigned long pfn = section_nr_to_pfn(i);
+
+		if (pfn_valid(pfn)) {
+			ehea_bmap.vaddr[i] = vaddr;
+			vaddr += EHEA_SECTSIZE;
+		} else
+			ehea_bmap.vaddr[i] = 0;
+	}
+
+	return 0;
+}
+
+void ehea_destroy_busmap(void)
+{
+	vfree(ehea_bmap.vaddr);
+}
+
+u64 ehea_map_vaddr(void *caddr)
+{
+	u64 mapped_addr;
+	unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS;
+
+	if (likely(index < ehea_bmap.entries)) {
+		mapped_addr = ehea_bmap.vaddr[index];
+		if (likely(mapped_addr))
+			mapped_addr |= (((unsigned long)caddr)
+					& (EHEA_SECTSIZE - 1));
+		else
+			mapped_addr = -1;
+	} else
+		mapped_addr = -1;
+
+	if (unlikely(mapped_addr == -1))
+		if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
+			queue_work(ehea_driver_wq, &ehea_rereg_mr_task);
+
+	return mapped_addr;
+}
+
 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
 {
-	int i, k, ret;
-	u64 hret, pt_abs, start, end, nr_pages;
-	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+	int ret;
 	u64 *pt;
+	void *pg;
+	u64 hret, pt_abs, i, j, m, mr_len;
+	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
 
-	start = KERNELBASE;
-	end = (u64)high_memory;
-	nr_pages = (end - start) / EHEA_PAGESIZE;
+	mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
 
-	pt =  kzalloc(PAGE_SIZE, GFP_KERNEL);
+	pt =  kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL);
 	if (!pt) {
 		ehea_error("no mem");
 		ret = -ENOMEM;
@@ -566,7 +639,8 @@
 	}
 	pt_abs = virt_to_abs(pt);
 
-	hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start,
+	hret = ehea_h_alloc_resource_mr(adapter->handle,
+					EHEA_BUSMAP_START, mr_len,
 					acc_ctrl, adapter->pd,
 					&mr->handle, &mr->lkey);
 	if (hret != H_SUCCESS) {
@@ -575,49 +649,43 @@
 		goto out;
 	}
 
-	mr->vaddr = KERNELBASE;
-	k = 0;
+	for (i = 0 ; i < ehea_bmap.entries; i++)
+		if (ehea_bmap.vaddr[i]) {
+			void *sectbase = __va(i << SECTION_SIZE_BITS);
+			unsigned long k = 0;
 
-	while (nr_pages > 0) {
-		if (nr_pages > 1) {
-			u64 num_pages = min(nr_pages, (u64)512);
-			for (i = 0; i < num_pages; i++)
-				pt[i] = virt_to_abs((void*)(((u64)start) +
-							    ((k++) *
-							     EHEA_PAGESIZE)));
+			for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE);
+			      j++) {
 
-			hret = ehea_h_register_rpage_mr(adapter->handle,
-							mr->handle, 0,
-							0, (u64)pt_abs,
-							num_pages);
-			nr_pages -= num_pages;
-		} else {
-			u64 abs_adr = virt_to_abs((void*)(((u64)start) +
-							  (k * EHEA_PAGESIZE)));
+				for (m = 0; m < EHEA_MAX_RPAGE; m++) {
+					pg = sectbase + ((k++) * EHEA_PAGESIZE);
+					pt[m] = virt_to_abs(pg);
+				}
 
-			hret = ehea_h_register_rpage_mr(adapter->handle,
-							mr->handle, 0,
-							0, abs_adr,1);
-			nr_pages--;
+				hret = ehea_h_register_rpage_mr(adapter->handle,
+								mr->handle,
+								0, 0, pt_abs,
+								EHEA_MAX_RPAGE);
+				if ((hret != H_SUCCESS)
+				    && (hret != H_PAGE_REGISTERED)) {
+					ehea_h_free_resource(adapter->handle,
+							     mr->handle,
+							     FORCE_FREE);
+					ehea_error("register_rpage_mr failed");
+					ret = -EIO;
+					goto out;
+				}
+			}
 		}
 
-		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
-			ehea_h_free_resource(adapter->handle,
-					     mr->handle, FORCE_FREE);
-			ehea_error("register_rpage_mr failed");
-			ret = -EIO;
-			goto out;
-		}
-	}
-
 	if (hret != H_SUCCESS) {
-		ehea_h_free_resource(adapter->handle, mr->handle,
-				     FORCE_FREE);
-		ehea_error("register_rpage failed for last page");
+		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
+		ehea_error("registering mr failed");
 		ret = -EIO;
 		goto out;
 	}
 
+	mr->vaddr = EHEA_BUSMAP_START;
 	mr->adapter = adapter;
 	ret = 0;
 out:
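
ehea_map_vaddr() above translates a kernel address into a position inside
the single, contiguous region that the driver now registers with the
hypervisor: the address is reduced to a memory-section index, the
per-section table supplies the bus base for that section, and the low
bits keep the offset within the section.  A rough standalone sketch of
that lookup, assuming 16 MB sections as in EHEA_SECTSIZE below; the names
here are illustrative, not the driver's:

	#include <stdint.h>

	#define SECT_SHIFT	24			/* 16 MB sections */
	#define SECT_MASK	((1UL << SECT_SHIFT) - 1)

	static uint64_t *busmap;	/* one bus base per memory section */
	static unsigned long busmap_entries;

	/* 0 in the table means "section not registered"; returning -1 is
	 * what makes the driver stop traffic and re-register its MR */
	static uint64_t map_addr(unsigned long phys)
	{
		unsigned long idx = phys >> SECT_SHIFT;

		if (idx >= busmap_entries || !busmap[idx])
			return (uint64_t)-1;
		return busmap[idx] | (phys & SECT_MASK);
	}
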
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index c0eb3e0..b71f845 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -36,8 +36,14 @@
  * page size of ehea hardware queues
  */
 
-#define EHEA_PAGESHIFT  12
-#define EHEA_PAGESIZE   4096UL
+#define EHEA_PAGESHIFT         12
+#define EHEA_PAGESIZE          (1UL << EHEA_PAGESHIFT)
+#define EHEA_SECTSIZE          (1UL << 24)
+#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> PAGE_SHIFT)
+
+#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
+#error eHEA module can't work if kernel sectionsize < ehea sectionsize
+#endif
 
 /* Some abbreviations used here:
  *
@@ -372,4 +378,8 @@
 
 void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
 
+int ehea_create_busmap(void);
+void ehea_destroy_busmap(void);
+u64 ehea_map_vaddr(void *caddr);
+
 #endif	/* __EHEA_QMR_H__ */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 67046e8..6d1d50a 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -550,6 +550,8 @@
 /* PHY defines */
 #define PHY_OUI_MARVELL	0x5043
 #define PHY_OUI_CICADA	0x03f1
+#define PHY_OUI_VITESSE	0x01c1
+#define PHY_OUI_REALTEK	0x0732
 #define PHYID1_OUI_MASK	0x03ff
 #define PHYID1_OUI_SHFT	6
 #define PHYID2_OUI_MASK	0xfc00
@@ -557,12 +559,36 @@
 #define PHYID2_MODEL_MASK		0x03f0
 #define PHY_MODEL_MARVELL_E3016		0x220
 #define PHY_MARVELL_E3016_INITMASK	0x0300
-#define PHY_INIT1	0x0f000
-#define PHY_INIT2	0x0e00
-#define PHY_INIT3	0x01000
-#define PHY_INIT4	0x0200
-#define PHY_INIT5	0x0004
-#define PHY_INIT6	0x02000
+#define PHY_CICADA_INIT1	0x0f000
+#define PHY_CICADA_INIT2	0x0e00
+#define PHY_CICADA_INIT3	0x01000
+#define PHY_CICADA_INIT4	0x0200
+#define PHY_CICADA_INIT5	0x0004
+#define PHY_CICADA_INIT6	0x02000
+#define PHY_VITESSE_INIT_REG1	0x1f
+#define PHY_VITESSE_INIT_REG2	0x10
+#define PHY_VITESSE_INIT_REG3	0x11
+#define PHY_VITESSE_INIT_REG4	0x12
+#define PHY_VITESSE_INIT_MSK1	0xc
+#define PHY_VITESSE_INIT_MSK2	0x0180
+#define PHY_VITESSE_INIT1	0x52b5
+#define PHY_VITESSE_INIT2	0xaf8a
+#define PHY_VITESSE_INIT3	0x8
+#define PHY_VITESSE_INIT4	0x8f8a
+#define PHY_VITESSE_INIT5	0xaf86
+#define PHY_VITESSE_INIT6	0x8f86
+#define PHY_VITESSE_INIT7	0xaf82
+#define PHY_VITESSE_INIT8	0x0100
+#define PHY_VITESSE_INIT9	0x8f82
+#define PHY_VITESSE_INIT10	0x0
+#define PHY_REALTEK_INIT_REG1	0x1f
+#define PHY_REALTEK_INIT_REG2	0x19
+#define PHY_REALTEK_INIT_REG3	0x13
+#define PHY_REALTEK_INIT1	0x0000
+#define PHY_REALTEK_INIT2	0x8e00
+#define PHY_REALTEK_INIT3	0x0001
+#define PHY_REALTEK_INIT4	0xad17
+
 #define PHY_GIGABIT	0x0100
 
 #define PHY_TIMEOUT	0x1
@@ -1096,6 +1122,28 @@
 			return PHY_ERROR;
 		}
 	}
+	if (np->phy_oui == PHY_OUI_REALTEK) {
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+	}
 
 	/* set advertise register */
 	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
@@ -1141,14 +1189,14 @@
 	/* phy vendor specific configuration */
 	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
 		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
-		phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
-		phy_reserved |= (PHY_INIT3 | PHY_INIT4);
+		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
+		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
 		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
 			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
 			return PHY_ERROR;
 		}
 		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
-		phy_reserved |= PHY_INIT5;
+		phy_reserved |= PHY_CICADA_INIT5;
 		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
 			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
 			return PHY_ERROR;
@@ -1156,12 +1204,106 @@
 	}
 	if (np->phy_oui == PHY_OUI_CICADA) {
 		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
-		phy_reserved |= PHY_INIT6;
+		phy_reserved |= PHY_CICADA_INIT6;
 		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
 			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
 			return PHY_ERROR;
 		}
 	}
+	if (np->phy_oui == PHY_OUI_VITESSE) {
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
+		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+		phy_reserved |= PHY_VITESSE_INIT3;
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
+		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+		phy_reserved |= PHY_VITESSE_INIT3;
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
+		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
+		phy_reserved |= PHY_VITESSE_INIT8;
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+	}
+	if (np->phy_oui == PHY_OUI_REALTEK) {
+		/* reset could have cleared these out, set them back */
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+	}
+
 	/* some phys clear out pause advertisment on reset, set it back */
 	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
 
@@ -4995,12 +5137,10 @@
 			goto out_unmap;
 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
 	}
-	np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL);
-	np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL);
+	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
+	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
 	if (!np->rx_skb || !np->tx_skb)
 		goto out_freering;
-	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
-	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
 
 	dev->open = nv_open;
 	dev->stop = nv_close;
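
Several hunks in this merge (bsd_comp, dmascc, irport, irtty-sir,
iseries_veth, lance and the forcedeth ring allocation above) fold an
open-coded kmalloc()+memset(0) pair into kzalloc(), or a cleared array
allocation into kcalloc().  The transformation is behaviour-preserving;
roughly:

	/* before: allocate, then clear by hand */
	p = kmalloc(n * sizeof(*p), GFP_KERNEL);
	if (p)
		memset(p, 0, n * sizeof(*p));

	/* after: zeroed allocation in one call; kcalloc() additionally
	 * fails cleanly if n * sizeof(*p) would overflow */
	p = kcalloc(n, sizeof(*p), GFP_KERNEL);
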
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index d7a1a58..f926905 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -420,8 +420,18 @@
 	if (ecntrl & ECNTRL_REDUCED_MODE) {
 		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
 			return PHY_INTERFACE_MODE_RMII;
-		else
+		else {
+			phy_interface_t interface = priv->einfo->interface;
+
+			/*
+			 * This isn't autodetected right now, so it must
+			 * be set by the device tree or platform code.
+			 */
+			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+				return PHY_INTERFACE_MODE_RGMII_ID;
+
 			return PHY_INTERFACE_MODE_RGMII;
+		}
 	}
 
 	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 5dd34a1..ac3596f 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -31,7 +31,6 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
-#include <asm/ocp.h>
 #include <linux/crc32.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 84aa211..355c6cf 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -320,7 +320,7 @@
 	sprintf(portarg, "%ld", bc->pdev->port->base);
 	printk(KERN_DEBUG "%s: %s -s -p %s -m %s\n", bc_drvname, eppconfig_path, portarg, modearg);
 
-	return call_usermodehelper(eppconfig_path, argv, envp, 1);
+	return call_usermodehelper(eppconfig_path, argv, envp, UMH_WAIT_PROC);
 }
 
 /* ---------------------------------------------------------------------- */
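
The baycom_epp change swaps the bare 1 passed to call_usermodehelper()
for the new UMH_WAIT_PROC constant.  Behaviour is unchanged, since the
wait-mode enum (reproduced from memory here; see include/linux/kmod.h
for the authoritative definition) keeps the old numeric values:

	enum umh_wait {
		UMH_NO_WAIT	= -1,	/* don't wait at all */
		UMH_WAIT_EXEC	= 0,	/* wait only for the exec */
		UMH_WAIT_PROC	= 1,	/* wait until the process exits */
	};
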
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 3be8c50..205f096 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -453,8 +453,8 @@
 	int scc_base = card_base + hw[type].scc_offset;
 	char *chipnames[] = CHIPNAMES;
 
-	/* Allocate memory */
-	info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
+	/* Initialize what is necessary for write_scc and write_scc_data */
+	info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
 	if (!info) {
 		printk(KERN_ERR "dmascc: "
 		       "could not allocate memory for %s at %#3x\n",
@@ -462,8 +462,6 @@
 		goto out;
 	}
 
-	/* Initialize what is necessary for write_scc and write_scc_data */
-	memset(info, 0, sizeof(struct scc_info));
 
 	info->dev[0] = alloc_netdev(0, "", dev_setup);
 	if (!info->dev[0]) {
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 3078c41..2073245 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -164,14 +164,13 @@
 	
 	/* Allocate memory if needed */
 	if (self->tx_buff.truesize > 0) {
-		self->tx_buff.head = kmalloc(self->tx_buff.truesize,
+		self->tx_buff.head = kzalloc(self->tx_buff.truesize,
 						      GFP_KERNEL);
 		if (self->tx_buff.head == NULL) {
 			IRDA_ERROR("%s(), can't allocate memory for "
 				   "transmit buffer!\n", __FUNCTION__);
 			goto err_out4;
 		}
-		memset(self->tx_buff.head, 0, self->tx_buff.truesize);
 	}	
 	self->tx_buff.data = self->tx_buff.head;
 
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index ad18573..6f5f697 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -505,10 +505,9 @@
 	}
 
 	/* allocate private device info block */
-	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		goto out_put;
-	memset(priv, 0, sizeof(*priv));
 
 	priv->magic = IRTTY_MAGIC;
 	priv->tty = tty;
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 347d50c..0433c41 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -822,10 +822,9 @@
 	     || ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) )
 		return 0;
 
-	cnx = kmalloc(sizeof(*cnx), GFP_KERNEL);
+	cnx = kzalloc(sizeof(*cnx), GFP_KERNEL);
 	if (! cnx)
 		return -ENOMEM;
-	memset(cnx, 0, sizeof(*cnx));
 
 	cnx->remote_lp = rlp;
 	spin_lock_init(&cnx->lock);
@@ -852,14 +851,13 @@
 	if (rc != 0)
 		return rc;
 
-	msgs = kmalloc(VETH_NUMBUFFERS * sizeof(struct veth_msg), GFP_KERNEL);
+	msgs = kcalloc(VETH_NUMBUFFERS, sizeof(struct veth_msg), GFP_KERNEL);
 	if (! msgs) {
 		veth_error("Can't allocate buffers for LPAR %d.\n", rlp);
 		return -ENOMEM;
 	}
 
 	cnx->msgs = msgs;
-	memset(msgs, 0, VETH_NUMBUFFERS * sizeof(struct veth_msg));
 
 	for (i = 0; i < VETH_NUMBUFFERS; i++) {
 		msgs[i].token = i;
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index a2f37e5..a4e5fab 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -533,11 +533,10 @@
 	dev->base_addr = ioaddr;
 	/* Make certain the data structures used by the LANCE are aligned and DMAble. */
 
-	lp = kmalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
+	lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
 	if(lp==NULL)
 		return -ENODEV;
 	if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
-	memset(lp, 0, sizeof(*lp));
 	dev->priv = lp;
 	lp->name = chipname;
 	lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
diff --git a/drivers/net/lguest_net.c b/drivers/net/lguest_net.c
new file mode 100644
index 0000000..1127786
--- /dev/null
+++ b/drivers/net/lguest_net.c
@@ -0,0 +1,354 @@
+/* A simple network driver for lguest.
+ *
+ * Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+//#define DEBUG
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/mm_types.h>
+#include <linux/io.h>
+#include <linux/lguest_bus.h>
+
+#define SHARED_SIZE		PAGE_SIZE
+#define MAX_LANS		4
+#define NUM_SKBS		8
+
+struct lguestnet_info
+{
+	/* The shared page(s). */
+	struct lguest_net *peer;
+	unsigned long peer_phys;
+	unsigned long mapsize;
+
+	/* The lguest_device I come from */
+	struct lguest_device *lgdev;
+
+	/* My peerid. */
+	unsigned int me;
+
+	/* Receive queue. */
+	struct sk_buff *skb[NUM_SKBS];
+	struct lguest_dma dma[NUM_SKBS];
+};
+
+/* How many bytes left in this page. */
+static unsigned int rest_of_page(void *data)
+{
+	return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE);
+}
+
+/* Simple convention: offset 4 * peernum. */
+static unsigned long peer_key(struct lguestnet_info *info, unsigned peernum)
+{
+	return info->peer_phys + 4 * peernum;
+}
+
+static void skb_to_dma(const struct sk_buff *skb, unsigned int headlen,
+		       struct lguest_dma *dma)
+{
+	unsigned int i, seg;
+
+	for (i = seg = 0; i < headlen; seg++, i += rest_of_page(skb->data+i)) {
+		dma->addr[seg] = virt_to_phys(skb->data + i);
+		dma->len[seg] = min((unsigned)(headlen - i),
+				    rest_of_page(skb->data + i));
+	}
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, seg++) {
+		const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+		/* Should not happen with MTU less than 64k - 2 * PAGE_SIZE. */
+		if (seg == LGUEST_MAX_DMA_SECTIONS) {
+			printk("Woah dude!  Megapacket!\n");
+			break;
+		}
+		dma->addr[seg] = page_to_phys(f->page) + f->page_offset;
+		dma->len[seg] = f->size;
+	}
+	if (seg < LGUEST_MAX_DMA_SECTIONS)
+		dma->len[seg] = 0;
+}
+
+/* We overload multicast bit to show promiscuous mode. */
+#define PROMISC_BIT		0x01
+
+static void lguestnet_set_multicast(struct net_device *dev)
+{
+	struct lguestnet_info *info = netdev_priv(dev);
+
+	if ((dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) || dev->mc_count)
+		info->peer[info->me].mac[0] |= PROMISC_BIT;
+	else
+		info->peer[info->me].mac[0] &= ~PROMISC_BIT;
+}
+
+static int promisc(struct lguestnet_info *info, unsigned int peer)
+{
+	return info->peer[peer].mac[0] & PROMISC_BIT;
+}
+
+static int mac_eq(const unsigned char mac[ETH_ALEN],
+		  struct lguestnet_info *info, unsigned int peer)
+{
+	/* Ignore multicast bit, which peer turns on to mean promisc. */
+	if ((info->peer[peer].mac[0] & (~PROMISC_BIT)) != mac[0])
+		return 0;
+	return memcmp(mac+1, info->peer[peer].mac+1, ETH_ALEN-1) == 0;
+}
+
+static void transfer_packet(struct net_device *dev,
+			    struct sk_buff *skb,
+			    unsigned int peernum)
+{
+	struct lguestnet_info *info = netdev_priv(dev);
+	struct lguest_dma dma;
+
+	skb_to_dma(skb, skb_headlen(skb), &dma);
+	pr_debug("xfer length %04x (%u)\n", htons(skb->len), skb->len);
+
+	lguest_send_dma(peer_key(info, peernum), &dma);
+	if (dma.used_len != skb->len) {
+		dev->stats.tx_carrier_errors++;
+		pr_debug("Bad xfer to peer %i: %i of %i (dma %p/%i)\n",
+			 peernum, dma.used_len, skb->len,
+			 (void *)dma.addr[0], dma.len[0]);
+	} else {
+		dev->stats.tx_bytes += skb->len;
+		dev->stats.tx_packets++;
+	}
+}
+
+static int unused_peer(const struct lguest_net peer[], unsigned int num)
+{
+	return peer[num].mac[0] == 0;
+}
+
+static int lguestnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	unsigned int i;
+	int broadcast;
+	struct lguestnet_info *info = netdev_priv(dev);
+	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
+
+	pr_debug("%s: xmit %02x:%02x:%02x:%02x:%02x:%02x\n",
+		 dev->name, dest[0],dest[1],dest[2],dest[3],dest[4],dest[5]);
+
+	broadcast = is_multicast_ether_addr(dest);
+	for (i = 0; i < info->mapsize/sizeof(struct lguest_net); i++) {
+		if (i == info->me || unused_peer(info->peer, i))
+			continue;
+
+		if (!broadcast && !promisc(info, i) && !mac_eq(dest, info, i))
+			continue;
+
+		pr_debug("lguestnet %s: sending from %i to %i\n",
+			 dev->name, info->me, i);
+		transfer_packet(dev, skb, i);
+	}
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+/* Find a new skb to put in this slot in shared mem. */
+static int fill_slot(struct net_device *dev, unsigned int slot)
+{
+	struct lguestnet_info *info = netdev_priv(dev);
+	/* Try to create and register a new one. */
+	info->skb[slot] = netdev_alloc_skb(dev, ETH_HLEN + ETH_DATA_LEN);
+	if (!info->skb[slot]) {
+		printk("%s: could not fill slot %i\n", dev->name, slot);
+		return -ENOMEM;
+	}
+
+	skb_to_dma(info->skb[slot], ETH_HLEN + ETH_DATA_LEN, &info->dma[slot]);
+	wmb();
+	/* Now we tell hypervisor it can use the slot. */
+	info->dma[slot].used_len = 0;
+	return 0;
+}
+
+static irqreturn_t lguestnet_rcv(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct lguestnet_info *info = netdev_priv(dev);
+	unsigned int i, done = 0;
+
+	for (i = 0; i < ARRAY_SIZE(info->dma); i++) {
+		unsigned int length;
+		struct sk_buff *skb;
+
+		length = info->dma[i].used_len;
+		if (length == 0)
+			continue;
+
+		done++;
+		skb = info->skb[i];
+		fill_slot(dev, i);
+
+		if (length < ETH_HLEN || length > ETH_HLEN + ETH_DATA_LEN) {
+			pr_debug("%s: unbelievable skb len: %i\n",
+				 dev->name, length);
+			dev_kfree_skb(skb);
+			continue;
+		}
+
+		skb_put(skb, length);
+		skb->protocol = eth_type_trans(skb, dev);
+		/* This is a reliable transport. */
+		if (dev->features & NETIF_F_NO_CSUM)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
+			 ntohs(skb->protocol), skb->len, skb->pkt_type);
+
+		dev->stats.rx_bytes += skb->len;
+		dev->stats.rx_packets++;
+		netif_rx(skb);
+	}
+	return done ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int lguestnet_open(struct net_device *dev)
+{
+	int i;
+	struct lguestnet_info *info = netdev_priv(dev);
+
+	/* Set up our MAC address */
+	memcpy(info->peer[info->me].mac, dev->dev_addr, ETH_ALEN);
+
+	/* Turn on promisc mode if needed */
+	lguestnet_set_multicast(dev);
+
+	for (i = 0; i < ARRAY_SIZE(info->dma); i++) {
+		if (fill_slot(dev, i) != 0)
+			goto cleanup;
+	}
+	if (lguest_bind_dma(peer_key(info,info->me), info->dma,
+			    NUM_SKBS, lgdev_irq(info->lgdev)) != 0)
+		goto cleanup;
+	return 0;
+
+cleanup:
+	while (--i >= 0)
+		dev_kfree_skb(info->skb[i]);
+	return -ENOMEM;
+}
+
+static int lguestnet_close(struct net_device *dev)
+{
+	unsigned int i;
+	struct lguestnet_info *info = netdev_priv(dev);
+
+	/* Clear all trace: others might deliver packets, we'll ignore it. */
+	memset(&info->peer[info->me], 0, sizeof(info->peer[info->me]));
+
+	/* Deregister sg lists. */
+	lguest_unbind_dma(peer_key(info, info->me), info->dma);
+	for (i = 0; i < ARRAY_SIZE(info->dma); i++)
+		dev_kfree_skb(info->skb[i]);
+	return 0;
+}
+
+static int lguestnet_probe(struct lguest_device *lgdev)
+{
+	int err, irqf = IRQF_SHARED;
+	struct net_device *dev;
+	struct lguestnet_info *info;
+	struct lguest_device_desc *desc = &lguest_devices[lgdev->index];
+
+	pr_debug("lguest_net: probing for device %i\n", lgdev->index);
+
+	dev = alloc_etherdev(sizeof(struct lguestnet_info));
+	if (!dev)
+		return -ENOMEM;
+
+	SET_MODULE_OWNER(dev);
+
+	/* Ethernet defaults with some changes */
+	ether_setup(dev);
+	dev->set_mac_address = NULL;
+
+	dev->dev_addr[0] = 0x02; /* set local assignment bit (IEEE802) */
+	dev->dev_addr[1] = 0x00;
+	memcpy(&dev->dev_addr[2], &lguest_data.guestid, 2);
+	dev->dev_addr[4] = 0x00;
+	dev->dev_addr[5] = 0x00;
+
+	dev->open = lguestnet_open;
+	dev->stop = lguestnet_close;
+	dev->hard_start_xmit = lguestnet_start_xmit;
+
+	/* Turning on/off promisc will call dev->set_multicast_list.
+	 * We don't actually support multicast yet */
+	dev->set_multicast_list = lguestnet_set_multicast;
+	SET_NETDEV_DEV(dev, &lgdev->dev);
+	if (desc->features & LGUEST_NET_F_NOCSUM)
+		dev->features = NETIF_F_SG|NETIF_F_NO_CSUM;
+
+	info = netdev_priv(dev);
+	info->mapsize = PAGE_SIZE * desc->num_pages;
+	info->peer_phys = ((unsigned long)desc->pfn << PAGE_SHIFT);
+	info->lgdev = lgdev;
+	info->peer = lguest_map(info->peer_phys, desc->num_pages);
+	if (!info->peer) {
+		err = -ENOMEM;
+		goto free;
+	}
+
+	/* This stores our peerid (upper bits reserved for future). */
+	info->me = (desc->features & (info->mapsize-1));
+
+	err = register_netdev(dev);
+	if (err) {
+		pr_debug("lguestnet: registering device failed\n");
+		goto unmap;
+	}
+
+	if (lguest_devices[lgdev->index].features & LGUEST_DEVICE_F_RANDOMNESS)
+		irqf |= IRQF_SAMPLE_RANDOM;
+	if (request_irq(lgdev_irq(lgdev), lguestnet_rcv, irqf, "lguestnet",
+			dev) != 0) {
+		pr_debug("lguestnet: cannot get irq %i\n", lgdev_irq(lgdev));
+		goto unregister;
+	}
+
+	pr_debug("lguestnet: registered device %s\n", dev->name);
+	lgdev->private = dev;
+	return 0;
+
+unregister:
+	unregister_netdev(dev);
+unmap:
+	lguest_unmap(info->peer);
+free:
+	free_netdev(dev);
+	return err;
+}
+
+static struct lguest_driver lguestnet_drv = {
+	.name = "lguestnet",
+	.owner = THIS_MODULE,
+	.device_type = LGUEST_DEVICE_T_NET,
+	.probe = lguestnet_probe,
+};
+
+static __init int lguestnet_init(void)
+{
+	return register_lguest_driver(&lguestnet_drv);
+}
+module_init(lguestnet_init);
+
+MODULE_DESCRIPTION("Lguest network driver");
+MODULE_LICENSE("GPL");
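
In the new lguest network driver above, skb_to_dma() chops a buffer into
DMA segments that never cross a page boundary, because the host side of
the interface works on physical pages.  A small standalone sketch of that
splitting loop (hypothetical names, fixed 4 kB pages assumed):

	#define PAGE_SZ 4096UL

	/* bytes left in the page that contains address 'addr' */
	static unsigned long rest_of_page(unsigned long addr)
	{
		return PAGE_SZ - (addr % PAGE_SZ);
	}

	/* split [buf, buf + len) into page-bounded chunks; the chunk
	 * offsets/lengths land in off[]/sz[], the count is returned */
	static unsigned int split_by_page(unsigned long buf, unsigned long len,
					  unsigned long *off, unsigned long *sz,
					  unsigned int max_segs)
	{
		unsigned int seg = 0;
		unsigned long i;

		for (i = 0; i < len && seg < max_segs;
		     i += rest_of_page(buf + i), seg++) {
			off[seg] = buf + i;
			sz[seg] = rest_of_page(buf + i);
			if (sz[seg] > len - i)
				sz[seg] = len - i;
		}
		return seg;
	}
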
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 0e04f7a..a4bb026 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -17,13 +17,12 @@
 #include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/mii.h>
-#include <linux/mutex.h>
 #include <linux/dma-mapping.h>
-#include <linux/ethtool.h>
 #include <linux/platform_device.h>
+#include <linux/phy.h>
 
 #include <asm/arch/board.h>
+#include <asm/arch/cpu.h>
 
 #include "macb.h"
 
@@ -85,172 +84,202 @@
 		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
 }
 
-static void macb_enable_mdio(struct macb *bp)
+static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-	unsigned long flags;
-	u32 reg;
-
-	spin_lock_irqsave(&bp->lock, flags);
-	reg = macb_readl(bp, NCR);
-	reg |= MACB_BIT(MPE);
-	macb_writel(bp, NCR, reg);
-	macb_writel(bp, IER, MACB_BIT(MFD));
-	spin_unlock_irqrestore(&bp->lock, flags);
-}
-
-static void macb_disable_mdio(struct macb *bp)
-{
-	unsigned long flags;
-	u32 reg;
-
-	spin_lock_irqsave(&bp->lock, flags);
-	reg = macb_readl(bp, NCR);
-	reg &= ~MACB_BIT(MPE);
-	macb_writel(bp, NCR, reg);
-	macb_writel(bp, IDR, MACB_BIT(MFD));
-	spin_unlock_irqrestore(&bp->lock, flags);
-}
-
-static int macb_mdio_read(struct net_device *dev, int phy_id, int location)
-{
-	struct macb *bp = netdev_priv(dev);
+	struct macb *bp = bus->priv;
 	int value;
 
-	mutex_lock(&bp->mdio_mutex);
-
-	macb_enable_mdio(bp);
 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
 			      | MACB_BF(RW, MACB_MAN_READ)
-			      | MACB_BF(PHYA, phy_id)
-			      | MACB_BF(REGA, location)
+			      | MACB_BF(PHYA, mii_id)
+			      | MACB_BF(REGA, regnum)
 			      | MACB_BF(CODE, MACB_MAN_CODE)));
 
-	wait_for_completion(&bp->mdio_complete);
+	/* wait for end of transfer */
+	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
+		cpu_relax();
 
 	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
-	macb_disable_mdio(bp);
-	mutex_unlock(&bp->mdio_mutex);
 
 	return value;
 }
 
-static void macb_mdio_write(struct net_device *dev, int phy_id,
-			    int location, int val)
+static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+			   u16 value)
 {
-	struct macb *bp = netdev_priv(dev);
-
-	dev_dbg(&bp->pdev->dev, "mdio_write %02x:%02x <- %04x\n",
-		phy_id, location, val);
-
-	mutex_lock(&bp->mdio_mutex);
-	macb_enable_mdio(bp);
+	struct macb *bp = bus->priv;
 
 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
 			      | MACB_BF(RW, MACB_MAN_WRITE)
-			      | MACB_BF(PHYA, phy_id)
-			      | MACB_BF(REGA, location)
+			      | MACB_BF(PHYA, mii_id)
+			      | MACB_BF(REGA, regnum)
 			      | MACB_BF(CODE, MACB_MAN_CODE)
-			      | MACB_BF(DATA, val)));
+			      | MACB_BF(DATA, value)));
 
-	wait_for_completion(&bp->mdio_complete);
+	/* wait for end of transfer */
+	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
+		cpu_relax();
 
-	macb_disable_mdio(bp);
-	mutex_unlock(&bp->mdio_mutex);
-}
-
-static int macb_phy_probe(struct macb *bp)
-{
-	int phy_address;
-	u16 phyid1, phyid2;
-
-	for (phy_address = 0; phy_address < 32; phy_address++) {
-		phyid1 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID1);
-		phyid2 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID2);
-
-		if (phyid1 != 0xffff && phyid1 != 0x0000
-		    && phyid2 != 0xffff && phyid2 != 0x0000)
-			break;
-	}
-
-	if (phy_address == 32)
-		return -ENODEV;
-
-	dev_info(&bp->pdev->dev,
-		 "detected PHY at address %d (ID %04x:%04x)\n",
-		 phy_address, phyid1, phyid2);
-
-	bp->mii.phy_id = phy_address;
 	return 0;
 }
 
-static void macb_set_media(struct macb *bp, int media)
+static int macb_mdio_reset(struct mii_bus *bus)
 {
-	u32 reg;
-
-	spin_lock_irq(&bp->lock);
-	reg = macb_readl(bp, NCFGR);
-	reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
-	if (media & (ADVERTISE_100HALF | ADVERTISE_100FULL))
-		reg |= MACB_BIT(SPD);
-	if (media & ADVERTISE_FULL)
-		reg |= MACB_BIT(FD);
-	macb_writel(bp, NCFGR, reg);
-	spin_unlock_irq(&bp->lock);
+	return 0;
 }
 
-static void macb_check_media(struct macb *bp, int ok_to_print, int init_media)
+static void macb_handle_link_change(struct net_device *dev)
 {
-	struct mii_if_info *mii = &bp->mii;
-	unsigned int old_carrier, new_carrier;
-	int advertise, lpa, media, duplex;
+	struct macb *bp = netdev_priv(dev);
+	struct phy_device *phydev = bp->phy_dev;
+	unsigned long flags;
 
-	/* if forced media, go no further */
-	if (mii->force_media)
-		return;
+	int status_change = 0;
 
-	/* check current and old link status */
-	old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0;
-	new_carrier = (unsigned int) mii_link_ok(mii);
+	spin_lock_irqsave(&bp->lock, flags);
 
-	/* if carrier state did not change, assume nothing else did */
-	if (!init_media && old_carrier == new_carrier)
-		return;
+	if (phydev->link) {
+		if ((bp->speed != phydev->speed) ||
+		    (bp->duplex != phydev->duplex)) {
+			u32 reg;
 
-	/* no carrier, nothing much to do */
-	if (!new_carrier) {
-		netif_carrier_off(mii->dev);
-		printk(KERN_INFO "%s: link down\n", mii->dev->name);
-		return;
+			reg = macb_readl(bp, NCFGR);
+			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+
+			if (phydev->duplex)
+				reg |= MACB_BIT(FD);
+			if (phydev->speed)
+				reg |= MACB_BIT(SPD);
+
+			macb_writel(bp, NCFGR, reg);
+
+			bp->speed = phydev->speed;
+			bp->duplex = phydev->duplex;
+			status_change = 1;
+		}
 	}
 
-	/*
-	 * we have carrier, see who's on the other end
-	 */
-	netif_carrier_on(mii->dev);
+	if (phydev->link != bp->link) {
+		if (phydev->link)
+			netif_schedule(dev);
+		else {
+			bp->speed = 0;
+			bp->duplex = -1;
+		}
+		bp->link = phydev->link;
 
-	/* get MII advertise and LPA values */
-	if (!init_media && mii->advertising) {
-		advertise = mii->advertising;
+		status_change = 1;
+	}
+
+	spin_unlock_irqrestore(&bp->lock, flags);
+
+	if (status_change) {
+		if (phydev->link)
+			printk(KERN_INFO "%s: link up (%d/%s)\n",
+			       dev->name, phydev->speed,
+			       DUPLEX_FULL == phydev->duplex ? "Full":"Half");
+		else
+			printk(KERN_INFO "%s: link down\n", dev->name);
+	}
+}
+
+/* based on au1000_eth.c */
+static int macb_mii_probe(struct net_device *dev)
+{
+	struct macb *bp = netdev_priv(dev);
+	struct phy_device *phydev = NULL;
+	struct eth_platform_data *pdata;
+	int phy_addr;
+
+	/* find the first phy */
+	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+		if (bp->mii_bus.phy_map[phy_addr]) {
+			phydev = bp->mii_bus.phy_map[phy_addr];
+			break;
+		}
+	}
+
+	if (!phydev) {
+		printk (KERN_ERR "%s: no PHY found\n", dev->name);
+		return -1;
+	}
+
+	pdata = bp->pdev->dev.platform_data;
+	/* TODO : add pin_irq */
+
+	/* attach the mac to the phy */
+	if (pdata && pdata->is_rmii) {
+		phydev = phy_connect(dev, phydev->dev.bus_id,
+			&macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII);
 	} else {
-		advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
-		mii->advertising = advertise;
+		phydev = phy_connect(dev, phydev->dev.bus_id,
+			&macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII);
 	}
-	lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
 
-	/* figure out media and duplex from advertise and LPA values */
-	media = mii_nway_result(lpa & advertise);
-	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
+	if (IS_ERR(phydev)) {
+		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+		return PTR_ERR(phydev);
+	}
 
-	if (ok_to_print)
-		printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n",
-		       mii->dev->name,
-		       media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10",
-		       duplex ? "full" : "half", lpa);
+	/* mask with MAC supported features */
+	phydev->supported &= PHY_BASIC_FEATURES;
 
-	mii->full_duplex = duplex;
+	phydev->advertising = phydev->supported;
 
-	/* Let the MAC know about the new link state */
-	macb_set_media(bp, media);
+	bp->link = 0;
+	bp->speed = 0;
+	bp->duplex = -1;
+	bp->phy_dev = phydev;
+
+	return 0;
+}
+
+static int macb_mii_init(struct macb *bp)
+{
+	struct eth_platform_data *pdata;
+	int err = -ENXIO, i;
+
+	/* Enable management port */
+	macb_writel(bp, NCR, MACB_BIT(MPE));
+
+	bp->mii_bus.name = "MACB_mii_bus";
+	bp->mii_bus.read = &macb_mdio_read;
+	bp->mii_bus.write = &macb_mdio_write;
+	bp->mii_bus.reset = &macb_mdio_reset;
+	bp->mii_bus.id = bp->pdev->id;
+	bp->mii_bus.priv = bp;
+	bp->mii_bus.dev = &bp->dev->dev;
+	pdata = bp->pdev->dev.platform_data;
+
+	if (pdata)
+		bp->mii_bus.phy_mask = pdata->phy_mask;
+
+	bp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+	if (!bp->mii_bus.irq) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		bp->mii_bus.irq[i] = PHY_POLL;
+
+	platform_set_drvdata(bp->dev, &bp->mii_bus);
+
+	if (mdiobus_register(&bp->mii_bus))
+		goto err_out_free_mdio_irq;
+
+	if (macb_mii_probe(bp->dev) != 0) {
+		goto err_out_unregister_bus;
+	}
+
+	return 0;
+
+err_out_unregister_bus:
+	mdiobus_unregister(&bp->mii_bus);
+err_out_free_mdio_irq:
+	kfree(bp->mii_bus.irq);
+err_out:
+	return err;
 }
 
 static void macb_update_stats(struct macb *bp)
@@ -265,16 +294,6 @@
 		*p += __raw_readl(reg);
 }
 
-static void macb_periodic_task(struct work_struct *work)
-{
-	struct macb *bp = container_of(work, struct macb, periodic_task.work);
-
-	macb_update_stats(bp);
-	macb_check_media(bp, 1, 0);
-
-	schedule_delayed_work(&bp->periodic_task, HZ);
-}
-
 static void macb_tx(struct macb *bp)
 {
 	unsigned int tail;
@@ -519,9 +538,6 @@
 	spin_lock(&bp->lock);
 
 	while (status) {
-		if (status & MACB_BIT(MFD))
-			complete(&bp->mdio_complete);
-
 		/* close possible race with dev_close */
 		if (unlikely(!netif_running(dev))) {
 			macb_writel(bp, IDR, ~0UL);
@@ -535,7 +551,8 @@
 				 * until we have processed the buffers
 				 */
 				macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
-				dev_dbg(&bp->pdev->dev, "scheduling RX softirq\n");
+				dev_dbg(&bp->pdev->dev,
+					"scheduling RX softirq\n");
 				__netif_rx_schedule(dev);
 			}
 		}
@@ -765,7 +782,7 @@
 	macb_writel(bp, TBQP, bp->tx_ring_dma);
 
 	/* Enable TX and RX */
-	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE));
+	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
 
 	/* Enable interrupts */
 	macb_writel(bp, IER, (MACB_BIT(RCOMP)
@@ -776,18 +793,126 @@
 			      | MACB_BIT(TCOMP)
 			      | MACB_BIT(ISR_ROVR)
 			      | MACB_BIT(HRESP)));
+
 }
 
-static void macb_init_phy(struct net_device *dev)
+/*
+ * The hash address register is 64 bits long and takes up two
+ * locations in the memory map.  The least significant bits are stored
+ * in EMAC_HSL and the most significant bits in EMAC_HSH.
+ *
+ * The unicast hash enable and the multicast hash enable bits in the
+ * network configuration register enable the reception of hash matched
+ * frames. The destination address is reduced to a 6 bit index into
+ * the 64 bit hash register using the following hash function.  The
+ * hash function is an exclusive or of every sixth bit of the
+ * destination address.
+ *
+ * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
+ * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
+ * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
+ * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
+ * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
+ * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
+ *
+ * da[0] represents the least significant bit of the first byte
+ * received, that is, the multicast/unicast indicator, and da[47]
+ * represents the most significant bit of the last byte received.  If
+ * the hash index, hi[n], points to a bit that is set in the hash
+ * register then the frame will be matched according to whether the
+ * frame is multicast or unicast.  A multicast match will be signalled
+ * if the multicast hash enable bit is set, da[0] is 1 and the hash
+ * index points to a bit set in the hash register.  A unicast match
+ * will be signalled if the unicast hash enable bit is set, da[0] is 0
+ * and the hash index points to a bit set in the hash register.  To
+ * receive all multicast frames, the hash register should be set with
+ * all ones and the multicast hash enable bit should be set in the
+ * network configuration register.
+ */
+
+static inline int hash_bit_value(int bitnr, __u8 *addr)
 {
+	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
+		return 1;
+	return 0;
+}
+
+/*
+ * Return the hash index value for the specified address.
+ */
+static int hash_get_index(__u8 *addr)
+{
+	int i, j, bitval;
+	int hash_index = 0;
+
+	for (j = 0; j < 6; j++) {
+		for (i = 0, bitval = 0; i < 8; i++)
+			bitval ^= hash_bit_value(i*6 + j, addr);
+
+		hash_index |= (bitval << j);
+	}
+
+	return hash_index;
+}
+
+/*
+ * Add multicast addresses to the internal multicast-hash table.
+ */
+static void macb_sethashtable(struct net_device *dev)
+{
+	struct dev_mc_list *curr;
+	unsigned long mc_filter[2];
+	unsigned int i, bitnr;
 	struct macb *bp = netdev_priv(dev);
 
-	/* Set some reasonable default settings */
-	macb_mdio_write(dev, bp->mii.phy_id, MII_ADVERTISE,
-			ADVERTISE_CSMA | ADVERTISE_ALL);
-	macb_mdio_write(dev, bp->mii.phy_id, MII_BMCR,
-			(BMCR_SPEED100 | BMCR_ANENABLE
-			 | BMCR_ANRESTART | BMCR_FULLDPLX));
+	mc_filter[0] = mc_filter[1] = 0;
+
+	curr = dev->mc_list;
+	for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
+		if (!curr)
+			break;	/* unexpected end of list */
+
+		bitnr = hash_get_index(curr->dmi_addr);
+		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
+	}
+
+	macb_writel(bp, HRB, mc_filter[0]);
+	macb_writel(bp, HRT, mc_filter[1]);
+}
+
+/*
+ * Enable/Disable promiscuous and multicast modes.
+ */
+static void macb_set_rx_mode(struct net_device *dev)
+{
+	unsigned long cfg;
+	struct macb *bp = netdev_priv(dev);
+
+	cfg = macb_readl(bp, NCFGR);
+
+	if (dev->flags & IFF_PROMISC)
+		/* Enable promiscuous mode */
+		cfg |= MACB_BIT(CAF);
+	else
+		/* Disable promiscuous mode */
+		cfg &= ~MACB_BIT(CAF);
+
+	if (dev->flags & IFF_ALLMULTI) {
+		/* Enable all multicast mode */
+		macb_writel(bp, HRB, -1);
+		macb_writel(bp, HRT, -1);
+		cfg |= MACB_BIT(NCFGR_MTI);
+	} else if (dev->mc_count > 0) {
+		/* Enable specific multicasts */
+		macb_sethashtable(dev);
+		cfg |= MACB_BIT(NCFGR_MTI);
+	} else {
+		/* Disable all multicast mode */
+		macb_writel(bp, HRB, 0);
+		macb_writel(bp, HRT, 0);
+		cfg &= ~MACB_BIT(NCFGR_MTI);
+	}
+
+	macb_writel(bp, NCFGR, cfg);
 }
 
 static int macb_open(struct net_device *dev)
@@ -797,6 +922,10 @@
 
 	dev_dbg(&bp->pdev->dev, "open\n");
 
+	/* if the PHY is not yet registered, retry later */
+	if (!bp->phy_dev)
+		return -EAGAIN;
+
 	if (!is_valid_ether_addr(dev->dev_addr))
 		return -EADDRNOTAVAIL;
 
@@ -810,13 +939,12 @@
 
 	macb_init_rings(bp);
 	macb_init_hw(bp);
-	macb_init_phy(dev);
 
-	macb_check_media(bp, 1, 1);
+	/* schedule a link state check */
+	phy_start(bp->phy_dev);
+
 	netif_start_queue(dev);
 
-	schedule_delayed_work(&bp->periodic_task, HZ);
-
 	return 0;
 }
 
@@ -825,10 +953,11 @@
 	struct macb *bp = netdev_priv(dev);
 	unsigned long flags;
 
-	cancel_rearming_delayed_work(&bp->periodic_task);
-
 	netif_stop_queue(dev);
 
+	if (bp->phy_dev)
+		phy_stop(bp->phy_dev);
+
 	spin_lock_irqsave(&bp->lock, flags);
 	macb_reset_hw(bp);
 	netif_carrier_off(dev);
@@ -845,6 +974,9 @@
 	struct net_device_stats *nstat = &bp->stats;
 	struct macb_stats *hwstat = &bp->hw_stats;
 
+	/* read stats from hardware */
+	macb_update_stats(bp);
+
 	/* Convert HW stats into netdevice stats */
 	nstat->rx_errors = (hwstat->rx_fcs_errors +
 			    hwstat->rx_align_errors +
@@ -882,18 +1014,27 @@
 static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct macb *bp = netdev_priv(dev);
+	struct phy_device *phydev = bp->phy_dev;
 
-	return mii_ethtool_gset(&bp->mii, cmd);
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_ethtool_gset(phydev, cmd);
 }
 
 static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct macb *bp = netdev_priv(dev);
+	struct phy_device *phydev = bp->phy_dev;
 
-	return mii_ethtool_sset(&bp->mii, cmd);
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_ethtool_sset(phydev, cmd);
 }
 
-static void macb_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+static void macb_get_drvinfo(struct net_device *dev,
+			     struct ethtool_drvinfo *info)
 {
 	struct macb *bp = netdev_priv(dev);
 
@@ -902,104 +1043,34 @@
 	strcpy(info->bus_info, bp->pdev->dev.bus_id);
 }
 
-static int macb_nway_reset(struct net_device *dev)
-{
-	struct macb *bp = netdev_priv(dev);
-	return mii_nway_restart(&bp->mii);
-}
-
 static struct ethtool_ops macb_ethtool_ops = {
 	.get_settings		= macb_get_settings,
 	.set_settings		= macb_set_settings,
 	.get_drvinfo		= macb_get_drvinfo,
-	.nway_reset		= macb_nway_reset,
 	.get_link		= ethtool_op_get_link,
 };
 
 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct macb *bp = netdev_priv(dev);
+	struct phy_device *phydev = bp->phy_dev;
 
 	if (!netif_running(dev))
 		return -EINVAL;
 
-	return generic_mii_ioctl(&bp->mii, if_mii(rq), cmd, NULL);
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_mii_ioctl(phydev, if_mii(rq), cmd);
 }
 
-static ssize_t macb_mii_show(const struct device *_dev, char *buf,
-			unsigned long addr)
-{
-	struct net_device *dev = to_net_dev(_dev);
-	struct macb *bp = netdev_priv(dev);
-	ssize_t ret = -EINVAL;
-
-	if (netif_running(dev)) {
-		int value;
-		value = macb_mdio_read(dev, bp->mii.phy_id, addr);
-		ret = sprintf(buf, "0x%04x\n", (uint16_t)value);
-	}
-
-	return ret;
-}
-
-#define MII_ENTRY(name, addr)					\
-static ssize_t show_##name(struct device *_dev,			\
-			   struct device_attribute *attr,	\
-			   char *buf)				\
-{								\
-	return macb_mii_show(_dev, buf, addr);			\
-}								\
-static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
-
-MII_ENTRY(bmcr, MII_BMCR);
-MII_ENTRY(bmsr, MII_BMSR);
-MII_ENTRY(physid1, MII_PHYSID1);
-MII_ENTRY(physid2, MII_PHYSID2);
-MII_ENTRY(advertise, MII_ADVERTISE);
-MII_ENTRY(lpa, MII_LPA);
-MII_ENTRY(expansion, MII_EXPANSION);
-
-static struct attribute *macb_mii_attrs[] = {
-	&dev_attr_bmcr.attr,
-	&dev_attr_bmsr.attr,
-	&dev_attr_physid1.attr,
-	&dev_attr_physid2.attr,
-	&dev_attr_advertise.attr,
-	&dev_attr_lpa.attr,
-	&dev_attr_expansion.attr,
-	NULL,
-};
-
-static struct attribute_group macb_mii_group = {
-	.name	= "mii",
-	.attrs	= macb_mii_attrs,
-};
-
-static void macb_unregister_sysfs(struct net_device *net)
-{
-	struct device *_dev = &net->dev;
-
-	sysfs_remove_group(&_dev->kobj, &macb_mii_group);
-}
-
-static int macb_register_sysfs(struct net_device *net)
-{
-	struct device *_dev = &net->dev;
-	int ret;
-
-	ret = sysfs_create_group(&_dev->kobj, &macb_mii_group);
-	if (ret)
-		printk(KERN_WARNING
-		       "%s: sysfs mii attribute registration failed: %d\n",
-		       net->name, ret);
-	return ret;
-}
 static int __devinit macb_probe(struct platform_device *pdev)
 {
 	struct eth_platform_data *pdata;
 	struct resource *regs;
 	struct net_device *dev;
 	struct macb *bp;
+	struct phy_device *phydev;
 	unsigned long pclk_hz;
 	u32 config;
 	int err = -ENXIO;
@@ -1073,6 +1144,7 @@
 	dev->stop = macb_close;
 	dev->hard_start_xmit = macb_start_xmit;
 	dev->get_stats = macb_get_stats;
+	dev->set_multicast_list = macb_set_rx_mode;
 	dev->do_ioctl = macb_ioctl;
 	dev->poll = macb_poll;
 	dev->weight = 64;
@@ -1080,10 +1152,6 @@
 
 	dev->base_addr = regs->start;
 
-	INIT_DELAYED_WORK(&bp->periodic_task, macb_periodic_task);
-	mutex_init(&bp->mdio_mutex);
-	init_completion(&bp->mdio_complete);
-
 	/* Set MII management clock divider */
 	pclk_hz = clk_get_rate(bp->pclk);
 	if (pclk_hz <= 20000000)
@@ -1096,20 +1164,9 @@
 		config = MACB_BF(CLK, MACB_CLK_DIV64);
 	macb_writel(bp, NCFGR, config);
 
-	bp->mii.dev = dev;
-	bp->mii.mdio_read = macb_mdio_read;
-	bp->mii.mdio_write = macb_mdio_write;
-	bp->mii.phy_id_mask = 0x1f;
-	bp->mii.reg_num_mask = 0x1f;
-
 	macb_get_hwaddr(bp);
-	err = macb_phy_probe(bp);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to detect PHY, aborting.\n");
-		goto err_out_free_irq;
-	}
-
 	pdata = pdev->dev.platform_data;
+
 	if (pdata && pdata->is_rmii)
 #if defined(CONFIG_ARCH_AT91)
 		macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) );
@@ -1131,9 +1188,11 @@
 		goto err_out_free_irq;
 	}
 
-	platform_set_drvdata(pdev, dev);
+	if (macb_mii_init(bp) != 0)
+		goto err_out_unregister_netdev;
 
-	macb_register_sysfs(dev);
+	platform_set_drvdata(pdev, dev);
 
 	printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d "
 	       "(%02x:%02x:%02x:%02x:%02x:%02x)\n",
@@ -1141,8 +1200,15 @@
 	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
 	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
 
+	phydev = bp->phy_dev;
+	printk(KERN_INFO "%s: attached PHY driver [%s] "
+		"(mii_bus:phy_addr=%s, irq=%d)\n",
+		dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq);
+
 	return 0;
 
+err_out_unregister_netdev:
+	unregister_netdev(dev);
 err_out_free_irq:
 	free_irq(dev->irq, dev);
 err_out_iounmap:
@@ -1153,7 +1219,9 @@
 	clk_put(bp->hclk);
 #endif
 	clk_disable(bp->pclk);
+#ifndef CONFIG_ARCH_AT91
 err_out_put_pclk:
+#endif
 	clk_put(bp->pclk);
 err_out_free_dev:
 	free_netdev(dev);
@@ -1171,7 +1239,8 @@
 
 	if (dev) {
 		bp = netdev_priv(dev);
-		macb_unregister_sysfs(dev);
+		mdiobus_unregister(&bp->mii_bus);
+		kfree(bp->mii_bus.irq);
 		unregister_netdev(dev);
 		free_irq(dev->irq, dev);
 		iounmap(bp->regs);
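A note on the hash filter documented in the comment block added above: the index is just the XOR of every sixth bit of the destination address, so it is easy to check with a small user-space harness. The sketch below copies hash_bit_value() and hash_get_index() as added by this patch and prints the index for one sample address; the address and the HRB/HRT mapping shown are illustrative only, not part of the patch.

#include <stdio.h>

typedef unsigned char __u8;

/* same hash helpers as added to macb.c above */
static int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}

int main(void)
{
	/* 01:00:5e:00:00:01 -- the all-hosts IPv4 multicast group */
	__u8 mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	int idx = hash_get_index(mac);

	/* worked by hand this comes to 38, i.e. bit 6 of HRT:
	 * idx >> 5 selects HRB/HRT, idx & 31 selects the bit */
	printf("index=%d -> %s bit %d\n", idx,
	       (idx >> 5) ? "HRT" : "HRB", idx & 31);
	return 0;
}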
diff --git a/drivers/net/macb.h b/drivers/net/macb.h
index b3bb218..4e3283e 100644
--- a/drivers/net/macb.h
+++ b/drivers/net/macb.h
@@ -383,11 +383,11 @@
 
 	unsigned int		rx_pending, tx_pending;
 
-	struct delayed_work	periodic_task;
-
-	struct mutex		mdio_mutex;
-	struct completion	mdio_complete;
-	struct mii_if_info	mii;
+	struct mii_bus		mii_bus;
+	struct phy_device	*phy_dev;
+	unsigned int 		link;
+	unsigned int 		speed;
+	unsigned int 		duplex;
 };
 
 #endif /* _MACB_H */
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 1bb088a..6b32ec9 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -30,41 +30,133 @@
  * SOFTWARE.
  */
 
+#include <linux/workqueue.h>
+
 #include "mlx4.h"
 
-void mlx4_handle_catas_err(struct mlx4_dev *dev)
+enum {
+	MLX4_CATAS_POLL_INTERVAL	= 5 * HZ,
+};
+
+static DEFINE_SPINLOCK(catas_lock);
+
+static LIST_HEAD(catas_list);
+static struct workqueue_struct *catas_wq;
+static struct work_struct catas_work;
+
+static int internal_err_reset = 1;
+module_param(internal_err_reset, int, 0644);
+MODULE_PARM_DESC(internal_err_reset,
+		 "Reset device on internal errors if non-zero (default 1)");
+
+static void dump_err_buf(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
 	int i;
 
-	mlx4_err(dev, "Catastrophic error detected:\n");
+	mlx4_err(dev, "Internal error detected:\n");
 	for (i = 0; i < priv->fw.catas_size; ++i)
 		mlx4_err(dev, "  buf[%02x]: %08x\n",
 			 i, swab32(readl(priv->catas_err.map + i)));
-
-	mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
 }
 
-void mlx4_map_catas_buf(struct mlx4_dev *dev)
+static void poll_catas(unsigned long dev_ptr)
+{
+	struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	if (readl(priv->catas_err.map)) {
+		dump_err_buf(dev);
+
+		mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
+
+		if (internal_err_reset) {
+			spin_lock(&catas_lock);
+			list_add(&priv->catas_err.list, &catas_list);
+			spin_unlock(&catas_lock);
+
+			queue_work(catas_wq, &catas_work);
+		}
+	} else {
+		mod_timer(&priv->catas_err.timer,
+			  round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
+	}
+}
+
+static void catas_reset(struct work_struct *work)
+{
+	struct mlx4_priv *priv, *tmppriv;
+	struct mlx4_dev *dev;
+
+	LIST_HEAD(tlist);
+	int ret;
+
+	spin_lock_irq(&catas_lock);
+	list_splice_init(&catas_list, &tlist);
+	spin_unlock_irq(&catas_lock);
+
+	list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
+		ret = mlx4_restart_one(priv->dev.pdev);
+		dev = &priv->dev;
+		if (ret)
+			mlx4_err(dev, "Reset failed (%d)\n", ret);
+		else
+			mlx4_dbg(dev, "Reset succeeded\n");
+	}
+}
+
+void mlx4_start_catas_poll(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	unsigned long addr;
 
+	INIT_LIST_HEAD(&priv->catas_err.list);
+	init_timer(&priv->catas_err.timer);
+	priv->catas_err.map = NULL;
+
 	addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
 		priv->fw.catas_offset;
 
 	priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
-	if (!priv->catas_err.map)
-		mlx4_warn(dev, "Failed to map catastrophic error buffer at 0x%lx\n",
+	if (!priv->catas_err.map) {
+		mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
 			  addr);
+		return;
+	}
 
+	priv->catas_err.timer.data     = (unsigned long) dev;
+	priv->catas_err.timer.function = poll_catas;
+	priv->catas_err.timer.expires  =
+		round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
+	add_timer(&priv->catas_err.timer);
 }
 
-void mlx4_unmap_catas_buf(struct mlx4_dev *dev)
+void mlx4_stop_catas_poll(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
+	del_timer_sync(&priv->catas_err.timer);
+
 	if (priv->catas_err.map)
 		iounmap(priv->catas_err.map);
+
+	spin_lock_irq(&catas_lock);
+	list_del(&priv->catas_err.list);
+	spin_unlock_irq(&catas_lock);
+}
+
+int __init mlx4_catas_init(void)
+{
+	INIT_WORK(&catas_work, catas_reset);
+
+	catas_wq = create_singlethread_workqueue("mlx4_err");
+	if (!catas_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx4_catas_cleanup(void)
+{
+	destroy_workqueue(catas_wq);
 }
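The catas rework above splits the error handling across two contexts: poll_catas() runs from a timer, i.e. softirq context where sleeping is forbidden, while mlx4_restart_one() re-probes the PCI device and must run in process context, hence the workqueue hand-off. A minimal sketch of that split, with purely illustrative names (check_for_error() is hypothetical):

static struct timer_list poll_timer;
static struct work_struct recover_work;

static void recover(struct work_struct *work)
{
	/* process context: sleeping calls such as re-probing are fine */
}

static void poll_fn(unsigned long data)
{
	/* softirq context: only test and defer, never sleep */
	if (check_for_error())
		schedule_work(&recover_work);
	else
		mod_timer(&poll_timer, round_jiffies(jiffies + 5 * HZ));
}

static void start_polling(void *dev)
{
	INIT_WORK(&recover_work, recover);

	init_timer(&poll_timer);
	poll_timer.data     = (unsigned long) dev;
	poll_timer.function = poll_fn;
	poll_timer.expires  = round_jiffies(jiffies + 5 * HZ);
	add_timer(&poll_timer);
}

The driver itself queues the reset on its own single-threaded "mlx4_err" workqueue rather than the shared one, so a slow device reset cannot hold up unrelated scheduled work.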
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 27a82ce..2095c84 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -89,14 +89,12 @@
 			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
 			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
 			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
-			       (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
 			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
 			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
 			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
 			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
 			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
 			       (1ull << MLX4_EVENT_TYPE_CMD))
-#define MLX4_CATAS_EVENT_MASK  (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)
 
 struct mlx4_eqe {
 	u8			reserved1;
@@ -264,7 +262,7 @@
 
 	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
 
-	for (i = 0; i < MLX4_EQ_CATAS; ++i)
+	for (i = 0; i < MLX4_NUM_EQ; ++i)
 		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
 
 	return IRQ_RETVAL(work);
@@ -281,14 +279,6 @@
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr)
-{
-	mlx4_handle_catas_err(dev_ptr);
-
-	/* MSI-X vectors always belong to us */
-	return IRQ_HANDLED;
-}
-
 static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
 			int eq_num)
 {
@@ -490,11 +480,9 @@
 
 	if (eq_table->have_irq)
 		free_irq(dev->pdev->irq, dev);
-	for (i = 0; i < MLX4_EQ_CATAS; ++i)
+	for (i = 0; i < MLX4_NUM_EQ; ++i)
 		if (eq_table->eq[i].have_irq)
 			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
-	if (eq_table->eq[MLX4_EQ_CATAS].have_irq)
-		free_irq(eq_table->eq[MLX4_EQ_CATAS].irq, dev);
 }
 
 static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev)
@@ -598,32 +586,19 @@
 	if (dev->flags & MLX4_FLAG_MSI_X) {
 		static const char *eq_name[] = {
 			[MLX4_EQ_COMP]  = DRV_NAME " (comp)",
-			[MLX4_EQ_ASYNC] = DRV_NAME " (async)",
-			[MLX4_EQ_CATAS] = DRV_NAME " (catas)"
+			[MLX4_EQ_ASYNC] = DRV_NAME " (async)"
 		};
 
-		err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS,
-				     &priv->eq_table.eq[MLX4_EQ_CATAS]);
-		if (err)
-			goto err_out_async;
-
-		for (i = 0; i < MLX4_EQ_CATAS; ++i) {
+		for (i = 0; i < MLX4_NUM_EQ; ++i) {
 			err = request_irq(priv->eq_table.eq[i].irq,
 					  mlx4_msi_x_interrupt,
 					  0, eq_name[i], priv->eq_table.eq + i);
 			if (err)
-				goto err_out_catas;
+				goto err_out_async;
 
 			priv->eq_table.eq[i].have_irq = 1;
 		}
 
-		err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq,
-				  mlx4_catas_interrupt, 0,
-				  eq_name[MLX4_EQ_CATAS], dev);
-		if (err)
-			goto err_out_catas;
-
-		priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1;
 	} else {
 		err = request_irq(dev->pdev->irq, mlx4_interrupt,
 				  IRQF_SHARED, DRV_NAME, dev);
@@ -639,22 +614,11 @@
 		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
 			   priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
 
-	for (i = 0; i < MLX4_EQ_CATAS; ++i)
+	for (i = 0; i < MLX4_NUM_EQ; ++i)
 		eq_set_ci(&priv->eq_table.eq[i], 1);
 
-	if (dev->flags & MLX4_FLAG_MSI_X) {
-		err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0,
-				  priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
-		if (err)
-			mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n",
-				  priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err);
-	}
-
 	return 0;
 
-err_out_catas:
-	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
-
 err_out_async:
 	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
 
@@ -675,19 +639,13 @@
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int i;
 
-	if (dev->flags & MLX4_FLAG_MSI_X)
-		mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1,
-			    priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
-
 	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
 		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
 
 	mlx4_free_irqs(dev);
 
-	for (i = 0; i < MLX4_EQ_CATAS; ++i)
+	for (i = 0; i < MLX4_NUM_EQ; ++i)
 		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
-	if (dev->flags & MLX4_FLAG_MSI_X)
-		mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
 
 	mlx4_unmap_clr_int(dev);
 
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 9ae951b..be5d9e9 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -142,6 +142,7 @@
 		mlx4_add_device(intf, priv);
 
 	mutex_unlock(&intf_mutex);
+	mlx4_start_catas_poll(dev);
 
 	return 0;
 }
@@ -151,6 +152,7 @@
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_interface *intf;
 
+	mlx4_stop_catas_poll(dev);
 	mutex_lock(&intf_mutex);
 
 	list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index a4f2e04..4dc9dc1 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -78,7 +78,7 @@
 static struct mlx4_profile default_profile = {
 	.num_qp		= 1 << 16,
 	.num_srq	= 1 << 16,
-	.rdmarc_per_qp	= 4,
+	.rdmarc_per_qp	= 1 << 4,
 	.num_cq		= 1 << 16,
 	.num_mcg	= 1 << 13,
 	.num_mpt	= 1 << 17,
@@ -583,13 +583,11 @@
 		goto err_pd_table_free;
 	}
 
-	mlx4_map_catas_buf(dev);
-
 	err = mlx4_init_eq_table(dev);
 	if (err) {
 		mlx4_err(dev, "Failed to initialize "
 			 "event queue table, aborting.\n");
-		goto err_catas_buf;
+		goto err_mr_table_free;
 	}
 
 	err = mlx4_cmd_use_events(dev);
@@ -659,8 +657,7 @@
 err_eq_table_free:
 	mlx4_cleanup_eq_table(dev);
 
-err_catas_buf:
-	mlx4_unmap_catas_buf(dev);
+err_mr_table_free:
 	mlx4_cleanup_mr_table(dev);
 
 err_pd_table_free:
@@ -836,9 +833,6 @@
 	mlx4_cleanup_cq_table(dev);
 	mlx4_cmd_use_polling(dev);
 	mlx4_cleanup_eq_table(dev);
-
-	mlx4_unmap_catas_buf(dev);
-
 	mlx4_cleanup_mr_table(dev);
 	mlx4_cleanup_pd_table(dev);
 	mlx4_cleanup_uar_table(dev);
@@ -885,9 +879,6 @@
 		mlx4_cleanup_cq_table(dev);
 		mlx4_cmd_use_polling(dev);
 		mlx4_cleanup_eq_table(dev);
-
-		mlx4_unmap_catas_buf(dev);
-
 		mlx4_cleanup_mr_table(dev);
 		mlx4_cleanup_pd_table(dev);
 
@@ -908,6 +899,12 @@
 	}
 }
 
+int mlx4_restart_one(struct pci_dev *pdev)
+{
+	mlx4_remove_one(pdev);
+	return mlx4_init_one(pdev, NULL);
+}
+
 static struct pci_device_id mlx4_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
 	{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
@@ -930,6 +927,10 @@
 {
 	int ret;
 
+	ret = mlx4_catas_init();
+	if (ret)
+		return ret;
+
 	ret = pci_register_driver(&mlx4_driver);
 	return ret < 0 ? ret : 0;
 }
@@ -937,6 +938,7 @@
 static void __exit mlx4_cleanup(void)
 {
 	pci_unregister_driver(&mlx4_driver);
+	mlx4_catas_cleanup();
 }
 
 module_init(mlx4_init);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index d9c91a7..be304a7 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -39,6 +39,7 @@
 
 #include <linux/mutex.h>
 #include <linux/radix-tree.h>
+#include <linux/timer.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -67,7 +68,6 @@
 enum {
 	MLX4_EQ_ASYNC,
 	MLX4_EQ_COMP,
-	MLX4_EQ_CATAS,
 	MLX4_NUM_EQ
 };
 
@@ -248,7 +248,8 @@
 
 struct mlx4_catas_err {
 	u32 __iomem	       *map;
-	int			size;
+	struct timer_list	timer;
+	struct list_head	list;
 };
 
 struct mlx4_priv {
@@ -311,9 +312,11 @@
 void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
 
-void mlx4_map_catas_buf(struct mlx4_dev *dev);
-void mlx4_unmap_catas_buf(struct mlx4_dev *dev);
-
+void mlx4_start_catas_poll(struct mlx4_dev *dev);
+void mlx4_stop_catas_poll(struct mlx4_dev *dev);
+int mlx4_catas_init(void);
+void mlx4_catas_cleanup(void);
+int mlx4_restart_one(struct pci_dev *pdev);
 int mlx4_register_device(struct mlx4_dev *dev);
 void mlx4_unregister_device(struct mlx4_dev *dev);
 void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index e1732c1..deca653 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1060,7 +1060,6 @@
 	struct myri10ge_tx_buf *tx = &mgp->tx;
 	struct sk_buff *skb;
 	int idx, len;
-	int limit = 0;
 
 	while (tx->pkt_done != mcp_index) {
 		idx = tx->done & tx->mask;
@@ -1091,11 +1090,6 @@
 							      bus), len,
 					       PCI_DMA_TODEVICE);
 		}
-
-		/* limit potential for livelock by only handling
-		 * 2 full tx rings per call */
-		if (unlikely(++limit > 2 * tx->mask))
-			break;
 	}
 	/* start the queue if we've stopped it */
 	if (netif_queue_stopped(mgp->dev)
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 3450051..6bb48ba 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -671,7 +671,7 @@
 #define NATSEMI_CREATE_FILE(_dev, _name) \
          device_create_file(&_dev->dev, &dev_attr_##_name)
 #define NATSEMI_REMOVE_FILE(_dev, _name) \
-         device_create_file(&_dev->dev, &dev_attr_##_name)
+         device_remove_file(&_dev->dev, &dev_attr_##_name)
 
 NATSEMI_ATTR(dspcfg_workaround);
 
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 995c0a5..cfdeaf7 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -669,10 +669,15 @@
 static int ne2k_pci_resume (struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata (pdev);
+	int rc;
 
 	pci_set_power_state(pdev, 0);
 	pci_restore_state(pdev);
-	pci_enable_device(pdev);
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+
 	NS8390_init(dev, 1);
 	netif_device_attach(dev);
 
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 3d5b423..22a3b3d 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -670,14 +670,10 @@
 
 	PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
 
-	if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI) {
+	if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) {
 		dev->flags |= IFF_PROMISC;
 		outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
 		PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
-	} else if (dev->mc_list) {
-		/* Sorry, multicast not supported */
-		PRINTK((KERN_DEBUG "%s: No multicast, entering broadcast mode\n", dev->name));
-		outb(RMD_BROADCAST, EDLC_RMODE);
 	} else {
 		PRINTK((KERN_DEBUG "%s: Entering broadcast mode\n", dev->name));
 		outb(RMD_BROADCAST, EDLC_RMODE);  /* Disable promiscuous mode, use normal mode */
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 104aab3..ea80e6c 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1582,7 +1582,7 @@
 	else
 		and_mask &= ~(RFCR_AAU | RFCR_AAM);
 
-	if (ndev->flags & IFF_ALLMULTI)
+	if (ndev->flags & IFF_ALLMULTI || ndev->mc_count)
 		or_mask |= RFCR_AAM;
 	else
 		and_mask &= ~RFCR_AAM;
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 0d1c7a4..ea9414c4 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -147,7 +147,7 @@
     DEBUG(0, "com20020_attach()\n");
 
     /* Create new network device */
-    info = kmalloc(sizeof(struct com20020_dev_t), GFP_KERNEL);
+    info = kzalloc(sizeof(struct com20020_dev_t), GFP_KERNEL);
     if (!info)
 	goto fail_alloc_info;
 
@@ -155,7 +155,6 @@
     if (!dev)
 	goto fail_alloc_dev;
 
-    memset(info, 0, sizeof(struct com20020_dev_t));
     lp = dev->priv;
     lp->timeout = timeout;
     lp->backplane = backplane;
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 4ecb8ca..4eafa4f 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -146,9 +146,8 @@
     DEBUG(0, "ibmtr_attach()\n");
 
     /* Create new token-ring device */
-    info = kmalloc(sizeof(*info), GFP_KERNEL); 
+    info = kzalloc(sizeof(*info), GFP_KERNEL);
     if (!info) return -ENOMEM;
-    memset(info,0,sizeof(*info));
     dev = alloc_trdev(sizeof(struct tok_info));
     if (!dev) {
 	kfree(info);
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 596222b..6a53856 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -21,6 +21,10 @@
 /* Vitesse Extended Control Register 1 */
 #define MII_VSC8244_EXT_CON1           0x17
 #define MII_VSC8244_EXTCON1_INIT       0x0000
+#define MII_VSC8244_EXTCON1_TX_SKEW_MASK	0x0c00
+#define MII_VSC8244_EXTCON1_RX_SKEW_MASK	0x0300
+#define MII_VSC8244_EXTCON1_TX_SKEW	0x0800
+#define MII_VSC8244_EXTCON1_RX_SKEW	0x0200
 
 /* Vitesse Interrupt Mask Register */
 #define MII_VSC8244_IMASK		0x19
@@ -39,7 +43,7 @@
 
 /* Vitesse Auxiliary Control/Status Register */
 #define MII_VSC8244_AUX_CONSTAT        	0x1c
-#define MII_VSC8244_AUXCONSTAT_INIT    	0x0004
+#define MII_VSC8244_AUXCONSTAT_INIT    	0x0000
 #define MII_VSC8244_AUXCONSTAT_DUPLEX  	0x0020
 #define MII_VSC8244_AUXCONSTAT_SPEED   	0x0018
 #define MII_VSC8244_AUXCONSTAT_GBIT    	0x0010
@@ -51,6 +55,7 @@
 
 static int vsc824x_config_init(struct phy_device *phydev)
 {
+	int extcon;
 	int err;
 
 	err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT,
@@ -58,14 +63,34 @@
 	if (err < 0)
 		return err;
 
-	err = phy_write(phydev, MII_VSC8244_EXT_CON1,
-			MII_VSC8244_EXTCON1_INIT);
+	extcon = phy_read(phydev, MII_VSC8244_EXT_CON1);
+
+	if (extcon < 0)
+		return extcon;
+
+	extcon &= ~(MII_VSC8244_EXTCON1_TX_SKEW_MASK |
+			MII_VSC8244_EXTCON1_RX_SKEW_MASK);
+
+	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+		extcon |= (MII_VSC8244_EXTCON1_TX_SKEW |
+				MII_VSC8244_EXTCON1_RX_SKEW);
+
+	err = phy_write(phydev, MII_VSC8244_EXT_CON1, extcon);
+
 	return err;
 }
 
 static int vsc824x_ack_interrupt(struct phy_device *phydev)
 {
-	int err = phy_read(phydev, MII_VSC8244_ISTAT);
+	int err = 0;
+	
+	/*
+	 * Don't bother to ACK the interrupts if interrupts
+	 * are disabled.  The 824x cannot clear the interrupts
+	 * if they are disabled.
+	 */
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		err = phy_read(phydev, MII_VSC8244_ISTAT);
 
 	return (err < 0) ? err : 0;
 }
@@ -77,8 +102,19 @@
 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
 		err = phy_write(phydev, MII_VSC8244_IMASK,
 				MII_VSC8244_IMASK_MASK);
-	else
+	else {
+		/*
+		 * The Vitesse PHY cannot clear the interrupts once
+		 * they have been disabled, so we clear them first.
+		 */
+		err = phy_read(phydev, MII_VSC8244_ISTAT);
+
+		if (err < 0)
+			return err;
+
 		err = phy_write(phydev, MII_VSC8244_IMASK, 0);
+	}
+
 	return err;
 }
 
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index caabbc4..27f5b90 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -159,12 +159,11 @@
 	int err;
 
 	err = -ENOMEM;
-	ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
 	if (ap == 0)
 		goto out;
 
 	/* initialize the asyncppp structure */
-	memset(ap, 0, sizeof(*ap));
 	ap->tty = tty;
 	ap->mru = PPP_MRU;
 	spin_lock_init(&ap->xmit_lock);
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 72c8d66..eb98b66 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -121,12 +121,11 @@
 	if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
 		return NULL;
 
-	state = kmalloc(sizeof(*state),
+	state = kzalloc(sizeof(*state),
 						     GFP_KERNEL);
 	if (state == NULL)
 		return NULL;
 
-	memset (state, 0, sizeof (struct ppp_deflate_state));
 	state->strm.next_in   = NULL;
 	state->w_size         = w_size;
 	state->strm.workspace = vmalloc(zlib_deflate_workspacesize());
@@ -341,11 +340,10 @@
 	if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
 		return NULL;
 
-	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
 	if (state == NULL)
 		return NULL;
 
-	memset (state, 0, sizeof (struct ppp_deflate_state));
 	state->w_size         = w_size;
 	state->strm.next_out  = NULL;
 	state->strm.workspace = kmalloc(zlib_inflate_workspacesize(),
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 3ef0092..ef3325b 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2684,8 +2684,7 @@
 	if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
 		printk(KERN_ERR "PPP: removing module but units remain!\n");
 	cardmap_destroy(&all_ppp_units);
-	if (unregister_chrdev(PPP_MAJOR, "ppp") != 0)
-		printk(KERN_ERR "PPP: failed to unregister PPP device\n");
+	unregister_chrdev(PPP_MAJOR, "ppp");
 	device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
 	class_destroy(ppp_class);
 }
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index d5bdd25..f79cf87 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -200,11 +200,10 @@
 	    || options[0] != CI_MPPE || options[1] != CILEN_MPPE)
 		goto out;
 
-	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
 	if (state == NULL)
 		goto out;
 
-	memset(state, 0, sizeof(*state));
 
 	state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(state->arc4)) {
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 5918fab..ce64032 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -207,13 +207,12 @@
 	struct syncppp *ap;
 	int err;
 
-	ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
 	err = -ENOMEM;
 	if (ap == 0)
 		goto out;
 
 	/* initialize the syncppp structure */
-	memset(ap, 0, sizeof(*ap));
 	ap->tty = tty;
 	ap->mru = PPP_MRU;
 	spin_lock_init(&ap->xmit_lock);
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 5891a0f..f871760 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -824,6 +824,7 @@
 	struct pppol2tp_session *session;
 	struct pppol2tp_tunnel *tunnel;
 	struct udphdr *uh;
+	unsigned int len;
 
 	error = -ENOTCONN;
 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -912,14 +913,15 @@
 	}
 
 	/* Queue the packet to IP for output */
+	len = skb->len;
 	error = ip_queue_xmit(skb, 1);
 
 	/* Update stats */
 	if (error >= 0) {
 		tunnel->stats.tx_packets++;
-		tunnel->stats.tx_bytes += skb->len;
+		tunnel->stats.tx_bytes += len;
 		session->stats.tx_packets++;
-		session->stats.tx_bytes += skb->len;
+		session->stats.tx_bytes += len;
 	} else {
 		tunnel->stats.tx_errors++;
 		session->stats.tx_errors++;
@@ -958,6 +960,7 @@
 	__wsum csum = 0;
 	struct sk_buff *skb2 = NULL;
 	struct udphdr *uh;
+	unsigned int len;
 
 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
 		goto abort;
@@ -1046,18 +1049,25 @@
 		printk("\n");
 	}
 
+	memset(&(IPCB(skb2)->opt), 0, sizeof(IPCB(skb2)->opt));
+	IPCB(skb2)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+			       IPSKB_REROUTED);
+	nf_reset(skb2);
+
 	/* Get routing info from the tunnel socket */
+	dst_release(skb2->dst);
 	skb2->dst = sk_dst_get(sk_tun);
 
 	/* Queue the packet to IP for output */
+	len = skb2->len;
 	rc = ip_queue_xmit(skb2, 1);
 
 	/* Update stats */
 	if (rc >= 0) {
 		tunnel->stats.tx_packets++;
-		tunnel->stats.tx_bytes += skb2->len;
+		tunnel->stats.tx_bytes += len;
 		session->stats.tx_packets++;
-		session->stats.tx_bytes += skb2->len;
+		session->stats.tx_bytes += len;
 	} else {
 		tunnel->stats.tx_errors++;
 		session->stats.tx_errors++;
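The new len variables above exist because ip_queue_xmit() hands the skb to the IP layer, which may free it before the call returns; reading skb->len afterwards for the byte counters would be a use-after-free. The pattern in isolation (a sketch only; the stats pointer is illustrative):

	unsigned int len;
	int rc;

	len = skb->len;			/* capture before the skb leaves our hands */
	rc = ip_queue_xmit(skb, 1);	/* the skb may already be freed here */

	if (rc >= 0)
		stats->tx_bytes += len;	/* use the saved length, never skb->len */
	else
		stats->tx_errors++;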
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 982a901..bb6896a 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2338,7 +2338,7 @@
 {
 	struct skb_shared_info *info = skb_shinfo(skb);
 	unsigned int cur_frag, entry;
-	struct TxDesc *txd;
+	struct TxDesc * uninitialized_var(txd);
 
 	entry = tp->cur_tx;
 	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 58bbfdd..afef6c0 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -796,12 +796,14 @@
 	struct mac_info *mac_control;
 	struct config_param *config;
 	int lst_size, lst_per_page;
-	struct net_device *dev = nic->dev;
+	struct net_device *dev;
 	int page_num = 0;
 
 	if (!nic)
 		return;
 
+	dev = nic->dev;
+
 	mac_control = &nic->mac_control;
 	config = &nic->config;
 
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
index 451486b..7dae4d4 100644
--- a/drivers/net/saa9730.c
+++ b/drivers/net/saa9730.c
@@ -940,15 +940,14 @@
 		       CAM_CONTROL_GROUP_ACC | CAM_CONTROL_BROAD_ACC,
 		       &lp->lan_saa9730_regs->CamCtl);
 	} else {
-		if (dev->flags & IFF_ALLMULTI) {
+		if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
 			/* accept all multicast packets */
-			writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC |
-			       CAM_CONTROL_BROAD_ACC,
-			       &lp->lan_saa9730_regs->CamCtl);
-		} else {
 			/*
 			 * Will handle the multicast stuff later. -carstenl
 			 */
+			writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC |
+			       CAM_CONTROL_BROAD_ACC,
+			       &lp->lan_saa9730_regs->CamCtl);
 		}
 	}
 
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
index e886e8d..4c3d98f 100644
--- a/drivers/net/shaper.c
+++ b/drivers/net/shaper.c
@@ -600,10 +600,9 @@
 		return -ENODEV;
 
 	alloc_size = sizeof(*dev) * shapers;
-	devs = kmalloc(alloc_size, GFP_KERNEL);
+	devs = kzalloc(alloc_size, GFP_KERNEL);
 	if (!devs)
 		return -ENOMEM;
-	memset(devs, 0, alloc_size);
 
 	for (i = 0; i < shapers; i++) {
 
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index 8a667c1..b801e3b 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -12,6 +12,7 @@
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
+#include <linux/mutex.h>
 
 #include <asm/vio.h>
 #include <asm/ldc.h>
@@ -497,6 +498,8 @@
 		vio_link_state_change(vio, event);
 		spin_unlock_irqrestore(&vio->lock, flags);
 
+		if (event == LDC_EVENT_RESET)
+			vio_port_up(vio);
 		return;
 	}
 
@@ -875,6 +878,115 @@
 	return err;
 }
 
+static LIST_HEAD(vnet_list);
+static DEFINE_MUTEX(vnet_list_mutex);
+
+static struct vnet * __devinit vnet_new(const u64 *local_mac)
+{
+	struct net_device *dev;
+	struct vnet *vp;
+	int err, i;
+
+	dev = alloc_etherdev(sizeof(*vp));
+	if (!dev) {
+		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	for (i = 0; i < ETH_ALEN; i++)
+		dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
+
+	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+	vp = netdev_priv(dev);
+
+	spin_lock_init(&vp->lock);
+	vp->dev = dev;
+
+	INIT_LIST_HEAD(&vp->port_list);
+	for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&vp->port_hash[i]);
+	INIT_LIST_HEAD(&vp->list);
+	vp->local_mac = *local_mac;
+
+	dev->open = vnet_open;
+	dev->stop = vnet_close;
+	dev->set_multicast_list = vnet_set_rx_mode;
+	dev->set_mac_address = vnet_set_mac_addr;
+	dev->tx_timeout = vnet_tx_timeout;
+	dev->ethtool_ops = &vnet_ethtool_ops;
+	dev->watchdog_timeo = VNET_TX_TIMEOUT;
+	dev->change_mtu = vnet_change_mtu;
+	dev->hard_start_xmit = vnet_start_xmit;
+
+	err = register_netdev(dev);
+	if (err) {
+		printk(KERN_ERR PFX "Cannot register net device, "
+		       "aborting.\n");
+		goto err_out_free_dev;
+	}
+
+	printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);
+
+	for (i = 0; i < 6; i++)
+		printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
+
+	list_add(&vp->list, &vnet_list);
+
+	return vp;
+
+err_out_free_dev:
+	free_netdev(dev);
+
+	return ERR_PTR(err);
+}
+
+static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
+{
+	struct vnet *iter, *vp;
+
+	mutex_lock(&vnet_list_mutex);
+	vp = NULL;
+	list_for_each_entry(iter, &vnet_list, list) {
+		if (iter->local_mac == *local_mac) {
+			vp = iter;
+			break;
+		}
+	}
+	if (!vp)
+		vp = vnet_new(local_mac);
+	mutex_unlock(&vnet_list_mutex);
+
+	return vp;
+}
+
+static const char *local_mac_prop = "local-mac-address";
+
+static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
+						u64 port_node)
+{
+	const u64 *local_mac = NULL;
+	u64 a;
+
+	mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
+		u64 target = mdesc_arc_target(hp, a);
+		const char *name;
+
+		name = mdesc_get_property(hp, target, "name", NULL);
+		if (!name || strcmp(name, "network"))
+			continue;
+
+		local_mac = mdesc_get_property(hp, target,
+					       local_mac_prop, NULL);
+		if (local_mac)
+			break;
+	}
+	if (!local_mac)
+		return ERR_PTR(-ENODEV);
+
+	return vnet_find_or_create(local_mac);
+}
+
 static struct ldc_channel_config vnet_ldc_cfg = {
 	.event		= vnet_event,
 	.mtu		= 64,
@@ -887,6 +999,14 @@
 	.handshake_complete	= vnet_handshake_complete,
 };
 
+static void print_version(void)
+{
+	static int version_printed;
+
+	if (version_printed++ == 0)
+		printk(KERN_INFO "%s", version);
+}
+
 const char *remote_macaddr_prop = "remote-mac-address";
 
 static int __devinit vnet_port_probe(struct vio_dev *vdev,
@@ -899,14 +1019,17 @@
 	const u64 *rmac;
 	int len, i, err, switch_port;
 
-	vp = dev_get_drvdata(vdev->dev.parent);
-	if (!vp) {
-		printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
-		return -ENODEV;
-	}
+	print_version();
 
 	hp = mdesc_grab();
 
+	vp = vnet_find_parent(hp, vdev->mp);
+	if (IS_ERR(vp)) {
+		printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
+		err = PTR_ERR(vp);
+		goto err_out_put_mdesc;
+	}
+
 	rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
 	err = -ENODEV;
 	if (!rmac) {
@@ -1025,139 +1148,14 @@
 	}
 };
 
-const char *local_mac_prop = "local-mac-address";
-
-static int __devinit vnet_probe(struct vio_dev *vdev,
-				const struct vio_device_id *id)
-{
-	static int vnet_version_printed;
-	struct mdesc_handle *hp;
-	struct net_device *dev;
-	struct vnet *vp;
-	const u64 *mac;
-	int err, i, len;
-
-	if (vnet_version_printed++ == 0)
-		printk(KERN_INFO "%s", version);
-
-	hp = mdesc_grab();
-
-	mac = mdesc_get_property(hp, vdev->mp, local_mac_prop, &len);
-	if (!mac) {
-		printk(KERN_ERR PFX "vnet lacks %s property.\n",
-		       local_mac_prop);
-		err = -ENODEV;
-		goto err_out;
-	}
-
-	dev = alloc_etherdev(sizeof(*vp));
-	if (!dev) {
-		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
-		err = -ENOMEM;
-		goto err_out;
-	}
-
-	for (i = 0; i < ETH_ALEN; i++)
-		dev->dev_addr[i] = (*mac >> (5 - i) * 8) & 0xff;
-
-	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-
-	SET_NETDEV_DEV(dev, &vdev->dev);
-
-	vp = netdev_priv(dev);
-
-	spin_lock_init(&vp->lock);
-	vp->dev = dev;
-	vp->vdev = vdev;
-
-	INIT_LIST_HEAD(&vp->port_list);
-	for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
-		INIT_HLIST_HEAD(&vp->port_hash[i]);
-
-	dev->open = vnet_open;
-	dev->stop = vnet_close;
-	dev->set_multicast_list = vnet_set_rx_mode;
-	dev->set_mac_address = vnet_set_mac_addr;
-	dev->tx_timeout = vnet_tx_timeout;
-	dev->ethtool_ops = &vnet_ethtool_ops;
-	dev->watchdog_timeo = VNET_TX_TIMEOUT;
-	dev->change_mtu = vnet_change_mtu;
-	dev->hard_start_xmit = vnet_start_xmit;
-
-	err = register_netdev(dev);
-	if (err) {
-		printk(KERN_ERR PFX "Cannot register net device, "
-		       "aborting.\n");
-		goto err_out_free_dev;
-	}
-
-	printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);
-
-	for (i = 0; i < 6; i++)
-		printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
-
-	dev_set_drvdata(&vdev->dev, vp);
-
-	mdesc_release(hp);
-
-	return 0;
-
-err_out_free_dev:
-	free_netdev(dev);
-
-err_out:
-	mdesc_release(hp);
-	return err;
-}
-
-static int vnet_remove(struct vio_dev *vdev)
-{
-
-	struct vnet *vp = dev_get_drvdata(&vdev->dev);
-
-	if (vp) {
-		/* XXX unregister port, or at least check XXX */
-		unregister_netdevice(vp->dev);
-		dev_set_drvdata(&vdev->dev, NULL);
-	}
-	return 0;
-}
-
-static struct vio_device_id vnet_match[] = {
-	{
-		.type = "network",
-	},
-	{},
-};
-MODULE_DEVICE_TABLE(vio, vnet_match);
-
-static struct vio_driver vnet_driver = {
-	.id_table	= vnet_match,
-	.probe		= vnet_probe,
-	.remove		= vnet_remove,
-	.driver		= {
-		.name	= "vnet",
-		.owner	= THIS_MODULE,
-	}
-};
-
 static int __init vnet_init(void)
 {
-	int err = vio_register_driver(&vnet_driver);
-
-	if (!err) {
-		err = vio_register_driver(&vnet_port_driver);
-		if (err)
-			vio_unregister_driver(&vnet_driver);
-	}
-
-	return err;
+	return vio_register_driver(&vnet_port_driver);
 }
 
 static void __exit vnet_exit(void)
 {
 	vio_unregister_driver(&vnet_port_driver);
-	vio_unregister_driver(&vnet_driver);
 }
 
 module_init(vnet_init);
diff --git a/drivers/net/sunvnet.h b/drivers/net/sunvnet.h
index 1c88730..7d3a0ca 100644
--- a/drivers/net/sunvnet.h
+++ b/drivers/net/sunvnet.h
@@ -60,11 +60,13 @@
 	struct net_device	*dev;
 
 	u32			msg_enable;
-	struct vio_dev		*vdev;
 
 	struct list_head	port_list;
 
 	struct hlist_head	port_hash[VNET_PORT_HASH_SIZE];
+
+	struct list_head	list;
+	u64			local_mac;
 };
 
 #endif /* _SUNVNET_H */
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 75655ad..7f94ca9 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -626,7 +626,7 @@
 	return -ENODEV;
 }
 #else
-static int __devinit tc35815_read_plat_dev_addr(struct device *dev)
+static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
 {
 	return -ENODEV;
 }
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 5ee1476..887b9a5 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -64,8 +64,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.78"
-#define DRV_MODULE_RELDATE	"July 11, 2007"
+#define DRV_MODULE_VERSION	"3.79"
+#define DRV_MODULE_RELDATE	"July 18, 2007"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -4847,6 +4847,59 @@
 	return 0;
 }
 
+/* Save PCI command register before chip reset */
+static void tg3_save_pci_state(struct tg3 *tp)
+{
+	u32 val;
+
+	pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
+	tp->pci_cmd = val;
+}
+
+/* Restore PCI state after chip reset */
+static void tg3_restore_pci_state(struct tg3 *tp)
+{
+	u32 val;
+
+	/* Re-enable indirect register accesses. */
+	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+			       tp->misc_host_ctrl);
+
+	/* Set MAX PCI retry to zero. */
+	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
+		val |= PCISTATE_RETRY_SAME_DMA;
+	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
+
+	pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);
+
+	/* Make sure PCI-X relaxed ordering bit is clear. */
+	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
+	val &= ~PCIX_CAPS_RELAXED_ORDERING;
+	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
+
+	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+		u32 val;
+
+		/* Chip reset on 5780 will reset MSI enable bit,
+		 * so need to restore it.
+		 */
+		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+			u16 ctrl;
+
+			pci_read_config_word(tp->pdev,
+					     tp->msi_cap + PCI_MSI_FLAGS,
+					     &ctrl);
+			pci_write_config_word(tp->pdev,
+					      tp->msi_cap + PCI_MSI_FLAGS,
+					      ctrl | PCI_MSI_FLAGS_ENABLE);
+			val = tr32(MSGINT_MODE);
+			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
+		}
+	}
+}
+
 static void tg3_stop_fw(struct tg3 *);
 
 /* tp->lock is held. */
@@ -4863,6 +4916,12 @@
 	 */
 	tp->nvram_lock_cnt = 0;
 
+	/* GRC_MISC_CFG core clock reset will clear the memory
+	 * enable bit in PCI register 4 and the MSI enable bit
+	 * on some chips, so we save relevant registers here.
+	 */
+	tg3_save_pci_state(tp);
+
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
@@ -4961,50 +5020,14 @@
 		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
 	}
 
-	/* Re-enable indirect register accesses. */
-	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
-			       tp->misc_host_ctrl);
-
-	/* Set MAX PCI retry to zero. */
-	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
-	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
-	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
-		val |= PCISTATE_RETRY_SAME_DMA;
-	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
-
-	pci_restore_state(tp->pdev);
+	tg3_restore_pci_state(tp);
 
 	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
 
-	/* Make sure PCI-X relaxed ordering bit is clear. */
-	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
-	val &= ~PCIX_CAPS_RELAXED_ORDERING;
-	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
-
-	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
-		u32 val;
-
-		/* Chip reset on 5780 will reset MSI enable bit,
-		 * so need to restore it.
-		 */
-		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
-			u16 ctrl;
-
-			pci_read_config_word(tp->pdev,
-					     tp->msi_cap + PCI_MSI_FLAGS,
-					     &ctrl);
-			pci_write_config_word(tp->pdev,
-					      tp->msi_cap + PCI_MSI_FLAGS,
-					      ctrl | PCI_MSI_FLAGS_ENABLE);
-			val = tr32(MSGINT_MODE);
-			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
-		}
-
+	val = 0;
+	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
 		val = tr32(MEMARB_MODE);
-		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
-
-	} else
-		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
 
 	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
 		tg3_stop_fw(tp);
@@ -11978,7 +12001,6 @@
 	 */
 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-		pci_save_state(tp->pdev);
 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	}
@@ -12007,12 +12029,6 @@
 
 	tg3_init_coal(tp);
 
-	/* Now that we have fully setup the chip, save away a snapshot
-	 * of the PCI config space.  We need to restore this after
-	 * GRC_MISC_CFG core clock resets and some resume events.
-	 */
-	pci_save_state(tp->pdev);
-
 	pci_set_drvdata(pdev, dev);
 
 	err = register_netdev(dev);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index d84e75e..5c21f49 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2345,6 +2345,7 @@
 #define PHY_REV_BCM5411_X0		0x1 /* Found on Netgear GA302T */
 
 	u32				led_ctrl;
+	u32				pci_cmd;
 
 	char				board_part_number[24];
 	char				fw_ver[16];
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 58d7e5d..f83bb5c 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -3692,7 +3692,6 @@
         __u16 rcode, correlator;
         int err = 0;
         __u8 xframe = 1;
-        __u16 tx_fstatus;
 
         rmf->vl = SWAP_BYTES(rmf->vl);
         if(rx_status & FCB_RX_STATUS_DA_MATCHED)
@@ -3783,7 +3782,9 @@
                                 }
                                 break;
 
-                        case TX_FORWARD:
+                        case TX_FORWARD: {
+        			__u16 uninitialized_var(tx_fstatus);
+
                                 if((rcode = smctr_rcv_tx_forward(dev, rmf))
                                         != POSITIVE_ACK)
                                 {
@@ -3811,6 +3812,7 @@
                                         }
                                 }
                                 break;
+			}
 
                         /* Received MAC Frames Processed by CRS/REM/RPS. */
                         case RSP:
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index bc62b01..943988e 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -305,6 +305,9 @@
 	USB_DEVICE (0x8086, 0x07d3),	// "blob" bootloader
 	.driver_info =	(unsigned long) &blob_info,
 }, {
+	USB_DEVICE (0x1286, 0x8001),    // "blob" bootloader
+	.driver_info =  (unsigned long) &blob_info,
+}, {
 	// Linux Ethernet/RNDIS gadget on pxa210/25x/26x, second config
 	// e.g. Gumstix, current OpenZaurus, ...
 	USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203),
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 6b63b35..8ead774 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -315,12 +315,11 @@
 		return -ENODEV;
 	}
 
-	card = kmalloc(sizeof(card_t), GFP_KERNEL);
+	card = kzalloc(sizeof(card_t), GFP_KERNEL);
 	if (card == NULL) {
 		printk(KERN_ERR "c101: unable to allocate memory\n");
 		return -ENOBUFS;
 	}
-	memset(card, 0, sizeof(card_t));
 
 	card->dev = alloc_hdlcdev(card);
 	if (!card->dev) {
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 9ef49ce..26058b4 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -572,13 +572,11 @@
 	sprintf(cosa->name, "cosa%d", cosa->num);
 
 	/* Initialize the per-channel data */
-	cosa->chan = kmalloc(sizeof(struct channel_data)*cosa->nchannels,
-			     GFP_KERNEL);
+	cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL);
 	if (!cosa->chan) {
 	        err = -ENOMEM;
 		goto err_out3;
 	}
-	memset(cosa->chan, 0, sizeof(struct channel_data)*cosa->nchannels);
 	for (i=0; i<cosa->nchannels; i++) {
 		cosa->chan[i].cosa = cosa;
 		cosa->chan[i].num = i;
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
index 6e5f1c8..a0e8611 100644
--- a/drivers/net/wan/cycx_main.c
+++ b/drivers/net/wan/cycx_main.c
@@ -113,12 +113,10 @@
 	/* Verify number of cards and allocate adapter data space */
 	cycx_ncards = min_t(int, cycx_ncards, CYCX_MAX_CARDS);
 	cycx_ncards = max_t(int, cycx_ncards, 1);
-	cycx_card_array = kmalloc(sizeof(struct cycx_device) * cycx_ncards,
-				  GFP_KERNEL);
+	cycx_card_array = kcalloc(cycx_ncards, sizeof(struct cycx_device), GFP_KERNEL);
 	if (!cycx_card_array)
 		goto out;
 
-	memset(cycx_card_array, 0, sizeof(struct cycx_device) * cycx_ncards);
 
 	/* Register adapters with WAN router */
 	for (cnt = 0; cnt < cycx_ncards; ++cnt) {
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index 016b3ff..a8af28b 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -376,11 +376,10 @@
 	}
 
 	/* allocate and initialize private data */
-	chan = kmalloc(sizeof(struct cycx_x25_channel), GFP_KERNEL);
+	chan = kzalloc(sizeof(struct cycx_x25_channel), GFP_KERNEL);
 	if (!chan)
 		return -ENOMEM;
 
-	memset(chan, 0, sizeof(*chan));
 	strcpy(chan->name, conf->name);
 	chan->card = card;
 	chan->link = conf->port;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index dca0244..50d2f91 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -890,12 +890,11 @@
 	struct dscc4_dev_priv *root;
 	int i, ret = -ENOMEM;
 
-	root = kmalloc(dev_per_card*sizeof(*root), GFP_KERNEL);
+	root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);
 	if (!root) {
 		printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME);
 		goto err_out;
 	}
-	memset(root, 0, dev_per_card*sizeof(*root));
 
 	for (i = 0; i < dev_per_card; i++) {
 		root[i].dev = alloc_hdlcdev(root + i);
@@ -903,12 +902,11 @@
 			goto err_free_dev;
 	}
 
-	ppriv = kmalloc(sizeof(*ppriv), GFP_KERNEL);
+	ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL);
 	if (!ppriv) {
 		printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME);
 		goto err_free_dev;
 	}
-	memset(ppriv, 0, sizeof(struct dscc4_pci_priv));
 
 	ppriv->root = root;
 	spin_lock_init(&ppriv->lock);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 58a53b6..12dae8e 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2476,13 +2476,12 @@
 	}
 
 	/* Allocate driver private data */
-	card = kmalloc(sizeof (struct fst_card_info), GFP_KERNEL);
+	card = kzalloc(sizeof (struct fst_card_info), GFP_KERNEL);
 	if (card == NULL) {
 		printk_err("FarSync card found but insufficient memory for"
 			   " driver storage\n");
 		return -ENOMEM;
 	}
-	memset(card, 0, sizeof (struct fst_card_info));
 
 	/* Try to enable the device */
 	if ((err = pci_enable_device(pdev)) != 0) {
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 9ba3e4e..bf5f8d9 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -231,11 +231,10 @@
 		return NULL;
 	}
 	
-	sv = kmalloc(sizeof(struct sv11_device), GFP_KERNEL);
+	sv = kzalloc(sizeof(struct sv11_device), GFP_KERNEL);
 	if(!sv)
 		goto fail3;
 			
-	memset(sv, 0, sizeof(*sv));
 	sv->if_ptr=&sv->netdev;
 	
 	sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup);
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 5c322df..cbdf0b7 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -351,12 +351,11 @@
 		return -ENODEV;
 	}
 
-	card = kmalloc(sizeof(card_t), GFP_KERNEL);
+	card = kzalloc(sizeof(card_t), GFP_KERNEL);
 	if (card == NULL) {
 		printk(KERN_ERR "n2: unable to allocate memory\n");
 		return -ENOBUFS;
 	}
-	memset(card, 0, sizeof(card_t));
 
 	card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
 	card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index ec1c556..99fee2f 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -2833,6 +2833,8 @@
 	int br, tc;
 	int br_pwr, error;
 
+	*br_io = 0;
+
 	if (rate == 0)
 		return (0);
 
@@ -3454,7 +3456,7 @@
 	if ((err = pci_enable_device(pdev)) < 0)
 		return err;
 
-	card = kmalloc(sizeof(pc300_t), GFP_KERNEL);
+	card = kzalloc(sizeof(pc300_t), GFP_KERNEL);
 	if (card == NULL) {
 		printk("PC300 found at RAM 0x%016llx, "
 		       "but could not allocate card structure.\n",
@@ -3462,7 +3464,6 @@
 		err = -ENOMEM;
 		goto err_disable_dev;
 	}
-	memset(card, 0, sizeof(pc300_t));
 
 	err = -ENODEV;
 
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index dfbd3b0..6353cb5 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -334,14 +334,13 @@
 		return i;
 	}
 
-	card = kmalloc(sizeof(card_t), GFP_KERNEL);
+	card = kzalloc(sizeof(card_t), GFP_KERNEL);
 	if (card == NULL) {
 		printk(KERN_ERR "pc300: unable to allocate memory\n");
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
 		return -ENOBUFS;
 	}
-	memset(card, 0, sizeof(card_t));
 	pci_set_drvdata(pdev, card);
 
 	if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index 7f720de..092e51d 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -312,14 +312,13 @@
 		return i;
 	}
 
-	card = kmalloc(sizeof(card_t), GFP_KERNEL);
+	card = kzalloc(sizeof(card_t), GFP_KERNEL);
 	if (card == NULL) {
 		printk(KERN_ERR "pci200syn: unable to allocate memory\n");
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
 		return -ENOBUFS;
 	}
-	memset(card, 0, sizeof(card_t));
 	pci_set_drvdata(pdev, card);
 	card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
 	card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 35eded7..1cc18e7 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -595,8 +595,8 @@
 
 	u32  crc = CRC32_INITIAL;
 
-	unsigned  framelen, frameno, ack;
-	unsigned  is_first, frame_ok;
+	unsigned  framelen = 0, frameno, ack;
+	unsigned  is_first, frame_ok = 0;
 
 	if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
 		frame_ok = framelen > 4
@@ -604,8 +604,7 @@
 			:  skip_tail( ioaddr, framelen, crc );
 		if( frame_ok )
 			interpret_ack( dev, ack );
-	} else
-		frame_ok = 0;
+	}
 
 	outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
 	if( frame_ok ) {
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 6a485f0..792e588 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1196,10 +1196,9 @@
 		
 	if (read)
 	{	
-		temp = kmalloc(mem.len, GFP_KERNEL);
+		temp = kzalloc(mem.len, GFP_KERNEL);
 		if (!temp)
 			return(-ENOMEM);
-		memset(temp, 0, mem.len);
 		sdla_read(dev, mem.addr, temp, mem.len);
 		if(copy_to_user(mem.data, temp, mem.len))
 		{
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 1313581..11276bf 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -270,11 +270,10 @@
 		return NULL;
 	}
 	
-	b = kmalloc(sizeof(struct slvl_board), GFP_KERNEL);
+	b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
 	if(!b)
 		goto fail3;
 
-	memset(b, 0, sizeof(*b));
 	if (!(b->dev[0]= slvl_alloc(iobase, irq)))
 		goto fail2;
 
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index c7360157..3c78f98 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -599,7 +599,7 @@
 	}
 
 	alloc_size = sizeof(card_t) + ports * sizeof(port_t);
-	card = kmalloc(alloc_size, GFP_KERNEL);
+	card = kzalloc(alloc_size, GFP_KERNEL);
 	if (card == NULL) {
 		printk(KERN_ERR "wanXL %s: unable to allocate memory\n",
 		       pci_name(pdev));
@@ -607,7 +607,6 @@
 		pci_disable_device(pdev);
 		return -ENOBUFS;
 	}
-	memset(card, 0, alloc_size);
 
 	pci_set_drvdata(pdev, card);
 	card->pdev = pdev;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 1c9edd9..c48b1cc 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -786,14 +786,12 @@
 	printk(KERN_INFO "X.25 async: version 0.00 ALPHA "
 			"(dynamic channels, max=%d).\n", x25_asy_maxdev );
 
-	x25_asy_devs = kmalloc(sizeof(struct net_device *)*x25_asy_maxdev, 
-			       GFP_KERNEL);
+	x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device*), GFP_KERNEL);
 	if (!x25_asy_devs) {
 		printk(KERN_WARNING "X25 async: Can't allocate x25_asy_ctrls[] "
 				"array! Uaargh! (-> No X.25 available)\n");
 		return -ENOMEM;
 	}
-	memset(x25_asy_devs, 0, sizeof(struct net_device *)*x25_asy_maxdev); 
 
 	return tty_register_ldisc(N_X25, &x25_ldisc);
 }
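The x25_asy hunk above uses the array variant: kcalloc(n, size, flags) returns zeroed storage for n elements and checks the n * size multiplication for overflow, so both the explicit memset and the open-coded size arithmetic go away. Sketch:

	/* zeroed array of x25_asy_maxdev pointers, overflow-checked */
	x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device *),
			       GFP_KERNEL);
	if (!x25_asy_devs)
		return -ENOMEM;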
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 2d3a180..ee1cc14 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -52,6 +52,8 @@
 
 #include "airo.h"
 
+#define DRV_NAME "airo"
+
 #ifdef CONFIG_PCI
 static struct pci_device_id card_ids[] = {
 	{ 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, },
@@ -71,7 +73,7 @@
 static int airo_pci_resume(struct pci_dev *pdev);
 
 static struct pci_driver airo_driver = {
-	.name     = "airo",
+	.name     = DRV_NAME,
 	.id_table = card_ids,
 	.probe    = airo_pci_probe,
 	.remove   = __devexit_p(airo_pci_remove),
@@ -1092,7 +1094,7 @@
 static void OUT4500( struct airo_info *, u16 register, u16 value );
 static unsigned short IN4500( struct airo_info *, u16 register );
 static u16 setup_card(struct airo_info*, u8 *mac, int lock);
-static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock );
+static int enable_MAC(struct airo_info *ai, int lock);
 static void disable_MAC(struct airo_info *ai, int lock);
 static void enable_interrupts(struct airo_info*);
 static void disable_interrupts(struct airo_info*);
@@ -1250,7 +1252,7 @@
 static int flashrestart(struct airo_info *ai,struct net_device *dev);
 
 #define airo_print(type, name, fmt, args...) \
-	{ printk(type "airo(%s): " fmt "\n", name, ##args); }
+	printk(type DRV_NAME "(%s): " fmt "\n", name, ##args)
 
 #define airo_print_info(name, fmt, args...) \
 	airo_print(KERN_INFO, name, fmt, ##args)
@@ -1926,28 +1928,54 @@
 	return rc;
 }
 
-static int airo_open(struct net_device *dev) {
-	struct airo_info *info = dev->priv;
-	Resp rsp;
+static void try_auto_wep(struct airo_info *ai)
+{
+	if (auto_wep && !(ai->flags & FLAG_RADIO_DOWN)) {
+		ai->expires = RUN_AT(3*HZ);
+		wake_up_interruptible(&ai->thr_wait);
+	}
+}
 
-	if (test_bit(FLAG_FLASHING, &info->flags))
+static int airo_open(struct net_device *dev) {
+	struct airo_info *ai = dev->priv;
+	int rc = 0;
+
+	if (test_bit(FLAG_FLASHING, &ai->flags))
 		return -EIO;
 
 	/* Make sure the card is configured.
 	 * Wireless Extensions may postpone config changes until the card
 	 * is open (to pipeline changes and speed-up card setup). If
 	 * those changes are not yet commited, do it now - Jean II */
-	if (test_bit (FLAG_COMMIT, &info->flags)) {
-		disable_MAC(info, 1);
-		writeConfigRid(info, 1);
+	if (test_bit(FLAG_COMMIT, &ai->flags)) {
+		disable_MAC(ai, 1);
+		writeConfigRid(ai, 1);
 	}
 
-	if (info->wifidev != dev) {
+	if (ai->wifidev != dev) {
+		clear_bit(JOB_DIE, &ai->jobs);
+		ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name);
+		if (IS_ERR(ai->airo_thread_task))
+			return (int)PTR_ERR(ai->airo_thread_task);
+
+		rc = request_irq(dev->irq, airo_interrupt, IRQF_SHARED,
+			dev->name, dev);
+		if (rc) {
+			airo_print_err(dev->name,
+				"register interrupt %d failed, rc %d",
+				dev->irq, rc);
+			set_bit(JOB_DIE, &ai->jobs);
+			kthread_stop(ai->airo_thread_task);
+			return rc;
+		}
+
 		/* Power on the MAC controller (which may have been disabled) */
-		clear_bit(FLAG_RADIO_DOWN, &info->flags);
-		enable_interrupts(info);
+		clear_bit(FLAG_RADIO_DOWN, &ai->flags);
+		enable_interrupts(ai);
+
+		try_auto_wep(ai);
 	}
-	enable_MAC(info, &rsp, 1);
+	enable_MAC(ai, 1);
 
 	netif_start_queue(dev);
 	return 0;
@@ -2338,14 +2366,13 @@
 {
 	struct airo_info *ai = dev->priv;
 	struct sockaddr *addr = p;
-	Resp rsp;
 
 	readConfigRid(ai, 1);
 	memcpy (ai->config.macAddr, addr->sa_data, dev->addr_len);
 	set_bit (FLAG_COMMIT, &ai->flags);
 	disable_MAC(ai, 1);
 	writeConfigRid (ai, 1);
-	enable_MAC(ai, &rsp, 1);
+	enable_MAC(ai, 1);
 	memcpy (ai->dev->dev_addr, addr->sa_data, dev->addr_len);
 	if (ai->wifidev)
 		memcpy (ai->wifidev->dev_addr, addr->sa_data, dev->addr_len);
@@ -2392,6 +2419,11 @@
 		disable_MAC(ai, 1);
 #endif
 		disable_interrupts( ai );
+
+		free_irq(dev->irq, dev);
+
+		set_bit(JOB_DIE, &ai->jobs);
+		kthread_stop(ai->airo_thread_task);
 	}
 	return 0;
 }
@@ -2403,7 +2435,6 @@
 	set_bit(FLAG_RADIO_DOWN, &ai->flags);
 	disable_MAC(ai, 1);
 	disable_interrupts(ai);
-	free_irq( dev->irq, dev );
 	takedown_proc_entry( dev, ai );
 	if (test_bit(FLAG_REGISTERED, &ai->flags)) {
 		unregister_netdev( dev );
@@ -2414,9 +2445,6 @@
 		}
 		clear_bit(FLAG_REGISTERED, &ai->flags);
 	}
-	set_bit(JOB_DIE, &ai->jobs);
-	kthread_stop(ai->airo_thread_task);
-
 	/*
 	 * Clean out tx queue
 	 */
@@ -2554,8 +2582,7 @@
  * 2) Map PCI memory for issueing commands.
  * 3) Allocate memory (shared) to send and receive ethernet frames.
  */
-static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
-		    const char *name)
+static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci)
 {
 	unsigned long mem_start, mem_len, aux_start, aux_len;
 	int rc = -1;
@@ -2569,35 +2596,35 @@
 	aux_start = pci_resource_start(pci, 2);
 	aux_len = AUXMEMSIZE;
 
-	if (!request_mem_region(mem_start, mem_len, name)) {
-		airo_print_err(ai->dev->name, "Couldn't get region %x[%x] for %s",
-		       (int)mem_start, (int)mem_len, name);
+	if (!request_mem_region(mem_start, mem_len, DRV_NAME)) {
+		airo_print_err("", "Couldn't get region %x[%x]",
+			(int)mem_start, (int)mem_len);
 		goto out;
 	}
-	if (!request_mem_region(aux_start, aux_len, name)) {
-		airo_print_err(ai->dev->name, "Couldn't get region %x[%x] for %s",
-		       (int)aux_start, (int)aux_len, name);
+	if (!request_mem_region(aux_start, aux_len, DRV_NAME)) {
+		airo_print_err("", "Couldn't get region %x[%x]",
+			(int)aux_start, (int)aux_len);
 		goto free_region1;
 	}
 
 	ai->pcimem = ioremap(mem_start, mem_len);
 	if (!ai->pcimem) {
-		airo_print_err(ai->dev->name, "Couldn't map region %x[%x] for %s",
-		       (int)mem_start, (int)mem_len, name);
+		airo_print_err("", "Couldn't map region %x[%x]",
+			(int)mem_start, (int)mem_len);
 		goto free_region2;
 	}
 	ai->pciaux = ioremap(aux_start, aux_len);
 	if (!ai->pciaux) {
-		airo_print_err(ai->dev->name, "Couldn't map region %x[%x] for %s",
-		       (int)aux_start, (int)aux_len, name);
+		airo_print_err("", "Couldn't map region %x[%x]",
+			(int)aux_start, (int)aux_len);
 		goto free_memmap;
 	}
 
 	/* Reserve PKTSIZE for each fid and 2K for the Rids */
 	ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma);
 	if (!ai->shared) {
-		airo_print_err(ai->dev->name, "Couldn't alloc_consistent %d",
-		       PCI_SHARED_LEN);
+		airo_print_err("", "Couldn't alloc_consistent %d",
+			PCI_SHARED_LEN);
 		goto free_auxmap;
 	}
 
@@ -2742,7 +2769,7 @@
 	    kzalloc(AIRO_MAX_NETWORK_COUNT * sizeof(BSSListElement),
 		    GFP_KERNEL);
 	if (!ai->networks) {
-		airo_print_warn(ai->dev->name, "Out of memory allocating beacons");
+		airo_print_warn("", "Out of memory allocating beacons");
 		return -ENOMEM;
 	}
 
@@ -2770,7 +2797,6 @@
 {
 	int status;
 	CapabilityRid cap_rid;
-	const char *name = ai->dev->name;
 
 	status = readCapabilityRid(ai, &cap_rid, 1);
 	if (status != SUCCESS) return 0;
@@ -2778,12 +2804,12 @@
 	/* Only firmware versions 5.30.17 or better can do WPA */
 	if ((cap_rid.softVer > 0x530)
 	  || ((cap_rid.softVer == 0x530) && (cap_rid.softSubVer >= 17))) {
-		airo_print_info(name, "WPA is supported.");
+		airo_print_info("", "WPA is supported.");
 		return 1;
 	}
 
 	/* No WPA support */
-	airo_print_info(name, "WPA unsupported (only firmware versions 5.30.17"
+	airo_print_info("", "WPA unsupported (only firmware versions 5.30.17"
 		" and greater support WPA.  Detected %s)", cap_rid.prodVer);
 	return 0;
 }
@@ -2797,23 +2823,19 @@
 	int i, rc;
 
 	/* Create the network device object. */
-        dev = alloc_etherdev(sizeof(*ai));
-        if (!dev) {
+	dev = alloc_netdev(sizeof(*ai), "", ether_setup);
+	if (!dev) {
 		airo_print_err("", "Couldn't alloc_etherdev");
 		return NULL;
-        }
-	if (dev_alloc_name(dev, dev->name) < 0) {
-		airo_print_err("", "Couldn't get name!");
-		goto err_out_free;
 	}
 
 	ai = dev->priv;
 	ai->wifidev = NULL;
-	ai->flags = 0;
+	ai->flags = 1 << FLAG_RADIO_DOWN;
 	ai->jobs = 0;
 	ai->dev = dev;
 	if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) {
-		airo_print_dbg(dev->name, "Found an MPI350 card");
+		airo_print_dbg("", "Found an MPI350 card");
 		set_bit(FLAG_MPI, &ai->flags);
 	}
 	spin_lock_init(&ai->aux_lock);
@@ -2821,14 +2843,11 @@
 	ai->config.len = 0;
 	ai->pci = pci;
 	init_waitqueue_head (&ai->thr_wait);
-	ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name);
-	if (IS_ERR(ai->airo_thread_task))
-		goto err_out_free;
 	ai->tfm = NULL;
 	add_airo_dev(ai);
 
 	if (airo_networks_allocate (ai))
-		goto err_out_thr;
+		goto err_out_free;
 	airo_networks_initialize (ai);
 
 	/* The Airo-specific entries in the device structure. */
@@ -2851,27 +2870,22 @@
 	dev->base_addr = port;
 
 	SET_NETDEV_DEV(dev, dmdev);
+	SET_MODULE_OWNER(dev);
 
 	reset_card (dev, 1);
 	msleep(400);
 
-	rc = request_irq( dev->irq, airo_interrupt, IRQF_SHARED, dev->name, dev );
-	if (rc) {
-		airo_print_err(dev->name, "register interrupt %d failed, rc %d",
-				irq, rc);
-		goto err_out_nets;
-	}
 	if (!is_pcmcia) {
-		if (!request_region( dev->base_addr, 64, dev->name )) {
+		if (!request_region(dev->base_addr, 64, DRV_NAME)) {
 			rc = -EBUSY;
 			airo_print_err(dev->name, "Couldn't request region");
-			goto err_out_irq;
+			goto err_out_nets;
 		}
 	}
 
 	if (test_bit(FLAG_MPI,&ai->flags)) {
-		if (mpi_map_card(ai, pci, dev->name)) {
-			airo_print_err(dev->name, "Could not map memory");
+		if (mpi_map_card(ai, pci)) {
+			airo_print_err("", "Could not map memory");
 			goto err_out_res;
 		}
 	}
@@ -2899,6 +2913,7 @@
 		ai->bssListRidLen = sizeof(BSSListRid) - sizeof(BSSListRidExtra);
 	}
 
+	strcpy(dev->name, "eth%d");
 	rc = register_netdev(dev);
 	if (rc) {
 		airo_print_err(dev->name, "Couldn't register_netdev");
@@ -2921,8 +2936,6 @@
 	if (setup_proc_entry(dev, dev->priv) < 0)
 		goto err_out_wifi;
 
-	netif_start_queue(dev);
-	SET_MODULE_OWNER(dev);
 	return dev;
 
 err_out_wifi:
@@ -2940,14 +2953,9 @@
 err_out_res:
 	if (!is_pcmcia)
 	        release_region( dev->base_addr, 64 );
-err_out_irq:
-	free_irq(dev->irq, dev);
 err_out_nets:
 	airo_networks_free(ai);
-err_out_thr:
 	del_airo_dev(ai);
-	set_bit(JOB_DIE, &ai->jobs);
-	kthread_stop(ai->airo_thread_task);
 err_out_free:
 	free_netdev(dev);
 	return NULL;
@@ -3078,7 +3086,8 @@
 	struct net_device *dev = data;
 	struct airo_info *ai = dev->priv;
 	int locked;
-	
+
+	set_freezable();
 	while(1) {
 		/* make swsusp happy with our thread */
 		try_to_freeze();
@@ -3529,9 +3538,11 @@
 	return rc;
 }
 
-static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock ) {
+static int enable_MAC(struct airo_info *ai, int lock)
+{
 	int rc;
-        Cmd cmd;
+	Cmd cmd;
+	Resp rsp;
 
 	/* FLAG_RADIO_OFF : Radio disabled via /proc or Wireless Extensions
 	 * FLAG_RADIO_DOWN : Radio disabled via "ifconfig ethX down"
@@ -3547,7 +3558,7 @@
 	if (!test_bit(FLAG_ENABLED, &ai->flags)) {
 		memset(&cmd, 0, sizeof(cmd));
 		cmd.cmd = MAC_ENABLE;
-		rc = issuecommand(ai, &cmd, rsp);
+		rc = issuecommand(ai, &cmd, &rsp);
 		if (rc == SUCCESS)
 			set_bit(FLAG_ENABLED, &ai->flags);
 	} else
@@ -3557,8 +3568,12 @@
 	    up(&ai->sem);
 
 	if (rc)
-		airo_print_err(ai->dev->name, "%s: Cannot enable MAC, err=%d",
-			__FUNCTION__, rc);
+		airo_print_err(ai->dev->name, "Cannot enable MAC");
+	else if ((rsp.status & 0xFF00) != 0) {
+		airo_print_err(ai->dev->name, "Bad MAC enable reason=%x, "
+			"rid=%x, offset=%d", rsp.rsp0, rsp.rsp1, rsp.rsp2);
+		rc = ERROR;
+	}
 	return rc;
 }
 
@@ -3902,12 +3917,9 @@
 		if ( status != SUCCESS ) return ERROR;
 	}
 
-	status = enable_MAC(ai, &rsp, lock);
-	if ( status != SUCCESS || (rsp.status & 0xFF00) != 0) {
-		airo_print_err(ai->dev->name, "Bad MAC enable reason = %x, rid = %x,"
-			" offset = %d", rsp.rsp0, rsp.rsp1, rsp.rsp2 );
+	status = enable_MAC(ai, lock);
+	if (status != SUCCESS)
 		return ERROR;
-	}
 
 	/* Grab the initial wep key, we gotta save it for auto_wep */
 	rc = readWepKeyRid(ai, &wkr, 1, lock);
@@ -3919,10 +3931,7 @@
 		rc = readWepKeyRid(ai, &wkr, 0, lock);
 	} while(lastindex != wkr.kindex);
 
-	if (auto_wep) {
-		ai->expires = RUN_AT(3*HZ);
-		wake_up_interruptible(&ai->thr_wait);
-	}
+	try_auto_wep(ai);
 
 	return SUCCESS;
 }
@@ -4004,7 +4013,7 @@
 		}
 		if ( !(max_tries--) ) {
 			airo_print_err(ai->dev->name,
-				"airo: BAP setup error too many retries\n");
+				"BAP setup error too many retries\n");
 			return ERROR;
 		}
 		// -- PC4500 missed it, try again
@@ -5152,7 +5161,6 @@
 	struct net_device *dev = dp->data;
 	struct airo_info *ai = dev->priv;
 	SsidRid SSID_rid;
-	Resp rsp;
 	int i;
 	int offset = 0;
 
@@ -5177,7 +5185,7 @@
 		SSID_rid.len = sizeof(SSID_rid);
 	disable_MAC(ai, 1);
 	writeSsidRid(ai, &SSID_rid, 1);
-	enable_MAC(ai, &rsp, 1);
+	enable_MAC(ai, 1);
 }
 
 static inline u8 hexVal(char c) {
@@ -5193,7 +5201,6 @@
 	struct net_device *dev = dp->data;
 	struct airo_info *ai = dev->priv;
 	APListRid APList_rid;
-	Resp rsp;
 	int i;
 
 	if ( !data->writelen ) return;
@@ -5218,18 +5225,17 @@
 	}
 	disable_MAC(ai, 1);
 	writeAPListRid(ai, &APList_rid, 1);
-	enable_MAC(ai, &rsp, 1);
+	enable_MAC(ai, 1);
 }
 
 /* This function wraps PC4500_writerid with a MAC disable */
 static int do_writerid( struct airo_info *ai, u16 rid, const void *rid_data,
 			int len, int dummy ) {
 	int rc;
-	Resp rsp;
 
 	disable_MAC(ai, 1);
 	rc = PC4500_writerid(ai, rid, rid_data, len, 1);
-	enable_MAC(ai, &rsp, 1);
+	enable_MAC(ai, 1);
 	return rc;
 }
 
@@ -5260,7 +5266,6 @@
 		       const char *key, u16 keylen, int perm, int lock ) {
 	static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
 	WepKeyRid wkr;
-	Resp rsp;
 
 	memset(&wkr, 0, sizeof(wkr));
 	if (keylen == 0) {
@@ -5280,7 +5285,7 @@
 
 	if (perm) disable_MAC(ai, lock);
 	writeWepKeyRid(ai, &wkr, perm, lock);
-	if (perm) enable_MAC(ai, &rsp, lock);
+	if (perm) enable_MAC(ai, lock);
 	return 0;
 }
 
@@ -5548,7 +5553,6 @@
    changed. */
 static void timer_func( struct net_device *dev ) {
 	struct airo_info *apriv = dev->priv;
-	Resp rsp;
 
 /* We don't have a link so try changing the authtype */
 	readConfigRid(apriv, 0);
@@ -5575,7 +5579,7 @@
 	}
 	set_bit (FLAG_COMMIT, &apriv->flags);
 	writeConfigRid(apriv, 0);
-	enable_MAC(apriv, &rsp, 0);
+	enable_MAC(apriv, 0);
 	up(&apriv->sem);
 
 /* Schedule check to see if the change worked */
@@ -5597,8 +5601,10 @@
 			dev = _init_airo_card(pdev->irq, pdev->resource[0].start, 0, pdev, &pdev->dev);
 	else
 			dev = _init_airo_card(pdev->irq, pdev->resource[2].start, 0, pdev, &pdev->dev);
-	if (!dev)
+	if (!dev) {
+		pci_disable_device(pdev);
 		return -ENODEV;
+	}
 
 	pci_set_drvdata(pdev, dev);
 	return 0;
@@ -5610,6 +5616,8 @@
 
 	airo_print_info(dev->name, "Unregistering...");
 	stop_airo_card(dev, 1);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
 }
 
 static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -5646,7 +5654,6 @@
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct airo_info *ai = dev->priv;
-	Resp rsp;
 	pci_power_t prev_state = pdev->current_state;
 
 	pci_set_power_state(pdev, PCI_D0);
@@ -5679,7 +5686,7 @@
 		ai->APList = NULL;
 	}
 	writeConfigRid(ai, 0);
-	enable_MAC(ai, &rsp, 0);
+	enable_MAC(ai, 0);
 	ai->power = PMSG_ON;
 	netif_device_attach(dev);
 	netif_wake_queue(dev);
@@ -5903,7 +5910,6 @@
 			  char *extra)
 {
 	struct airo_info *local = dev->priv;
-	Resp rsp;
 	SsidRid SSID_rid;		/* SSIDs */
 
 	/* Reload the list of current SSID */
@@ -5935,7 +5941,7 @@
 	/* Write it to the card */
 	disable_MAC(local, 1);
 	writeSsidRid(local, &SSID_rid, 1);
-	enable_MAC(local, &rsp, 1);
+	enable_MAC(local, 1);
 
 	return 0;
 }
@@ -6000,7 +6006,7 @@
 		memcpy(APList_rid.ap[0], awrq->sa_data, ETH_ALEN);
 		disable_MAC(local, 1);
 		writeAPListRid(local, &APList_rid, 1);
-		enable_MAC(local, &rsp, 1);
+		enable_MAC(local, 1);
 	}
 	return 0;
 }
@@ -7454,7 +7460,6 @@
 			      char *extra)			/* NULL */
 {
 	struct airo_info *local = dev->priv;
-	Resp rsp;
 
 	if (!test_bit (FLAG_COMMIT, &local->flags))
 		return 0;
@@ -7479,7 +7484,7 @@
 	if (down_interruptible(&local->sem))
 		return -ERESTARTSYS;
 	writeConfigRid(local, 0);
-	enable_MAC(local, &rsp, 0);
+	enable_MAC(local, 0);
 	if (test_bit (FLAG_RESET, &local->flags))
 		airo_set_promisc(local);
 	else
@@ -7746,7 +7751,6 @@
 	unsigned char *iobuf;
 	int len;
 	struct airo_info *ai = dev->priv;
-	Resp rsp;
 
 	if (test_bit(FLAG_FLASHING, &ai->flags))
 		return -EIO;
@@ -7758,7 +7762,7 @@
 		if (test_bit(FLAG_COMMIT, &ai->flags)) {
 			disable_MAC (ai, 1);
 			writeConfigRid (ai, 1);
-			enable_MAC (ai, &rsp, 1);
+			enable_MAC(ai, 1);
 		}
 		break;
 	case AIROGSLIST:    ridcode = RID_SSID;         break;
@@ -7815,7 +7819,6 @@
 	struct airo_info *ai = dev->priv;
 	int  ridcode;
         int  enabled;
-	Resp      rsp;
 	static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
 	unsigned char *iobuf;
 
@@ -7849,7 +7852,7 @@
 		 * same with MAC off
 		 */
 	case AIROPMACON:
-		if (enable_MAC(ai, &rsp, 1) != 0)
+		if (enable_MAC(ai, 1) != 0)
 			return -EIO;
 		return 0;
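The airo changes above move the interrupt handler and the helper kthread from card-init time into airo_open(), and tear them down again on the close path, so a card that is probed but not up no longer pins an IRQ line or a kernel thread. A simplified sketch of that open/stop pattern, with illustrative names only (example_thread and example_interrupt stand in for the driver's own handlers):

	static int example_open(struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);
		int rc;

		priv->task = kthread_run(example_thread, dev, dev->name);
		if (IS_ERR(priv->task))
			return PTR_ERR(priv->task);

		rc = request_irq(dev->irq, example_interrupt, IRQF_SHARED,
				 dev->name, dev);
		if (rc) {
			kthread_stop(priv->task);
			return rc;
		}

		netif_start_queue(dev);
		return 0;
	}

	static int example_stop(struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);

		netif_stop_queue(dev);
		free_irq(dev->irq, dev);
		kthread_stop(priv->task);
		return 0;
	}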
 
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index d51daf8..8990585 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -1768,7 +1768,8 @@
 
 		if (priv->stop_rf_kill) {
 			priv->stop_rf_kill = 0;
-			queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
+			queue_delayed_work(priv->workqueue, &priv->rf_kill,
+					   round_jiffies(HZ));
 		}
 
 		deferred = 1;
@@ -2098,7 +2099,7 @@
 	/* Make sure the RF Kill check timer is running */
 	priv->stop_rf_kill = 0;
 	cancel_delayed_work(&priv->rf_kill);
-	queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
+	queue_delayed_work(priv->workqueue, &priv->rf_kill, round_jiffies(HZ));
 }
 
 static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
@@ -4233,7 +4234,8 @@
 			/* Make sure the RF_KILL check timer is running */
 			priv->stop_rf_kill = 0;
 			cancel_delayed_work(&priv->rf_kill);
-			queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
+			queue_delayed_work(priv->workqueue, &priv->rf_kill,
+					   round_jiffies(HZ));
 		} else
 			schedule_reset(priv);
 	}
@@ -5969,7 +5971,8 @@
 	if (rf_kill_active(priv)) {
 		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
 		if (!priv->stop_rf_kill)
-			queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
+			queue_delayed_work(priv->workqueue, &priv->rf_kill,
+					   round_jiffies(HZ));
 		goto exit_unlock;
 	}
 
@@ -7865,10 +7868,10 @@
 		goto done;
 	}
 
-	if ((mode < 1) || (mode > POWER_MODES))
+	if ((mode < 0) || (mode > POWER_MODES))
 		mode = IPW_POWER_AUTO;
 
-	if (priv->power_mode != mode)
+	if (IPW_POWER_LEVEL(priv->power_mode) != mode)
 		err = ipw2100_set_power_mode(priv, mode);
       done:
 	mutex_unlock(&priv->action_mutex);
@@ -7899,7 +7902,7 @@
 			break;
 		case IPW_POWER_AUTO:
 			snprintf(extra, MAX_POWER_STRING,
-				 "Power save level: %d (Auto)", 0);
+				 "Power save level: %d (Auto)", level);
 			break;
 		default:
 			timeout = timeout_duration[level - 1] / 1000;
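The repeated ipw2100/ipw2200 change above wraps fixed delays in round_jiffies(), which rounds a jiffies value to a whole second; timers that do not need sub-second precision then tend to expire together instead of waking the CPU separately. Usage is just a matter of rounding the delay handed to the timer or workqueue, e.g.:

	/* poll the RF-kill switch roughly once a second, aligned to a
	 * full second so it can coalesce with other coarse timers */
	queue_delayed_work(priv->workqueue, &priv->rf_kill,
			   round_jiffies(HZ));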
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 7cb2052..61497c4 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -70,7 +70,7 @@
 #define VQ
 #endif
 
-#define IPW2200_VERSION "1.2.0" VK VD VM VP VR VQ
+#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
 #define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
 #define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
 #define DRV_VERSION     IPW2200_VERSION
@@ -1751,7 +1751,7 @@
 			/* Make sure the RF_KILL check timer is running */
 			cancel_delayed_work(&priv->rf_kill);
 			queue_delayed_work(priv->workqueue, &priv->rf_kill,
-					   2 * HZ);
+					   round_jiffies(2 * HZ));
 		} else
 			queue_work(priv->workqueue, &priv->up);
 	}
@@ -2506,7 +2506,7 @@
 		break;
 	}
 
-	param = cpu_to_le32(mode);
+	param = cpu_to_le32(param);
 	return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
 				&param);
 }
@@ -4690,7 +4690,8 @@
 			else if (priv->config & CFG_BACKGROUND_SCAN
 				 && priv->status & STATUS_ASSOCIATED)
 				queue_delayed_work(priv->workqueue,
-						   &priv->request_scan, HZ);
+						   &priv->request_scan,
+						   round_jiffies(HZ));
 
 			/* Send an empty event to user space.
 			 * We don't send the received data on the event because
@@ -9567,6 +9568,7 @@
 		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
 	else
 		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
+
 	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
 	if (err) {
 		IPW_DEBUG_WX("failed setting power mode.\n");
@@ -9603,22 +9605,19 @@
 	struct ipw_priv *priv = ieee80211_priv(dev);
 	int mode = *(int *)extra;
 	int err;
+
 	mutex_lock(&priv->mutex);
-	if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
+	if ((mode < 1) || (mode > IPW_POWER_LIMIT))
 		mode = IPW_POWER_AC;
-		priv->power_mode = mode;
-	} else {
-		priv->power_mode = IPW_POWER_ENABLED | mode;
-	}
 
-	if (priv->power_mode != mode) {
+	if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
 		err = ipw_send_power_mode(priv, mode);
-
 		if (err) {
 			IPW_DEBUG_WX("failed setting power mode.\n");
 			mutex_unlock(&priv->mutex);
 			return err;
 		}
+		priv->power_mode = IPW_POWER_ENABLED | mode;
 	}
 	mutex_unlock(&priv->mutex);
 	return 0;
@@ -10554,7 +10553,7 @@
 	spin_lock(&priv->irq_lock);
 
 	if (!(priv->status & STATUS_INT_ENABLED)) {
-		/* Shared IRQ */
+		/* IRQ is disabled */
 		goto none;
 	}
 
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 13f6528..4a8f5dc 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -240,7 +240,7 @@
 		if (*enable)
 			penableRSN->enable = cpu_to_le16(cmd_enable_rsn);
 		else
-			penableRSN->enable = cpu_to_le16(cmd_enable_rsn);
+			penableRSN->enable = cpu_to_le16(cmd_disable_rsn);
 	}
 
 	lbs_deb_leave(LBS_DEB_CMD);
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 4a59306..9f36624 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -613,6 +613,7 @@
 
 	init_waitqueue_entry(&wait, current);
 
+	set_freezable();
 	for (;;) {
 		lbs_deb_thread( "main-thread 111: intcounter=%d "
 		       "currenttxskb=%p dnld_sent=%d\n",
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 88d9d2d..769c86f 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -439,7 +439,6 @@
 	ret = 0;
 
 done:
-	skb->protocol = __constant_htons(0x0019);	/* ETH_P_80211_RAW */
 	lbs_deb_leave_args(LBS_DEB_RX, "ret %d", ret);
 	return ret;
 }
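Both the airo and libertas threads in this series gain a set_freezable() call before their main loop. Kernel threads are not freezable by default; set_freezable() opts the thread in, and the existing try_to_freeze() call in the loop body is what actually parks it during suspend. A minimal sketch of a freezable driver thread:

	static int example_thread(void *data)
	{
		set_freezable();	/* allow the freezer to stop us on suspend */

		while (!kthread_should_stop()) {
			try_to_freeze();
			/* ... wait for work and service the hardware ... */
		}
		return 0;
	}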
diff --git a/drivers/net/wireless/libertas/version.h b/drivers/net/wireless/libertas/version.h
deleted file mode 100644
index 8b13789..0000000
--- a/drivers/net/wireless/libertas/version.h
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index f42b796..2fcc3bf 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -1719,9 +1719,6 @@
 			pkey->type = KEY_TYPE_ID_TKIP;
 		} else if (alg == IW_ENCODE_ALG_CCMP) {
 			pkey->type = KEY_TYPE_ID_AES;
-		} else {
-			ret = -EINVAL;
-			goto out;
 		}
 
 		/* If WPA isn't enabled yet, do that now */
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 283be4a..585f599 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -1853,7 +1853,6 @@
 	islpci_private *priv = netdev_priv(ndev);
 	struct islpci_acl *acl = &priv->acl;
 	struct mac_entry *entry;
-	struct list_head *ptr;
 	struct sockaddr *addr = (struct sockaddr *) extra;
 
 	if (addr->sa_family != ARPHRD_ETHER)
@@ -1861,11 +1860,9 @@
 
 	if (down_interruptible(&acl->sem))
 		return -ERESTARTSYS;
-	for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) {
-		entry = list_entry(ptr, struct mac_entry, _list);
-
+	list_for_each_entry(entry, &acl->mac_list, _list) {
 		if (memcmp(entry->addr, addr->sa_data, ETH_ALEN) == 0) {
-			list_del(ptr);
+			list_del(&entry->_list);
 			acl->size--;
 			kfree(entry);
 			up(&acl->sem);
@@ -1883,7 +1880,6 @@
 	islpci_private *priv = netdev_priv(ndev);
 	struct islpci_acl *acl = &priv->acl;
 	struct mac_entry *entry;
-	struct list_head *ptr;
 	struct sockaddr *dst = (struct sockaddr *) extra;
 
 	dwrq->length = 0;
@@ -1891,9 +1887,7 @@
 	if (down_interruptible(&acl->sem))
 		return -ERESTARTSYS;
 
-	for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) {
-		entry = list_entry(ptr, struct mac_entry, _list);
-
+	list_for_each_entry(entry, &acl->mac_list, _list) {
 		memcpy(dst->sa_data, entry->addr, ETH_ALEN);
 		dst->sa_family = ARPHRD_ETHER;
 		dwrq->length++;
@@ -1960,7 +1954,6 @@
 static int
 prism54_mac_accept(struct islpci_acl *acl, char *mac)
 {
-	struct list_head *ptr;
 	struct mac_entry *entry;
 	int res = 0;
 
@@ -1972,8 +1965,7 @@
 		return 1;
 	}
 
-	for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) {
-		entry = list_entry(ptr, struct mac_entry, _list);
+	list_for_each_entry(entry, &acl->mac_list, _list) {
 		if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
 			res = 1;
 			break;
@@ -2216,11 +2208,9 @@
 void
 prism54_wpa_bss_ie_clean(islpci_private *priv)
 {
-	struct list_head *ptr, *n;
+	struct islpci_bss_wpa_ie *bss, *n;
 
-	list_for_each_safe(ptr, n, &priv->bss_wpa_list) {
-		struct islpci_bss_wpa_ie *bss;
-		bss = list_entry(ptr, struct islpci_bss_wpa_ie, list);
+	list_for_each_entry_safe(bss, n, &priv->bss_wpa_list, list) {
 		kfree(bss);
 	}
 }
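The prism54 hunks above replace open-coded walks over ->next pointers with the list_for_each_entry() helpers from <linux/list.h>; the _safe variant keeps a second cursor so the current entry may be unlinked and freed inside the loop. A minimal sketch, assuming entries embed a struct list_head named list:

	struct item {
		struct list_head list;
		int value;
	};

	static LIST_HEAD(items);
	struct item *it, *tmp;

	/* read-only traversal */
	list_for_each_entry(it, &items, list)
		pr_info("value=%d\n", it->value);

	/* traversal that frees entries as it goes */
	list_for_each_entry_safe(it, tmp, &items, list) {
		list_del(&it->list);
		kfree(it);
	}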
diff --git a/drivers/net/wireless/rtl8187_rtl8225.c b/drivers/net/wireless/rtl8187_rtl8225.c
index e25a09f..efc4120 100644
--- a/drivers/net/wireless/rtl8187_rtl8225.c
+++ b/drivers/net/wireless/rtl8187_rtl8225.c
@@ -67,7 +67,7 @@
 	msleep(2);
 }
 
-static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, u16 data)
+static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, __le16 data)
 {
 	struct rtl8187_priv *priv = dev->priv;
 	u16 reg80, reg82, reg84;
@@ -106,7 +106,7 @@
 	struct rtl8187_priv *priv = dev->priv;
 
 	if (priv->asic_rev)
-		rtl8225_write_8051(dev, addr, data);
+		rtl8225_write_8051(dev, addr, cpu_to_le16(data));
 	else
 		rtl8225_write_bitbang(dev, addr, data);
 }
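The rtl8187 change above makes the 8051 write path take an explicit __le16, so sparse can verify that every caller converts exactly once with cpu_to_le16() and the value reaches the device in little-endian order on any host. The conversion helpers are symmetric:

	__le16 wire = cpu_to_le16(host_value);	/* host order -> little endian */
	u16 host = le16_to_cpu(wire);		/* little endian -> host order */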
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 5b624bf..c39f198 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -49,8 +49,9 @@
 	ZD_MEMCLEAR(chip, sizeof(*chip));
 }
 
-static int scnprint_mac_oui(const u8 *addr, char *buffer, size_t size)
+static int scnprint_mac_oui(struct zd_chip *chip, char *buffer, size_t size)
 {
+	u8 *addr = zd_usb_to_netdev(&chip->usb)->dev_addr;
 	return scnprintf(buffer, size, "%02x-%02x-%02x",
 		         addr[0], addr[1], addr[2]);
 }
@@ -61,10 +62,10 @@
 	int i = 0;
 
 	i = scnprintf(buffer, size, "zd1211%s chip ",
-		      chip->is_zd1211b ? "b" : "");
+		      zd_chip_is_zd1211b(chip) ? "b" : "");
 	i += zd_usb_scnprint_id(&chip->usb, buffer+i, size-i);
 	i += scnprintf(buffer+i, size-i, " ");
-	i += scnprint_mac_oui(chip->e2p_mac, buffer+i, size-i);
+	i += scnprint_mac_oui(chip, buffer+i, size-i);
 	i += scnprintf(buffer+i, size-i, " ");
 	i += zd_rf_scnprint_id(&chip->rf, buffer+i, size-i);
 	i += scnprintf(buffer+i, size-i, " pa%1x %c%c%c%c%c", chip->pa_type,
@@ -366,64 +367,9 @@
 	return r;
 }
 
-static int _read_mac_addr(struct zd_chip *chip, u8 *mac_addr,
-	                  const zd_addr_t *addr)
-{
-	int r;
-	u32 parts[2];
-
-	r = zd_ioread32v_locked(chip, parts, (const zd_addr_t *)addr, 2);
-	if (r) {
-		dev_dbg_f(zd_chip_dev(chip),
-			"error: couldn't read e2p macs. Error number %d\n", r);
-		return r;
-	}
-
-	mac_addr[0] = parts[0];
-	mac_addr[1] = parts[0] >>  8;
-	mac_addr[2] = parts[0] >> 16;
-	mac_addr[3] = parts[0] >> 24;
-	mac_addr[4] = parts[1];
-	mac_addr[5] = parts[1] >>  8;
-
-	return 0;
-}
-
-static int read_e2p_mac_addr(struct zd_chip *chip)
-{
-	static const zd_addr_t addr[2] = { E2P_MAC_ADDR_P1, E2P_MAC_ADDR_P2 };
-
-	ZD_ASSERT(mutex_is_locked(&chip->mutex));
-	return _read_mac_addr(chip, chip->e2p_mac, (const zd_addr_t *)addr);
-}
-
 /* MAC address: if custom mac addresses are to to be used CR_MAC_ADDR_P1 and
  *              CR_MAC_ADDR_P2 must be overwritten
  */
-void zd_get_e2p_mac_addr(struct zd_chip *chip, u8 *mac_addr)
-{
-	mutex_lock(&chip->mutex);
-	memcpy(mac_addr, chip->e2p_mac, ETH_ALEN);
-	mutex_unlock(&chip->mutex);
-}
-
-static int read_mac_addr(struct zd_chip *chip, u8 *mac_addr)
-{
-	static const zd_addr_t addr[2] = { CR_MAC_ADDR_P1, CR_MAC_ADDR_P2 };
-	return _read_mac_addr(chip, mac_addr, (const zd_addr_t *)addr);
-}
-
-int zd_read_mac_addr(struct zd_chip *chip, u8 *mac_addr)
-{
-	int r;
-
-	dev_dbg_f(zd_chip_dev(chip), "\n");
-	mutex_lock(&chip->mutex);
-	r = read_mac_addr(chip, mac_addr);
-	mutex_unlock(&chip->mutex);
-	return r;
-}
-
 int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
 {
 	int r;
@@ -444,12 +390,6 @@
 
 	mutex_lock(&chip->mutex);
 	r = zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
-#ifdef DEBUG
-	{
-		u8 tmp[ETH_ALEN];
-		read_mac_addr(chip, tmp);
-	}
-#endif /* DEBUG */
 	mutex_unlock(&chip->mutex);
 	return r;
 }
@@ -809,7 +749,7 @@
 
 static int hw_reset_phy(struct zd_chip *chip)
 {
-	return chip->is_zd1211b ? zd1211b_hw_reset_phy(chip) :
+	return zd_chip_is_zd1211b(chip) ? zd1211b_hw_reset_phy(chip) :
 		                  zd1211_hw_reset_phy(chip);
 }
 
@@ -874,7 +814,7 @@
 	if (r)
 		return r;
 
-	return chip->is_zd1211b ?
+	return zd_chip_is_zd1211b(chip) ?
 		zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip);
 }
 
@@ -1136,8 +1076,15 @@
 	return 0;
 }
 
+/* Read mac address using pre-firmware interface */
+int zd_chip_read_mac_addr_fw(struct zd_chip *chip, u8 *addr)
+{
+	dev_dbg_f(zd_chip_dev(chip), "\n");
+	return zd_usb_read_fw(&chip->usb, E2P_MAC_ADDR_P1, addr,
+		ETH_ALEN);
+}
 
-int zd_chip_init_hw(struct zd_chip *chip, u8 device_type)
+int zd_chip_init_hw(struct zd_chip *chip)
 {
 	int r;
 	u8 rf_type;
@@ -1145,7 +1092,6 @@
 	dev_dbg_f(zd_chip_dev(chip), "\n");
 
 	mutex_lock(&chip->mutex);
-	chip->is_zd1211b = (device_type == DEVICE_ZD1211B) != 0;
 
 #ifdef DEBUG
 	r = test_init(chip);
@@ -1201,10 +1147,6 @@
 		goto out;
 #endif /* DEBUG */
 
-	r = read_e2p_mac_addr(chip);
-	if (r)
-		goto out;
-
 	r = read_cal_int_tables(chip);
 	if (r)
 		goto out;
@@ -1259,7 +1201,7 @@
 	r = update_pwr_int(chip, channel);
 	if (r)
 		return r;
-	if (chip->is_zd1211b) {
+	if (zd_chip_is_zd1211b(chip)) {
 		static const struct zd_ioreq16 ioreqs[] = {
 			{ CR69, 0x28 },
 			{},
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 79d0288..f469857 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -704,7 +704,6 @@
 	struct mutex mutex;
 	/* Base address of FW_REG_ registers */
 	zd_addr_t fw_regs_base;
-	u8 e2p_mac[ETH_ALEN];
 	/* EepSetPoint in the vendor driver */
 	u8 pwr_cal_values[E2P_CHANNEL_COUNT];
 	/* integration values in the vendor driver */
@@ -715,7 +714,7 @@
 	unsigned int pa_type:4,
 		patch_cck_gain:1, patch_cr157:1, patch_6m_band_edge:1,
 		new_phy_layout:1, al2230s_bit:1,
-		is_zd1211b:1, supports_tx_led:1;
+		supports_tx_led:1;
 };
 
 static inline struct zd_chip *zd_usb_to_chip(struct zd_usb *usb)
@@ -734,9 +733,15 @@
 	         struct net_device *netdev,
 	         struct usb_interface *intf);
 void zd_chip_clear(struct zd_chip *chip);
-int zd_chip_init_hw(struct zd_chip *chip, u8 device_type);
+int zd_chip_read_mac_addr_fw(struct zd_chip *chip, u8 *addr);
+int zd_chip_init_hw(struct zd_chip *chip);
 int zd_chip_reset(struct zd_chip *chip);
 
+static inline int zd_chip_is_zd1211b(struct zd_chip *chip)
+{
+	return chip->usb.is_zd1211b;
+}
+
 static inline int zd_ioread16v_locked(struct zd_chip *chip, u16 *values,
 	                              const zd_addr_t *addresses,
 				      unsigned int count)
@@ -825,8 +830,6 @@
 }
 u8  zd_chip_get_channel(struct zd_chip *chip);
 int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain);
-void zd_get_e2p_mac_addr(struct zd_chip *chip, u8 *mac_addr);
-int zd_read_mac_addr(struct zd_chip *chip, u8 *mac_addr);
 int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr);
 int zd_chip_switch_radio_on(struct zd_chip *chip);
 int zd_chip_switch_radio_off(struct zd_chip *chip);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 6753d24..f6c487a 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -86,38 +86,46 @@
 	return r;
 }
 
-int zd_mac_init_hw(struct zd_mac *mac, u8 device_type)
+int zd_mac_preinit_hw(struct zd_mac *mac)
+{
+	int r;
+	u8 addr[ETH_ALEN];
+
+	r = zd_chip_read_mac_addr_fw(&mac->chip, addr);
+	if (r)
+		return r;
+
+	memcpy(mac->netdev->dev_addr, addr, ETH_ALEN);
+	return 0;
+}
+
+int zd_mac_init_hw(struct zd_mac *mac)
 {
 	int r;
 	struct zd_chip *chip = &mac->chip;
-	u8 addr[ETH_ALEN];
 	u8 default_regdomain;
 
 	r = zd_chip_enable_int(chip);
 	if (r)
 		goto out;
-	r = zd_chip_init_hw(chip, device_type);
+	r = zd_chip_init_hw(chip);
 	if (r)
 		goto disable_int;
 
-	zd_get_e2p_mac_addr(chip, addr);
-	r = zd_write_mac_addr(chip, addr);
-	if (r)
-		goto disable_int;
 	ZD_ASSERT(!irqs_disabled());
-	spin_lock_irq(&mac->lock);
-	memcpy(mac->netdev->dev_addr, addr, ETH_ALEN);
-	spin_unlock_irq(&mac->lock);
 
 	r = zd_read_regdomain(chip, &default_regdomain);
 	if (r)
 		goto disable_int;
 	if (!zd_regdomain_supported(default_regdomain)) {
-		dev_dbg_f(zd_mac_dev(mac),
-			  "Regulatory Domain %#04x is not supported.\n",
-		          default_regdomain);
-		r = -EINVAL;
-		goto disable_int;
+		/* The vendor driver overrides the regulatory domain and
+		 * allowed channel registers and unconditionally restricts
+		 * available channels to 1-11 everywhere. Match their
+		 * questionable behaviour only for regdomains which we don't
+		 * recognise. */
+		dev_warn(zd_mac_dev(mac),  "Unrecognised regulatory domain: "
+			"%#04x. Defaulting to FCC.\n", default_regdomain);
+		default_regdomain = ZD_REGDOMAIN_FCC;
 	}
 	spin_lock_irq(&mac->lock);
 	mac->regdomain = mac->default_regdomain = default_regdomain;
@@ -164,14 +172,25 @@
 {
 	struct zd_mac *mac = zd_netdev_mac(netdev);
 	struct zd_chip *chip = &mac->chip;
+	struct zd_usb *usb = &chip->usb;
 	int r;
 
+	if (!usb->initialized) {
+		r = zd_usb_init_hw(usb);
+		if (r)
+			goto out;
+	}
+
 	tasklet_enable(&mac->rx_tasklet);
 
 	r = zd_chip_enable_int(chip);
 	if (r < 0)
 		goto out;
 
+	r = zd_write_mac_addr(chip, netdev->dev_addr);
+	if (r)
+		goto disable_int;
+
 	r = zd_chip_set_basic_rates(chip, CR_RATES_80211B | CR_RATES_80211G);
 	if (r < 0)
 		goto disable_int;
@@ -251,9 +270,11 @@
 	dev_dbg_f(zd_mac_dev(mac),
 		  "Setting MAC to " MAC_FMT "\n", MAC_ARG(addr->sa_data));
 
-	r = zd_write_mac_addr(chip, addr->sa_data);
-	if (r)
-		return r;
+	if (netdev->flags & IFF_UP) {
+		r = zd_write_mac_addr(chip, addr->sa_data);
+		if (r)
+			return r;
+	}
 
 	spin_lock_irqsave(&mac->lock, flags);
 	memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -855,7 +876,7 @@
 	/* ZD1211B: Computing the length difference this way, gives us
 	 * flexibility to compute the packet length.
 	 */
-	cs->packet_length = cpu_to_le16(mac->chip.is_zd1211b ?
+	cs->packet_length = cpu_to_le16(zd_chip_is_zd1211b(&mac->chip) ?
 			packet_length - frag_len : packet_length);
 
 	/*
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index faf4c78..9f9344e 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -189,7 +189,8 @@
 		struct usb_interface *intf);
 void zd_mac_clear(struct zd_mac *mac);
 
-int zd_mac_init_hw(struct zd_mac *mac, u8 device_type);
+int zd_mac_preinit_hw(struct zd_mac *mac);
+int zd_mac_init_hw(struct zd_mac *mac);
 
 int zd_mac_open(struct net_device *netdev);
 int zd_mac_stop(struct net_device *netdev);
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c
index 7407409..abe5d38 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf.c
@@ -34,7 +34,7 @@
 	[AL2210_RF]	= "AL2210_RF",
 	[MAXIM_NEW_RF]	= "MAXIM_NEW_RF",
 	[UW2453_RF]	= "UW2453_RF",
-	[UNKNOWN_A_RF]	= "UNKNOWN_A_RF",
+	[AL2230S_RF]	= "AL2230S_RF",
 	[RALINK_RF]	= "RALINK_RF",
 	[INTERSIL_RF]	= "INTERSIL_RF",
 	[RF2959_RF]	= "RF2959_RF",
@@ -77,6 +77,7 @@
 		r = zd_rf_init_rf2959(rf);
 		break;
 	case AL2230_RF:
+	case AL2230S_RF:
 		r = zd_rf_init_al2230(rf);
 		break;
 	case AL7230B_RF:
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h
index c6dfd82..30502f2 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.h
+++ b/drivers/net/wireless/zd1211rw/zd_rf.h
@@ -26,7 +26,7 @@
 #define AL2210_RF			0x7
 #define MAXIM_NEW_RF			0x8
 #define UW2453_RF			0x9
-#define UNKNOWN_A_RF			0xa
+#define AL2230S_RF			0xa
 #define RALINK_RF			0xb
 #define INTERSIL_RF			0xc
 #define RF2959_RF			0xd
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
index e7a4ecf..006774d 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
@@ -21,6 +21,8 @@
 #include "zd_usb.h"
 #include "zd_chip.h"
 
+#define IS_AL2230S(chip) ((chip)->al2230s_bit || (chip)->rf.type == AL2230S_RF)
+
 static const u32 zd1211_al2230_table[][3] = {
 	RF_CHANNEL( 1) = { 0x03f790, 0x033331, 0x00000d, },
 	RF_CHANNEL( 2) = { 0x03f790, 0x0b3331, 0x00000d, },
@@ -176,7 +178,7 @@
 	if (r)
 		return r;
 
-	if (chip->al2230s_bit) {
+	if (IS_AL2230S(chip)) {
 		r = zd_iowrite16a_locked(chip, ioreqs_init_al2230s,
 			ARRAY_SIZE(ioreqs_init_al2230s));
 		if (r)
@@ -188,7 +190,7 @@
 		return r;
 
 	/* improve band edge for AL2230S */
-	if (chip->al2230s_bit)
+	if (IS_AL2230S(chip))
 		r = zd_rfwrite_locked(chip, 0x000824, RF_RV_BITS);
 	else
 		r = zd_rfwrite_locked(chip, 0x0005a4, RF_RV_BITS);
@@ -314,7 +316,7 @@
 	if (r)
 		return r;
 
-	if (chip->al2230s_bit) {
+	if (IS_AL2230S(chip)) {
 		r = zd_iowrite16a_locked(chip, ioreqs_init_al2230s,
 			ARRAY_SIZE(ioreqs_init_al2230s));
 		if (r)
@@ -328,7 +330,7 @@
 	if (r)
 		return r;
 
-	if (chip->al2230s_bit)
+	if (IS_AL2230S(chip))
 		r = zd_rfwrite_locked(chip, 0x241000, RF_RV_BITS);
 	else
 		r = zd_rfwrite_locked(chip, 0x25a000, RF_RV_BITS);
@@ -422,7 +424,7 @@
 	struct zd_chip *chip = zd_rf_to_chip(rf);
 
 	rf->switch_radio_off = al2230_switch_radio_off;
-	if (chip->is_zd1211b) {
+	if (zd_chip_is_zd1211b(chip)) {
 		rf->init_hw = zd1211b_al2230_init_hw;
 		rf->set_channel = zd1211b_al2230_set_channel;
 		rf->switch_radio_on = zd1211b_al2230_switch_radio_on;
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
index f4e8b6a..73d0bb2 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
@@ -473,7 +473,7 @@
 {
 	struct zd_chip *chip = zd_rf_to_chip(rf);
 
-	if (chip->is_zd1211b) {
+	if (zd_chip_is_zd1211b(chip)) {
 		rf->init_hw = zd1211b_al7230b_init_hw;
 		rf->switch_radio_on = zd1211b_al7230b_switch_radio_on;
 		rf->set_channel = zd1211b_al7230b_set_channel;
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
index 2d736bd..cc70d40 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
@@ -265,7 +265,7 @@
 {
 	struct zd_chip *chip = zd_rf_to_chip(rf);
 
-	if (chip->is_zd1211b) {
+	if (zd_chip_is_zd1211b(chip)) {
 		dev_err(zd_chip_dev(chip),
 		       "RF2959 is currently not supported for ZD1211B"
 		       " devices\n");
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
index 414e40d..857dcf3 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
@@ -486,7 +486,7 @@
 	if (r)
 		return r;
 
-	if (chip->is_zd1211b)
+	if (zd_chip_is_zd1211b(chip))
 		ioreqs[1].value = 0x7f;
 
 	return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index ca24299..a9c339e 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -71,6 +71,9 @@
 	{ USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
+	{ USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B },
+	{ USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B },
+	{ USB_DEVICE(0x129b, 0x1667), .driver_info = DEVICE_ZD1211B },
 	/* "Driverless" devices that need ejecting */
 	{ USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
 	{ USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
@@ -195,26 +198,27 @@
 	return le16_to_cpu(p[offset]);
 }
 
-static char *get_fw_name(char *buffer, size_t size, u8 device_type,
+static char *get_fw_name(struct zd_usb *usb, char *buffer, size_t size,
 	               const char* postfix)
 {
 	scnprintf(buffer, size, "%s%s",
-		device_type == DEVICE_ZD1211B ?
+		usb->is_zd1211b ?
 			FW_ZD1211B_PREFIX : FW_ZD1211_PREFIX,
 		postfix);
 	return buffer;
 }
 
-static int handle_version_mismatch(struct usb_device *udev, u8 device_type,
+static int handle_version_mismatch(struct zd_usb *usb,
 	const struct firmware *ub_fw)
 {
+	struct usb_device *udev = zd_usb_to_usbdev(usb);
 	const struct firmware *ur_fw = NULL;
 	int offset;
 	int r = 0;
 	char fw_name[128];
 
 	r = request_fw_file(&ur_fw,
-		get_fw_name(fw_name, sizeof(fw_name), device_type, "ur"),
+		get_fw_name(usb, fw_name, sizeof(fw_name), "ur"),
 		&udev->dev);
 	if (r)
 		goto error;
@@ -237,11 +241,12 @@
 	return r;
 }
 
-static int upload_firmware(struct usb_device *udev, u8 device_type)
+static int upload_firmware(struct zd_usb *usb)
 {
 	int r;
 	u16 fw_bcdDevice;
 	u16 bcdDevice;
+	struct usb_device *udev = zd_usb_to_usbdev(usb);
 	const struct firmware *ub_fw = NULL;
 	const struct firmware *uph_fw = NULL;
 	char fw_name[128];
@@ -249,7 +254,7 @@
 	bcdDevice = get_bcdDevice(udev);
 
 	r = request_fw_file(&ub_fw,
-		get_fw_name(fw_name, sizeof(fw_name), device_type,  "ub"),
+		get_fw_name(usb, fw_name, sizeof(fw_name), "ub"),
 		&udev->dev);
 	if (r)
 		goto error;
@@ -264,7 +269,7 @@
 			dev_warn(&udev->dev, "device has old bootcode, please "
 				"report success or failure\n");
 
-		r = handle_version_mismatch(udev, device_type, ub_fw);
+		r = handle_version_mismatch(usb, ub_fw);
 		if (r)
 			goto error;
 	} else {
@@ -275,7 +280,7 @@
 
 
 	r = request_fw_file(&uph_fw,
-		get_fw_name(fw_name, sizeof(fw_name), device_type, "uphr"),
+		get_fw_name(usb, fw_name, sizeof(fw_name), "uphr"),
 		&udev->dev);
 	if (r)
 		goto error;
@@ -294,6 +299,30 @@
 	return r;
 }
 
+/* Read data from device address space using "firmware interface" which does
+ * not require firmware to be loaded. */
+int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len)
+{
+	int r;
+	struct usb_device *udev = zd_usb_to_usbdev(usb);
+
+	r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+		USB_REQ_FIRMWARE_READ_DATA, USB_DIR_IN | 0x40, addr, 0,
+		data, len, 5000);
+	if (r < 0) {
+		dev_err(&udev->dev,
+			"read over firmware interface failed: %d\n", r);
+		return r;
+	} else if (r != len) {
+		dev_err(&udev->dev,
+			"incomplete read over firmware interface: %d/%d\n",
+			r, len);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 #define urb_dev(urb) (&(urb)->dev->dev)
 
 static inline void handle_regs_int(struct urb *urb)
@@ -920,9 +949,42 @@
 	return 0;
 }
 
+int zd_usb_init_hw(struct zd_usb *usb)
+{
+	int r;
+	struct zd_mac *mac = zd_usb_to_mac(usb);
+
+	dev_dbg_f(zd_usb_dev(usb), "\n");
+
+	r = upload_firmware(usb);
+	if (r) {
+		dev_err(zd_usb_dev(usb),
+		       "couldn't load firmware. Error number %d\n", r);
+		return r;
+	}
+
+	r = usb_reset_configuration(zd_usb_to_usbdev(usb));
+	if (r) {
+		dev_dbg_f(zd_usb_dev(usb),
+			"couldn't reset configuration. Error number %d\n", r);
+		return r;
+	}
+
+	r = zd_mac_init_hw(mac);
+	if (r) {
+		dev_dbg_f(zd_usb_dev(usb),
+		         "couldn't initialize mac. Error number %d\n", r);
+		return r;
+	}
+
+	usb->initialized = 1;
+	return 0;
+}
+
 static int probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
 	int r;
+	struct zd_usb *usb;
 	struct usb_device *udev = interface_to_usbdev(intf);
 	struct net_device *netdev = NULL;
 
@@ -950,26 +1012,10 @@
 		goto error;
 	}
 
-	r = upload_firmware(udev, id->driver_info);
-	if (r) {
-		dev_err(&intf->dev,
-		       "couldn't load firmware. Error number %d\n", r);
-		goto error;
-	}
+	usb = &zd_netdev_mac(netdev)->chip.usb;
+	usb->is_zd1211b = (id->driver_info == DEVICE_ZD1211B) != 0;
 
-	r = usb_reset_configuration(udev);
-	if (r) {
-		dev_dbg_f(&intf->dev,
-			"couldn't reset configuration. Error number %d\n", r);
-		goto error;
-	}
-
-	/* At this point the interrupt endpoint is not generally enabled. We
-	 * save the USB bandwidth until the network device is opened. But
-	 * notify that the initialization of the MAC will require the
-	 * interrupts to be temporary enabled.
-	 */
-	r = zd_mac_init_hw(zd_netdev_mac(netdev), id->driver_info);
+	r = zd_mac_preinit_hw(zd_netdev_mac(netdev));
 	if (r) {
 		dev_dbg_f(&intf->dev,
 		         "couldn't initialize mac. Error number %d\n", r);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 506ea6a..961a7a1 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -188,6 +188,7 @@
 	struct zd_usb_rx rx;
 	struct zd_usb_tx tx;
 	struct usb_interface *intf;
+	u8 is_zd1211b:1, initialized:1;
 };
 
 #define zd_usb_dev(usb) (&usb->intf->dev)
@@ -236,6 +237,8 @@
 
 int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits);
 
+int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len);
+
 extern struct workqueue_struct *zd_workqueue;
 
 #endif /* _ZD_USB_H */
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
new file mode 100644
index 0000000..489f69c
--- /dev/null
+++ b/drivers/net/xen-netfront.c
@@ -0,0 +1,1863 @@
+/*
+ * Virtual network driver for conversing with remote driver backends.
+ *
+ * Copyright (c) 2002-2005, K A Fraser
+ * Copyright (c) 2005, XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/moduleparam.h>
+#include <linux/mm.h>
+#include <net/ip.h>
+
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/page.h>
+#include <xen/grant_table.h>
+
+#include <xen/interface/io/netif.h>
+#include <xen/interface/memory.h>
+#include <xen/interface/grant_table.h>
+
+static struct ethtool_ops xennet_ethtool_ops;
+
+struct netfront_cb {
+	struct page *page;
+	unsigned offset;
+};
+
+#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
+
+#define RX_COPY_THRESHOLD 256
+
+#define GRANT_INVALID_REF	0
+
+#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
+#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
+#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
+
+struct netfront_info {
+	struct list_head list;
+	struct net_device *netdev;
+
+	struct net_device_stats stats;
+
+	struct xen_netif_tx_front_ring tx;
+	struct xen_netif_rx_front_ring rx;
+
+	spinlock_t   tx_lock;
+	spinlock_t   rx_lock;
+
+	unsigned int evtchn;
+
+	/* Receive-ring batched refills. */
+#define RX_MIN_TARGET 8
+#define RX_DFL_MIN_TARGET 64
+#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
+	unsigned rx_min_target, rx_max_target, rx_target;
+	struct sk_buff_head rx_batch;
+
+	struct timer_list rx_refill_timer;
+
+	/*
+	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
+	 * are linked from tx_skb_freelist through skb_entry.link.
+	 *
+	 *  NB. Freelist index entries are always going to be less than
+	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
+	 *  greater than PAGE_OFFSET: we use this property to distinguish
+	 *  them.
+	 */
+	union skb_entry {
+		struct sk_buff *skb;
+		unsigned link;
+	} tx_skbs[NET_TX_RING_SIZE];
+	grant_ref_t gref_tx_head;
+	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+	unsigned tx_skb_freelist;
+
+	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
+	grant_ref_t gref_rx_head;
+	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
+
+	struct xenbus_device *xbdev;
+	int tx_ring_ref;
+	int rx_ring_ref;
+
+	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
+	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
+	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+};
+
+struct netfront_rx_info {
+	struct xen_netif_rx_response rx;
+	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
+};
+
+/*
+ * Access macros for acquiring and freeing slots in tx_skbs[].
+ */
+
+static void add_id_to_freelist(unsigned *head, union skb_entry *list,
+			       unsigned short id)
+{
+	list[id].link = *head;
+	*head = id;
+}
+
+static unsigned short get_id_from_freelist(unsigned *head,
+					   union skb_entry *list)
+{
+	unsigned int id = *head;
+	*head = list[id].link;
+	return id;
+}
+
+static int xennet_rxidx(RING_IDX idx)
+{
+	return idx & (NET_RX_RING_SIZE - 1);
+}
+
+static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
+					 RING_IDX ri)
+{
+	int i = xennet_rxidx(ri);
+	struct sk_buff *skb = np->rx_skbs[i];
+	np->rx_skbs[i] = NULL;
+	return skb;
+}
+
+static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
+					    RING_IDX ri)
+{
+	int i = xennet_rxidx(ri);
+	grant_ref_t ref = np->grant_rx_ref[i];
+	np->grant_rx_ref[i] = GRANT_INVALID_REF;
+	return ref;
+}
+
+#ifdef CONFIG_SYSFS
+static int xennet_sysfs_addif(struct net_device *netdev);
+static void xennet_sysfs_delif(struct net_device *netdev);
+#else /* !CONFIG_SYSFS */
+#define xennet_sysfs_addif(dev) (0)
+#define xennet_sysfs_delif(dev) do { } while (0)
+#endif
+
+static int xennet_can_sg(struct net_device *dev)
+{
+	return dev->features & NETIF_F_SG;
+}
+
+
+static void rx_refill_timeout(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	netif_rx_schedule(dev);
+}
+
+static int netfront_tx_slot_available(struct netfront_info *np)
+{
+	return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
+		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
+}
+
+static void xennet_maybe_wake_tx(struct net_device *dev)
+{
+	struct netfront_info *np = netdev_priv(dev);
+
+	if (unlikely(netif_queue_stopped(dev)) &&
+	    netfront_tx_slot_available(np) &&
+	    likely(netif_running(dev)))
+		netif_wake_queue(dev);
+}
+
+static void xennet_alloc_rx_buffers(struct net_device *dev)
+{
+	unsigned short id;
+	struct netfront_info *np = netdev_priv(dev);
+	struct sk_buff *skb;
+	struct page *page;
+	int i, batch_target, notify;
+	RING_IDX req_prod = np->rx.req_prod_pvt;
+	struct xen_memory_reservation reservation;
+	grant_ref_t ref;
+	unsigned long pfn;
+	void *vaddr;
+	int nr_flips;
+	struct xen_netif_rx_request *req;
+
+	if (unlikely(!netif_carrier_ok(dev)))
+		return;
+
+	/*
+	 * Allocate skbuffs greedily, even though we batch updates to the
+	 * receive ring. This creates a less bursty demand on the memory
+	 * allocator, so should reduce the chance of failed allocation requests
+	 * both for ourself and for other kernel subsystems.
+	 */
+	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
+	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
+		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
+					 GFP_ATOMIC | __GFP_NOWARN);
+		if (unlikely(!skb))
+			goto no_skb;
+
+		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+		if (!page) {
+			kfree_skb(skb);
+no_skb:
+			/* Any skbuffs queued for refill? Force them out. */
+			if (i != 0)
+				goto refill;
+			/* Could not allocate any skbuffs. Try again later. */
+			mod_timer(&np->rx_refill_timer,
+				  jiffies + (HZ/10));
+			break;
+		}
+
+		skb_shinfo(skb)->frags[0].page = page;
+		skb_shinfo(skb)->nr_frags = 1;
+		__skb_queue_tail(&np->rx_batch, skb);
+	}
+
+	/* Is the batch large enough to be worthwhile? */
+	if (i < (np->rx_target/2)) {
+		if (req_prod > np->rx.sring->req_prod)
+			goto push;
+		return;
+	}
+
+	/* Adjust our fill target if we risked running out of buffers. */
+	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
+	    ((np->rx_target *= 2) > np->rx_max_target))
+		np->rx_target = np->rx_max_target;
+
+ refill:
+	for (nr_flips = i = 0; ; i++) {
+		skb = __skb_dequeue(&np->rx_batch);
+		if (skb == NULL)
+			break;
+
+		skb->dev = dev;
+
+		id = xennet_rxidx(req_prod + i);
+
+		BUG_ON(np->rx_skbs[id]);
+		np->rx_skbs[id] = skb;
+
+		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
+		BUG_ON((signed short)ref < 0);
+		np->grant_rx_ref[id] = ref;
+
+		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
+		vaddr = page_address(skb_shinfo(skb)->frags[0].page);
+
+		req = RING_GET_REQUEST(&np->rx, req_prod + i);
+		gnttab_grant_foreign_access_ref(ref,
+						np->xbdev->otherend_id,
+						pfn_to_mfn(pfn),
+						0);
+
+		req->id = id;
+		req->gref = ref;
+	}
+
+	if (nr_flips != 0) {
+		reservation.extent_start = np->rx_pfn_array;
+		reservation.nr_extents   = nr_flips;
+		reservation.extent_order = 0;
+		reservation.address_bits = 0;
+		reservation.domid        = DOMID_SELF;
+
+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			/* After all PTEs have been zapped, flush the TLB. */
+			np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
+				UVMF_TLB_FLUSH|UVMF_ALL;
+
+			/* Give away a batch of pages. */
+			np->rx_mcl[i].op = __HYPERVISOR_memory_op;
+			np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
+			np->rx_mcl[i].args[1] = (unsigned long)&reservation;
+
+			/* Zap PTEs and give away pages in one big
+			 * multicall. */
+			(void)HYPERVISOR_multicall(np->rx_mcl, i+1);
+
+			/* Check return status of HYPERVISOR_memory_op(). */
+			if (unlikely(np->rx_mcl[i].result != i))
+				panic("Unable to reduce memory reservation\n");
+		} else {
+			if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+						 &reservation) != i)
+				panic("Unable to reduce memory reservation\n");
+		}
+	} else {
+		wmb();		/* barrier so backend sees requests */
+	}
+
+	/* Above is a suitable barrier to ensure backend will see requests. */
+	np->rx.req_prod_pvt = req_prod + i;
+ push:
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
+	if (notify)
+		notify_remote_via_irq(np->netdev->irq);
+}
+
+static int xennet_open(struct net_device *dev)
+{
+	struct netfront_info *np = netdev_priv(dev);
+
+	memset(&np->stats, 0, sizeof(np->stats));
+
+	spin_lock_bh(&np->rx_lock);
+	if (netif_carrier_ok(dev)) {
+		xennet_alloc_rx_buffers(dev);
+		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
+		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
+			netif_rx_schedule(dev);
+	}
+	spin_unlock_bh(&np->rx_lock);
+
+	xennet_maybe_wake_tx(dev);
+
+	return 0;
+}
+
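+/* Reclaim skbs and grant references for TX requests completed by the backend. */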
+static void xennet_tx_buf_gc(struct net_device *dev)
+{
+	RING_IDX cons, prod;
+	unsigned short id;
+	struct netfront_info *np = netdev_priv(dev);
+	struct sk_buff *skb;
+
+	BUG_ON(!netif_carrier_ok(dev));
+
+	do {
+		prod = np->tx.sring->rsp_prod;
+		rmb(); /* Ensure we see responses up to 'rp'. */
+
+		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
+			struct xen_netif_tx_response *txrsp;
+
+			txrsp = RING_GET_RESPONSE(&np->tx, cons);
+			if (txrsp->status == NETIF_RSP_NULL)
+				continue;
+
+			id  = txrsp->id;
+			skb = np->tx_skbs[id].skb;
+			if (unlikely(gnttab_query_foreign_access(
+				np->grant_tx_ref[id]) != 0)) {
+				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
+				       "-- grant still in use by backend "
+				       "domain.\n");
+				BUG();
+			}
+			gnttab_end_foreign_access_ref(
+				np->grant_tx_ref[id], GNTMAP_readonly);
+			gnttab_release_grant_reference(
+				&np->gref_tx_head, np->grant_tx_ref[id]);
+			np->grant_tx_ref[id] = GRANT_INVALID_REF;
+			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
+			dev_kfree_skb_irq(skb);
+		}
+
+		np->tx.rsp_cons = prod;
+
+		/*
+		 * Set a new event, then check for race with update of tx_cons.
+		 * Note that it is essential to schedule a callback, no matter
+		 * how few buffers are pending. Even if there is space in the
+		 * transmit ring, higher layers may be blocked because too much
+		 * data is outstanding: in such cases notification from Xen is
+		 * likely to be the only kick that we'll get.
+		 */
+		np->tx.sring->rsp_event =
+			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
+		mb();		/* update shared area */
+	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
+
+	xennet_maybe_wake_tx(dev);
+}
+
+static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+			      struct xen_netif_tx_request *tx)
+{
+	struct netfront_info *np = netdev_priv(dev);
+	char *data = skb->data;
+	unsigned long mfn;
+	RING_IDX prod = np->tx.req_prod_pvt;
+	int frags = skb_shinfo(skb)->nr_frags;
+	unsigned int offset = offset_in_page(data);
+	unsigned int len = skb_headlen(skb);
+	unsigned int id;
+	grant_ref_t ref;
+	int i;
+
+	/* While the header overlaps a page boundary (including being
+	   larger than a page), split it into page-sized chunks. */
+	while (len > PAGE_SIZE - offset) {
+		tx->size = PAGE_SIZE - offset;
+		tx->flags |= NETTXF_more_data;
+		len -= tx->size;
+		data += tx->size;
+		offset = 0;
+
+		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
+		np->tx_skbs[id].skb = skb_get(skb);
+		tx = RING_GET_REQUEST(&np->tx, prod++);
+		tx->id = id;
+		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+		BUG_ON((signed short)ref < 0);
+
+		mfn = virt_to_mfn(data);
+		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+						mfn, GNTMAP_readonly);
+
+		tx->gref = np->grant_tx_ref[id] = ref;
+		tx->offset = offset;
+		tx->size = len;
+		tx->flags = 0;
+	}
+
+	/* Grant backend access to each skb fragment page. */
+	for (i = 0; i < frags; i++) {
+		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+
+		tx->flags |= NETTXF_more_data;
+
+		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
+		np->tx_skbs[id].skb = skb_get(skb);
+		tx = RING_GET_REQUEST(&np->tx, prod++);
+		tx->id = id;
+		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+		BUG_ON((signed short)ref < 0);
+
+		mfn = pfn_to_mfn(page_to_pfn(frag->page));
+		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+						mfn, GNTMAP_readonly);
+
+		tx->gref = np->grant_tx_ref[id] = ref;
+		tx->offset = frag->page_offset;
+		tx->size = frag->size;
+		tx->flags = 0;
+	}
+
+	np->tx.req_prod_pvt = prod;
+}
+
+static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	unsigned short id;
+	struct netfront_info *np = netdev_priv(dev);
+	struct xen_netif_tx_request *tx;
+	struct xen_netif_extra_info *extra;
+	char *data = skb->data;
+	RING_IDX i;
+	grant_ref_t ref;
+	unsigned long mfn;
+	int notify;
+	int frags = skb_shinfo(skb)->nr_frags;
+	unsigned int offset = offset_in_page(data);
+	unsigned int len = skb_headlen(skb);
+
+	frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
+	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
+		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
+		       frags);
+		dump_stack();
+		goto drop;
+	}
+
+	spin_lock_irq(&np->tx_lock);
+
+	if (unlikely(!netif_carrier_ok(dev) ||
+		     (frags > 1 && !xennet_can_sg(dev)) ||
+		     netif_needs_gso(dev, skb))) {
+		spin_unlock_irq(&np->tx_lock);
+		goto drop;
+	}
+
+	i = np->tx.req_prod_pvt;
+
+	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
+	np->tx_skbs[id].skb = skb;
+
+	tx = RING_GET_REQUEST(&np->tx, i);
+
+	tx->id   = id;
+	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+	BUG_ON((signed short)ref < 0);
+	mfn = virt_to_mfn(data);
+	gnttab_grant_foreign_access_ref(
+		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+	tx->gref = np->grant_tx_ref[id] = ref;
+	tx->offset = offset;
+	tx->size = len;
+	extra = NULL;
+
+	tx->flags = 0;
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		/* local packet? */
+		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
+	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+		/* remote but checksummed. */
+		tx->flags |= NETTXF_data_validated;
+
+	if (skb_shinfo(skb)->gso_size) {
+		struct xen_netif_extra_info *gso;
+
+		gso = (struct xen_netif_extra_info *)
+			RING_GET_REQUEST(&np->tx, ++i);
+
+		if (extra)
+			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+		else
+			tx->flags |= NETTXF_extra_info;
+
+		gso->u.gso.size = skb_shinfo(skb)->gso_size;
+		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+		gso->u.gso.pad = 0;
+		gso->u.gso.features = 0;
+
+		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
+		gso->flags = 0;
+		extra = gso;
+	}
+
+	np->tx.req_prod_pvt = i + 1;
+
+	xennet_make_frags(skb, dev, tx);
+	tx->size = skb->len;
+
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
+	if (notify)
+		notify_remote_via_irq(np->netdev->irq);
+
+	xennet_tx_buf_gc(dev);
+
+	if (!netfront_tx_slot_available(np))
+		netif_stop_queue(dev);
+
+	spin_unlock_irq(&np->tx_lock);
+
+	np->stats.tx_bytes += skb->len;
+	np->stats.tx_packets++;
+
+	return 0;
+
+ drop:
+	np->stats.tx_dropped++;
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+static int xennet_close(struct net_device *dev)
+{
+	struct netfront_info *np = netdev_priv(dev);
+	netif_stop_queue(np->netdev);
+	return 0;
+}
+
+static struct net_device_stats *xennet_get_stats(struct net_device *dev)
+{
+	struct netfront_info *np = netdev_priv(dev);
+	return &np->stats;
+}
+
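+/* Re-post an unconsumed skb and its grant reference on a fresh RX request slot. */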
+static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
+				grant_ref_t ref)
+{
+	int new = xennet_rxidx(np->rx.req_prod_pvt);
+
+	BUG_ON(np->rx_skbs[new]);
+	np->rx_skbs[new] = skb;
+	np->grant_rx_ref[new] = ref;
+	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
+	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
+	np->rx.req_prod_pvt++;
+}
+
+static int xennet_get_extras(struct netfront_info *np,
+			     struct xen_netif_extra_info *extras,
+			     RING_IDX rp)
+
+{
+	struct xen_netif_extra_info *extra;
+	struct device *dev = &np->netdev->dev;
+	RING_IDX cons = np->rx.rsp_cons;
+	int err = 0;
+
+	do {
+		struct sk_buff *skb;
+		grant_ref_t ref;
+
+		if (unlikely(cons + 1 == rp)) {
+			if (net_ratelimit())
+				dev_warn(dev, "Missing extra info\n");
+			err = -EBADR;
+			break;
+		}
+
+		extra = (struct xen_netif_extra_info *)
+			RING_GET_RESPONSE(&np->rx, ++cons);
+
+		if (unlikely(!extra->type ||
+			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+			if (net_ratelimit())
+				dev_warn(dev, "Invalid extra type: %d\n",
+					extra->type);
+			err = -EINVAL;
+		} else {
+			memcpy(&extras[extra->type - 1], extra,
+			       sizeof(*extra));
+		}
+
+		skb = xennet_get_rx_skb(np, cons);
+		ref = xennet_get_rx_ref(np, cons);
+		xennet_move_rx_slot(np, skb, ref);
+	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+
+	np->rx.rsp_cons = cons;
+	return err;
+}
+
+static int xennet_get_responses(struct netfront_info *np,
+				struct netfront_rx_info *rinfo, RING_IDX rp,
+				struct sk_buff_head *list)
+{
+	struct xen_netif_rx_response *rx = &rinfo->rx;
+	struct xen_netif_extra_info *extras = rinfo->extras;
+	struct device *dev = &np->netdev->dev;
+	RING_IDX cons = np->rx.rsp_cons;
+	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
+	grant_ref_t ref = xennet_get_rx_ref(np, cons);
+	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
+	int frags = 1;
+	int err = 0;
+	unsigned long ret;
+
+	if (rx->flags & NETRXF_extra_info) {
+		err = xennet_get_extras(np, extras, rp);
+		cons = np->rx.rsp_cons;
+	}
+
+	for (;;) {
+		if (unlikely(rx->status < 0 ||
+			     rx->offset + rx->status > PAGE_SIZE)) {
+			if (net_ratelimit())
+				dev_warn(dev, "rx->offset: %x, size: %u\n",
+					 rx->offset, rx->status);
+			xennet_move_rx_slot(np, skb, ref);
+			err = -EINVAL;
+			goto next;
+		}
+
+		/*
+		 * This definitely indicates a bug, either in this driver or in
+		 * the backend driver. In the future this should flag the bad
+		 * situation to the system controller to reboot the backend.
+		 */
+		if (ref == GRANT_INVALID_REF) {
+			if (net_ratelimit())
+				dev_warn(dev, "Bad rx response id %d.\n",
+					 rx->id);
+			err = -EINVAL;
+			goto next;
+		}
+
+		ret = gnttab_end_foreign_access_ref(ref, 0);
+		BUG_ON(!ret);
+
+		gnttab_release_grant_reference(&np->gref_rx_head, ref);
+
+		__skb_queue_tail(list, skb);
+
+next:
+		if (!(rx->flags & NETRXF_more_data))
+			break;
+
+		if (cons + frags == rp) {
+			if (net_ratelimit())
+				dev_warn(dev, "Need more frags\n");
+			err = -ENOENT;
+			break;
+		}
+
+		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
+		skb = xennet_get_rx_skb(np, cons + frags);
+		ref = xennet_get_rx_ref(np, cons + frags);
+		frags++;
+	}
+
+	if (unlikely(frags > max)) {
+		if (net_ratelimit())
+			dev_warn(dev, "Too many frags\n");
+		err = -E2BIG;
+	}
+
+	if (unlikely(err))
+		np->rx.rsp_cons = cons + frags;
+
+	return err;
+}
+
+static int xennet_set_skb_gso(struct sk_buff *skb,
+			      struct xen_netif_extra_info *gso)
+{
+	if (!gso->u.gso.size) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "GSO size must not be zero.\n");
+		return -EINVAL;
+	}
+
+	/* Currently only TCPv4 segmentation offload is supported. */
+	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
+		return -EINVAL;
+	}
+
+	skb_shinfo(skb)->gso_size = gso->u.gso.size;
+	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+	/* Header must be checked, and gso_segs computed. */
+	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+	skb_shinfo(skb)->gso_segs = 0;
+
+	return 0;
+}
+
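+/*
+ * Attach the pages from the extra response skbs gathered by
+ * xennet_get_responses() to the head skb as fragments.
+ */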
+static RING_IDX xennet_fill_frags(struct netfront_info *np,
+				  struct sk_buff *skb,
+				  struct sk_buff_head *list)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	int nr_frags = shinfo->nr_frags;
+	RING_IDX cons = np->rx.rsp_cons;
+	skb_frag_t *frag = shinfo->frags + nr_frags;
+	struct sk_buff *nskb;
+
+	while ((nskb = __skb_dequeue(list))) {
+		struct xen_netif_rx_response *rx =
+			RING_GET_RESPONSE(&np->rx, ++cons);
+
+		frag->page = skb_shinfo(nskb)->frags[0].page;
+		frag->page_offset = rx->offset;
+		frag->size = rx->status;
+
+		skb->data_len += rx->status;
+
+		skb_shinfo(nskb)->nr_frags = 0;
+		kfree_skb(nskb);
+
+		frag++;
+		nr_frags++;
+	}
+
+	shinfo->nr_frags = nr_frags;
+	return cons;
+}
+
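+/*
+ * For packets the backend marked CHECKSUM_PARTIAL, locate the transport
+ * header and fill in csum_start/csum_offset so the stack can complete the
+ * checksum.
+ */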
+static int skb_checksum_setup(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	unsigned char *th;
+	int err = -EPROTO;
+
+	if (skb->protocol != htons(ETH_P_IP))
+		goto out;
+
+	iph = (void *)skb->data;
+	th = skb->data + 4 * iph->ihl;
+	if (th >= skb_tail_pointer(skb))
+		goto out;
+
+	skb->csum_start = th - skb->head;
+	switch (iph->protocol) {
+	case IPPROTO_TCP:
+		skb->csum_offset = offsetof(struct tcphdr, check);
+		break;
+	case IPPROTO_UDP:
+		skb->csum_offset = offsetof(struct udphdr, check);
+		break;
+	default:
+		if (net_ratelimit())
+			printk(KERN_ERR "Attempting to checksum a non-"
+			       "TCP/UDP packet, dropping a protocol"
+			       " %d packet", iph->protocol);
+		goto out;
+	}
+
+	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
+		goto out;
+
+	err = 0;
+
+out:
+	return err;
+}
+
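+/*
+ * Copy each packet's header into the skb linear area, finish checksum
+ * setup where needed, and pass completed skbs up to the network stack.
+ */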
+static int handle_incoming_queue(struct net_device *dev,
+				  struct sk_buff_head *rxq)
+{
+	struct netfront_info *np = netdev_priv(dev);
+	int packets_dropped = 0;
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(rxq)) != NULL) {
+		struct page *page = NETFRONT_SKB_CB(skb)->page;
+		void *vaddr = page_address(page);
+		unsigned offset = NETFRONT_SKB_CB(skb)->offset;
+
+		memcpy(skb->data, vaddr + offset,
+		       skb_headlen(skb));
+
+		if (page != skb_shinfo(skb)->frags[0].page)
+			__free_page(page);
+
+		/* Ethernet work: Delayed to here as it peeks the header. */
+		skb->protocol = eth_type_trans(skb, dev);
+
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			if (skb_checksum_setup(skb)) {
+				kfree_skb(skb);
+				packets_dropped++;
+				np->stats.rx_errors++;
+				continue;
+			}
+		}
+
+		np->stats.rx_packets++;
+		np->stats.rx_bytes += skb->len;
+
+		/* Pass it up. */
+		netif_receive_skb(skb);
+		dev->last_rx = jiffies;
+	}
+
+	return packets_dropped;
+}
+
+static int xennet_poll(struct net_device *dev, int *pbudget)
+{
+	struct netfront_info *np = netdev_priv(dev);
+	struct sk_buff *skb;
+	struct netfront_rx_info rinfo;
+	struct xen_netif_rx_response *rx = &rinfo.rx;
+	struct xen_netif_extra_info *extras = rinfo.extras;
+	RING_IDX i, rp;
+	int work_done, budget, more_to_do = 1;
+	struct sk_buff_head rxq;
+	struct sk_buff_head errq;
+	struct sk_buff_head tmpq;
+	unsigned long flags;
+	unsigned int len;
+	int err;
+
+	spin_lock(&np->rx_lock);
+
+	if (unlikely(!netif_carrier_ok(dev))) {
+		spin_unlock(&np->rx_lock);
+		return 0;
+	}
+
+	skb_queue_head_init(&rxq);
+	skb_queue_head_init(&errq);
+	skb_queue_head_init(&tmpq);
+
+	budget = *pbudget;
+	if (budget > dev->quota)
+		budget = dev->quota;
+	rp = np->rx.sring->rsp_prod;
+	rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+	i = np->rx.rsp_cons;
+	work_done = 0;
+	while ((i != rp) && (work_done < budget)) {
+		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
+		memset(extras, 0, sizeof(rinfo.extras));
+
+		err = xennet_get_responses(np, &rinfo, rp, &tmpq);
+
+		if (unlikely(err)) {
+err:
+			while ((skb = __skb_dequeue(&tmpq)))
+				__skb_queue_tail(&errq, skb);
+			np->stats.rx_errors++;
+			i = np->rx.rsp_cons;
+			continue;
+		}
+
+		skb = __skb_dequeue(&tmpq);
+
+		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
+			struct xen_netif_extra_info *gso;
+			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
+			if (unlikely(xennet_set_skb_gso(skb, gso))) {
+				__skb_queue_head(&tmpq, skb);
+				np->rx.rsp_cons += skb_queue_len(&tmpq);
+				goto err;
+			}
+		}
+
+		NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
+		NETFRONT_SKB_CB(skb)->offset = rx->offset;
+
+		len = rx->status;
+		if (len > RX_COPY_THRESHOLD)
+			len = RX_COPY_THRESHOLD;
+		skb_put(skb, len);
+
+		if (rx->status > len) {
+			skb_shinfo(skb)->frags[0].page_offset =
+				rx->offset + len;
+			skb_shinfo(skb)->frags[0].size = rx->status - len;
+			skb->data_len = rx->status - len;
+		} else {
+			skb_shinfo(skb)->frags[0].page = NULL;
+			skb_shinfo(skb)->nr_frags = 0;
+		}
+
+		i = xennet_fill_frags(np, skb, &tmpq);
+
+		/*
+		 * Truesize approximates the size of true data plus
+		 * any supervisor overheads. Adding hypervisor
+		 * overheads has been shown to significantly reduce
+		 * achievable bandwidth with the default receive
+		 * buffer size. It is therefore not wise to account
+		 * for it here.
+		 *
+		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
+		 * to RX_COPY_THRESHOLD + the supervisor
+		 * overheads. Here, we add the size of the data pulled
+		 * in xennet_fill_frags().
+		 *
+		 * We also adjust for any unused space in the main
+		 * data area by subtracting (RX_COPY_THRESHOLD -
+		 * len). This is especially important with drivers
+		 * which split incoming packets into header and data,
+		 * using only 66 bytes of the main data area (see the
+		 * e1000 driver, for example).  On such systems,
+		 * without this last adjustment, our achievable
+		 * receive throughput using the standard receive
+		 * buffer size was cut by 25%.
+		 */
+		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
+		skb->len += skb->data_len;
+
+		if (rx->flags & NETRXF_csum_blank)
+			skb->ip_summed = CHECKSUM_PARTIAL;
+		else if (rx->flags & NETRXF_data_validated)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		__skb_queue_tail(&rxq, skb);
+
+		np->rx.rsp_cons = ++i;
+		work_done++;
+	}
+
+	while ((skb = __skb_dequeue(&errq)))
+		kfree_skb(skb);
+
+	work_done -= handle_incoming_queue(dev, &rxq);
+
+	/* If we get a callback with very few responses, reduce fill target. */
+	/* NB. Note exponential increase, linear decrease. */
+	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
+	     ((3*np->rx_target) / 4)) &&
+	    (--np->rx_target < np->rx_min_target))
+		np->rx_target = np->rx_min_target;
+
+	xennet_alloc_rx_buffers(dev);
+
+	*pbudget   -= work_done;
+	dev->quota -= work_done;
+
+	if (work_done < budget) {
+		local_irq_save(flags);
+
+		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
+		if (!more_to_do)
+			__netif_rx_complete(dev);
+
+		local_irq_restore(flags);
+	}
+
+	spin_unlock(&np->rx_lock);
+
+	return more_to_do;
+}
+
+static int xennet_change_mtu(struct net_device *dev, int mtu)
+{
+	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
+
+	if (mtu > max)
+		return -EINVAL;
+	dev->mtu = mtu;
+	return 0;
+}
+
+static void xennet_release_tx_bufs(struct netfront_info *np)
+{
+	struct sk_buff *skb;
+	int i;
+
+	for (i = 0; i < NET_TX_RING_SIZE; i++) {
+		/* Skip over entries which are actually freelist references */
+		if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET)
+			continue;
+
+		skb = np->tx_skbs[i].skb;
+		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
+					      GNTMAP_readonly);
+		gnttab_release_grant_reference(&np->gref_tx_head,
+					       np->grant_tx_ref[i]);
+		np->grant_tx_ref[i] = GRANT_INVALID_REF;
+		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
+		dev_kfree_skb_irq(skb);
+	}
+}
+
+static void xennet_release_rx_bufs(struct netfront_info *np)
+{
+	struct mmu_update      *mmu = np->rx_mmu;
+	struct multicall_entry *mcl = np->rx_mcl;
+	struct sk_buff_head free_list;
+	struct sk_buff *skb;
+	unsigned long mfn;
+	int xfer = 0, noxfer = 0, unused = 0;
+	int id, ref;
+
+	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
+			 __func__);
+	return;
+
+	skb_queue_head_init(&free_list);
+
+	spin_lock_bh(&np->rx_lock);
+
+	for (id = 0; id < NET_RX_RING_SIZE; id++) {
+		ref = np->grant_rx_ref[id];
+		if (ref == GRANT_INVALID_REF) {
+			unused++;
+			continue;
+		}
+
+		skb = np->rx_skbs[id];
+		mfn = gnttab_end_foreign_transfer_ref(ref);
+		gnttab_release_grant_reference(&np->gref_rx_head, ref);
+		np->grant_rx_ref[id] = GRANT_INVALID_REF;
+
+		if (0 == mfn) {
+			skb_shinfo(skb)->nr_frags = 0;
+			dev_kfree_skb(skb);
+			noxfer++;
+			continue;
+		}
+
+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			/* Remap the page. */
+			struct page *page = skb_shinfo(skb)->frags[0].page;
+			unsigned long pfn = page_to_pfn(page);
+			void *vaddr = page_address(page);
+
+			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
+						mfn_pte(mfn, PAGE_KERNEL),
+						0);
+			mcl++;
+			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
+				| MMU_MACHPHYS_UPDATE;
+			mmu->val = pfn;
+			mmu++;
+
+			set_phys_to_machine(pfn, mfn);
+		}
+		__skb_queue_tail(&free_list, skb);
+		xfer++;
+	}
+
+	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
+		 __func__, xfer, noxfer, unused);
+
+	if (xfer) {
+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			/* Do all the remapping work and M2P updates. */
+			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
+					 0, DOMID_SELF);
+			mcl++;
+			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
+		}
+	}
+
+	while ((skb = __skb_dequeue(&free_list)) != NULL)
+		dev_kfree_skb(skb);
+
+	spin_unlock_bh(&np->rx_lock);
+}
+
+static void xennet_uninit(struct net_device *dev)
+{
+	struct netfront_info *np = netdev_priv(dev);
+	xennet_release_tx_bufs(np);
+	xennet_release_rx_bufs(np);
+	gnttab_free_grant_references(np->gref_tx_head);
+	gnttab_free_grant_references(np->gref_rx_head);
+}
+
+static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
+{
+	int i, err;
+	struct net_device *netdev;
+	struct netfront_info *np;
+
+	netdev = alloc_etherdev(sizeof(struct netfront_info));
+	if (!netdev) {
+		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
+		       __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	np                   = netdev_priv(netdev);
+	np->xbdev            = dev;
+
+	spin_lock_init(&np->tx_lock);
+	spin_lock_init(&np->rx_lock);
+
+	skb_queue_head_init(&np->rx_batch);
+	np->rx_target     = RX_DFL_MIN_TARGET;
+	np->rx_min_target = RX_DFL_MIN_TARGET;
+	np->rx_max_target = RX_MAX_TARGET;
+
+	init_timer(&np->rx_refill_timer);
+	np->rx_refill_timer.data = (unsigned long)netdev;
+	np->rx_refill_timer.function = rx_refill_timeout;
+
+	/* Initialise tx_skbs as a free chain containing every entry. */
+	np->tx_skb_freelist = 0;
+	for (i = 0; i < NET_TX_RING_SIZE; i++) {
+		np->tx_skbs[i].link = i+1;
+		np->grant_tx_ref[i] = GRANT_INVALID_REF;
+	}
+
+	/* Clear out rx_skbs */
+	for (i = 0; i < NET_RX_RING_SIZE; i++) {
+		np->rx_skbs[i] = NULL;
+		np->grant_rx_ref[i] = GRANT_INVALID_REF;
+	}
+
+	/* A grant for every tx ring slot */
+	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
+					  &np->gref_tx_head) < 0) {
+		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
+		err = -ENOMEM;
+		goto exit;
+	}
+	/* A grant for every rx ring slot */
+	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
+					  &np->gref_rx_head) < 0) {
+		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
+		err = -ENOMEM;
+		goto exit_free_tx;
+	}
+
+	netdev->open            = xennet_open;
+	netdev->hard_start_xmit = xennet_start_xmit;
+	netdev->stop            = xennet_close;
+	netdev->get_stats       = xennet_get_stats;
+	netdev->poll            = xennet_poll;
+	netdev->uninit          = xennet_uninit;
+	netdev->change_mtu	= xennet_change_mtu;
+	netdev->weight          = 64;
+	netdev->features        = NETIF_F_IP_CSUM;
+
+	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
+	SET_MODULE_OWNER(netdev);
+	SET_NETDEV_DEV(netdev, &dev->dev);
+
+	np->netdev = netdev;
+
+	netif_carrier_off(netdev);
+
+	return netdev;
+
+ exit_free_tx:
+	gnttab_free_grant_references(np->gref_tx_head);
+ exit:
+	free_netdev(netdev);
+	return ERR_PTR(err);
+}
+
+/**
+ * Entry point to this code when a new device is created.  Allocate the basic
+ * structures and the ring buffers for communication with the backend, and
+ * inform the backend of the appropriate details for those.
+ */
+static int __devinit netfront_probe(struct xenbus_device *dev,
+				    const struct xenbus_device_id *id)
+{
+	int err;
+	struct net_device *netdev;
+	struct netfront_info *info;
+
+	netdev = xennet_create_dev(dev);
+	if (IS_ERR(netdev)) {
+		err = PTR_ERR(netdev);
+		xenbus_dev_fatal(dev, err, "creating netdev");
+		return err;
+	}
+
+	info = netdev_priv(netdev);
+	dev->dev.driver_data = info;
+
+	err = register_netdev(info->netdev);
+	if (err) {
+		printk(KERN_WARNING "%s: register_netdev err=%d\n",
+		       __func__, err);
+		goto fail;
+	}
+
+	err = xennet_sysfs_addif(info->netdev);
+	if (err) {
+		unregister_netdev(info->netdev);
+		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
+		       __func__, err);
+		goto fail;
+	}
+
+	return 0;
+
+ fail:
+	free_netdev(netdev);
+	dev->dev.driver_data = NULL;
+	return err;
+}
+
+static void xennet_end_access(int ref, void *page)
+{
+	/* This frees the page as a side-effect */
+	if (ref != GRANT_INVALID_REF)
+		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
+}
+
+static void xennet_disconnect_backend(struct netfront_info *info)
+{
+	/* Stop the old interface to prevent errors while we rebuild the state. */
+	spin_lock_bh(&info->rx_lock);
+	spin_lock_irq(&info->tx_lock);
+	netif_carrier_off(info->netdev);
+	spin_unlock_irq(&info->tx_lock);
+	spin_unlock_bh(&info->rx_lock);
+
+	if (info->netdev->irq)
+		unbind_from_irqhandler(info->netdev->irq, info->netdev);
+	info->evtchn = info->netdev->irq = 0;
+
+	/* End access and free the pages */
+	xennet_end_access(info->tx_ring_ref, info->tx.sring);
+	xennet_end_access(info->rx_ring_ref, info->rx.sring);
+
+	info->tx_ring_ref = GRANT_INVALID_REF;
+	info->rx_ring_ref = GRANT_INVALID_REF;
+	info->tx.sring = NULL;
+	info->rx.sring = NULL;
+}
+
+/**
+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
+ * driver restart.  We tear down our netif structure and recreate it, but
+ * leave the device-layer structures intact so that this is transparent to the
+ * rest of the kernel.
+ */
+static int netfront_resume(struct xenbus_device *dev)
+{
+	struct netfront_info *info = dev->dev.driver_data;
+
+	dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
+	xennet_disconnect_backend(info);
+	return 0;
+}
+
+static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
+{
+	char *s, *e, *macstr;
+	int i;
+
+	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
+	if (IS_ERR(macstr))
+		return PTR_ERR(macstr);
+
+	for (i = 0; i < ETH_ALEN; i++) {
+		mac[i] = simple_strtoul(s, &e, 16);
+		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
+			kfree(macstr);
+			return -ENOENT;
+		}
+		s = e+1;
+	}
+
+	kfree(macstr);
+	return 0;
+}
+
+static irqreturn_t xennet_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct netfront_info *np = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&np->tx_lock, flags);
+
+	if (likely(netif_carrier_ok(dev))) {
+		xennet_tx_buf_gc(dev);
+		/* Under tx_lock: protects access to rx shared-ring indexes. */
+		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
+			netif_rx_schedule(dev);
+	}
+
+	spin_unlock_irqrestore(&np->tx_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
+{
+	struct xen_netif_tx_sring *txs;
+	struct xen_netif_rx_sring *rxs;
+	int err;
+	struct net_device *netdev = info->netdev;
+
+	info->tx_ring_ref = GRANT_INVALID_REF;
+	info->rx_ring_ref = GRANT_INVALID_REF;
+	info->rx.sring = NULL;
+	info->tx.sring = NULL;
+	netdev->irq = 0;
+
+	err = xen_net_read_mac(dev, netdev->dev_addr);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
+		goto fail;
+	}
+
+	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
+	if (!txs) {
+		err = -ENOMEM;
+		xenbus_dev_fatal(dev, err, "allocating tx ring page");
+		goto fail;
+	}
+	SHARED_RING_INIT(txs);
+	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
+
+	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
+	if (err < 0) {
+		free_page((unsigned long)txs);
+		goto fail;
+	}
+
+	info->tx_ring_ref = err;
+	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
+	if (!rxs) {
+		err = -ENOMEM;
+		xenbus_dev_fatal(dev, err, "allocating rx ring page");
+		goto fail;
+	}
+	SHARED_RING_INIT(rxs);
+	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
+
+	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
+	if (err < 0) {
+		free_page((unsigned long)rxs);
+		goto fail;
+	}
+	info->rx_ring_ref = err;
+
+	err = xenbus_alloc_evtchn(dev, &info->evtchn);
+	if (err)
+		goto fail;
+
+	err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
+					IRQF_SAMPLE_RANDOM, netdev->name,
+					netdev);
+	if (err < 0)
+		goto fail;
+	netdev->irq = err;
+	return 0;
+
+ fail:
+	return err;
+}
+
+/* Common code used when first setting up, and when resuming. */
+static int talk_to_backend(struct xenbus_device *dev,
+			   struct netfront_info *info)
+{
+	const char *message;
+	struct xenbus_transaction xbt;
+	int err;
+
+	/* Create shared ring, alloc event channel. */
+	err = setup_netfront(dev, info);
+	if (err)
+		goto out;
+
+again:
+	err = xenbus_transaction_start(&xbt);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "starting transaction");
+		goto destroy_ring;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
+			    info->tx_ring_ref);
+	if (err) {
+		message = "writing tx ring-ref";
+		goto abort_transaction;
+	}
+	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
+			    info->rx_ring_ref);
+	if (err) {
+		message = "writing rx ring-ref";
+		goto abort_transaction;
+	}
+	err = xenbus_printf(xbt, dev->nodename,
+			    "event-channel", "%u", info->evtchn);
+	if (err) {
+		message = "writing event-channel";
+		goto abort_transaction;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
+			    1);
+	if (err) {
+		message = "writing request-rx-copy";
+		goto abort_transaction;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
+	if (err) {
+		message = "writing feature-rx-notify";
+		goto abort_transaction;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
+	if (err) {
+		message = "writing feature-sg";
+		goto abort_transaction;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
+	if (err) {
+		message = "writing feature-gso-tcpv4";
+		goto abort_transaction;
+	}
+
+	err = xenbus_transaction_end(xbt, 0);
+	if (err) {
+		if (err == -EAGAIN)
+			goto again;
+		xenbus_dev_fatal(dev, err, "completing transaction");
+		goto destroy_ring;
+	}
+
+	return 0;
+
+ abort_transaction:
+	xenbus_transaction_end(xbt, 1);
+	xenbus_dev_fatal(dev, err, "%s", message);
+ destroy_ring:
+	xennet_disconnect_backend(info);
+ out:
+	return err;
+}
+
+static int xennet_set_sg(struct net_device *dev, u32 data)
+{
+	if (data) {
+		struct netfront_info *np = netdev_priv(dev);
+		int val;
+
+		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
+				 "%d", &val) < 0)
+			val = 0;
+		if (!val)
+			return -ENOSYS;
+	} else if (dev->mtu > ETH_DATA_LEN)
+		dev->mtu = ETH_DATA_LEN;
+
+	return ethtool_op_set_sg(dev, data);
+}
+
+static int xennet_set_tso(struct net_device *dev, u32 data)
+{
+	if (data) {
+		struct netfront_info *np = netdev_priv(dev);
+		int val;
+
+		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+				 "feature-gso-tcpv4", "%d", &val) < 0)
+			val = 0;
+		if (!val)
+			return -ENOSYS;
+	}
+
+	return ethtool_op_set_tso(dev, data);
+}
+
+static void xennet_set_features(struct net_device *dev)
+{
+	/* Turn off all GSO bits except ROBUST. */
+	dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
+	dev->features |= NETIF_F_GSO_ROBUST;
+	xennet_set_sg(dev, 0);
+
+	/* We need checksum offload to enable scatter/gather and TSO. */
+	if (!(dev->features & NETIF_F_IP_CSUM))
+		return;
+
+	if (!xennet_set_sg(dev, 1))
+		xennet_set_tso(dev, 1);
+}
+
+static int xennet_connect(struct net_device *dev)
+{
+	struct netfront_info *np = netdev_priv(dev);
+	int i, requeue_idx, err;
+	struct sk_buff *skb;
+	grant_ref_t ref;
+	struct xen_netif_rx_request *req;
+	unsigned int feature_rx_copy;
+
+	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+			   "feature-rx-copy", "%u", &feature_rx_copy);
+	if (err != 1)
+		feature_rx_copy = 0;
+
+	if (!feature_rx_copy) {
+		dev_info(&dev->dev,
+			 "backend does not support copying receive path\n");
+		return -ENODEV;
+	}
+
+	err = talk_to_backend(np->xbdev, np);
+	if (err)
+		return err;
+
+	xennet_set_features(dev);
+
+	spin_lock_bh(&np->rx_lock);
+	spin_lock_irq(&np->tx_lock);
+
+	/* Step 1: Discard all pending TX packet fragments. */
+	xennet_release_tx_bufs(np);
+
+	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
+		if (!np->rx_skbs[i])
+			continue;
+
+		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
+		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
+		req = RING_GET_REQUEST(&np->rx, requeue_idx);
+
+		gnttab_grant_foreign_access_ref(
+			ref, np->xbdev->otherend_id,
+			pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
+					       frags->page)),
+			0);
+		req->gref = ref;
+		req->id   = requeue_idx;
+
+		requeue_idx++;
+	}
+
+	np->rx.req_prod_pvt = requeue_idx;
+
+	/*
+	 * Step 3: All public and private state should now be sane.  Get
+	 * ready to start sending and receiving packets and give the driver
+	 * domain a kick because we've probably just requeued some
+	 * packets.
+	 */
+	netif_carrier_on(np->netdev);
+	notify_remote_via_irq(np->netdev->irq);
+	xennet_tx_buf_gc(dev);
+	xennet_alloc_rx_buffers(dev);
+
+	spin_unlock_irq(&np->tx_lock);
+	spin_unlock_bh(&np->rx_lock);
+
+	return 0;
+}
+
+/**
+ * Callback received when the backend's state changes.
+ */
+static void backend_changed(struct xenbus_device *dev,
+			    enum xenbus_state backend_state)
+{
+	struct netfront_info *np = dev->dev.driver_data;
+	struct net_device *netdev = np->netdev;
+
+	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
+
+	switch (backend_state) {
+	case XenbusStateInitialising:
+	case XenbusStateInitialised:
+	case XenbusStateConnected:
+	case XenbusStateUnknown:
+	case XenbusStateClosed:
+		break;
+
+	case XenbusStateInitWait:
+		if (dev->state != XenbusStateInitialising)
+			break;
+		if (xennet_connect(netdev) != 0)
+			break;
+		xenbus_switch_state(dev, XenbusStateConnected);
+		break;
+
+	case XenbusStateClosing:
+		xenbus_frontend_closed(dev);
+		break;
+	}
+}
+
+static struct ethtool_ops xennet_ethtool_ops =
+{
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.set_tx_csum = ethtool_op_set_tx_csum,
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = xennet_set_sg,
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = xennet_set_tso,
+	.get_link = ethtool_op_get_link,
+};
+
+#ifdef CONFIG_SYSFS
+static ssize_t show_rxbuf_min(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+	struct netfront_info *info = netdev_priv(netdev);
+
+	return sprintf(buf, "%u\n", info->rx_min_target);
+}
+
+static ssize_t store_rxbuf_min(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t len)
+{
+	struct net_device *netdev = to_net_dev(dev);
+	struct netfront_info *np = netdev_priv(netdev);
+	char *endp;
+	unsigned long target;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	target = simple_strtoul(buf, &endp, 0);
+	if (endp == buf)
+		return -EBADMSG;
+
+	if (target < RX_MIN_TARGET)
+		target = RX_MIN_TARGET;
+	if (target > RX_MAX_TARGET)
+		target = RX_MAX_TARGET;
+
+	spin_lock_bh(&np->rx_lock);
+	if (target > np->rx_max_target)
+		np->rx_max_target = target;
+	np->rx_min_target = target;
+	if (target > np->rx_target)
+		np->rx_target = target;
+
+	xennet_alloc_rx_buffers(netdev);
+
+	spin_unlock_bh(&np->rx_lock);
+	return len;
+}
+
+static ssize_t show_rxbuf_max(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+	struct netfront_info *info = netdev_priv(netdev);
+
+	return sprintf(buf, "%u\n", info->rx_max_target);
+}
+
+static ssize_t store_rxbuf_max(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t len)
+{
+	struct net_device *netdev = to_net_dev(dev);
+	struct netfront_info *np = netdev_priv(netdev);
+	char *endp;
+	unsigned long target;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	target = simple_strtoul(buf, &endp, 0);
+	if (endp == buf)
+		return -EBADMSG;
+
+	if (target < RX_MIN_TARGET)
+		target = RX_MIN_TARGET;
+	if (target > RX_MAX_TARGET)
+		target = RX_MAX_TARGET;
+
+	spin_lock_bh(&np->rx_lock);
+	if (target < np->rx_min_target)
+		np->rx_min_target = target;
+	np->rx_max_target = target;
+	if (target < np->rx_target)
+		np->rx_target = target;
+
+	xennet_alloc_rx_buffers(netdev);
+
+	spin_unlock_bh(&np->rx_lock);
+	return len;
+}
+
+static ssize_t show_rxbuf_cur(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+	struct netfront_info *info = netdev_priv(netdev);
+
+	return sprintf(buf, "%u\n", info->rx_target);
+}
+
+static struct device_attribute xennet_attrs[] = {
+	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
+	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
+	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
+};
+
+static int xennet_sysfs_addif(struct net_device *netdev)
+{
+	int i;
+	int err;
+
+	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
+		err = device_create_file(&netdev->dev,
+					   &xennet_attrs[i]);
+		if (err)
+			goto fail;
+	}
+	return 0;
+
+ fail:
+	while (--i >= 0)
+		device_remove_file(&netdev->dev, &xennet_attrs[i]);
+	return err;
+}
+
+static void xennet_sysfs_delif(struct net_device *netdev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
+		device_remove_file(&netdev->dev, &xennet_attrs[i]);
+}
+
+#endif /* CONFIG_SYSFS */
+
+static struct xenbus_device_id netfront_ids[] = {
+	{ "vif" },
+	{ "" }
+};
+
+
+static int __devexit xennet_remove(struct xenbus_device *dev)
+{
+	struct netfront_info *info = dev->dev.driver_data;
+
+	dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
+	unregister_netdev(info->netdev);
+
+	xennet_disconnect_backend(info);
+
+	del_timer_sync(&info->rx_refill_timer);
+
+	xennet_sysfs_delif(info->netdev);
+
+	free_netdev(info->netdev);
+
+	return 0;
+}
+
+static struct xenbus_driver netfront = {
+	.name = "vif",
+	.owner = THIS_MODULE,
+	.ids = netfront_ids,
+	.probe = netfront_probe,
+	.remove = __devexit_p(xennet_remove),
+	.resume = netfront_resume,
+	.otherend_changed = backend_changed,
+};
+
+static int __init netif_init(void)
+{
+	if (!is_running_on_xen())
+		return -ENODEV;
+
+	if (is_initial_xendomain())
+		return 0;
+
+	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
+
+	return xenbus_register_frontend(&netfront);
+}
+module_init(netif_init);
+
+
+static void __exit netif_exit(void)
+{
+	if (is_initial_xendomain())
+		return;
+
+	return xenbus_unregister_driver(&netfront);
+}
+module_exit(netif_exit);
+
+MODULE_DESCRIPTION("Xen virtual network device frontend");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index 3a0a3a7..e503c9c 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -466,9 +466,8 @@
 		       parent->base, dir.base);
 
 	/* Actually we should probably panic if this fails */
-	if ((dev = kmalloc(sizeof(*dev), GFP_ATOMIC)) == NULL)
+	if ((dev = kzalloc(sizeof(*dev), GFP_ATOMIC)) == NULL)
 		return NULL;	
-	memset(dev, 0, sizeof(*dev));
 	dev->resid = parent->type;
 	dev->directory = dir.base;
 	dev->board = board;
@@ -800,9 +799,8 @@
 	nubus_rewind(&rp, FORMAT_BLOCK_SIZE, bytelanes);
 
 	/* Actually we should probably panic if this fails */
-	if ((board = kmalloc(sizeof(*board), GFP_ATOMIC)) == NULL)
+	if ((board = kzalloc(sizeof(*board), GFP_ATOMIC)) == NULL)
 		return NULL;	
-	memset(board, 0, sizeof(*board));
 	board->fblock = rp;
 
 	/* Dump the format block for debugging purposes */
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index a68b3b3..a728a7c 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/dma-mapping.h>
 #include <linux/ioport.h>
 
 #include <asm/io.h>
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index a708c32..38cdf9f 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -73,6 +73,7 @@
 #include <linux/termios.h>
 #include <linux/tty.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 #include <linux/delay.h>
 
 #include <asm/io.h>
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index 8b7d84e..802a81d 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -105,9 +105,8 @@
     DEBUG(0, "parport_attach()\n");
 
     /* Create new parport device */
-    info = kmalloc(sizeof(*info), GFP_KERNEL);
+    info = kzalloc(sizeof(*info), GFP_KERNEL);
     if (!info) return -ENOMEM;
-    memset(info, 0, sizeof(*info));
     link->priv = info;
     info->p_dev = link;
 
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 90ea3b8..bd6ad8b 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -324,10 +324,9 @@
 	struct parport_serial_private *priv;
 	int err;
 
-	priv = kmalloc (sizeof *priv, GFP_KERNEL);
+	priv = kzalloc (sizeof *priv, GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
-	memset(priv, 0, sizeof(struct parport_serial_private));
 	pci_set_drvdata (dev, priv);
 
 	err = pci_enable_device (dev);
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index bb3c101..deb6b5e 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -159,8 +159,8 @@
 	/* Claim new bus resources */
 	pcibios_claim_one_bus(dev->bus);
 
-	/* ioremap() for child bus, which may or may not succeed */
-	remap_bus_range(dev->subordinate);
+	/* Map IO space for child bus, which may or may not succeed */
+	pcibios_map_io_space(dev->subordinate);
 
 	/* Add new devices to global lists.  Register in proc, sysfs. */
 	pci_bus_add_devices(phb->bus);
@@ -390,7 +390,7 @@
 	} else
 		pcibios_remove_pci_devices(bus);
 
-	if (unmap_bus_range(bus)) {
+	if (pcibios_unmap_io_space(bus)) {
 		printk(KERN_ERR "%s: failed to unmap bus range\n",
 			__FUNCTION__);
 		return -ERANGE;
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 6846fb4..ad90a01 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -148,11 +148,10 @@
 {
 	struct aer_rpc *rpc;
 
-	if (!(rpc = kmalloc(sizeof(struct aer_rpc),
+	if (!(rpc = kzalloc(sizeof(struct aer_rpc),
 		GFP_KERNEL)))
 		return NULL;
 
-	memset(rpc, 0, sizeof(struct aer_rpc));
 	/*
 	 * Initialize Root lock access, e_lock, to Root Error Status Reg,
 	 * Root Error ID Reg, and Root error producer/consumer index.
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 35f8864..c0c77f8 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -180,14 +180,15 @@
 	  PCMCIA cards are plugged into. If unsure, say N.
 
 config PCMCIA_M8XX
-        tristate "MPC8xx PCMCIA support"
-        depends on PCMCIA && PPC && 8xx 
-        select PCCARD_IODYN
-        help
-        Say Y here to include support for PowerPC 8xx series PCMCIA
-        controller.
+	tristate "MPC8xx PCMCIA support"
+	depends on PCMCIA && PPC && 8xx
+	select PCCARD_IODYN
+	select PCCARD_NONSTATIC
+	help
+	  Say Y here to include support for PowerPC 8xx series PCMCIA
+	  controller.
 
-        This driver is also available as a module called m8xx_pcmcia.
+	  This driver is also available as a module called m8xx_pcmcia.
 
 config HD64465_PCMCIA
 	tristate "HD64465 host bridge support"
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 50cad3a..7c93a10 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -651,6 +651,7 @@
 	add_wait_queue(&skt->thread_wait, &wait);
 	complete(&skt->thread_done);
 
+	set_freezable();
 	for (;;) {
 		unsigned long flags;
 		unsigned int events;
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 143c6ef..a996071 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -1127,6 +1127,34 @@
 
 #endif
 
+/************************ runtime PM support ***************************/
+
+static int pcmcia_dev_suspend(struct device *dev, pm_message_t state);
+static int pcmcia_dev_resume(struct device *dev);
+
+static int runtime_suspend(struct device *dev)
+{
+	int rc;
+
+	down(&dev->sem);
+	rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND);
+	up(&dev->sem);
+	if (!rc)
+		dev->power.power_state.event = PM_EVENT_SUSPEND;
+	return rc;
+}
+
+static void runtime_resume(struct device *dev)
+{
+	int rc;
+
+	down(&dev->sem);
+	rc = pcmcia_dev_resume(dev);
+	up(&dev->sem);
+	if (!rc)
+		dev->power.power_state.event = PM_EVENT_ON;
+}
+
 /************************ per-device sysfs output ***************************/
 
 #define pcmcia_device_attr(field, test, format)				\
@@ -1173,9 +1201,9 @@
                 return -EINVAL;
 
 	if ((!p_dev->suspended) && !strncmp(buf, "off", 3))
-		ret = dpm_runtime_suspend(dev, PMSG_SUSPEND);
+		ret = runtime_suspend(dev);
 	else if (p_dev->suspended && !strncmp(buf, "on", 2))
-		dpm_runtime_resume(dev);
+		runtime_resume(dev);
 
 	return ret ? ret : count;
 }
@@ -1312,10 +1340,10 @@
 	struct pcmcia_socket *skt = _data;
 	struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
 
-	if (p_dev->socket != skt)
+	if (p_dev->socket != skt || p_dev->suspended)
 		return 0;
 
-	return dpm_runtime_suspend(dev, PMSG_SUSPEND);
+	return runtime_suspend(dev);
 }
 
 static int pcmcia_bus_resume_callback(struct device *dev, void * _data)
@@ -1323,10 +1351,10 @@
 	struct pcmcia_socket *skt = _data;
 	struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
 
-	if (p_dev->socket != skt)
+	if (p_dev->socket != skt || !p_dev->suspended)
 		return 0;
 
-	dpm_runtime_resume(dev);
+	runtime_resume(dev);
 
 	return 0;
 }
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 9721ed7..3c45142 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -10,7 +10,7 @@
  * Further fixes, v2.6 kernel port
  *     <marcelo.tosatti@cyclades.com>
  * 
- * Some fixes, additions (C) 2005 Montavista Software, Inc. 
+ * Some fixes, additions (C) 2005-2007 Montavista Software, Inc.
  *     <vbordug@ru.mvista.com>
  *
  * "The ExCA standard specifies that socket controllers should provide
@@ -40,10 +40,6 @@
 #include <linux/fcntl.h>
 #include <linux/string.h>
 
-#include <asm/io.h>
-#include <asm/bitops.h>
-#include <asm/system.h>
-
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
@@ -51,11 +47,18 @@
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
 
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <asm/system.h>
+#include <asm/time.h>
 #include <asm/mpc8xx.h>
 #include <asm/8xx_immap.h>
 #include <asm/irq.h>
+#include <asm/fs_pd.h>
+#include <asm/of_device.h>
+#include <asm/of_platform.h>
 
 #include <pcmcia/version.h>
 #include <pcmcia/cs_types.h>
@@ -110,7 +113,7 @@
 #define CONFIG_PCMCIA_SLOT_B
 #endif
 
-#endif /* !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) */
+#endif				/* !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) */
 
 #if defined(CONFIG_PCMCIA_SLOT_A) && defined(CONFIG_PCMCIA_SLOT_B)
 
@@ -143,30 +146,20 @@
 
 /* ------------------------------------------------------------------------- */
 
-#define PCMCIA_MEM_WIN_BASE 0xe0000000 /* base address for memory window 0   */
-#define PCMCIA_MEM_WIN_SIZE 0x04000000 /* each memory window is 64 MByte     */
-#define PCMCIA_IO_WIN_BASE  _IO_BASE   /* base address for io window 0       */
-
-#define PCMCIA_SCHLVL PCMCIA_INTERRUPT /* Status Change Interrupt Level      */
-
+#define PCMCIA_MEM_WIN_BASE 0xe0000000	/* base address for memory window 0   */
+#define PCMCIA_MEM_WIN_SIZE 0x04000000	/* each memory window is 64 MByte     */
+#define PCMCIA_IO_WIN_BASE  _IO_BASE	/* base address for io window 0       */
 /* ------------------------------------------------------------------------- */
 
-/* 2.4.x and newer has this always in HZ */
-#define M8XX_BUSFREQ ((((bd_t *)&(__res))->bi_busfreq))
-
-static int pcmcia_schlvl = PCMCIA_SCHLVL;
+static int pcmcia_schlvl;
 
 static DEFINE_SPINLOCK(events_lock);
 
-
 #define PCMCIA_SOCKET_KEY_5V 1
 #define PCMCIA_SOCKET_KEY_LV 2
 
 /* look up table for pgcrx registers */
-static u32 *m8xx_pgcrx[2] = {
-	&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pgcra,
-	&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pgcrb
-};
+static u32 *m8xx_pgcrx[2];
 
 /*
  * This structure is used to address each window in the PCMCIA controller.
@@ -176,8 +169,8 @@
  */
 
 struct pcmcia_win {
-	u32	br;
-	u32	or;
+	u32 br;
+	u32 or;
 };
 
 /*
@@ -221,22 +214,27 @@
 
 /* we keep one lookup table per socket to check flags */
 
-#define PCMCIA_EVENTS_MAX 5  /* 4 max at a time + termination */
+#define PCMCIA_EVENTS_MAX 5	/* 4 max at a time + termination */
 
 struct event_table {
 	u32 regbit;
 	u32 eventbit;
 };
 
+static const char driver_name[] = "m8xx-pcmcia";
+
 struct socket_info {
-	void	(*handler)(void *info, u32 events);
-	void	*info;
+	void (*handler) (void *info, u32 events);
+	void *info;
 
 	u32 slot;
+	pcmconf8xx_t *pcmcia;
+	u32 bus_freq;
+	int hwirq;
 
 	socket_state_t state;
 	struct pccard_mem_map mem_win[PCMCIA_MEM_WIN_NO];
-	struct pccard_io_map  io_win[PCMCIA_IO_WIN_NO];
+	struct pccard_io_map io_win[PCMCIA_IO_WIN_NO];
 	struct event_table events[PCMCIA_EVENTS_MAX];
 	struct pcmcia_socket socket;
 };
@@ -250,8 +248,7 @@
 
 #define M8XX_SIZES_NO 32
 
-static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] =
-{
+static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] = {
 	0x00000001, 0x00000002, 0x00000008, 0x00000004,
 	0x00000080, 0x00000040, 0x00000010, 0x00000020,
 	0x00008000, 0x00004000, 0x00001000, 0x00002000,
@@ -267,7 +264,7 @@
 
 static irqreturn_t m8xx_interrupt(int irq, void *dev);
 
-#define PCMCIA_BMT_LIMIT (15*4)  /* Bus Monitor Timeout value */
+#define PCMCIA_BMT_LIMIT (15*4)	/* Bus Monitor Timeout value */
 
 /* ------------------------------------------------------------------------- */
 /* board specific stuff:                                                     */
@@ -291,8 +288,9 @@
 {
 	u32 reg = 0;
 
-	switch(vcc) {
-	case 0: break;
+	switch (vcc) {
+	case 0:
+		break;
 	case 33:
 		reg |= BCSR1_PCVCTL4;
 		break;
@@ -303,11 +301,12 @@
 		return 1;
 	}
 
-	switch(vpp) {
-	case 0: break;
+	switch (vpp) {
+	case 0:
+		break;
 	case 33:
 	case 50:
-		if(vcc == vpp)
+		if (vcc == vpp)
 			reg |= BCSR1_PCVCTL6;
 		else
 			return 1;
@@ -318,25 +317,29 @@
 		return 1;
 	}
 
-	if(!((vcc == 50) || (vcc == 0)))
+	if (!((vcc == 50) || (vcc == 0)))
 		return 1;
 
 	/* first, turn off all power */
 
-	out_be32(((u32 *)RPX_CSR_ADDR), in_be32(((u32 *)RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 | BCSR1_PCVCTL5 | BCSR1_PCVCTL6 | BCSR1_PCVCTL7));
+	out_be32(((u32 *) RPX_CSR_ADDR),
+		 in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 |
+						     BCSR1_PCVCTL5 |
+						     BCSR1_PCVCTL6 |
+						     BCSR1_PCVCTL7));
 
 	/* enable new powersettings */
 
-	out_be32(((u32 *)RPX_CSR_ADDR), in_be32(((u32 *)RPX_CSR_ADDR)) | reg);
+	out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg);
 
 	return 0;
 }
 
 #define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_)  /* No hardware to enable */
-#define hardware_disable(_slot_) /* No hardware to disable */
+#define hardware_enable(_slot_)	/* No hardware to enable */
+#define hardware_disable(_slot_)	/* No hardware to disable */
 
-#endif /* CONFIG_RPXCLASSIC */
+#endif				/* CONFIG_RPXCLASSIC */
 
 /* FADS Boards from Motorola                                               */
 
@@ -348,43 +351,45 @@
 {
 	u32 reg = 0;
 
-	switch(vcc) {
-		case 0:
-			break;
-		case 33:
-			reg |= BCSR1_PCCVCC0;
-			break;
-		case 50:
-			reg |= BCSR1_PCCVCC1;
-			break;
-		default:
-			return 1;
+	switch (vcc) {
+	case 0:
+		break;
+	case 33:
+		reg |= BCSR1_PCCVCC0;
+		break;
+	case 50:
+		reg |= BCSR1_PCCVCC1;
+		break;
+	default:
+		return 1;
 	}
 
-	switch(vpp) {
-		case 0:
-			break;
-		case 33:
-		case 50:
-			if(vcc == vpp)
-				reg |= BCSR1_PCCVPP1;
-			else
-				return 1;
-			break;
-		case 120:
-			if ((vcc == 33) || (vcc == 50))
-				reg |= BCSR1_PCCVPP0;
-			else
-				return 1;
-		default:
+	switch (vpp) {
+	case 0:
+		break;
+	case 33:
+	case 50:
+		if (vcc == vpp)
+			reg |= BCSR1_PCCVPP1;
+		else
 			return 1;
+		break;
+	case 120:
+		if ((vcc == 33) || (vcc == 50))
+			reg |= BCSR1_PCCVPP0;
+		else
+			return 1;
+	default:
+		return 1;
 	}
 
 	/* first, turn off all power */
-	out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) & ~(BCSR1_PCCVCC_MASK | BCSR1_PCCVPP_MASK));
+	out_be32((u32 *) BCSR1,
+		 in_be32((u32 *) BCSR1) & ~(BCSR1_PCCVCC_MASK |
+					    BCSR1_PCCVPP_MASK));
 
 	/* enable new powersettings */
-	out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) | reg);
+	out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | reg);
 
 	return 0;
 }
@@ -393,12 +398,12 @@
 
 static void hardware_enable(int slot)
 {
-	out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) & ~BCSR1_PCCEN);
+	out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) & ~BCSR1_PCCEN);
 }
 
 static void hardware_disable(int slot)
 {
-	out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) |  BCSR1_PCCEN);
+	out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | BCSR1_PCCEN);
 }
 
 #endif
@@ -408,78 +413,21 @@
 #if defined(CONFIG_MPC885ADS)
 
 #define PCMCIA_BOARD_MSG "MPC885ADS"
-
-static int voltage_set(int slot, int vcc, int vpp)
-{
-	u32 reg = 0;
-	unsigned *bcsr_io;
-
-	bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
-
-	switch(vcc) {
-		case 0:
-			break;
-		case 33:
-			reg |= BCSR1_PCCVCC0;
-			break;
-		case 50:
-			reg |= BCSR1_PCCVCC1;
-			break;
-		default:
-			goto out_unmap;
-	}
-
-	switch(vpp) {
-		case 0:
-			break;
-		case 33:
-		case 50:
-			if(vcc == vpp)
-				reg |= BCSR1_PCCVPP1;
-			else
-				goto out_unmap;
-			break;
-		case 120:
-			if ((vcc == 33) || (vcc == 50))
-				reg |= BCSR1_PCCVPP0;
-			else
-				goto out_unmap;
-		default:
-			goto out_unmap;
-	}
-
-	/* first, turn off all power */
-	out_be32(bcsr_io, in_be32(bcsr_io) & ~(BCSR1_PCCVCC_MASK | BCSR1_PCCVPP_MASK));
-
-	/* enable new powersettings */
-	out_be32(bcsr_io, in_be32(bcsr_io) | reg);
-
-	iounmap(bcsr_io);
-	return 0;
-
-out_unmap:
-	iounmap(bcsr_io);
-	return 1;
-}
-
 #define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
 
-static void hardware_enable(int slot)
+static inline void hardware_enable(int slot)
 {
-	unsigned *bcsr_io;
-
-	bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
-	out_be32(bcsr_io, in_be32(bcsr_io) & ~BCSR1_PCCEN);
-	iounmap(bcsr_io);
+	m8xx_pcmcia_ops.hw_ctrl(slot, 1);
 }
 
-static void hardware_disable(int slot)
+static inline void hardware_disable(int slot)
 {
-	unsigned *bcsr_io;
+	m8xx_pcmcia_ops.hw_ctrl(slot, 0);
+}
 
-	bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
-	out_be32(bcsr_io, in_be32(bcsr_io) |  BCSR1_PCCEN);
-	iounmap(bcsr_io);
+static inline int voltage_set(int slot, int vcc, int vpp)
+{
+	return m8xx_pcmcia_ops.voltage_set(slot, vcc, vpp);
 }
 
 #endif
@@ -495,52 +443,53 @@
 {
 	u8 reg = 0;
 
-	switch(vcc) {
-		case 0:
-			break;
-		case 33:
-			reg |= CSR2_VCC_33;
-			break;
-		case 50:
-			reg |= CSR2_VCC_50;
-			break;
-		default:
-			return 1;
+	switch (vcc) {
+	case 0:
+		break;
+	case 33:
+		reg |= CSR2_VCC_33;
+		break;
+	case 50:
+		reg |= CSR2_VCC_50;
+		break;
+	default:
+		return 1;
 	}
 
-	switch(vpp) {
-		case 0:
-			break;
-		case 33:
-		case 50:
-			if(vcc == vpp)
-				reg |= CSR2_VPP_VCC;
-			else
-				return 1;
-			break;
-		case 120:
-			if ((vcc == 33) || (vcc == 50))
-				reg |= CSR2_VPP_12;
-			else
-				return 1;
-		default:
+	switch (vpp) {
+	case 0:
+		break;
+	case 33:
+	case 50:
+		if (vcc == vpp)
+			reg |= CSR2_VPP_VCC;
+		else
 			return 1;
+		break;
+	case 120:
+		if ((vcc == 33) || (vcc == 50))
+			reg |= CSR2_VPP_12;
+		else
+			return 1;
+	default:
+		return 1;
 	}
 
 	/* first, turn off all power */
-	out_8((u8 *)MBX_CSR2_ADDR, in_8((u8 *)MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
+	out_8((u8 *) MBX_CSR2_ADDR,
+	      in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
 
 	/* enable new powersettings */
-	out_8((u8 *)MBX_CSR2_ADDR, in_8((u8 *)MBX_CSR2_ADDR) | reg);
+	out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg);
 
 	return 0;
 }
 
 #define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_)  /* No hardware to enable */
-#define hardware_disable(_slot_) /* No hardware to disable */
+#define hardware_enable(_slot_)	/* No hardware to enable */
+#define hardware_disable(_slot_)	/* No hardware to disable */
 
-#endif /* CONFIG_MBX */
+#endif				/* CONFIG_MBX */
 
 #if defined(CONFIG_PRxK)
 #include <asm/cpld.h>
@@ -554,43 +503,46 @@
 	u8 regread;
 	cpld_regs *ccpld = get_cpld();
 
-	switch(vcc) {
-		case 0:
-			break;
-		case 33:
-			reg |= PCMCIA_VCC_33;
-			break;
-		case 50:
-			reg |= PCMCIA_VCC_50;
-			break;
-		default:
-			return 1;
+	switch (vcc) {
+	case 0:
+		break;
+	case 33:
+		reg |= PCMCIA_VCC_33;
+		break;
+	case 50:
+		reg |= PCMCIA_VCC_50;
+		break;
+	default:
+		return 1;
 	}
 
-	switch(vpp) {
-		case 0:
-			break;
-		case 33:
-		case 50:
-			if(vcc == vpp)
-				reg |= PCMCIA_VPP_VCC;
-			else
-				return 1;
-			break;
-		case 120:
-			if ((vcc == 33) || (vcc == 50))
-				reg |= PCMCIA_VPP_12;
-			else
-				return 1;
-		default:
+	switch (vpp) {
+	case 0:
+		break;
+	case 33:
+	case 50:
+		if (vcc == vpp)
+			reg |= PCMCIA_VPP_VCC;
+		else
 			return 1;
+		break;
+	case 120:
+		if ((vcc == 33) || (vcc == 50))
+			reg |= PCMCIA_VPP_12;
+		else
+			return 1;
+	default:
+		return 1;
 	}
 
 	reg = reg >> (slot << 2);
 	regread = in_8(&ccpld->fpga_pc_ctl);
-	if (reg != (regread & ((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)))) {
+	if (reg !=
+	    (regread & ((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)))) {
 		/* enable new powersettings */
-		regread = regread & ~((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2));
+		regread =
+		    regread & ~((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >>
+				(slot << 2));
 		out_8(&ccpld->fpga_pc_ctl, reg | regread);
 		msleep(100);
 	}
@@ -599,52 +551,10 @@
 }
 
 #define socket_get(_slot_) PCMCIA_SOCKET_KEY_LV
-#define hardware_enable(_slot_)  /* No hardware to enable */
-#define hardware_disable(_slot_) /* No hardware to disable */
+#define hardware_enable(_slot_)	/* No hardware to enable */
+#define hardware_disable(_slot_)	/* No hardware to disable */
 
-#endif /* CONFIG_PRxK */
-
-static void m8xx_shutdown(void)
-{
-	u32 m, i;
-	struct pcmcia_win *w;
-
-	for(i = 0; i < PCMCIA_SOCKETS_NO; i++){
-		w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
-
-		out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr, M8XX_PCMCIA_MASK(i));
-		out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per, in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per) & ~M8XX_PCMCIA_MASK(i));
-
-		/* turn off interrupt and disable CxOE */
-		out_be32(M8XX_PGCRX(i), M8XX_PGCRX_CXOE);
-
-		/* turn off memory windows */
-		for(m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
-			out_be32(&w->or, 0); /* set to not valid */
-			w++;
-		}
-
-		/* turn off voltage */
-		voltage_set(i, 0, 0);
-
-		/* disable external hardware */
-		hardware_disable(i);
-	}
-
-	free_irq(pcmcia_schlvl, NULL);
-}
-
-static struct device_driver m8xx_driver = {
-        .name = "m8xx-pcmcia",
-        .bus = &platform_bus_type,
-        .suspend = pcmcia_socket_dev_suspend,
-        .resume = pcmcia_socket_dev_resume,
-};
-
-static struct platform_device m8xx_device = {
-        .name = "m8xx-pcmcia",
-        .id = 0,
-};
+#endif				/* CONFIG_PRxK */
 
 static u32 pending_events[PCMCIA_SOCKETS_NO];
 static DEFINE_SPINLOCK(pending_event_lock);
@@ -654,24 +564,25 @@
 	struct socket_info *s;
 	struct event_table *e;
 	unsigned int i, events, pscr, pipr, per;
+	pcmconf8xx_t *pcmcia = socket[0].pcmcia;
 
 	dprintk("Interrupt!\n");
 	/* get interrupt sources */
 
-	pscr = in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr);
-	pipr = in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pipr);
-	per = in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per);
+	pscr = in_be32(&pcmcia->pcmc_pscr);
+	pipr = in_be32(&pcmcia->pcmc_pipr);
+	per = in_be32(&pcmcia->pcmc_per);
 
-	for(i = 0; i < PCMCIA_SOCKETS_NO; i++) {
+	for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
 		s = &socket[i];
 		e = &s->events[0];
 		events = 0;
 
-		while(e->regbit) {
-			if(pscr & e->regbit)
+		while (e->regbit) {
+			if (pscr & e->regbit)
 				events |= e->eventbit;
 
-				e++;
+			e++;
 		}
 
 		/*
@@ -679,13 +590,11 @@
 		 * not too nice done,
 		 * we depend on that CD2 is the bit to the left of CD1...
 		 */
-		if(events & SS_DETECT)
-			if(((pipr & M8XX_PCMCIA_CD2(i)) >> 1) ^
-				(pipr & M8XX_PCMCIA_CD1(i)))
-			{
+		if (events & SS_DETECT)
+			if (((pipr & M8XX_PCMCIA_CD2(i)) >> 1) ^
+			    (pipr & M8XX_PCMCIA_CD1(i))) {
 				events &= ~SS_DETECT;
 			}
-
 #ifdef PCMCIA_GLITCHY_CD
 		/*
 		 * I've experienced CD problems with my ADS board.
@@ -693,24 +602,23 @@
 		 * real change of Card detection.
 		 */
 
-		if((events & SS_DETECT) &&
-		   ((pipr &
-		     (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i))) == 0) &&
-		   (s->state.Vcc | s->state.Vpp)) {
+		if ((events & SS_DETECT) &&
+		    ((pipr &
+		      (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i))) == 0) &&
+		    (s->state.Vcc | s->state.Vpp)) {
 			events &= ~SS_DETECT;
 			/*printk( "CD glitch workaround - CD = 0x%08x!\n",
-				(pipr & (M8XX_PCMCIA_CD2(i)
-					 | M8XX_PCMCIA_CD1(i))));*/
+			   (pipr & (M8XX_PCMCIA_CD2(i)
+			   | M8XX_PCMCIA_CD1(i)))); */
 		}
 #endif
 
 		/* call the handler */
 
 		dprintk("slot %u: events = 0x%02x, pscr = 0x%08x, "
-			"pipr = 0x%08x\n",
-			i, events, pscr, pipr);
+			"pipr = 0x%08x\n", i, events, pscr, pipr);
 
-		if(events) {
+		if (events) {
 			spin_lock(&pending_event_lock);
 			pending_events[i] |= events;
 			spin_unlock(&pending_event_lock);
@@ -724,7 +632,7 @@
 			per &= ~M8XX_PCMCIA_RDY_L(0);
 			per &= ~M8XX_PCMCIA_RDY_L(1);
 
-			out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per, per);
+			out_be32(&pcmcia->pcmc_per, per);
 
 			if (events)
 				pcmcia_parse_events(&socket[i].socket, events);
@@ -732,7 +640,7 @@
 	}
 
 	/* clear the interrupt sources */
-	out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr, pscr);
+	out_be32(&pcmcia->pcmc_pscr, pscr);
 
 	dprintk("Interrupt done.\n");
 
@@ -743,21 +651,21 @@
 {
 	u32 k;
 
-	for(k = 0; k < M8XX_SIZES_NO; k++)
-		if(m8xx_size_to_gray[k] == size)
+	for (k = 0; k < M8XX_SIZES_NO; k++)
+		if (m8xx_size_to_gray[k] == size)
 			break;
 
-	if((k == M8XX_SIZES_NO) || (m8xx_size_to_gray[k] == -1))
+	if ((k == M8XX_SIZES_NO) || (m8xx_size_to_gray[k] == -1))
 		k = -1;
 
 	return k;
 }
 
-static u32 m8xx_get_speed(u32 ns, u32 is_io)
+static u32 m8xx_get_speed(u32 ns, u32 is_io, u32 bus_freq)
 {
 	u32 reg, clocks, psst, psl, psht;
 
-	if(!ns) {
+	if (!ns) {
 
 		/*
 		 * We get called with IO maps setup to 0ns
@@ -765,10 +673,10 @@
 		 * They should be 255ns.
 		 */
 
-		if(is_io)
+		if (is_io)
 			ns = 255;
 		else
-			ns = 100;  /* fast memory if 0 */
+			ns = 100;	/* fast memory if 0 */
 	}
 
 	/*
@@ -779,23 +687,23 @@
 
 /* how we want to adjust the timing - in percent */
 
-#define ADJ 180 /* 80 % longer accesstime - to be sure */
+#define ADJ 180			/* 80 % longer accesstime - to be sure */
 
-	clocks = ((M8XX_BUSFREQ / 1000) * ns) / 1000;
-	clocks = (clocks * ADJ) / (100*1000);
-	if(clocks >= PCMCIA_BMT_LIMIT) {
-		printk( "Max access time limit reached\n");
-		clocks = PCMCIA_BMT_LIMIT-1;
+	clocks = ((bus_freq / 1000) * ns) / 1000;
+	clocks = (clocks * ADJ) / (100 * 1000);
+	if (clocks >= PCMCIA_BMT_LIMIT) {
+		printk("Max access time limit reached\n");
+		clocks = PCMCIA_BMT_LIMIT - 1;
 	}
 
-	psst = clocks / 7;          /* setup time */
-	psht = clocks / 7;          /* hold time */
-	psl  = (clocks * 5) / 7;    /* strobe length */
+	psst = clocks / 7;	/* setup time */
+	psht = clocks / 7;	/* hold time */
+	psl = (clocks * 5) / 7;	/* strobe length */
 
 	psst += clocks - (psst + psht + psl);
 
-	reg =  psst << 12;
-	reg |= psl  << 7;
+	reg = psst << 12;
+	reg |= psl << 7;
 	reg |= psht << 16;
 
 	return reg;
@@ -806,11 +714,12 @@
 	int lsock = container_of(sock, struct socket_info, socket)->slot;
 	struct socket_info *s = &socket[lsock];
 	unsigned int pipr, reg;
+	pcmconf8xx_t *pcmcia = s->pcmcia;
 
-	pipr = in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pipr);
+	pipr = in_be32(&pcmcia->pcmc_pipr);
 
-	*value  = ((pipr & (M8XX_PCMCIA_CD1(lsock)
-			    | M8XX_PCMCIA_CD2(lsock))) == 0) ? SS_DETECT : 0;
+	*value = ((pipr & (M8XX_PCMCIA_CD1(lsock)
+			   | M8XX_PCMCIA_CD2(lsock))) == 0) ? SS_DETECT : 0;
 	*value |= (pipr & M8XX_PCMCIA_WP(lsock)) ? SS_WRPROT : 0;
 
 	if (s->state.flags & SS_IOCARD)
@@ -894,16 +803,16 @@
 	/* read out VS1 and VS2 */
 
 	reg = (pipr & M8XX_PCMCIA_VS_MASK(lsock))
-		>> M8XX_PCMCIA_VS_SHIFT(lsock);
+	    >> M8XX_PCMCIA_VS_SHIFT(lsock);
 
-	if(socket_get(lsock) == PCMCIA_SOCKET_KEY_LV) {
-		switch(reg) {
+	if (socket_get(lsock) == PCMCIA_SOCKET_KEY_LV) {
+		switch (reg) {
 		case 1:
 			*value |= SS_3VCARD;
-			break; /* GND, NC - 3.3V only */
+			break;	/* GND, NC - 3.3V only */
 		case 2:
 			*value |= SS_XVCARD;
-			break; /* NC. GND - x.xV only */
+			break;	/* NC. GND - x.xV only */
 		};
 	}
 
@@ -911,27 +820,29 @@
 	return 0;
 }
 
-static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
+static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t * state)
 {
 	int lsock = container_of(sock, struct socket_info, socket)->slot;
 	struct socket_info *s = &socket[lsock];
 	struct event_table *e;
 	unsigned int reg;
 	unsigned long flags;
+	pcmconf8xx_t *pcmcia = socket[0].pcmcia;
 
-	dprintk( "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
-	      "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags,
-	      state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
+	dprintk("SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
+		"io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags,
+		state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
 
 	/* First, set voltage - bail out if invalid */
-	if(voltage_set(lsock, state->Vcc, state->Vpp))
+	if (voltage_set(lsock, state->Vcc, state->Vpp))
 		return -EINVAL;
 
 	/* Take care of reset... */
-	if(state->flags & SS_RESET)
-		out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) |  M8XX_PGCRX_CXRESET); /* active high */
+	if (state->flags & SS_RESET)
+		out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXRESET);	/* active high */
 	else
-		out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXRESET);
+		out_be32(M8XX_PGCRX(lsock),
+			 in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXRESET);
 
 	/* ... and output enable. */
 
@@ -943,10 +854,11 @@
 	   no pullups are present -> the cards act wierd.
 	   So right now the buffers are enabled if the power is on. */
 
-	if(state->Vcc || state->Vpp)
-		out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXOE); /* active low */
+	if (state->Vcc || state->Vpp)
+		out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXOE);	/* active low */
 	else
-		out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXOE);
+		out_be32(M8XX_PGCRX(lsock),
+			 in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXOE);
 
 	/*
 	 * We'd better turn off interrupts before
@@ -963,17 +875,17 @@
 	e = &s->events[0];
 	reg = 0;
 
-	if(state->csc_mask & SS_DETECT) {
+	if (state->csc_mask & SS_DETECT) {
 		e->eventbit = SS_DETECT;
 		reg |= e->regbit = (M8XX_PCMCIA_CD2(lsock)
 				    | M8XX_PCMCIA_CD1(lsock));
 		e++;
 	}
-	if(state->flags & SS_IOCARD) {
+	if (state->flags & SS_IOCARD) {
 		/*
 		 * I/O card
 		 */
-		if(state->csc_mask & SS_STSCHG) {
+		if (state->csc_mask & SS_STSCHG) {
 			e->eventbit = SS_STSCHG;
 			reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock);
 			e++;
@@ -981,8 +893,10 @@
 		/*
 		 * If io_irq is non-zero we should enable irq.
 		 */
-		if(state->io_irq) {
-			out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | mk_int_int_mask(state->io_irq) << 24);
+		if (state->io_irq) {
+			out_be32(M8XX_PGCRX(lsock),
+				 in_be32(M8XX_PGCRX(lsock)) |
+				 mk_int_int_mask(s->hwirq) << 24);
 			/*
 			 * Strange thing here:
 			 * The manual does not tell us which interrupt
@@ -993,33 +907,32 @@
 			 * have to be cleared in PSCR in the interrupt handler.
 			 */
 			reg |= M8XX_PCMCIA_RDY_L(lsock);
-		}
-		else
-			out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & 0x00ffffff);
-	}
-	else {
+		} else
+			out_be32(M8XX_PGCRX(lsock),
+				 in_be32(M8XX_PGCRX(lsock)) & 0x00ffffff);
+	} else {
 		/*
 		 * Memory card
 		 */
-		if(state->csc_mask & SS_BATDEAD) {
+		if (state->csc_mask & SS_BATDEAD) {
 			e->eventbit = SS_BATDEAD;
 			reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock);
 			e++;
 		}
-		if(state->csc_mask & SS_BATWARN) {
+		if (state->csc_mask & SS_BATWARN) {
 			e->eventbit = SS_BATWARN;
 			reg |= e->regbit = M8XX_PCMCIA_BVD2(lsock);
 			e++;
 		}
 		/* What should I trigger on - low/high,raise,fall? */
-		if(state->csc_mask & SS_READY) {
+		if (state->csc_mask & SS_READY) {
 			e->eventbit = SS_READY;
-			reg |= e->regbit = 0; //??
+			reg |= e->regbit = 0;	//??
 			e++;
 		}
 	}
 
-	e->regbit = 0;  /* terminate list */
+	e->regbit = 0;		/* terminate list */
 
 	/*
 	 * Clear the status changed .
@@ -1027,7 +940,7 @@
 	 * Writing ones will clear the bits.
 	 */
 
-	out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr, reg);
+	out_be32(&pcmcia->pcmc_pscr, reg);
 
 	/*
 	 * Write the mask.
@@ -1036,15 +949,10 @@
 	 * Ones will enable the interrupt.
 	 */
 
-	/*
-	  reg |= ((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per
-	  & M8XX_PCMCIA_MASK(lsock);
-	*/
-
-	reg |= in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per) &
-		(M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
-
-	out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per, reg);
+	reg |=
+	    in_be32(&pcmcia->
+		    pcmc_per) & (M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
+	out_be32(&pcmcia->pcmc_per, reg);
 
 	spin_unlock_irqrestore(&events_lock, flags);
 
@@ -1062,66 +970,68 @@
 	struct socket_info *s = &socket[lsock];
 	struct pcmcia_win *w;
 	unsigned int reg, winnr;
+	pcmconf8xx_t *pcmcia = s->pcmcia;
 
 #define M8XX_SIZE (io->stop - io->start + 1)
 #define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start)
 
-	dprintk( "SetIOMap(%d, %d, %#2.2x, %d ns, "
-	      "%#4.4x-%#4.4x)\n", lsock, io->map, io->flags,
-	      io->speed, io->start, io->stop);
+	dprintk("SetIOMap(%d, %d, %#2.2x, %d ns, "
+		"%#4.4x-%#4.4x)\n", lsock, io->map, io->flags,
+		io->speed, io->start, io->stop);
 
 	if ((io->map >= PCMCIA_IO_WIN_NO) || (io->start > 0xffff)
 	    || (io->stop > 0xffff) || (io->stop < io->start))
 		return -EINVAL;
 
-	if((reg = m8xx_get_graycode(M8XX_SIZE)) == -1)
+	if ((reg = m8xx_get_graycode(M8XX_SIZE)) == -1)
 		return -EINVAL;
 
-	if(io->flags & MAP_ACTIVE) {
+	if (io->flags & MAP_ACTIVE) {
 
-		dprintk( "io->flags & MAP_ACTIVE\n");
+		dprintk("io->flags & MAP_ACTIVE\n");
 
 		winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
-			+ (lsock * PCMCIA_IO_WIN_NO) + io->map;
+		    + (lsock * PCMCIA_IO_WIN_NO) + io->map;
 
 		/* setup registers */
 
-		w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
+		w = (void *)&pcmcia->pcmc_pbr0;
 		w += winnr;
 
-		out_be32(&w->or, 0); /* turn off window first */
+		out_be32(&w->or, 0);	/* turn off window first */
 		out_be32(&w->br, M8XX_BASE);
 
 		reg <<= 27;
-  		reg |= M8XX_PCMCIA_POR_IO |(lsock << 2);
+		reg |= M8XX_PCMCIA_POR_IO | (lsock << 2);
 
-		reg |= m8xx_get_speed(io->speed, 1);
+		reg |= m8xx_get_speed(io->speed, 1, s->bus_freq);
 
-		if(io->flags & MAP_WRPROT)
+		if (io->flags & MAP_WRPROT)
 			reg |= M8XX_PCMCIA_POR_WRPROT;
 
-		if(io->flags & (MAP_16BIT | MAP_AUTOSZ))
+		/*if(io->flags & (MAP_16BIT | MAP_AUTOSZ)) */
+		if (io->flags & MAP_16BIT)
 			reg |= M8XX_PCMCIA_POR_16BIT;
 
-		if(io->flags & MAP_ACTIVE)
+		if (io->flags & MAP_ACTIVE)
 			reg |= M8XX_PCMCIA_POR_VALID;
 
 		out_be32(&w->or, reg);
 
 		dprintk("Socket %u: Mapped io window %u at %#8.8x, "
-		      "OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
+			"OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
 	} else {
 		/* shutdown IO window */
 		winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
-			+ (lsock * PCMCIA_IO_WIN_NO) + io->map;
+		    + (lsock * PCMCIA_IO_WIN_NO) + io->map;
 
 		/* setup registers */
 
-		w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
+		w = (void *)&pcmcia->pcmc_pbr0;
 		w += winnr;
 
-		out_be32(&w->or, 0); /* turn off window */
-		out_be32(&w->br, 0); /* turn off base address */
+		out_be32(&w->or, 0);	/* turn off window */
+		out_be32(&w->br, 0);	/* turn off base address */
 
 		dprintk("Socket %u: Unmapped io window %u at %#8.8x, "
 			"OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
@@ -1129,35 +1039,35 @@
 
 	/* copy the struct and modify the copy */
 	s->io_win[io->map] = *io;
-	s->io_win[io->map].flags &= (MAP_WRPROT
-				     | MAP_16BIT
-				     | MAP_ACTIVE);
+	s->io_win[io->map].flags &= (MAP_WRPROT | MAP_16BIT | MAP_ACTIVE);
 	dprintk("SetIOMap exit\n");
 
 	return 0;
 }
 
-static int m8xx_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem)
+static int m8xx_set_mem_map(struct pcmcia_socket *sock,
+			    struct pccard_mem_map *mem)
 {
 	int lsock = container_of(sock, struct socket_info, socket)->slot;
 	struct socket_info *s = &socket[lsock];
 	struct pcmcia_win *w;
 	struct pccard_mem_map *old;
 	unsigned int reg, winnr;
+	pcmconf8xx_t *pcmcia = s->pcmcia;
 
-	dprintk( "SetMemMap(%d, %d, %#2.2x, %d ns, "
-	      "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
-	      mem->speed, mem->static_start, mem->card_start);
+	dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, "
+		"%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
+		mem->speed, mem->static_start, mem->card_start);
 
 	if ((mem->map >= PCMCIA_MEM_WIN_NO)
-//	    || ((mem->s) >= PCMCIA_MEM_WIN_SIZE)
+//          || ((mem->s) >= PCMCIA_MEM_WIN_SIZE)
 	    || (mem->card_start >= 0x04000000)
-	    || (mem->static_start & 0xfff)                /* 4KByte resolution */
-	    || (mem->card_start & 0xfff))
+	    || (mem->static_start & 0xfff)	/* 4KByte resolution */
+	    ||(mem->card_start & 0xfff))
 		return -EINVAL;
 
-	if((reg = m8xx_get_graycode(PCMCIA_MEM_WIN_SIZE)) == -1) {
-		printk( "Cannot set size to 0x%08x.\n", PCMCIA_MEM_WIN_SIZE);
+	if ((reg = m8xx_get_graycode(PCMCIA_MEM_WIN_SIZE)) == -1) {
+		printk("Cannot set size to 0x%08x.\n", PCMCIA_MEM_WIN_SIZE);
 		return -EINVAL;
 	}
 	reg <<= 27;
@@ -1166,50 +1076,47 @@
 
 	/* Setup the window in the pcmcia controller */
 
-	w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
+	w = (void *)&pcmcia->pcmc_pbr0;
 	w += winnr;
 
 	reg |= lsock << 2;
 
-	reg |= m8xx_get_speed(mem->speed, 0);
+	reg |= m8xx_get_speed(mem->speed, 0, s->bus_freq);
 
-	if(mem->flags & MAP_ATTRIB)
-		reg |=  M8XX_PCMCIA_POR_ATTRMEM;
+	if (mem->flags & MAP_ATTRIB)
+		reg |= M8XX_PCMCIA_POR_ATTRMEM;
 
-	if(mem->flags & MAP_WRPROT)
+	if (mem->flags & MAP_WRPROT)
 		reg |= M8XX_PCMCIA_POR_WRPROT;
 
-	if(mem->flags & MAP_16BIT)
+	if (mem->flags & MAP_16BIT)
 		reg |= M8XX_PCMCIA_POR_16BIT;
 
-	if(mem->flags & MAP_ACTIVE)
+	if (mem->flags & MAP_ACTIVE)
 		reg |= M8XX_PCMCIA_POR_VALID;
 
 	out_be32(&w->or, reg);
 
 	dprintk("Socket %u: Mapped memory window %u at %#8.8x, "
-	      "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or);
+		"OR = %#8.8x.\n", lsock, mem->map, w->br, w->or);
 
-	if(mem->flags & MAP_ACTIVE) {
+	if (mem->flags & MAP_ACTIVE) {
 		/* get the new base address */
 		mem->static_start = PCMCIA_MEM_WIN_BASE +
-			(PCMCIA_MEM_WIN_SIZE * winnr)
-			+ mem->card_start;
+		    (PCMCIA_MEM_WIN_SIZE * winnr)
+		    + mem->card_start;
 	}
 
 	dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, "
-	      "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
-	      mem->speed, mem->static_start, mem->card_start);
+		"%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
+		mem->speed, mem->static_start, mem->card_start);
 
 	/* copy the struct and modify the copy */
 
 	old = &s->mem_win[mem->map];
 
 	*old = *mem;
-	old->flags &= (MAP_ATTRIB
-		       | MAP_WRPROT
-		       | MAP_16BIT
-		       | MAP_ACTIVE);
+	old->flags &= (MAP_ATTRIB | MAP_WRPROT | MAP_16BIT | MAP_ACTIVE);
 
 	return 0;
 }
@@ -1220,7 +1127,7 @@
 	pccard_io_map io = { 0, 0, 0, 0, 1 };
 	pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 };
 
-	dprintk( "sock_init(%d)\n", s);
+	dprintk("sock_init(%d)\n", s);
 
 	m8xx_set_socket(sock, &dead_socket);
 	for (i = 0; i < PCMCIA_IO_WIN_NO; i++) {
@@ -1236,111 +1143,195 @@
 
 }
 
-static int m8xx_suspend(struct pcmcia_socket *sock)
+static int m8xx_sock_suspend(struct pcmcia_socket *sock)
 {
 	return m8xx_set_socket(sock, &dead_socket);
 }
 
 static struct pccard_operations m8xx_services = {
-	.init	= m8xx_sock_init,
-	.suspend = m8xx_suspend,
+	.init = m8xx_sock_init,
+	.suspend = m8xx_sock_suspend,
 	.get_status = m8xx_get_status,
 	.set_socket = m8xx_set_socket,
 	.set_io_map = m8xx_set_io_map,
 	.set_mem_map = m8xx_set_mem_map,
 };
 
-static int __init m8xx_init(void)
+static int __init m8xx_probe(struct of_device *ofdev,
+			     const struct of_device_id *match)
 {
 	struct pcmcia_win *w;
-	unsigned int i,m;
+	unsigned int i, m, hwirq;
+	pcmconf8xx_t *pcmcia;
+	int status;
+	struct device_node *np = ofdev->node;
 
 	pcmcia_info("%s\n", version);
 
-	if (driver_register(&m8xx_driver))
-		return -1;
+	pcmcia = of_iomap(np, 0);
+	if (pcmcia == NULL)
+		return -EINVAL;
+
+	pcmcia_schlvl = irq_of_parse_and_map(np, 0);
+	hwirq = irq_map[pcmcia_schlvl].hwirq;
+	if (pcmcia_schlvl < 0)
+		return -EINVAL;
+
+	m8xx_pgcrx[0] = &pcmcia->pcmc_pgcra;
+	m8xx_pgcrx[1] = &pcmcia->pcmc_pgcrb;
 
 	pcmcia_info(PCMCIA_BOARD_MSG " using " PCMCIA_SLOT_MSG
-		    " with IRQ %u.\n", pcmcia_schlvl);
+		    " with IRQ %u  (%d). \n", pcmcia_schlvl, hwirq);
 
 	/* Configure Status change interrupt */
 
-	if(request_irq(pcmcia_schlvl, m8xx_interrupt, 0,
-			  "m8xx_pcmcia", NULL)) {
+	if (request_irq(pcmcia_schlvl, m8xx_interrupt, IRQF_SHARED,
+			driver_name, socket)) {
 		pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n",
 			     pcmcia_schlvl);
 		return -1;
 	}
 
-	w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
+	w = (void *)&pcmcia->pcmc_pbr0;
 
-	out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr,
-		M8XX_PCMCIA_MASK(0)| M8XX_PCMCIA_MASK(1));
+	out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
+	clrbits32(&pcmcia->pcmc_per, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
 
-	out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per,
-		in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per) &
-		~(M8XX_PCMCIA_MASK(0)| M8XX_PCMCIA_MASK(1)));
+	/* connect interrupt and disable CxOE */
 
-/* connect interrupt and disable CxOE */
+	out_be32(M8XX_PGCRX(0),
+		 M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
+	out_be32(M8XX_PGCRX(1),
+		 M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
 
-	out_be32(M8XX_PGCRX(0), M8XX_PGCRX_CXOE | (mk_int_int_mask(pcmcia_schlvl) << 16));
-	out_be32(M8XX_PGCRX(1), M8XX_PGCRX_CXOE | (mk_int_int_mask(pcmcia_schlvl) << 16));
+	/* initialize the fixed memory windows */
 
-/* intialize the fixed memory windows */
-
-	for(i = 0; i < PCMCIA_SOCKETS_NO; i++){
-		for(m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
+	for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
+		for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
 			out_be32(&w->br, PCMCIA_MEM_WIN_BASE +
-				(PCMCIA_MEM_WIN_SIZE
-				 * (m + i * PCMCIA_MEM_WIN_NO)));
+				 (PCMCIA_MEM_WIN_SIZE
+				  * (m + i * PCMCIA_MEM_WIN_NO)));
 
-			out_be32(&w->or, 0);  /* set to not valid */
+			out_be32(&w->or, 0);	/* set to not valid */
 
 			w++;
 		}
 	}
 
-/* turn off voltage */
+	/* turn off voltage */
 	voltage_set(0, 0, 0);
 	voltage_set(1, 0, 0);
 
-/* Enable external hardware */
+	/* Enable external hardware */
 	hardware_enable(0);
 	hardware_enable(1);
 
-	platform_device_register(&m8xx_device);
-
-	for (i = 0 ; i < PCMCIA_SOCKETS_NO; i++) {
+	for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
 		socket[i].slot = i;
 		socket[i].socket.owner = THIS_MODULE;
-		socket[i].socket.features = SS_CAP_PCCARD | SS_CAP_MEM_ALIGN | SS_CAP_STATIC_MAP;
+		socket[i].socket.features =
+		    SS_CAP_PCCARD | SS_CAP_MEM_ALIGN | SS_CAP_STATIC_MAP;
 		socket[i].socket.irq_mask = 0x000;
 		socket[i].socket.map_size = 0x1000;
 		socket[i].socket.io_offset = 0;
-		socket[i].socket.pci_irq = i  ? 7 : 9;
+		socket[i].socket.pci_irq = pcmcia_schlvl;
 		socket[i].socket.ops = &m8xx_services;
-		socket[i].socket.resource_ops = &pccard_iodyn_ops;
+		socket[i].socket.resource_ops = &pccard_nonstatic_ops;
 		socket[i].socket.cb_dev = NULL;
-		socket[i].socket.dev.parent = &m8xx_device.dev;
+		socket[i].socket.dev.parent = &ofdev->dev;
+		socket[i].pcmcia = pcmcia;
+		socket[i].bus_freq = ppc_proc_freq;
+		socket[i].hwirq = hwirq;
+
 	}
 
-	for (i = 0; i < PCMCIA_SOCKETS_NO; i++)
-		pcmcia_register_socket(&socket[i].socket);
+	for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
+		status = pcmcia_register_socket(&socket[i].socket);
+		if (status < 0)
+			pcmcia_error("Socket register failed\n");
+	}
 
 	return 0;
 }
 
-static void __exit m8xx_exit(void)
+static int m8xx_remove(struct of_device *ofdev)
 {
-	int i;
+	u32 m, i;
+	struct pcmcia_win *w;
+	pcmconf8xx_t *pcmcia = socket[0].pcmcia;
 
+	for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
+		w = (void *)&pcmcia->pcmc_pbr0;
+
+		out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(i));
+		out_be32(&pcmcia->pcmc_per,
+			 in_be32(&pcmcia->pcmc_per) & ~M8XX_PCMCIA_MASK(i));
+
+		/* turn off interrupt and disable CxOE */
+		out_be32(M8XX_PGCRX(i), M8XX_PGCRX_CXOE);
+
+		/* turn off memory windows */
+		for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
+			out_be32(&w->or, 0);	/* set to not valid */
+			w++;
+		}
+
+		/* turn off voltage */
+		voltage_set(i, 0, 0);
+
+		/* disable external hardware */
+		hardware_disable(i);
+	}
 	for (i = 0; i < PCMCIA_SOCKETS_NO; i++)
 		pcmcia_unregister_socket(&socket[i].socket);
 
-	m8xx_shutdown();
+	free_irq(pcmcia_schlvl, NULL);
 
-	platform_device_unregister(&m8xx_device);
-	driver_unregister(&m8xx_driver);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int m8xx_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	return pcmcia_socket_dev_suspend(&pdev->dev, state);
+}
+
+static int m8xx_resume(struct platform_device *pdev)
+{
+	return pcmcia_socket_dev_resume(&pdev->dev);
+}
+#else
+#define m8xx_suspend NULL
+#define m8xx_resume NULL
+#endif
+
+static struct of_device_id m8xx_pcmcia_match[] = {
+	{
+	 .type = "pcmcia",
+	 .compatible = "fsl,pq-pcmcia",
+	 },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match);
+
+static struct of_platform_driver m8xx_pcmcia_driver = {
+	.name = (char *)driver_name,
+	.match_table = m8xx_pcmcia_match,
+	.probe = m8xx_probe,
+	.remove = m8xx_remove,
+	.suspend = m8xx_suspend,
+	.resume = m8xx_resume,
+};
+
+static int __init m8xx_init(void)
+{
+	return of_register_platform_driver(&m8xx_pcmcia_driver);
+}
+
+static void __exit m8xx_exit(void)
+{
+	of_unregister_platform_driver(&m8xx_pcmcia_driver);
 }
 
 module_init(m8xx_init);
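
The probe path above replaces the old hardcoded IMAP_ADDR register accesses with
device-tree lookups and registers the driver against the "fsl,pq-pcmcia" match
table.  For reference, a minimal sketch of that OF probe pattern (function and
variable names are illustrative, not taken from the driver; headers omitted):

	static int example_probe(struct of_device *ofdev,
				 const struct of_device_id *match)
	{
		struct device_node *np = ofdev->node;
		void __iomem *regs;
		unsigned int irq;

		regs = of_iomap(np, 0);		/* map the first "reg" entry */
		if (!regs)
			return -EINVAL;

		irq = irq_of_parse_and_map(np, 0);	/* first interrupt specifier */
		if (irq == NO_IRQ) {
			iounmap(regs);
			return -EINVAL;
		}

		/* ... request_irq(), register the sockets, etc. ... */
		return 0;
	}
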
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 3e20b1cc..8e7b2dd 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -35,12 +35,11 @@
 {
 	void *result;
 
-	result = kmalloc(size, GFP_KERNEL);
+	result = kzalloc(size, GFP_KERNEL);
 	if (!result){
 		printk(KERN_ERR "pnp: Out of Memory\n");
 		return NULL;
 	}
-	memset(result, 0, size);
 	return result;
 }
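
The pnp_alloc() change below this point is the standard kmalloc()+memset() to
kzalloc() conversion; kzalloc() returns memory that is already zeroed, so the
explicit memset() can be dropped.  A minimal before/after sketch:

	/* before: allocate, then zero by hand */
	result = kmalloc(size, GFP_KERNEL);
	if (result)
		memset(result, 0, size);

	/* after: allocate and zero in one call */
	result = kzalloc(size, GFP_KERNEL);
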
 
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 3a201b7..ed112ee 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -147,7 +147,7 @@
 		info->location_id, info->serial, info->capabilities);
 	envp[i] = NULL;
 	
-	value = call_usermodehelper (argv [0], argv, envp, 0);
+	value = call_usermodehelper (argv [0], argv, envp, UMH_WAIT_EXEC);
 	kfree (buf);
 	kfree (envp);
 	return 0;
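
The call_usermodehelper() hunk above spells the wait mode out symbolically:
UMH_WAIT_EXEC means the call returns once the helper has been exec'ed, without
waiting for it to exit.  A hedged sketch of such a call, with made-up argv/envp
values:

	char *argv[] = { "/sbin/pnpbios_helper", "dock", NULL };	/* hypothetical path */
	char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };
	int ret;

	/* UMH_WAIT_EXEC: block until exec() has succeeded or failed;
	 * UMH_WAIT_PROC would additionally wait for the program to exit. */
	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
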
@@ -160,6 +160,7 @@
 {
 	static struct pnp_docking_station_info now;
 	int docked = -1, d = 0;
+	set_freezable();
 	while (!unloading)
 	{
 		int status;
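
The added set_freezable() marks the pnpbios dock thread as freezable, so it can
be parked across suspend/resume.  A minimal sketch of the usual pattern in a
polling kernel thread (names are illustrative, not from this driver):

	#include <linux/freezer.h>
	#include <linux/kthread.h>
	#include <linux/delay.h>

	static int example_poll_thread(void *unused)
	{
		set_freezable();		/* opt in to the system freezer */
		while (!kthread_should_stop()) {
			try_to_freeze();	/* park here during suspend */
			/* ... poll the hardware ... */
			msleep_interruptible(2000);
		}
		return 0;
	}
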
diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
index e251d1c..746031d 100644
--- a/drivers/ps3/Makefile
+++ b/drivers/ps3/Makefile
@@ -1,3 +1,6 @@
 obj-$(CONFIG_PS3_VUART) += vuart.o
-obj-$(CONFIG_PS3_PS3AV) += ps3av.o ps3av_cmd.o
+obj-$(CONFIG_PS3_PS3AV) += ps3av_mod.o
+ps3av_mod-objs		+= ps3av.o ps3av_cmd.o
+obj-$(CONFIG_PPC_PS3) += sys-manager-core.o
 obj-$(CONFIG_PS3_SYS_MANAGER) += sys-manager.o
+obj-$(CONFIG_PS3_STORAGE) += ps3stor_lib.o
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index 1393e64..85e2161 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -1,32 +1,30 @@
 /*
- * Copyright (C) 2006 Sony Computer Entertainment Inc.
- * Copyright 2006, 2007 Sony Corporation
+ *  PS3 AV backend support.
  *
- * AV backend support for PS3
+ *  Copyright (C) 2007 Sony Computer Entertainment Inc.
+ *  Copyright 2007 Sony Corp.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published
- * by the Free Software Foundation; version 2 of the License.
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
  *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/kernel.h>
 #include <linux/ioctl.h>
 
 #include <asm/firmware.h>
-#include <asm/lv1call.h>
 #include <asm/ps3av.h>
 #include <asm/ps3.h>
 
@@ -39,13 +37,12 @@
 module_param(timeout, int, 0644);
 
 static struct ps3av {
-	int available;
 	struct mutex mutex;
 	struct work_struct work;
 	struct completion done;
 	struct workqueue_struct *wq;
 	int open_count;
-	struct ps3_vuart_port_device *dev;
+	struct ps3_system_bus_device *dev;
 
 	int region;
 	struct ps3av_pkt_av_get_hw_conf av_hw_conf;
@@ -55,11 +52,13 @@
 	u32 audio_port;
 	int ps3av_mode;
 	int ps3av_mode_old;
-} ps3av;
-
-static struct ps3_vuart_port_device ps3av_dev = {
-	.match_id = PS3_MATCH_ID_AV_SETTINGS
-};
+	union {
+		struct ps3av_reply_hdr reply_hdr;
+		u8 raw[PS3AV_BUF_SIZE];
+	} recv_buf;
+	void (*flip_ctl)(int on, void *data);
+	void *flip_data;
+} *ps3av;
 
 /* color space */
 #define YUV444 PS3AV_CMD_VIDEO_CS_YUV444_8
@@ -169,7 +168,7 @@
 	if (hdr->cid & PS3AV_EVENT_CMD_MASK) {
 		table = ps3av_search_cmd_table(hdr->cid, PS3AV_EVENT_CMD_MASK);
 		if (table)
-			dev_dbg(&ps3av_dev.core,
+			dev_dbg(&ps3av->dev->core,
 				"recv event packet cid:%08x port:0x%x size:%d\n",
 				hdr->cid, ps3av_event_get_port_id(hdr->cid),
 				hdr->size);
@@ -182,6 +181,41 @@
 	return 0;
 }
 
+
+#define POLLING_INTERVAL  25	/* in msec */
+
+static int ps3av_vuart_write(struct ps3_system_bus_device *dev,
+			     const void *buf, unsigned long size)
+{
+	int error;
+	dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
+	error = ps3_vuart_write(dev, buf, size);
+	dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
+	return error ? error : size;
+}
+
+static int ps3av_vuart_read(struct ps3_system_bus_device *dev, void *buf,
+			    unsigned long size, int timeout)
+{
+	int error;
+	int loopcnt = 0;
+
+	dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
+	timeout = (timeout + POLLING_INTERVAL - 1) / POLLING_INTERVAL;
+	while (loopcnt++ <= timeout) {
+		error = ps3_vuart_read(dev, buf, size);
+		if (!error)
+			return size;
+		if (error != -EAGAIN) {
+			printk(KERN_ERR "%s: ps3_vuart_read failed %d\n",
+			       __func__, error);
+			return error;
+		}
+		msleep(POLLING_INTERVAL);
+	}
+	return -EWOULDBLOCK;
+}
+
 static int ps3av_send_cmd_pkt(const struct ps3av_send_hdr *send_buf,
 			      struct ps3av_reply_hdr *recv_buf, int write_len,
 			      int read_len)
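
To make the retry arithmetic in the new ps3av_vuart_read() above concrete: the
millisecond timeout is rounded up to a count of POLLING_INTERVAL polls, and the
loop condition allows one extra pass.  A worked example:

	/* timeout = 100 ms, POLLING_INTERVAL = 25 ms */
	timeout = (100 + 25 - 1) / 25;	/* = 4 */
	/* 'while (loopcnt++ <= timeout)' then makes up to 5 read attempts,
	 * sleeping 25 ms after each failed one, i.e. roughly 125 ms in the
	 * worst case before returning -EWOULDBLOCK. */
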
@@ -190,13 +224,13 @@
 	u32 cmd;
 	int event;
 
-	if (!ps3av.available)
+	if (!ps3av)
 		return -ENODEV;
 
 	/* send pkt */
-	res = ps3av_vuart_write(ps3av.dev, send_buf, write_len);
+	res = ps3av_vuart_write(ps3av->dev, send_buf, write_len);
 	if (res < 0) {
-		dev_dbg(&ps3av_dev.core,
+		dev_dbg(&ps3av->dev->core,
 			"%s: ps3av_vuart_write() failed (result=%d)\n",
 			__func__, res);
 		return res;
@@ -206,20 +240,20 @@
 	cmd = send_buf->cid;
 	do {
 		/* read header */
-		res = ps3av_vuart_read(ps3av.dev, recv_buf, PS3AV_HDR_SIZE,
+		res = ps3av_vuart_read(ps3av->dev, recv_buf, PS3AV_HDR_SIZE,
 				       timeout);
 		if (res != PS3AV_HDR_SIZE) {
-			dev_dbg(&ps3av_dev.core,
+			dev_dbg(&ps3av->dev->core,
 				"%s: ps3av_vuart_read() failed (result=%d)\n",
 				__func__, res);
 			return res;
 		}
 
 		/* read body */
-		res = ps3av_vuart_read(ps3av.dev, &recv_buf->cid,
+		res = ps3av_vuart_read(ps3av->dev, &recv_buf->cid,
 				       recv_buf->size, timeout);
 		if (res < 0) {
-			dev_dbg(&ps3av_dev.core,
+			dev_dbg(&ps3av->dev->core,
 				"%s: ps3av_vuart_read() failed (result=%d)\n",
 				__func__, res);
 			return res;
@@ -230,7 +264,7 @@
 	} while (event);
 
 	if ((cmd | PS3AV_REPLY_BIT) != recv_buf->cid) {
-		dev_dbg(&ps3av_dev.core, "%s: reply err (result=%x)\n",
+		dev_dbg(&ps3av->dev->core, "%s: reply err (result=%x)\n",
 			__func__, recv_buf->cid);
 		return -EINVAL;
 	}
@@ -245,7 +279,7 @@
 	int return_len;
 
 	if (recv_buf->version != PS3AV_VERSION) {
-		dev_dbg(&ps3av_dev.core, "reply_packet invalid version:%x\n",
+		dev_dbg(&ps3av->dev->core, "reply_packet invalid version:%x\n",
 			recv_buf->version);
 		return -EFAULT;
 	}
@@ -267,16 +301,11 @@
 		 struct ps3av_send_hdr *buf)
 {
 	int res = 0;
-	static union {
-		struct ps3av_reply_hdr reply_hdr;
-		u8 raw[PS3AV_BUF_SIZE];
-	} recv_buf;
-
 	u32 *table;
 
-	BUG_ON(!ps3av.available);
+	BUG_ON(!ps3av);
 
-	mutex_lock(&ps3av.mutex);
+	mutex_lock(&ps3av->mutex);
 
 	table = ps3av_search_cmd_table(cid, PS3AV_CID_MASK);
 	BUG_ON(!table);
@@ -288,7 +317,7 @@
 	ps3av_set_hdr(cid, send_len, buf);
 
 	/* send packet via vuart */
-	res = ps3av_send_cmd_pkt(buf, &recv_buf.reply_hdr, send_len,
+	res = ps3av_send_cmd_pkt(buf, &ps3av->recv_buf.reply_hdr, send_len,
 				 usr_buf_size);
 	if (res < 0) {
 		printk(KERN_ERR
@@ -298,7 +327,7 @@
 	}
 
 	/* process reply packet */
-	res = ps3av_process_reply_packet(buf, &recv_buf.reply_hdr,
+	res = ps3av_process_reply_packet(buf, &ps3av->recv_buf.reply_hdr,
 					 usr_buf_size);
 	if (res < 0) {
 		printk(KERN_ERR "%s: put_return_status() failed (result=%d)\n",
@@ -306,11 +335,11 @@
 		goto err;
 	}
 
-	mutex_unlock(&ps3av.mutex);
+	mutex_unlock(&ps3av->mutex);
 	return 0;
 
       err:
-	mutex_unlock(&ps3av.mutex);
+	mutex_unlock(&ps3av->mutex);
 	printk(KERN_ERR "%s: failed cid:%x res:%d\n", __func__, cid, res);
 	return res;
 }
@@ -319,11 +348,11 @@
 {
 	int i, num_of_av_port, res;
 
-	num_of_av_port = ps3av.av_hw_conf.num_of_hdmi +
-			 ps3av.av_hw_conf.num_of_avmulti;
+	num_of_av_port = ps3av->av_hw_conf.num_of_hdmi +
+			 ps3av->av_hw_conf.num_of_avmulti;
 	/* video mute on */
 	for (i = 0; i < num_of_av_port; i++) {
-		res = ps3av_cmd_av_video_mute(1, &ps3av.av_port[i], mute);
+		res = ps3av_cmd_av_video_mute(1, &ps3av->av_port[i], mute);
 		if (res < 0)
 			return -1;
 	}
@@ -335,13 +364,13 @@
 {
 	int i, num_of_hdmi_port, num_of_av_port, res;
 
-	num_of_hdmi_port = ps3av.av_hw_conf.num_of_hdmi;
-	num_of_av_port = ps3av.av_hw_conf.num_of_hdmi +
-			 ps3av.av_hw_conf.num_of_avmulti;
+	num_of_hdmi_port = ps3av->av_hw_conf.num_of_hdmi;
+	num_of_av_port = ps3av->av_hw_conf.num_of_hdmi +
+			 ps3av->av_hw_conf.num_of_avmulti;
 
 	/* tv mute */
 	for (i = 0; i < num_of_hdmi_port; i++) {
-		res = ps3av_cmd_av_tv_mute(ps3av.av_port[i],
+		res = ps3av_cmd_av_tv_mute(ps3av->av_port[i],
 					   PS3AV_CMD_MUTE_ON);
 		if (res < 0)
 			return -1;
@@ -350,11 +379,11 @@
 
 	/* video mute on */
 	for (i = 0; i < num_of_av_port; i++) {
-		res = ps3av_cmd_av_video_disable_sig(ps3av.av_port[i]);
+		res = ps3av_cmd_av_video_disable_sig(ps3av->av_port[i]);
 		if (res < 0)
 			return -1;
 		if (i < num_of_hdmi_port) {
-			res = ps3av_cmd_av_tv_mute(ps3av.av_port[i],
+			res = ps3av_cmd_av_tv_mute(ps3av->av_port[i],
 						   PS3AV_CMD_MUTE_OFF);
 			if (res < 0)
 				return -1;
@@ -369,17 +398,17 @@
 {
 	int i, num_of_av_port, num_of_opt_port, res;
 
-	num_of_av_port = ps3av.av_hw_conf.num_of_hdmi +
-			 ps3av.av_hw_conf.num_of_avmulti;
-	num_of_opt_port = ps3av.av_hw_conf.num_of_spdif;
+	num_of_av_port = ps3av->av_hw_conf.num_of_hdmi +
+			 ps3av->av_hw_conf.num_of_avmulti;
+	num_of_opt_port = ps3av->av_hw_conf.num_of_spdif;
 
 	for (i = 0; i < num_of_av_port; i++) {
-		res = ps3av_cmd_av_audio_mute(1, &ps3av.av_port[i], mute);
+		res = ps3av_cmd_av_audio_mute(1, &ps3av->av_port[i], mute);
 		if (res < 0)
 			return -1;
 	}
 	for (i = 0; i < num_of_opt_port; i++) {
-		res = ps3av_cmd_audio_mute(1, &ps3av.opt_port[i], mute);
+		res = ps3av_cmd_audio_mute(1, &ps3av->opt_port[i], mute);
 		if (res < 0)
 			return -1;
 	}
@@ -394,40 +423,40 @@
 	struct ps3av_pkt_audio_mode audio_mode;
 	u32 len = 0;
 
-	num_of_audio = ps3av.av_hw_conf.num_of_hdmi +
-		       ps3av.av_hw_conf.num_of_avmulti +
-		       ps3av.av_hw_conf.num_of_spdif;
+	num_of_audio = ps3av->av_hw_conf.num_of_hdmi +
+		       ps3av->av_hw_conf.num_of_avmulti +
+		       ps3av->av_hw_conf.num_of_spdif;
 
 	avb_param.num_of_video_pkt = 0;
 	avb_param.num_of_audio_pkt = PS3AV_AVB_NUM_AUDIO;	/* always 0 */
 	avb_param.num_of_av_video_pkt = 0;
-	avb_param.num_of_av_audio_pkt = ps3av.av_hw_conf.num_of_hdmi;
+	avb_param.num_of_av_audio_pkt = ps3av->av_hw_conf.num_of_hdmi;
 
-	vid = video_mode_table[ps3av.ps3av_mode].vid;
+	vid = video_mode_table[ps3av->ps3av_mode].vid;
 
 	/* audio mute */
 	ps3av_set_audio_mute(PS3AV_CMD_MUTE_ON);
 
 	/* audio inactive */
-	res = ps3av_cmd_audio_active(0, ps3av.audio_port);
+	res = ps3av_cmd_audio_active(0, ps3av->audio_port);
 	if (res < 0)
-		dev_dbg(&ps3av_dev.core,
+		dev_dbg(&ps3av->dev->core,
 			"ps3av_cmd_audio_active OFF failed\n");
 
 	/* audio_pkt */
 	for (i = 0; i < num_of_audio; i++) {
-		ps3av_cmd_set_audio_mode(&audio_mode, ps3av.av_port[i], ch, fs,
-					 word_bits, format, source);
-		if (i < ps3av.av_hw_conf.num_of_hdmi) {
+		ps3av_cmd_set_audio_mode(&audio_mode, ps3av->av_port[i], ch,
+					 fs, word_bits, format, source);
+		if (i < ps3av->av_hw_conf.num_of_hdmi) {
 			/* hdmi only */
 			len += ps3av_cmd_set_av_audio_param(&avb_param.buf[len],
-							    ps3av.av_port[i],
+							    ps3av->av_port[i],
 							    &audio_mode, vid);
 		}
 		/* audio_mode pkt should be sent separately */
 		res = ps3av_cmd_audio_mode(&audio_mode);
 		if (res < 0)
-			dev_dbg(&ps3av_dev.core,
+			dev_dbg(&ps3av->dev->core,
 				"ps3av_cmd_audio_mode failed, port:%x\n", i);
 	}
 
@@ -435,15 +464,16 @@
 	len += offsetof(struct ps3av_pkt_avb_param, buf);
 	res = ps3av_cmd_avb_param(&avb_param, len);
 	if (res < 0)
-		dev_dbg(&ps3av_dev.core, "ps3av_cmd_avb_param failed\n");
+		dev_dbg(&ps3av->dev->core, "ps3av_cmd_avb_param failed\n");
 
 	/* audio mute */
 	ps3av_set_audio_mute(PS3AV_CMD_MUTE_OFF);
 
 	/* audio active */
-	res = ps3av_cmd_audio_active(1, ps3av.audio_port);
+	res = ps3av_cmd_audio_active(1, ps3av->audio_port);
 	if (res < 0)
-		dev_dbg(&ps3av_dev.core, "ps3av_cmd_audio_active ON failed\n");
+		dev_dbg(&ps3av->dev->core,
+			"ps3av_cmd_audio_active ON failed\n");
 
 	return 0;
 }
@@ -456,7 +486,7 @@
 	ps3av_set_av_video_mute(PS3AV_CMD_MUTE_ON);
 
 	/* wake up ps3avd to do the actual video mode setting */
-	queue_work(ps3av.wq, &ps3av.work);
+	queue_work(ps3av->wq, &ps3av->work);
 
 	return 0;
 }
@@ -473,8 +503,8 @@
 
 	avb_param.num_of_video_pkt = PS3AV_AVB_NUM_VIDEO;	/* num of head */
 	avb_param.num_of_audio_pkt = 0;
-	avb_param.num_of_av_video_pkt = ps3av.av_hw_conf.num_of_hdmi +
-					ps3av.av_hw_conf.num_of_avmulti;
+	avb_param.num_of_av_video_pkt = ps3av->av_hw_conf.num_of_hdmi +
+					ps3av->av_hw_conf.num_of_avmulti;
 	avb_param.num_of_av_audio_pkt = 0;
 
 	/* video signal off */
@@ -484,21 +514,21 @@
 	if (id & PS3AV_MODE_HDCP_OFF) {
 		res = ps3av_cmd_av_hdmi_mode(PS3AV_CMD_AV_HDMI_HDCP_OFF);
 		if (res == PS3AV_STATUS_UNSUPPORTED_HDMI_MODE)
-			dev_dbg(&ps3av_dev.core, "Not supported\n");
+			dev_dbg(&ps3av->dev->core, "Not supported\n");
 		else if (res)
-			dev_dbg(&ps3av_dev.core,
+			dev_dbg(&ps3av->dev->core,
 				"ps3av_cmd_av_hdmi_mode failed\n");
 	} else if (old_id & PS3AV_MODE_HDCP_OFF) {
 		res = ps3av_cmd_av_hdmi_mode(PS3AV_CMD_AV_HDMI_MODE_NORMAL);
 		if (res < 0 && res != PS3AV_STATUS_UNSUPPORTED_HDMI_MODE)
-			dev_dbg(&ps3av_dev.core,
+			dev_dbg(&ps3av->dev->core,
 				"ps3av_cmd_av_hdmi_mode failed\n");
 	}
 
 	/* video_pkt */
 	for (i = 0; i < avb_param.num_of_video_pkt; i++)
 		len += ps3av_cmd_set_video_mode(&avb_param.buf[len],
-						ps3av.head[i], video_mode->vid,
+						ps3av->head[i], video_mode->vid,
 						video_mode->fmt, id);
 	/* av_video_pkt */
 	for (i = 0; i < avb_param.num_of_av_video_pkt; i++) {
@@ -507,12 +537,12 @@
 		else
 			av_video_cs = video_mode->cs;
 #ifndef PS3AV_HDMI_YUV
-		if (ps3av.av_port[i] == PS3AV_CMD_AVPORT_HDMI_0 ||
-		    ps3av.av_port[i] == PS3AV_CMD_AVPORT_HDMI_1)
+		if (ps3av->av_port[i] == PS3AV_CMD_AVPORT_HDMI_0 ||
+		    ps3av->av_port[i] == PS3AV_CMD_AVPORT_HDMI_1)
 			av_video_cs = RGB8;	/* use RGB for HDMI */
 #endif
 		len += ps3av_cmd_set_av_video_cs(&avb_param.buf[len],
-						 ps3av.av_port[i],
+						 ps3av->av_port[i],
 						 video_mode->vid, av_video_cs,
 						 video_mode->aspect, id);
 	}
@@ -524,7 +554,7 @@
 		       "%s: Command failed. Please try your request again. \n",
 		       __func__);
 	else if (res)
-		dev_dbg(&ps3av_dev.core, "ps3av_cmd_avb_param failed\n");
+		dev_dbg(&ps3av->dev->core, "ps3av_cmd_avb_param failed\n");
 
 	msleep(1500);
 	/* av video mute */
@@ -533,8 +563,8 @@
 
 static void ps3avd(struct work_struct *work)
 {
-	ps3av_set_videomode_cont(ps3av.ps3av_mode, ps3av.ps3av_mode_old);
-	complete(&ps3av.done);
+	ps3av_set_videomode_cont(ps3av->ps3av_mode, ps3av->ps3av_mode_old);
+	complete(&ps3av->done);
 }
 
 static int ps3av_vid2table_id(int vid)
@@ -601,7 +631,7 @@
 		return vid;
 	}
 
-	if (ps3av.region & PS3AV_REGION_60)
+	if (ps3av->region & PS3AV_REGION_60)
 		vid = PS3AV_DEFAULT_HDMI_VID_REG_60;
 	else
 		vid = PS3AV_DEFAULT_HDMI_VID_REG_50;
@@ -643,16 +673,16 @@
 		vid = PS3AV_DEFAULT_DVI_VID;
 	} else if (vid == -1) {
 		/* no HDMI interface or HDMI is off */
-		if (ps3av.region & PS3AV_REGION_60)
+		if (ps3av->region & PS3AV_REGION_60)
 			vid = PS3AV_DEFAULT_AVMULTI_VID_REG_60;
 		else
 			vid = PS3AV_DEFAULT_AVMULTI_VID_REG_50;
-		if (ps3av.region & PS3AV_REGION_RGB)
+		if (ps3av->region & PS3AV_REGION_RGB)
 			rgb = PS3AV_MODE_RGB;
 	} else if (boot) {
 		/* HDMI: using DEFAULT HDMI_VID while booting up */
 		info = &monitor_info.info;
-		if (ps3av.region & PS3AV_REGION_60) {
+		if (ps3av->region & PS3AV_REGION_60) {
 			if (info->res_60.res_bits & PS3AV_RESBIT_720x480P)
 				vid = PS3AV_DEFAULT_HDMI_VID_REG_60;
 			else if (info->res_50.res_bits & PS3AV_RESBIT_720x576P)
@@ -715,14 +745,14 @@
 
 	size = ARRAY_SIZE(video_mode_table);
 	if ((id & PS3AV_MODE_MASK) > size - 1 || id < 0) {
-		dev_dbg(&ps3av_dev.core, "%s: error id :%d\n", __func__, id);
+		dev_dbg(&ps3av->dev->core, "%s: error id :%d\n", __func__, id);
 		return -EINVAL;
 	}
 
 	/* auto mode */
 	option = id & ~PS3AV_MODE_MASK;
 	if ((id & PS3AV_MODE_MASK) == 0) {
-		id = ps3av_auto_videomode(&ps3av.av_hw_conf, boot);
+		id = ps3av_auto_videomode(&ps3av->av_hw_conf, boot);
 		if (id < 1) {
 			printk(KERN_ERR "%s: invalid id :%d\n", __func__, id);
 			return -EINVAL;
@@ -731,11 +761,11 @@
 	}
 
 	/* set videomode */
-	wait_for_completion(&ps3av.done);
-	ps3av.ps3av_mode_old = ps3av.ps3av_mode;
-	ps3av.ps3av_mode = id;
+	wait_for_completion(&ps3av->done);
+	ps3av->ps3av_mode_old = ps3av->ps3av_mode;
+	ps3av->ps3av_mode = id;
 	if (ps3av_set_videomode())
-		ps3av.ps3av_mode = ps3av.ps3av_mode_old;
+		ps3av->ps3av_mode = ps3av->ps3av_mode_old;
 
 	return 0;
 }
@@ -744,7 +774,7 @@
 
 int ps3av_get_auto_mode(int boot)
 {
-	return ps3av_auto_videomode(&ps3av.av_hw_conf, boot);
+	return ps3av_auto_videomode(&ps3av->av_hw_conf, boot);
 }
 
 EXPORT_SYMBOL_GPL(ps3av_get_auto_mode);
@@ -772,7 +802,7 @@
 
 int ps3av_get_mode(void)
 {
-	return ps3av.ps3av_mode;
+	return ps3av ? ps3av->ps3av_mode : 0;
 }
 
 EXPORT_SYMBOL_GPL(ps3av_get_mode);
@@ -842,82 +872,65 @@
 
 EXPORT_SYMBOL_GPL(ps3av_audio_mute);
 
-int ps3av_dev_open(void)
+void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data),
+			     void *flip_data)
 {
-	int status = 0;
+	mutex_lock(&ps3av->mutex);
+	ps3av->flip_ctl = flip_ctl;
+	ps3av->flip_data = flip_data;
+	mutex_unlock(&ps3av->mutex);
+}
+EXPORT_SYMBOL_GPL(ps3av_register_flip_ctl);
 
-	mutex_lock(&ps3av.mutex);
-	if (!ps3av.open_count++) {
-		status = lv1_gpu_open(0);
-		if (status) {
-			printk(KERN_ERR "%s: lv1_gpu_open failed %d\n",
-			       __func__, status);
-			ps3av.open_count--;
-		}
-	}
-	mutex_unlock(&ps3av.mutex);
-
-	return status;
+void ps3av_flip_ctl(int on)
+{
+	mutex_lock(&ps3av->mutex);
+	if (ps3av->flip_ctl)
+		ps3av->flip_ctl(on, ps3av->flip_data);
+	mutex_unlock(&ps3av->mutex);
 }
 
-EXPORT_SYMBOL_GPL(ps3av_dev_open);
-
-int ps3av_dev_close(void)
-{
-	int status = 0;
-
-	mutex_lock(&ps3av.mutex);
-	if (ps3av.open_count <= 0) {
-		printk(KERN_ERR "%s: GPU already closed\n", __func__);
-		status = -1;
-	} else if (!--ps3av.open_count) {
-		status = lv1_gpu_close();
-		if (status)
-			printk(KERN_WARNING "%s: lv1_gpu_close failed %d\n",
-			       __func__, status);
-	}
-	mutex_unlock(&ps3av.mutex);
-
-	return status;
-}
-
-EXPORT_SYMBOL_GPL(ps3av_dev_close);
-
-static int ps3av_probe(struct ps3_vuart_port_device *dev)
+static int ps3av_probe(struct ps3_system_bus_device *dev)
 {
 	int res;
 	u32 id;
 
-	dev_dbg(&ps3av_dev.core, "init ...\n");
-	dev_dbg(&ps3av_dev.core, "  timeout=%d\n", timeout);
+	dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
+	dev_dbg(&dev->core, "  timeout=%d\n", timeout);
 
-	memset(&ps3av, 0, sizeof(ps3av));
+	if (ps3av) {
+		dev_err(&dev->core, "Only one ps3av device is supported\n");
+		return -EBUSY;
+	}
 
-	mutex_init(&ps3av.mutex);
-	ps3av.ps3av_mode = 0;
-	ps3av.dev = dev;
-
-	INIT_WORK(&ps3av.work, ps3avd);
-	init_completion(&ps3av.done);
-	complete(&ps3av.done);
-	ps3av.wq = create_singlethread_workqueue("ps3avd");
-	if (!ps3av.wq)
+	ps3av = kzalloc(sizeof(*ps3av), GFP_KERNEL);
+	if (!ps3av)
 		return -ENOMEM;
 
-	ps3av.available = 1;
+	mutex_init(&ps3av->mutex);
+	ps3av->ps3av_mode = 0;
+	ps3av->dev = dev;
+
+	INIT_WORK(&ps3av->work, ps3avd);
+	init_completion(&ps3av->done);
+	complete(&ps3av->done);
+	ps3av->wq = create_singlethread_workqueue("ps3avd");
+	if (!ps3av->wq)
+		goto fail;
+
 	switch (ps3_os_area_get_av_multi_out()) {
 	case PS3_PARAM_AV_MULTI_OUT_NTSC:
-		ps3av.region = PS3AV_REGION_60;
+		ps3av->region = PS3AV_REGION_60;
 		break;
 	case PS3_PARAM_AV_MULTI_OUT_PAL_YCBCR:
 	case PS3_PARAM_AV_MULTI_OUT_SECAM:
-		ps3av.region = PS3AV_REGION_50;
+		ps3av->region = PS3AV_REGION_50;
 		break;
 	case PS3_PARAM_AV_MULTI_OUT_PAL_RGB:
-		ps3av.region = PS3AV_REGION_50 | PS3AV_REGION_RGB;
+		ps3av->region = PS3AV_REGION_50 | PS3AV_REGION_RGB;
 		break;
 	default:
-		ps3av.region = PS3AV_REGION_60;
+		ps3av->region = PS3AV_REGION_60;
 		break;
 	}
 
@@ -927,39 +940,47 @@
 		printk(KERN_ERR "%s: ps3av_cmd_init failed %d\n", __func__,
 		       res);
 
-	ps3av_get_hw_conf(&ps3av);
-	id = ps3av_auto_videomode(&ps3av.av_hw_conf, 1);
-	mutex_lock(&ps3av.mutex);
-	ps3av.ps3av_mode = id;
-	mutex_unlock(&ps3av.mutex);
+	ps3av_get_hw_conf(ps3av);
+	id = ps3av_auto_videomode(&ps3av->av_hw_conf, 1);
+	mutex_lock(&ps3av->mutex);
+	ps3av->ps3av_mode = id;
+	mutex_unlock(&ps3av->mutex);
 
-	dev_dbg(&ps3av_dev.core, "init...done\n");
+	dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
 
 	return 0;
+
+fail:
+	kfree(ps3av);
+	ps3av = NULL;
+	return -ENOMEM;
 }
 
-static int ps3av_remove(struct ps3_vuart_port_device *dev)
+static int ps3av_remove(struct ps3_system_bus_device *dev)
 {
-	if (ps3av.available) {
+	dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
+	if (ps3av) {
 		ps3av_cmd_fin();
-		if (ps3av.wq)
-			destroy_workqueue(ps3av.wq);
-		ps3av.available = 0;
+		if (ps3av->wq)
+			destroy_workqueue(ps3av->wq);
+		kfree(ps3av);
+		ps3av = NULL;
 	}
 
+	dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
 	return 0;
 }
 
-static void ps3av_shutdown(struct ps3_vuart_port_device *dev)
+static void ps3av_shutdown(struct ps3_system_bus_device *dev)
 {
+	dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
 	ps3av_remove(dev);
+	dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
 }
 
 static struct ps3_vuart_port_driver ps3av_driver = {
-	.match_id = PS3_MATCH_ID_AV_SETTINGS,
-	.core = {
-		.name = "ps3_av",
-	},
+	.core.match_id = PS3_MATCH_ID_AV_SETTINGS,
+	.core.core.name = "ps3_av",
 	.probe = ps3av_probe,
 	.remove = ps3av_remove,
 	.shutdown = ps3av_shutdown,
@@ -972,6 +993,8 @@
 	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
 		return -ENODEV;
 
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
 	error = ps3_vuart_port_driver_register(&ps3av_driver);
 	if (error) {
 		printk(KERN_ERR
@@ -980,20 +1003,21 @@
 		return error;
 	}
 
-	error = ps3_vuart_port_device_register(&ps3av_dev);
-	if (error)
-		printk(KERN_ERR
-		       "%s: ps3_vuart_port_device_register failed %d\n",
-		       __func__, error);
-
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
 	return error;
 }
 
 static void __exit ps3av_module_exit(void)
 {
-	device_unregister(&ps3av_dev.core);
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
 	ps3_vuart_port_driver_unregister(&ps3av_driver);
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
 }
 
 subsys_initcall(ps3av_module_init);
 module_exit(ps3av_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PS3 AV Settings Driver");
+MODULE_AUTHOR("Sony Computer Entertainment Inc.");
+MODULE_ALIAS(PS3_MODULE_ALIAS_AV_SETTINGS);
diff --git a/drivers/ps3/ps3av_cmd.c b/drivers/ps3/ps3av_cmd.c
index 0145ea1..f72f5dd 100644
--- a/drivers/ps3/ps3av_cmd.c
+++ b/drivers/ps3/ps3av_cmd.c
@@ -143,6 +143,14 @@
 	return PS3AV_CMD_AV_VID_480P;
 }
 
+static int ps3av_hdmi_range(void)
+{
+	if (ps3_compare_firmware_version(1, 8, 0) < 0)
+		return 0;
+	else
+		return 1; /* supported */
+}
+
 int ps3av_cmd_init(void)
 {
 	int res;
@@ -350,6 +358,10 @@
 	/* should be same as video_mode.video_cs_out */
 	av_video_cs->av_cs_in = ps3av_cs_video2av(PS3AV_CMD_VIDEO_CS_RGB_8);
 	av_video_cs->bitlen_out = ps3av_cs_video2av_bitlen(cs_out);
+	if ((id & PS3AV_MODE_WHITE) && ps3av_hdmi_range())
+		av_video_cs->super_white = PS3AV_CMD_AV_SUPER_WHITE_ON;
+	else /* default off */
+		av_video_cs->super_white = PS3AV_CMD_AV_SUPER_WHITE_OFF;
 	av_video_cs->aspect = aspect;
 	if (id & PS3AV_MODE_DITHER) {
 		av_video_cs->dither = PS3AV_CMD_AV_DITHER_ON
@@ -392,6 +404,10 @@
 	video_mode->pitch = video_mode->width * 4;	/* line_length */
 	video_mode->video_out_format = PS3AV_CMD_VIDEO_OUT_FORMAT_RGB_12BIT;
 	video_mode->video_format = ps3av_video_fmt_table[video_fmt].format;
+	if ((id & PS3AV_MODE_COLOR) && ps3av_hdmi_range())
+		video_mode->video_cl_cnv = PS3AV_CMD_VIDEO_CL_CNV_DISABLE_LUT;
+	else /* default enable */
+		video_mode->video_cl_cnv = PS3AV_CMD_VIDEO_CL_CNV_ENABLE_LUT;
 	video_mode->video_order = ps3av_video_fmt_table[video_fmt].order;
 
 	pr_debug("%s: video_mode:vid:%x width:%d height:%d pitch:%d out_format:%d format:%x order:%x\n",
@@ -852,7 +868,7 @@
 {
 	int res;
 
-	ps3fb_flip_ctl(0);	/* flip off */
+	ps3av_flip_ctl(0);	/* flip off */
 
 	/* avb packet */
 	res = ps3av_do_pkt(PS3AV_CID_AVB_PARAM, send_len, sizeof(*avb),
@@ -866,7 +882,7 @@
 			 res);
 
       out:
-	ps3fb_flip_ctl(1);	/* flip on */
+	ps3av_flip_ctl(1);	/* flip on */
 	return res;
 }
 
@@ -987,34 +1003,3 @@
 		| PS3AV_CMD_AV_LAYOUT_176 \
 		| PS3AV_CMD_AV_LAYOUT_192)
 
-/************************* vuart ***************************/
-
-#define POLLING_INTERVAL  25	/* in msec */
-
-int ps3av_vuart_write(struct ps3_vuart_port_device *dev, const void *buf,
-		      unsigned long size)
-{
-	int error = ps3_vuart_write(dev, buf, size);
-	return error ? error : size;
-}
-
-int ps3av_vuart_read(struct ps3_vuart_port_device *dev, void *buf,
-		     unsigned long size, int timeout)
-{
-	int error;
-	int loopcnt = 0;
-
-	timeout = (timeout + POLLING_INTERVAL - 1) / POLLING_INTERVAL;
-	while (loopcnt++ <= timeout) {
-		error = ps3_vuart_read(dev, buf, size);
-		if (!error)
-			return size;
-		if (error != -EAGAIN) {
-			printk(KERN_ERR "%s: ps3_vuart_read failed %d\n",
-			       __func__, error);
-			return error;
-		}
-		msleep(POLLING_INTERVAL);
-	}
-	return -EWOULDBLOCK;
-}
diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
new file mode 100644
index 0000000..3a9824e
--- /dev/null
+++ b/drivers/ps3/ps3stor_lib.c
@@ -0,0 +1,302 @@
+/*
+ * PS3 Storage Library
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/dma-mapping.h>
+
+#include <asm/lv1call.h>
+#include <asm/ps3stor.h>
+
+
+static int ps3stor_probe_access(struct ps3_storage_device *dev)
+{
+	int res, error;
+	unsigned int i;
+	unsigned long n;
+
+	if (dev->sbd.match_id == PS3_MATCH_ID_STOR_ROM) {
+		/* special case: CD-ROM is assumed always accessible */
+		dev->accessible_regions = 1;
+		return 0;
+	}
+
+	error = -EPERM;
+	for (i = 0; i < dev->num_regions; i++) {
+		dev_dbg(&dev->sbd.core,
+			"%s:%u: checking accessibility of region %u\n",
+			__func__, __LINE__, i);
+
+		dev->region_idx = i;
+		res = ps3stor_read_write_sectors(dev, dev->bounce_lpar, 0, 1,
+						 0);
+		if (res) {
+			dev_dbg(&dev->sbd.core, "%s:%u: read failed, "
+				"region %u is not accessible\n", __func__,
+				__LINE__, i);
+			continue;
+		}
+
+		dev_dbg(&dev->sbd.core, "%s:%u: region %u is accessible\n",
+			__func__, __LINE__, i);
+		set_bit(i, &dev->accessible_regions);
+
+		/* We can access at least one region */
+		error = 0;
+	}
+	if (error)
+		return error;
+
+	n = hweight_long(dev->accessible_regions);
+	if (n > 1)
+		dev_info(&dev->sbd.core,
+			 "%s:%u: %lu accessible regions found. Only the first "
+			 "one will be used",
+			 __func__, __LINE__, n);
+	dev->region_idx = __ffs(dev->accessible_regions);
+	dev_info(&dev->sbd.core,
+		 "First accessible region has index %u start %lu size %lu\n",
+		 dev->region_idx, dev->regions[dev->region_idx].start,
+		 dev->regions[dev->region_idx].size);
+
+	return 0;
+}
+
+
+/**
+ *	ps3stor_setup - Setup a storage device before use
+ *	@dev: Pointer to a struct ps3_storage_device
+ *	@handler: Pointer to an interrupt handler
+ *
+ *	Returns 0 for success, or an error code
+ */
+int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
+{
+	int error, res, alignment;
+	enum ps3_dma_page_size page_size;
+
+	error = ps3_open_hv_device(&dev->sbd);
+	if (error) {
+		dev_err(&dev->sbd.core,
+			"%s:%u: ps3_open_hv_device failed %d\n", __func__,
+			__LINE__, error);
+		goto fail;
+	}
+
+	error = ps3_sb_event_receive_port_setup(&dev->sbd, PS3_BINDING_CPU_ANY,
+						&dev->irq);
+	if (error) {
+		dev_err(&dev->sbd.core,
+			"%s:%u: ps3_sb_event_receive_port_setup failed %d\n",
+		       __func__, __LINE__, error);
+		goto fail_close_device;
+	}
+
+	error = request_irq(dev->irq, handler, IRQF_DISABLED,
+			    dev->sbd.core.driver->name, dev);
+	if (error) {
+		dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n",
+			__func__, __LINE__, error);
+		goto fail_sb_event_receive_port_destroy;
+	}
+
+	alignment = min(__ffs(dev->bounce_size),
+			__ffs((unsigned long)dev->bounce_buf));
+	if (alignment < 12) {
+		dev_err(&dev->sbd.core,
+			"%s:%u: bounce buffer not aligned (%lx at 0x%p)\n",
+			__func__, __LINE__, dev->bounce_size, dev->bounce_buf);
+		error = -EINVAL;
+		goto fail_free_irq;
+	} else if (alignment < 16)
+		page_size = PS3_DMA_4K;
+	else
+		page_size = PS3_DMA_64K;
+	dev->sbd.d_region = &dev->dma_region;
+	ps3_dma_region_init(&dev->sbd, &dev->dma_region, page_size,
+			    PS3_DMA_OTHER, dev->bounce_buf, dev->bounce_size);
+	res = ps3_dma_region_create(&dev->dma_region);
+	if (res) {
+		dev_err(&dev->sbd.core, "%s:%u: cannot create DMA region\n",
+			__func__, __LINE__);
+		error = -ENOMEM;
+		goto fail_free_irq;
+	}
+
+	dev->bounce_lpar = ps3_mm_phys_to_lpar(__pa(dev->bounce_buf));
+	dev->bounce_dma = dma_map_single(&dev->sbd.core, dev->bounce_buf,
+					 dev->bounce_size, DMA_BIDIRECTIONAL);
+	if (!dev->bounce_dma) {
+		dev_err(&dev->sbd.core, "%s:%u: map DMA region failed\n",
+			__func__, __LINE__);
+		error = -ENODEV;
+		goto fail_free_dma;
+	}
+
+	error = ps3stor_probe_access(dev);
+	if (error) {
+		dev_err(&dev->sbd.core, "%s:%u: No accessible regions found\n",
+			__func__, __LINE__);
+		goto fail_unmap_dma;
+	}
+	return 0;
+
+fail_unmap_dma:
+	dma_unmap_single(&dev->sbd.core, dev->bounce_dma, dev->bounce_size,
+			 DMA_BIDIRECTIONAL);
+fail_free_dma:
+	ps3_dma_region_free(&dev->dma_region);
+fail_free_irq:
+	free_irq(dev->irq, dev);
+fail_sb_event_receive_port_destroy:
+	ps3_sb_event_receive_port_destroy(&dev->sbd, dev->irq);
+fail_close_device:
+	ps3_close_hv_device(&dev->sbd);
+fail:
+	return error;
+}
+EXPORT_SYMBOL_GPL(ps3stor_setup);
+
+
+/**
+ *	ps3stor_teardown - Tear down a storage device after use
+ *	@dev: Pointer to a struct ps3_storage_device
+ */
+void ps3stor_teardown(struct ps3_storage_device *dev)
+{
+	int error;
+
+	dma_unmap_single(&dev->sbd.core, dev->bounce_dma, dev->bounce_size,
+			 DMA_BIDIRECTIONAL);
+	ps3_dma_region_free(&dev->dma_region);
+
+	free_irq(dev->irq, dev);
+
+	error = ps3_sb_event_receive_port_destroy(&dev->sbd, dev->irq);
+	if (error)
+		dev_err(&dev->sbd.core,
+			"%s:%u: destroy event receive port failed %d\n",
+			__func__, __LINE__, error);
+
+	error = ps3_close_hv_device(&dev->sbd);
+	if (error)
+		dev_err(&dev->sbd.core,
+			"%s:%u: ps3_close_hv_device failed %d\n", __func__,
+			__LINE__, error);
+}
+EXPORT_SYMBOL_GPL(ps3stor_teardown);
+
+
+/**
+ *	ps3stor_read_write_sectors - read/write from/to a storage device
+ *	@dev: Pointer to a struct ps3_storage_device
+ *	@lpar: HV logical partition address
+ *	@start_sector: First sector to read/write
+ *	@sectors: Number of sectors to read/write
+ *	@write: Flag indicating write (non-zero) or read (zero)
+ *
+ *	Returns 0 for success, -1 in case of failure to submit the command, or
+ *	an LV1 status value in case of other errors
+ */
+u64 ps3stor_read_write_sectors(struct ps3_storage_device *dev, u64 lpar,
+			       u64 start_sector, u64 sectors, int write)
+{
+	unsigned int region_id = dev->regions[dev->region_idx].id;
+	const char *op = write ? "write" : "read";
+	int res;
+
+	dev_dbg(&dev->sbd.core, "%s:%u: %s %lu sectors starting at %lu\n",
+		__func__, __LINE__, op, sectors, start_sector);
+
+	init_completion(&dev->done);
+	res = write ? lv1_storage_write(dev->sbd.dev_id, region_id,
+					start_sector, sectors, 0, lpar,
+					&dev->tag)
+		    : lv1_storage_read(dev->sbd.dev_id, region_id,
+				       start_sector, sectors, 0, lpar,
+				       &dev->tag);
+	if (res) {
+		dev_dbg(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
+			__LINE__, op, res);
+		return -1;
+	}
+
+	wait_for_completion(&dev->done);
+	if (dev->lv1_status) {
+		dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__,
+			__LINE__, op, dev->lv1_status);
+		return dev->lv1_status;
+	}
+
+	dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__, __LINE__,
+		op);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ps3stor_read_write_sectors);
+
+
+/**
+ *	ps3stor_send_command - send a device command to a storage device
+ *	@dev: Pointer to a struct ps3_storage_device
+ *	@cmd: Command number
+ *	@arg1: First command argument
+ *	@arg2: Second command argument
+ *	@arg3: Third command argument
+ *	@arg4: Fourth command argument
+ *
+ *	Returns 0 for success, -1 in case of failure to submit the command, or
+ *	an LV1 status value in case of other errors
+ */
+u64 ps3stor_send_command(struct ps3_storage_device *dev, u64 cmd, u64 arg1,
+			 u64 arg2, u64 arg3, u64 arg4)
+{
+	int res;
+
+	dev_dbg(&dev->sbd.core, "%s:%u: send device command 0x%lx\n", __func__,
+		__LINE__, cmd);
+
+	init_completion(&dev->done);
+
+	res = lv1_storage_send_device_command(dev->sbd.dev_id, cmd, arg1,
+					      arg2, arg3, arg4, &dev->tag);
+	if (res) {
+		dev_err(&dev->sbd.core,
+			"%s:%u: send_device_command 0x%lx failed %d\n",
+			__func__, __LINE__, cmd, res);
+		return -1;
+	}
+
+	wait_for_completion(&dev->done);
+	if (dev->lv1_status) {
+		dev_dbg(&dev->sbd.core, "%s:%u: command 0x%lx failed 0x%lx\n",
+			__func__, __LINE__, cmd, dev->lv1_status);
+		return dev->lv1_status;
+	}
+
+	dev_dbg(&dev->sbd.core, "%s:%u: command 0x%lx completed\n", __func__,
+		__LINE__, cmd);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ps3stor_send_command);
+
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PS3 Storage Bus Library");
+MODULE_AUTHOR("Sony Corporation");
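
For orientation: the helpers exported above (ps3stor_setup(), ps3stor_read_write_sectors(), ps3stor_send_command(), ps3stor_teardown()) are meant to be called by the individual PS3 storage drivers.  The fragment below is a minimal sketch of that calling sequence and is not part of the patch; the example_* names and the interrupt handler body are hypothetical, and lv1_storage_get_async_status() is assumed to be the usual way a handler fetches the completion status for dev->tag.

#include <linux/completion.h>
#include <linux/interrupt.h>

#include <asm/lv1call.h>
#include <asm/ps3stor.h>

/* Hypothetical handler: fetch the LV1 async status and complete the wait. */
static irqreturn_t example_interrupt(int irq, void *data)
{
	struct ps3_storage_device *dev = data;
	u64 tag, status;
	int res;

	res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
	if (res || tag != dev->tag)
		return IRQ_HANDLED;	/* spurious or not our request */

	dev->lv1_status = status;
	complete(&dev->done);
	return IRQ_HANDLED;
}

/* Hypothetical caller: set up the device, read sector 0, tear down again. */
static int example_read_first_sector(struct ps3_storage_device *dev)
{
	u64 status;
	int error;

	error = ps3stor_setup(dev, example_interrupt);
	if (error)
		return error;

	/* Reads into the bounce buffer mapped by ps3stor_setup(). */
	status = ps3stor_read_write_sectors(dev, dev->bounce_lpar, 0, 1, 0);

	ps3stor_teardown(dev);
	return status ? -EIO : 0;
}
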
diff --git a/drivers/ps3/sys-manager-core.c b/drivers/ps3/sys-manager-core.c
new file mode 100644
index 0000000..31648f7
--- /dev/null
+++ b/drivers/ps3/sys-manager-core.c
@@ -0,0 +1,68 @@
+/*
+ *  PS3 System Manager core.
+ *
+ *  Copyright (C) 2007 Sony Computer Entertainment Inc.
+ *  Copyright 2007 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <asm/ps3.h>
+
+/**
+ * Statically linked routines that allow late binding of a loaded sys-manager
+ * module.
+ */
+
+static struct ps3_sys_manager_ops ps3_sys_manager_ops;
+
+/**
+ * ps3_sys_manager_register_ops - Bind ps3_sys_manager_ops to a module.
+ * @ops: struct ps3_sys_manager_ops.
+ *
+ * To be called from ps3_sys_manager_probe() and ps3_sys_manager_remove() to
+ * register callback ops for power control.  Copies data to the static
+ * variable ps3_sys_manager_ops.
+ */
+
+void ps3_sys_manager_register_ops(const struct ps3_sys_manager_ops *ops)
+{
+	BUG_ON(!ops);
+	BUG_ON(!ops->dev);
+	ps3_sys_manager_ops = ops ? *ops : ps3_sys_manager_ops;
+}
+EXPORT_SYMBOL_GPL(ps3_sys_manager_register_ops);
+
+void ps3_sys_manager_power_off(void)
+{
+	if (ps3_sys_manager_ops.power_off)
+		ps3_sys_manager_ops.power_off(ps3_sys_manager_ops.dev);
+
+	printk(KERN_EMERG "System Halted, OK to turn off power\n");
+	local_irq_disable();
+	while (1)
+		(void)0;
+}
+
+void ps3_sys_manager_restart(void)
+{
+	if (ps3_sys_manager_ops.restart)
+		ps3_sys_manager_ops.restart(ps3_sys_manager_ops.dev);
+
+	printk(KERN_EMERG "System Halted, OK to turn off power\n");
+	local_irq_disable();
+	while (1)
+		(void)0;
+}
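
ps3_sys_manager_power_off() and ps3_sys_manager_restart() above dispatch through whatever ops a loaded system-manager module has registered; ps3_sys_manager_register_ops() copies the structure, so callers may pass a stack-allocated ops.  A minimal, illustrative sketch of the module side follows (the example_* names are hypothetical; the real registration is done by drivers/ps3/sys-manager.c further down in this merge, and struct ps3_sys_manager_ops is assumed to be declared in <asm/ps3.h>, as the include above suggests).

#include <asm/ps3.h>

/* Hypothetical callbacks; the real ones never return (see sys-manager.c). */
static void example_power_off(struct ps3_system_bus_device *dev)
{
	/* Tell the system manager to shut this lpar down. */
}

static void example_restart(struct ps3_system_bus_device *dev)
{
	/* Ask the system manager for an lpar reboot. */
}

static int example_probe(struct ps3_system_bus_device *dev)
{
	struct ps3_sys_manager_ops ops = {
		.power_off = example_power_off,
		.restart = example_restart,
		.dev = dev,
	};

	/* The ops structure is copied, so it may live on the stack. */
	ps3_sys_manager_register_ops(&ops);
	return 0;
}
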
diff --git a/drivers/ps3/sys-manager.c b/drivers/ps3/sys-manager.c
index 3aa2b0d..8461b08 100644
--- a/drivers/ps3/sys-manager.c
+++ b/drivers/ps3/sys-manager.c
@@ -35,7 +35,7 @@
 /**
  * ps3_sys_manager - PS3 system manager driver.
  *
- * The system manager provides an asyncronous system event notification
+ * The system manager provides an asynchronous system event notification
  * mechanism for reporting events like thermal alert and button presses to
  * guests.  It also provides support to control system shutdown and startup.
  *
@@ -52,6 +52,7 @@
  * @size: Header size in bytes, curently 16.
  * @payload_size: Message payload size in bytes.
  * @service_id: Message type, one of enum ps3_sys_manager_service_id.
+ * @request_tag: Unique number to identify reply.
  */
 
 struct ps3_sys_manager_header {
@@ -61,29 +62,49 @@
 	u16 reserved_1;
 	u32 payload_size;
 	u16 service_id;
-	u16 reserved_2[3];
+	u16 reserved_2;
+	u32 request_tag;
 };
 
+#define dump_sm_header(_h) _dump_sm_header(_h, __func__, __LINE__)
+static void __maybe_unused _dump_sm_header(
+	const struct ps3_sys_manager_header *h, const char *func, int line)
+{
+	pr_debug("%s:%d: version:      %xh\n", func, line, h->version);
+	pr_debug("%s:%d: size:         %xh\n", func, line, h->size);
+	pr_debug("%s:%d: payload_size: %xh\n", func, line, h->payload_size);
+	pr_debug("%s:%d: service_id:   %xh\n", func, line, h->service_id);
+	pr_debug("%s:%d: request_tag:  %xh\n", func, line, h->request_tag);
+}
+
 /**
- * @PS3_SM_RX_MSG_LEN - System manager received message length.
+ * @PS3_SM_RX_MSG_LEN_MIN - Shortest received message length.
+ * @PS3_SM_RX_MSG_LEN_MAX - Longest received message length.
  *
- * Currently all messages received from the system manager are the same length
- * (16 bytes header + 16 bytes payload = 32 bytes).  This knowlege is used to
- * simplify the logic.
+ * Currently all messages received from the system manager are either
+ * (16 bytes header + 8 bytes payload = 24 bytes) or (16 bytes header
+ * + 16 bytes payload = 32 bytes).  This knowledge is used to simplify
+ * the logic.
  */
 
 enum {
-	PS3_SM_RX_MSG_LEN = 32,
+	PS3_SM_RX_MSG_LEN_MIN = 24,
+	PS3_SM_RX_MSG_LEN_MAX = 32,
 };
 
 /**
  * enum ps3_sys_manager_service_id - Message header service_id.
- * @PS3_SM_SERVICE_ID_REQUEST:      guest --> sys_manager.
- * @PS3_SM_SERVICE_ID_COMMAND:      guest <-- sys_manager.
- * @PS3_SM_SERVICE_ID_RESPONSE:     guest --> sys_manager.
- * @PS3_SM_SERVICE_ID_SET_ATTR:     guest --> sys_manager.
- * @PS3_SM_SERVICE_ID_EXTERN_EVENT: guest <-- sys_manager.
- * @PS3_SM_SERVICE_ID_SET_NEXT_OP:  guest --> sys_manager.
+ * @PS3_SM_SERVICE_ID_REQUEST:       guest --> sys_manager.
+ * @PS3_SM_SERVICE_ID_REQUEST_ERROR: guest <-- sys_manager.
+ * @PS3_SM_SERVICE_ID_COMMAND:       guest <-- sys_manager.
+ * @PS3_SM_SERVICE_ID_RESPONSE:      guest --> sys_manager.
+ * @PS3_SM_SERVICE_ID_SET_ATTR:      guest --> sys_manager.
+ * @PS3_SM_SERVICE_ID_EXTERN_EVENT:  guest <-- sys_manager.
+ * @PS3_SM_SERVICE_ID_SET_NEXT_OP:   guest --> sys_manager.
+ *
+ * PS3_SM_SERVICE_ID_REQUEST_ERROR is returned for invalid data values in a
+ * PS3_SM_SERVICE_ID_REQUEST message.  It also seems to be returned when
+ * a REQUEST message is sent at the wrong time.
  */
 
 enum ps3_sys_manager_service_id {
@@ -93,6 +114,7 @@
 	PS3_SM_SERVICE_ID_COMMAND = 3,
 	PS3_SM_SERVICE_ID_EXTERN_EVENT = 4,
 	PS3_SM_SERVICE_ID_SET_NEXT_OP = 5,
+	PS3_SM_SERVICE_ID_REQUEST_ERROR = 6,
 	PS3_SM_SERVICE_ID_SET_ATTR = 8,
 };
 
@@ -185,11 +207,21 @@
 };
 
 /**
+ * ps3_sm_force_power_off - Poweroff helper.
+ *
+ * A global variable used to force a poweroff when the power button has
+ * been pressed irrespective of how init handles the ctrl_alt_del signal.
+ *
+ */
+
+static unsigned int ps3_sm_force_power_off;
+
+/**
  * ps3_sys_manager_write - Helper to write a two part message to the vuart.
  *
  */
 
-static int ps3_sys_manager_write(struct ps3_vuart_port_device *dev,
+static int ps3_sys_manager_write(struct ps3_system_bus_device *dev,
 	const struct ps3_sys_manager_header *header, const void *payload)
 {
 	int result;
@@ -213,15 +245,10 @@
  *
  */
 
-static int ps3_sys_manager_send_attr(struct ps3_vuart_port_device *dev,
+static int ps3_sys_manager_send_attr(struct ps3_system_bus_device *dev,
 	enum ps3_sys_manager_attr attr)
 {
-	static const struct ps3_sys_manager_header header = {
-		.version = 1,
-		.size = 16,
-		.payload_size = 16,
-		.service_id = PS3_SM_SERVICE_ID_SET_ATTR,
-	};
+	struct ps3_sys_manager_header header;
 	struct {
 		u8 version;
 		u8 reserved_1[3];
@@ -232,6 +259,12 @@
 
 	dev_dbg(&dev->core, "%s:%d: %xh\n", __func__, __LINE__, attr);
 
+	memset(&header, 0, sizeof(header));
+	header.version = 1;
+	header.size = 16;
+	header.payload_size = 16;
+	header.service_id = PS3_SM_SERVICE_ID_SET_ATTR;
+
 	memset(&payload, 0, sizeof(payload));
 	payload.version = 1;
 	payload.attribute = attr;
@@ -245,16 +278,11 @@
  * Tell the system manager what to do after this lpar is destroyed.
  */
 
-static int ps3_sys_manager_send_next_op(struct ps3_vuart_port_device *dev,
+static int ps3_sys_manager_send_next_op(struct ps3_system_bus_device *dev,
 	enum ps3_sys_manager_next_op op,
 	enum ps3_sys_manager_wake_source wake_source)
 {
-	static const struct ps3_sys_manager_header header = {
-		.version = 1,
-		.size = 16,
-		.payload_size = 16,
-		.service_id = PS3_SM_SERVICE_ID_SET_NEXT_OP,
-	};
+	struct ps3_sys_manager_header header;
 	struct {
 		u8 version;
 		u8 type;
@@ -268,6 +296,12 @@
 
 	dev_dbg(&dev->core, "%s:%d: (%xh)\n", __func__, __LINE__, op);
 
+	memset(&header, 0, sizeof(header));
+	header.version = 1;
+	header.size = 16;
+	header.payload_size = 16;
+	header.service_id = PS3_SM_SERVICE_ID_SET_NEXT_OP;
+
 	memset(&payload, 0, sizeof(payload));
 	payload.version = 3;
 	payload.type = op;
@@ -286,32 +320,35 @@
  * the command is then communicated back to the system manager with a response
  * message.
  *
- * Currently, the only supported request it the 'shutdown self' request.
+ * Currently, the only supported request is the 'shutdown self' request.
  */
 
-static int ps3_sys_manager_send_request_shutdown(struct ps3_vuart_port_device *dev)
+static int ps3_sys_manager_send_request_shutdown(
+	struct ps3_system_bus_device *dev)
 {
-	static const struct ps3_sys_manager_header header = {
-		.version = 1,
-		.size = 16,
-		.payload_size = 16,
-		.service_id = PS3_SM_SERVICE_ID_REQUEST,
-	};
+	struct ps3_sys_manager_header header;
 	struct {
 		u8 version;
 		u8 type;
 		u8 gos_id;
 		u8 reserved_1[13];
-	} static const payload = {
-		.version = 1,
-		.type = 1, /* shutdown */
-		.gos_id = 0, /* self */
-	};
+	} payload;
 
 	BUILD_BUG_ON(sizeof(payload) != 16);
 
 	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
 
+	memset(&header, 0, sizeof(header));
+	header.version = 1;
+	header.size = 16;
+	header.payload_size = 16;
+	header.service_id = PS3_SM_SERVICE_ID_REQUEST;
+
+	memset(&payload, 0, sizeof(payload));
+	payload.version = 1;
+	payload.type = 1; /* shutdown */
+	payload.gos_id = 0; /* self */
+
 	return ps3_sys_manager_write(dev, &header, &payload);
 }
 
@@ -323,15 +360,10 @@
  * failure of a command sent by the system manager.
  */
 
-static int ps3_sys_manager_send_response(struct ps3_vuart_port_device *dev,
+static int ps3_sys_manager_send_response(struct ps3_system_bus_device *dev,
 	u64 status)
 {
-	static const struct ps3_sys_manager_header header = {
-		.version = 1,
-		.size = 16,
-		.payload_size = 16,
-		.service_id = PS3_SM_SERVICE_ID_RESPONSE,
-	};
+	struct ps3_sys_manager_header header;
 	struct {
 		u8 version;
 		u8 reserved_1[3];
@@ -344,6 +376,12 @@
 	dev_dbg(&dev->core, "%s:%d: (%s)\n", __func__, __LINE__,
 		(status ? "nak" : "ack"));
 
+	memset(&header, 0, sizeof(header));
+	header.version = 1;
+	header.size = 16;
+	header.payload_size = 16;
+	header.service_id = PS3_SM_SERVICE_ID_RESPONSE;
+
 	memset(&payload, 0, sizeof(payload));
 	payload.version = 1;
 	payload.status = status;
@@ -356,7 +394,7 @@
  *
  */
 
-static int ps3_sys_manager_handle_event(struct ps3_vuart_port_device *dev)
+static int ps3_sys_manager_handle_event(struct ps3_system_bus_device *dev)
 {
 	int result;
 	struct {
@@ -370,7 +408,7 @@
 	BUILD_BUG_ON(sizeof(event) != 16);
 
 	result = ps3_vuart_read(dev, &event, sizeof(event));
-	BUG_ON(result);
+	BUG_ON(result && "need to retry here");
 
 	if (event.version != 1) {
 		dev_dbg(&dev->core, "%s:%d: unsupported event version (%u)\n",
@@ -382,11 +420,34 @@
 	case PS3_SM_EVENT_POWER_PRESSED:
 		dev_dbg(&dev->core, "%s:%d: POWER_PRESSED\n",
 			__func__, __LINE__);
+		ps3_sm_force_power_off = 1;
+		/*
+		 * A memory barrier is used here to sync memory since
+		 * ps3_sys_manager_final_restart() could be called on
+		 * another cpu.
+		 */
+		wmb();
+		kill_cad_pid(SIGINT, 1); /* ctrl_alt_del */
 		break;
 	case PS3_SM_EVENT_POWER_RELEASED:
 		dev_dbg(&dev->core, "%s:%d: POWER_RELEASED (%u ms)\n",
 			__func__, __LINE__, event.value);
-		kill_cad_pid(SIGINT, 1);
+		break;
+	case PS3_SM_EVENT_RESET_PRESSED:
+		dev_dbg(&dev->core, "%s:%d: RESET_PRESSED\n",
+			__func__, __LINE__);
+		ps3_sm_force_power_off = 0;
+		/*
+		 * A memory barrier is used here to sync memory since
+		 * ps3_sys_manager_final_restart() could be called on
+		 * another cpu.
+		 */
+		wmb();
+		kill_cad_pid(SIGINT, 1); /* ctrl_alt_del */
+		break;
+	case PS3_SM_EVENT_RESET_RELEASED:
+		dev_dbg(&dev->core, "%s:%d: RESET_RELEASED (%u ms)\n",
+			__func__, __LINE__, event.value);
 		break;
 	case PS3_SM_EVENT_THERMAL_ALERT:
 		dev_dbg(&dev->core, "%s:%d: THERMAL_ALERT (zone %u)\n",
@@ -411,7 +472,7 @@
  * The system manager sends this in reply to a 'request' message from the guest.
  */
 
-static int ps3_sys_manager_handle_cmd(struct ps3_vuart_port_device *dev)
+static int ps3_sys_manager_handle_cmd(struct ps3_system_bus_device *dev)
 {
 	int result;
 	struct {
@@ -425,6 +486,7 @@
 	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
 
 	result = ps3_vuart_read(dev, &cmd, sizeof(cmd));
+	BUG_ON(result && "need to retry here");
 
 	if(result)
 		return result;
@@ -448,9 +510,10 @@
 /**
  * ps3_sys_manager_handle_msg - First stage msg handler.
  *
+ * Can be called directly to manually poll the vuart and pump the message handler.
  */
 
-static int ps3_sys_manager_handle_msg(struct ps3_vuart_port_device *dev)
+static int ps3_sys_manager_handle_msg(struct ps3_system_bus_device *dev)
 {
 	int result;
 	struct ps3_sys_manager_header header;
@@ -464,12 +527,17 @@
 	if (header.version != 1) {
 		dev_dbg(&dev->core, "%s:%d: unsupported header version (%u)\n",
 			__func__, __LINE__, header.version);
+		dump_sm_header(&header);
 		goto fail_header;
 	}
 
 	BUILD_BUG_ON(sizeof(header) != 16);
-	BUG_ON(header.size != 16);
-	BUG_ON(header.payload_size != 16);
+
+	if (header.size != 16 || (header.payload_size != 8
+		&& header.payload_size != 16)) {
+		dump_sm_header(&header);
+		BUG();
+	}
 
 	switch (header.service_id) {
 	case PS3_SM_SERVICE_ID_EXTERN_EVENT:
@@ -478,6 +546,11 @@
 	case PS3_SM_SERVICE_ID_COMMAND:
 		dev_dbg(&dev->core, "%s:%d: COMMAND\n", __func__, __LINE__);
 		return ps3_sys_manager_handle_cmd(dev);
+	case PS3_SM_SERVICE_ID_REQUEST_ERROR:
+		dev_dbg(&dev->core, "%s:%d: REQUEST_ERROR\n", __func__,
+			__LINE__);
+		dump_sm_header(&header);
+		break;
 	default:
 		dev_dbg(&dev->core, "%s:%d: unknown service_id (%u)\n",
 			__func__, __LINE__, header.service_id);
@@ -494,69 +567,19 @@
 }
 
 /**
- * ps3_sys_manager_work - Asyncronous read handler.
+ * ps3_sys_manager_final_power_off - The final platform machine_power_off routine.
  *
- * Signaled when a complete message arrives at the vuart port.
- */
-
-static void ps3_sys_manager_work(struct work_struct *work)
-{
-	struct ps3_vuart_port_device *dev = ps3_vuart_work_to_port_device(work);
-
-	ps3_sys_manager_handle_msg(dev);
-	ps3_vuart_read_async(dev, ps3_sys_manager_work, PS3_SM_RX_MSG_LEN);
-}
-
-struct {
-	struct ps3_vuart_port_device *dev;
-} static drv_priv;
-
-/**
- * ps3_sys_manager_restart - The final platform machine_restart routine.
- *
- * This routine never returns.  The routine disables asyncronous vuart reads
+ * This routine never returns.  The routine disables asynchronous vuart reads
  * then spins calling ps3_sys_manager_handle_msg() to receive and acknowledge
  * the shutdown command sent from the system manager.  Soon after the
  * acknowledgement is sent the lpar is destroyed by the HV.  This routine
- * should only be called from ps3_restart().
+ * should only be called from ps3_power_off() through
+ * ps3_sys_manager_ops.power_off.
  */
 
-void ps3_sys_manager_restart(void)
+static void ps3_sys_manager_final_power_off(struct ps3_system_bus_device *dev)
 {
-	struct ps3_vuart_port_device *dev = drv_priv.dev;
-
-	BUG_ON(!drv_priv.dev);
-
-	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
-
-	ps3_vuart_cancel_async(dev);
-
-	ps3_sys_manager_send_attr(dev, 0);
-	ps3_sys_manager_send_next_op(dev, PS3_SM_NEXT_OP_LPAR_REBOOT,
-		PS3_SM_WAKE_DEFAULT);
-	ps3_sys_manager_send_request_shutdown(dev);
-
-	printk(KERN_EMERG "System Halted, OK to turn off power\n");
-
-	while(1)
-		ps3_sys_manager_handle_msg(dev);
-}
-
-/**
- * ps3_sys_manager_power_off - The final platform machine_power_off routine.
- *
- * This routine never returns.  The routine disables asyncronous vuart reads
- * then spins calling ps3_sys_manager_handle_msg() to receive and acknowledge
- * the shutdown command sent from the system manager.  Soon after the
- * acknowledgement is sent the lpar is destroyed by the HV.  This routine
- * should only be called from ps3_power_off().
- */
-
-void ps3_sys_manager_power_off(void)
-{
-	struct ps3_vuart_port_device *dev = drv_priv.dev;
-
-	BUG_ON(!drv_priv.dev);
+	BUG_ON(!dev);
 
 	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
 
@@ -572,31 +595,97 @@
 		ps3_sys_manager_handle_msg(dev);
 }
 
-static int ps3_sys_manager_probe(struct ps3_vuart_port_device *dev)
+/**
+ * ps3_sys_manager_final_restart - The final platform machine_restart routine.
+ *
+ * This routine never returns.  The routine disables asynchronous vuart reads
+ * then spins calling ps3_sys_manager_handle_msg() to receive and acknowledge
+ * the shutdown command sent from the system manager.  Soon after the
+ * acknowledgement is sent the lpar is destroyed by the HV.  This routine
+ * should only be called from ps3_restart() through ps3_sys_manager_ops.restart.
+ */
+
+static void ps3_sys_manager_final_restart(struct ps3_system_bus_device *dev)
 {
-	int result;
+	BUG_ON(!dev);
 
 	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
 
-	BUG_ON(drv_priv.dev);
-	drv_priv.dev = dev;
+	/* Check if we got here via a power button event. */
+
+	if (ps3_sm_force_power_off) {
+		dev_dbg(&dev->core, "%s:%d: forcing poweroff\n",
+			__func__, __LINE__);
+		ps3_sys_manager_final_power_off(dev);
+	}
+
+	ps3_vuart_cancel_async(dev);
+
+	ps3_sys_manager_send_attr(dev, 0);
+	ps3_sys_manager_send_next_op(dev, PS3_SM_NEXT_OP_LPAR_REBOOT,
+		PS3_SM_WAKE_DEFAULT);
+	ps3_sys_manager_send_request_shutdown(dev);
+
+	printk(KERN_EMERG "System Halted, OK to turn off power\n");
+
+	while(1)
+		ps3_sys_manager_handle_msg(dev);
+}
+
+/**
+ * ps3_sys_manager_work - Asynchronous read handler.
+ *
+ * Signaled when PS3_SM_RX_MSG_LEN_MIN bytes arrive at the vuart port.
+ */
+
+static void ps3_sys_manager_work(struct ps3_system_bus_device *dev)
+{
+	ps3_sys_manager_handle_msg(dev);
+	ps3_vuart_read_async(dev, PS3_SM_RX_MSG_LEN_MIN);
+}
+
+static int ps3_sys_manager_probe(struct ps3_system_bus_device *dev)
+{
+	int result;
+	struct ps3_sys_manager_ops ops;
+
+	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+
+	ops.power_off = ps3_sys_manager_final_power_off;
+	ops.restart = ps3_sys_manager_final_restart;
+	ops.dev = dev;
+
+	/* ps3_sys_manager_register_ops copies ops. */
+
+	ps3_sys_manager_register_ops(&ops);
 
 	result = ps3_sys_manager_send_attr(dev, PS3_SM_ATTR_ALL);
 	BUG_ON(result);
 
-	result = ps3_vuart_read_async(dev, ps3_sys_manager_work,
-		PS3_SM_RX_MSG_LEN);
+	result = ps3_vuart_read_async(dev, PS3_SM_RX_MSG_LEN_MIN);
 	BUG_ON(result);
 
 	return result;
 }
 
+static int ps3_sys_manager_remove(struct ps3_system_bus_device *dev)
+{
+	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+	return 0;
+}
+
+static void ps3_sys_manager_shutdown(struct ps3_system_bus_device *dev)
+{
+	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+}
+
 static struct ps3_vuart_port_driver ps3_sys_manager = {
-	.match_id = PS3_MATCH_ID_SYSTEM_MANAGER,
-	.core = {
-		.name = "ps3_sys_manager",
-	},
+	.core.match_id = PS3_MATCH_ID_SYSTEM_MANAGER,
+	.core.core.name = "ps3_sys_manager",
 	.probe = ps3_sys_manager_probe,
+	.remove = ps3_sys_manager_remove,
+	.shutdown = ps3_sys_manager_shutdown,
+	.work = ps3_sys_manager_work,
 };
 
 static int __init ps3_sys_manager_init(void)
@@ -608,3 +697,6 @@
 }
 
 module_init(ps3_sys_manager_init);
+/* Module remove not supported. */
+
+MODULE_ALIAS(PS3_MODULE_ALIAS_SYSTEM_MANAGER);
diff --git a/drivers/ps3/vuart.c b/drivers/ps3/vuart.c
index ec2d36a..bea25a1 100644
--- a/drivers/ps3/vuart.c
+++ b/drivers/ps3/vuart.c
@@ -71,6 +71,34 @@
 };
 
 /**
+ * struct ps3_vuart_port_priv - private vuart device data.
+ */
+
+struct ps3_vuart_port_priv {
+	u64 interrupt_mask;
+
+	struct {
+		spinlock_t lock;
+		struct list_head head;
+	} tx_list;
+	struct {
+		struct ps3_vuart_work work;
+		unsigned long bytes_held;
+		spinlock_t lock;
+		struct list_head head;
+	} rx_list;
+	struct ps3_vuart_stats stats;
+};
+
+static struct ps3_vuart_port_priv *to_port_priv(
+	struct ps3_system_bus_device *dev)
+{
+	BUG_ON(!dev);
+	BUG_ON(!dev->driver_priv);
+	return (struct ps3_vuart_port_priv *)dev->driver_priv;
+}
+
+/**
  * struct ports_bmp - bitmap indicating ports needing service.
  *
  * A 256 bit read only bitmap indicating ports needing service.  Do not write
@@ -83,31 +111,14 @@
 } __attribute__ ((aligned (32)));
 
 #define dump_ports_bmp(_b) _dump_ports_bmp(_b, __func__, __LINE__)
-static void __attribute__ ((unused)) _dump_ports_bmp(
+static void __maybe_unused _dump_ports_bmp(
 	const struct ports_bmp* bmp, const char* func, int line)
 {
 	pr_debug("%s:%d: ports_bmp: %016lxh\n", func, line, bmp->status);
 }
 
-static int ps3_vuart_match_id_to_port(enum ps3_match_id match_id,
-	unsigned int *port_number)
-{
-	switch(match_id) {
-	case PS3_MATCH_ID_AV_SETTINGS:
-		*port_number = 0;
-		return 0;
-	case PS3_MATCH_ID_SYSTEM_MANAGER:
-		*port_number = 2;
-		return 0;
-	default:
-		WARN_ON(1);
-		*port_number = UINT_MAX;
-		return -EINVAL;
-	};
-}
-
 #define dump_port_params(_b) _dump_port_params(_b, __func__, __LINE__)
-static void __attribute__ ((unused)) _dump_port_params(unsigned int port_number,
+static void __maybe_unused _dump_port_params(unsigned int port_number,
 	const char* func, int line)
 {
 #if defined(DEBUG)
@@ -144,14 +155,14 @@
 	unsigned long tx;
 };
 
-int ps3_vuart_get_triggers(struct ps3_vuart_port_device *dev,
+int ps3_vuart_get_triggers(struct ps3_system_bus_device *dev,
 	struct vuart_triggers *trig)
 {
 	int result;
 	unsigned long size;
 	unsigned long val;
 
-	result = lv1_get_virtual_uart_param(dev->priv->port_number,
+	result = lv1_get_virtual_uart_param(dev->port_number,
 		PARAM_TX_TRIGGER, &trig->tx);
 
 	if (result) {
@@ -160,7 +171,7 @@
 		return result;
 	}
 
-	result = lv1_get_virtual_uart_param(dev->priv->port_number,
+	result = lv1_get_virtual_uart_param(dev->port_number,
 		PARAM_RX_BUF_SIZE, &size);
 
 	if (result) {
@@ -169,7 +180,7 @@
 		return result;
 	}
 
-	result = lv1_get_virtual_uart_param(dev->priv->port_number,
+	result = lv1_get_virtual_uart_param(dev->port_number,
 		PARAM_RX_TRIGGER, &val);
 
 	if (result) {
@@ -186,13 +197,13 @@
 	return result;
 }
 
-int ps3_vuart_set_triggers(struct ps3_vuart_port_device *dev, unsigned int tx,
+int ps3_vuart_set_triggers(struct ps3_system_bus_device *dev, unsigned int tx,
 	unsigned int rx)
 {
 	int result;
 	unsigned long size;
 
-	result = lv1_set_virtual_uart_param(dev->priv->port_number,
+	result = lv1_set_virtual_uart_param(dev->port_number,
 		PARAM_TX_TRIGGER, tx);
 
 	if (result) {
@@ -201,7 +212,7 @@
 		return result;
 	}
 
-	result = lv1_get_virtual_uart_param(dev->priv->port_number,
+	result = lv1_get_virtual_uart_param(dev->port_number,
 		PARAM_RX_BUF_SIZE, &size);
 
 	if (result) {
@@ -210,7 +221,7 @@
 		return result;
 	}
 
-	result = lv1_set_virtual_uart_param(dev->priv->port_number,
+	result = lv1_set_virtual_uart_param(dev->port_number,
 		PARAM_RX_TRIGGER, size - rx);
 
 	if (result) {
@@ -225,10 +236,12 @@
 	return result;
 }
 
-static int ps3_vuart_get_rx_bytes_waiting(struct ps3_vuart_port_device *dev,
+static int ps3_vuart_get_rx_bytes_waiting(struct ps3_system_bus_device *dev,
 	u64 *bytes_waiting)
 {
-	int result = lv1_get_virtual_uart_param(dev->priv->port_number,
+	int result;
+
+	result = lv1_get_virtual_uart_param(dev->port_number,
 		PARAM_RX_BYTES, bytes_waiting);
 
 	if (result)
@@ -240,17 +253,24 @@
 	return result;
 }
 
-static int ps3_vuart_set_interrupt_mask(struct ps3_vuart_port_device *dev,
+/**
+ * ps3_vuart_set_interrupt_mask - Enable/disable the port interrupt sources.
+ * @dev: The struct ps3_system_bus_device instance.
+ * @bmp: Logical OR of enum vuart_interrupt_mask values. A zero bit disables.
+ */
+
+static int ps3_vuart_set_interrupt_mask(struct ps3_system_bus_device *dev,
 	unsigned long mask)
 {
 	int result;
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
 
 	dev_dbg(&dev->core, "%s:%d: %lxh\n", __func__, __LINE__, mask);
 
-	dev->priv->interrupt_mask = mask;
+	priv->interrupt_mask = mask;
 
-	result = lv1_set_virtual_uart_param(dev->priv->port_number,
-		PARAM_INTERRUPT_MASK, dev->priv->interrupt_mask);
+	result = lv1_set_virtual_uart_param(dev->port_number,
+		PARAM_INTERRUPT_MASK, priv->interrupt_mask);
 
 	if (result)
 		dev_dbg(&dev->core, "%s:%d: interrupt_mask failed: %s\n",
@@ -259,79 +279,96 @@
 	return result;
 }
 
-static int ps3_vuart_get_interrupt_status(struct ps3_vuart_port_device *dev,
+static int ps3_vuart_get_interrupt_status(struct ps3_system_bus_device *dev,
 	unsigned long *status)
 {
+	int result;
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
 	u64 tmp;
-	int result = lv1_get_virtual_uart_param(dev->priv->port_number,
+
+	result = lv1_get_virtual_uart_param(dev->port_number,
 		PARAM_INTERRUPT_STATUS, &tmp);
 
 	if (result)
 		dev_dbg(&dev->core, "%s:%d: interrupt_status failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
 
-	*status = tmp & dev->priv->interrupt_mask;
+	*status = tmp & priv->interrupt_mask;
 
 	dev_dbg(&dev->core, "%s:%d: m %lxh, s %lxh, m&s %lxh\n",
-		__func__, __LINE__, dev->priv->interrupt_mask, tmp, *status);
+		__func__, __LINE__, priv->interrupt_mask, tmp, *status);
 
 	return result;
 }
 
-int ps3_vuart_enable_interrupt_tx(struct ps3_vuart_port_device *dev)
+int ps3_vuart_enable_interrupt_tx(struct ps3_system_bus_device *dev)
 {
-	return (dev->priv->interrupt_mask & INTERRUPT_MASK_TX) ? 0
-		: ps3_vuart_set_interrupt_mask(dev, dev->priv->interrupt_mask
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+
+	return (priv->interrupt_mask & INTERRUPT_MASK_TX) ? 0
+		: ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
 		| INTERRUPT_MASK_TX);
 }
 
-int ps3_vuart_enable_interrupt_rx(struct ps3_vuart_port_device *dev)
+int ps3_vuart_enable_interrupt_rx(struct ps3_system_bus_device *dev)
 {
-	return (dev->priv->interrupt_mask & INTERRUPT_MASK_RX) ? 0
-		: ps3_vuart_set_interrupt_mask(dev, dev->priv->interrupt_mask
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+
+	return (priv->interrupt_mask & INTERRUPT_MASK_RX) ? 0
+		: ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
 		| INTERRUPT_MASK_RX);
 }
 
-int ps3_vuart_enable_interrupt_disconnect(struct ps3_vuart_port_device *dev)
+int ps3_vuart_enable_interrupt_disconnect(struct ps3_system_bus_device *dev)
 {
-	return (dev->priv->interrupt_mask & INTERRUPT_MASK_DISCONNECT) ? 0
-		: ps3_vuart_set_interrupt_mask(dev, dev->priv->interrupt_mask
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+
+	return (priv->interrupt_mask & INTERRUPT_MASK_DISCONNECT) ? 0
+		: ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
 		| INTERRUPT_MASK_DISCONNECT);
 }
 
-int ps3_vuart_disable_interrupt_tx(struct ps3_vuart_port_device *dev)
+int ps3_vuart_disable_interrupt_tx(struct ps3_system_bus_device *dev)
 {
-	return (dev->priv->interrupt_mask & INTERRUPT_MASK_TX)
-		? ps3_vuart_set_interrupt_mask(dev, dev->priv->interrupt_mask
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+
+	return (priv->interrupt_mask & INTERRUPT_MASK_TX)
+		? ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
 		& ~INTERRUPT_MASK_TX) : 0;
 }
 
-int ps3_vuart_disable_interrupt_rx(struct ps3_vuart_port_device *dev)
+int ps3_vuart_disable_interrupt_rx(struct ps3_system_bus_device *dev)
 {
-	return (dev->priv->interrupt_mask & INTERRUPT_MASK_RX)
-		? ps3_vuart_set_interrupt_mask(dev, dev->priv->interrupt_mask
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+
+	return (priv->interrupt_mask & INTERRUPT_MASK_RX)
+		? ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
 		& ~INTERRUPT_MASK_RX) : 0;
 }
 
-int ps3_vuart_disable_interrupt_disconnect(struct ps3_vuart_port_device *dev)
+int ps3_vuart_disable_interrupt_disconnect(struct ps3_system_bus_device *dev)
 {
-	return (dev->priv->interrupt_mask & INTERRUPT_MASK_DISCONNECT)
-		? ps3_vuart_set_interrupt_mask(dev, dev->priv->interrupt_mask
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+
+	return (priv->interrupt_mask & INTERRUPT_MASK_DISCONNECT)
+		? ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
 		& ~INTERRUPT_MASK_DISCONNECT) : 0;
 }
 
 /**
  * ps3_vuart_raw_write - Low level write helper.
+ * @dev: The struct ps3_system_bus_device instance.
  *
  * Do not call ps3_vuart_raw_write directly, use ps3_vuart_write.
  */
 
-static int ps3_vuart_raw_write(struct ps3_vuart_port_device *dev,
+static int ps3_vuart_raw_write(struct ps3_system_bus_device *dev,
 	const void* buf, unsigned int bytes, unsigned long *bytes_written)
 {
 	int result;
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
 
-	result = lv1_write_virtual_uart(dev->priv->port_number,
+	result = lv1_write_virtual_uart(dev->port_number,
 		ps3_mm_phys_to_lpar(__pa(buf)), bytes, bytes_written);
 
 	if (result) {
@@ -340,28 +377,30 @@
 		return result;
 	}
 
-	dev->priv->stats.bytes_written += *bytes_written;
+	priv->stats.bytes_written += *bytes_written;
 
 	dev_dbg(&dev->core, "%s:%d: wrote %lxh/%xh=>%lxh\n", __func__, __LINE__,
-		*bytes_written, bytes, dev->priv->stats.bytes_written);
+		*bytes_written, bytes, priv->stats.bytes_written);
 
 	return result;
 }
 
 /**
  * ps3_vuart_raw_read - Low level read helper.
+ * @dev: The struct ps3_system_bus_device instance.
  *
  * Do not call ps3_vuart_raw_read directly, use ps3_vuart_read.
  */
 
-static int ps3_vuart_raw_read(struct ps3_vuart_port_device *dev, void* buf,
+static int ps3_vuart_raw_read(struct ps3_system_bus_device *dev, void *buf,
 	unsigned int bytes, unsigned long *bytes_read)
 {
 	int result;
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
 
 	dev_dbg(&dev->core, "%s:%d: %xh\n", __func__, __LINE__, bytes);
 
-	result = lv1_read_virtual_uart(dev->priv->port_number,
+	result = lv1_read_virtual_uart(dev->port_number,
 		ps3_mm_phys_to_lpar(__pa(buf)), bytes, bytes_read);
 
 	if (result) {
@@ -370,25 +409,27 @@
 		return result;
 	}
 
-	dev->priv->stats.bytes_read += *bytes_read;
+	priv->stats.bytes_read += *bytes_read;
 
 	dev_dbg(&dev->core, "%s:%d: read %lxh/%xh=>%lxh\n", __func__, __LINE__,
-		*bytes_read, bytes, dev->priv->stats.bytes_read);
+		*bytes_read, bytes, priv->stats.bytes_read);
 
 	return result;
 }
 
 /**
  * ps3_vuart_clear_rx_bytes - Discard bytes received.
+ * @dev: The struct ps3_system_bus_device instance.
  * @bytes: Max byte count to discard, zero = all pending.
  *
  * Used to clear pending rx interrupt source.  Will not block.
  */
 
-void ps3_vuart_clear_rx_bytes(struct ps3_vuart_port_device *dev,
+void ps3_vuart_clear_rx_bytes(struct ps3_system_bus_device *dev,
 	unsigned int bytes)
 {
 	int result;
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
 	u64 bytes_waiting;
 	void* tmp;
 
@@ -418,8 +459,9 @@
 
 	/* Don't include these bytes in the stats. */
 
-	dev->priv->stats.bytes_read -= bytes_waiting;
+	priv->stats.bytes_read -= bytes_waiting;
 }
+EXPORT_SYMBOL_GPL(ps3_vuart_clear_rx_bytes);
 
 /**
  * struct list_buffer - An element for a port device fifo buffer list.
@@ -435,6 +477,7 @@
 
 /**
  * ps3_vuart_write - the entry point for writing data to a port
+ * @dev: The struct ps3_system_bus_device instance.
  *
  * If the port is idle on entry as much of the incoming data is written to
  * the port as the port will accept.  Otherwise a list buffer is created
@@ -442,25 +485,26 @@
  * then enqueued for transmision via the transmit interrupt.
  */
 
-int ps3_vuart_write(struct ps3_vuart_port_device *dev, const void* buf,
+int ps3_vuart_write(struct ps3_system_bus_device *dev, const void *buf,
 	unsigned int bytes)
 {
 	static unsigned long dbg_number;
 	int result;
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
 	unsigned long flags;
 	struct list_buffer *lb;
 
 	dev_dbg(&dev->core, "%s:%d: %u(%xh) bytes\n", __func__, __LINE__,
 		bytes, bytes);
 
-	spin_lock_irqsave(&dev->priv->tx_list.lock, flags);
+	spin_lock_irqsave(&priv->tx_list.lock, flags);
 
-	if (list_empty(&dev->priv->tx_list.head)) {
+	if (list_empty(&priv->tx_list.head)) {
 		unsigned long bytes_written;
 
 		result = ps3_vuart_raw_write(dev, buf, bytes, &bytes_written);
 
-		spin_unlock_irqrestore(&dev->priv->tx_list.lock, flags);
+		spin_unlock_irqrestore(&priv->tx_list.lock, flags);
 
 		if (result) {
 			dev_dbg(&dev->core,
@@ -478,7 +522,7 @@
 		bytes -= bytes_written;
 		buf += bytes_written;
 	} else
-		spin_unlock_irqrestore(&dev->priv->tx_list.lock, flags);
+		spin_unlock_irqrestore(&priv->tx_list.lock, flags);
 
 	lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_KERNEL);
 
@@ -491,29 +535,86 @@
 	lb->tail = lb->data + bytes;
 	lb->dbg_number = ++dbg_number;
 
-	spin_lock_irqsave(&dev->priv->tx_list.lock, flags);
-	list_add_tail(&lb->link, &dev->priv->tx_list.head);
+	spin_lock_irqsave(&priv->tx_list.lock, flags);
+	list_add_tail(&lb->link, &priv->tx_list.head);
 	ps3_vuart_enable_interrupt_tx(dev);
-	spin_unlock_irqrestore(&dev->priv->tx_list.lock, flags);
+	spin_unlock_irqrestore(&priv->tx_list.lock, flags);
 
 	dev_dbg(&dev->core, "%s:%d: queued buf_%lu, %xh bytes\n",
 		__func__, __LINE__, lb->dbg_number, bytes);
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ps3_vuart_write);
 
 /**
- * ps3_vuart_read - the entry point for reading data from a port
+ * ps3_vuart_queue_rx_bytes - Queue waiting bytes into the buffer list.
+ * @dev: The struct ps3_system_bus_device instance.
+ * @bytes_queued: Number of bytes queued to the buffer list.
  *
- * If enough bytes to satisfy the request are held in the buffer list those
- * bytes are dequeued and copied to the caller's buffer.  Emptied list buffers
- * are retiered.  If the request cannot be statified by bytes held in the list
- * buffers -EAGAIN is returned.
+ * Must be called with priv->rx_list.lock held.
  */
 
-int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
+static int ps3_vuart_queue_rx_bytes(struct ps3_system_bus_device *dev,
+	u64 *bytes_queued)
+{
+	static unsigned long dbg_number;
+	int result;
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+	struct list_buffer *lb;
+	u64 bytes;
+
+	*bytes_queued = 0;
+
+	result = ps3_vuart_get_rx_bytes_waiting(dev, &bytes);
+	BUG_ON(result);
+
+	if (result)
+		return -EIO;
+
+	if (!bytes)
+		return 0;
+
+	/* Add some extra space for recently arrived data. */
+
+	bytes += 128;
+
+	lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_ATOMIC);
+
+	if (!lb)
+		return -ENOMEM;
+
+	ps3_vuart_raw_read(dev, lb->data, bytes, &bytes);
+
+	lb->head = lb->data;
+	lb->tail = lb->data + bytes;
+	lb->dbg_number = ++dbg_number;
+
+	list_add_tail(&lb->link, &priv->rx_list.head);
+	priv->rx_list.bytes_held += bytes;
+
+	dev_dbg(&dev->core, "%s:%d: buf_%lu: queued %lxh bytes\n",
+		__func__, __LINE__, lb->dbg_number, bytes);
+
+	*bytes_queued = bytes;
+
+	return 0;
+}
+
+/**
+ * ps3_vuart_read - The entry point for reading data from a port.
+ *
+ * Queue data waiting at the port, and if enough bytes to satisfy the request
+ * are held in the buffer list those bytes are dequeued and copied to the
+ * caller's buffer.  Emptied list buffers are retired.  If the request cannot
+ * be satisfied by bytes held in the list buffers, -EAGAIN is returned.
+ */
+
+int ps3_vuart_read(struct ps3_system_bus_device *dev, void *buf,
 	unsigned int bytes)
 {
+	int result;
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
 	unsigned long flags;
 	struct list_buffer *lb, *n;
 	unsigned long bytes_read;
@@ -521,30 +622,37 @@
 	dev_dbg(&dev->core, "%s:%d: %u(%xh) bytes\n", __func__, __LINE__,
 		bytes, bytes);
 
-	spin_lock_irqsave(&dev->priv->rx_list.lock, flags);
+	spin_lock_irqsave(&priv->rx_list.lock, flags);
 
-	if (dev->priv->rx_list.bytes_held < bytes) {
-		spin_unlock_irqrestore(&dev->priv->rx_list.lock, flags);
-		dev_dbg(&dev->core, "%s:%d: starved for %lxh bytes\n",
-			__func__, __LINE__,
-			bytes - dev->priv->rx_list.bytes_held);
-		return -EAGAIN;
+	/* Queue rx bytes here for polled reads. */
+
+	while (priv->rx_list.bytes_held < bytes) {
+		u64 tmp;
+
+		result = ps3_vuart_queue_rx_bytes(dev, &tmp);
+		if (result || !tmp) {
+			dev_dbg(&dev->core, "%s:%d: starved for %lxh bytes\n",
+				__func__, __LINE__,
+				bytes - priv->rx_list.bytes_held);
+			spin_unlock_irqrestore(&priv->rx_list.lock, flags);
+			return -EAGAIN;
+		}
 	}
 
-	list_for_each_entry_safe(lb, n, &dev->priv->rx_list.head, link) {
+	list_for_each_entry_safe(lb, n, &priv->rx_list.head, link) {
 		bytes_read = min((unsigned int)(lb->tail - lb->head), bytes);
 
 		memcpy(buf, lb->head, bytes_read);
 		buf += bytes_read;
 		bytes -= bytes_read;
-		dev->priv->rx_list.bytes_held -= bytes_read;
+		priv->rx_list.bytes_held -= bytes_read;
 
 		if (bytes_read < lb->tail - lb->head) {
 			lb->head += bytes_read;
 			dev_dbg(&dev->core, "%s:%d: buf_%lu: dequeued %lxh "
 				"bytes\n", __func__, __LINE__, lb->dbg_number,
 				bytes_read);
-			spin_unlock_irqrestore(&dev->priv->rx_list.lock, flags);
+			spin_unlock_irqrestore(&priv->rx_list.lock, flags);
 			return 0;
 		}
 
@@ -556,16 +664,32 @@
 		kfree(lb);
 	}
 
-	spin_unlock_irqrestore(&dev->priv->rx_list.lock, flags);
+	spin_unlock_irqrestore(&priv->rx_list.lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ps3_vuart_read);
 
-int ps3_vuart_read_async(struct ps3_vuart_port_device *dev, work_func_t func,
-	unsigned int bytes)
+/**
+ * ps3_vuart_work - Asynchronous read handler.
+ */
+
+static void ps3_vuart_work(struct work_struct *work)
 {
+	struct ps3_system_bus_device *dev =
+		ps3_vuart_work_to_system_bus_dev(work);
+	struct ps3_vuart_port_driver *drv =
+		ps3_system_bus_dev_to_vuart_drv(dev);
+
+	BUG_ON(!drv);
+	drv->work(dev);
+}
+
+int ps3_vuart_read_async(struct ps3_system_bus_device *dev, unsigned int bytes)
+{
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
 	unsigned long flags;
 
-	if(dev->priv->work.trigger) {
+	if (priv->rx_list.work.trigger) {
 		dev_dbg(&dev->core, "%s:%d: warning, multiple calls\n",
 			__func__, __LINE__);
 		return -EAGAIN;
@@ -573,30 +697,32 @@
 
 	BUG_ON(!bytes);
 
-	PREPARE_WORK(&dev->priv->work.work, func);
+	PREPARE_WORK(&priv->rx_list.work.work, ps3_vuart_work);
 
-	spin_lock_irqsave(&dev->priv->work.lock, flags);
-	if(dev->priv->rx_list.bytes_held >= bytes) {
+	spin_lock_irqsave(&priv->rx_list.lock, flags);
+	if (priv->rx_list.bytes_held >= bytes) {
 		dev_dbg(&dev->core, "%s:%d: schedule_work %xh bytes\n",
 			__func__, __LINE__, bytes);
-		schedule_work(&dev->priv->work.work);
-		spin_unlock_irqrestore(&dev->priv->work.lock, flags);
+		schedule_work(&priv->rx_list.work.work);
+		spin_unlock_irqrestore(&priv->rx_list.lock, flags);
 		return 0;
 	}
 
-	dev->priv->work.trigger = bytes;
-	spin_unlock_irqrestore(&dev->priv->work.lock, flags);
+	priv->rx_list.work.trigger = bytes;
+	spin_unlock_irqrestore(&priv->rx_list.lock, flags);
 
 	dev_dbg(&dev->core, "%s:%d: waiting for %u(%xh) bytes\n", __func__,
 		__LINE__, bytes, bytes);
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ps3_vuart_read_async);
 
-void ps3_vuart_cancel_async(struct ps3_vuart_port_device *dev)
+void ps3_vuart_cancel_async(struct ps3_system_bus_device *dev)
 {
-	dev->priv->work.trigger = 0;
+	to_port_priv(dev)->rx_list.work.trigger = 0;
 }
+EXPORT_SYMBOL_GPL(ps3_vuart_cancel_async);
 
 /**
  * ps3_vuart_handle_interrupt_tx - third stage transmit interrupt handler
@@ -606,18 +732,19 @@
  * adjusts the final list buffer state for a partial write.
  */
 
-static int ps3_vuart_handle_interrupt_tx(struct ps3_vuart_port_device *dev)
+static int ps3_vuart_handle_interrupt_tx(struct ps3_system_bus_device *dev)
 {
 	int result = 0;
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
 	unsigned long flags;
 	struct list_buffer *lb, *n;
 	unsigned long bytes_total = 0;
 
 	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
 
-	spin_lock_irqsave(&dev->priv->tx_list.lock, flags);
+	spin_lock_irqsave(&priv->tx_list.lock, flags);
 
-	list_for_each_entry_safe(lb, n, &dev->priv->tx_list.head, link) {
+	list_for_each_entry_safe(lb, n, &priv->tx_list.head, link) {
 
 		unsigned long bytes_written;
 
@@ -651,7 +778,7 @@
 
 	ps3_vuart_disable_interrupt_tx(dev);
 port_full:
-	spin_unlock_irqrestore(&dev->priv->tx_list.lock, flags);
+	spin_unlock_irqrestore(&priv->tx_list.lock, flags);
 	dev_dbg(&dev->core, "%s:%d wrote %lxh bytes total\n",
 		__func__, __LINE__, bytes_total);
 	return result;
@@ -665,60 +792,37 @@
  * buffer list.  Buffer list data is dequeued via ps3_vuart_read.
  */
 
-static int ps3_vuart_handle_interrupt_rx(struct ps3_vuart_port_device *dev)
+static int ps3_vuart_handle_interrupt_rx(struct ps3_system_bus_device *dev)
 {
-	static unsigned long dbg_number;
-	int result = 0;
+	int result;
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
 	unsigned long flags;
-	struct list_buffer *lb;
-	unsigned long bytes;
+	u64 bytes;
 
 	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
 
-	result = ps3_vuart_get_rx_bytes_waiting(dev, &bytes);
+	spin_lock_irqsave(&priv->rx_list.lock, flags);
+	result = ps3_vuart_queue_rx_bytes(dev, &bytes);
 
-	if (result)
-		return -EIO;
-
-	BUG_ON(!bytes);
-
-	/* Add some extra space for recently arrived data. */
-
-	bytes += 128;
-
-	lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_ATOMIC);
-
-	if (!lb)
-		return -ENOMEM;
-
-	ps3_vuart_raw_read(dev, lb->data, bytes, &bytes);
-
-	lb->head = lb->data;
-	lb->tail = lb->data + bytes;
-	lb->dbg_number = ++dbg_number;
-
-	spin_lock_irqsave(&dev->priv->rx_list.lock, flags);
-	list_add_tail(&lb->link, &dev->priv->rx_list.head);
-	dev->priv->rx_list.bytes_held += bytes;
-	spin_unlock_irqrestore(&dev->priv->rx_list.lock, flags);
-
-	dev_dbg(&dev->core, "%s:%d: buf_%lu: queued %lxh bytes\n",
-		__func__, __LINE__, lb->dbg_number, bytes);
-
-	spin_lock_irqsave(&dev->priv->work.lock, flags);
-	if(dev->priv->work.trigger
-		&& dev->priv->rx_list.bytes_held >= dev->priv->work.trigger) {
-		dev_dbg(&dev->core, "%s:%d: schedule_work %lxh bytes\n",
-			__func__, __LINE__, dev->priv->work.trigger);
-		dev->priv->work.trigger = 0;
-		schedule_work(&dev->priv->work.work);
+	if (result) {
+		spin_unlock_irqrestore(&priv->rx_list.lock, flags);
+		return result;
 	}
-	spin_unlock_irqrestore(&dev->priv->work.lock, flags);
-	return 0;
+
+	if (priv->rx_list.work.trigger && priv->rx_list.bytes_held
+		>= priv->rx_list.work.trigger) {
+		dev_dbg(&dev->core, "%s:%d: schedule_work %lxh bytes\n",
+			__func__, __LINE__, priv->rx_list.work.trigger);
+		priv->rx_list.work.trigger = 0;
+		schedule_work(&priv->rx_list.work.work);
+	}
+
+	spin_unlock_irqrestore(&priv->rx_list.lock, flags);
+	return result;
 }
 
 static int ps3_vuart_handle_interrupt_disconnect(
-	struct ps3_vuart_port_device *dev)
+	struct ps3_system_bus_device *dev)
 {
 	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
 	BUG_ON("no support");
@@ -733,9 +837,10 @@
  * stage handler after one iteration.
  */
 
-static int ps3_vuart_handle_port_interrupt(struct ps3_vuart_port_device *dev)
+static int ps3_vuart_handle_port_interrupt(struct ps3_system_bus_device *dev)
 {
 	int result;
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
 	unsigned long status;
 
 	result = ps3_vuart_get_interrupt_status(dev, &status);
@@ -747,21 +852,21 @@
 		status);
 
 	if (status & INTERRUPT_MASK_DISCONNECT) {
-		dev->priv->stats.disconnect_interrupts++;
+		priv->stats.disconnect_interrupts++;
 		result = ps3_vuart_handle_interrupt_disconnect(dev);
 		if (result)
 			ps3_vuart_disable_interrupt_disconnect(dev);
 	}
 
 	if (status & INTERRUPT_MASK_TX) {
-		dev->priv->stats.tx_interrupts++;
+		priv->stats.tx_interrupts++;
 		result = ps3_vuart_handle_interrupt_tx(dev);
 		if (result)
 			ps3_vuart_disable_interrupt_tx(dev);
 	}
 
 	if (status & INTERRUPT_MASK_RX) {
-		dev->priv->stats.rx_interrupts++;
+		priv->stats.rx_interrupts++;
 		result = ps3_vuart_handle_interrupt_rx(dev);
 		if (result)
 			ps3_vuart_disable_interrupt_rx(dev);
@@ -771,11 +876,11 @@
 }
 
 struct vuart_bus_priv {
-	const struct ports_bmp bmp;
+	struct ports_bmp *bmp;
 	unsigned int virq;
 	struct semaphore probe_mutex;
 	int use_count;
-	struct ps3_vuart_port_device *devices[PORT_COUNT];
+	struct ps3_system_bus_device *devices[PORT_COUNT];
 } static vuart_bus_priv;
 
 /**
@@ -788,17 +893,16 @@
 
 static irqreturn_t ps3_vuart_irq_handler(int irq, void *_private)
 {
-	struct vuart_bus_priv *bus_priv;
+	struct vuart_bus_priv *bus_priv = _private;
 
-	BUG_ON(!_private);
-	bus_priv = (struct vuart_bus_priv *)_private;
+	BUG_ON(!bus_priv);
 
 	while (1) {
 		unsigned int port;
 
-		dump_ports_bmp(&bus_priv->bmp);
+		dump_ports_bmp(bus_priv->bmp);
 
-		port = (BITS_PER_LONG - 1) - __ilog2(bus_priv->bmp.status);
+		port = (BITS_PER_LONG - 1) - __ilog2(bus_priv->bmp->status);
 
 		if (port == BITS_PER_LONG)
 			break;
@@ -812,100 +916,144 @@
 	return IRQ_HANDLED;
 }
 
-static int ps3_vuart_match(struct device *_dev, struct device_driver *_drv)
+static int ps3_vuart_bus_interrupt_get(void)
 {
 	int result;
-	struct ps3_vuart_port_driver *drv = to_ps3_vuart_port_driver(_drv);
-	struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
 
-	result = dev->match_id == drv->match_id;
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
 
-	dev_info(&dev->core, "%s:%d: dev=%u(%s), drv=%u(%s): %s\n", __func__,
-		__LINE__, dev->match_id, dev->core.bus_id, drv->match_id,
-		drv->core.name, (result ? "match" : "miss"));
+	vuart_bus_priv.use_count++;
 
+	BUG_ON(vuart_bus_priv.use_count > 2);
+
+	if (vuart_bus_priv.use_count != 1) {
+		return 0;
+	}
+
+	BUG_ON(vuart_bus_priv.bmp);
+
+	vuart_bus_priv.bmp = kzalloc(sizeof(struct ports_bmp), GFP_KERNEL);
+
+	if (!vuart_bus_priv.bmp) {
+		pr_debug("%s:%d: kzalloc failed.\n", __func__, __LINE__);
+		result = -ENOMEM;
+		goto fail_bmp_malloc;
+	}
+
+	result = ps3_vuart_irq_setup(PS3_BINDING_CPU_ANY, vuart_bus_priv.bmp,
+		&vuart_bus_priv.virq);
+
+	if (result) {
+		pr_debug("%s:%d: ps3_vuart_irq_setup failed (%d)\n",
+			__func__, __LINE__, result);
+		result = -EPERM;
+		goto fail_alloc_irq;
+	}
+
+	result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler,
+		IRQF_DISABLED, "vuart", &vuart_bus_priv);
+
+	if (result) {
+		pr_debug("%s:%d: request_irq failed (%d)\n",
+			__func__, __LINE__, result);
+		goto fail_request_irq;
+	}
+
+	pr_debug(" <- %s:%d: ok\n", __func__, __LINE__);
+	return result;
+
+fail_request_irq:
+	ps3_vuart_irq_destroy(vuart_bus_priv.virq);
+	vuart_bus_priv.virq = NO_IRQ;
+fail_alloc_irq:
+	kfree(vuart_bus_priv.bmp);
+	vuart_bus_priv.bmp = NULL;
+fail_bmp_malloc:
+	vuart_bus_priv.use_count--;
+	pr_debug(" <- %s:%d: failed\n", __func__, __LINE__);
 	return result;
 }
 
-static int ps3_vuart_probe(struct device *_dev)
+static int ps3_vuart_bus_interrupt_put(void)
+{
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+	vuart_bus_priv.use_count--;
+
+	BUG_ON(vuart_bus_priv.use_count < 0);
+
+	if (vuart_bus_priv.use_count != 0)
+		return 0;
+
+	free_irq(vuart_bus_priv.virq, &vuart_bus_priv);
+
+	ps3_vuart_irq_destroy(vuart_bus_priv.virq);
+	vuart_bus_priv.virq = NO_IRQ;
+
+	kfree(vuart_bus_priv.bmp);
+	vuart_bus_priv.bmp = NULL;
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return 0;
+}
+
+static int ps3_vuart_probe(struct ps3_system_bus_device *dev)
 {
 	int result;
-	unsigned int port_number;
-	struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
-	struct ps3_vuart_port_driver *drv =
-		to_ps3_vuart_port_driver(_dev->driver);
+	struct ps3_vuart_port_driver *drv;
+	struct ps3_vuart_port_priv *priv = NULL;
 
 	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
 
+	drv = ps3_system_bus_dev_to_vuart_drv(dev);
+
+	dev_dbg(&dev->core, "%s:%d: (%s)\n", __func__, __LINE__,
+		drv->core.core.name);
+
 	BUG_ON(!drv);
 
+	if (dev->port_number >= PORT_COUNT) {
+		BUG();
+		return -EINVAL;
+	}
+
 	down(&vuart_bus_priv.probe_mutex);
 
-	/* Setup vuart_bus_priv.devices[]. */
+	result = ps3_vuart_bus_interrupt_get();
 
-	result = ps3_vuart_match_id_to_port(dev->match_id,
-		&port_number);
+	if (result)
+		goto fail_setup_interrupt;
 
-	if (result) {
-		dev_dbg(&dev->core, "%s:%d: unknown match_id (%d)\n",
-			__func__, __LINE__, dev->match_id);
-		result = -EINVAL;
-		goto fail_match;
-	}
-
-	if (vuart_bus_priv.devices[port_number]) {
+	if (vuart_bus_priv.devices[dev->port_number]) {
 		dev_dbg(&dev->core, "%s:%d: port busy (%d)\n", __func__,
-			__LINE__, port_number);
+			__LINE__, dev->port_number);
 		result = -EBUSY;
-		goto fail_match;
+		goto fail_busy;
 	}
 
-	vuart_bus_priv.devices[port_number] = dev;
+	vuart_bus_priv.devices[dev->port_number] = dev;
 
-	/* Setup dev->priv. */
+	/* Setup dev->driver_priv. */
 
-	dev->priv = kzalloc(sizeof(struct ps3_vuart_port_priv), GFP_KERNEL);
+	dev->driver_priv = kzalloc(sizeof(struct ps3_vuart_port_priv),
+		GFP_KERNEL);
 
-	if (!dev->priv) {
+	if (!dev->driver_priv) {
 		result = -ENOMEM;
-		goto fail_alloc;
+		goto fail_dev_malloc;
 	}
 
-	dev->priv->port_number = port_number;
+	priv = to_port_priv(dev);
 
-	INIT_LIST_HEAD(&dev->priv->tx_list.head);
-	spin_lock_init(&dev->priv->tx_list.lock);
+	INIT_LIST_HEAD(&priv->tx_list.head);
+	spin_lock_init(&priv->tx_list.lock);
 
-	INIT_LIST_HEAD(&dev->priv->rx_list.head);
-	spin_lock_init(&dev->priv->rx_list.lock);
+	INIT_LIST_HEAD(&priv->rx_list.head);
+	spin_lock_init(&priv->rx_list.lock);
 
-	INIT_WORK(&dev->priv->work.work, NULL);
-	spin_lock_init(&dev->priv->work.lock);
-	dev->priv->work.trigger = 0;
-	dev->priv->work.dev = dev;
-
-	if (++vuart_bus_priv.use_count == 1) {
-
-		result = ps3_vuart_irq_setup(PS3_BINDING_CPU_ANY,
-			(void*)&vuart_bus_priv.bmp.status, &vuart_bus_priv.virq);
-
-		if (result) {
-			dev_dbg(&dev->core,
-				"%s:%d: ps3_vuart_irq_setup failed (%d)\n",
-				__func__, __LINE__, result);
-			result = -EPERM;
-			goto fail_alloc_irq;
-		}
-
-		result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler,
-			IRQF_DISABLED, "vuart", &vuart_bus_priv);
-
-		if (result) {
-			dev_info(&dev->core, "%s:%d: request_irq failed (%d)\n",
-				__func__, __LINE__, result);
-			goto fail_request_irq;
-		}
-	}
+	INIT_WORK(&priv->rx_list.work.work, NULL);
+	priv->rx_list.work.trigger = 0;
+	priv->rx_list.work.dev = dev;
 
 	/* clear stale pending interrupts */
 
@@ -936,152 +1084,160 @@
 
 fail_probe:
 	ps3_vuart_set_interrupt_mask(dev, 0);
-fail_request_irq:
-	ps3_vuart_irq_destroy(vuart_bus_priv.virq);
-	vuart_bus_priv.virq = NO_IRQ;
-fail_alloc_irq:
-	--vuart_bus_priv.use_count;
-	kfree(dev->priv);
-	dev->priv = NULL;
-fail_alloc:
-	vuart_bus_priv.devices[port_number] = NULL;
-fail_match:
+	kfree(dev->driver_priv);
+	dev->driver_priv = NULL;
+fail_dev_malloc:
+	vuart_bus_priv.devices[dev->port_number] = NULL;
+fail_busy:
+	ps3_vuart_bus_interrupt_put();
+fail_setup_interrupt:
 	up(&vuart_bus_priv.probe_mutex);
-	dev_dbg(&dev->core, "%s:%d failed\n", __func__, __LINE__);
+	dev_dbg(&dev->core, "%s:%d: failed\n", __func__, __LINE__);
 	return result;
 }
 
-static int ps3_vuart_remove(struct device *_dev)
+/**
+ * ps3_vuart_cleanup - common cleanup helper.
+ * @dev: The struct ps3_system_bus_device instance.
+ *
+ * Cleans interrupts and HV resources.  Must be called with
+ * vuart_bus_priv.probe_mutex held.  Used by ps3_vuart_remove and
+ * ps3_vuart_shutdown.  After this call, polled reading will still work.
+ */
+
+static int ps3_vuart_cleanup(struct ps3_system_bus_device *dev)
 {
-	struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
-	struct ps3_vuart_port_driver *drv =
-		to_ps3_vuart_port_driver(_dev->driver);
+	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+
+	ps3_vuart_cancel_async(dev);
+	ps3_vuart_set_interrupt_mask(dev, 0);
+	ps3_vuart_bus_interrupt_put();
+	return 0;
+}
+
+/**
+ * ps3_vuart_remove - Completely clean the device instance.
+ * @dev: The struct ps3_system_bus_device instance.
+ *
+ * Cleans all memory, interrupts and HV resources.  After this call the
+ * device can no longer be used.
+ */
+
+static int ps3_vuart_remove(struct ps3_system_bus_device *dev)
+{
+	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+	struct ps3_vuart_port_driver *drv;
+
+	BUG_ON(!dev);
 
 	down(&vuart_bus_priv.probe_mutex);
 
-	dev_dbg(&dev->core, "%s:%d: %s\n", __func__, __LINE__,
-		dev->core.bus_id);
+	dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__,
+		dev->match_id);
 
-	BUG_ON(vuart_bus_priv.use_count < 1);
-
-	if (drv->remove)
-		drv->remove(dev);
-	else
-		dev_dbg(&dev->core, "%s:%d: %s no remove method\n", __func__,
-			__LINE__, dev->core.bus_id);
-
-	vuart_bus_priv.devices[dev->priv->port_number] = NULL;
-
-	if (--vuart_bus_priv.use_count == 0) {
-		BUG();
-		free_irq(vuart_bus_priv.virq, &vuart_bus_priv);
-		ps3_vuart_irq_destroy(vuart_bus_priv.virq);
-		vuart_bus_priv.virq = NO_IRQ;
+	if (!dev->core.driver) {
+		dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
+			__LINE__);
+		up(&vuart_bus_priv.probe_mutex);
+		return 0;
 	}
 
-	kfree(dev->priv);
-	dev->priv = NULL;
+	drv = ps3_system_bus_dev_to_vuart_drv(dev);
+
+	BUG_ON(!drv);
+
+	if (drv->remove) {
+		drv->remove(dev);
+	} else {
+		dev_dbg(&dev->core, "%s:%d: no remove method\n", __func__,
+		__LINE__);
+		BUG();
+	}
+
+	ps3_vuart_cleanup(dev);
+
+	vuart_bus_priv.devices[dev->port_number] = NULL;
+	kfree(priv);
+	priv = NULL;
+
+	dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
+	up(&vuart_bus_priv.probe_mutex);
+	return 0;
+}
+
+/**
+ * ps3_vuart_shutdown - Cleans interrupts and HV resources.
+ * @dev: The struct ps3_system_bus_device instance.
+ *
+ * Cleans interrupts and HV resources.  After this call the
+ * device can still be used in polling mode.  This behavior is required
+ * by sys-manager to be able to complete the device power operation
+ * sequence.
+ */
+
+static int ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
+{
+	struct ps3_vuart_port_driver *drv;
+
+	BUG_ON(!dev);
+
+	down(&vuart_bus_priv.probe_mutex);
+
+	dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__,
+		dev->match_id);
+
+	if (!dev->core.driver) {
+		dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
+			__LINE__);
+		up(&vuart_bus_priv.probe_mutex);
+		return 0;
+	}
+
+	drv = ps3_system_bus_dev_to_vuart_drv(dev);
+
+	BUG_ON(!drv);
+
+	if (drv->shutdown)
+		drv->shutdown(dev);
+	else if (drv->remove) {
+		dev_dbg(&dev->core, "%s:%d: no shutdown, calling remove\n",
+			__func__, __LINE__);
+		drv->remove(dev);
+	} else {
+		dev_dbg(&dev->core, "%s:%d: no shutdown method\n", __func__,
+			__LINE__);
+		BUG();
+	}
+
+	ps3_vuart_cleanup(dev);
+
+	dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
 
 	up(&vuart_bus_priv.probe_mutex);
 	return 0;
 }
 
-static void ps3_vuart_shutdown(struct device *_dev)
+static int __init ps3_vuart_bus_init(void)
 {
-	struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
-	struct ps3_vuart_port_driver *drv =
-		to_ps3_vuart_port_driver(_dev->driver);
-
-	dev_dbg(&dev->core, "%s:%d: %s\n", __func__, __LINE__,
-		dev->core.bus_id);
-
-	if (drv->shutdown)
-		drv->shutdown(dev);
-	else
-		dev_dbg(&dev->core, "%s:%d: %s no shutdown method\n", __func__,
-			__LINE__, dev->core.bus_id);
-}
-
-/**
- * ps3_vuart_bus - The vuart bus instance.
- *
- * The vuart is managed as a bus that port devices connect to.
- */
-
-struct bus_type ps3_vuart_bus = {
-        .name = "ps3_vuart",
-	.match = ps3_vuart_match,
-	.probe = ps3_vuart_probe,
-	.remove = ps3_vuart_remove,
-	.shutdown = ps3_vuart_shutdown,
-};
-
-int __init ps3_vuart_bus_init(void)
-{
-	int result;
-
 	pr_debug("%s:%d:\n", __func__, __LINE__);
 
 	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
 		return -ENODEV;
 
 	init_MUTEX(&vuart_bus_priv.probe_mutex);
-	result = bus_register(&ps3_vuart_bus);
-	BUG_ON(result);
 
-	return result;
+	return 0;
 }
 
-void __exit ps3_vuart_bus_exit(void)
+static void __exit ps3_vuart_bus_exit(void)
 {
 	pr_debug("%s:%d:\n", __func__, __LINE__);
-	bus_unregister(&ps3_vuart_bus);
 }
 
 core_initcall(ps3_vuart_bus_init);
 module_exit(ps3_vuart_bus_exit);
 
 /**
- * ps3_vuart_port_release_device - Remove a vuart port device.
- */
-
-static void ps3_vuart_port_release_device(struct device *_dev)
-{
-#if defined(DEBUG)
-	struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
-
-	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
-
-	BUG_ON(dev->priv && "forgot to free");
-	memset(&dev->core, 0, sizeof(dev->core));
-#endif
-}
-
-/**
- * ps3_vuart_port_device_register - Add a vuart port device.
- */
-
-int ps3_vuart_port_device_register(struct ps3_vuart_port_device *dev)
-{
-	static unsigned int dev_count = 1;
-
-	BUG_ON(dev->priv && "forgot to free");
-
-	dev->core.parent = NULL;
-	dev->core.bus = &ps3_vuart_bus;
-	dev->core.release = ps3_vuart_port_release_device;
-
-	snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), "vuart_%02x",
-		dev_count++);
-
-	dev_dbg(&dev->core, "%s:%d register\n", __func__, __LINE__);
-
-	return device_register(&dev->core);
-}
-
-EXPORT_SYMBOL_GPL(ps3_vuart_port_device_register);
-
-/**
  * ps3_vuart_port_driver_register - Add a vuart port device driver.
  */
 
@@ -1089,12 +1245,18 @@
 {
 	int result;
 
-	pr_debug("%s:%d: (%s)\n", __func__, __LINE__, drv->core.name);
-	drv->core.bus = &ps3_vuart_bus;
-	result = driver_register(&drv->core);
+	pr_debug("%s:%d: (%s)\n", __func__, __LINE__, drv->core.core.name);
+
+	BUG_ON(!drv->core.match_id);
+	BUG_ON(!drv->core.core.name);
+
+	drv->core.probe = ps3_vuart_probe;
+	drv->core.remove = ps3_vuart_remove;
+	drv->core.shutdown = ps3_vuart_shutdown;
+
+	result = ps3_system_bus_driver_register(&drv->core);
 	return result;
 }
-
 EXPORT_SYMBOL_GPL(ps3_vuart_port_driver_register);
 
 /**
@@ -1103,8 +1265,7 @@
 
 void ps3_vuart_port_driver_unregister(struct ps3_vuart_port_driver *drv)
 {
-	pr_debug("%s:%d: (%s)\n", __func__, __LINE__, drv->core.name);
-	driver_unregister(&drv->core);
+	pr_debug("%s:%d: (%s)\n", __func__, __LINE__, drv->core.core.name);
+	ps3_system_bus_driver_unregister(&drv->core);
 }
-
 EXPORT_SYMBOL_GPL(ps3_vuart_port_driver_unregister);
diff --git a/drivers/ps3/vuart.h b/drivers/ps3/vuart.h
index 1be992d..eb7f6d9 100644
--- a/drivers/ps3/vuart.h
+++ b/drivers/ps3/vuart.h
@@ -34,29 +34,7 @@
 struct ps3_vuart_work {
 	struct work_struct work;
 	unsigned long trigger;
-	spinlock_t lock;
-	struct ps3_vuart_port_device* dev; /* to convert work to device */
-};
-
-/**
- * struct ps3_vuart_port_priv - private vuart device data.
- */
-
-struct ps3_vuart_port_priv {
-	unsigned int port_number;
-	u64 interrupt_mask;
-
-	struct {
-		spinlock_t lock;
-		struct list_head head;
-	} tx_list;
-	struct {
-		unsigned long bytes_held;
-		spinlock_t lock;
-		struct list_head head;
-	} rx_list;
-	struct ps3_vuart_stats stats;
-	struct ps3_vuart_work work;
+	struct ps3_system_bus_device *dev; /* to convert work to device */
 };
 
 /**
@@ -64,32 +42,30 @@
  */
 
 struct ps3_vuart_port_driver {
-	enum ps3_match_id match_id;
-	struct device_driver core;
-	int (*probe)(struct ps3_vuart_port_device *);
-	int (*remove)(struct ps3_vuart_port_device *);
-	void (*shutdown)(struct ps3_vuart_port_device *);
-	int (*tx_event)(struct ps3_vuart_port_device *dev);
-	int (*rx_event)(struct ps3_vuart_port_device *dev);
-	int (*disconnect_event)(struct ps3_vuart_port_device *dev);
-	/* int (*suspend)(struct ps3_vuart_port_device *, pm_message_t); */
-	/* int (*resume)(struct ps3_vuart_port_device *); */
+	struct ps3_system_bus_driver core;
+	int (*probe)(struct ps3_system_bus_device *);
+	int (*remove)(struct ps3_system_bus_device *);
+	void (*shutdown)(struct ps3_system_bus_device *);
+	void (*work)(struct ps3_system_bus_device *);
+	/* int (*tx_event)(struct ps3_system_bus_device *dev); */
+	/* int (*rx_event)(struct ps3_system_bus_device *dev); */
+	/* int (*disconnect_event)(struct ps3_system_bus_device *dev); */
+	/* int (*suspend)(struct ps3_system_bus_device *, pm_message_t); */
+	/* int (*resume)(struct ps3_system_bus_device *); */
 };
 
 int ps3_vuart_port_driver_register(struct ps3_vuart_port_driver *drv);
 void ps3_vuart_port_driver_unregister(struct ps3_vuart_port_driver *drv);
 
-static inline struct ps3_vuart_port_driver *to_ps3_vuart_port_driver(
-	struct device_driver *_drv)
+static inline struct ps3_vuart_port_driver *
+	ps3_system_bus_dev_to_vuart_drv(struct ps3_system_bus_device *_dev)
 {
-	return container_of(_drv, struct ps3_vuart_port_driver, core);
+	struct ps3_system_bus_driver *sbd =
+		ps3_system_bus_dev_to_system_bus_drv(_dev);
+	BUG_ON(!sbd);
+	return container_of(sbd, struct ps3_vuart_port_driver, core);
 }
-static inline struct ps3_vuart_port_device *to_ps3_vuart_port_device(
-	struct device *_dev)
-{
-	return container_of(_dev, struct ps3_vuart_port_device, core);
-}
-static inline struct ps3_vuart_port_device *ps3_vuart_work_to_port_device(
+static inline struct ps3_system_bus_device *ps3_vuart_work_to_system_bus_dev(
 	struct work_struct *_work)
 {
 	struct ps3_vuart_work *vw = container_of(_work, struct ps3_vuart_work,
@@ -97,14 +73,13 @@
 	return vw->dev;
 }
 
-int ps3_vuart_write(struct ps3_vuart_port_device *dev, const void* buf,
+int ps3_vuart_write(struct ps3_system_bus_device *dev, const void *buf,
 	unsigned int bytes);
-int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
+int ps3_vuart_read(struct ps3_system_bus_device *dev, void *buf,
 	unsigned int bytes);
-int ps3_vuart_read_async(struct ps3_vuart_port_device *dev, work_func_t func,
-	unsigned int bytes);
-void ps3_vuart_cancel_async(struct ps3_vuart_port_device *dev);
-void ps3_vuart_clear_rx_bytes(struct ps3_vuart_port_device *dev,
+int ps3_vuart_read_async(struct ps3_system_bus_device *dev, unsigned int bytes);
+void ps3_vuart_cancel_async(struct ps3_system_bus_device *dev);
+void ps3_vuart_clear_rx_bytes(struct ps3_system_bus_device *dev,
 	unsigned int bytes);
 
 #endif
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index f935c1f..4442072 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -297,11 +297,10 @@
 	struct rio_switch *rswitch;
 	int result, rdid;
 
-	rdev = kmalloc(sizeof(struct rio_dev), GFP_KERNEL);
+	rdev = kzalloc(sizeof(struct rio_dev), GFP_KERNEL);
 	if (!rdev)
 		goto out;
 
-	memset(rdev, 0, sizeof(struct rio_dev));
 	rdev->net = net;
 	rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_ID_CAR,
 				 &result);
@@ -801,9 +800,8 @@
 {
 	struct rio_net *net;
 
-	net = kmalloc(sizeof(struct rio_net), GFP_KERNEL);
+	net = kzalloc(sizeof(struct rio_net), GFP_KERNEL);
 	if (net) {
-		memset(net, 0, sizeof(struct rio_net));
 		INIT_LIST_HEAD(&net->node);
 		INIT_LIST_HEAD(&net->devices);
 		INIT_LIST_HEAD(&net->mports);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4e4c10a..cea401f 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -10,7 +10,6 @@
 
 config RTC_CLASS
 	tristate "RTC class"
-	depends on EXPERIMENTAL
 	default n
 	select RTC_LIB
 	help
@@ -119,7 +118,7 @@
 	  will be called rtc-test.
 
 comment "I2C RTC drivers"
-	depends on RTC_CLASS
+	depends on RTC_CLASS && I2C
 
 config RTC_DRV_DS1307
 	tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00"
@@ -160,11 +159,11 @@
 	  will be called rtc-max6900.
 
 config RTC_DRV_RS5C372
-	tristate "Ricoh RS5C372A/B"
+	tristate "Ricoh RS5C372A/B, RV5C386, RV5C387A"
 	depends on RTC_CLASS && I2C
 	help
 	  If you say yes here you get support for the
-	  Ricoh RS5C372A and RS5C372B RTC chips.
+	  Ricoh RS5C372A, RS5C372B, RV5C386, and RV5C387A RTC chips.
 
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-rs5c372.
@@ -213,12 +212,40 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-pcf8583.
 
+config RTC_DRV_M41T80
+	tristate "ST M41T80 series RTC"
+	depends on RTC_CLASS && I2C
+	help
+	  If you say Y here you will get support for the
+	  ST M41T80 RTC chip series. Currently the following chips are
+	  supported: M41T80, M41T81, M41T82, M41T83, M41ST84, M41ST85
+	  and M41ST87.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-m41t80.
+
+config RTC_DRV_M41T80_WDT
+	bool "ST M41T80 series RTC watchdog timer"
+	depends on RTC_DRV_M41T80
+	help
+	  If you say Y here you will get support for the
+	  watchdog timer in the ST M41T80 RTC chip series.
+
+config RTC_DRV_TWL92330
+	boolean "TI TWL92330/Menelaus"
+	depends on RTC_CLASS && I2C && MENELAUS
+	help
+	  If you say yes here you get support for the RTC on the
+	  TWL92330 "Menelaus" power management chip, used with OMAP2
+	  platforms.  The support is integrated with the rest of
+	  the Menelaus driver; it's not a separate module.
+
 comment "SPI RTC drivers"
-	depends on RTC_CLASS
+	depends on RTC_CLASS && SPI_MASTER
 
 config RTC_DRV_RS5C348
 	tristate "Ricoh RS5C348A/B"
-	depends on RTC_CLASS && SPI
+	depends on RTC_CLASS && SPI_MASTER
 	help
 	  If you say yes here you get support for the
 	  Ricoh RS5C348A and RS5C348B RTC chips.
@@ -228,7 +255,7 @@
 
 config RTC_DRV_MAX6902
 	tristate "Maxim 6902"
-	depends on RTC_CLASS && SPI
+	depends on RTC_CLASS && SPI_MASTER
 	help
 	  If you say yes here you will get support for the
 	  Maxim MAX6902 SPI RTC chip.
@@ -246,7 +273,7 @@
 config RTC_DRV_CMOS
 	tristate "PC-style 'CMOS'"
 	depends on RTC_CLASS && (X86 || ALPHA || ARM26 || ARM \
-		|| M32R || ATARI || POWERPC || MIPS)
+		|| M32R || ATARI || PPC || MIPS)
 	help
 	  Say "yes" here to get direct support for the real time clock
 	  found in every PC or ACPI-based system, and some other boards.
@@ -262,6 +289,12 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-cmos.
 
+config RTC_DRV_DS1216
+	tristate "Dallas DS1216"
+	depends on RTC_CLASS && SNI_RM
+	help
+	  If you say yes here you get support for the Dallas DS1216 RTC chips.
+
 config RTC_DRV_DS1553
 	tristate "Dallas DS1553"
 	depends on RTC_CLASS
@@ -292,6 +325,16 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-m48t86.
 
+config RTC_DRV_M48T59
+	tristate "ST M48T59"
+	depends on RTC_CLASS
+	help
+	  If you say Y here you will get support for the
+	  ST M48T59 RTC chip.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-m48t59.
+
 config RTC_DRV_V3020
 	tristate "EM Microelectronic V3020"
 	depends on RTC_CLASS
@@ -379,6 +422,13 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called rtc-pl031.
 
+config RTC_DRV_AT32AP700X
+	tristate "AT32AP700X series RTC"
+	depends on RTC_CLASS && PLATFORM_AT32AP
+	help
+	  Driver for the internal RTC (Realtime Clock) on Atmel AVR32
+	  AT32AP700x family processors.
+
 config RTC_DRV_AT91RM9200
 	tristate "AT91RM9200"
 	depends on RTC_CLASS && ARCH_AT91RM9200
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index a1afbc2..3109af9 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -19,6 +19,7 @@
 obj-$(CONFIG_RTC_DRV_X1205)	+= rtc-x1205.o
 obj-$(CONFIG_RTC_DRV_ISL1208)	+= rtc-isl1208.o
 obj-$(CONFIG_RTC_DRV_TEST)	+= rtc-test.o
+obj-$(CONFIG_RTC_DRV_AT32AP700X)	+= rtc-at32ap700x.o
 obj-$(CONFIG_RTC_DRV_DS1307)	+= rtc-ds1307.o
 obj-$(CONFIG_RTC_DRV_DS1672)	+= rtc-ds1672.o
 obj-$(CONFIG_RTC_DRV_DS1742)	+= rtc-ds1742.o
@@ -28,6 +29,7 @@
 obj-$(CONFIG_RTC_DRV_RS5C372)	+= rtc-rs5c372.o
 obj-$(CONFIG_RTC_DRV_S3C)	+= rtc-s3c.o
 obj-$(CONFIG_RTC_DRV_RS5C348)	+= rtc-rs5c348.o
+obj-$(CONFIG_RTC_DRV_M41T80)	+= rtc-m41t80.o
 obj-$(CONFIG_RTC_DRV_M48T86)	+= rtc-m48t86.o
 obj-$(CONFIG_RTC_DRV_DS1553)	+= rtc-ds1553.o
 obj-$(CONFIG_RTC_DRV_RS5C313)	+= rtc-rs5c313.o
@@ -41,3 +43,5 @@
 obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
 obj-$(CONFIG_RTC_DRV_SH)	+= rtc-sh.o
 obj-$(CONFIG_RTC_DRV_BFIN)	+= rtc-bfin.o
+obj-$(CONFIG_RTC_DRV_M48T59)	+= rtc-m48t59.o
+obj-$(CONFIG_RTC_DRV_DS1216)	+= rtc-ds1216.o
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
new file mode 100644
index 0000000..2999214
--- /dev/null
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -0,0 +1,317 @@
+/*
+ * An RTC driver for the AVR32 AT32AP700x processor series.
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/io.h>
+
+/*
+ * This is a bare-bones RTC. It runs during most system sleep states, but has
+ * no battery backup and gets reset during system restart.  It must be
+ * initialized from an external clock (network, I2C, etc) before it can be of
+ * much use.
+ *
+ * The alarm functionality is limited by the hardware, not supporting
+ * periodic interrupts.
+ */
+
+#define RTC_CTRL		0x00
+#define RTC_CTRL_EN		   0
+#define RTC_CTRL_PCLR		   1
+#define RTC_CTRL_TOPEN		   2
+#define RTC_CTRL_PSEL		   8
+
+#define RTC_VAL			0x04
+
+#define RTC_TOP			0x08
+
+#define RTC_IER			0x10
+#define RTC_IER_TOPI		   0
+
+#define RTC_IDR			0x14
+#define RTC_IDR_TOPI		   0
+
+#define RTC_IMR			0x18
+#define RTC_IMR_TOPI		   0
+
+#define RTC_ISR			0x1c
+#define RTC_ISR_TOPI		   0
+
+#define RTC_ICR			0x20
+#define RTC_ICR_TOPI		   0
+
+#define RTC_BIT(name)		(1 << RTC_##name)
+#define RTC_BF(name, value)	((value) << RTC_##name)
+
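+/*
+ * Register access helpers: 'reg' is a symbolic register name, so
+ * rtc_readl(rtc, VAL) expands to __raw_readl(rtc->regs + RTC_VAL).
+ */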
+#define rtc_readl(dev, reg)				\
+	__raw_readl((dev)->regs + RTC_##reg)
+#define rtc_writel(dev, reg, value)			\
+	__raw_writel((value), (dev)->regs + RTC_##reg)
+
+struct rtc_at32ap700x {
+	struct rtc_device	*rtc;
+	void __iomem		*regs;
+	unsigned long		alarm_time;
+	unsigned long		irq;
+	/* Protect against concurrent register access. */
+	spinlock_t		lock;
+};
+
+static int at32_rtc_readtime(struct device *dev, struct rtc_time *tm)
+{
+	struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+	unsigned long now;
+
+	now = rtc_readl(rtc, VAL);
+	rtc_time_to_tm(now, tm);
+
+	return 0;
+}
+
+static int at32_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+	struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+	unsigned long now;
+	int ret;
+
+	ret = rtc_tm_to_time(tm, &now);
+	if (ret == 0)
+		rtc_writel(rtc, VAL, now);
+
+	return ret;
+}
+
+static int at32_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+
+	rtc_time_to_tm(rtc->alarm_time, &alrm->time);
+	alrm->pending = rtc_readl(rtc, IMR) & RTC_BIT(IMR_TOPI) ? 1 : 0;
+
+	return 0;
+}
+
+static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+	unsigned long rtc_unix_time;
+	unsigned long alarm_unix_time;
+	int ret;
+
+	rtc_unix_time = rtc_readl(rtc, VAL);
+
+	ret = rtc_tm_to_time(&alrm->time, &alarm_unix_time);
+	if (ret)
+		return ret;
+
+	if (alarm_unix_time < rtc_unix_time)
+		return -EINVAL;
+
+	spin_lock_irq(&rtc->lock);
+	rtc->alarm_time = alarm_unix_time;
+	rtc_writel(rtc, TOP, rtc->alarm_time);
+	if (alrm->pending)
+		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+				| RTC_BIT(CTRL_TOPEN));
+	else
+		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+				& ~RTC_BIT(CTRL_TOPEN));
+	spin_unlock_irq(&rtc->lock);
+
+	return ret;
+}
+
+static int at32_rtc_ioctl(struct device *dev, unsigned int cmd,
+			unsigned long arg)
+{
+	struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+	int ret = 0;
+
+	spin_lock_irq(&rtc->lock);
+
+	switch (cmd) {
+	case RTC_AIE_ON:
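+		/* refuse to enable an alarm that is already in the past */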
+		if (rtc_readl(rtc, VAL) > rtc->alarm_time) {
+			ret = -EINVAL;
+			break;
+		}
+		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+				| RTC_BIT(CTRL_TOPEN));
+		rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
+		rtc_writel(rtc, IER, RTC_BIT(IER_TOPI));
+		break;
+	case RTC_AIE_OFF:
+		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+				& ~RTC_BIT(CTRL_TOPEN));
+		rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
+		rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
+		break;
+	default:
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+
+	spin_unlock_irq(&rtc->lock);
+
+	return ret;
+}
+
+static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
+{
+	struct rtc_at32ap700x *rtc = (struct rtc_at32ap700x *)dev_id;
+	unsigned long isr = rtc_readl(rtc, ISR);
+	unsigned long events = 0;
+	int ret = IRQ_NONE;
+
+	spin_lock(&rtc->lock);
+
+	if (isr & RTC_BIT(ISR_TOPI)) {
+		rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
+		rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
+		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+				& ~RTC_BIT(CTRL_TOPEN));
+		rtc_writel(rtc, VAL, rtc->alarm_time);
+		events = RTC_AF | RTC_IRQF;
+		rtc_update_irq(rtc->rtc, 1, events);
+		ret = IRQ_HANDLED;
+	}
+
+	spin_unlock(&rtc->lock);
+
+	return ret;
+}
+
+static struct rtc_class_ops at32_rtc_ops = {
+	.ioctl		= at32_rtc_ioctl,
+	.read_time	= at32_rtc_readtime,
+	.set_time	= at32_rtc_settime,
+	.read_alarm	= at32_rtc_readalarm,
+	.set_alarm	= at32_rtc_setalarm,
+};
+
+static int __init at32_rtc_probe(struct platform_device *pdev)
+{
+	struct resource	*regs;
+	struct rtc_at32ap700x *rtc;
+	int irq = -1;
+	int ret;
+
+	rtc = kzalloc(sizeof(struct rtc_at32ap700x), GFP_KERNEL);
+	if (!rtc) {
+		dev_dbg(&pdev->dev, "out of memory\n");
+		return -ENOMEM;
+	}
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs) {
+		dev_dbg(&pdev->dev, "no mmio resource defined\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_dbg(&pdev->dev, "could not get irq\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	ret = request_irq(irq, at32_rtc_interrupt, IRQF_SHARED, "rtc", rtc);
+	if (ret) {
+		dev_dbg(&pdev->dev, "could not request irq %d\n", irq);
+		goto out;
+	}
+
+	rtc->irq = irq;
+	rtc->regs = ioremap(regs->start, regs->end - regs->start + 1);
+	if (!rtc->regs) {
+		ret = -ENOMEM;
+		dev_dbg(&pdev->dev, "could not map I/O memory\n");
+		goto out_free_irq;
+	}
+	spin_lock_init(&rtc->lock);
+
+	/*
+	 * Maybe init RTC: count from zero at 1 Hz, disable wrap irq.
+	 *
+	 * Do not reset VAL register, as it can hold an old time
+	 * from last JTAG reset.
+	 */
+	if (!(rtc_readl(rtc, CTRL) & RTC_BIT(CTRL_EN))) {
+		rtc_writel(rtc, CTRL, RTC_BIT(CTRL_PCLR));
+		rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
+		rtc_writel(rtc, CTRL, RTC_BF(CTRL_PSEL, 0xe)
+				| RTC_BIT(CTRL_EN));
+	}
+
+	rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+				&at32_rtc_ops, THIS_MODULE);
+	if (IS_ERR(rtc->rtc)) {
+		dev_dbg(&pdev->dev, "could not register rtc device\n");
+		ret = PTR_ERR(rtc->rtc);
+		goto out_iounmap;
+	}
+
+	platform_set_drvdata(pdev, rtc);
+
+	dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n",
+			(unsigned long)rtc->regs, rtc->irq);
+
+	return 0;
+
+out_iounmap:
+	iounmap(rtc->regs);
+out_free_irq:
+	free_irq(irq, rtc);
+out:
+	kfree(rtc);
+	return ret;
+}
+
+static int __exit at32_rtc_remove(struct platform_device *pdev)
+{
+	struct rtc_at32ap700x *rtc = platform_get_drvdata(pdev);
+
+	free_irq(rtc->irq, rtc);
+	iounmap(rtc->regs);
+	rtc_device_unregister(rtc->rtc);
+	kfree(rtc);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+MODULE_ALIAS("at32ap700x_rtc");
+
+static struct platform_driver at32_rtc_driver = {
+	.remove		= __exit_p(at32_rtc_remove),
+	.driver		= {
+		.name	= "at32ap700x_rtc",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init at32_rtc_init(void)
+{
+	return platform_driver_probe(&at32_rtc_driver, at32_rtc_probe);
+}
+module_init(at32_rtc_init);
+
+static void __exit at32_rtc_exit(void)
+{
+	platform_driver_unregister(&at32_rtc_driver);
+}
+module_exit(at32_rtc_exit);
+
+MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
+MODULE_DESCRIPTION("Real time clock for AVR32 AT32AP700x");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index e24ea82..5d760bb 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -235,7 +235,7 @@
 	return 0;
 }
 
-static int cmos_set_freq(struct device *dev, int freq)
+static int cmos_irq_set_freq(struct device *dev, int freq)
 {
 	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
 	int		f;
@@ -259,6 +259,34 @@
 	return 0;
 }
 
+static int cmos_irq_set_state(struct device *dev, int enabled)
+{
+	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
+	unsigned char	rtc_control, rtc_intr;
+	unsigned long	flags;
+
+	if (!is_valid_irq(cmos->irq))
+		return -ENXIO;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	rtc_control = CMOS_READ(RTC_CONTROL);
+
+	if (enabled)
+		rtc_control |= RTC_PIE;
+	else
+		rtc_control &= ~RTC_PIE;
+
+	CMOS_WRITE(rtc_control, RTC_CONTROL);
+
+	rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
+	rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
+	if (is_intr(rtc_intr))
+		rtc_update_irq(cmos->rtc, 1, rtc_intr);
+
+	spin_unlock_irqrestore(&rtc_lock, flags);
+	return 0;
+}
+
 #if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
 
 static int
@@ -360,7 +388,8 @@
 	.read_alarm	= cmos_read_alarm,
 	.set_alarm	= cmos_set_alarm,
 	.proc		= cmos_procfs,
-	.irq_set_freq	= cmos_set_freq,
+	.irq_set_freq	= cmos_irq_set_freq,
+	.irq_set_state	= cmos_irq_set_state,
 };
 
 /*----------------------------------------------------------------*/
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index f4e5f00..3045359 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -341,6 +341,8 @@
 	case RTC_IRQP_READ:
 		if (ops->irq_set_freq)
 			err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
+		else
+			err = -ENOTTY;
 		break;
 
 	case RTC_IRQP_SET:
diff --git a/drivers/rtc/rtc-ds1216.c b/drivers/rtc/rtc-ds1216.c
new file mode 100644
index 0000000..83efb88
--- /dev/null
+++ b/drivers/rtc/rtc-ds1216.c
@@ -0,0 +1,226 @@
+/*
+ * Dallas DS1216 RTC driver
+ *
+ * Copyright (c) 2007 Thomas Bogendoerfer
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/bcd.h>
+
+#define DRV_VERSION "0.1"
+
+struct ds1216_regs {
+	u8 tsec;
+	u8 sec;
+	u8 min;
+	u8 hour;
+	u8 wday;
+	u8 mday;
+	u8 month;
+	u8 year;
+};
+
+#define DS1216_HOUR_1224	(1 << 7)
+#define DS1216_HOUR_AMPM	(1 << 5)
+
+struct ds1216_priv {
+	struct rtc_device *rtc;
+	void __iomem *ioaddr;
+	size_t size;
+	unsigned long baseaddr;
+};
+
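+/*
+ * 64-bit magic sequence; ds1216_switch_ds_to_clock() shifts it out
+ * bit-serially to give access to the clock registers.
+ */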
+static const u8 magic[] = {
+	0xc5, 0x3a, 0xa3, 0x5c, 0xc5, 0x3a, 0xa3, 0x5c
+};
+
+/*
+ * Read the 64 bits we'd like to have - it's a series
+ * of 64 bits showing up in the LSB of the base register.
+ *
+ */
+static void ds1216_read(u8 __iomem *ioaddr, u8 *buf)
+{
+	unsigned char c;
+	int i, j;
+
+	for (i = 0; i < 8; i++) {
+		c = 0;
+		for (j = 0; j < 8; j++)
+			c |= (readb(ioaddr) & 0x1) << j;
+		buf[i] = c;
+	}
+}
+
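+/*
+ * Write the 64-bit buffer back the same way, LSB first, one bit per
+ * write to the base register.
+ */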
+static void ds1216_write(u8 __iomem *ioaddr, const u8 *buf)
+{
+	unsigned char c;
+	int i, j;
+
+	for (i = 0; i < 8; i++) {
+		c = buf[i];
+		for (j = 0; j < 8; j++) {
+			writeb(c, ioaddr);
+			c = c >> 1;
+		}
+	}
+}
+
+static void ds1216_switch_ds_to_clock(u8 __iomem *ioaddr)
+{
+	/* Reset magic pointer */
+	readb(ioaddr);
+	/* Write 64 bit magic to DS1216 */
+	ds1216_write(ioaddr, magic);
+}
+
+static int ds1216_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct ds1216_priv *priv = platform_get_drvdata(pdev);
+	struct ds1216_regs regs;
+
+	ds1216_switch_ds_to_clock(priv->ioaddr);
+	ds1216_read(priv->ioaddr, (u8 *)&regs);
+
+	tm->tm_sec = BCD2BIN(regs.sec);
+	tm->tm_min = BCD2BIN(regs.min);
+	if (regs.hour & DS1216_HOUR_1224) {
+		/* AM/PM mode */
+		tm->tm_hour = BCD2BIN(regs.hour & 0x1f);
+		if (regs.hour & DS1216_HOUR_AMPM)
+			tm->tm_hour += 12;
+	} else
+		tm->tm_hour = BCD2BIN(regs.hour & 0x3f);
+	tm->tm_wday = (regs.wday & 7) - 1;
+	tm->tm_mday = BCD2BIN(regs.mday & 0x3f);
+	tm->tm_mon = BCD2BIN(regs.month & 0x1f);
+	tm->tm_year = BCD2BIN(regs.year);
+	if (tm->tm_year < 70)
+		tm->tm_year += 100;
+	return 0;
+}
+
+static int ds1216_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct ds1216_priv *priv = platform_get_drvdata(pdev);
+	struct ds1216_regs regs;
+
+	ds1216_switch_ds_to_clock(priv->ioaddr);
+	ds1216_read(priv->ioaddr, (u8 *)&regs);
+
+	regs.tsec = 0; /* clear 0.1 and 0.01 seconds */
+	regs.sec = BIN2BCD(tm->tm_sec);
+	regs.min = BIN2BCD(tm->tm_min);
+	regs.hour &= DS1216_HOUR_1224;
+	if (regs.hour && tm->tm_hour > 12) {
+		regs.hour |= DS1216_HOUR_AMPM;
+		tm->tm_hour -= 12;
+	}
+	regs.hour |= BIN2BCD(tm->tm_hour);
+	regs.wday &= ~7;
+	regs.wday |= tm->tm_wday;
+	regs.mday = BIN2BCD(tm->tm_mday);
+	regs.month = BIN2BCD(tm->tm_mon);
+	regs.year = BIN2BCD(tm->tm_year % 100);
+
+	ds1216_switch_ds_to_clock(priv->ioaddr);
+	ds1216_write(priv->ioaddr, (u8 *)&regs);
+	return 0;
+}
+
+static const struct rtc_class_ops ds1216_rtc_ops = {
+	.read_time	= ds1216_rtc_read_time,
+	.set_time	= ds1216_rtc_set_time,
+};
+
+static int __devinit ds1216_rtc_probe(struct platform_device *pdev)
+{
+	struct rtc_device *rtc;
+	struct resource *res;
+	struct ds1216_priv *priv;
+	int ret = 0;
+	u8 dummy[8];
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+	priv = kzalloc(sizeof *priv, GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	priv->size = res->end - res->start + 1;
+	if (!request_mem_region(res->start, priv->size, pdev->name)) {
+		ret = -EBUSY;
+		goto out;
+	}
+	priv->baseaddr = res->start;
+	priv->ioaddr = ioremap(priv->baseaddr, priv->size);
+	if (!priv->ioaddr) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	rtc = rtc_device_register("ds1216", &pdev->dev,
+				  &ds1216_rtc_ops, THIS_MODULE);
+	if (IS_ERR(rtc)) {
+		ret = PTR_ERR(rtc);
+		goto out;
+	}
+	priv->rtc = rtc;
+	platform_set_drvdata(pdev, priv);
+
+	/* dummy read to get clock into a known state */
+	ds1216_read(priv->ioaddr, dummy);
+	return 0;
+
+out:
+	if (priv->rtc)
+		rtc_device_unregister(priv->rtc);
+	if (priv->ioaddr)
+		iounmap(priv->ioaddr);
+	if (priv->baseaddr)
+		release_mem_region(priv->baseaddr, priv->size);
+	kfree(priv);
+	return ret;
+}
+
+static int __devexit ds1216_rtc_remove(struct platform_device *pdev)
+{
+	struct ds1216_priv *priv = platform_get_drvdata(pdev);
+
+	rtc_device_unregister(priv->rtc);
+	iounmap(priv->ioaddr);
+	release_mem_region(priv->baseaddr, priv->size);
+	kfree(priv);
+	return 0;
+}
+
+static struct platform_driver ds1216_rtc_platform_driver = {
+	.driver		= {
+		.name	= "rtc-ds1216",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= ds1216_rtc_probe,
+	.remove		= __devexit_p(ds1216_rtc_remove),
+};
+
+static int __init ds1216_rtc_init(void)
+{
+	return platform_driver_register(&ds1216_rtc_platform_driver);
+}
+
+static void __exit ds1216_rtc_exit(void)
+{
+	platform_driver_unregister(&ds1216_rtc_platform_driver);
+}
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
+MODULE_DESCRIPTION("DS1216 RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ds1216_rtc_init);
+module_exit(ds1216_rtc_exit);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 3f0f7b8..5158a62 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -24,29 +24,29 @@
  * setting the date and time), Linux can ignore the non-clock features.
  * That's a natural job for a factory or repair bench.
  *
- * If the I2C "force" mechanism is used, we assume the chip is a ds1337.
- * (Much better would be board-specific tables of I2C devices, along with
- * the platform_data drivers would use to sort such issues out.)
+ * This is currently a simple no-alarms driver.  If your board has the
+ * alarm irq wired up on a ds1337 or ds1339, and you want to use that,
+ * then look at the rtc-rs5c372 driver for code to steal...
  */
 enum ds_type {
-	unknown = 0,
-	ds_1307,		/* or ds1338, ... */
-	ds_1337,		/* or ds1339, ... */
-	ds_1340,		/* or st m41t00, ... */
+	ds_1307,
+	ds_1337,
+	ds_1338,
+	ds_1339,
+	ds_1340,
+	m41t00,
 	// rs5c372 too?  different address...
 };
 
-static unsigned short normal_i2c[] = { 0x68, I2C_CLIENT_END };
-
-I2C_CLIENT_INSMOD;
-
-
 
 /* RTC registers don't differ much, except for the century flag */
 #define DS1307_REG_SECS		0x00	/* 00-59 */
 #	define DS1307_BIT_CH		0x80
+#	define DS1340_BIT_nEOSC		0x80
 #define DS1307_REG_MIN		0x01	/* 00-59 */
 #define DS1307_REG_HOUR		0x02	/* 00-23, or 1-12{am,pm} */
+#	define DS1307_BIT_12HR		0x40	/* in REG_HOUR */
+#	define DS1307_BIT_PM		0x20	/* in REG_HOUR */
 #	define DS1340_BIT_CENTURY_EN	0x80	/* in REG_HOUR */
 #	define DS1340_BIT_CENTURY	0x40	/* in REG_HOUR */
 #define DS1307_REG_WDAY		0x03	/* 01-07 */
@@ -56,11 +56,12 @@
 #define DS1307_REG_YEAR		0x06	/* 00-99 */
 
 /* Other registers (control, status, alarms, trickle charge, NVRAM, etc)
- * start at 7, and they differ a lot. Only control and status matter for RTC;
- * be careful using them.
+ * start at 7, and they differ a LOT. Only control and status matter for
+ * basic RTC date and time functionality; be careful using them.
  */
-#define DS1307_REG_CONTROL	0x07
+#define DS1307_REG_CONTROL	0x07		/* or ds1338 */
 #	define DS1307_BIT_OUT		0x80
+#	define DS1338_BIT_OSF		0x20
 #	define DS1307_BIT_SQWE		0x10
 #	define DS1307_BIT_RS1		0x02
 #	define DS1307_BIT_RS0		0x01
@@ -71,6 +72,13 @@
 #	define DS1337_BIT_INTCN		0x04
 #	define DS1337_BIT_A2IE		0x02
 #	define DS1337_BIT_A1IE		0x01
+#define DS1340_REG_CONTROL	0x07
+#	define DS1340_BIT_OUT		0x80
+#	define DS1340_BIT_FT		0x40
+#	define DS1340_BIT_CALIB_SIGN	0x20
+#	define DS1340_M_CALIBRATION	0x1f
+#define DS1340_REG_FLAG		0x09
+#	define DS1340_BIT_OSF		0x80
 #define DS1337_REG_STATUS	0x0f
 #	define DS1337_BIT_OSF		0x80
 #	define DS1337_BIT_A2I		0x02
@@ -84,21 +92,63 @@
 	u8			regs[8];
 	enum ds_type		type;
 	struct i2c_msg		msg[2];
-	struct i2c_client	client;
+	struct i2c_client	*client;
+	struct i2c_client	dev;
 	struct rtc_device	*rtc;
 };
 
+struct chip_desc {
+	char			name[9];
+	unsigned		nvram56:1;
+	unsigned		alarm:1;
+	enum ds_type		type;
+};
+
+static const struct chip_desc chips[] = { {
+	.name		= "ds1307",
+	.type		= ds_1307,
+	.nvram56	= 1,
+}, {
+	.name		= "ds1337",
+	.type		= ds_1337,
+	.alarm		= 1,
+}, {
+	.name		= "ds1338",
+	.type		= ds_1338,
+	.nvram56	= 1,
+}, {
+	.name		= "ds1339",
+	.type		= ds_1339,
+	.alarm		= 1,
+}, {
+	.name		= "ds1340",
+	.type		= ds_1340,
+}, {
+	.name		= "m41t00",
+	.type		= m41t00,
+}, };
+
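+/* map an i2c client name to its chip descriptor, or NULL if unknown */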
+static inline const struct chip_desc *find_chip(const char *s)
+{
+	unsigned i;
+
+	for (i = 0; i < ARRAY_SIZE(chips); i++)
+		if (strnicmp(s, chips[i].name, sizeof chips[i].name) == 0)
+			return &chips[i];
+	return NULL;
+}
 
 static int ds1307_get_time(struct device *dev, struct rtc_time *t)
 {
 	struct ds1307	*ds1307 = dev_get_drvdata(dev);
 	int		tmp;
 
-	/* read the RTC registers all at once */
+	/* read the RTC date and time registers all at once */
 	ds1307->msg[1].flags = I2C_M_RD;
 	ds1307->msg[1].len = 7;
 
-	tmp = i2c_transfer(ds1307->client.adapter, ds1307->msg, 2);
+	tmp = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent),
+			ds1307->msg, 2);
 	if (tmp != 2) {
 		dev_err(dev, "%s error %d\n", "read", tmp);
 		return -EIO;
@@ -129,7 +179,8 @@
 		t->tm_hour, t->tm_mday,
 		t->tm_mon, t->tm_year, t->tm_wday);
 
-	return 0;
+	/* initial clock setting can be undefined */
+	return rtc_valid_tm(t);
 }
 
 static int ds1307_set_time(struct device *dev, struct rtc_time *t)
@@ -157,11 +208,18 @@
 	tmp = t->tm_year - 100;
 	buf[DS1307_REG_YEAR] = BIN2BCD(tmp);
 
-	if (ds1307->type == ds_1337)
+	switch (ds1307->type) {
+	case ds_1337:
+	case ds_1339:
 		buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
-	else if (ds1307->type == ds_1340)
+		break;
+	case ds_1340:
 		buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN
 				| DS1340_BIT_CENTURY;
+		break;
+	default:
+		break;
+	}
 
 	ds1307->msg[1].flags = 0;
 	ds1307->msg[1].len = 8;
@@ -170,7 +228,8 @@
 		"write", buf[0], buf[1], buf[2], buf[3],
 		buf[4], buf[5], buf[6]);
 
-	result = i2c_transfer(ds1307->client.adapter, &ds1307->msg[1], 1);
+	result = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent),
+			&ds1307->msg[1], 1);
 	if (result != 1) {
 		dev_err(dev, "%s error %d\n", "write", tmp);
 		return -EIO;
@@ -185,25 +244,29 @@
 
 static struct i2c_driver ds1307_driver;
 
-static int __devinit
-ds1307_detect(struct i2c_adapter *adapter, int address, int kind)
+static int __devinit ds1307_probe(struct i2c_client *client)
 {
 	struct ds1307		*ds1307;
 	int			err = -ENODEV;
-	struct i2c_client	*client;
 	int			tmp;
+	const struct chip_desc	*chip;
+	struct i2c_adapter	*adapter = to_i2c_adapter(client->dev.parent);
 
-	if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL))) {
-		err = -ENOMEM;
-		goto exit;
+	chip = find_chip(client->name);
+	if (!chip) {
+		dev_err(&client->dev, "unknown chip type '%s'\n",
+				client->name);
+		return -ENODEV;
 	}
 
-	client = &ds1307->client;
-	client->addr = address;
-	client->adapter = adapter;
-	client->driver = &ds1307_driver;
-	client->flags = 0;
+	if (!i2c_check_functionality(adapter,
+			I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+		return -EIO;
 
+	if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL)))
+		return -ENOMEM;
+
+	ds1307->client = client;
 	i2c_set_clientdata(client, ds1307);
 
 	ds1307->msg[0].addr = client->addr;
@@ -216,14 +279,16 @@
 	ds1307->msg[1].len = sizeof(ds1307->regs);
 	ds1307->msg[1].buf = ds1307->regs;
 
-	/* HACK: "force" implies "needs ds1337-style-oscillator setup" */
-	if (kind >= 0) {
-		ds1307->type = ds_1337;
+	ds1307->type = chip->type;
 
+	switch (ds1307->type) {
+	case ds_1337:
+	case ds_1339:
 		ds1307->reg_addr = DS1337_REG_CONTROL;
 		ds1307->msg[1].len = 2;
 
-		tmp = i2c_transfer(client->adapter, ds1307->msg, 2);
+		/* get registers that the "rtc" read below won't read... */
+		tmp = i2c_transfer(adapter, ds1307->msg, 2);
 		if (tmp != 2) {
 			pr_debug("read error %d\n", tmp);
 			err = -EIO;
@@ -233,19 +298,26 @@
 		ds1307->reg_addr = 0;
 		ds1307->msg[1].len = sizeof(ds1307->regs);
 
-		/* oscillator is off; need to turn it on */
-		if ((ds1307->regs[0] & DS1337_BIT_nEOSC)
-				|| (ds1307->regs[1] & DS1337_BIT_OSF)) {
-			printk(KERN_ERR "no ds1337 oscillator code\n");
-			goto exit_free;
+		/* oscillator off?  turn it on, so clock can tick. */
+		if (ds1307->regs[0] & DS1337_BIT_nEOSC)
+			i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL,
+				ds1307->regs[0] & ~DS1337_BIT_nEOSC);
+
+		/* oscillator fault?  clear flag, and warn */
+		if (ds1307->regs[1] & DS1337_BIT_OSF) {
+			i2c_smbus_write_byte_data(client, DS1337_REG_STATUS,
+				ds1307->regs[1] & ~DS1337_BIT_OSF);
+			dev_warn(&client->dev, "SET TIME!\n");
 		}
-	} else
-		ds1307->type = ds_1307;
+		break;
+	default:
+		break;
+	}
 
 read_rtc:
 	/* read RTC registers */
 
-	tmp = i2c_transfer(client->adapter, ds1307->msg, 2);
+	tmp = i2c_transfer(adapter, ds1307->msg, 2);
 	if (tmp != 2) {
 		pr_debug("read error %d\n", tmp);
 		err = -EIO;
@@ -257,119 +329,121 @@
 	 * still a few values that are clearly out-of-range.
 	 */
 	tmp = ds1307->regs[DS1307_REG_SECS];
-	if (tmp & DS1307_BIT_CH) {
-		if (ds1307->type && ds1307->type != ds_1307) {
-			pr_debug("not a ds1307?\n");
-			goto exit_free;
-		}
-		ds1307->type = ds_1307;
-
-		/* this partial initialization should work for ds1307,
-		 * ds1338, ds1340, st m41t00, and more.
+	switch (ds1307->type) {
+	case ds_1340:
+		/* FIXME read register with DS1340_BIT_OSF, use that to
+		 * trigger the "set time" warning (*after* restarting the
+		 * oscillator!) instead of this weaker ds1307/m41t00 test.
 		 */
-		dev_warn(&client->dev, "oscillator started; SET TIME!\n");
-		i2c_smbus_write_byte_data(client, 0, 0);
-		goto read_rtc;
+	case ds_1307:
+	case m41t00:
+		/* clock halted?  turn it on, so clock can tick. */
+		if (tmp & DS1307_BIT_CH) {
+			i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
+			dev_warn(&client->dev, "SET TIME!\n");
+			goto read_rtc;
+		}
+		break;
+	case ds_1338:
+		/* clock halted?  turn it on, so clock can tick. */
+		if (tmp & DS1307_BIT_CH)
+			i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
+
+		/* oscillator fault?  clear flag, and warn */
+		if (ds1307->regs[DS1307_REG_CONTROL] & DS1338_BIT_OSF) {
+			i2c_smbus_write_byte_data(client, DS1307_REG_CONTROL,
+					ds1307->regs[DS1337_REG_CONTROL]
+					& ~DS1338_BIT_OSF);
+			dev_warn(&client->dev, "SET TIME!\n");
+			goto read_rtc;
+		}
+		break;
+	case ds_1337:
+	case ds_1339:
+		break;
 	}
+
+	tmp = ds1307->regs[DS1307_REG_SECS];
 	tmp = BCD2BIN(tmp & 0x7f);
 	if (tmp > 60)
-		goto exit_free;
+		goto exit_bad;
 	tmp = BCD2BIN(ds1307->regs[DS1307_REG_MIN] & 0x7f);
 	if (tmp > 60)
-		goto exit_free;
+		goto exit_bad;
 
 	tmp = BCD2BIN(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
 	if (tmp == 0 || tmp > 31)
-		goto exit_free;
+		goto exit_bad;
 
 	tmp = BCD2BIN(ds1307->regs[DS1307_REG_MONTH] & 0x1f);
 	if (tmp == 0 || tmp > 12)
-		goto exit_free;
+		goto exit_bad;
 
-	/* force into in 24 hour mode (most chips) or
-	 * disable century bit (ds1340)
-	 */
 	tmp = ds1307->regs[DS1307_REG_HOUR];
-	if (tmp & (1 << 6)) {
-		if (tmp & (1 << 5))
-			tmp = BCD2BIN(tmp & 0x1f) + 12;
-		else
-			tmp = BCD2BIN(tmp);
+	switch (ds1307->type) {
+	case ds_1340:
+	case m41t00:
+		/* NOTE: ignores century bits; fix before deploying
+		 * systems that will run through year 2100.
+		 */
+		break;
+	default:
+		if (!(tmp & DS1307_BIT_12HR))
+			break;
+
+		/* Be sure we're in 24 hour mode.  Multi-master systems
+		 * take note...
+		 */
+		tmp = BCD2BIN(tmp & 0x1f);
+		if (tmp == 12)
+			tmp = 0;
+		if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
+			tmp += 12;
 		i2c_smbus_write_byte_data(client,
 				DS1307_REG_HOUR,
 				BIN2BCD(tmp));
 	}
 
-	/* FIXME chips like 1337 can generate alarm irqs too; those are
-	 * worth exposing through the API (especially when the irq is
-	 * wakeup-capable).
-	 */
-
-	switch (ds1307->type) {
-	case unknown:
-		strlcpy(client->name, "unknown", I2C_NAME_SIZE);
-		break;
-	case ds_1307:
-		strlcpy(client->name, "ds1307", I2C_NAME_SIZE);
-		break;
-	case ds_1337:
-		strlcpy(client->name, "ds1337", I2C_NAME_SIZE);
-		break;
-	case ds_1340:
-		strlcpy(client->name, "ds1340", I2C_NAME_SIZE);
-		break;
-	}
-
-	/* Tell the I2C layer a new client has arrived */
-	if ((err = i2c_attach_client(client)))
-		goto exit_free;
-
 	ds1307->rtc = rtc_device_register(client->name, &client->dev,
 				&ds13xx_rtc_ops, THIS_MODULE);
 	if (IS_ERR(ds1307->rtc)) {
 		err = PTR_ERR(ds1307->rtc);
 		dev_err(&client->dev,
 			"unable to register the class device\n");
-		goto exit_detach;
+		goto exit_free;
 	}
 
 	return 0;
 
-exit_detach:
-	i2c_detach_client(client);
+exit_bad:
+	dev_dbg(&client->dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
+			"bogus register",
+			ds1307->regs[0], ds1307->regs[1],
+			ds1307->regs[2], ds1307->regs[3],
+			ds1307->regs[4], ds1307->regs[5],
+			ds1307->regs[6]);
+
 exit_free:
 	kfree(ds1307);
-exit:
 	return err;
 }
 
-static int __devinit
-ds1307_attach_adapter(struct i2c_adapter *adapter)
+static int __devexit ds1307_remove(struct i2c_client *client)
 {
-	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
-		return 0;
-	return i2c_probe(adapter, &addr_data, ds1307_detect);
-}
-
-static int __devexit ds1307_detach_client(struct i2c_client *client)
-{
-	int		err;
 	struct ds1307	*ds1307 = i2c_get_clientdata(client);
 
 	rtc_device_unregister(ds1307->rtc);
-	if ((err = i2c_detach_client(client)))
-		return err;
 	kfree(ds1307);
 	return 0;
 }
 
 static struct i2c_driver ds1307_driver = {
 	.driver = {
-		.name	= "ds1307",
+		.name	= "rtc-ds1307",
 		.owner	= THIS_MODULE,
 	},
-	.attach_adapter	= ds1307_attach_adapter,
-	.detach_client	= __devexit_p(ds1307_detach_client),
+	.probe		= ds1307_probe,
+	.remove		= __devexit_p(ds1307_remove),
 };
 
 static int __init ds1307_init(void)
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
new file mode 100644
index 0000000..80c4a84
--- /dev/null
+++ b/drivers/rtc/rtc-m41t80.c
@@ -0,0 +1,917 @@
+/*
+ * I2C client/driver for the ST M41T80 family of i2c rtc chips.
+ *
+ * Author: Alexander Bigga <ab@mycable.de>
+ *
+ * Based on m41t00.c by Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2006 (c) mycable GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#ifdef CONFIG_RTC_DRV_M41T80_WDT
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/reboot.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#endif
+
+#define M41T80_REG_SSEC	0
+#define M41T80_REG_SEC	1
+#define M41T80_REG_MIN	2
+#define M41T80_REG_HOUR	3
+#define M41T80_REG_WDAY	4
+#define M41T80_REG_DAY	5
+#define M41T80_REG_MON	6
+#define M41T80_REG_YEAR	7
+#define M41T80_REG_ALARM_MON	0xa
+#define M41T80_REG_ALARM_DAY	0xb
+#define M41T80_REG_ALARM_HOUR	0xc
+#define M41T80_REG_ALARM_MIN	0xd
+#define M41T80_REG_ALARM_SEC	0xe
+#define M41T80_REG_FLAGS	0xf
+#define M41T80_REG_SQW	0x13
+
+#define M41T80_DATETIME_REG_SIZE	(M41T80_REG_YEAR + 1)
+#define M41T80_ALARM_REG_SIZE	\
+	(M41T80_REG_ALARM_SEC + 1 - M41T80_REG_ALARM_MON)
+
+#define M41T80_SEC_ST		(1 << 7)	/* ST: Stop Bit */
+#define M41T80_ALMON_AFE	(1 << 7)	/* AFE: AF Enable Bit */
+#define M41T80_ALMON_SQWE	(1 << 6)	/* SQWE: SQW Enable Bit */
+#define M41T80_ALHOUR_HT	(1 << 6)	/* HT: Halt Update Bit */
+#define M41T80_FLAGS_AF		(1 << 6)	/* AF: Alarm Flag Bit */
+#define M41T80_FLAGS_BATT_LOW	(1 << 4)	/* BL: Battery Low Bit */
+
+#define M41T80_FEATURE_HT	(1 << 0)
+#define M41T80_FEATURE_BL	(1 << 1)
+
+#define DRV_VERSION "0.05"
+
+struct m41t80_chip_info {
+	const char *name;
+	u8 features;
+};
+
+static const struct m41t80_chip_info m41t80_chip_info_tbl[] = {
+	{
+		.name		= "m41t80",
+		.features	= 0,
+	},
+	{
+		.name		= "m41t81",
+		.features	= M41T80_FEATURE_HT,
+	},
+	{
+		.name		= "m41t81s",
+		.features	= M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+	},
+	{
+		.name		= "m41t82",
+		.features	= M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+	},
+	{
+		.name		= "m41t83",
+		.features	= M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+	},
+	{
+		.name		= "m41st84",
+		.features	= M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+	},
+	{
+		.name		= "m41st85",
+		.features	= M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+	},
+	{
+		.name		= "m41st87",
+		.features	= M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+	},
+};
+
+struct m41t80_data {
+	const struct m41t80_chip_info *chip;
+	struct rtc_device *rtc;
+};
+
+static int m41t80_get_datetime(struct i2c_client *client,
+			       struct rtc_time *tm)
+{
+	u8 buf[M41T80_DATETIME_REG_SIZE], dt_addr[1] = { M41T80_REG_SEC };
+	struct i2c_msg msgs[] = {
+		{
+			.addr	= client->addr,
+			.flags	= 0,
+			.len	= 1,
+			.buf	= dt_addr,
+		},
+		{
+			.addr	= client->addr,
+			.flags	= I2C_M_RD,
+			.len	= M41T80_DATETIME_REG_SIZE - M41T80_REG_SEC,
+			.buf	= buf + M41T80_REG_SEC,
+		},
+	};
+
+	if (i2c_transfer(client->adapter, msgs, 2) < 0) {
+		dev_err(&client->dev, "read error\n");
+		return -EIO;
+	}
+
+	tm->tm_sec = BCD2BIN(buf[M41T80_REG_SEC] & 0x7f);
+	tm->tm_min = BCD2BIN(buf[M41T80_REG_MIN] & 0x7f);
+	tm->tm_hour = BCD2BIN(buf[M41T80_REG_HOUR] & 0x3f);
+	tm->tm_mday = BCD2BIN(buf[M41T80_REG_DAY] & 0x3f);
+	tm->tm_wday = buf[M41T80_REG_WDAY] & 0x07;
+	tm->tm_mon = BCD2BIN(buf[M41T80_REG_MON] & 0x1f) - 1;
+
+	/* assume 20YY not 19YY, and ignore the Century Bit */
+	tm->tm_year = BCD2BIN(buf[M41T80_REG_YEAR]) + 100;
+	return 0;
+}
+
+/* Sets the given date and time to the real time clock. */
+static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm)
+{
+	u8 wbuf[1 + M41T80_DATETIME_REG_SIZE];
+	u8 *buf = &wbuf[1];
+	u8 dt_addr[1] = { M41T80_REG_SEC };
+	struct i2c_msg msgs_in[] = {
+		{
+			.addr	= client->addr,
+			.flags	= 0,
+			.len	= 1,
+			.buf	= dt_addr,
+		},
+		{
+			.addr	= client->addr,
+			.flags	= I2C_M_RD,
+			.len	= M41T80_DATETIME_REG_SIZE - M41T80_REG_SEC,
+			.buf	= buf + M41T80_REG_SEC,
+		},
+	};
+	struct i2c_msg msgs[] = {
+		{
+			.addr	= client->addr,
+			.flags	= 0,
+			.len	= 1 + M41T80_DATETIME_REG_SIZE,
+			.buf	= wbuf,
+		 },
+	};
+
+	/* Read current reg values into buf[1..7] */
+	if (i2c_transfer(client->adapter, msgs_in, 2) < 0) {
+		dev_err(&client->dev, "read error\n");
+		return -EIO;
+	}
+
+	wbuf[0] = 0; /* offset into rtc's regs */
+	/* Merge time-data and register flags into buf[0..7] */
+	buf[M41T80_REG_SSEC] = 0;
+	buf[M41T80_REG_SEC] =
+		BIN2BCD(tm->tm_sec) | (buf[M41T80_REG_SEC] & ~0x7f);
+	buf[M41T80_REG_MIN] =
+		BIN2BCD(tm->tm_min) | (buf[M41T80_REG_MIN] & ~0x7f);
+	buf[M41T80_REG_HOUR] =
+		BIN2BCD(tm->tm_hour) | (buf[M41T80_REG_HOUR] & ~0x3f) ;
+	buf[M41T80_REG_WDAY] =
+		(tm->tm_wday & 0x07) | (buf[M41T80_REG_WDAY] & ~0x07);
+	buf[M41T80_REG_DAY] =
+		BIN2BCD(tm->tm_mday) | (buf[M41T80_REG_DAY] & ~0x3f);
+	buf[M41T80_REG_MON] =
+		BIN2BCD(tm->tm_mon + 1) | (buf[M41T80_REG_MON] & ~0x1f);
+	/* assume 20YY not 19YY */
+	buf[M41T80_REG_YEAR] = BIN2BCD(tm->tm_year % 100);
+
+	if (i2c_transfer(client->adapter, msgs, 1) != 1) {
+		dev_err(&client->dev, "write error\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
+static int m41t80_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct m41t80_data *clientdata = i2c_get_clientdata(client);
+	u8 reg;
+
+	if (clientdata->chip->features & M41T80_FEATURE_BL) {
+		reg = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
+		seq_printf(seq, "battery\t\t: %s\n",
+			   (reg & M41T80_FLAGS_BATT_LOW) ? "exhausted" : "ok");
+	}
+	return 0;
+}
+#else
+#define m41t80_rtc_proc NULL
+#endif
+
+static int m41t80_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	return m41t80_get_datetime(to_i2c_client(dev), tm);
+}
+
+static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	return m41t80_set_datetime(to_i2c_client(dev), tm);
+}
+
+#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
+static int
+m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	int rc;
+
+	switch (cmd) {
+	case RTC_AIE_OFF:
+	case RTC_AIE_ON:
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
+	if (rc < 0)
+		goto err;
+	switch (cmd) {
+	case RTC_AIE_OFF:
+		rc &= ~M41T80_ALMON_AFE;
+		break;
+	case RTC_AIE_ON:
+		rc |= M41T80_ALMON_AFE;
+		break;
+	}
+	if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0)
+		goto err;
+	return 0;
+err:
+	return -EIO;
+}
+#else
+#define	m41t80_rtc_ioctl NULL
+#endif
+
+static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	u8 wbuf[1 + M41T80_ALARM_REG_SIZE];
+	u8 *buf = &wbuf[1];
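+	/*
+	 * reg[] is offset so the alarm registers can be indexed by their
+	 * chip register numbers: reg[M41T80_REG_ALARM_MON] is buf[0].
+	 */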
+	u8 *reg = buf - M41T80_REG_ALARM_MON;
+	u8 dt_addr[1] = { M41T80_REG_ALARM_MON };
+	struct i2c_msg msgs_in[] = {
+		{
+			.addr	= client->addr,
+			.flags	= 0,
+			.len	= 1,
+			.buf	= dt_addr,
+		},
+		{
+			.addr	= client->addr,
+			.flags	= I2C_M_RD,
+			.len	= M41T80_ALARM_REG_SIZE,
+			.buf	= buf,
+		},
+	};
+	struct i2c_msg msgs[] = {
+		{
+			.addr	= client->addr,
+			.flags	= 0,
+			.len	= 1 + M41T80_ALARM_REG_SIZE,
+			.buf	= wbuf,
+		 },
+	};
+
+	if (i2c_transfer(client->adapter, msgs_in, 2) < 0) {
+		dev_err(&client->dev, "read error\n");
+		return -EIO;
+	}
+	reg[M41T80_REG_ALARM_MON] &= ~(0x1f | M41T80_ALMON_AFE);
+	reg[M41T80_REG_ALARM_DAY] = 0;
+	reg[M41T80_REG_ALARM_HOUR] &= ~(0x3f | 0x80);
+	reg[M41T80_REG_ALARM_MIN] = 0;
+	reg[M41T80_REG_ALARM_SEC] = 0;
+
+	wbuf[0] = M41T80_REG_ALARM_MON; /* offset into rtc's regs */
+	reg[M41T80_REG_ALARM_SEC] |= t->time.tm_sec >= 0 ?
+		BIN2BCD(t->time.tm_sec) : 0x80;
+	reg[M41T80_REG_ALARM_MIN] |= t->time.tm_min >= 0 ?
+		BIN2BCD(t->time.tm_min) : 0x80;
+	reg[M41T80_REG_ALARM_HOUR] |= t->time.tm_hour >= 0 ?
+		BIN2BCD(t->time.tm_hour) : 0x80;
+	reg[M41T80_REG_ALARM_DAY] |= t->time.tm_mday >= 0 ?
+		BIN2BCD(t->time.tm_mday) : 0x80;
+	if (t->time.tm_mon >= 0)
+		reg[M41T80_REG_ALARM_MON] |= BIN2BCD(t->time.tm_mon + 1);
+	else
+		reg[M41T80_REG_ALARM_DAY] |= 0x40;
+
+	if (i2c_transfer(client->adapter, msgs, 1) != 1) {
+		dev_err(&client->dev, "write error\n");
+		return -EIO;
+	}
+
+	if (t->enabled) {
+		reg[M41T80_REG_ALARM_MON] |= M41T80_ALMON_AFE;
+		if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
+					      reg[M41T80_REG_ALARM_MON]) < 0) {
+			dev_err(&client->dev, "write error\n");
+			return -EIO;
+		}
+	}
+	return 0;
+}
+
+static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	u8 buf[M41T80_ALARM_REG_SIZE + 1]; /* all alarm regs and flags */
+	u8 dt_addr[1] = { M41T80_REG_ALARM_MON };
+	u8 *reg = buf - M41T80_REG_ALARM_MON;
+	struct i2c_msg msgs[] = {
+		{
+			.addr	= client->addr,
+			.flags	= 0,
+			.len	= 1,
+			.buf	= dt_addr,
+		},
+		{
+			.addr	= client->addr,
+			.flags	= I2C_M_RD,
+			.len	= M41T80_ALARM_REG_SIZE + 1,
+			.buf	= buf,
+		},
+	};
+
+	if (i2c_transfer(client->adapter, msgs, 2) < 0) {
+		dev_err(&client->dev, "read error\n");
+		return -EIO;
+	}
+	t->time.tm_sec = -1;
+	t->time.tm_min = -1;
+	t->time.tm_hour = -1;
+	t->time.tm_mday = -1;
+	t->time.tm_mon = -1;
+	if (!(reg[M41T80_REG_ALARM_SEC] & 0x80))
+		t->time.tm_sec = BCD2BIN(reg[M41T80_REG_ALARM_SEC] & 0x7f);
+	if (!(reg[M41T80_REG_ALARM_MIN] & 0x80))
+		t->time.tm_min = BCD2BIN(reg[M41T80_REG_ALARM_MIN] & 0x7f);
+	if (!(reg[M41T80_REG_ALARM_HOUR] & 0x80))
+		t->time.tm_hour = BCD2BIN(reg[M41T80_REG_ALARM_HOUR] & 0x3f);
+	if (!(reg[M41T80_REG_ALARM_DAY] & 0x80))
+		t->time.tm_mday = BCD2BIN(reg[M41T80_REG_ALARM_DAY] & 0x3f);
+	if (!(reg[M41T80_REG_ALARM_DAY] & 0x40))
+		t->time.tm_mon = BCD2BIN(reg[M41T80_REG_ALARM_MON] & 0x1f) - 1;
+	t->time.tm_year = -1;
+	t->time.tm_wday = -1;
+	t->time.tm_yday = -1;
+	t->time.tm_isdst = -1;
+	t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
+	t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
+	return 0;
+}
+
+static struct rtc_class_ops m41t80_rtc_ops = {
+	.read_time = m41t80_rtc_read_time,
+	.set_time = m41t80_rtc_set_time,
+	.read_alarm = m41t80_rtc_read_alarm,
+	.set_alarm = m41t80_rtc_set_alarm,
+	.proc = m41t80_rtc_proc,
+	.ioctl = m41t80_rtc_ioctl,
+};
+
+#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
+static ssize_t m41t80_sysfs_show_flags(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	int val;
+
+	val = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
+	if (val < 0)
+		return -EIO;
+	return sprintf(buf, "%#x\n", val);
+}
+static DEVICE_ATTR(flags, S_IRUGO, m41t80_sysfs_show_flags, NULL);
+
+static ssize_t m41t80_sysfs_show_sqwfreq(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	int val;
+
+	val = i2c_smbus_read_byte_data(client, M41T80_REG_SQW);
+	if (val < 0)
+		return -EIO;
+	val = (val >> 4) & 0xf;
+	switch (val) {
+	case 0:
+		break;
+	case 1:
+		val = 32768;
+		break;
+	default:
+		val = 32768 >> val;
+	}
+	return sprintf(buf, "%d\n", val);
+}
+static ssize_t m41t80_sysfs_set_sqwfreq(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	int almon, sqw;
+	int val = simple_strtoul(buf, NULL, 0);
+
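+	/*
+	 * Map the frequency to the upper nibble of the SQW register: 0 = off,
+	 * 32768 Hz = 1, 8192 Hz..1 Hz = 15 - log2(freq); 16384 Hz is unsupported.
+	 */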
+	if (val) {
+		if (!is_power_of_2(val))
+			return -EINVAL;
+		val = ilog2(val);
+		if (val == 15)
+			val = 1;
+		else if (val < 14)
+			val = 15 - val;
+		else
+			return -EINVAL;
+	}
+	/* disable SQW, set SQW frequency & re-enable */
+	almon = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
+	if (almon < 0)
+		return -EIO;
+	sqw = i2c_smbus_read_byte_data(client, M41T80_REG_SQW);
+	if (sqw < 0)
+		return -EIO;
+	sqw = (sqw & 0x0f) | (val << 4);
+	if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
+				      almon & ~M41T80_ALMON_SQWE) < 0 ||
+	    i2c_smbus_write_byte_data(client, M41T80_REG_SQW, sqw) < 0)
+		return -EIO;
+	if (val && i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
+					     almon | M41T80_ALMON_SQWE) < 0)
+		return -EIO;
+	return count;
+}
+static DEVICE_ATTR(sqwfreq, S_IRUGO | S_IWUSR,
+		   m41t80_sysfs_show_sqwfreq, m41t80_sysfs_set_sqwfreq);
+
+static struct attribute *attrs[] = {
+	&dev_attr_flags.attr,
+	&dev_attr_sqwfreq.attr,
+	NULL,
+};
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+static int m41t80_sysfs_register(struct device *dev)
+{
+	return sysfs_create_group(&dev->kobj, &attr_group);
+}
+#else
+static int m41t80_sysfs_register(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_RTC_DRV_M41T80_WDT
+/*
+ *****************************************************************************
+ *
+ * Watchdog Driver
+ *
+ *****************************************************************************
+ */
+static struct i2c_client *save_client;
+
+/* Default margin */
+#define WD_TIMO 60		/* default timeout in seconds; 1..31s uses 1s resolution */
+
+static int wdt_margin = WD_TIMO;
+module_param(wdt_margin, int, 0);
+MODULE_PARM_DESC(wdt_margin, "Watchdog timeout in seconds (default 60s)");
+
+static unsigned long wdt_is_open;
+static int boot_flag;
+
+/**
+ *	wdt_ping:
+ *
+ *	Reload the watchdog timer with the configured timeout.
+ *	the cascade counter.
+ */
+static void wdt_ping(void)
+{
+	unsigned char i2c_data[2];
+	struct i2c_msg msgs1[1] = {
+		{
+			.addr	= save_client->addr,
+			.flags	= 0,
+			.len	= 2,
+			.buf	= i2c_data,
+		},
+	};
+	i2c_data[0] = 0x09;		/* watchdog register */
+
+	if (wdt_margin > 31)
+		i2c_data[1] = (wdt_margin & 0xFC) | 0x83; /* resolution = 4s */
+	else
+		/*
+		 * WDS = 1 (0x80), multiplier = wdt_margin, resolution = 1s (0x02)
+		 */
+		i2c_data[1] = wdt_margin<<2 | 0x82;
+
+	i2c_transfer(save_client->adapter, msgs1, 1);
+}
+
+/**
+ *	wdt_disable:
+ *
+ *	disables watchdog.
+ */
+static void wdt_disable(void)
+{
+	unsigned char i2c_data[2], i2c_buf[0x10];
+	struct i2c_msg msgs0[2] = {
+		{
+			.addr	= save_client->addr,
+			.flags	= 0,
+			.len	= 1,
+			.buf	= i2c_data,
+		},
+		{
+			.addr	= save_client->addr,
+			.flags	= I2C_M_RD,
+			.len	= 1,
+			.buf	= i2c_buf,
+		},
+	};
+	struct i2c_msg msgs1[1] = {
+		{
+			.addr	= save_client->addr,
+			.flags	= 0,
+			.len	= 2,
+			.buf	= i2c_data,
+		},
+	};
+
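+	/* read back the watchdog register (0x09), then write 0 to stop the timer */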
+	i2c_data[0] = 0x09;
+	i2c_transfer(save_client->adapter, msgs0, 2);
+
+	i2c_data[0] = 0x09;
+	i2c_data[1] = 0x00;
+	i2c_transfer(save_client->adapter, msgs1, 1);
+}
+
+/**
+ *	wdt_write:
+ *	@file: file handle to the watchdog
+ *	@buf: buffer to write (unused, as the data does not matter here)
+ *	@count: count of bytes
+ *	@ppos: pointer to the position to write. No seeks allowed
+ *
+ *	A write to a watchdog device is defined as a keepalive signal. Any
+ *	write of data will do, as we don't define content meaning.
+ */
+static ssize_t wdt_write(struct file *file, const char __user *buf,
+			 size_t count, loff_t *ppos)
+{
+	/*  Can't seek (pwrite) on this device
+	if (ppos != &file->f_pos)
+	return -ESPIPE;
+	*/
+	if (count) {
+		wdt_ping();
+		return 1;
+	}
+	return 0;
+}
+
+static ssize_t wdt_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	return 0;
+}
+
+/**
+ *	wdt_ioctl:
+ *	@inode: inode of the device
+ *	@file: file handle to the device
+ *	@cmd: watchdog command
+ *	@arg: argument pointer
+ *
+ *	The watchdog API defines a common set of functions for all watchdogs
+ *	according to their available features. We only usefully support
+ *	querying capabilities and the current status.
+ */
+static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+		     unsigned long arg)
+{
+	int new_margin, rv;
+	static struct watchdog_info ident = {
+		.options = WDIOF_POWERUNDER | WDIOF_KEEPALIVEPING |
+			WDIOF_SETTIMEOUT,
+		.firmware_version = 1,
+		.identity = "M41T80 WDT"
+	};
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		return copy_to_user((struct watchdog_info __user *)arg, &ident,
+				    sizeof(ident)) ? -EFAULT : 0;
+
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(boot_flag, (int __user *)arg);
+	case WDIOC_KEEPALIVE:
+		wdt_ping();
+		return 0;
+	case WDIOC_SETTIMEOUT:
+		if (get_user(new_margin, (int __user *)arg))
+			return -EFAULT;
+		/* Arbitrary, can't find the card's limits */
+		if (new_margin < 1 || new_margin > 124)
+			return -EINVAL;
+		wdt_margin = new_margin;
+		wdt_ping();
+		/* Fall through */
+	case WDIOC_GETTIMEOUT:
+		return put_user(wdt_margin, (int __user *)arg);
+
+	case WDIOC_SETOPTIONS:
+		if (copy_from_user(&rv, (int __user *)arg, sizeof(int)))
+			return -EFAULT;
+
+		if (rv & WDIOS_DISABLECARD) {
+			printk(KERN_INFO
+			       "rtc-m41t80: disable watchdog\n");
+			wdt_disable();
+		}
+
+		if (rv & WDIOS_ENABLECARD) {
+			printk(KERN_INFO
+			       "rtc-m41t80: enable watchdog\n");
+			wdt_ping();
+		}
+
+		return -EINVAL;
+	}
+	return -ENOTTY;
+}
+
+/**
+ *	wdt_open:
+ *	@inode: inode of device
+ *	@file: file handle to device
+ *
+ */
+static int wdt_open(struct inode *inode, struct file *file)
+{
+	if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) {
+		if (test_and_set_bit(0, &wdt_is_open))
+			return -EBUSY;
+		/*
+		 *	Activate
+		 */
+		wdt_is_open = 1;
+		return 0;
+	}
+	return -ENODEV;
+}
+
+/**
+ *	wdt_release:
+ *	@inode: inode of the device
+ *	@file: file handle to the device
+ *
+ */
+static int wdt_release(struct inode *inode, struct file *file)
+{
+	if (MINOR(inode->i_rdev) == WATCHDOG_MINOR)
+		clear_bit(0, &wdt_is_open);
+	return 0;
+}
+
+/**
+ *	wdt_notify_sys:
+ *	@this: our notifier block
+ *	@code: the event being reported
+ *	@unused: unused
+ *
+ *	Our notifier is called on system shutdowns. We want to turn the card
+ *	off at reboot otherwise the machine will reboot again during memory
+ *	test or worse yet during the following fsck. This would suck, in fact
+ *	trust me - if it happens it does suck.
+ */
+static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
+			  void *unused)
+{
+	if (code == SYS_DOWN || code == SYS_HALT)
+		/* Disable Watchdog */
+		wdt_disable();
+	return NOTIFY_DONE;
+}
+
+static const struct file_operations wdt_fops = {
+	.owner	= THIS_MODULE,
+	.read	= wdt_read,
+	.ioctl	= wdt_ioctl,
+	.write	= wdt_write,
+	.open	= wdt_open,
+	.release = wdt_release,
+};
+
+static struct miscdevice wdt_dev = {
+	.minor = WATCHDOG_MINOR,
+	.name = "watchdog",
+	.fops = &wdt_fops,
+};
+
+/*
+ *	The WDT card needs to learn about soft shutdowns in order to
+ *	turn the timebomb registers off.
+ */
+static struct notifier_block wdt_notifier = {
+	.notifier_call = wdt_notify_sys,
+};
+#endif /* CONFIG_RTC_DRV_M41T80_WDT */
+
+/*
+ *****************************************************************************
+ *
+ *	Driver Interface
+ *
+ *****************************************************************************
+ */
+static int m41t80_probe(struct i2c_client *client)
+{
+	int i, rc = 0;
+	struct rtc_device *rtc = NULL;
+	struct rtc_time tm;
+	const struct m41t80_chip_info *chip;
+	struct m41t80_data *clientdata = NULL;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C
+				     | I2C_FUNC_SMBUS_BYTE_DATA)) {
+		rc = -ENODEV;
+		goto exit;
+	}
+
+	dev_info(&client->dev,
+		 "chip found, driver version " DRV_VERSION "\n");
+
+	chip = NULL;
+	for (i = 0; i < ARRAY_SIZE(m41t80_chip_info_tbl); i++) {
+		if (!strcmp(m41t80_chip_info_tbl[i].name, client->name)) {
+			chip = &m41t80_chip_info_tbl[i];
+			break;
+		}
+	}
+	if (!chip) {
+		dev_err(&client->dev, "%s is not supported\n", client->name);
+		rc = -ENODEV;
+		goto exit;
+	}
+
+	clientdata = kzalloc(sizeof(*clientdata), GFP_KERNEL);
+	if (!clientdata) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	rtc = rtc_device_register(client->name, &client->dev,
+				  &m41t80_rtc_ops, THIS_MODULE);
+	if (IS_ERR(rtc)) {
+		rc = PTR_ERR(rtc);
+		rtc = NULL;
+		goto exit;
+	}
+
+	clientdata->rtc = rtc;
+	clientdata->chip = chip;
+	i2c_set_clientdata(client, clientdata);
+
+	/* Make sure HT (Halt Update) bit is cleared */
+	rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
+	if (rc < 0)
+		goto ht_err;
+
+	if (rc & M41T80_ALHOUR_HT) {
+		if (chip->features & M41T80_FEATURE_HT) {
+			m41t80_get_datetime(client, &tm);
+			dev_info(&client->dev, "HT bit was set!\n");
+			dev_info(&client->dev,
+				 "Power Down at "
+				 "%04i-%02i-%02i %02i:%02i:%02i\n",
+				 tm.tm_year + 1900,
+				 tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
+				 tm.tm_min, tm.tm_sec);
+		}
+		if (i2c_smbus_write_byte_data(client,
+					      M41T80_REG_ALARM_HOUR,
+					      rc & ~M41T80_ALHOUR_HT) < 0)
+			goto ht_err;
+	}
+
+	/* Make sure ST (stop) bit is cleared */
+	rc = i2c_smbus_read_byte_data(client, M41T80_REG_SEC);
+	if (rc < 0)
+		goto st_err;
+
+	if (rc & M41T80_SEC_ST) {
+		if (i2c_smbus_write_byte_data(client, M41T80_REG_SEC,
+					      rc & ~M41T80_SEC_ST) < 0)
+			goto st_err;
+	}
+
+	rc = m41t80_sysfs_register(&client->dev);
+	if (rc)
+		goto exit;
+
+#ifdef CONFIG_RTC_DRV_M41T80_WDT
+	if (chip->features & M41T80_FEATURE_HT) {
+		rc = misc_register(&wdt_dev);
+		if (rc)
+			goto exit;
+		rc = register_reboot_notifier(&wdt_notifier);
+		if (rc) {
+			misc_deregister(&wdt_dev);
+			goto exit;
+		}
+		save_client = client;
+	}
+#endif
+	return 0;
+
+st_err:
+	rc = -EIO;
+	dev_err(&client->dev, "Can't clear ST bit\n");
+	goto exit;
+ht_err:
+	rc = -EIO;
+	dev_err(&client->dev, "Can't clear HT bit\n");
+	goto exit;
+
+exit:
+	if (rtc)
+		rtc_device_unregister(rtc);
+	kfree(clientdata);
+	return rc;
+}
+
+static int m41t80_remove(struct i2c_client *client)
+{
+	struct m41t80_data *clientdata = i2c_get_clientdata(client);
+	struct rtc_device *rtc = clientdata->rtc;
+
+#ifdef CONFIG_RTC_DRV_M41T80_WDT
+	if (clientdata->chip->features & M41T80_FEATURE_HT) {
+		misc_deregister(&wdt_dev);
+		unregister_reboot_notifier(&wdt_notifier);
+	}
+#endif
+	if (rtc)
+		rtc_device_unregister(rtc);
+	kfree(clientdata);
+
+	return 0;
+}
+
+static struct i2c_driver m41t80_driver = {
+	.driver = {
+		.name = "m41t80",
+	},
+	.probe = m41t80_probe,
+	.remove = m41t80_remove,
+};
+
+static int __init m41t80_rtc_init(void)
+{
+	return i2c_add_driver(&m41t80_driver);
+}
+
+static void __exit m41t80_rtc_exit(void)
+{
+	i2c_del_driver(&m41t80_driver);
+}
+
+MODULE_AUTHOR("Alexander Bigga <ab@mycable.de>");
+MODULE_DESCRIPTION("ST Microelectronics M41T80 series RTC I2C Client Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(m41t80_rtc_init);
+module_exit(m41t80_rtc_exit);
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
new file mode 100644
index 0000000..33b7523
--- /dev/null
+++ b/drivers/rtc/rtc-m48t59.c
@@ -0,0 +1,491 @@
+/*
+ * ST M48T59 RTC driver
+ *
+ * Copyright (c) 2007 Wind River Systems, Inc.
+ *
+ * Author: Mark Zhan <rongkai.zhan@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/rtc/m48t59.h>
+#include <linux/bcd.h>
+
+#ifndef NO_IRQ
+#define NO_IRQ	(-1)
+#endif
+
+#define M48T59_READ(reg)	pdata->read_byte(dev, reg)
+#define M48T59_WRITE(val, reg)	pdata->write_byte(dev, reg, val)
+
+#define M48T59_SET_BITS(mask, reg)	\
+	M48T59_WRITE((M48T59_READ(reg) | (mask)), (reg))
+#define M48T59_CLEAR_BITS(mask, reg)	\
+	M48T59_WRITE((M48T59_READ(reg) & ~(mask)), (reg))
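+/* These helpers expect 'pdata' and 'dev' to be in scope in the calling function */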
+
+struct m48t59_private {
+	void __iomem *ioaddr;
+	unsigned int size; /* iomem size */
+	unsigned int irq;
+	struct rtc_device *rtc;
+	spinlock_t lock; /* serialize the NVRAM and RTC access */
+};
+
+/*
+ * This is the generic access method when the chip is memory-mapped
+ */
+static void
+m48t59_mem_writeb(struct device *dev, u32 ofs, u8 val)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+
+	writeb(val, m48t59->ioaddr+ofs);
+}
+
+static u8
+m48t59_mem_readb(struct device *dev, u32 ofs)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+
+	return readb(m48t59->ioaddr+ofs);
+}
+
+/*
+ * NOTE: M48T59 only uses BCD mode
+ */
+static int m48t59_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+	unsigned long flags;
+	u8 val;
+
+	spin_lock_irqsave(&m48t59->lock, flags);
+	/* Issue the READ command */
+	M48T59_SET_BITS(M48T59_CNTL_READ, M48T59_CNTL);
+
+	tm->tm_year	= BCD2BIN(M48T59_READ(M48T59_YEAR));
+	/* tm_mon is 0-11 */
+	tm->tm_mon	= BCD2BIN(M48T59_READ(M48T59_MONTH)) - 1;
+	tm->tm_mday	= BCD2BIN(M48T59_READ(M48T59_MDAY));
+
+	val = M48T59_READ(M48T59_WDAY);
+	if ((val & M48T59_WDAY_CEB) && (val & M48T59_WDAY_CB)) {
+		dev_dbg(dev, "Century bit is enabled\n");
+		tm->tm_year += 100;	/* one century */
+	}
+
+	tm->tm_wday	= BCD2BIN(val & 0x07);
+	tm->tm_hour	= BCD2BIN(M48T59_READ(M48T59_HOUR) & 0x3F);
+	tm->tm_min	= BCD2BIN(M48T59_READ(M48T59_MIN) & 0x7F);
+	tm->tm_sec	= BCD2BIN(M48T59_READ(M48T59_SEC) & 0x7F);
+
+	/* Clear the READ bit */
+	M48T59_CLEAR_BITS(M48T59_CNTL_READ, M48T59_CNTL);
+	spin_unlock_irqrestore(&m48t59->lock, flags);
+
+	dev_dbg(dev, "RTC read time %04d-%02d-%02d %02d/%02d/%02d\n",
+		tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
+		tm->tm_hour, tm->tm_min, tm->tm_sec);
+	return 0;
+}
+
+static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+	unsigned long flags;
+	u8 val = 0;
+
+	dev_dbg(dev, "RTC set time %04d-%02d-%02d %02d/%02d/%02d\n",
+		tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
+		tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+	spin_lock_irqsave(&m48t59->lock, flags);
+	/* Issue the WRITE command */
+	M48T59_SET_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
+
+	M48T59_WRITE((BIN2BCD(tm->tm_sec) & 0x7F), M48T59_SEC);
+	M48T59_WRITE((BIN2BCD(tm->tm_min) & 0x7F), M48T59_MIN);
+	M48T59_WRITE((BIN2BCD(tm->tm_hour) & 0x3F), M48T59_HOUR);
+	M48T59_WRITE((BIN2BCD(tm->tm_mday) & 0x3F), M48T59_MDAY);
+	/* tm_mon is 0-11 */
+	M48T59_WRITE((BIN2BCD(tm->tm_mon + 1) & 0x1F), M48T59_MONTH);
+	M48T59_WRITE(BIN2BCD(tm->tm_year % 100), M48T59_YEAR);
+
+	if (tm->tm_year/100)
+		val = (M48T59_WDAY_CEB | M48T59_WDAY_CB);
+	val |= (BIN2BCD(tm->tm_wday) & 0x07);
+	M48T59_WRITE(val, M48T59_WDAY);
+
+	/* Clear the WRITE bit */
+	M48T59_CLEAR_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
+	spin_unlock_irqrestore(&m48t59->lock, flags);
+	return 0;
+}
+
+/*
+ * Read alarm time and date in RTC
+ */
+static int m48t59_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+	struct rtc_time *tm = &alrm->time;
+	unsigned long flags;
+	u8 val;
+
+	/* If no irq, we don't support ALARM */
+	if (m48t59->irq == NO_IRQ)
+		return -EIO;
+
+	spin_lock_irqsave(&m48t59->lock, flags);
+	/* Issue the READ command */
+	M48T59_SET_BITS(M48T59_CNTL_READ, M48T59_CNTL);
+
+	tm->tm_year = BCD2BIN(M48T59_READ(M48T59_YEAR));
+	/* tm_mon is 0-11 */
+	tm->tm_mon = BCD2BIN(M48T59_READ(M48T59_MONTH)) - 1;
+
+	val = M48T59_READ(M48T59_WDAY);
+	if ((val & M48T59_WDAY_CEB) && (val & M48T59_WDAY_CB))
+		tm->tm_year += 100;	/* one century */
+
+	tm->tm_mday = BCD2BIN(M48T59_READ(M48T59_ALARM_DATE));
+	tm->tm_hour = BCD2BIN(M48T59_READ(M48T59_ALARM_HOUR));
+	tm->tm_min = BCD2BIN(M48T59_READ(M48T59_ALARM_MIN));
+	tm->tm_sec = BCD2BIN(M48T59_READ(M48T59_ALARM_SEC));
+
+	/* Clear the READ bit */
+	M48T59_CLEAR_BITS(M48T59_CNTL_READ, M48T59_CNTL);
+	spin_unlock_irqrestore(&m48t59->lock, flags);
+
+	dev_dbg(dev, "RTC read alarm time %04d-%02d-%02d %02d/%02d/%02d\n",
+		tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
+		tm->tm_hour, tm->tm_min, tm->tm_sec);
+	return 0;
+}
+
+/*
+ * Set alarm time and date in RTC
+ */
+static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+	struct rtc_time *tm = &alrm->time;
+	u8 mday, hour, min, sec;
+	unsigned long flags;
+
+	/* If no irq, we don't support ALARM */
+	if (m48t59->irq == NO_IRQ)
+		return -EIO;
+
+	/*
+	 * 0xff means "always match"
+	 */
+	mday = tm->tm_mday;
+	mday = (mday >= 1 && mday <= 31) ? BIN2BCD(mday) : 0xff;
+	if (mday == 0xff)
+		mday = M48T59_READ(M48T59_MDAY);
+
+	hour = tm->tm_hour;
+	hour = (hour < 24) ? BIN2BCD(hour) : 0x00;
+
+	min = tm->tm_min;
+	min = (min < 60) ? BIN2BCD(min) : 0x00;
+
+	sec = tm->tm_sec;
+	sec = (sec < 60) ? BIN2BCD(sec) : 0x00;
+
+	spin_lock_irqsave(&m48t59->lock, flags);
+	/* Issue the WRITE command */
+	M48T59_SET_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
+
+	M48T59_WRITE(mday, M48T59_ALARM_DATE);
+	M48T59_WRITE(hour, M48T59_ALARM_HOUR);
+	M48T59_WRITE(min, M48T59_ALARM_MIN);
+	M48T59_WRITE(sec, M48T59_ALARM_SEC);
+
+	/* Clear the WRITE bit */
+	M48T59_CLEAR_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
+	spin_unlock_irqrestore(&m48t59->lock, flags);
+
+	dev_dbg(dev, "RTC set alarm time %04d-%02d-%02d %02d/%02d/%02d\n",
+		tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
+		tm->tm_hour, tm->tm_min, tm->tm_sec);
+	return 0;
+}
+
+/*
+ * Handle commands from user-space
+ */
+static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd,
+			unsigned long arg)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&m48t59->lock, flags);
+	switch (cmd) {
+	case RTC_AIE_OFF:	/* alarm interrupt off */
+		M48T59_WRITE(0x00, M48T59_INTR);
+		break;
+	case RTC_AIE_ON:	/* alarm interrupt on */
+		M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR);
+		break;
+	default:
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	spin_unlock_irqrestore(&m48t59->lock, flags);
+
+	return ret;
+}
+
+static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+	unsigned long flags;
+	u8 val;
+
+	spin_lock_irqsave(&m48t59->lock, flags);
+	val = M48T59_READ(M48T59_FLAGS);
+	spin_unlock_irqrestore(&m48t59->lock, flags);
+
+	seq_printf(seq, "battery\t\t: %s\n",
+		 (val & M48T59_FLAGS_BF) ? "low" : "normal");
+	return 0;
+}
+
+/*
+ * IRQ handler for the RTC
+ */
+static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id)
+{
+	struct device *dev = (struct device *)dev_id;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+	u8 event;
+
+	spin_lock(&m48t59->lock);
+	event = M48T59_READ(M48T59_FLAGS);
+	spin_unlock(&m48t59->lock);
+
+	if (event & M48T59_FLAGS_AF) {
+		rtc_update_irq(m48t59->rtc, 1, (RTC_AF | RTC_IRQF));
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static const struct rtc_class_ops m48t59_rtc_ops = {
+	.ioctl		= m48t59_rtc_ioctl,
+	.read_time	= m48t59_rtc_read_time,
+	.set_time	= m48t59_rtc_set_time,
+	.read_alarm	= m48t59_rtc_readalarm,
+	.set_alarm	= m48t59_rtc_setalarm,
+	.proc		= m48t59_rtc_proc,
+};
+
+static ssize_t m48t59_nvram_read(struct kobject *kobj,
+				struct bin_attribute *bin_attr,
+				char *buf, loff_t pos, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+	ssize_t cnt = 0;
+	unsigned long flags;
+
+	for (; size > 0 && pos < M48T59_NVRAM_SIZE; cnt++, size--) {
+		spin_lock_irqsave(&m48t59->lock, flags);
+		*buf++ = M48T59_READ(cnt);
+		spin_unlock_irqrestore(&m48t59->lock, flags);
+	}
+
+	return cnt;
+}
+
+static ssize_t m48t59_nvram_write(struct kobject *kobj,
+				struct bin_attribute *bin_attr,
+				char *buf, loff_t pos, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+	ssize_t cnt = 0;
+	unsigned long flags;
+
+	for (; size > 0 && pos < M48T59_NVRAM_SIZE; cnt++, size--) {
+		spin_lock_irqsave(&m48t59->lock, flags);
+		M48T59_WRITE(*buf++, cnt);
+		spin_unlock_irqrestore(&m48t59->lock, flags);
+	}
+
+	return cnt;
+}
+
+static struct bin_attribute m48t59_nvram_attr = {
+	.attr = {
+		.name = "nvram",
+		.mode = S_IRUGO | S_IWUGO,
+		.owner = THIS_MODULE,
+	},
+	.read = m48t59_nvram_read,
+	.write = m48t59_nvram_write,
+};
+
+static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
+{
+	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+	struct m48t59_private *m48t59 = NULL;
+	struct resource *res;
+	int ret = -ENOMEM;
+
+	/* This chip could be memory-mapped or I/O-mapped */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+		if (!res)
+			return -EINVAL;
+	}
+
+	if (res->flags & IORESOURCE_IO) {
+		/* If we are I/O-mapped, the platform should provide
+		 * the operations for accessing the chip registers.
+		 */
+		if (!pdata || !pdata->write_byte || !pdata->read_byte)
+			return -EINVAL;
+	} else if (res->flags & IORESOURCE_MEM) {
+		/* we are memory-mapped */
+		if (!pdata) {
+			pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+			if (!pdata)
+				return -ENOMEM;
+			/* Ensure we only kmalloc platform data once */
+			pdev->dev.platform_data = pdata;
+		}
+
+		/* Try to use the generic memory read/write ops */
+		if (!pdata->write_byte)
+			pdata->write_byte = m48t59_mem_writeb;
+		if (!pdata->read_byte)
+			pdata->read_byte = m48t59_mem_readb;
+	}
+
+	m48t59 = kzalloc(sizeof(*m48t59), GFP_KERNEL);
+	if (!m48t59)
+		return -ENOMEM;
+
+	m48t59->size = res->end - res->start + 1;
+	m48t59->ioaddr = ioremap(res->start, m48t59->size);
+	if (!m48t59->ioaddr)
+		goto out;
+
+	/* Try to get the IRQ number; the driver can also
+	 * work without an IRQ.
+	 */
+	m48t59->irq = platform_get_irq(pdev, 0);
+	if (m48t59->irq < 0)
+		m48t59->irq = NO_IRQ;
+
+	if (m48t59->irq != NO_IRQ) {
+		ret = request_irq(m48t59->irq, m48t59_rtc_interrupt,
+			IRQF_SHARED, "rtc-m48t59", &pdev->dev);
+		if (ret)
+			goto out;
+	}
+
+	m48t59->rtc = rtc_device_register("m48t59", &pdev->dev,
+				&m48t59_rtc_ops, THIS_MODULE);
+	if (IS_ERR(m48t59->rtc)) {
+		ret = PTR_ERR(m48t59->rtc);
+		goto out;
+	}
+
+	ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
+	if (ret)
+		goto out;
+
+	spin_lock_init(&m48t59->lock);
+	platform_set_drvdata(pdev, m48t59);
+	return 0;
+
+out:
+	if (m48t59->rtc && !IS_ERR(m48t59->rtc))
+		rtc_device_unregister(m48t59->rtc);
+	if (m48t59->irq != NO_IRQ)
+		free_irq(m48t59->irq, &pdev->dev);
+	if (m48t59->ioaddr)
+		iounmap(m48t59->ioaddr);
+	kfree(m48t59);
+	return ret;
+}
+
+static int __devexit m48t59_rtc_remove(struct platform_device *pdev)
+{
+	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+
+	sysfs_remove_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
+	if (!IS_ERR(m48t59->rtc))
+		rtc_device_unregister(m48t59->rtc);
+	if (m48t59->ioaddr)
+		iounmap(m48t59->ioaddr);
+	if (m48t59->irq != NO_IRQ)
+		free_irq(m48t59->irq, &pdev->dev);
+	platform_set_drvdata(pdev, NULL);
+	kfree(m48t59);
+	return 0;
+}
+
+static struct platform_driver m48t59_rtc_platdrv = {
+	.driver		= {
+		.name	= "rtc-m48t59",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= m48t59_rtc_probe,
+	.remove		= __devexit_p(m48t59_rtc_remove),
+};
+
+static int __init m48t59_rtc_init(void)
+{
+	return platform_driver_register(&m48t59_rtc_platdrv);
+}
+
+static void __exit m48t59_rtc_exit(void)
+{
+	platform_driver_unregister(&m48t59_rtc_platdrv);
+}
+
+module_init(m48t59_rtc_init);
+module_exit(m48t59_rtc_exit);
+
+MODULE_AUTHOR("Mark Zhan <rongkai.zhan@windriver.com>");
+MODULE_DESCRIPTION("M48T59 RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 09bbe575..6b67b50 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -13,13 +13,7 @@
 #include <linux/rtc.h>
 #include <linux/bcd.h>
 
-#define DRV_VERSION "0.4"
-
-/* Addresses to scan */
-static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END };
-
-/* Insmod parameters */
-I2C_CLIENT_INSMOD;
+#define DRV_VERSION "0.5"
 
 
 /*
@@ -88,9 +82,6 @@
 	unsigned		has_irq:1;
 	char			buf[17];
 	char			*regs;
-
-	/* on conversion to a "new style" i2c driver, this vanishes */
-	struct i2c_client	dev;
 };
 
 static int rs5c_get_regs(struct rs5c372 *rs5c)
@@ -483,25 +474,35 @@
 	return err;
 }
 
+static void rs5c_sysfs_unregister(struct device *dev)
+{
+	device_remove_file(dev, &dev_attr_trim);
+	device_remove_file(dev, &dev_attr_osc);
+}
+
 #else
 static int rs5c_sysfs_register(struct device *dev)
 {
 	return 0;
 }
+
+static void rs5c_sysfs_unregister(struct device *dev)
+{
+	/* nothing */
+}
 #endif	/* SYSFS */
 
 static struct i2c_driver rs5c372_driver;
 
-static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
+static int rs5c372_probe(struct i2c_client *client)
 {
 	int err = 0;
-	struct i2c_client *client;
 	struct rs5c372 *rs5c372;
 	struct rtc_time tm;
 
-	dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
+	dev_dbg(&client->dev, "%s\n", __FUNCTION__);
 
-	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
 		err = -ENODEV;
 		goto exit;
 	}
@@ -514,35 +515,22 @@
 	/* we read registers 0x0f then 0x00-0x0f; skip the first one */
 	rs5c372->regs=&rs5c372->buf[1];
 
-	/* On conversion to a "new style" i2c driver, we'll be handed
-	 * the i2c_client (we won't create it)
-	 */
-	client = &rs5c372->dev;
 	rs5c372->client = client;
-
-	/* I2C client */
-	client->addr = address;
-	client->driver = &rs5c372_driver;
-	client->adapter = adapter;
-
-	strlcpy(client->name, rs5c372_driver.driver.name, I2C_NAME_SIZE);
-
 	i2c_set_clientdata(client, rs5c372);
 
-	/* Inform the i2c layer */
-	if ((err = i2c_attach_client(client)))
-		goto exit_kfree;
-
 	err = rs5c_get_regs(rs5c372);
 	if (err < 0)
-		goto exit_detach;
+		goto exit_kfree;
 
-	/* For "new style" drivers, irq is in i2c_client and chip type
-	 * info comes from i2c_client.dev.platform_data.  Meanwhile:
-	 *
-	 * STICK BOARD-SPECIFIC SETUP CODE RIGHT HERE
-	 */
-	if (rs5c372->type == rtc_undef) {
+	if (strcmp(client->name, "rs5c372a") == 0)
+		rs5c372->type = rtc_rs5c372a;
+	else if (strcmp(client->name, "rs5c372b") == 0)
+		rs5c372->type = rtc_rs5c372b;
+	else if (strcmp(client->name, "rv5c386") == 0)
+		rs5c372->type = rtc_rv5c386;
+	else if (strcmp(client->name, "rv5c387a") == 0)
+		rs5c372->type = rtc_rv5c387a;
+	else {
 		rs5c372->type = rtc_rs5c372b;
 		dev_warn(&client->dev, "assuming rs5c372b\n");
 	}
@@ -567,7 +555,7 @@
 		break;
 	default:
 		dev_err(&client->dev, "unknown RTC type\n");
-		goto exit_detach;
+		goto exit_kfree;
 	}
 
 	/* if the oscillator lost power and no other software (like
@@ -601,7 +589,7 @@
 
 		if ((i2c_master_send(client, buf, 3)) != 3) {
 			dev_err(&client->dev, "setup error\n");
-			goto exit_detach;
+			goto exit_kfree;
 		}
 		rs5c372->regs[RS5C_REG_CTRL1] = buf[1];
 		rs5c372->regs[RS5C_REG_CTRL2] = buf[2];
@@ -621,14 +609,14 @@
 			rs5c372->time24 ? "24hr" : "am/pm"
 			);
 
-	/* FIXME when client->irq exists, use it to register alarm irq */
+	/* REVISIT use client->irq to register alarm irq ... */
 
 	rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name,
 				&client->dev, &rs5c372_rtc_ops, THIS_MODULE);
 
 	if (IS_ERR(rs5c372->rtc)) {
 		err = PTR_ERR(rs5c372->rtc);
-		goto exit_detach;
+		goto exit_kfree;
 	}
 
 	err = rs5c_sysfs_register(&client->dev);
@@ -640,9 +628,6 @@
 exit_devreg:
 	rtc_device_unregister(rs5c372->rtc);
 
-exit_detach:
-	i2c_detach_client(client);
-
 exit_kfree:
 	kfree(rs5c372);
 
@@ -650,24 +635,12 @@
 	return err;
 }
 
-static int rs5c372_attach(struct i2c_adapter *adapter)
+static int rs5c372_remove(struct i2c_client *client)
 {
-	return i2c_probe(adapter, &addr_data, rs5c372_probe);
-}
-
-static int rs5c372_detach(struct i2c_client *client)
-{
-	int err;
 	struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
 
-	if (rs5c372->rtc)
-		rtc_device_unregister(rs5c372->rtc);
-
-	/* REVISIT properly destroy the sysfs files ... */
-
-	if ((err = i2c_detach_client(client)))
-		return err;
-
+	rtc_device_unregister(rs5c372->rtc);
+	rs5c_sysfs_unregister(&client->dev);
 	kfree(rs5c372);
 	return 0;
 }
@@ -676,8 +649,8 @@
 	.driver		= {
 		.name	= "rtc-rs5c372",
 	},
-	.attach_adapter	= &rs5c372_attach,
-	.detach_client	= &rs5c372_detach,
+	.probe		= rs5c372_probe,
+	.remove		= rs5c372_remove,
 };
 
 static __init int rs5c372_init(void)
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 1340451..35765f6 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -747,14 +747,9 @@
 static void __exit
 dcssblk_exit(void)
 {
-	int rc;
-
 	PRINT_DEBUG("DCSSBLOCK EXIT...\n");
 	s390_root_dev_unregister(dcssblk_root_dev);
-	rc = unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
-	if (rc) {
-		PRINT_ERR("unregister_blkdev() failed!\n");
-	}
+	unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
 	PRINT_DEBUG("...finished!\n");
 }
 
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 66102a1..3f36cb3 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -164,3 +164,10 @@
 	help
 	  Character device driver for writing z/VM monitor service records
 
+config S390_VMUR
+	tristate "z/VM unit record device driver"
+	depends on S390
+	default "m"
+	help
+	  Character device driver for the z/VM reader, punch and printer.
+
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index c210784..130de19 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -29,6 +29,7 @@
 obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
 obj-$(CONFIG_MONREADER) += monreader.o
 obj-$(CONFIG_MONWRITER) += monwriter.o
+obj-$(CONFIG_S390_VMUR) += vmur.o
 
 zcore_mod-objs := sclp_sdias.o zcore.o
 obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index e765875..80e7a53 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -131,10 +131,9 @@
 {
 	struct tape_34xx_work *p;
 
-	if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
+	if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
 		return -ENOMEM;
 
-	memset(p, 0, sizeof(*p));
 	INIT_WORK(&p->work, tape_34xx_work_handler);
 
 	p->device = tape_get_device_reference(device);
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 82e6a6b..2f419b0 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,7 +1,7 @@
 /*
- * Copyright (C) 2004,2005 IBM Corporation
+ * Copyright IBM Corp. 2004,2007
  * Interface implementation for communication with the z/VM control program
- * Author(s): Christian Borntraeger <cborntra@de.ibm.com>
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  *
  *
  * z/VMs CP offers the possibility to issue commands via the diagnose code 8
@@ -22,9 +22,11 @@
 #include "vmcp.h"
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Borntraeger <cborntra@de.ibm.com>");
+MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>");
 MODULE_DESCRIPTION("z/VM CP interface");
 
+#define PRINTK_HEADER "vmcp: "
+
 static debug_info_t *vmcp_debug;
 
 static int vmcp_open(struct inode *inode, struct file *file)
@@ -40,7 +42,7 @@
 	session->bufsize = PAGE_SIZE;
 	session->response = NULL;
 	session->resp_size = 0;
-	init_MUTEX(&session->mutex);
+	mutex_init(&session->mutex);
 	file->private_data = session;
 	return nonseekable_open(inode, file);
 }
@@ -57,37 +59,37 @@
 }
 
 static ssize_t
-vmcp_read(struct file *file, char __user * buff, size_t count, loff_t * ppos)
+vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos)
 {
 	size_t tocopy;
 	struct vmcp_session *session;
 
 	session = (struct vmcp_session *)file->private_data;
-	if (down_interruptible(&session->mutex))
+	if (mutex_lock_interruptible(&session->mutex))
 		return -ERESTARTSYS;
 	if (!session->response) {
-		up(&session->mutex);
+		mutex_unlock(&session->mutex);
 		return 0;
 	}
 	if (*ppos > session->resp_size) {
-		up(&session->mutex);
+		mutex_unlock(&session->mutex);
 		return 0;
 	}
 	tocopy = min(session->resp_size - (size_t) (*ppos), count);
-	tocopy = min(tocopy,session->bufsize - (size_t) (*ppos));
+	tocopy = min(tocopy, session->bufsize - (size_t) (*ppos));
 
 	if (copy_to_user(buff, session->response + (*ppos), tocopy)) {
-		up(&session->mutex);
+		mutex_unlock(&session->mutex);
 		return -EFAULT;
 	}
-	up(&session->mutex);
+	mutex_unlock(&session->mutex);
 	*ppos += tocopy;
 	return tocopy;
 }
 
 static ssize_t
-vmcp_write(struct file *file, const char __user * buff, size_t count,
-	   loff_t * ppos)
+vmcp_write(struct file *file, const char __user *buff, size_t count,
+	   loff_t *ppos)
 {
 	char *cmd;
 	struct vmcp_session *session;
@@ -103,24 +105,23 @@
 	}
 	cmd[count] = '\0';
 	session = (struct vmcp_session *)file->private_data;
-	if (down_interruptible(&session->mutex)) {
+	if (mutex_lock_interruptible(&session->mutex)) {
 		kfree(cmd);
 		return -ERESTARTSYS;
 	}
 	if (!session->response)
 		session->response = (char *)__get_free_pages(GFP_KERNEL
-						| __GFP_REPEAT 	| GFP_DMA,
+						| __GFP_REPEAT | GFP_DMA,
 						get_order(session->bufsize));
 	if (!session->response) {
-		up(&session->mutex);
+		mutex_unlock(&session->mutex);
 		kfree(cmd);
 		return -ENOMEM;
 	}
 	debug_text_event(vmcp_debug, 1, cmd);
-	session->resp_size = cpcmd(cmd, session->response,
-				     session->bufsize,
-				     &session->resp_code);
-	up(&session->mutex);
+	session->resp_size = cpcmd(cmd, session->response, session->bufsize,
+				   &session->resp_code);
+	mutex_unlock(&session->mutex);
 	kfree(cmd);
 	*ppos = 0;		/* reset the file pointer after a command */
 	return count;
@@ -145,12 +146,12 @@
 	int temp;
 
 	session = (struct vmcp_session *)file->private_data;
-	if (down_interruptible(&session->mutex))
+	if (mutex_lock_interruptible(&session->mutex))
 		return -ERESTARTSYS;
 	switch (cmd) {
 	case VMCP_GETCODE:
 		temp = session->resp_code;
-		up(&session->mutex);
+		mutex_unlock(&session->mutex);
 		return put_user(temp, (int __user *)arg);
 	case VMCP_SETBUF:
 		free_pages((unsigned long)session->response,
@@ -161,14 +162,14 @@
 			session->bufsize = PAGE_SIZE;
 			temp = -EINVAL;
 		}
-		up(&session->mutex);
+		mutex_unlock(&session->mutex);
 		return temp;
 	case VMCP_GETSIZE:
 		temp = session->resp_size;
-		up(&session->mutex);
+		mutex_unlock(&session->mutex);
 		return put_user(temp, (int __user *)arg);
 	default:
-		up(&session->mutex);
+		mutex_unlock(&session->mutex);
 		return -ENOIOCTLCMD;
 	}
 }
@@ -180,7 +181,7 @@
 	.read		= vmcp_read,
 	.write		= vmcp_write,
 	.unlocked_ioctl	= vmcp_ioctl,
-	.compat_ioctl	= vmcp_ioctl
+	.compat_ioctl	= vmcp_ioctl,
 };
 
 static struct miscdevice vmcp_dev = {
@@ -194,26 +195,38 @@
 	int ret;
 
 	if (!MACHINE_IS_VM) {
-		printk(KERN_WARNING
-		       "z/VM CP interface is only available under z/VM\n");
+		PRINT_WARN("z/VM CP interface is only available under z/VM\n");
 		return -ENODEV;
 	}
-	ret = misc_register(&vmcp_dev);
-	if (!ret)
-		printk(KERN_INFO "z/VM CP interface loaded\n");
-	else
-		printk(KERN_WARNING
-		       "z/VM CP interface not loaded. Could not register misc device.\n");
 	vmcp_debug = debug_register("vmcp", 1, 1, 240);
-	debug_register_view(vmcp_debug, &debug_hex_ascii_view);
-	return ret;
+	if (!vmcp_debug) {
+		PRINT_ERR("z/VM CP interface not loaded. Could not register "
+			   "debug feature\n");
+		return -ENOMEM;
+	}
+	ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view);
+	if (ret) {
+		PRINT_ERR("z/VM CP interface not loaded. Could not register "
+			  "debug feature view. Error code: %d\n", ret);
+		debug_unregister(vmcp_debug);
+		return ret;
+	}
+	ret = misc_register(&vmcp_dev);
+	if (ret) {
+		PRINT_ERR("z/VM CP interface not loaded. Could not register "
+			   "misc device. Error code: %d\n", ret);
+		debug_unregister(vmcp_debug);
+		return ret;
+	}
+	PRINT_INFO("z/VM CP interface loaded\n");
+	return 0;
 }
 
 static void __exit vmcp_exit(void)
 {
-	WARN_ON(misc_deregister(&vmcp_dev) != 0);
+	misc_deregister(&vmcp_dev);
 	debug_unregister(vmcp_debug);
-	printk(KERN_INFO "z/VM CP interface unloaded.\n");
+	PRINT_INFO("z/VM CP interface unloaded.\n");
 }
 
 module_init(vmcp_init);
diff --git a/drivers/s390/char/vmcp.h b/drivers/s390/char/vmcp.h
index 8a5975f..6a99394 100644
--- a/drivers/s390/char/vmcp.h
+++ b/drivers/s390/char/vmcp.h
@@ -12,8 +12,8 @@
  * The idea of this driver is based on cpint from Neale Ferguson
  */
 
-#include <asm/semaphore.h>
 #include <linux/ioctl.h>
+#include <linux/mutex.h>
 
 #define VMCP_GETCODE _IOR(0x10, 1, int)
 #define VMCP_SETBUF _IOW(0x10, 2, int)
@@ -26,5 +26,5 @@
 	int resp_code;
 	/* As we use copy_from/to_user, which might     *
 	 * sleep and cannot use a spinlock              */
-	struct semaphore mutex;
+	struct mutex mutex;
 };
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
new file mode 100644
index 0000000..e90b0f8
--- /dev/null
+++ b/drivers/s390/char/vmur.c
@@ -0,0 +1,906 @@
+/*
+ * Linux driver for System z and s390 unit record devices
+ * (z/VM virtual punch, reader, printer)
+ *
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
+ *	    Michael Holzheu <holzheu@de.ibm.com>
+ *	    Frank Munzert <munzert@de.ibm.com>
+ */
+
+#include <linux/cdev.h>
+
+#include <asm/uaccess.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+#include <asm/debug.h>
+
+#include "vmur.h"
+
+/*
+ * Driver overview
+ *
+ * Unit record device support is implemented as a character device driver.
+ * We can fit at least 16 bits into a device minor number and use the
+ * simple method of mapping a character device number with minor abcd
+ * to the unit record device with devno abcd.
+ * I/O to virtual unit record devices is handled as follows:
+ * Reads: Diagnose code 0x14 (input spool file manipulation)
+ * is used to read spool data page-wise.
+ * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
+ * is available by reading sysfs attr reclen. Each write() to the device
+ * must specify an integral multiple of reclen (at most 511 records).
+ */
+
+static char ur_banner[] = "z/VM virtual unit record device driver";
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
+MODULE_LICENSE("GPL");
+
+#define PRINTK_HEADER "vmur: "
+
+static dev_t ur_first_dev_maj_min;
+static struct class *vmur_class;
+static struct debug_info *vmur_dbf;
+
+/* We put the device's record length (for writes) in the driver_info field */
+static struct ccw_device_id ur_ids[] = {
+	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
+	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
+	{ /* end of list */ }
+};
+
+MODULE_DEVICE_TABLE(ccw, ur_ids);
+
+static int ur_probe(struct ccw_device *cdev);
+static void ur_remove(struct ccw_device *cdev);
+static int ur_set_online(struct ccw_device *cdev);
+static int ur_set_offline(struct ccw_device *cdev);
+
+static struct ccw_driver ur_driver = {
+	.name		= "vmur",
+	.owner		= THIS_MODULE,
+	.ids		= ur_ids,
+	.probe		= ur_probe,
+	.remove		= ur_remove,
+	.set_online	= ur_set_online,
+	.set_offline	= ur_set_offline,
+};
+
+/*
+ * Allocation, freeing, getting and putting of urdev structures
+ */
+static struct urdev *urdev_alloc(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+
+	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
+	if (!urd)
+		return NULL;
+	urd->cdev = cdev;
+	urd->reclen = cdev->id.driver_info;
+	ccw_device_get_id(cdev, &urd->dev_id);
+	mutex_init(&urd->io_mutex);
+	mutex_init(&urd->open_mutex);
+	return urd;
+}
+
+static void urdev_free(struct urdev *urd)
+{
+	kfree(urd);
+}
+
+/*
+ * This is how the character device driver gets a reference to a
+ * ur device. When this call returns successfully, a reference has
+ * been taken (by get_device) on the underlying kobject. The recipient
+ * of this urdev pointer must eventually drop it with urdev_put(urd)
+ * which does the corresponding put_device().
+ */
+static struct urdev *urdev_get_from_devno(u16 devno)
+{
+	char bus_id[16];
+	struct ccw_device *cdev;
+
+	sprintf(bus_id, "0.0.%04x", devno);
+	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
+	if (!cdev)
+		return NULL;
+
+	return cdev->dev.driver_data;
+}
+
+static void urdev_put(struct urdev *urd)
+{
+	put_device(&urd->cdev->dev);
+}
+
+/*
+ * Low-level functions to do I/O to a ur device.
+ *     alloc_chan_prog
+ *     do_ur_io
+ *     ur_int_handler
+ *
+ * alloc_chan_prog allocates and builds the channel program
+ *
+ * do_ur_io issues the channel program to the device and blocks waiting
+ * on a completion event it publishes at urd->io_done. The function
+ * serialises itself on the device's mutex so that only one I/O
+ * is issued at a time (and that I/O is synchronous).
+ *
+ * ur_int_handler catches the "I/O done" interrupt, writes the
+ * subchannel status word into the scsw member of the urdev structure
+ * and complete()s the io_done to wake the waiting do_ur_io.
+ *
+ * The caller of do_ur_io is responsible for kfree()ing the channel program
+ * address pointer that alloc_chan_prog returned.
+ */
+
+
+/*
+ * alloc_chan_prog
+ * The channel program we use is write commands chained together
+ * with a final NOP CCW command-chained on (which ensures that CE and DE
+ * are presented together in a single interrupt instead of as separate
+ * interrupts unless an incorrect length indication kicks in first). The
+ * data length in each CCW is reclen. The caller must ensure that count
+ * is an integral multiple of reclen.
+ * The channel program pointer returned by this function must be freed
+ * with kfree. The caller is responsible for checking that
+ * count/reclen is not ridiculously large.
+ */
+static struct ccw1 *alloc_chan_prog(char *buf, size_t count, size_t reclen)
+{
+	size_t num_ccws;
+	struct ccw1 *cpa;
+	int i;
+
+	TRACE("alloc_chan_prog(%p, %zu, %zu)\n", buf, count, reclen);
+
+	/*
+	 * We chain a NOP onto the writes to force CE+DE together.
+	 * That means we allocate room for CCWs to cover count/reclen
+	 * records plus a NOP.
+	 */
+	num_ccws = count / reclen + 1;
+	cpa = kmalloc(num_ccws * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+	if (!cpa)
+		return NULL;
+
+	for (i = 0; count; i++) {
+		cpa[i].cmd_code = WRITE_CCW_CMD;
+		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
+		cpa[i].count = reclen;
+		cpa[i].cda = __pa(buf);
+		buf += reclen;
+		count -= reclen;
+	}
+	/* The following NOP CCW forces CE+DE to be presented together */
+	cpa[i].cmd_code = CCW_CMD_NOOP;
+	cpa[i].flags = 0;
+	cpa[i].count = 0;
+	cpa[i].cda = 0;
+
+	return cpa;
+}
+
+static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
+{
+	int rc;
+	struct ccw_device *cdev = urd->cdev;
+	DECLARE_COMPLETION(event);
+
+	TRACE("do_ur_io: cpa=%p\n", cpa);
+
+	rc = mutex_lock_interruptible(&urd->io_mutex);
+	if (rc)
+		return rc;
+
+	urd->io_done = &event;
+
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
+	if (rc)
+		goto out;
+
+	wait_for_completion(&event);
+	TRACE("do_ur_io: I/O complete\n");
+	rc = 0;
+
+out:
+	mutex_unlock(&urd->io_mutex);
+	return rc;
+}
+
+/*
+ * ur interrupt handler, called from the ccw_device layer
+ */
+static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
+			   struct irb *irb)
+{
+	struct urdev *urd;
+
+	TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
+	      intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count);
+
+	if (!intparm) {
+		TRACE("ur_int_handler: unsolicited interrupt\n");
+		return;
+	}
+	urd = cdev->dev.driver_data;
+	/* On special conditions irb is an error pointer */
+	if (IS_ERR(irb))
+		urd->io_request_rc = PTR_ERR(irb);
+	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+		urd->io_request_rc = 0;
+	else
+		urd->io_request_rc = -EIO;
+
+	complete(urd->io_done);
+}
+
+/*
+ * reclen sysfs attribute - The record length to be used for write CCWs
+ */
+static ssize_t ur_attr_reclen_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct urdev *urd = dev->driver_data;
+
+	return sprintf(buf, "%zu\n", urd->reclen);
+}
+
+static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
+
+static int ur_create_attributes(struct device *dev)
+{
+	return device_create_file(dev, &dev_attr_reclen);
+}
+
+static void ur_remove_attributes(struct device *dev)
+{
+	device_remove_file(dev, &dev_attr_reclen);
+}
+
+/*
+ * diagnose code 0x210 - retrieve device information
+ * cc=0  normal completion, we have a real device
+ * cc=1  CP paging error
+ * cc=2  The virtual device exists, but is not associated with a real device
+ * cc=3  Invalid device address, or the virtual device does not exist
+ */
+static int get_urd_class(struct urdev *urd)
+{
+	static struct diag210 ur_diag210;
+	int cc;
+
+	ur_diag210.vrdcdvno = urd->dev_id.devno;
+	ur_diag210.vrdclen = sizeof(struct diag210);
+
+	cc = diag210(&ur_diag210);
+	switch (cc) {
+	case 0:
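+		/* cc 0: a real device, which this virtual-only driver does not support */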
+		return -ENOTSUPP;
+	case 2:
+		return ur_diag210.vrdcvcla; /* virtual device class */
+	case 3:
+		return -ENODEV;
+	default:
+		return -EIO;
+	}
+}
+
+/*
+ * Allocation and freeing of urfile structures
+ */
+static struct urfile *urfile_alloc(struct urdev *urd)
+{
+	struct urfile *urf;
+
+	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
+	if (!urf)
+		return NULL;
+	urf->urd = urd;
+
+	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
+	      urf->dev_reclen);
+
+	return urf;
+}
+
+static void urfile_free(struct urfile *urf)
+{
+	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
+	kfree(urf);
+}
+
+/*
+ * The fops implementation of the character device driver
+ */
+static ssize_t do_write(struct urdev *urd, const char __user *udata,
+			size_t count, size_t reclen, loff_t *ppos)
+{
+	struct ccw1 *cpa;
+	char *buf;
+	int rc;
+
+	/* Data buffer must be under 2GB line for fmt1 CCWs: hence GFP_DMA */
+	buf = kmalloc(count, GFP_KERNEL | GFP_DMA);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, udata, count)) {
+		rc = -EFAULT;
+		goto fail_kfree_buf;
+	}
+
+	cpa = alloc_chan_prog(buf, count, reclen);
+	if (!cpa) {
+		rc = -ENOMEM;
+		goto fail_kfree_buf;
+	}
+
+	rc = do_ur_io(urd, cpa);
+	if (rc)
+		goto fail_kfree_cpa;
+
+	if (urd->io_request_rc) {
+		rc = urd->io_request_rc;
+		goto fail_kfree_cpa;
+	}
+	*ppos += count;
+	rc = count;
+fail_kfree_cpa:
+	kfree(cpa);
+fail_kfree_buf:
+	kfree(buf);
+	return rc;
+}
+
+static ssize_t ur_write(struct file *file, const char __user *udata,
+			size_t count, loff_t *ppos)
+{
+	struct urfile *urf = file->private_data;
+
+	TRACE("ur_write: count=%zu\n", count);
+
+	if (count == 0)
+		return 0;
+
+	if (count % urf->dev_reclen)
+		return -EINVAL;	/* count must be a multiple of reclen */
+
+	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
+		count = urf->dev_reclen * MAX_RECS_PER_IO;
+
+	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
+}
+
+static int do_diag_14(unsigned long rx, unsigned long ry1,
+		      unsigned long subcode)
+{
+	register unsigned long _ry1 asm("2") = ry1;
+	register unsigned long _ry2 asm("3") = subcode;
+	int rc = 0;
+
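+	/*
+	 * On 64-bit kernels switch to 31-bit addressing mode (sam31) for the
+	 * diagnose and back to 64-bit mode (sam64) afterwards.
+	 */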
+	asm volatile(
+#ifdef CONFIG_64BIT
+		"   sam31\n"
+		"   diag    %2,2,0x14\n"
+		"   sam64\n"
+#else
+		"   diag    %2,2,0x14\n"
+#endif
+		"   ipm     %0\n"
+		"   srl     %0,28\n"
+		: "=d" (rc), "+d" (_ry2)
+		: "d" (rx), "d" (_ry1)
+		: "cc");
+
+	TRACE("diag 14: subcode=0x%lx, cc=%i\n", subcode, rc);
+	return rc;
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0028 - position spool file to designated
+ *				       record
+ * cc=0  normal completion
+ * cc=2  no file active on the virtual reader or device not ready
+ * cc=3  record specified is beyond EOF
+ */
+static int diag_position_to_record(int devno, int record)
+{
+	int cc;
+
+	cc = do_diag_14(record, devno, 0x28);
+	switch (cc) {
+	case 0:
+		return 0;
+	case 2:
+		return -ENOMEDIUM;
+	case 3:
+		return -ENODATA; /* position beyond end of file */
+	default:
+		return -EIO;
+	}
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
+ * cc=0  normal completion
+ * cc=1  EOF reached
+ * cc=2  no file active on the virtual reader, and no file eligible
+ * cc=3  file already active on the virtual reader or specified virtual
+ *	 reader does not exist or is not a reader
+ */
+static int diag_read_file(int devno, char *buf)
+{
+	int cc;
+
+	cc = do_diag_14((unsigned long) buf, devno, 0x00);
+	switch (cc) {
+	case 0:
+		return 0;
+	case 1:
+		return -ENODATA;
+	case 2:
+		return -ENOMEDIUM;
+	default:
+		return -EIO;
+	}
+}
+
+static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
+			   loff_t *offs)
+{
+	size_t len, copied, res;
+	char *buf;
+	int rc;
+	u16 reclen;
+	struct urdev *urd;
+
+	urd = ((struct urfile *) file->private_data)->urd;
+	reclen = ((struct urfile *) file->private_data)->file_reclen;
+
+	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
+	if (rc == -ENODATA)
+		return 0;
+	if (rc)
+		return rc;
+
+	len = min((size_t) PAGE_SIZE, count);
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	copied = 0;
+	res = (size_t) (*offs % PAGE_SIZE);
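+	/* res is the offset into the first page implied by *offs; later pages start at 0 */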
+	do {
+		rc = diag_read_file(urd->dev_id.devno, buf);
+		if (rc == -ENODATA)
+			break;
+		if (rc)
+			goto fail;
+		if (reclen)
+			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
+		len = min(count - copied, PAGE_SIZE - res);
+		if (copy_to_user(ubuf + copied, buf + res, len)) {
+			rc = -EFAULT;
+			goto fail;
+		}
+		res = 0;
+		copied += len;
+	} while (copied != count);
+
+	*offs += copied;
+	rc = copied;
+fail:
+	kfree(buf);
+	return rc;
+}
+
+static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
+		       loff_t *offs)
+{
+	struct urdev *urd;
+	int rc;
+
+	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);
+
+	if (count == 0)
+		return 0;
+
+	urd = ((struct urfile *) file->private_data)->urd;
+	rc = mutex_lock_interruptible(&urd->io_mutex);
+	if (rc)
+		return rc;
+	rc = diag14_read(file, ubuf, count, offs);
+	mutex_unlock(&urd->io_mutex);
+	return rc;
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
+ * cc=0  normal completion
+ * cc=1  no files on reader queue or no subsequent file
+ * cc=2  spid specified is invalid
+ */
+static int diag_read_next_file_info(struct file_control_block *buf, int spid)
+{
+	int cc;
+
+	cc = do_diag_14((unsigned long) buf, spid, 0xfff);
+	switch (cc) {
+	case 0:
+		return 0;
+	default:
+		return -ENODATA;
+	}
+}
+
+static int verify_device(struct urdev *urd)
+{
+	struct file_control_block fcb;
+	char *buf;
+	int rc;
+
+	switch (urd->class) {
+	case DEV_CLASS_UR_O:
+		return 0; /* no check needed here */
+	case DEV_CLASS_UR_I:
+		/* check for empty reader device (beginning of chain) */
+		rc = diag_read_next_file_info(&fcb, 0);
+		if (rc)
+			return rc;
+
+		/* open file on virtual reader	*/
+		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+		rc = diag_read_file(urd->dev_id.devno, buf);
+		kfree(buf);
+
+		if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
+			return rc;
+		return 0;
+	default:
+		return -ENOTSUPP;
+	}
+}
+
+static int get_file_reclen(struct urdev *urd)
+{
+	struct file_control_block fcb;
+	int rc;
+
+	switch (urd->class) {
+	case DEV_CLASS_UR_O:
+		return 0;
+	case DEV_CLASS_UR_I:
+		rc = diag_read_next_file_info(&fcb, 0);
+		if (rc)
+			return rc;
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+	if (fcb.file_stat & FLG_CP_DUMP)
+		return 0;
+
+	return fcb.rec_len;
+}
+
+static int ur_open(struct inode *inode, struct file *file)
+{
+	u16 devno;
+	struct urdev *urd;
+	struct urfile *urf;
+	unsigned short accmode;
+	int rc;
+
+	accmode = file->f_flags & O_ACCMODE;
+
+	if (accmode == O_RDWR)
+		return -EACCES;
+
+	/*
+	 * We treat the minor number as the devno of the ur device
+	 * to find in the driver tree.
+	 */
+	devno = MINOR(file->f_dentry->d_inode->i_rdev);
+
+	urd = urdev_get_from_devno(devno);
+	if (!urd)
+		return -ENXIO;
+
+	if (file->f_flags & O_NONBLOCK) {
+		if (!mutex_trylock(&urd->open_mutex)) {
+			rc = -EBUSY;
+			goto fail_put;
+		}
+	} else {
+		if (mutex_lock_interruptible(&urd->open_mutex)) {
+			rc = -ERESTARTSYS;
+			goto fail_put;
+		}
+	}
+
+	TRACE("ur_open\n");
+
+	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
+	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
+		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
+		rc = -EACCES;
+		goto fail_unlock;
+	}
+
+	rc = verify_device(urd);
+	if (rc)
+		goto fail_unlock;
+
+	urf = urfile_alloc(urd);
+	if (!urf) {
+		rc = -ENOMEM;
+		goto fail_unlock;
+	}
+
+	urf->dev_reclen = urd->reclen;
+	rc = get_file_reclen(urd);
+	if (rc < 0)
+		goto fail_urfile_free;
+	urf->file_reclen = rc;
+	file->private_data = urf;
+	return 0;
+
+fail_urfile_free:
+	urfile_free(urf);
+fail_unlock:
+	mutex_unlock(&urd->open_mutex);
+fail_put:
+	urdev_put(urd);
+	return rc;
+}
+
+static int ur_release(struct inode *inode, struct file *file)
+{
+	struct urfile *urf = file->private_data;
+
+	TRACE("ur_release\n");
+	mutex_unlock(&urf->urd->open_mutex);
+	urdev_put(urf->urd);
+	urfile_free(urf);
+	return 0;
+}
+
+static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
+{
+	loff_t newpos;
+
+	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
+		return -ESPIPE; /* seek allowed only for reader */
+	if (offset % PAGE_SIZE)
+		return -ESPIPE; /* only multiples of 4K allowed */
+	switch (whence) {
+	case 0: /* SEEK_SET */
+		newpos = offset;
+		break;
+	case 1: /* SEEK_CUR */
+		newpos = file->f_pos + offset;
+		break;
+	default:
+		return -EINVAL;
+	}
+	file->f_pos = newpos;
+	return newpos;
+}
+
+static struct file_operations ur_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = ur_open,
+	.release = ur_release,
+	.read	 = ur_read,
+	.write	 = ur_write,
+	.llseek  = ur_llseek,
+};
+
+/*
+ * ccw_device infrastructure:
+ *     ur_probe gets its own ref to the device (i.e. get_device),
+ *     creates the struct urdev, the device attributes, sets up
+ *     the interrupt handler and validates the virtual unit record device.
+ *     ur_remove removes the device attributes, frees the struct urdev
+ *     and drops (put_device) the ref to the device we got in ur_probe.
+ */
+static int ur_probe(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+	int rc;
+
+	TRACE("ur_probe: cdev=%p state=%d\n", cdev, *(int *) cdev->private);
+
+	if (!get_device(&cdev->dev))
+		return -ENODEV;
+
+	urd = urdev_alloc(cdev);
+	if (!urd) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+	rc = ur_create_attributes(&cdev->dev);
+	if (rc) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+	cdev->dev.driver_data = urd;
+	cdev->handler = ur_int_handler;
+
+	/* validate virtual unit record device */
+	urd->class = get_urd_class(urd);
+	if (urd->class < 0) {
+		rc = urd->class;
+		goto fail;
+	}
+	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
+		rc = -ENOTSUPP;
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	urdev_free(urd);
+	put_device(&cdev->dev);
+	return rc;
+}
+
+static void ur_remove(struct ccw_device *cdev)
+{
+	struct urdev *urd = cdev->dev.driver_data;
+
+	TRACE("ur_remove\n");
+	if (cdev->online)
+		ur_set_offline(cdev);
+	ur_remove_attributes(&cdev->dev);
+	urdev_free(urd);
+	put_device(&cdev->dev);
+}
+
+static int ur_set_online(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+	int minor, major, rc;
+	char node_id[16];
+
+	TRACE("ur_set_online: cdev=%p state=%d\n", cdev,
+	      *(int *) cdev->private);
+
+	if (!try_module_get(ur_driver.owner))
+		return -EINVAL;
+
+	urd = (struct urdev *) cdev->dev.driver_data;
+	minor = urd->dev_id.devno;
+	major = MAJOR(ur_first_dev_maj_min);
+
+	urd->char_device = cdev_alloc();
+	if (!urd->char_device) {
+		rc = -ENOMEM;
+		goto fail_module_put;
+	}
+
+	cdev_init(urd->char_device, &ur_fops);
+	urd->char_device->dev = MKDEV(major, minor);
+	urd->char_device->owner = ur_fops.owner;
+
+	rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
+	if (rc)
+		goto fail_free_cdev;
+	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
+		if (urd->class == DEV_CLASS_UR_I)
+			sprintf(node_id, "vmrdr-%s", cdev->dev.bus_id);
+		if (urd->class == DEV_CLASS_UR_O)
+			sprintf(node_id, "vmpun-%s", cdev->dev.bus_id);
+	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
+		sprintf(node_id, "vmprt-%s", cdev->dev.bus_id);
+	} else {
+		rc = -ENOTSUPP;
+		goto fail_free_cdev;
+	}
+
+	urd->device = device_create(vmur_class, NULL, urd->char_device->dev,
+					"%s", node_id);
+	if (IS_ERR(urd->device)) {
+		rc = PTR_ERR(urd->device);
+		TRACE("ur_set_online: device_create rc=%d\n", rc);
+		goto fail_free_cdev;
+	}
+
+	return 0;
+
+fail_free_cdev:
+	cdev_del(urd->char_device);
+fail_module_put:
+	module_put(ur_driver.owner);
+
+	return rc;
+}
+
+static int ur_set_offline(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+
+	TRACE("ur_set_offline: cdev=%p cdev->private=%p state=%d\n",
+		cdev, cdev->private, *(int *) cdev->private);
+	urd = (struct urdev *) cdev->dev.driver_data;
+	device_destroy(vmur_class, urd->char_device->dev);
+	cdev_del(urd->char_device);
+	module_put(ur_driver.owner);
+
+	return 0;
+}
+
+/*
+ * Module initialisation and cleanup
+ */
+static int __init ur_init(void)
+{
+	int rc;
+	dev_t dev;
+
+	if (!MACHINE_IS_VM) {
+		PRINT_ERR("%s is only available under z/VM.\n", ur_banner);
+		return -ENODEV;
+	}
+
+	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
+	if (!vmur_dbf)
+		return -ENOMEM;
+	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
+	if (rc)
+		goto fail_free_dbf;
+
+	debug_set_level(vmur_dbf, 6);
+
+	rc = ccw_driver_register(&ur_driver);
+	if (rc)
+		goto fail_free_dbf;
+
+	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
+	if (rc) {
+		PRINT_ERR("alloc_chrdev_region failed: err = %d\n", rc);
+		goto fail_unregister_driver;
+	}
+	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);
+
+	vmur_class = class_create(THIS_MODULE, "vmur");
+	if (IS_ERR(vmur_class)) {
+		rc = PTR_ERR(vmur_class);
+		goto fail_unregister_region;
+	}
+	PRINT_INFO("%s loaded.\n", ur_banner);
+	return 0;
+
+fail_unregister_region:
+	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
+fail_unregister_driver:
+	ccw_driver_unregister(&ur_driver);
+fail_free_dbf:
+	debug_unregister(vmur_dbf);
+	return rc;
+}
+
+static void __exit ur_exit(void)
+{
+	class_destroy(vmur_class);
+	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
+	ccw_driver_unregister(&ur_driver);
+	debug_unregister(vmur_dbf);
+	PRINT_INFO("%s unloaded.\n", ur_banner);
+}
+
+module_init(ur_init);
+module_exit(ur_exit);
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
new file mode 100644
index 0000000..16d0a4e
--- /dev/null
+++ b/drivers/s390/char/vmur.h
@@ -0,0 +1,104 @@
+/*
+ * Linux driver for System z and s390 unit record devices
+ * (z/VM virtual punch, reader, printer)
+ *
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
+ *	    Michael Holzheu <holzheu@de.ibm.com>
+ *	    Frank Munzert <munzert@de.ibm.com>
+ */
+
+#ifndef _VMUR_H_
+#define _VMUR_H_
+
+#define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */
+#define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */
+/*
+ * we only support z/VM's default unit record devices:
+ * both in SPOOL directory control statement and in CP DEFINE statement
+ *	RDR defaults to 2540 reader
+ *	PUN defaults to 2540 punch
+ *	PRT defaults to 1403 printer
+ */
+#define READER_PUNCH_DEVTYPE	0x2540
+#define PRINTER_DEVTYPE		0x1403
+
+/* z/VM spool file control block SFBLOK */
+struct file_control_block {
+	char reserved_1[8];
+	char user_owner[8];
+	char user_orig[8];
+	__s32 data_recs;
+	__s16 rec_len;
+	__s16 file_num;
+	__u8  file_stat;
+	__u8  dev_type;
+	char  reserved_2[6];
+	char  file_name[12];
+	char  file_type[12];
+	char  create_date[8];
+	char  create_time[8];
+	char  reserved_3[6];
+	__u8  file_class;
+	__u8  sfb_lok;
+	__u64 distr_code;
+	__u32 reserved_4;
+	__u8  current_starting_copy_number;
+	__u8  sfblock_cntrl_flags;
+	__u8  reserved_5;
+	__u8  more_status_flags;
+	char  rest[200];
+} __attribute__ ((packed));
+
+#define FLG_CP_DUMP 0x10
+
+/*
+ * A struct urdev is created for each ur device that is made available
+ * via the ccw_device driver model.
+ */
+struct urdev {
+	struct ccw_device *cdev;	/* Backpointer to ccw device */
+	struct mutex io_mutex;		/* Serialises device IO */
+	struct mutex open_mutex;	/* Serialises access to device */
+	struct completion *io_done;	/* do_ur_io waits; irq completes */
+	struct device *device;
+	struct cdev *char_device;
+	struct ccw_dev_id dev_id;	/* device id */
+	size_t reclen;			/* Record length for *write* CCWs */
+	int class;			/* VM device class */
+	int io_request_rc;		/* return code from I/O request */
+};
+
+/*
+ * A struct urfile is allocated at open() time for each device and
+ * freed on release().
+ */
+struct urfile {
+	struct urdev *urd;
+	unsigned int flags;
+	size_t dev_reclen;
+	__u16 file_reclen;
+};
+
+/*
+ * Device major/minor definitions.
+ */
+
+#define UR_MAJOR 0	/* get dynamic major */
+/*
+ * We map minor numbers directly to device numbers (0-FFFF) for simplicity.
+ * This avoids having to allocate (and manage) slot numbers.
+ */
+#define NUM_MINORS 65536
+
+/* Limiting each I/O to 511 records limits chan prog to 4KB (511 r/w + 1 NOP) */
+#define MAX_RECS_PER_IO		511
+#define WRITE_CCW_CMD		0x01
+
+#define TRACE(x...) debug_sprintf_event(vmur_dbf, 1, x)
+#define CCWDEV_CU_DI(cutype, di) \
+		CCW_DEVICE(cutype, 0x00), .driver_info = (di)
+
+#define FILE_RECLEN_OFFSET	4064 /* reclen offset in spool data block */
+
+#endif /* _VMUR_H_ */
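
The vmur.h comments above note that char device minors map one-to-one onto
z/VM device numbers (0x0000-0xFFFF), which is why no slot allocation is
needed.  A minimal sketch of that mapping as used by ur_open() and
ur_set_online(); the helper names are hypothetical and not part of the patch:

	#include <linux/kdev_t.h>
	#include <linux/types.h>

	/* Hypothetical helpers, illustrative only. */
	static dev_t vmur_devno_to_dev(dev_t first_dev, u16 devno)
	{
		/* ur_set_online() does MKDEV(MAJOR(ur_first_dev_maj_min), devno) */
		return MKDEV(MAJOR(first_dev), devno);
	}

	static u16 vmur_dev_to_devno(dev_t dev)
	{
		/* ur_open() recovers the devno with MINOR(inode->i_rdev) */
		return MINOR(dev);
	}
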
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6b264bd..001682e 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -272,7 +272,7 @@
 	struct ccw_device_id *id = &(cdev->id);
 	int len;
 
-	len = snprint_alias(buf, PAGE_SIZE, id, "\n") + 1;
+	len = snprint_alias(buf, PAGE_SIZE, id, "\n");
 
 	return len > PAGE_SIZE ? PAGE_SIZE : len;
 }
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index e70aeb7..ed026a1 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -166,9 +166,9 @@
 {
 	char dbf_text[15];
 
-	if (ccq == 0 || ccq == 32 || ccq == 96)
+	if (ccq == 0 || ccq == 32)
 		return 0;
-	if (ccq == 97)
+	if (ccq == 96 || ccq == 97)
 		return 1;
 	/*notify devices immediately*/
 	sprintf(dbf_text,"%d", ccq);
@@ -2306,8 +2306,8 @@
 	if (!ssqd_area) {
 	        QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
 				"SIGAs for sch x%x.\n", irq_ptr->schid.sch_no);
-		irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
-				  CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
+		irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
+				  CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
 				  CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
 		irq_ptr->is_qebsm = 0;
 		irq_ptr->sch_token = 0;
@@ -2328,8 +2328,8 @@
 		QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
 				"SIGAs for sch 0.%x.%x.\n", result,
 				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
-		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
-			CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
+		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
+			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
 			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
 		irq_ptr->is_qebsm  = 0;
 		goto out;
@@ -2340,8 +2340,8 @@
 				"is 0x%x. Using all SIGAs for sch 0.%x.%x.\n",
 				ssqd_area->response.code,
 				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
-		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
-			CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
+		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
+			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
 			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
 		irq_ptr->is_qebsm  = 0;
 		goto out;
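
The three qdio.c hunks above replace logical OR (||) with bitwise OR (|) when
building the SIGA necessity mask.  A minimal sketch of why that matters; the
flag values below are illustrative, not the real CHSC flag definitions:

	#include <stdio.h>

	int main(void)
	{
		unsigned char a = 0x40, b = 0x20, c = 0x10; /* hypothetical flag bits */
		unsigned char wrong = a || b || c;	/* logical OR collapses to 1 */
		unsigned char right = a | b | c;	/* bitwise OR keeps all bits: 0x70 */

		printf("wrong=%#x right=%#x\n", wrong, right);
		return 0;
	}

With ||, the old code set qdioac to 1 rather than "all flags set", which is
what the adjacent comment intends.
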
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 348bb7b..023455a 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -317,8 +317,8 @@
 		CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
 		return -ENOMEM;
 	}
-	privptr->p_mtc_envelope= kmalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
-	privptr->p_env = kmalloc(sizeof(struct claw_env), GFP_KERNEL);
+	privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
+	privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
         if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
                 probe_error(cgdev);
 		put_device(&cgdev->dev);
@@ -327,8 +327,6 @@
 		CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
                 return -ENOMEM;
         }
-	memset(privptr->p_mtc_envelope, 0x00, MAX_ENVELOPE_SIZE);
-	memset(privptr->p_env, 0x00, sizeof(struct claw_env));
 	memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
 	memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
 	memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
@@ -3924,7 +3922,7 @@
 	snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", cdev->dev.bus_id);
 	ccw_device_get_id(cdev, &dev_id);
 	p_ch->devno = dev_id.devno;
-	if ((p_ch->irb = kmalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
+	if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
 		printk(KERN_WARNING "%s Out of memory in %s for irb\n",
 			p_ch->id,__FUNCTION__);
 #ifdef FUNCTRACE
@@ -3933,7 +3931,6 @@
 #endif
 		return -ENOMEM;
 	}
-	memset(p_ch->irb, 0, sizeof (struct irb));
 #ifdef FUNCTRACE
         	printk(KERN_INFO "%s:%s Exit on line %d\n",
 			cdev->dev.bus_id,__FUNCTION__,__LINE__);
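
This claw.c hunk, and many of the SCSI and serial hunks later in this merge,
apply the same mechanical conversion: kmalloc()+memset(0) becomes kzalloc(),
and kmalloc(n * size)+memset(0) becomes kcalloc().  A minimal sketch of the
equivalence; struct foo and the helper names are stand-ins, not real kernel
symbols:

	#include <linux/slab.h>

	struct foo { int a; };	/* illustrative stand-in */

	static struct foo *alloc_one(void)
	{
		/* kzalloc(size, flags) returns already-zeroed memory, so the
		 * explicit memset() calls removed in these hunks are redundant. */
		return kzalloc(sizeof(struct foo), GFP_KERNEL);
	}

	static struct foo *alloc_many(size_t n)
	{
		/* kcalloc(n, size, flags) is the zeroing array variant and also
		 * checks n * size for multiplication overflow. */
		return kcalloc(n, sizeof(struct foo), GFP_KERNEL);
	}
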
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index b34eb82..ec18bae 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -211,6 +211,10 @@
 	/* initial values when measuring starts */
 	unsigned long initial_rx_packets;
 	unsigned long initial_tx_packets;
+	/* inbound scatter gather data */
+	unsigned int sg_skbs_rx;
+	unsigned int sg_frags_rx;
+	unsigned int sg_alloc_page_rx;
 };
 
 /* Routing stuff */
@@ -341,6 +345,9 @@
 
 #define QETH_IP_HEADER_SIZE 40
 
+/* large receive scatter gather copy break */
+#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
+
 struct qeth_hdr_layer3 {
 	__u8  id;
 	__u8  flags;
@@ -771,6 +778,7 @@
 	int layer2;
 	enum qeth_large_send_types large_send;
 	int performance_stats;
+	int rx_sg_cb;
 };
 
 /*
@@ -828,6 +836,7 @@
 	int (*orig_hard_header)(struct sk_buff *,struct net_device *,
 				unsigned short,void *,void *,unsigned);
 	struct qeth_osn_info osn_info;
+	atomic_t force_alloc_skb;
 };
 
 struct qeth_card_list_struct {
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 86b0c44..57f6943 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -1054,6 +1054,7 @@
 	else
 		card->options.layer2 = 0;
 	card->options.performance_stats = 0;
+	card->options.rx_sg_cb = QETH_RX_SG_CB;
 }
 
 /**
@@ -1934,6 +1935,7 @@
 			atomic_inc(&reply->received);
 			wake_up(&reply->wait_q);
 		}
+		cpu_relax();
 	};
 	rc = reply->rc;
 	qeth_put_reply(reply);
@@ -2258,6 +2260,89 @@
 	return skb;
 }
 
+static inline int
+qeth_create_skb_frag(struct qdio_buffer_element *element,
+		     struct sk_buff **pskb,
+		     int offset, int *pfrag, int data_len)
+{
+	struct page *page = virt_to_page(element->addr);
+	if (*pfrag == 0) {
+		/* the upper protocol layers assume that there is data in the
+		 * skb itself. Copy a small amount (64 bytes) to make them
+		 * happy. */
+		*pskb = dev_alloc_skb(64 + QETH_FAKE_LL_LEN_ETH);
+		if (!(*pskb))
+			return -ENOMEM;
+		skb_reserve(*pskb, QETH_FAKE_LL_LEN_ETH);
+		if (data_len <= 64) {
+			memcpy(skb_put(*pskb, data_len), element->addr + offset,
+				data_len);
+		} else {
+			get_page(page);
+			memcpy(skb_put(*pskb, 64), element->addr + offset, 64);
+			skb_fill_page_desc(*pskb, *pfrag, page, offset + 64,
+				data_len - 64);
+			(*pskb)->data_len += data_len - 64;
+			(*pskb)->len	  += data_len - 64;
+			(*pskb)->truesize += data_len - 64;
+		}
+	} else {
+		get_page(page);
+		skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len);
+		(*pskb)->data_len += data_len;
+		(*pskb)->len	  += data_len;
+		(*pskb)->truesize += data_len;
+	}
+	(*pfrag)++;
+	return 0;
+}
+
+static inline struct qeth_buffer_pool_entry *
+qeth_find_free_buffer_pool_entry(struct qeth_card *card)
+{
+	struct list_head *plh;
+	struct qeth_buffer_pool_entry *entry;
+	int i, free;
+	struct page *page;
+
+	if (list_empty(&card->qdio.in_buf_pool.entry_list))
+		return NULL;
+
+	list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
+		entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
+		free = 1;
+		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
+			if (page_count(virt_to_page(entry->elements[i])) > 1) {
+				free = 0;
+				break;
+			}
+		}
+		if (free) {
+			list_del_init(&entry->list);
+			return entry;
+		}
+	}
+
+	/* no free buffer in pool so take first one and swap pages */
+	entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
+			struct qeth_buffer_pool_entry, list);
+	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
+		if (page_count(virt_to_page(entry->elements[i])) > 1) {
+			page = alloc_page(GFP_ATOMIC|GFP_DMA);
+			if (!page) {
+				return NULL;
+			} else {
+				free_page((unsigned long)entry->elements[i]);
+				entry->elements[i] = page_address(page);
+				if (card->options.performance_stats)
+					card->perf_stats.sg_alloc_page_rx++;
+			}
+		}
+	}
+	list_del_init(&entry->list);
+	return entry;
+}
+
 static struct sk_buff *
 qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
 		  struct qdio_buffer_element **__element, int *__offset,
@@ -2269,6 +2354,8 @@
 	int skb_len;
 	void *data_ptr;
 	int data_len;
+	int use_rx_sg = 0;
+	int frag = 0;
 
 	QETH_DBF_TEXT(trace,6,"nextskb");
 	/* qeth_hdr must not cross element boundaries */
@@ -2293,23 +2380,43 @@
 
 	if (!skb_len)
 		return NULL;
-	if (card->options.fake_ll){
-		if(card->dev->type == ARPHRD_IEEE802_TR){
-			if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR, *hdr)))
-				goto no_mem;
-			skb_reserve(skb,QETH_FAKE_LL_LEN_TR);
+	if ((skb_len >= card->options.rx_sg_cb) &&
+	    (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
+	    (!atomic_read(&card->force_alloc_skb))) {
+		use_rx_sg = 1;
+	} else {
+		if (card->options.fake_ll) {
+			if (card->dev->type == ARPHRD_IEEE802_TR) {
+				if (!(skb = qeth_get_skb(skb_len +
+						QETH_FAKE_LL_LEN_TR, *hdr)))
+					goto no_mem;
+				skb_reserve(skb, QETH_FAKE_LL_LEN_TR);
+			} else {
+				if (!(skb = qeth_get_skb(skb_len +
+						QETH_FAKE_LL_LEN_ETH, *hdr)))
+					goto no_mem;
+				skb_reserve(skb, QETH_FAKE_LL_LEN_ETH);
+			}
 		} else {
-			if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH, *hdr)))
+			skb = qeth_get_skb(skb_len, *hdr);
+			if (!skb)
 				goto no_mem;
-			skb_reserve(skb,QETH_FAKE_LL_LEN_ETH);
 		}
-	} else if (!(skb = qeth_get_skb(skb_len, *hdr)))
-		goto no_mem;
+	}
+
 	data_ptr = element->addr + offset;
 	while (skb_len) {
 		data_len = min(skb_len, (int)(element->length - offset));
-		if (data_len)
-			memcpy(skb_put(skb, data_len), data_ptr, data_len);
+		if (data_len) {
+			if (use_rx_sg) {
+				if (qeth_create_skb_frag(element, &skb, offset,
+				    &frag, data_len))
+					goto no_mem;
+			} else {
+				memcpy(skb_put(skb, data_len), data_ptr,
+					data_len);
+			}
+		}
 		skb_len -= data_len;
 		if (skb_len){
 			if (qeth_is_last_sbale(element)){
@@ -2331,6 +2438,10 @@
 	}
 	*__element = element;
 	*__offset = offset;
+	if (use_rx_sg && card->options.performance_stats) {
+		card->perf_stats.sg_skbs_rx++;
+		card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
+	}
 	return skb;
 no_mem:
 	if (net_ratelimit()){
@@ -2608,28 +2719,15 @@
 	}
 }
 
-static struct qeth_buffer_pool_entry *
-qeth_get_buffer_pool_entry(struct qeth_card *card)
-{
-	struct qeth_buffer_pool_entry *entry;
-
-	QETH_DBF_TEXT(trace, 6, "gtbfplen");
-	if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
-		entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
-				struct qeth_buffer_pool_entry, list);
-		list_del_init(&entry->list);
-		return entry;
-	}
-	return NULL;
-}
-
-static void
+static int
 qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
 {
 	struct qeth_buffer_pool_entry *pool_entry;
 	int i;
- 
-	pool_entry = qeth_get_buffer_pool_entry(card);
+
+	pool_entry = qeth_find_free_buffer_pool_entry(card);
+	if (!pool_entry)
+		return 1;
 	/*
 	 * since the buffer is accessed only from the input_tasklet
 	 * there shouldn't be a need to synchronize; also, since we use
@@ -2648,6 +2746,7 @@
 			buf->buffer->element[i].flags = 0;
 	}
 	buf->state = QETH_QDIO_BUF_EMPTY;
+	return 0;
 }
 
 static void
@@ -2682,6 +2781,7 @@
 	int count;
 	int i;
 	int rc;
+	int newcount = 0;
 
 	QETH_DBF_TEXT(trace,6,"queinbuf");
 	count = (index < queue->next_buf_to_init)?
@@ -2692,9 +2792,27 @@
 	/* only requeue at a certain threshold to avoid SIGAs */
 	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){
 		for (i = queue->next_buf_to_init;
-		     i < queue->next_buf_to_init + count; ++i)
-			qeth_init_input_buffer(card,
-				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]);
+		     i < queue->next_buf_to_init + count; ++i) {
+			if (qeth_init_input_buffer(card,
+				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
+				break;
+			} else {
+				newcount++;
+			}
+		}
+
+		if (newcount < count) {
+			/* we are low on memory, so switch back to
+			   traditional skb allocation and drop packets */
+			if (atomic_cmpxchg(&card->force_alloc_skb, 0, 1))
+				printk(KERN_WARNING
+					"qeth: switch to alloc skb\n");
+			count = newcount;
+		} else {
+			if (atomic_cmpxchg(&card->force_alloc_skb, 1, 0))
+				printk(KERN_WARNING "qeth: switch to sg\n");
+		}
+
 		/*
 		 * according to old code it should be avoided to requeue all
 		 * 128 buffers in order to benefit from PCI avoidance.
@@ -6494,6 +6612,7 @@
 
 	QETH_DBF_TEXT(setup, 2, "hrdsetup");
 
+	atomic_set(&card->force_alloc_skb, 0);
 retry:
 	if (retries < 3){
 		PRINT_WARN("Retrying to do IDX activates.\n");
diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c
index 89d56c8..f1ff165 100644
--- a/drivers/s390/net/qeth_proc.c
+++ b/drivers/s390/net/qeth_proc.c
@@ -212,6 +212,12 @@
 		      "  Skb fragments sent in SG mode          : %u\n\n",
 		      card->perf_stats.sg_skbs_sent,
 		      card->perf_stats.sg_frags_sent);
+	seq_printf(s, "  Skbs received in SG mode               : %u\n"
+		      "  Skb fragments received in SG mode      : %u\n"
+		      "  Page allocations for rx SG mode        : %u\n\n",
+		      card->perf_stats.sg_skbs_rx,
+		      card->perf_stats.sg_frags_rx,
+		      card->perf_stats.sg_alloc_page_rx);
 	seq_printf(s, "  large_send tx (in Kbytes)              : %u\n"
 		      "  large_send count                       : %u\n\n",
 		      card->perf_stats.large_send_bytes >> 10,
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index a54e414..e821a15 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -7,6 +7,7 @@
 #include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/kmod.h>
+#include <linux/reboot.h>
 #include <asm/oplib.h>
 #include <asm/ebus.h>
 
@@ -170,8 +171,6 @@
 static void do_envctrl_shutdown(struct bbc_cpu_temperature *tp)
 {
 	static int shutting_down = 0;
-	static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
-	char *argv[] = { "/sbin/shutdown", "-h", "now", NULL };
 	char *type = "???";
 	s8 val = -1;
 
@@ -195,7 +194,7 @@
 	printk(KERN_CRIT "kenvctrld: Shutting down the system now.\n");
 
 	shutting_down = 1;
-	if (call_usermodehelper("/sbin/shutdown", argv, envp, 0) < 0)
+	if (orderly_poweroff(true) < 0)
 		printk(KERN_CRIT "envctrl: shutdown execution failed\n");
 }
 
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 178155b..fbadd4d 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -156,10 +156,9 @@
 
 	if (!bp)
 		return NULL;
-	client = kmalloc(sizeof(*client), GFP_KERNEL);
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
 	if (!client)
 		return NULL;
-	memset(client, 0, sizeof(*client));
 	client->bp = bp;
 	client->echild = echild;
 	client->bus = echild->resource[0].start;
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index 8328aca..dadabef 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -26,6 +26,7 @@
 #include <linux/ioport.h>
 #include <linux/miscdevice.h>
 #include <linux/kmod.h>
+#include <linux/reboot.h>
 
 #include <asm/ebus.h>
 #include <asm/uaccess.h>
@@ -966,10 +967,6 @@
 static void envctrl_do_shutdown(void)
 {
 	static int inprog = 0;
-	static char *envp[] = {	
-		"HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
-	char *argv[] = { 
-		"/sbin/shutdown", "-h", "now", NULL };	
 	int ret;
 
 	if (inprog != 0)
@@ -977,7 +974,7 @@
 
 	inprog = 1;
 	printk(KERN_CRIT "kenvctrld: WARNING: Shutting down the system now.\n");
-	ret = call_usermodehelper("/sbin/shutdown", argv, envp, 0);
+	ret = orderly_poweroff(true);
 	if (ret < 0) {
 		printk(KERN_CRIT "kenvctrld: WARNING: system shutdown failed!\n"); 
 		inprog = 0;  /* unlikely to succeed, but we could try again */
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 512857a2..5157a2a 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -619,8 +619,7 @@
 	jsf0.busy = 0;
 
 	misc_deregister(&jsf_dev);
-	if (unregister_blkdev(JSFD_MAJOR, "jsfd") != 0)
-		printk("jsfd: cleanup_module failed\n");
+	unregister_blkdev(JSFD_MAJOR, "jsfd");
 	blk_cleanup_queue(jsf_queue);
 }
 
diff --git a/drivers/sbus/char/vfc_dev.c b/drivers/sbus/char/vfc_dev.c
index 6afc7e5..26b1d2a 100644
--- a/drivers/sbus/char/vfc_dev.c
+++ b/drivers/sbus/char/vfc_dev.c
@@ -656,12 +656,9 @@
 	if (!cards)
 		return -ENODEV;
 
-	vfc_dev_lst = kmalloc(sizeof(struct vfc_dev *) *
-						 (cards+1),
-						 GFP_KERNEL);
+	vfc_dev_lst = kcalloc(cards + 1, sizeof(struct vfc_dev*), GFP_KERNEL);
 	if (vfc_dev_lst == NULL)
 		return -ENOMEM;
-	memset(vfc_dev_lst, 0, sizeof(struct vfc_dev *) * (cards + 1));
 	vfc_dev_lst[cards] = NULL;
 
 	ret = register_chrdev(VFC_MAJOR, vfcstr, &vfc_fops);
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 76c0909..6b49f6a 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1160,13 +1160,12 @@
 	}
 
 	/* Allocate event info space */
-	tw_dev->event_queue[0] = kmalloc(sizeof(TW_Event) * TW_Q_LENGTH, GFP_KERNEL);
+	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
 	if (!tw_dev->event_queue[0]) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
 		goto out;
 	}
 
-	memset(tw_dev->event_queue[0], 0, sizeof(TW_Event) * TW_Q_LENGTH);
 
 	for (i = 0; i < TW_Q_LENGTH; i++) {
 		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 8b5334c..773d11d 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -3606,11 +3606,10 @@
 int esp_slave_alloc(struct scsi_device *SDptr)
 {
 	struct esp_device *esp_dev =
-		kmalloc(sizeof(struct esp_device), GFP_ATOMIC);
+		kzalloc(sizeof(struct esp_device), GFP_ATOMIC);
 
 	if (!esp_dev)
 		return -ENOMEM;
-	memset(esp_dev, 0, sizeof(struct esp_device));
 	SDptr->hostdata = esp_dev;
 	return 0;
 }
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index f12864a..3a80897 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -181,13 +181,12 @@
 	struct Scsi_Host *host;
 	int ret;
 
-	hostdata = kmalloc(sizeof(*hostdata), GFP_KERNEL);
+	hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
 	if (!hostdata) {
 		printk(KERN_ERR "NCR D700: SIOP%d: Failed to allocate host"
 		       "data, detatching\n", siop);
 		return -ENOMEM;
 	}
-	memset(hostdata, 0, sizeof(*hostdata));
 
 	if (!request_region(region, 64, "NCR_D700")) {
 		printk(KERN_ERR "NCR D700: Failed to reserve IO region 0x%x\n",
diff --git a/drivers/scsi/NCR_Q720.c b/drivers/scsi/NCR_Q720.c
index 778844c..a8bbdc2 100644
--- a/drivers/scsi/NCR_Q720.c
+++ b/drivers/scsi/NCR_Q720.c
@@ -148,11 +148,10 @@
 	__u32 base_addr, mem_size;
 	void __iomem *mem_base;
 
-	p = kmalloc(sizeof(*p), GFP_KERNEL);
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 
-	memset(p, 0, sizeof(*p));
 	pos2 = mca_device_read_pos(mca_dev, 2);
 	/* enable device */
 	pos2 |=  NCR_Q720_POS2_BOARD_ENABLE | NCR_Q720_POS2_INTERRUPT_ENABLE;
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 0464c18..005d2b0 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1159,11 +1159,10 @@
 
 	init_waitqueue_head(&waiting);
 
-	dev = kmalloc(sizeof(imm_struct), GFP_KERNEL);
+	dev = kzalloc(sizeof(imm_struct), GFP_KERNEL);
 	if (!dev)
 		return -ENOMEM;
 
-	memset(dev, 0, sizeof(imm_struct));
 
 	dev->base = -1;
 	dev->mode = IMM_AUTODETECT;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 9f8ed6b..492a51b 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -7068,14 +7068,13 @@
 	subdevice_id = pci_dev->subsystem_device;
 
 	/* found a controller */
-	ha = kmalloc(sizeof (ips_ha_t), GFP_KERNEL);
+	ha = kzalloc(sizeof (ips_ha_t), GFP_KERNEL);
 	if (ha == NULL) {
 		IPS_PRINTK(KERN_WARNING, pci_dev,
 			   "Unable to allocate temporary ha struct\n");
 		return -1;
 	}
 
-	memset(ha, 0, sizeof (ips_ha_t));
 
 	ips_sh[index] = NULL;
 	ips_ha[index] = ha;
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index 5c32a69..3126824 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -101,13 +101,12 @@
 	struct NCR_700_Host_Parameters *hostdata;
 	struct Scsi_Host *host;
 
-	hostdata = kmalloc(sizeof(*hostdata), GFP_KERNEL);
+	hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
 	if (!hostdata) {
 		printk(KERN_ERR "%s: Failed to allocate host data\n",
 		       dev->dev.bus_id);
 		return -ENOMEM;
 	}
-	memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
 
 	hostdata->dev = &dev->dev;
 	dma_set_mask(&dev->dev, DMA_32BIT_MASK);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index d70ddfd..9c5342e 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -40,6 +40,7 @@
 
 #include <linux/err.h>
 #include <linux/blkdev.h>
+#include <linux/freezer.h>
 #include <linux/scatterlist.h>
 
 /* ---------- SCSI Host glue ---------- */
@@ -868,8 +869,6 @@
 {
 	struct sas_ha_struct *sas_ha = _sas_ha;
 
-	current->flags |= PF_NOFREEZE;
-
 	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule();
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index f81f85e..07bd0dc 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1830,7 +1830,7 @@
 	/* Initialize and populate the iocb list per host.  */
 	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
 	for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
-		iocbq_entry = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
+		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
 		if (iocbq_entry == NULL) {
 			printk(KERN_ERR "%s: only allocated %d iocbs of "
 				"expected %d count. Unloading driver.\n",
@@ -1839,7 +1839,6 @@
 			goto out_free_iocbq;
 		}
 
-		memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq));
 		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
 		if (iotag == 0) {
 			kfree (iocbq_entry);
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index c46685a..c6a53dc 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -454,7 +454,7 @@
 	pci_set_master(pdev);
 
 	// Allocate the per driver initialization structure
-	adapter = kmalloc(sizeof(adapter_t), GFP_KERNEL);
+	adapter = kzalloc(sizeof(adapter_t), GFP_KERNEL);
 
 	if (adapter == NULL) {
 		con_log(CL_ANN, (KERN_WARNING
@@ -462,7 +462,6 @@
 
 		goto out_probe_one;
 	}
-	memset(adapter, 0, sizeof(adapter_t));
 
 
 	// set up PCI related soft state and other pre-known parameters
@@ -746,10 +745,9 @@
 	 * Allocate and initialize the init data structure for mailbox
 	 * controllers
 	 */
-	raid_dev = kmalloc(sizeof(mraid_device_t), GFP_KERNEL);
+	raid_dev = kzalloc(sizeof(mraid_device_t), GFP_KERNEL);
 	if (raid_dev == NULL) return -1;
 
-	memset(raid_dev, 0, sizeof(mraid_device_t));
 
 	/*
 	 * Attach the adapter soft state to raid device soft state
@@ -1050,8 +1048,7 @@
 	 * since the calling routine does not yet know the number of available
 	 * commands.
 	 */
-	adapter->kscb_list = kmalloc(sizeof(scb_t) * MBOX_MAX_SCSI_CMDS,
-			GFP_KERNEL);
+	adapter->kscb_list = kcalloc(MBOX_MAX_SCSI_CMDS, sizeof(scb_t), GFP_KERNEL);
 
 	if (adapter->kscb_list == NULL) {
 		con_log(CL_ANN, (KERN_WARNING
@@ -1059,7 +1056,6 @@
 			__LINE__));
 		goto out_free_ibuf;
 	}
-	memset(adapter->kscb_list, 0, sizeof(scb_t) * MBOX_MAX_SCSI_CMDS);
 
 	// memory allocation for our command packets
 	if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
@@ -3495,8 +3491,7 @@
 	int		i;
 
 	// Allocate memory for the base list of scb for management module.
-	adapter->uscb_list = kmalloc(sizeof(scb_t) * MBOX_MAX_USER_CMDS,
-			GFP_KERNEL);
+	adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
 
 	if (adapter->uscb_list == NULL) {
 		con_log(CL_ANN, (KERN_WARNING
@@ -3504,7 +3499,6 @@
 			__LINE__));
 		return -1;
 	}
-	memset(adapter->uscb_list, 0, sizeof(scb_t) * MBOX_MAX_USER_CMDS);
 
 
 	// Initialize the synchronization parameters for resources for
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 84d9c27..b6587a6 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -890,12 +890,11 @@
 	if (lld_adp->drvr_type != DRVRTYPE_MBOX)
 		return (-EINVAL);
 
-	adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
+	adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
 
 	if (!adapter)
 		return -ENOMEM;
 
-	memset(adapter, 0, sizeof(mraid_mmadp_t));
 
 	adapter->unique_id	= lld_adp->unique_id;
 	adapter->drvr_type	= lld_adp->drvr_type;
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index b7f2e61..ebb948c 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1636,15 +1636,13 @@
 	 * Allocate the dynamic array first and then allocate individual
 	 * commands.
 	 */
-	instance->cmd_list = kmalloc(sizeof(struct megasas_cmd *) * max_cmd,
-				     GFP_KERNEL);
+	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
 
 	if (!instance->cmd_list) {
 		printk(KERN_DEBUG "megasas: out of memory\n");
 		return -ENOMEM;
 	}
 
-	memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd);
 
 	for (i = 0; i < max_cmd; i++) {
 		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 370802d..2dd0dc9 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -106,9 +106,8 @@
     DEBUG(0, "aha152x_attach()\n");
 
     /* Create new SCSI device */
-    info = kmalloc(sizeof(*info), GFP_KERNEL);
+    info = kzalloc(sizeof(*info), GFP_KERNEL);
     if (!info) return -ENOMEM;
-    memset(info, 0, sizeof(*info));
     info->p_dev = link;
     link->priv = info;
 
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index c6f8c6e..445cfbb 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1602,9 +1602,8 @@
 	nsp_dbg(NSP_DEBUG_INIT, "in");
 
 	/* Create new SCSI device */
-	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (info == NULL) { return -ENOMEM; }
-	memset(info, 0, sizeof(*info));
 	info->p_dev = link;
 	link->priv = info;
 	data->ScsiInfo = info;
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 697cfb7..67c5a58 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -162,10 +162,9 @@
 	DEBUG(0, "qlogic_attach()\n");
 
 	/* Create new SCSI device */
-	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
-	memset(info, 0, sizeof(*info));
 	info->p_dev = link;
 	link->priv = info;
 	link->io.NumPorts1 = 16;
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 2695b71..961839e 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -875,10 +875,9 @@
 	DEBUG(0, "SYM53C500_attach()\n");
 
 	/* Create new SCSI device */
-	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
-	memset(info, 0, sizeof(*info));
 	info->p_dev = link;
 	link->priv = info;
 	link->io.NumPorts1 = 16;
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 2f1fa1e..67b6d76 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -1014,10 +1014,9 @@
 	int modes, ppb, ppb_hi;
 	int err = -ENOMEM;
 
-	dev = kmalloc(sizeof(ppa_struct), GFP_KERNEL);
+	dev = kzalloc(sizeof(ppa_struct), GFP_KERNEL);
 	if (!dev)
 		return -ENOMEM;
-	memset(dev, 0, sizeof(ppa_struct));
 	dev->base = -1;
 	dev->mode = PPA_AUTODETECT;
 	dev->recon_tmo = PPA_RECON_TMO;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 9adb64a..8a525ab 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -19,6 +19,7 @@
 #include <linux/timer.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/interrupt.h>
 #include <linux/blkdev.h>
@@ -1516,8 +1517,6 @@
 {
 	struct Scsi_Host *shost = data;
 
-	current->flags |= PF_NOFREEZE;
-
 	/*
 	 * We use TASK_INTERRUPTIBLE so that the thread is not
 	 * counted against the load average as a running process.
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 018c65f..710f19d 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -100,7 +100,7 @@
 {
 	struct Scsi_Host * host = NULL;
 	struct NCR_700_Host_Parameters *hostdata =
-		kmalloc(sizeof(struct NCR_700_Host_Parameters),	GFP_KERNEL);
+		kzalloc(sizeof(struct NCR_700_Host_Parameters),	GFP_KERNEL);
 
 	printk(KERN_NOTICE "sim710: %s\n", dev->bus_id);
 	printk(KERN_NOTICE "sim710: irq = %d, clock = %d, base = 0x%lx, scsi_id = %d\n",
@@ -110,7 +110,6 @@
 		printk(KERN_ERR "sim710: Failed to allocate host data\n");
 		goto out;
 	}
-	memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
 
 	if(request_region(base_addr, 64, "sim710") == NULL) {
 		printk(KERN_ERR "sim710: Failed to reserve IO region 0x%lx\n",
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 14cba1c..5db1520 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2082,10 +2082,9 @@
 	uint id = scsi_device->id;
 	uint lun = scsi_device->lun;
 
-	pDCB = kmalloc(sizeof(struct dc390_dcb), GFP_KERNEL);
+	pDCB = kzalloc(sizeof(struct dc390_dcb), GFP_KERNEL);
 	if (!pDCB)
 		return -ENOMEM;
-	memset(pDCB, 0, sizeof(struct dc390_dcb));
 
 	if (!pACB->DCBCnt++) {
 		pACB->pLinkDCB = pDCB;
diff --git a/drivers/serial/8250_hp300.c b/drivers/serial/8250_hp300.c
index 53e81a4..2cf0953 100644
--- a/drivers/serial/8250_hp300.c
+++ b/drivers/serial/8250_hp300.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/serial_8250.h>
 #include <linux/delay.h>
 #include <linux/dio.h>
 #include <linux/console.h>
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index cab42cb..18f6297 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -338,6 +338,34 @@
 	  your boot loader (lilo or loadlin) about how to pass options to the
 	  kernel at boot time.)
 
+config SERIAL_SB1250_DUART
+	tristate "BCM1xxx on-chip DUART serial support"
+	depends on SIBYTE_SB1xxx_SOC=y
+	select SERIAL_CORE
+	default y
+	---help---
+	  Support for the asynchronous serial interface (DUART) included in
+	  the BCM1250 and derived System-On-a-Chip (SOC) devices.  Note that
+	  the letter D in DUART stands for "dual", which is how the device
+	  is implemented.  Depending on the SOC configuration there may be
+	  one or more DUARTs available, all of which are handled.
+
+	  If unsure, say Y.  To compile this driver as a module, choose M here:
+	  the module will be called sb1250-duart.
+
+config SERIAL_SB1250_DUART_CONSOLE
+	bool "Support for console on a BCM1xxx DUART serial port"
+	depends on SERIAL_SB1250_DUART=y
+	select SERIAL_CORE_CONSOLE
+	default y
+	---help---
+	  If you say Y here, it will be possible to use a serial port as the
+	  system console (the system console is the device which receives all
+	  kernel messages and warnings and which allows logins in single user
+	  mode).
+
+	  If unsure, say Y.
+
 config SERIAL_ATMEL
 	bool "AT91 / AT32 on-chip serial port support"
 	depends on (ARM && ARCH_AT91) || AVR32
@@ -458,6 +486,36 @@
 
 	  If unsure, say Y.
 
+config SERIAL_ZS
+	tristate "DECstation Z85C30 serial support"
+	depends on MACH_DECSTATION
+	select SERIAL_CORE
+	default y
+	---help---
+	  Support for the Zilog Z85C30 serial communications controller used
+	  for serial ports in newer DECstation systems.  These include the
+	  DECsystem 5900 and all models of the DECstation and DECsystem 5000
+	  systems except for the model 200.
+
+	  If unsure, say Y.  To compile this driver as a module, choose M here:
+	  the module will be called zs.
+
+config SERIAL_ZS_CONSOLE
+	bool "Support for console on a DECstation Z85C30 serial port"
+	depends on SERIAL_ZS=y
+	select SERIAL_CORE_CONSOLE
+	default y
+	---help---
+	  If you say Y here, it will be possible to use a serial port as the
+	  system console (the system console is the device which receives all
+	  kernel messages and warnings and which allows logins in single user
+	  mode).
+
+	  Note that the firmware uses ttyS1 as the serial console on the
+	  Maxine and ttyS3 on the others using this driver.
+
+	  If unsure, say Y.
+
 config SERIAL_21285
 	tristate "DC21285 serial port support"
 	depends on ARM && FOOTBRIDGE
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 08ad0d9..af6377d 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -43,6 +43,7 @@
 obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o
 obj-$(CONFIG_SERIAL_LH7A40X) += serial_lh7a40x.o
 obj-$(CONFIG_SERIAL_DZ) += dz.o
+obj-$(CONFIG_SERIAL_ZS) += zs.o
 obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
 obj-$(CONFIG_SERIAL_SGI_L1_CONSOLE) += sn_console.o
 obj-$(CONFIG_SERIAL_CPM) += cpm_uart/
@@ -51,6 +52,7 @@
 obj-$(CONFIG_SERIAL_ICOM) += icom.o
 obj-$(CONFIG_SERIAL_M32R_SIO) += m32r_sio.o
 obj-$(CONFIG_SERIAL_MPSC) += mpsc.o
+obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
 obj-$(CONFIG_ETRAX_SERIAL) += crisv10.o
 obj-$(CONFIG_SERIAL_JSM) += jsm/
 obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 954073c..72229df 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -716,7 +716,7 @@
 		goto out;
 	}
 
-	uap = kmalloc(sizeof(struct uart_amba_port), GFP_KERNEL);
+	uap = kzalloc(sizeof(struct uart_amba_port), GFP_KERNEL);
 	if (uap == NULL) {
 		ret = -ENOMEM;
 		goto out;
@@ -728,7 +728,6 @@
 		goto free;
 	}
 
-	memset(uap, 0, sizeof(struct uart_amba_port));
 	uap->clk = clk_get(&dev->dev, "UARTCLK");
 	if (IS_ERR(uap->clk)) {
 		ret = PTR_ERR(uap->clk);
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index b63ff8d..cefde58 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -678,7 +678,7 @@
 		}
 		bdp->cbd_datlen = count;
 		bdp->cbd_sc |= BD_SC_READY;
-		__asm__("eieio");
+		eieio();
 		/* Get next BD. */
 		if (bdp->cbd_sc & BD_SC_WRAP)
 			bdp = pinfo->tx_bd_base;
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 7ffdaea..a64d858 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -17,6 +17,11 @@
 #include <asm/of_platform.h>
 #include <asm/prom.h>
 
+struct of_serial_info {
+	int type;
+	int line;
+};
+
 /*
  * Fill a struct uart_port for a given device node
  */
@@ -62,6 +67,7 @@
 static int __devinit of_platform_serial_probe(struct of_device *ofdev,
 						const struct of_device_id *id)
 {
+	struct of_serial_info *info;
 	struct uart_port port;
 	int port_type;
 	int ret;
@@ -69,30 +75,35 @@
 	if (of_find_property(ofdev->node, "used-by-rtas", NULL))
 		return -EBUSY;
 
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (info == NULL)
+		return -ENOMEM;
+
 	port_type = (unsigned long)id->data;
 	ret = of_platform_serial_setup(ofdev, port_type, &port);
 	if (ret)
 		goto out;
 
 	switch (port_type) {
-	case PORT_UNKNOWN:
-		dev_info(&ofdev->dev, "Unknown serial port found, "
-			"attempting to use 8250 driver\n");
-		/* fallthrough */
 	case PORT_8250 ... PORT_MAX_8250:
 		ret = serial8250_register_port(&port);
 		break;
 	default:
 		/* need to add code for these */
+	case PORT_UNKNOWN:
+		dev_info(&ofdev->dev, "Unknown serial port found, ignored\n");
 		ret = -ENODEV;
 		break;
 	}
 	if (ret < 0)
 		goto out;
 
-	ofdev->dev.driver_data = (void *)(unsigned long)ret;
+	info->type = port_type;
+	info->line = ret;
+	ofdev->dev.driver_data = info;
 	return 0;
 out:
+	kfree(info);
 	irq_dispose_mapping(port.irq);
 	return ret;
 }
@@ -102,8 +113,16 @@
  */
 static int of_platform_serial_remove(struct of_device *ofdev)
 {
-	int line = (unsigned long)ofdev->dev.driver_data;
-	serial8250_unregister_port(line);
+	struct of_serial_info *info = ofdev->dev.driver_data;
+	switch (info->type) {
+	case PORT_8250 ... PORT_MAX_8250:
+		serial8250_unregister_port(info->line);
+		break;
+	default:
+		/* need to add code for these */
+		break;
+	}
+	kfree(info);
 	return 0;
 }
 
diff --git a/drivers/serial/sb1250-duart.c b/drivers/serial/sb1250-duart.c
new file mode 100644
index 0000000..1d9d728
--- /dev/null
+++ b/drivers/serial/sb1250-duart.c
@@ -0,0 +1,972 @@
+/*
+ *	drivers/serial/sb1250-duart.c
+ *
+ *	Support for the asynchronous serial interface (DUART) included
+ *	in the BCM1250 and derived System-On-a-Chip (SOC) devices.
+ *
+ *	Copyright (c) 2007  Maciej W. Rozycki
+ *
+ *	Derived from drivers/char/sb1250_duart.c for which the following
+ *	copyright applies:
+ *
+ *	Copyright (c) 2000, 2001, 2002, 2003, 2004  Broadcom Corporation
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	References:
+ *
+ *	"BCM1250/BCM1125/BCM1125H User Manual", Broadcom Corporation
+ */
+
+#if defined(CONFIG_SERIAL_SB1250_DUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/types.h>
+
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/war.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_uart.h>
+#include <asm/sibyte/swarm.h>
+
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+
+#define SBD_CHANREGS(line)	A_BCM1480_DUART_CHANREG((line), 0)
+#define SBD_CTRLREGS(line)	A_BCM1480_DUART_CTRLREG((line), 0)
+#define SBD_INT(line)		(K_BCM1480_INT_UART_0 + (line))
+
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+
+#define SBD_CHANREGS(line)	A_DUART_CHANREG((line), 0)
+#define SBD_CTRLREGS(line)	A_DUART_CTRLREG(0)
+#define SBD_INT(line)		(K_INT_UART_0 + (line))
+
+#else
+#error invalid SB1250 UART configuration
+
+#endif
+
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_DESCRIPTION("BCM1xxx on-chip DUART serial driver");
+MODULE_LICENSE("GPL");
+
+
+#define DUART_MAX_CHIP 2
+#define DUART_MAX_SIDE 2
+
+/*
+ * Per-port state.
+ */
+struct sbd_port {
+	struct sbd_duart	*duart;
+	struct uart_port	port;
+	unsigned char __iomem	*memctrl;
+	int			tx_stopped;
+	int			initialised;
+};
+
+/*
+ * Per-DUART state for the shared register space.
+ */
+struct sbd_duart {
+	struct sbd_port		sport[2];
+	unsigned long		mapctrl;
+	atomic_t		map_guard;
+};
+
+#define to_sport(uport) container_of(uport, struct sbd_port, port)
+
+static struct sbd_duart sbd_duarts[DUART_MAX_CHIP];
+
+#define __unused __attribute__((__unused__))
+
+
+/*
+ * Reading and writing SB1250 DUART registers.
+ *
+ * There are three register spaces: two per-channel ones and
+ * a shared one.  We have to define accessors appropriately.
+ * All registers are 64-bit and all but the Baud Rate Clock
+ * registers only define 8 least significant bits.  There is
+ * also a workaround to take into account.  Raw accessors use
+ * the full register width, but cooked ones truncate it
+ * intentionally so that the rest of the driver does not care.
+ */
+static u64 __read_sbdchn(struct sbd_port *sport, int reg)
+{
+	void __iomem *csr = sport->port.membase + reg;
+
+	return __raw_readq(csr);
+}
+
+static u64 __read_sbdshr(struct sbd_port *sport, int reg)
+{
+	void __iomem *csr = sport->memctrl + reg;
+
+	return __raw_readq(csr);
+}
+
+static void __write_sbdchn(struct sbd_port *sport, int reg, u64 value)
+{
+	void __iomem *csr = sport->port.membase + reg;
+
+	__raw_writeq(value, csr);
+}
+
+static void __write_sbdshr(struct sbd_port *sport, int reg, u64 value)
+{
+	void __iomem *csr = sport->memctrl + reg;
+
+	__raw_writeq(value, csr);
+}
+
+/*
+ * In bug 1956, we get glitches that can mess up uart registers.  This
+ * "read-mode-reg after any register access" is an accepted workaround.
+ */
+static void __war_sbd1956(struct sbd_port *sport)
+{
+	__read_sbdchn(sport, R_DUART_MODE_REG_1);
+	__read_sbdchn(sport, R_DUART_MODE_REG_2);
+}
+
+static unsigned char read_sbdchn(struct sbd_port *sport, int reg)
+{
+	unsigned char retval;
+
+	retval = __read_sbdchn(sport, reg);
+	if (SIBYTE_1956_WAR)
+		__war_sbd1956(sport);
+	return retval;
+}
+
+static unsigned char read_sbdshr(struct sbd_port *sport, int reg)
+{
+	unsigned char retval;
+
+	retval = __read_sbdshr(sport, reg);
+	if (SIBYTE_1956_WAR)
+		__war_sbd1956(sport);
+	return retval;
+}
+
+static void write_sbdchn(struct sbd_port *sport, int reg, unsigned int value)
+{
+	__write_sbdchn(sport, reg, value);
+	if (SIBYTE_1956_WAR)
+		__war_sbd1956(sport);
+}
+
+static void write_sbdshr(struct sbd_port *sport, int reg, unsigned int value)
+{
+	__write_sbdshr(sport, reg, value);
+	if (SIBYTE_1956_WAR)
+		__war_sbd1956(sport);
+}
+
+
+static int sbd_receive_ready(struct sbd_port *sport)
+{
+	return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_RX_RDY;
+}
+
+static int sbd_receive_drain(struct sbd_port *sport)
+{
+	int loops = 10000;
+
+	while (sbd_receive_ready(sport) && loops--)
+		read_sbdchn(sport, R_DUART_RX_HOLD);
+	return loops;
+}
+
+static int __unused sbd_transmit_ready(struct sbd_port *sport)
+{
+	return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_RDY;
+}
+
+static int __unused sbd_transmit_drain(struct sbd_port *sport)
+{
+	int loops = 10000;
+
+	while (!sbd_transmit_ready(sport) && loops--)
+		udelay(2);
+	return loops;
+}
+
+static int sbd_transmit_empty(struct sbd_port *sport)
+{
+	return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_EMT;
+}
+
+static int sbd_line_drain(struct sbd_port *sport)
+{
+	int loops = 10000;
+
+	while (!sbd_transmit_empty(sport) && loops--)
+		udelay(2);
+	return loops;
+}
+
+
+static unsigned int sbd_tx_empty(struct uart_port *uport)
+{
+	struct sbd_port *sport = to_sport(uport);
+
+	return sbd_transmit_empty(sport) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int sbd_get_mctrl(struct uart_port *uport)
+{
+	struct sbd_port *sport = to_sport(uport);
+	unsigned int mctrl, status;
+
+	status = read_sbdshr(sport, R_DUART_IN_PORT);
+	status >>= (uport->line) % 2;
+	mctrl = (!(status & M_DUART_IN_PIN0_VAL) ? TIOCM_CTS : 0) |
+		(!(status & M_DUART_IN_PIN4_VAL) ? TIOCM_CAR : 0) |
+		(!(status & M_DUART_RIN0_PIN) ? TIOCM_RNG : 0) |
+		(!(status & M_DUART_IN_PIN2_VAL) ? TIOCM_DSR : 0);
+	return mctrl;
+}
+
+static void sbd_set_mctrl(struct uart_port *uport, unsigned int mctrl)
+{
+	struct sbd_port *sport = to_sport(uport);
+	unsigned int clr = 0, set = 0, mode2;
+
+	if (mctrl & TIOCM_DTR)
+		set |= M_DUART_SET_OPR2;
+	else
+		clr |= M_DUART_CLR_OPR2;
+	if (mctrl & TIOCM_RTS)
+		set |= M_DUART_SET_OPR0;
+	else
+		clr |= M_DUART_CLR_OPR0;
+	clr <<= (uport->line) % 2;
+	set <<= (uport->line) % 2;
+
+	mode2 = read_sbdchn(sport, R_DUART_MODE_REG_2);
+	mode2 &= ~M_DUART_CHAN_MODE;
+	if (mctrl & TIOCM_LOOP)
+		mode2 |= V_DUART_CHAN_MODE_LCL_LOOP;
+	else
+		mode2 |= V_DUART_CHAN_MODE_NORMAL;
+
+	write_sbdshr(sport, R_DUART_CLEAR_OPR, clr);
+	write_sbdshr(sport, R_DUART_SET_OPR, set);
+	write_sbdchn(sport, R_DUART_MODE_REG_2, mode2);
+}
+
+static void sbd_stop_tx(struct uart_port *uport)
+{
+	struct sbd_port *sport = to_sport(uport);
+
+	write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
+	sport->tx_stopped = 1;
+};
+
+static void sbd_start_tx(struct uart_port *uport)
+{
+	struct sbd_port *sport = to_sport(uport);
+	unsigned int mask;
+
+	/* Enable tx interrupts.  */
+	mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
+	mask |= M_DUART_IMR_TX;
+	write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
+
+	/* Go!, go!, go!...  */
+	write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
+	sport->tx_stopped = 0;
+};
+
+static void sbd_stop_rx(struct uart_port *uport)
+{
+	struct sbd_port *sport = to_sport(uport);
+
+	write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0);
+};
+
+static void sbd_enable_ms(struct uart_port *uport)
+{
+	struct sbd_port *sport = to_sport(uport);
+
+	write_sbdchn(sport, R_DUART_AUXCTL_X,
+		     M_DUART_CIN_CHNG_ENA | M_DUART_CTS_CHNG_ENA);
+}
+
+static void sbd_break_ctl(struct uart_port *uport, int break_state)
+{
+	struct sbd_port *sport = to_sport(uport);
+
+	if (break_state == -1)
+		write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_START_BREAK);
+	else
+		write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_STOP_BREAK);
+}
+
+
+static void sbd_receive_chars(struct sbd_port *sport)
+{
+	struct uart_port *uport = &sport->port;
+	struct uart_icount *icount;
+	unsigned int status, ch, flag;
+	int count;
+
+	for (count = 16; count; count--) {
+		status = read_sbdchn(sport, R_DUART_STATUS);
+		if (!(status & M_DUART_RX_RDY))
+			break;
+
+		ch = read_sbdchn(sport, R_DUART_RX_HOLD);
+
+		flag = TTY_NORMAL;
+
+		icount = &uport->icount;
+		icount->rx++;
+
+		if (unlikely(status &
+			     (M_DUART_RCVD_BRK | M_DUART_FRM_ERR |
+			      M_DUART_PARITY_ERR | M_DUART_OVRUN_ERR))) {
+			if (status & M_DUART_RCVD_BRK) {
+				icount->brk++;
+				if (uart_handle_break(uport))
+					continue;
+			} else if (status & M_DUART_FRM_ERR)
+				icount->frame++;
+			else if (status & M_DUART_PARITY_ERR)
+				icount->parity++;
+			if (status & M_DUART_OVRUN_ERR)
+				icount->overrun++;
+
+			status &= uport->read_status_mask;
+			if (status & M_DUART_RCVD_BRK)
+				flag = TTY_BREAK;
+			else if (status & M_DUART_FRM_ERR)
+				flag = TTY_FRAME;
+			else if (status & M_DUART_PARITY_ERR)
+				flag = TTY_PARITY;
+		}
+
+		if (uart_handle_sysrq_char(uport, ch))
+			continue;
+
+		uart_insert_char(uport, status, M_DUART_OVRUN_ERR, ch, flag);
+	}
+
+	tty_flip_buffer_push(uport->info->tty);
+}
+
+static void sbd_transmit_chars(struct sbd_port *sport)
+{
+	struct uart_port *uport = &sport->port;
+	struct circ_buf *xmit = &sport->port.info->xmit;
+	unsigned int mask;
+	int stop_tx;
+
+	/* XON/XOFF chars.  */
+	if (sport->port.x_char) {
+		write_sbdchn(sport, R_DUART_TX_HOLD, sport->port.x_char);
+		sport->port.icount.tx++;
+		sport->port.x_char = 0;
+		return;
+	}
+
+	/* If nothing to do or stopped or hardware stopped.  */
+	stop_tx = (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port));
+
+	/* Send char.  */
+	if (!stop_tx) {
+		write_sbdchn(sport, R_DUART_TX_HOLD, xmit->buf[xmit->tail]);
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+		sport->port.icount.tx++;
+
+		if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+			uart_write_wakeup(&sport->port);
+	}
+
+	/* Are we done?  */
+	if (stop_tx || uart_circ_empty(xmit)) {
+		/* Disable tx interrupts.  */
+		mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
+		mask &= ~M_DUART_IMR_TX;
+		write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
+	}
+}
+
+static void sbd_status_handle(struct sbd_port *sport)
+{
+	struct uart_port *uport = &sport->port;
+	unsigned int delta;
+
+	delta = read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2));
+	delta >>= (uport->line) % 2;
+
+	if (delta & (M_DUART_IN_PIN0_VAL << S_DUART_IN_PIN_CHNG))
+		uart_handle_cts_change(uport, !(delta & M_DUART_IN_PIN0_VAL));
+
+	if (delta & (M_DUART_IN_PIN2_VAL << S_DUART_IN_PIN_CHNG))
+		uport->icount.dsr++;
+
+	if (delta & ((M_DUART_IN_PIN2_VAL | M_DUART_IN_PIN0_VAL) <<
+		     S_DUART_IN_PIN_CHNG))
+		wake_up_interruptible(&uport->info->delta_msr_wait);
+}
+
+static irqreturn_t sbd_interrupt(int irq, void *dev_id)
+{
+	struct sbd_port *sport = dev_id;
+	struct uart_port *uport = &sport->port;
+	irqreturn_t status = IRQ_NONE;
+	unsigned int intstat;
+	int count;
+
+	for (count = 16; count; count--) {
+		intstat = read_sbdshr(sport,
+				      R_DUART_ISRREG((uport->line) % 2));
+		intstat &= read_sbdshr(sport,
+				       R_DUART_IMRREG((uport->line) % 2));
+		intstat &= M_DUART_ISR_ALL;
+		if (!intstat)
+			break;
+
+		if (intstat & M_DUART_ISR_RX)
+			sbd_receive_chars(sport);
+		if (intstat & M_DUART_ISR_IN)
+			sbd_status_handle(sport);
+		if (intstat & M_DUART_ISR_TX)
+			sbd_transmit_chars(sport);
+
+		status = IRQ_HANDLED;
+	}
+
+	return status;
+}
+
+
+static int sbd_startup(struct uart_port *uport)
+{
+	struct sbd_port *sport = to_sport(uport);
+	unsigned int mode1;
+	int ret;
+
+	ret = request_irq(sport->port.irq, sbd_interrupt,
+			  IRQF_SHARED, "sb1250-duart", sport);
+	if (ret)
+		return ret;
+
+	/* Clear the receive FIFO.  */
+	sbd_receive_drain(sport);
+
+	/* Clear the interrupt registers.  */
+	write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_BREAK_INT);
+	read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2));
+
+	/* Set rx/tx interrupt to FIFO available.  */
+	mode1 = read_sbdchn(sport, R_DUART_MODE_REG_1);
+	mode1 &= ~(M_DUART_RX_IRQ_SEL_RXFULL | M_DUART_TX_IRQ_SEL_TXEMPT);
+	write_sbdchn(sport, R_DUART_MODE_REG_1, mode1);
+
+	/* Disable tx, enable rx.  */
+	write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_EN);
+	sport->tx_stopped = 1;
+
+	/* Enable interrupts.  */
+	write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
+		     M_DUART_IMR_IN | M_DUART_IMR_RX);
+
+	return 0;
+}
+
+static void sbd_shutdown(struct uart_port *uport)
+{
+	struct sbd_port *sport = to_sport(uport);
+
+	write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS);
+	sport->tx_stopped = 1;
+	free_irq(sport->port.irq, sport);
+}
+
+
+static void sbd_init_port(struct sbd_port *sport)
+{
+	struct uart_port *uport = &sport->port;
+
+	if (sport->initialised)
+		return;
+
+	/* There is no DUART reset feature, so just set some sane defaults.  */
+	write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_TX);
+	write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_RX);
+	write_sbdchn(sport, R_DUART_MODE_REG_1, V_DUART_BITS_PER_CHAR_8);
+	write_sbdchn(sport, R_DUART_MODE_REG_2, 0);
+	write_sbdchn(sport, R_DUART_FULL_CTL,
+		     V_DUART_INT_TIME(0) | V_DUART_SIG_FULL(15));
+	write_sbdchn(sport, R_DUART_OPCR_X, 0);
+	write_sbdchn(sport, R_DUART_AUXCTL_X, 0);
+	write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0);
+
+	sport->initialised = 1;
+}
+
+static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
+			    struct ktermios *old_termios)
+{
+	struct sbd_port *sport = to_sport(uport);
+	unsigned int mode1 = 0, mode2 = 0, aux = 0;
+	unsigned int mode1mask = 0, mode2mask = 0, auxmask = 0;
+	unsigned int oldmode1, oldmode2, oldaux;
+	unsigned int baud, brg;
+	unsigned int command;
+
+	mode1mask |= ~(M_DUART_PARITY_MODE | M_DUART_PARITY_TYPE_ODD |
+		       M_DUART_BITS_PER_CHAR);
+	mode2mask |= ~M_DUART_STOP_BIT_LEN_2;
+	auxmask |= ~M_DUART_CTS_CHNG_ENA;
+
+	/* Byte size.  */
+	switch (termios->c_cflag & CSIZE) {
+	case CS5:
+	case CS6:
+		/* Unsupported, leave unchanged.  */
+		mode1mask |= M_DUART_PARITY_MODE;
+		break;
+	case CS7:
+		mode1 |= V_DUART_BITS_PER_CHAR_7;
+		break;
+	case CS8:
+	default:
+		mode1 |= V_DUART_BITS_PER_CHAR_8;
+		break;
+	}
+
+	/* Parity and stop bits.  */
+	if (termios->c_cflag & CSTOPB)
+		mode2 |= M_DUART_STOP_BIT_LEN_2;
+	else
+		mode2 |= M_DUART_STOP_BIT_LEN_1;
+	if (termios->c_cflag & PARENB)
+		mode1 |= V_DUART_PARITY_MODE_ADD;
+	else
+		mode1 |= V_DUART_PARITY_MODE_NONE;
+	if (termios->c_cflag & PARODD)
+		mode1 |= M_DUART_PARITY_TYPE_ODD;
+	else
+		mode1 |= M_DUART_PARITY_TYPE_EVEN;
+
+	baud = uart_get_baud_rate(uport, termios, old_termios, 1200, 5000000);
+	brg = V_DUART_BAUD_RATE(baud);
+	/* The actual lower bound is 1221bps, so compensate.  */
+	if (brg > M_DUART_CLK_COUNTER)
+		brg = M_DUART_CLK_COUNTER;
+
+	uart_update_timeout(uport, termios->c_cflag, baud);
+
+	uport->read_status_mask = M_DUART_OVRUN_ERR;
+	if (termios->c_iflag & INPCK)
+		uport->read_status_mask |= M_DUART_FRM_ERR |
+					   M_DUART_PARITY_ERR;
+	if (termios->c_iflag & (BRKINT | PARMRK))
+		uport->read_status_mask |= M_DUART_RCVD_BRK;
+
+	uport->ignore_status_mask = 0;
+	if (termios->c_iflag & IGNPAR)
+		uport->ignore_status_mask |= M_DUART_FRM_ERR |
+					     M_DUART_PARITY_ERR;
+	if (termios->c_iflag & IGNBRK) {
+		uport->ignore_status_mask |= M_DUART_RCVD_BRK;
+		if (termios->c_iflag & IGNPAR)
+			uport->ignore_status_mask |= M_DUART_OVRUN_ERR;
+	}
+
+	if (termios->c_cflag & CREAD)
+		command = M_DUART_RX_EN;
+	else
+		command = M_DUART_RX_DIS;
+
+	if (termios->c_cflag & CRTSCTS)
+		aux |= M_DUART_CTS_CHNG_ENA;
+	else
+		aux &= ~M_DUART_CTS_CHNG_ENA;
+
+	spin_lock(&uport->lock);
+
+	if (sport->tx_stopped)
+		command |= M_DUART_TX_DIS;
+	else
+		command |= M_DUART_TX_EN;
+
+	oldmode1 = read_sbdchn(sport, R_DUART_MODE_REG_1) & mode1mask;
+	oldmode2 = read_sbdchn(sport, R_DUART_MODE_REG_2) & mode2mask;
+	oldaux = read_sbdchn(sport, R_DUART_AUXCTL_X) & auxmask;
+
+	if (!sport->tx_stopped)
+		sbd_line_drain(sport);
+	write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS);
+
+	write_sbdchn(sport, R_DUART_MODE_REG_1, mode1 | oldmode1);
+	write_sbdchn(sport, R_DUART_MODE_REG_2, mode2 | oldmode2);
+	write_sbdchn(sport, R_DUART_CLK_SEL, brg);
+	write_sbdchn(sport, R_DUART_AUXCTL_X, aux | oldaux);
+
+	write_sbdchn(sport, R_DUART_CMD, command);
+
+	spin_unlock(&uport->lock);
+}
+
+
+static const char *sbd_type(struct uart_port *uport)
+{
+	return "SB1250 DUART";
+}
+
+static void sbd_release_port(struct uart_port *uport)
+{
+	struct sbd_port *sport = to_sport(uport);
+	struct sbd_duart *duart = sport->duart;
+	int map_guard;
+
+	iounmap(sport->memctrl);
+	sport->memctrl = NULL;
+	iounmap(uport->membase);
+	uport->membase = NULL;
+
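+	/* The last channel to go releases the shared control registers.  */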
+	map_guard = atomic_add_return(-1, &duart->map_guard);
+	if (!map_guard)
+		release_mem_region(duart->mapctrl, DUART_CHANREG_SPACING);
+	release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
+}
+
+static int sbd_map_port(struct uart_port *uport)
+{
+	static const char *err = KERN_ERR "sbd: Cannot map MMIO\n";
+	struct sbd_port *sport = to_sport(uport);
+	struct sbd_duart *duart = sport->duart;
+
+	if (!uport->membase)
+		uport->membase = ioremap_nocache(uport->mapbase,
+						 DUART_CHANREG_SPACING);
+	if (!uport->membase) {
+		printk(err);
+		return -ENOMEM;
+	}
+
+	if (!sport->memctrl)
+		sport->memctrl = ioremap_nocache(duart->mapctrl,
+						 DUART_CHANREG_SPACING);
+	if (!sport->memctrl) {
+		printk(err);
+		iounmap(uport->membase);
+		uport->membase = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int sbd_request_port(struct uart_port *uport)
+{
+	static const char *err = KERN_ERR
+				 "sbd: Unable to reserve MMIO resource\n";
+	struct sbd_duart *duart = to_sport(uport)->duart;
+	int map_guard;
+	int ret = 0;
+
+	if (!request_mem_region(uport->mapbase, DUART_CHANREG_SPACING,
+				"sb1250-duart")) {
+		printk(err);
+		return -EBUSY;
+	}
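+	/* The first channel up claims the shared control registers.  */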
+	map_guard = atomic_add_return(1, &duart->map_guard);
+	if (map_guard == 1) {
+		if (!request_mem_region(duart->mapctrl, DUART_CHANREG_SPACING,
+					"sb1250-duart")) {
+			atomic_add(-1, &duart->map_guard);
+			printk(err);
+			ret = -EBUSY;
+		}
+	}
+	if (!ret) {
+		ret = sbd_map_port(uport);
+		if (ret) {
+			map_guard = atomic_add_return(-1, &duart->map_guard);
+			if (!map_guard)
+				release_mem_region(duart->mapctrl,
+						   DUART_CHANREG_SPACING);
+		}
+	}
+	if (ret) {
+		release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
+		return ret;
+	}
+	return 0;
+}
+
+static void sbd_config_port(struct uart_port *uport, int flags)
+{
+	struct sbd_port *sport = to_sport(uport);
+
+	if (flags & UART_CONFIG_TYPE) {
+		if (sbd_request_port(uport))
+			return;
+
+		uport->type = PORT_SB1250_DUART;
+
+		sbd_init_port(sport);
+	}
+}
+
+static int sbd_verify_port(struct uart_port *uport, struct serial_struct *ser)
+{
+	int ret = 0;
+
+	if (ser->type != PORT_UNKNOWN && ser->type != PORT_SB1250_DUART)
+		ret = -EINVAL;
+	if (ser->irq != uport->irq)
+		ret = -EINVAL;
+	if (ser->baud_base != uport->uartclk / 16)
+		ret = -EINVAL;
+	return ret;
+}
+
+
+static struct uart_ops sbd_ops = {
+	.tx_empty	= sbd_tx_empty,
+	.set_mctrl	= sbd_set_mctrl,
+	.get_mctrl	= sbd_get_mctrl,
+	.stop_tx	= sbd_stop_tx,
+	.start_tx	= sbd_start_tx,
+	.stop_rx	= sbd_stop_rx,
+	.enable_ms	= sbd_enable_ms,
+	.break_ctl	= sbd_break_ctl,
+	.startup	= sbd_startup,
+	.shutdown	= sbd_shutdown,
+	.set_termios	= sbd_set_termios,
+	.type		= sbd_type,
+	.release_port	= sbd_release_port,
+	.request_port	= sbd_request_port,
+	.config_port	= sbd_config_port,
+	.verify_port	= sbd_verify_port,
+};
+
+/* Initialize SB1250 DUART port structures.  */
+static void __init sbd_probe_duarts(void)
+{
+	static int probed;
+	int chip, side;
+	int max_lines, line;
+
+	if (probed)
+		return;
+
+	/* Set the number of available units based on the SOC type.  */
+	switch (soc_type) {
+	case K_SYS_SOC_TYPE_BCM1x55:
+	case K_SYS_SOC_TYPE_BCM1x80:
+		max_lines = 4;
+		break;
+	default:
+		/* Assume at least two serial ports at the normal address.  */
+		max_lines = 2;
+		break;
+	}
+
+	probed = 1;
+
+	for (chip = 0, line = 0; chip < DUART_MAX_CHIP && line < max_lines;
+	     chip++) {
+		sbd_duarts[chip].mapctrl = SBD_CTRLREGS(line);
+
+		for (side = 0; side < DUART_MAX_SIDE && line < max_lines;
+		     side++, line++) {
+			struct sbd_port *sport = &sbd_duarts[chip].sport[side];
+			struct uart_port *uport = &sport->port;
+
+			sport->duart	= &sbd_duarts[chip];
+
+			uport->irq	= SBD_INT(line);
+			uport->uartclk	= 100000000 / 20 * 16;
+			uport->fifosize	= 16;
+			uport->iotype	= UPIO_MEM;
+			uport->flags	= UPF_BOOT_AUTOCONF;
+			uport->ops	= &sbd_ops;
+			uport->line	= line;
+			uport->mapbase	= SBD_CHANREGS(line);
+		}
+	}
+}
+
+
+#ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE
+/*
+ * Serial console stuff.  A very basic polling driver for doing serial
+ * console output.  The console_sem is held by the caller, so we
+ * shouldn't be interrupted for more console activity.
+ */
+static void sbd_console_putchar(struct uart_port *uport, int ch)
+{
+	struct sbd_port *sport = to_sport(uport);
+
+	sbd_transmit_drain(sport);
+	write_sbdchn(sport, R_DUART_TX_HOLD, ch);
+}
+
+static void sbd_console_write(struct console *co, const char *s,
+			      unsigned int count)
+{
+	int chip = co->index / DUART_MAX_SIDE;
+	int side = co->index % DUART_MAX_SIDE;
+	struct sbd_port *sport = &sbd_duarts[chip].sport[side];
+	struct uart_port *uport = &sport->port;
+	unsigned long flags;
+	unsigned int mask;
+
+	/* Disable transmit interrupts and enable the transmitter. */
+	spin_lock_irqsave(&uport->lock, flags);
+	mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
+	write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
+		     mask & ~M_DUART_IMR_TX);
+	write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	uart_console_write(&sport->port, s, count, sbd_console_putchar);
+
+	/* Restore transmit interrupts and the transmitter enable. */
+	spin_lock_irqsave(&uport->lock, flags);
+	sbd_line_drain(sport);
+	if (sport->tx_stopped)
+		write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
+	write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
+	spin_unlock_irqrestore(&uport->lock, flags);
+}
+
+static int __init sbd_console_setup(struct console *co, char *options)
+{
+	int chip = co->index / DUART_MAX_SIDE;
+	int side = co->index % DUART_MAX_SIDE;
+	struct sbd_port *sport = &sbd_duarts[chip].sport[side];
+	struct uart_port *uport = &sport->port;
+	int baud = 115200;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+	int ret;
+
+	if (!sport->duart)
+		return -ENXIO;
+
+	ret = sbd_map_port(uport);
+	if (ret)
+		return ret;
+
+	sbd_init_port(sport);
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+	return uart_set_options(uport, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver sbd_reg;
+static struct console sbd_console = {
+	.name	= "duart",
+	.write	= sbd_console_write,
+	.device	= uart_console_device,
+	.setup	= sbd_console_setup,
+	.flags	= CON_PRINTBUFFER,
+	.index	= -1,
+	.data	= &sbd_reg
+};
+
+static int __init sbd_serial_console_init(void)
+{
+	sbd_probe_duarts();
+	register_console(&sbd_console);
+
+	return 0;
+}
+
+console_initcall(sbd_serial_console_init);
+
+#define SERIAL_SB1250_DUART_CONSOLE	&sbd_console
+#else
+#define SERIAL_SB1250_DUART_CONSOLE	NULL
+#endif /* CONFIG_SERIAL_SB1250_DUART_CONSOLE */
+
+
+static struct uart_driver sbd_reg = {
+	.owner		= THIS_MODULE,
+	.driver_name	= "serial",
+	.dev_name	= "duart",
+	.major		= TTY_MAJOR,
+	.minor		= SB1250_DUART_MINOR_BASE,
+	.nr		= DUART_MAX_CHIP * DUART_MAX_SIDE,
+	.cons		= SERIAL_SB1250_DUART_CONSOLE,
+};
+
+/* Set up the driver and register it.  */
+static int __init sbd_init(void)
+{
+	int i, ret;
+
+	sbd_probe_duarts();
+
+	ret = uart_register_driver(&sbd_reg);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < DUART_MAX_CHIP * DUART_MAX_SIDE; i++) {
+		struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE];
+		struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE];
+		struct uart_port *uport = &sport->port;
+
+		if (sport->duart)
+			uart_add_one_port(&sbd_reg, uport);
+	}
+
+	return 0;
+}
+
+/* Unload the driver.  Unregister stuff, get ready to go away.  */
+static void __exit sbd_exit(void)
+{
+	int i;
+
+	for (i = DUART_MAX_CHIP * DUART_MAX_SIDE - 1; i >= 0; i--) {
+		struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE];
+		struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE];
+		struct uart_port *uport = &sport->port;
+
+		if (sport->duart)
+			uart_remove_one_port(&sbd_reg, uport);
+	}
+
+	uart_unregister_driver(&sbd_reg);
+}
+
+module_init(sbd_init);
+module_exit(sbd_exit);
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index 17bcca5..d82be42 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -258,17 +258,7 @@
 /* port->lock held by caller.  */
 static void sunhv_start_tx(struct uart_port *port)
 {
-	struct circ_buf *xmit = &port->info->xmit;
-
-	while (!uart_circ_empty(xmit)) {
-		long status = sun4v_con_putchar(xmit->buf[xmit->tail]);
-
-		if (status != HV_EOK)
-			break;
-
-		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
-		port->icount.tx++;
-	}
+	transmit_chars(port);
 }
 
 /* port->lock is not held.  */
diff --git a/drivers/serial/zs.c b/drivers/serial/zs.c
new file mode 100644
index 0000000..65f1294
--- /dev/null
+++ b/drivers/serial/zs.c
@@ -0,0 +1,1287 @@
+/*
+ * zs.c: Serial port driver for IOASIC DECstations.
+ *
+ * Derived from drivers/sbus/char/sunserial.c by Paul Mackerras.
+ * Derived from drivers/macintosh/macserial.c by Harald Koerfgen.
+ *
+ * DECstation changes
+ * Copyright (C) 1998-2000 Harald Koerfgen
+ * Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
+ *
+ * For the rest of the code the original Copyright applies:
+ * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au)
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ *
+ *
+ * Note: for IOASIC systems the wiring is as follows:
+ *
+ * mouse/keyboard:
+ * DIN-7 MJ-4  signal        SCC
+ * 2     1     TxD       <-  A.TxD
+ * 3     4     RxD       ->  A.RxD
+ *
+ * EIA-232/EIA-423:
+ * DB-25 MMJ-6 signal        SCC
+ * 2     2     TxD       <-  B.TxD
+ * 3     5     RxD       ->  B.RxD
+ * 4           RTS       <- ~A.RTS
+ * 5           CTS       -> ~B.CTS
+ * 6     6     DSR       -> ~A.SYNC
+ * 8           CD        -> ~B.DCD
+ * 12          DSRS(DCE) -> ~A.CTS  (*)
+ * 15          TxC       ->  B.TxC
+ * 17          RxC       ->  B.RxC
+ * 20    1     DTR       <- ~A.DTR
+ * 22          RI        -> ~A.DCD
+ * 23          DSRS(DTE) <- ~B.RTS
+ *
+ * (*) EIA-232 defines the signal at this pin to be SCD, while DSRS(DCE)
+ *     is shared with DSRS(DTE) at pin 23.
+ *
+ * As you can immediately notice, the wiring of the RTS, DTR and DSR signals
+ * is a bit odd.  This makes the handling of port B unnecessarily
+ * complicated and prevents the use of some automatic modes of operation.
+ */
+
+#if defined(CONFIG_SERIAL_ZS_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/bug.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irqflags.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/types.h>
+
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+#include <asm/dec/interrupts.h>
+#include <asm/dec/ioasic_addrs.h>
+#include <asm/dec/system.h>
+
+#include "zs.h"
+
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_DESCRIPTION("DECstation Z85C30 serial driver");
+MODULE_LICENSE("GPL");
+
+
+static char zs_name[] __initdata = "DECstation Z85C30 serial driver version ";
+static char zs_version[] __initdata = "0.10";
+
+/*
+ * It would be nice to dynamically allocate everything that
+ * depends on ZS_NUM_SCCS, so we could support any number of
+ * Z85C30s, but for now...
+ */
+#define ZS_NUM_SCCS	2		/* Max # of ZS chips supported.  */
+#define ZS_NUM_CHAN	2		/* 2 channels per chip.  */
+#define ZS_CHAN_A	0		/* Index of channel A.  */
+#define ZS_CHAN_B	1		/* Index of channel B.  */
+#define ZS_CHAN_IO_SIZE 8		/* IOMEM space size.  */
+#define ZS_CHAN_IO_STRIDE 4		/* Register alignment.  */
+#define ZS_CHAN_IO_OFFSET 1		/* The SCC resides on the high byte
+					   of the 16-bit IOBUS.  */
+#define ZS_CLOCK        7372800 	/* Z85C30 PCLK input clock rate.  */
+
+#define to_zport(uport) container_of(uport, struct zs_port, port)
+
+struct zs_parms {
+	resource_size_t scc[ZS_NUM_SCCS];
+	int irq[ZS_NUM_SCCS];
+};
+
+static struct zs_scc zs_sccs[ZS_NUM_SCCS];
+
+static u8 zs_init_regs[ZS_NUM_REGS] __initdata = {
+	0,				/* write 0 */
+	PAR_SPEC,			/* write 1 */
+	0,				/* write 2 */
+	0,				/* write 3 */
+	X16CLK | SB1,			/* write 4 */
+	0,				/* write 5 */
+	0, 0, 0,			/* write 6, 7, 8 */
+	MIE | DLC | NV,			/* write 9 */
+	NRZ,				/* write 10 */
+	TCBR | RCBR,			/* write 11 */
+	0, 0,				/* BRG time constant, write 12 + 13 */
+	BRSRC | BRENABL,		/* write 14 */
+	0,				/* write 15 */
+};
+
+/*
+ * Debugging.
+ */
+#undef ZS_DEBUG_REGS
+
+
+/*
+ * Reading and writing Z85C30 registers.
+ */
+static void recovery_delay(void)
+{
+	udelay(2);
+}
+
+static u8 read_zsreg(struct zs_port *zport, int reg)
+{
+	void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET;
+	u8 retval;
+
+	if (reg != 0) {
+		writeb(reg & 0xf, control);
+		fast_iob();
+		recovery_delay();
+	}
+	retval = readb(control);
+	recovery_delay();
+	return retval;
+}
+
+static void write_zsreg(struct zs_port *zport, int reg, u8 value)
+{
+	void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET;
+
+	if (reg != 0) {
+		writeb(reg & 0xf, control);
+		fast_iob();
+		recovery_delay();
+	}
+	writeb(value, control);
+	fast_iob();
+	recovery_delay();
+}
+
+static u8 read_zsdata(struct zs_port *zport)
+{
+	void __iomem *data = zport->port.membase +
+			     ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET;
+	u8 retval;
+
+	retval = readb(data);
+	recovery_delay();
+	return retval;
+}
+
+static void write_zsdata(struct zs_port *zport, u8 value)
+{
+	void __iomem *data = zport->port.membase +
+			     ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET;
+
+	writeb(value, data);
+	fast_iob();
+	recovery_delay();
+}
+
+#ifdef ZS_DEBUG_REGS
+void zs_dump(void)
+{
+	struct zs_port *zport;
+	int i, j;
+
+	for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) {
+		zport = &zs_sccs[i / ZS_NUM_CHAN].zport[i % ZS_NUM_CHAN];
+
+		if (!zport->scc)
+			continue;
+
+		for (j = 0; j < 16; j++)
+			printk("W%-2d = 0x%02x\t", j, zport->regs[j]);
+		printk("\n");
+		for (j = 0; j < 16; j++)
+			printk("R%-2d = 0x%02x\t", j, read_zsreg(zport, j));
+		printk("\n\n");
+	}
+}
+#endif
+
+
+static void zs_spin_lock_cond_irq(spinlock_t *lock, int irq)
+{
+	if (irq)
+		spin_lock_irq(lock);
+	else
+		spin_lock(lock);
+}
+
+static void zs_spin_unlock_cond_irq(spinlock_t *lock, int irq)
+{
+	if (irq)
+		spin_unlock_irq(lock);
+	else
+		spin_unlock(lock);
+}
+
+static int zs_receive_drain(struct zs_port *zport)
+{
+	int loops = 10000;
+
+	while ((read_zsreg(zport, R0) & Rx_CH_AV) && loops--)
+		read_zsdata(zport);
+	return loops;
+}
+
+static int zs_transmit_drain(struct zs_port *zport, int irq)
+{
+	struct zs_scc *scc = zport->scc;
+	int loops = 10000;
+
+	while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && loops--) {
+		zs_spin_unlock_cond_irq(&scc->zlock, irq);
+		udelay(2);
+		zs_spin_lock_cond_irq(&scc->zlock, irq);
+	}
+	return loops;
+}
+
+static int zs_line_drain(struct zs_port *zport, int irq)
+{
+	struct zs_scc *scc = zport->scc;
+	int loops = 10000;
+
+	while (!(read_zsreg(zport, R1) & ALL_SNT) && loops--) {
+		zs_spin_unlock_cond_irq(&scc->zlock, irq);
+		udelay(2);
+		zs_spin_lock_cond_irq(&scc->zlock, irq);
+	}
+	return loops;
+}
+
+
+static void load_zsregs(struct zs_port *zport, u8 *regs, int irq)
+{
+	/* Let the current transmission finish.  */
+	zs_line_drain(zport, irq);
+	/* Load 'em up.  */
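+	/* Rx and Tx are kept disabled until all other registers are set.  */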
+	write_zsreg(zport, R3, regs[3] & ~RxENABLE);
+	write_zsreg(zport, R5, regs[5] & ~TxENAB);
+	write_zsreg(zport, R4, regs[4]);
+	write_zsreg(zport, R9, regs[9]);
+	write_zsreg(zport, R1, regs[1]);
+	write_zsreg(zport, R2, regs[2]);
+	write_zsreg(zport, R10, regs[10]);
+	write_zsreg(zport, R14, regs[14] & ~BRENABL);
+	write_zsreg(zport, R11, regs[11]);
+	write_zsreg(zport, R12, regs[12]);
+	write_zsreg(zport, R13, regs[13]);
+	write_zsreg(zport, R14, regs[14]);
+	write_zsreg(zport, R15, regs[15]);
+	if (regs[3] & RxENABLE)
+		write_zsreg(zport, R3, regs[3]);
+	if (regs[5] & TxENAB)
+		write_zsreg(zport, R5, regs[5]);
+}
+
+
+/*
+ * Status handling routines.
+ */
+
+/*
+ * zs_tx_empty() -- get the transmitter empty status
+ *
+ * Purpose: Let the user call ioctl() to get info on when the UART is
+ * 	    physically empty.  On bus types like RS485, the transmitter
+ * 	    must release the bus after transmitting.  This must be done
+ * 	    when the transmit shift register is empty, not when the
+ * 	    transmit holding register is empty.  This functionality
+ * 	    allows an RS485 driver to be written in user space.
+ */
+static unsigned int zs_tx_empty(struct uart_port *uport)
+{
+	struct zs_port *zport = to_zport(uport);
+	struct zs_scc *scc = zport->scc;
+	unsigned long flags;
+	u8 status;
+
+	spin_lock_irqsave(&scc->zlock, flags);
+	status = read_zsreg(zport, R1);
+	spin_unlock_irqrestore(&scc->zlock, flags);
+
+	return status & ALL_SNT ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int zs_raw_get_ab_mctrl(struct zs_port *zport_a,
+					struct zs_port *zport_b)
+{
+	u8 status_a, status_b;
+	unsigned int mctrl;
+
+	status_a = read_zsreg(zport_a, R0);
+	status_b = read_zsreg(zport_b, R0);
+
+	mctrl = ((status_b & CTS) ? TIOCM_CTS : 0) |
+		((status_b & DCD) ? TIOCM_CAR : 0) |
+		((status_a & DCD) ? TIOCM_RNG : 0) |
+		((status_a & SYNC_HUNT) ? TIOCM_DSR : 0);
+
+	return mctrl;
+}
+
+static unsigned int zs_raw_get_mctrl(struct zs_port *zport)
+{
+	struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A];
+
+	return zport != zport_a ? zs_raw_get_ab_mctrl(zport_a, zport) : 0;
+}
+
+static unsigned int zs_raw_xor_mctrl(struct zs_port *zport)
+{
+	struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A];
+	unsigned int mmask, mctrl, delta;
+	u8 mask_a, mask_b;
+
+	if (zport == zport_a)
+		return 0;
+
+	mask_a = zport_a->regs[15];
+	mask_b = zport->regs[15];
+
+	mmask = ((mask_b & CTSIE) ? TIOCM_CTS : 0) |
+		((mask_b & DCDIE) ? TIOCM_CAR : 0) |
+		((mask_a & DCDIE) ? TIOCM_RNG : 0) |
+		((mask_a & SYNCIE) ? TIOCM_DSR : 0);
+
+	mctrl = zport->mctrl;
+	if (mmask) {
+		mctrl &= ~mmask;
+		mctrl |= zs_raw_get_ab_mctrl(zport_a, zport) & mmask;
+	}
+
+	delta = mctrl ^ zport->mctrl;
+	if (delta)
+		zport->mctrl = mctrl;
+
+	return delta;
+}
+
+static unsigned int zs_get_mctrl(struct uart_port *uport)
+{
+	struct zs_port *zport = to_zport(uport);
+	struct zs_scc *scc = zport->scc;
+	unsigned int mctrl;
+
+	spin_lock(&scc->zlock);
+	mctrl = zs_raw_get_mctrl(zport);
+	spin_unlock(&scc->zlock);
+
+	return mctrl;
+}
+
+static void zs_set_mctrl(struct uart_port *uport, unsigned int mctrl)
+{
+	struct zs_port *zport = to_zport(uport);
+	struct zs_scc *scc = zport->scc;
+	struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+	u8 oldloop, newloop;
+
+	spin_lock(&scc->zlock);
+	if (zport != zport_a) {
+		if (mctrl & TIOCM_DTR)
+			zport_a->regs[5] |= DTR;
+		else
+			zport_a->regs[5] &= ~DTR;
+		if (mctrl & TIOCM_RTS)
+			zport_a->regs[5] |= RTS;
+		else
+			zport_a->regs[5] &= ~RTS;
+		write_zsreg(zport_a, R5, zport_a->regs[5]);
+	}
+
+	/* Rarely modified, so don't poke at hardware unless necessary. */
+	oldloop = zport->regs[14];
+	newloop = oldloop;
+	if (mctrl & TIOCM_LOOP)
+		newloop |= LOOPBAK;
+	else
+		newloop &= ~LOOPBAK;
+	if (newloop != oldloop) {
+		zport->regs[14] = newloop;
+		write_zsreg(zport, R14, zport->regs[14]);
+	}
+	spin_unlock(&scc->zlock);
+}
+
+static void zs_raw_stop_tx(struct zs_port *zport)
+{
+	write_zsreg(zport, R0, RES_Tx_P);
+	zport->tx_stopped = 1;
+}
+
+static void zs_stop_tx(struct uart_port *uport)
+{
+	struct zs_port *zport = to_zport(uport);
+	struct zs_scc *scc = zport->scc;
+
+	spin_lock(&scc->zlock);
+	zs_raw_stop_tx(zport);
+	spin_unlock(&scc->zlock);
+}
+
+static void zs_raw_transmit_chars(struct zs_port *);
+
+static void zs_start_tx(struct uart_port *uport)
+{
+	struct zs_port *zport = to_zport(uport);
+	struct zs_scc *scc = zport->scc;
+
+	spin_lock(&scc->zlock);
+	if (zport->tx_stopped) {
+		zs_transmit_drain(zport, 0);
+		zport->tx_stopped = 0;
+		zs_raw_transmit_chars(zport);
+	}
+	spin_unlock(&scc->zlock);
+}
+
+static void zs_stop_rx(struct uart_port *uport)
+{
+	struct zs_port *zport = to_zport(uport);
+	struct zs_scc *scc = zport->scc;
+	struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+
+	spin_lock(&scc->zlock);
+	zport->regs[15] &= ~BRKIE;
+	zport->regs[1] &= ~(RxINT_MASK | TxINT_ENAB);
+	zport->regs[1] |= RxINT_DISAB;
+
+	if (zport != zport_a) {
+		/* A-side DCD tracks RI and SYNC tracks DSR.  */
+		zport_a->regs[15] &= ~(DCDIE | SYNCIE);
+		write_zsreg(zport_a, R15, zport_a->regs[15]);
+		if (!(zport_a->regs[15] & BRKIE)) {
+			zport_a->regs[1] &= ~EXT_INT_ENAB;
+			write_zsreg(zport_a, R1, zport_a->regs[1]);
+		}
+
+		/* This-side DCD tracks DCD and CTS tracks CTS.  */
+		zport->regs[15] &= ~(DCDIE | CTSIE);
+		zport->regs[1] &= ~EXT_INT_ENAB;
+	} else {
+		/* DCD tracks RI and SYNC tracks DSR for the B side.  */
+		if (!(zport->regs[15] & (DCDIE | SYNCIE)))
+			zport->regs[1] &= ~EXT_INT_ENAB;
+	}
+
+	write_zsreg(zport, R15, zport->regs[15]);
+	write_zsreg(zport, R1, zport->regs[1]);
+	spin_unlock(&scc->zlock);
+}
+
+static void zs_enable_ms(struct uart_port *uport)
+{
+	struct zs_port *zport = to_zport(uport);
+	struct zs_scc *scc = zport->scc;
+	struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+
+	if (zport == zport_a)
+		return;
+
+	spin_lock(&scc->zlock);
+
+	/* Clear Ext interrupts if not being handled already.  */
+	if (!(zport_a->regs[1] & EXT_INT_ENAB))
+		write_zsreg(zport_a, R0, RES_EXT_INT);
+
+	/* A-side DCD tracks RI and SYNC tracks DSR.  */
+	zport_a->regs[1] |= EXT_INT_ENAB;
+	zport_a->regs[15] |= DCDIE | SYNCIE;
+
+	/* This-side DCD tracks DCD and CTS tracks CTS.  */
+	zport->regs[15] |= DCDIE | CTSIE;
+
+	zs_raw_xor_mctrl(zport);
+
+	write_zsreg(zport_a, R1, zport_a->regs[1]);
+	write_zsreg(zport_a, R15, zport_a->regs[15]);
+	write_zsreg(zport, R15, zport->regs[15]);
+	spin_unlock(&scc->zlock);
+}
+
+static void zs_break_ctl(struct uart_port *uport, int break_state)
+{
+	struct zs_port *zport = to_zport(uport);
+	struct zs_scc *scc = zport->scc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&scc->zlock, flags);
+	if (break_state == -1)
+		zport->regs[5] |= SND_BRK;
+	else
+		zport->regs[5] &= ~SND_BRK;
+	write_zsreg(zport, R5, zport->regs[5]);
+	spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+
+/*
+ * Interrupt handling routines.
+ */
+#define Rx_BRK 0x0100			/* BREAK event software flag.  */
+#define Rx_SYS 0x0200			/* SysRq event software flag.  */
+
+static void zs_receive_chars(struct zs_port *zport)
+{
+	struct uart_port *uport = &zport->port;
+	struct zs_scc *scc = zport->scc;
+	struct uart_icount *icount;
+	unsigned int avail, status, ch, flag;
+	int count;
+
+	for (count = 16; count; count--) {
+		spin_lock(&scc->zlock);
+		avail = read_zsreg(zport, R0) & Rx_CH_AV;
+		spin_unlock(&scc->zlock);
+		if (!avail)
+			break;
+
+		spin_lock(&scc->zlock);
+		status = read_zsreg(zport, R1) & (Rx_OVR | FRM_ERR | PAR_ERR);
+		ch = read_zsdata(zport);
+		spin_unlock(&scc->zlock);
+
+		flag = TTY_NORMAL;
+
+		icount = &uport->icount;
+		icount->rx++;
+
+		/* Handle the null char received when BREAK is removed.  */
+		if (!ch)
+			status |= zport->tty_break;
+		if (unlikely(status &
+			     (Rx_OVR | FRM_ERR | PAR_ERR | Rx_SYS | Rx_BRK))) {
+			zport->tty_break = 0;
+
+			/* Reset the error indication.  */
+			if (status & (Rx_OVR | FRM_ERR | PAR_ERR)) {
+				spin_lock(&scc->zlock);
+				write_zsreg(zport, R0, ERR_RES);
+				spin_unlock(&scc->zlock);
+			}
+
+			if (status & (Rx_SYS | Rx_BRK)) {
+				icount->brk++;
+				/* SysRq discards the null char.  */
+				if (status & Rx_SYS)
+					continue;
+			} else if (status & FRM_ERR)
+				icount->frame++;
+			else if (status & PAR_ERR)
+				icount->parity++;
+			if (status & Rx_OVR)
+				icount->overrun++;
+
+			status &= uport->read_status_mask;
+			if (status & Rx_BRK)
+				flag = TTY_BREAK;
+			else if (status & FRM_ERR)
+				flag = TTY_FRAME;
+			else if (status & PAR_ERR)
+				flag = TTY_PARITY;
+		}
+
+		if (uart_handle_sysrq_char(uport, ch))
+			continue;
+
+		uart_insert_char(uport, status, Rx_OVR, ch, flag);
+	}
+
+	tty_flip_buffer_push(uport->info->tty);
+}
+
+static void zs_raw_transmit_chars(struct zs_port *zport)
+{
+	struct circ_buf *xmit = &zport->port.info->xmit;
+
+	/* XON/XOFF chars.  */
+	if (zport->port.x_char) {
+		write_zsdata(zport, zport->port.x_char);
+		zport->port.icount.tx++;
+		zport->port.x_char = 0;
+		return;
+	}
+
+	/* If nothing to do or stopped or hardware stopped.  */
+	if (uart_circ_empty(xmit) || uart_tx_stopped(&zport->port)) {
+		zs_raw_stop_tx(zport);
+		return;
+	}
+
+	/* Send char.  */
+	write_zsdata(zport, xmit->buf[xmit->tail]);
+	xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+	zport->port.icount.tx++;
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&zport->port);
+
+	/* Are we done?  */
+	if (uart_circ_empty(xmit))
+		zs_raw_stop_tx(zport);
+}
+
+static void zs_transmit_chars(struct zs_port *zport)
+{
+	struct zs_scc *scc = zport->scc;
+
+	spin_lock(&scc->zlock);
+	zs_raw_transmit_chars(zport);
+	spin_unlock(&scc->zlock);
+}
+
+static void zs_status_handle(struct zs_port *zport, struct zs_port *zport_a)
+{
+	struct uart_port *uport = &zport->port;
+	struct zs_scc *scc = zport->scc;
+	unsigned int delta;
+	u8 status, brk;
+
+	spin_lock(&scc->zlock);
+
+	/* Get status from Read Register 0.  */
+	status = read_zsreg(zport, R0);
+
+	if (zport->regs[15] & BRKIE) {
+		brk = status & BRK_ABRT;
+		if (brk && !zport->brk) {
+			spin_unlock(&scc->zlock);
+			if (uart_handle_break(uport))
+				zport->tty_break = Rx_SYS;
+			else
+				zport->tty_break = Rx_BRK;
+			spin_lock(&scc->zlock);
+		}
+		zport->brk = brk;
+	}
+
+	if (zport != zport_a) {
+		delta = zs_raw_xor_mctrl(zport);
+		spin_unlock(&scc->zlock);
+
+		if (delta & TIOCM_CTS)
+			uart_handle_cts_change(uport,
+					       zport->mctrl & TIOCM_CTS);
+		if (delta & TIOCM_CAR)
+			uart_handle_dcd_change(uport,
+					       zport->mctrl & TIOCM_CAR);
+		if (delta & TIOCM_RNG)
+			uport->icount.dsr++;
+		if (delta & TIOCM_DSR)
+			uport->icount.rng++;
+
+		if (delta)
+			wake_up_interruptible(&uport->info->delta_msr_wait);
+
+		spin_lock(&scc->zlock);
+	}
+
+	/* Clear the status condition...  */
+	write_zsreg(zport, R0, RES_EXT_INT);
+
+	spin_unlock(&scc->zlock);
+}
+
+/*
+ * This is the Z85C30 driver's generic interrupt routine.
+ */
+static irqreturn_t zs_interrupt(int irq, void *dev_id)
+{
+	struct zs_scc *scc = dev_id;
+	struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+	struct zs_port *zport_b = &scc->zport[ZS_CHAN_B];
+	irqreturn_t status = IRQ_NONE;
+	u8 zs_intreg;
+	int count;
+
+	/*
+	 * NOTE: Read register 3 holds the irq status for both
+	 *       channels of a chip, but the value itself is only
+	 *       valid when read from the A channel.
+	 *       Yes... broken hardware...
+	 */
+	for (count = 16; count; count--) {
+		spin_lock(&scc->zlock);
+		zs_intreg = read_zsreg(zport_a, R3);
+		spin_unlock(&scc->zlock);
+		if (!zs_intreg)
+			break;
+
+		/*
+		 * We do not like losing characters, so we prioritise
+		 * interrupt sources a little bit differently than
+		 * the SCC would, were it allowed to.
+		 */
+		if (zs_intreg & CHBRxIP)
+			zs_receive_chars(zport_b);
+		if (zs_intreg & CHARxIP)
+			zs_receive_chars(zport_a);
+		if (zs_intreg & CHBEXT)
+			zs_status_handle(zport_b, zport_a);
+		if (zs_intreg & CHAEXT)
+			zs_status_handle(zport_a, zport_a);
+		if (zs_intreg & CHBTxIP)
+			zs_transmit_chars(zport_b);
+		if (zs_intreg & CHATxIP)
+			zs_transmit_chars(zport_a);
+
+		status = IRQ_HANDLED;
+	}
+
+	return status;
+}
+
+
+/*
+ * Finally, routines used to initialize the serial port.
+ */
+static int zs_startup(struct uart_port *uport)
+{
+	struct zs_port *zport = to_zport(uport);
+	struct zs_scc *scc = zport->scc;
+	unsigned long flags;
+	int irq_guard;
+	int ret;
+
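+	/* The SCC's two channels share one IRQ; only request it once.  */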
+	irq_guard = atomic_add_return(1, &scc->irq_guard);
+	if (irq_guard == 1) {
+		ret = request_irq(zport->port.irq, zs_interrupt,
+				  IRQF_SHARED, "scc", scc);
+		if (ret) {
+			atomic_add(-1, &scc->irq_guard);
+			printk(KERN_ERR "zs: can't get irq %d\n",
+			       zport->port.irq);
+			return ret;
+		}
+	}
+
+	spin_lock_irqsave(&scc->zlock, flags);
+
+	/* Clear the receive FIFO.  */
+	zs_receive_drain(zport);
+
+	/* Clear the interrupt registers.  */
+	write_zsreg(zport, R0, ERR_RES);
+	write_zsreg(zport, R0, RES_Tx_P);
+	/* But Ext only if not being handled already.  */
+	if (!(zport->regs[1] & EXT_INT_ENAB))
+		write_zsreg(zport, R0, RES_EXT_INT);
+
+	/* Finally, enable sequencing and interrupts.  */
+	zport->regs[1] &= ~RxINT_MASK;
+	zport->regs[1] |= RxINT_ALL | TxINT_ENAB | EXT_INT_ENAB;
+	zport->regs[3] |= RxENABLE;
+	zport->regs[5] |= TxENAB;
+	zport->regs[15] |= BRKIE;
+	write_zsreg(zport, R1, zport->regs[1]);
+	write_zsreg(zport, R3, zport->regs[3]);
+	write_zsreg(zport, R5, zport->regs[5]);
+	write_zsreg(zport, R15, zport->regs[15]);
+
+	/* Record the current state of RR0.  */
+	zport->mctrl = zs_raw_get_mctrl(zport);
+	zport->brk = read_zsreg(zport, R0) & BRK_ABRT;
+
+	zport->tx_stopped = 1;
+
+	spin_unlock_irqrestore(&scc->zlock, flags);
+
+	return 0;
+}
+
+static void zs_shutdown(struct uart_port *uport)
+{
+	struct zs_port *zport = to_zport(uport);
+	struct zs_scc *scc = zport->scc;
+	unsigned long flags;
+	int irq_guard;
+
+	spin_lock_irqsave(&scc->zlock, flags);
+
+	zport->regs[5] &= ~TxENAB;
+	zport->regs[3] &= ~RxENABLE;
+	write_zsreg(zport, R5, zport->regs[5]);
+	write_zsreg(zport, R3, zport->regs[3]);
+
+	spin_unlock_irqrestore(&scc->zlock, flags);
+
+	irq_guard = atomic_add_return(-1, &scc->irq_guard);
+	if (!irq_guard)
+		free_irq(zport->port.irq, scc);
+}
+
+
+static void zs_reset(struct zs_port *zport)
+{
+	struct zs_scc *scc = zport->scc;
+	int irq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&scc->zlock, flags);
+	irq = !irqs_disabled_flags(flags);
+	if (!scc->initialised) {
+		/* Reset the pointer first, just in case...  */
+		read_zsreg(zport, R0);
+		/* And let the current transmission finish.  */
+		zs_line_drain(zport, irq);
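+		/* Force a full hardware reset and let the chip settle.  */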
+		write_zsreg(zport, R9, FHWRES);
+		udelay(10);
+		write_zsreg(zport, R9, 0);
+		scc->initialised = 1;
+	}
+	load_zsregs(zport, zport->regs, irq);
+	spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
+			   struct ktermios *old_termios)
+{
+	struct zs_port *zport = to_zport(uport);
+	struct zs_scc *scc = zport->scc;
+	struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+	int irq;
+	unsigned int baud, brg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&scc->zlock, flags);
+	irq = !irqs_disabled_flags(flags);
+
+	/* Byte size.  */
+	zport->regs[3] &= ~RxNBITS_MASK;
+	zport->regs[5] &= ~TxNBITS_MASK;
+	switch (termios->c_cflag & CSIZE) {
+	case CS5:
+		zport->regs[3] |= Rx5;
+		zport->regs[5] |= Tx5;
+		break;
+	case CS6:
+		zport->regs[3] |= Rx6;
+		zport->regs[5] |= Tx6;
+		break;
+	case CS7:
+		zport->regs[3] |= Rx7;
+		zport->regs[5] |= Tx7;
+		break;
+	case CS8:
+	default:
+		zport->regs[3] |= Rx8;
+		zport->regs[5] |= Tx8;
+		break;
+	}
+
+	/* Parity and stop bits.  */
+	zport->regs[4] &= ~(XCLK_MASK | SB_MASK | PAR_ENA | PAR_EVEN);
+	if (termios->c_cflag & CSTOPB)
+		zport->regs[4] |= SB2;
+	else
+		zport->regs[4] |= SB1;
+	if (termios->c_cflag & PARENB)
+		zport->regs[4] |= PAR_ENA;
+	if (!(termios->c_cflag & PARODD))
+		zport->regs[4] |= PAR_EVEN;
+	switch (zport->clk_mode) {
+	case 64:
+		zport->regs[4] |= X64CLK;
+		break;
+	case 32:
+		zport->regs[4] |= X32CLK;
+		break;
+	case 16:
+		zport->regs[4] |= X16CLK;
+		break;
+	case 1:
+		zport->regs[4] |= X1CLK;
+		break;
+	default:
+		BUG();
+	}
+
+	baud = uart_get_baud_rate(uport, termios, old_termios, 0,
+				  uport->uartclk / zport->clk_mode / 4);
+
+	brg = ZS_BPS_TO_BRG(baud, uport->uartclk / zport->clk_mode);
+	zport->regs[12] = brg & 0xff;
+	zport->regs[13] = (brg >> 8) & 0xff;
+
+	uart_update_timeout(uport, termios->c_cflag, baud);
+
+	uport->read_status_mask = Rx_OVR;
+	if (termios->c_iflag & INPCK)
+		uport->read_status_mask |= FRM_ERR | PAR_ERR;
+	if (termios->c_iflag & (BRKINT | PARMRK))
+		uport->read_status_mask |= Rx_BRK;
+
+	uport->ignore_status_mask = 0;
+	if (termios->c_iflag & IGNPAR)
+		uport->ignore_status_mask |= FRM_ERR | PAR_ERR;
+	if (termios->c_iflag & IGNBRK) {
+		uport->ignore_status_mask |= Rx_BRK;
+		if (termios->c_iflag & IGNPAR)
+			uport->ignore_status_mask |= Rx_OVR;
+	}
+
+	if (termios->c_cflag & CREAD)
+		zport->regs[3] |= RxENABLE;
+	else
+		zport->regs[3] &= ~RxENABLE;
+
+	if (zport != zport_a) {
+		if (!(termios->c_cflag & CLOCAL))
+			zport->regs[15] |= DCDIE;
+		else
+			zport->regs[15] &= ~DCDIE;
+		if (termios->c_cflag & CRTSCTS)
+			zport->regs[15] |= CTSIE;
+		else
+			zport->regs[15] &= ~CTSIE;
+		zs_raw_xor_mctrl(zport);
+	}
+
+	/* Load up the new values.  */
+	load_zsregs(zport, zport->regs, irq);
+
+	spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+
+static const char *zs_type(struct uart_port *uport)
+{
+	return "Z85C30 SCC";
+}
+
+static void zs_release_port(struct uart_port *uport)
+{
+	iounmap(uport->membase);
+	uport->membase = NULL;
+	release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE);
+}
+
+static int zs_map_port(struct uart_port *uport)
+{
+	if (!uport->membase)
+		uport->membase = ioremap_nocache(uport->mapbase,
+						 ZS_CHAN_IO_SIZE);
+	if (!uport->membase) {
+		printk(KERN_ERR "zs: Cannot map MMIO\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int zs_request_port(struct uart_port *uport)
+{
+	int ret;
+
+	if (!request_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE, "scc")) {
+		printk(KERN_ERR "zs: Unable to reserve MMIO resource\n");
+		return -EBUSY;
+	}
+	ret = zs_map_port(uport);
+	if (ret) {
+		release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE);
+		return ret;
+	}
+	return 0;
+}
+
+static void zs_config_port(struct uart_port *uport, int flags)
+{
+	struct zs_port *zport = to_zport(uport);
+
+	if (flags & UART_CONFIG_TYPE) {
+		if (zs_request_port(uport))
+			return;
+
+		uport->type = PORT_ZS;
+
+		zs_reset(zport);
+	}
+}
+
+static int zs_verify_port(struct uart_port *uport, struct serial_struct *ser)
+{
+	struct zs_port *zport = to_zport(uport);
+	int ret = 0;
+
+	if (ser->type != PORT_UNKNOWN && ser->type != PORT_ZS)
+		ret = -EINVAL;
+	if (ser->irq != uport->irq)
+		ret = -EINVAL;
+	if (ser->baud_base != uport->uartclk / zport->clk_mode / 4)
+		ret = -EINVAL;
+	return ret;
+}
+
+
+static struct uart_ops zs_ops = {
+	.tx_empty	= zs_tx_empty,
+	.set_mctrl	= zs_set_mctrl,
+	.get_mctrl	= zs_get_mctrl,
+	.stop_tx	= zs_stop_tx,
+	.start_tx	= zs_start_tx,
+	.stop_rx	= zs_stop_rx,
+	.enable_ms	= zs_enable_ms,
+	.break_ctl	= zs_break_ctl,
+	.startup	= zs_startup,
+	.shutdown	= zs_shutdown,
+	.set_termios	= zs_set_termios,
+	.type		= zs_type,
+	.release_port	= zs_release_port,
+	.request_port	= zs_request_port,
+	.config_port	= zs_config_port,
+	.verify_port	= zs_verify_port,
+};
+
+/*
+ * Initialize Z85C30 port structures.
+ */
+static int __init zs_probe_sccs(void)
+{
+	static int probed;
+	struct zs_parms zs_parms;
+	int chip, side, irq;
+	int n_chips = 0;
+	int i;
+
+	if (probed)
+		return 0;
+
+	irq = dec_interrupt[DEC_IRQ_SCC0];
+	if (irq >= 0) {
+		zs_parms.scc[n_chips] = IOASIC_SCC0;
+		zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC0];
+		n_chips++;
+	}
+	irq = dec_interrupt[DEC_IRQ_SCC1];
+	if (irq >= 0) {
+		zs_parms.scc[n_chips] = IOASIC_SCC1;
+		zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC1];
+		n_chips++;
+	}
+	if (!n_chips)
+		return -ENXIO;
+
+	probed = 1;
+
+	for (chip = 0; chip < n_chips; chip++) {
+		spin_lock_init(&zs_sccs[chip].zlock);
+		for (side = 0; side < ZS_NUM_CHAN; side++) {
+			struct zs_port *zport = &zs_sccs[chip].zport[side];
+			struct uart_port *uport = &zport->port;
+
+			zport->scc	= &zs_sccs[chip];
+			zport->clk_mode	= 16;
+
+			uport->irq	= zs_parms.irq[chip];
+			uport->uartclk	= ZS_CLOCK;
+			uport->fifosize	= 1;
+			uport->iotype	= UPIO_MEM;
+			uport->flags	= UPF_BOOT_AUTOCONF;
+			uport->ops	= &zs_ops;
+			uport->line	= chip * ZS_NUM_CHAN + side;
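+			/* Channel A maps above channel B, hence the XOR.  */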
+			uport->mapbase	= dec_kn_slot_base +
+					  zs_parms.scc[chip] +
+					  (side ^ ZS_CHAN_B) * ZS_CHAN_IO_SIZE;
+
+			for (i = 0; i < ZS_NUM_REGS; i++)
+				zport->regs[i] = zs_init_regs[i];
+		}
+	}
+
+	return 0;
+}
+
+
+#ifdef CONFIG_SERIAL_ZS_CONSOLE
+static void zs_console_putchar(struct uart_port *uport, int ch)
+{
+	struct zs_port *zport = to_zport(uport);
+	struct zs_scc *scc = zport->scc;
+	int irq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&scc->zlock, flags);
+	irq = !irqs_disabled_flags(flags);
+	if (zs_transmit_drain(zport, irq))
+		write_zsdata(zport, ch);
+	spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ */
+static void zs_console_write(struct console *co, const char *s,
+			     unsigned int count)
+{
+	int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN;
+	struct zs_port *zport = &zs_sccs[chip].zport[side];
+	struct zs_scc *scc = zport->scc;
+	unsigned long flags;
+	u8 txint, txenb;
+	int irq;
+
+	/* Disable transmit interrupts and enable the transmitter. */
+	spin_lock_irqsave(&scc->zlock, flags);
+	txint = zport->regs[1];
+	txenb = zport->regs[5];
+	if (txint & TxINT_ENAB) {
+		zport->regs[1] = txint & ~TxINT_ENAB;
+		write_zsreg(zport, R1, zport->regs[1]);
+	}
+	if (!(txenb & TxENAB)) {
+		zport->regs[5] = txenb | TxENAB;
+		write_zsreg(zport, R5, zport->regs[5]);
+	}
+	spin_unlock_irqrestore(&scc->zlock, flags);
+
+	uart_console_write(&zport->port, s, count, zs_console_putchar);
+
+	/* Restore transmit interrupts and the transmitter enable. */
+	spin_lock_irqsave(&scc->zlock, flags);
+	irq = !irqs_disabled_flags(flags);
+	zs_line_drain(zport, irq);
+	if (!(txenb & TxENAB)) {
+		zport->regs[5] &= ~TxENAB;
+		write_zsreg(zport, R5, zport->regs[5]);
+	}
+	if (txint & TxINT_ENAB) {
+		zport->regs[1] |= TxINT_ENAB;
+		write_zsreg(zport, R1, zport->regs[1]);
+	}
+	spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+/*
+ * Setup serial console baud/bits/parity.  We do two things here:
+ * - construct a cflag setting for the first uart_open()
+ * - initialise the serial port
+ * Return non-zero if we didn't find a serial port.
+ */
+static int __init zs_console_setup(struct console *co, char *options)
+{
+	int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN;
+	struct zs_port *zport = &zs_sccs[chip].zport[side];
+	struct uart_port *uport = &zport->port;
+	int baud = 9600;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+	int ret;
+
+	ret = zs_map_port(uport);
+	if (ret)
+		return ret;
+
+	zs_reset(zport);
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+	return uart_set_options(uport, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver zs_reg;
+static struct console zs_console = {
+	.name	= "ttyS",
+	.write	= zs_console_write,
+	.device	= uart_console_device,
+	.setup	= zs_console_setup,
+	.flags	= CON_PRINTBUFFER,
+	.index	= -1,
+	.data	= &zs_reg,
+};
+
+/*
+ *	Register console.
+ */
+static int __init zs_serial_console_init(void)
+{
+	int ret;
+
+	ret = zs_probe_sccs();
+	if (ret)
+		return ret;
+	register_console(&zs_console);
+
+	return 0;
+}
+
+console_initcall(zs_serial_console_init);
+
+#define SERIAL_ZS_CONSOLE	&zs_console
+#else
+#define SERIAL_ZS_CONSOLE	NULL
+#endif /* CONFIG_SERIAL_ZS_CONSOLE */
+
+static struct uart_driver zs_reg = {
+	.owner			= THIS_MODULE,
+	.driver_name		= "serial",
+	.dev_name		= "ttyS",
+	.major			= TTY_MAJOR,
+	.minor			= 64,
+	.nr			= ZS_NUM_SCCS * ZS_NUM_CHAN,
+	.cons			= SERIAL_ZS_CONSOLE,
+};
+
+/* zs_init() sets up the driver and registers it.  */
+static int __init zs_init(void)
+{
+	int i, ret;
+
+	pr_info("%s%s\n", zs_name, zs_version);
+
+	/* Find out how many Z85C30 SCCs we have.  */
+	ret = zs_probe_sccs();
+	if (ret)
+		return ret;
+
+	ret = uart_register_driver(&zs_reg);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) {
+		struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN];
+		struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN];
+		struct uart_port *uport = &zport->port;
+
+		if (zport->scc)
+			uart_add_one_port(&zs_reg, uport);
+	}
+
+	return 0;
+}
+
+static void __exit zs_exit(void)
+{
+	int i;
+
+	for (i = ZS_NUM_SCCS * ZS_NUM_CHAN - 1; i >= 0; i--) {
+		struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN];
+		struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN];
+		struct uart_port *uport = &zport->port;
+
+		if (zport->scc)
+			uart_remove_one_port(&zs_reg, uport);
+	}
+
+	uart_unregister_driver(&zs_reg);
+}
+
+module_init(zs_init);
+module_exit(zs_exit);
diff --git a/drivers/serial/zs.h b/drivers/serial/zs.h
new file mode 100644
index 0000000..aa921b5
--- /dev/null
+++ b/drivers/serial/zs.h
@@ -0,0 +1,284 @@
+/*
+ * zs.h: Definitions for the DECstation Z85C30 serial driver.
+ *
+ * Adapted from drivers/sbus/char/sunserial.h by Paul Mackerras.
+ * Adapted from drivers/macintosh/macserial.h by Harald Koerfgen.
+ *
+ * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au)
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 2004, 2005, 2007  Maciej W. Rozycki
+ */
+#ifndef _SERIAL_ZS_H
+#define _SERIAL_ZS_H
+
+#ifdef __KERNEL__
+
+#define ZS_NUM_REGS 16
+
+/*
+ * This is our internal structure for each serial port's state.
+ */
+struct zs_port {
+	struct zs_scc	*scc;			/* Containing SCC.  */
+	struct uart_port port;			/* Underlying UART.  */
+
+	int		clk_mode;		/* May be 1, 16, 32, or 64.  */
+
+	unsigned int	tty_break;		/* Set on BREAK condition.  */
+	int		tx_stopped;		/* Output is suspended.  */
+
+	unsigned int	mctrl;			/* State of modem lines.  */
+	u8		brk;			/* BREAK state from RR0.  */
+
+	u8		regs[ZS_NUM_REGS];	/* Channel write registers.  */
+};
+
+/*
+ * Per-SCC state for locking and the interrupt handler.
+ */
+struct zs_scc {
+	struct zs_port	zport[2];
+	spinlock_t	zlock;
+	atomic_t	irq_guard;
+	int		initialised;
+};
+
+#endif /* __KERNEL__ */
+
+/*
+ * Conversion between BRG time constants and bits per second.
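+ * ZS_BPS_TO_BRG computes freq / (2 * bps) - 2; the "+ (bps)" term rounds
+ * the quotient to the nearest integer.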
+ */
+#define ZS_BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
+#define ZS_BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
+
+/*
+ * The Zilog register set.
+ */
+
+/* Write Register 0 (Command) */
+#define R0		0	/* Register selects */
+#define R1		1
+#define R2		2
+#define R3		3
+#define R4		4
+#define R5		5
+#define R6		6
+#define R7		7
+#define R8		8
+#define R9		9
+#define R10		10
+#define R11		11
+#define R12		12
+#define R13		13
+#define R14		14
+#define R15		15
+
+#define NULLCODE	0	/* Null Code */
+#define POINT_HIGH	0x8	/* Select upper half of registers */
+#define RES_EXT_INT	0x10	/* Reset Ext. Status Interrupts */
+#define SEND_ABORT	0x18	/* HDLC Abort */
+#define RES_RxINT_FC	0x20	/* Reset RxINT on First Character */
+#define RES_Tx_P	0x28	/* Reset TxINT Pending */
+#define ERR_RES		0x30	/* Error Reset */
+#define RES_H_IUS	0x38	/* Reset highest IUS */
+
+#define RES_Rx_CRC	0x40	/* Reset Rx CRC Checker */
+#define RES_Tx_CRC	0x80	/* Reset Tx CRC Checker */
+#define RES_EOM_L	0xC0	/* Reset EOM latch */
+
+/* Write Register 1 (Tx/Rx/Ext Int Enable and WAIT/DMA Commands) */
+#define EXT_INT_ENAB	0x1	/* Ext Int Enable */
+#define TxINT_ENAB	0x2	/* Tx Int Enable */
+#define PAR_SPEC	0x4	/* Parity is special condition */
+
+#define RxINT_DISAB	0	/* Rx Int Disable */
+#define RxINT_FCERR	0x8	/* Rx Int on First Character Only or Error */
+#define RxINT_ALL	0x10	/* Int on all Rx Characters or error */
+#define RxINT_ERR	0x18	/* Int on error only */
+#define RxINT_MASK	0x18
+
+#define WT_RDY_RT	0x20	/* Wait/Ready on R/T */
+#define WT_FN_RDYFN	0x40	/* Wait/FN/Ready FN */
+#define WT_RDY_ENAB	0x80	/* Wait/Ready Enable */
+
+/* Write Register 2 (Interrupt Vector) */
+
+/* Write Register 3 (Receive Parameters and Control) */
+#define RxENABLE	0x1	/* Rx Enable */
+#define SYNC_L_INH	0x2	/* Sync Character Load Inhibit */
+#define ADD_SM		0x4	/* Address Search Mode (SDLC) */
+#define RxCRC_ENAB	0x8	/* Rx CRC Enable */
+#define ENT_HM		0x10	/* Enter Hunt Mode */
+#define AUTO_ENAB	0x20	/* Auto Enables */
+#define Rx5		0x0	/* Rx 5 Bits/Character */
+#define Rx7		0x40	/* Rx 7 Bits/Character */
+#define Rx6		0x80	/* Rx 6 Bits/Character */
+#define Rx8		0xc0	/* Rx 8 Bits/Character */
+#define RxNBITS_MASK	0xc0
+
+/* Write Register 4 (Transmit/Receive Miscellaneous Parameters and Modes) */
+#define PAR_ENA		0x1	/* Parity Enable */
+#define PAR_EVEN	0x2	/* Parity Even/Odd* */
+
+#define SYNC_ENAB	0	/* Sync Modes Enable */
+#define SB1		0x4	/* 1 stop bit/char */
+#define SB15		0x8	/* 1.5 stop bits/char */
+#define SB2		0xc	/* 2 stop bits/char */
+#define SB_MASK		0xc
+
+#define MONSYNC		0	/* 8 Bit Sync character */
+#define BISYNC		0x10	/* 16 bit sync character */
+#define SDLC		0x20	/* SDLC Mode (01111110 Sync Flag) */
+#define EXTSYNC		0x30	/* External Sync Mode */
+
+#define X1CLK		0x0	/* x1 clock mode */
+#define X16CLK		0x40	/* x16 clock mode */
+#define X32CLK		0x80	/* x32 clock mode */
+#define X64CLK		0xc0	/* x64 clock mode */
+#define XCLK_MASK	0xc0
+
+/* Write Register 5 (Transmit Parameters and Controls) */
+#define TxCRC_ENAB	0x1	/* Tx CRC Enable */
+#define RTS		0x2	/* RTS */
+#define SDLC_CRC	0x4	/* SDLC/CRC-16 */
+#define TxENAB		0x8	/* Tx Enable */
+#define SND_BRK		0x10	/* Send Break */
+#define Tx5		0x0	/* Tx 5 bits (or less)/character */
+#define Tx7		0x20	/* Tx 7 bits/character */
+#define Tx6		0x40	/* Tx 6 bits/character */
+#define Tx8		0x60	/* Tx 8 bits/character */
+#define TxNBITS_MASK	0x60
+#define DTR		0x80	/* DTR */
+
+/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
+
+/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
+
+/* Write Register 8 (Transmit Buffer) */
+
+/* Write Register 9 (Master Interrupt Control) */
+#define VIS		1	/* Vector Includes Status */
+#define NV		2	/* No Vector */
+#define DLC		4	/* Disable Lower Chain */
+#define MIE		8	/* Master Interrupt Enable */
+#define STATHI		0x10	/* Status high */
+#define SOFTACK		0x20	/* Software Interrupt Acknowledge */
+#define NORESET		0	/* No reset on write to R9 */
+#define CHRB		0x40	/* Reset channel B */
+#define CHRA		0x80	/* Reset channel A */
+#define FHWRES		0xc0	/* Force hardware reset */
+
+/* Write Register 10 (Miscellaneous Transmitter/Receiver Control Bits) */
+#define BIT6		1	/* 6 bit/8bit sync */
+#define LOOPMODE	2	/* SDLC Loop mode */
+#define ABUNDER		4	/* Abort/flag on SDLC xmit underrun */
+#define MARKIDLE	8	/* Mark/flag on idle */
+#define GAOP		0x10	/* Go active on poll */
+#define NRZ		0	/* NRZ mode */
+#define NRZI		0x20	/* NRZI mode */
+#define FM1		0x40	/* FM1 (transition = 1) */
+#define FM0		0x60	/* FM0 (transition = 0) */
+#define CRCPS		0x80	/* CRC Preset I/O */
+
+/* Write Register 11 (Clock Mode Control) */
+#define TRxCXT		0	/* TRxC = Xtal output */
+#define TRxCTC		1	/* TRxC = Transmit clock */
+#define TRxCBR		2	/* TRxC = BR Generator Output */
+#define TRxCDP		3	/* TRxC = DPLL output */
+#define TRxCOI		4	/* TRxC O/I */
+#define TCRTxCP		0	/* Transmit clock = RTxC pin */
+#define TCTRxCP		8	/* Transmit clock = TRxC pin */
+#define TCBR		0x10	/* Transmit clock = BR Generator output */
+#define TCDPLL		0x18	/* Transmit clock = DPLL output */
+#define RCRTxCP		0	/* Receive clock = RTxC pin */
+#define RCTRxCP		0x20	/* Receive clock = TRxC pin */
+#define RCBR		0x40	/* Receive clock = BR Generator output */
+#define RCDPLL		0x60	/* Receive clock = DPLL output */
+#define RTxCX		0x80	/* RTxC Xtal/No Xtal */
+
+/* Write Register 12 (Lower Byte of Baud Rate Generator Time Constant) */
+
+/* Write Register 13 (Upper Byte of Baud Rate Generator Time Constant) */
+
+/* Write Register 14 (Miscellaneous Control Bits) */
+#define BRENABL		1	/* Baud rate generator enable */
+#define BRSRC		2	/* Baud rate generator source */
+#define DTRREQ		4	/* DTR/Request function */
+#define AUTOECHO	8	/* Auto Echo */
+#define LOOPBAK		0x10	/* Local loopback */
+#define SEARCH		0x20	/* Enter search mode */
+#define RMC		0x40	/* Reset missing clock */
+#define DISDPLL		0x60	/* Disable DPLL */
+#define SSBR		0x80	/* Set DPLL source = BR generator */
+#define SSRTxC		0xa0	/* Set DPLL source = RTxC */
+#define SFMM		0xc0	/* Set FM mode */
+#define SNRZI		0xe0	/* Set NRZI mode */
+
+/* Write Register 15 (External/Status Interrupt Control) */
+#define WR7P_EN		1	/* WR7 Prime SDLC Feature Enable */
+#define ZCIE		2	/* Zero count IE */
+#define DCDIE		8	/* DCD IE */
+#define SYNCIE		0x10	/* Sync/hunt IE */
+#define CTSIE		0x20	/* CTS IE */
+#define TxUIE		0x40	/* Tx Underrun/EOM IE */
+#define BRKIE		0x80	/* Break/Abort IE */
+
+
+/* Read Register 0 (Transmit/Receive Buffer Status and External Status) */
+#define Rx_CH_AV	0x1	/* Rx Character Available */
+#define ZCOUNT		0x2	/* Zero count */
+#define Tx_BUF_EMP	0x4	/* Tx Buffer empty */
+#define DCD		0x8	/* DCD */
+#define SYNC_HUNT	0x10	/* Sync/hunt */
+#define CTS		0x20	/* CTS */
+#define TxEOM		0x40	/* Tx underrun */
+#define BRK_ABRT	0x80	/* Break/Abort */
+
+/* Read Register 1 (Special Receive Condition Status) */
+#define ALL_SNT		0x1	/* All sent */
+/* Residue Data for 8 Rx bits/char programmed */
+#define RES3		0x8	/* 0/3 */
+#define RES4		0x4	/* 0/4 */
+#define RES5		0xc	/* 0/5 */
+#define RES6		0x2	/* 0/6 */
+#define RES7		0xa	/* 0/7 */
+#define RES8		0x6	/* 0/8 */
+#define RES18		0xe	/* 1/8 */
+#define RES28		0x0	/* 2/8 */
+/* Special Rx Condition Interrupts */
+#define PAR_ERR		0x10	/* Parity Error */
+#define Rx_OVR		0x20	/* Rx Overrun Error */
+#define FRM_ERR		0x40	/* CRC/Framing Error */
+#define END_FR		0x80	/* End of Frame (SDLC) */
+
+/* Read Register 2 (Interrupt Vector (WR2) -- channel A).  */
+
+/* Read Register 2 (Modified Interrupt Vector -- channel B).  */
+
+/* Read Register 3 (Interrupt Pending Bits -- channel A only).  */
+#define CHBEXT		0x1	/* Channel B Ext/Stat IP */
+#define CHBTxIP		0x2	/* Channel B Tx IP */
+#define CHBRxIP		0x4	/* Channel B Rx IP */
+#define CHAEXT		0x8	/* Channel A Ext/Stat IP */
+#define CHATxIP		0x10	/* Channel A Tx IP */
+#define CHARxIP		0x20	/* Channel A Rx IP */
+
+/* Read Register 6 (SDLC FIFO Status and Byte Count LSB) */
+
+/* Read Register 7 (SDLC FIFO Status and Byte Count MSB) */
+
+/* Read Register 8 (Receive Data) */
+
+/* Read Register 10 (Miscellaneous Status Bits) */
+#define ONLOOP		2	/* On loop */
+#define LOOPSEND	0x10	/* Loop sending */
+#define CLK2MIS		0x40	/* Two clocks missing */
+#define CLK1MIS		0x80	/* One clock missing */
+
+/* Read Register 12 (Lower Byte of Baud Rate Generator Constant (WR12)) */
+
+/* Read Register 13 (Upper Byte of Baud Rate Generator Constant (WR13)) */
+
+/* Read Register 15 (External/Status Interrupt Control (WR15)) */
+
+#endif /* _SERIAL_ZS_H */
diff --git a/drivers/sh/superhyway/superhyway.c b/drivers/sh/superhyway/superhyway.c
index 94b2290..7d873b3 100644
--- a/drivers/sh/superhyway/superhyway.c
+++ b/drivers/sh/superhyway/superhyway.c
@@ -56,11 +56,10 @@
 	struct superhyway_device *dev = sdev;
 
 	if (!dev) {
-		dev = kmalloc(sizeof(struct superhyway_device), GFP_KERNEL);
+		dev = kzalloc(sizeof(struct superhyway_device), GFP_KERNEL);
 		if (!dev)
 			return -ENOMEM;
 
-		memset(dev, 0, sizeof(struct superhyway_device));
 	}
 
 	dev->bus = bus;
diff --git a/drivers/sn/ioc3.c b/drivers/sn/ioc3.c
index 2dd6eed..29fcd6d 100644
--- a/drivers/sn/ioc3.c
+++ b/drivers/sn/ioc3.c
@@ -629,7 +629,7 @@
 #endif
 
 	/* Set up per-IOC3 data */
-	idd = kmalloc(sizeof(struct ioc3_driver_data), GFP_KERNEL);
+	idd = kzalloc(sizeof(struct ioc3_driver_data), GFP_KERNEL);
 	if (!idd) {
 		printk(KERN_WARNING
 		       "%s: Failed to allocate IOC3 data for pci_dev %s.\n",
@@ -637,7 +637,6 @@
 		ret = -ENODEV;
 		goto out_idd;
 	}
-	memset(idd, 0, sizeof(struct ioc3_driver_data));
 	spin_lock_init(&idd->ir_lock);
 	spin_lock_init(&idd->gpio_lock);
 	idd->pdev = pdev;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 5e3f748..b915711 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -107,6 +107,15 @@
 	  This enables using the Freescale iMX SPI controller in master
 	  mode.
 
+config SPI_LM70_LLP
+	tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
+	depends on SPI_MASTER && PARPORT && EXPERIMENTAL
+	select SPI_BITBANG
+	help
+	  This driver supports the NS LM70 LLP Evaluation Board,
+	  which interfaces to an LM70 temperature sensor using
+	  a parallel port.
+
 config SPI_MPC52xx_PSC
 	tristate "Freescale MPC52xx PSC SPI controller"
 	depends on SPI_MASTER && PPC_MPC52xx && EXPERIMENTAL
@@ -133,6 +142,12 @@
 	help
 	  This hooks up to the MicroWire controller on OMAP1 chips.
 
+config SPI_OMAP24XX
+	tristate "McSPI driver for OMAP24xx"
+	depends on SPI_MASTER && ARCH_OMAP24XX
+	help
+	  SPI master controller for OMAP24xx Multichannel SPI
+	  (McSPI) modules.
 
 config SPI_PXA2XX
 	tristate "PXA2xx SSP SPI master"
@@ -145,17 +160,36 @@
 config SPI_S3C24XX
 	tristate "Samsung S3C24XX series SPI"
 	depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL
+	select SPI_BITBANG
 	help
 	  SPI driver for Samsung S3C24XX series ARM SoCs
 
 config SPI_S3C24XX_GPIO
 	tristate "Samsung S3C24XX series SPI by GPIO"
-	depends on SPI_MASTER && ARCH_S3C2410 && SPI_BITBANG && EXPERIMENTAL
+	depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL
+	select SPI_BITBANG
 	help
 	  SPI driver for Samsung S3C24XX series ARM SoCs using
 	  GPIO lines to provide the SPI bus. This can be used where
 	  the inbuilt hardware cannot provide the transfer mode, or
 	  where the board is using non hardware connected pins.
+
+config SPI_TXX9
+	tristate "Toshiba TXx9 SPI controller"
+	depends on SPI_MASTER && GENERIC_GPIO && CPU_TX49XX
+	help
+	  SPI driver for Toshiba TXx9 MIPS SoCs
+
+config SPI_XILINX
+	tristate "Xilinx SPI controller"
+	depends on SPI_MASTER && XILINX_VIRTEX && EXPERIMENTAL
+	select SPI_BITBANG
+	help
+	  This exposes the SPI controller IP from the Xilinx EDK.
+
+	  See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
+	  Product Specification document (DS464) for hardware details.
+
 #
 # Add new SPI master controllers in alphabetical order above this line
 #
@@ -187,6 +221,15 @@
 	  Note that this application programming interface is EXPERIMENTAL
 	  and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes.
 
+config SPI_TLE62X0
+	tristate "Infineon TLE62X0 (for power switching)"
+	depends on SPI_MASTER && SYSFS
+	help
+	  SPI driver for Infineon TLE62X0 series line driver chips,
+	  such as the TLE6220, TLE6230 and TLE6240.  This provides a
+	  sysfs interface, with each line presented as a kind of GPIO
+	  exposing both switch control and diagnostic feedback.
+
 #
 # Add new SPI protocol masters in alphabetical order above this line
 #
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 5788d86..41fbac4 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -17,17 +17,22 @@
 obj-$(CONFIG_SPI_AU1550)		+= au1550_spi.o
 obj-$(CONFIG_SPI_BUTTERFLY)		+= spi_butterfly.o
 obj-$(CONFIG_SPI_IMX)			+= spi_imx.o
+obj-$(CONFIG_SPI_LM70_LLP)		+= spi_lm70llp.o
 obj-$(CONFIG_SPI_PXA2XX)		+= pxa2xx_spi.o
 obj-$(CONFIG_SPI_OMAP_UWIRE)		+= omap_uwire.o
+obj-$(CONFIG_SPI_OMAP24XX)		+= omap2_mcspi.o
 obj-$(CONFIG_SPI_MPC52xx_PSC)		+= mpc52xx_psc_spi.o
 obj-$(CONFIG_SPI_MPC83xx)		+= spi_mpc83xx.o
 obj-$(CONFIG_SPI_S3C24XX_GPIO)		+= spi_s3c24xx_gpio.o
 obj-$(CONFIG_SPI_S3C24XX)		+= spi_s3c24xx.o
+obj-$(CONFIG_SPI_TXX9)			+= spi_txx9.o
+obj-$(CONFIG_SPI_XILINX)		+= xilinx_spi.o
 # 	... add above this line ...
 
 # SPI protocol drivers (device/link on bus)
 obj-$(CONFIG_SPI_AT25)		+= at25.o
 obj-$(CONFIG_SPI_SPIDEV)	+= spidev.o
+obj-$(CONFIG_SPI_TLE62X0)	+= tle62x0.o
 # 	... add above this line ...
 
 # SPI slave controller drivers (upstream link)
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 8b2601d..ad14405 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -46,6 +46,7 @@
 	struct clk		*clk;
 	struct platform_device	*pdev;
 	unsigned		new_1:1;
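+	/* device whose chipselect stays asserted after its message completes */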
+	struct spi_device	*stay;
 
 	u8			stopping;
 	struct list_head	queue;
@@ -62,29 +63,62 @@
 /*
  * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
  * they assume that spi slave device state will not change on deselect, so
- * that automagic deselection is OK.  Not so!  Workaround uses nCSx pins
- * as GPIOs; or newer controllers have CSAAT and friends.
+ * that automagic deselection is OK.  ("NPCSx rises if no data is to be
+ * transmitted")  Not so!  Workaround uses nCSx pins as GPIOs; or newer
+ * controllers have CSAAT and friends.
  *
- * Since the CSAAT functionality is a bit weird on newer controllers
- * as well, we use GPIO to control nCSx pins on all controllers.
+ * Since the CSAAT functionality is a bit weird on newer controllers as
+ * well, we use GPIO to control nCSx pins on all controllers, updating
+ * MR.PCS to avoid confusing the controller.  Using GPIOs also lets us
+ * support active-high chipselects despite the controller's belief that
+ * only active-low devices/systems exist.
+ *
+ * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
+ * right when driven with GPIO.  ("Mode Fault does not allow more than one
+ * Master on Chip Select 0.")  No workaround exists for that ... so for
+ * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
+ * and (c) will trigger that first erratum in some cases.
  */
 
-static inline void cs_activate(struct spi_device *spi)
+static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
 {
 	unsigned gpio = (unsigned) spi->controller_data;
 	unsigned active = spi->mode & SPI_CS_HIGH;
+	u32 mr;
 
-	dev_dbg(&spi->dev, "activate %u%s\n", gpio, active ? " (high)" : "");
-	gpio_set_value(gpio, active);
+	mr = spi_readl(as, MR);
+	mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
+
+	dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
+			gpio, active ? " (high)" : "",
+			mr);
+
+	if (!(cpu_is_at91rm9200() && spi->chip_select == 0))
+		gpio_set_value(gpio, active);
+	spi_writel(as, MR, mr);
 }
 
-static inline void cs_deactivate(struct spi_device *spi)
+static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
 {
 	unsigned gpio = (unsigned) spi->controller_data;
 	unsigned active = spi->mode & SPI_CS_HIGH;
+	u32 mr;
 
-	dev_dbg(&spi->dev, "DEactivate %u%s\n", gpio, active ? " (low)" : "");
-	gpio_set_value(gpio, !active);
+	/* only deactivate *this* device; sometimes transfers to
+	 * another device may be active when this routine is called.
+	 */
+	mr = spi_readl(as, MR);
+	if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
+		mr = SPI_BFINS(PCS, 0xf, mr);
+		spi_writel(as, MR, mr);
+	}
+
+	dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
+			gpio, active ? " (low)" : "",
+			mr);
+
+	if (!(cpu_is_at91rm9200() && spi->chip_select == 0))
+		gpio_set_value(gpio, !active);
 }
 
 /*
@@ -140,6 +174,7 @@
 
 	/* REVISIT: when xfer->delay_usecs == 0, the PDC "next transfer"
 	 * mechanism might help avoid the IRQ latency between transfers
+	 * (and improve the nCS0 errata handling on at91rm9200 chips)
 	 *
 	 * We're also waiting for ENDRX before we start the next
 	 * transfer because we need to handle some difficult timing
@@ -169,33 +204,62 @@
 {
 	struct atmel_spi	*as = spi_master_get_devdata(master);
 	struct spi_message	*msg;
-	u32			mr;
+	struct spi_device	*spi;
 
 	BUG_ON(as->current_transfer);
 
 	msg = list_entry(as->queue.next, struct spi_message, queue);
+	spi = msg->spi;
 
-	/* Select the chip */
-	mr = spi_readl(as, MR);
-	mr = SPI_BFINS(PCS, ~(1 << msg->spi->chip_select), mr);
-	spi_writel(as, MR, mr);
-	cs_activate(msg->spi);
+	dev_dbg(master->cdev.dev, "start message %p for %s\n",
+			msg, spi->dev.bus_id);
+
+	/* select chip if it's not still active */
+	if (as->stay) {
+		if (as->stay != spi) {
+			cs_deactivate(as, as->stay);
+			cs_activate(as, spi);
+		}
+		as->stay = NULL;
+	} else
+		cs_activate(as, spi);
 
 	atmel_spi_next_xfer(master, msg);
 }
 
-static void
+/*
+ * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
+ *  - The buffer is either valid for CPU access, or else NULL
+ *  - If the buffer is valid, so is its DMA address
+ *
+ * This driver manages the DMA address unless message->is_dma_mapped.
+ */
+static int
 atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
 {
+	struct device	*dev = &as->pdev->dev;
+
 	xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
-	if (xfer->tx_buf)
-		xfer->tx_dma = dma_map_single(&as->pdev->dev,
+	if (xfer->tx_buf) {
+		xfer->tx_dma = dma_map_single(dev,
 				(void *) xfer->tx_buf, xfer->len,
 				DMA_TO_DEVICE);
-	if (xfer->rx_buf)
-		xfer->rx_dma = dma_map_single(&as->pdev->dev,
+		if (dma_mapping_error(xfer->tx_dma))
+			return -ENOMEM;
+	}
+	if (xfer->rx_buf) {
+		xfer->rx_dma = dma_map_single(dev,
 				xfer->rx_buf, xfer->len,
 				DMA_FROM_DEVICE);
+		if (dma_mapping_error(xfer->rx_dma)) {
+			if (xfer->tx_buf)
+				dma_unmap_single(dev,
+						xfer->tx_dma, xfer->len,
+						DMA_TO_DEVICE);
+			return -ENOMEM;
+		}
+	}
+	return 0;
 }
 
 static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
@@ -211,9 +275,13 @@
 
 static void
 atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
-		   struct spi_message *msg, int status)
+		struct spi_message *msg, int status, int stay)
 {
-	cs_deactivate(msg->spi);
+	if (!stay || status < 0)
+		cs_deactivate(as, msg->spi);
+	else
+		as->stay = msg->spi;
+
 	list_del(&msg->queue);
 	msg->status = status;
 
@@ -303,7 +371,7 @@
 		/* Clear any overrun happening while cleaning up */
 		spi_readl(as, SR);
 
-		atmel_spi_msg_done(master, as, msg, -EIO);
+		atmel_spi_msg_done(master, as, msg, -EIO, 0);
 	} else if (pending & SPI_BIT(ENDRX)) {
 		ret = IRQ_HANDLED;
 
@@ -321,12 +389,13 @@
 
 			if (msg->transfers.prev == &xfer->transfer_list) {
 				/* report completed message */
-				atmel_spi_msg_done(master, as, msg, 0);
+				atmel_spi_msg_done(master, as, msg, 0,
+						xfer->cs_change);
 			} else {
 				if (xfer->cs_change) {
-					cs_deactivate(msg->spi);
+					cs_deactivate(as, msg->spi);
 					udelay(1);
-					cs_activate(msg->spi);
+					cs_activate(as, msg->spi);
 				}
 
 				/*
@@ -350,6 +419,7 @@
 	return ret;
 }
 
+/* the spi->mode bits understood by this driver: */
 #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
 
 static int atmel_spi_setup(struct spi_device *spi)
@@ -388,6 +458,14 @@
 		return -EINVAL;
 	}
 
+	/* see notes above re chipselect */
+	if (cpu_is_at91rm9200()
+			&& spi->chip_select == 0
+			&& (spi->mode & SPI_CS_HIGH)) {
+		dev_dbg(&spi->dev, "setup: can't be active-high\n");
+		return -EINVAL;
+	}
+
 	/* speed zero convention is used by some upper layers */
 	bus_hz = clk_get_rate(as->clk);
 	if (spi->max_speed_hz) {
@@ -397,8 +475,9 @@
 		scbr = ((bus_hz + spi->max_speed_hz - 1)
 			/ spi->max_speed_hz);
 		if (scbr >= (1 << SPI_SCBR_SIZE)) {
-			dev_dbg(&spi->dev, "setup: %d Hz too slow, scbr %u\n",
-					spi->max_speed_hz, scbr);
+			dev_dbg(&spi->dev,
+				"setup: %d Hz too slow, scbr %u; min %ld Hz\n",
+				spi->max_speed_hz, scbr, bus_hz/255);
 			return -EINVAL;
 		}
 	} else
@@ -423,6 +502,14 @@
 			return ret;
 		spi->controller_state = (void *)npcs_pin;
 		gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
+	} else {
+		unsigned long		flags;
+
+		spin_lock_irqsave(&as->lock, flags);
+		if (as->stay == spi)
+			as->stay = NULL;
+		cs_deactivate(as, spi);
+		spin_unlock_irqrestore(&as->lock, flags);
 	}
 
 	dev_dbg(&spi->dev,
@@ -464,14 +551,22 @@
 			dev_dbg(&spi->dev, "no protocol options yet\n");
 			return -ENOPROTOOPT;
 		}
+
+		/*
+		 * DMA map early, for performance (empties dcache ASAP) and
+		 * better fault reporting.  This is a DMA-only driver.
+		 *
+		 * NOTE that if dma_unmap_single() ever starts to do work on
+		 * platforms supported by this driver, we would need to clean
+		 * up mappings for previously-mapped transfers.
+		 */
+		if (!msg->is_dma_mapped) {
+			if (atmel_spi_dma_map_xfer(as, xfer) < 0)
+				return -ENOMEM;
+		}
 	}
 
-	/* scrub dcache "early" */
-	if (!msg->is_dma_mapped) {
-		list_for_each_entry(xfer, &msg->transfers, transfer_list)
-			atmel_spi_dma_map_xfer(as, xfer);
-	}
-
+#ifdef VERBOSE
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		dev_dbg(controller,
 			"  xfer %p: len %u tx %p/%08x rx %p/%08x\n",
@@ -479,6 +574,7 @@
 			xfer->tx_buf, xfer->tx_dma,
 			xfer->rx_buf, xfer->rx_dma);
 	}
+#endif
 
 	msg->status = -EINPROGRESS;
 	msg->actual_length = 0;
@@ -494,8 +590,21 @@
 
 static void atmel_spi_cleanup(struct spi_device *spi)
 {
-	if (spi->controller_state)
-		gpio_free((unsigned int)spi->controller_data);
+	struct atmel_spi	*as = spi_master_get_devdata(spi->master);
+	unsigned		gpio = (unsigned) spi->controller_data;
+	unsigned long		flags;
+
+	if (!spi->controller_state)
+		return;
+
+	spin_lock_irqsave(&as->lock, flags);
+	if (as->stay == spi) {
+		as->stay = NULL;
+		cs_deactivate(as, spi);
+	}
+	spin_unlock_irqrestore(&as->lock, flags);
+
+	gpio_free(gpio);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -536,6 +645,10 @@
 
 	as = spi_master_get_devdata(master);
 
+	/*
+	 * Scratch buffer is used for throwaway rx and tx data.
+	 * It's coherent to minimize dcache pollution.
+	 */
 	as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
 					&as->buffer_dma, GFP_KERNEL);
 	if (!as->buffer)
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index ae2b1af..c47a650 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -280,6 +280,9 @@
 	return 0;
 }
 
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST)
+
 static int au1550_spi_setup(struct spi_device *spi)
 {
 	struct au1550_spi *hw = spi_master_get_devdata(spi->master);
@@ -292,6 +295,12 @@
 		return -EINVAL;
 	}
 
+	if (spi->mode & ~MODEBITS) {
+		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+			spi->mode & ~MODEBITS);
+		return -EINVAL;
+	}
+
 	if (spi->max_speed_hz == 0)
 		spi->max_speed_hz = hw->freq_max;
 	if (spi->max_speed_hz > hw->freq_max
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 11f36be..d2a4b2b 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -270,6 +270,9 @@
 	spin_unlock_irq(&mps->lock);
 }
 
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST)
+
 static int mpc52xx_psc_spi_setup(struct spi_device *spi)
 {
 	struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
@@ -279,6 +282,12 @@
 	if (spi->bits_per_word%8)
 		return -EINVAL;
 
+	if (spi->mode & ~MODEBITS) {
+		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+			spi->mode & ~MODEBITS);
+		return -EINVAL;
+	}
+
 	if (!cs) {
 		cs = kzalloc(sizeof *cs, GFP_KERNEL);
 		if (!cs)
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
new file mode 100644
index 0000000..6b357cd
--- /dev/null
+++ b/drivers/spi/omap2_mcspi.c
@@ -0,0 +1,1081 @@
+/*
+ * OMAP2 McSPI controller driver
+ *
+ * Copyright (C) 2005, 2006 Nokia Corporation
+ * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
+ *		Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <linux/spi/spi.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/clock.h>
+
+
+#define OMAP2_MCSPI_MAX_FREQ		48000000
+
+#define OMAP2_MCSPI_REVISION		0x00
+#define OMAP2_MCSPI_SYSCONFIG		0x10
+#define OMAP2_MCSPI_SYSSTATUS		0x14
+#define OMAP2_MCSPI_IRQSTATUS		0x18
+#define OMAP2_MCSPI_IRQENABLE		0x1c
+#define OMAP2_MCSPI_WAKEUPENABLE	0x20
+#define OMAP2_MCSPI_SYST		0x24
+#define OMAP2_MCSPI_MODULCTRL		0x28
+
+/* per-channel banks, 0x14 bytes each, first is: */
+#define OMAP2_MCSPI_CHCONF0		0x2c
+#define OMAP2_MCSPI_CHSTAT0		0x30
+#define OMAP2_MCSPI_CHCTRL0		0x34
+#define OMAP2_MCSPI_TX0			0x38
+#define OMAP2_MCSPI_RX0			0x3c
+
+/* per-register bitmasks: */
+
+#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE	(1 << 0)
+#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET	(1 << 1)
+
+#define OMAP2_MCSPI_SYSSTATUS_RESETDONE	(1 << 0)
+
+#define OMAP2_MCSPI_MODULCTRL_SINGLE	(1 << 0)
+#define OMAP2_MCSPI_MODULCTRL_MS	(1 << 2)
+#define OMAP2_MCSPI_MODULCTRL_STEST	(1 << 3)
+
+#define OMAP2_MCSPI_CHCONF_PHA		(1 << 0)
+#define OMAP2_MCSPI_CHCONF_POL		(1 << 1)
+#define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
+#define OMAP2_MCSPI_CHCONF_EPOL		(1 << 6)
+#define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
+#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	(0x01 << 12)
+#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	(0x02 << 12)
+#define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
+#define OMAP2_MCSPI_CHCONF_DMAW		(1 << 14)
+#define OMAP2_MCSPI_CHCONF_DMAR		(1 << 15)
+#define OMAP2_MCSPI_CHCONF_DPE0		(1 << 16)
+#define OMAP2_MCSPI_CHCONF_DPE1		(1 << 17)
+#define OMAP2_MCSPI_CHCONF_IS		(1 << 18)
+#define OMAP2_MCSPI_CHCONF_TURBO	(1 << 19)
+#define OMAP2_MCSPI_CHCONF_FORCE	(1 << 20)
+
+#define OMAP2_MCSPI_CHSTAT_RXS		(1 << 0)
+#define OMAP2_MCSPI_CHSTAT_TXS		(1 << 1)
+#define OMAP2_MCSPI_CHSTAT_EOT		(1 << 2)
+
+#define OMAP2_MCSPI_CHCTRL_EN		(1 << 0)
+
+
+/* We have 2 DMA channels per CS, one for RX and one for TX */
+struct omap2_mcspi_dma {
+	int dma_tx_channel;
+	int dma_rx_channel;
+
+	int dma_tx_sync_dev;
+	int dma_rx_sync_dev;
+
+	struct completion dma_tx_completion;
+	struct completion dma_rx_completion;
+};
+
+/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
+ * cache operations; a better heuristic would also consider word size and bitrate.
+ */
+#define DMA_MIN_BYTES			8
+
+
+struct omap2_mcspi {
+	struct work_struct	work;
+	/* lock protects queue and registers */
+	spinlock_t		lock;
+	struct list_head	msg_queue;
+	struct spi_master	*master;
+	struct clk		*ick;
+	struct clk		*fck;
+	/* Virtual base address of the controller */
+	void __iomem		*base;
+	/* SPI1 has 4 channels, while SPI2 has 2 */
+	struct omap2_mcspi_dma	*dma_channels;
+};
+
+struct omap2_mcspi_cs {
+	void __iomem		*base;
+	int			word_len;
+};
+
+static struct workqueue_struct *omap2_mcspi_wq;
+
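+/* set or clear the bits given by 'mask' in 'val', according to 'set' */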
+#define MOD_REG_BIT(val, mask, set) do { \
+	if (set) \
+		val |= mask; \
+	else \
+		val &= ~mask; \
+} while (0)
+
+static inline void mcspi_write_reg(struct spi_master *master,
+		int idx, u32 val)
+{
+	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+	__raw_writel(val, mcspi->base + idx);
+}
+
+static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
+{
+	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+	return __raw_readl(mcspi->base + idx);
+}
+
+static inline void mcspi_write_cs_reg(const struct spi_device *spi,
+		int idx, u32 val)
+{
+	struct omap2_mcspi_cs	*cs = spi->controller_state;
+
+	__raw_writel(val, cs->base +  idx);
+}
+
+static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
+{
+	struct omap2_mcspi_cs	*cs = spi->controller_state;
+
+	return __raw_readl(cs->base + idx);
+}
+
+static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
+		int is_read, int enable)
+{
+	u32 l, rw;
+
+	l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+
+	if (is_read) /* 1 is read, 0 write */
+		rw = OMAP2_MCSPI_CHCONF_DMAR;
+	else
+		rw = OMAP2_MCSPI_CHCONF_DMAW;
+
+	MOD_REG_BIT(l, rw, enable);
+	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
+}
+
+static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
+{
+	u32 l;
+
+	l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
+	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
+}
+
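+/* assert or release this channel's chipselect by setting/clearing FORCE */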
+static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
+{
+	u32 l;
+
+	l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+	MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
+	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
+}
+
+static void omap2_mcspi_set_master_mode(struct spi_master *master)
+{
+	u32 l;
+
+	/* setup when switching from (reset default) slave mode
+	 * to single-channel master mode
+	 */
+	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
+	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0);
+	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
+	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
+	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
+}
+
+static unsigned
+omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+{
+	struct omap2_mcspi	*mcspi;
+	struct omap2_mcspi_cs	*cs = spi->controller_state;
+	struct omap2_mcspi_dma  *mcspi_dma;
+	unsigned int		count, c;
+	unsigned long		base, tx_reg, rx_reg;
+	int			word_len, data_type, element_count;
+	u8			*rx;
+	const u8		*tx;
+
+	mcspi = spi_master_get_devdata(spi->master);
+	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+	count = xfer->len;
+	c = count;
+	word_len = cs->word_len;
+
+	base = (unsigned long) io_v2p(cs->base);
+	tx_reg = base + OMAP2_MCSPI_TX0;
+	rx_reg = base + OMAP2_MCSPI_RX0;
+	rx = xfer->rx_buf;
+	tx = xfer->tx_buf;
+
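+	/* pick the DMA element size from the word length; element_count is
+	 * the transfer length expressed in elements of that size.
+	 */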
+	if (word_len <= 8) {
+		data_type = OMAP_DMA_DATA_TYPE_S8;
+		element_count = count;
+	} else if (word_len <= 16) {
+		data_type = OMAP_DMA_DATA_TYPE_S16;
+		element_count = count >> 1;
+	} else /* word_len <= 32 */ {
+		data_type = OMAP_DMA_DATA_TYPE_S32;
+		element_count = count >> 2;
+	}
+
+	if (tx != NULL) {
+		omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
+				data_type, element_count, 1,
+				OMAP_DMA_SYNC_ELEMENT,
+				mcspi_dma->dma_tx_sync_dev, 0);
+
+		omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
+				OMAP_DMA_AMODE_CONSTANT,
+				tx_reg, 0, 0);
+
+		omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
+				OMAP_DMA_AMODE_POST_INC,
+				xfer->tx_dma, 0, 0);
+	}
+
+	if (rx != NULL) {
+		omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
+				data_type, element_count, 1,
+				OMAP_DMA_SYNC_ELEMENT,
+				mcspi_dma->dma_rx_sync_dev, 1);
+
+		omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
+				OMAP_DMA_AMODE_CONSTANT,
+				rx_reg, 0, 0);
+
+		omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
+				OMAP_DMA_AMODE_POST_INC,
+				xfer->rx_dma, 0, 0);
+	}
+
+	if (tx != NULL) {
+		omap_start_dma(mcspi_dma->dma_tx_channel);
+		omap2_mcspi_set_dma_req(spi, 0, 1);
+	}
+
+	if (rx != NULL) {
+		omap_start_dma(mcspi_dma->dma_rx_channel);
+		omap2_mcspi_set_dma_req(spi, 1, 1);
+	}
+
+	if (tx != NULL) {
+		wait_for_completion(&mcspi_dma->dma_tx_completion);
+		dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE);
+	}
+
+	if (rx != NULL) {
+		wait_for_completion(&mcspi_dma->dma_rx_completion);
+		dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE);
+	}
+	return count;
+}
+
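+/* Busy-wait until 'bit' is set in the register at 'reg', giving up after
+ * roughly one second.  Returns 0 on success, -1 on timeout.
+ */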
+static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
+{
+	unsigned long timeout;
+
+	timeout = jiffies + msecs_to_jiffies(1000);
+	while (!(__raw_readl(reg) & bit)) {
+		if (time_after(jiffies, timeout))
+			return -1;
+		cpu_relax();
+	}
+	return 0;
+}
+
+static unsigned
+omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
+{
+	struct omap2_mcspi	*mcspi;
+	struct omap2_mcspi_cs	*cs = spi->controller_state;
+	unsigned int		count, c;
+	u32			l;
+	void __iomem		*base = cs->base;
+	void __iomem		*tx_reg;
+	void __iomem		*rx_reg;
+	void __iomem		*chstat_reg;
+	int			word_len;
+
+	mcspi = spi_master_get_devdata(spi->master);
+	count = xfer->len;
+	c = count;
+	word_len = cs->word_len;
+
+	l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+	l &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+
+	/* We store the pre-calculated register addresses on the stack to speed
+	 * up the transfer loop. */
+	tx_reg		= base + OMAP2_MCSPI_TX0;
+	rx_reg		= base + OMAP2_MCSPI_RX0;
+	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;
+
+	if (word_len <= 8) {
+		u8		*rx;
+		const u8	*tx;
+
+		rx = xfer->rx_buf;
+		tx = xfer->tx_buf;
+
+		do {
+			if (tx != NULL) {
+				if (mcspi_wait_for_reg_bit(chstat_reg,
+						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+					dev_err(&spi->dev, "TXS timed out\n");
+					goto out;
+				}
+#ifdef VERBOSE
+				dev_dbg(&spi->dev, "write-%d %02x\n",
+						word_len, *tx);
+#endif
+				__raw_writel(*tx++, tx_reg);
+			}
+			if (rx != NULL) {
+				if (mcspi_wait_for_reg_bit(chstat_reg,
+						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+					dev_err(&spi->dev, "RXS timed out\n");
+					goto out;
+				}
+				/* prevent last RX_ONLY read from triggering
+				 * more word i/o: switch to rx+tx
+				 */
+				if (c == 0 && tx == NULL)
+					mcspi_write_cs_reg(spi,
+							OMAP2_MCSPI_CHCONF0, l);
+				*rx++ = __raw_readl(rx_reg);
+#ifdef VERBOSE
+				dev_dbg(&spi->dev, "read-%d %02x\n",
+						word_len, *(rx - 1));
+#endif
+			}
+			c -= 1;
+		} while (c);
+	} else if (word_len <= 16) {
+		u16		*rx;
+		const u16	*tx;
+
+		rx = xfer->rx_buf;
+		tx = xfer->tx_buf;
+		do {
+			if (tx != NULL) {
+				if (mcspi_wait_for_reg_bit(chstat_reg,
+						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+					dev_err(&spi->dev, "TXS timed out\n");
+					goto out;
+				}
+#ifdef VERBOSE
+				dev_dbg(&spi->dev, "write-%d %04x\n",
+						word_len, *tx);
+#endif
+				__raw_writel(*tx++, tx_reg);
+			}
+			if (rx != NULL) {
+				if (mcspi_wait_for_reg_bit(chstat_reg,
+						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+					dev_err(&spi->dev, "RXS timed out\n");
+					goto out;
+				}
+				/* prevent last RX_ONLY read from triggering
+				 * more word i/o: switch to rx+tx
+				 */
+				if (c == 0 && tx == NULL)
+					mcspi_write_cs_reg(spi,
+							OMAP2_MCSPI_CHCONF0, l);
+				*rx++ = __raw_readl(rx_reg);
+#ifdef VERBOSE
+				dev_dbg(&spi->dev, "read-%d %04x\n",
+						word_len, *(rx - 1));
+#endif
+			}
+			c -= 2;
+		} while (c);
+	} else if (word_len <= 32) {
+		u32		*rx;
+		const u32	*tx;
+
+		rx = xfer->rx_buf;
+		tx = xfer->tx_buf;
+		do {
+			if (tx != NULL) {
+				if (mcspi_wait_for_reg_bit(chstat_reg,
+						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+					dev_err(&spi->dev, "TXS timed out\n");
+					goto out;
+				}
+#ifdef VERBOSE
+				dev_dbg(&spi->dev, "write-%d %08x\n",
+						word_len, *tx);
+#endif
+				__raw_writel(*tx++, tx_reg);
+			}
+			if (rx != NULL) {
+				if (mcspi_wait_for_reg_bit(chstat_reg,
+						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+					dev_err(&spi->dev, "RXS timed out\n");
+					goto out;
+				}
+				/* prevent last RX_ONLY read from triggering
+				 * more word i/o: switch to rx+tx
+				 */
+				if (c == 0 && tx == NULL)
+					mcspi_write_cs_reg(spi,
+							OMAP2_MCSPI_CHCONF0, l);
+				*rx++ = __raw_readl(rx_reg);
+#ifdef VERBOSE
+				dev_dbg(&spi->dev, "read-%d %08x\n",
+						word_len, *(rx - 1));
+#endif
+			}
+			c -= 4;
+		} while (c);
+	}
+
+	/* for TX_ONLY mode, be sure all words have shifted out */
+	if (xfer->rx_buf == NULL) {
+		if (mcspi_wait_for_reg_bit(chstat_reg,
+				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+			dev_err(&spi->dev, "TXS timed out\n");
+		} else if (mcspi_wait_for_reg_bit(chstat_reg,
+				OMAP2_MCSPI_CHSTAT_EOT) < 0)
+			dev_err(&spi->dev, "EOT timed out\n");
+	}
+out:
+	return count - c;
+}
+
+/* called only when no transfer is active to this device */
+static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+		struct spi_transfer *t)
+{
+	struct omap2_mcspi_cs *cs = spi->controller_state;
+	struct omap2_mcspi *mcspi;
+	u32 l = 0, div = 0;
+	u8 word_len = spi->bits_per_word;
+
+	mcspi = spi_master_get_devdata(spi->master);
+
+	if (t != NULL && t->bits_per_word)
+		word_len = t->bits_per_word;
+
+	cs->word_len = word_len;
+
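+	/* choose the smallest power-of-two clock divisor that brings the bus
+	 * clock to max_speed_hz or below; with no rate given, fall back to
+	 * the largest divisor (1 << 15).
+	 */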
+	if (spi->max_speed_hz) {
+		while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div))
+					> spi->max_speed_hz)
+			div++;
+	} else
+		div = 15;
+
+	l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+
+	/* standard 4-wire master mode:  SCK, MOSI/out, MISO/in, nCS
+	 * REVISIT: this controller could support SPI_3WIRE mode.
+	 */
+	l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
+	l |= OMAP2_MCSPI_CHCONF_DPE0;
+
+	/* wordlength */
+	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
+	l |= (word_len - 1) << 7;
+
+	/* set chipselect polarity; manage with FORCE */
+	if (!(spi->mode & SPI_CS_HIGH))
+		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_EPOL;
+
+	/* set clock divisor */
+	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
+	l |= div << 2;
+
+	/* set SPI mode 0..3 */
+	if (spi->mode & SPI_CPOL)
+		l |= OMAP2_MCSPI_CHCONF_POL;
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_POL;
+	if (spi->mode & SPI_CPHA)
+		l |= OMAP2_MCSPI_CHCONF_PHA;
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_PHA;
+
+	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
+
+	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
+			OMAP2_MCSPI_MAX_FREQ / (1 << div),
+			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
+			(spi->mode & SPI_CPOL) ? "inverted" : "normal");
+
+	return 0;
+}
+
+static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
+{
+	struct spi_device	*spi = data;
+	struct omap2_mcspi	*mcspi;
+	struct omap2_mcspi_dma	*mcspi_dma;
+
+	mcspi = spi_master_get_devdata(spi->master);
+	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
+
+	complete(&mcspi_dma->dma_rx_completion);
+
+	/* We must disable the DMA RX request */
+	omap2_mcspi_set_dma_req(spi, 1, 0);
+}
+
+static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
+{
+	struct spi_device	*spi = data;
+	struct omap2_mcspi	*mcspi;
+	struct omap2_mcspi_dma	*mcspi_dma;
+
+	mcspi = spi_master_get_devdata(spi->master);
+	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
+
+	complete(&mcspi_dma->dma_tx_completion);
+
+	/* We must disable the DMA TX request */
+	omap2_mcspi_set_dma_req(spi, 0, 0);
+}
+
+static int omap2_mcspi_request_dma(struct spi_device *spi)
+{
+	struct spi_master	*master = spi->master;
+	struct omap2_mcspi	*mcspi;
+	struct omap2_mcspi_dma	*mcspi_dma;
+
+	mcspi = spi_master_get_devdata(master);
+	mcspi_dma = mcspi->dma_channels + spi->chip_select;
+
+	if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
+			omap2_mcspi_dma_rx_callback, spi,
+			&mcspi_dma->dma_rx_channel)) {
+		dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
+		return -EAGAIN;
+	}
+
+	if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
+			omap2_mcspi_dma_tx_callback, spi,
+			&mcspi_dma->dma_tx_channel)) {
+		omap_free_dma(mcspi_dma->dma_rx_channel);
+		mcspi_dma->dma_rx_channel = -1;
+		dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
+		return -EAGAIN;
+	}
+
+	init_completion(&mcspi_dma->dma_rx_completion);
+	init_completion(&mcspi_dma->dma_tx_completion);
+
+	return 0;
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
+static int omap2_mcspi_setup(struct spi_device *spi)
+{
+	int			ret;
+	struct omap2_mcspi	*mcspi;
+	struct omap2_mcspi_dma	*mcspi_dma;
+	struct omap2_mcspi_cs	*cs = spi->controller_state;
+
+	if (spi->mode & ~MODEBITS) {
+		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+			spi->mode & ~MODEBITS);
+		return -EINVAL;
+	}
+
+	if (spi->bits_per_word == 0)
+		spi->bits_per_word = 8;
+	else if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
+		dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
+			spi->bits_per_word);
+		return -EINVAL;
+	}
+
+	mcspi = spi_master_get_devdata(spi->master);
+	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+	if (!cs) {
+		cs = kzalloc(sizeof *cs, GFP_KERNEL);
+		if (!cs)
+			return -ENOMEM;
+		cs->base = mcspi->base + spi->chip_select * 0x14;
+		spi->controller_state = cs;
+	}
+
+	if (mcspi_dma->dma_rx_channel == -1
+			|| mcspi_dma->dma_tx_channel == -1) {
+		ret = omap2_mcspi_request_dma(spi);
+		if (ret < 0)
+			return ret;
+	}
+
+	clk_enable(mcspi->ick);
+	clk_enable(mcspi->fck);
+	ret =  omap2_mcspi_setup_transfer(spi, NULL);
+	clk_disable(mcspi->fck);
+	clk_disable(mcspi->ick);
+
+	return ret;
+}
+
+static void omap2_mcspi_cleanup(struct spi_device *spi)
+{
+	struct omap2_mcspi	*mcspi;
+	struct omap2_mcspi_dma	*mcspi_dma;
+
+	mcspi = spi_master_get_devdata(spi->master);
+	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+	kfree(spi->controller_state);
+
+	if (mcspi_dma->dma_rx_channel != -1) {
+		omap_free_dma(mcspi_dma->dma_rx_channel);
+		mcspi_dma->dma_rx_channel = -1;
+	}
+	if (mcspi_dma->dma_tx_channel != -1) {
+		omap_free_dma(mcspi_dma->dma_tx_channel);
+		mcspi_dma->dma_tx_channel = -1;
+	}
+}
+
+static void omap2_mcspi_work(struct work_struct *work)
+{
+	struct omap2_mcspi	*mcspi;
+
+	mcspi = container_of(work, struct omap2_mcspi, work);
+	spin_lock_irq(&mcspi->lock);
+
+	clk_enable(mcspi->ick);
+	clk_enable(mcspi->fck);
+
+	/* We only enable one channel at a time -- the one whose message is
+	 * at the head of the queue -- although this controller would gladly
+	 * arbitrate among multiple channels.  This corresponds to "single
+	 * channel" master mode.  As a side effect, we need to manage the
+	 * chipselect with the FORCE bit ... CS != channel enable.
+	 */
+	while (!list_empty(&mcspi->msg_queue)) {
+		struct spi_message		*m;
+		struct spi_device		*spi;
+		struct spi_transfer		*t = NULL;
+		int				cs_active = 0;
+		struct omap2_mcspi_device_config *conf;
+		struct omap2_mcspi_cs		*cs;
+		int				par_override = 0;
+		int				status = 0;
+		u32				chconf;
+
+		m = container_of(mcspi->msg_queue.next, struct spi_message,
+				 queue);
+
+		list_del_init(&m->queue);
+		spin_unlock_irq(&mcspi->lock);
+
+		spi = m->spi;
+		conf = spi->controller_data;
+		cs = spi->controller_state;
+
+		omap2_mcspi_set_enable(spi, 1);
+		list_for_each_entry(t, &m->transfers, transfer_list) {
+			if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+				status = -EINVAL;
+				break;
+			}
+			if (par_override || t->speed_hz || t->bits_per_word) {
+				par_override = 1;
+				status = omap2_mcspi_setup_transfer(spi, t);
+				if (status < 0)
+					break;
+				if (!t->speed_hz && !t->bits_per_word)
+					par_override = 0;
+			}
+
+			if (!cs_active) {
+				omap2_mcspi_force_cs(spi, 1);
+				cs_active = 1;
+			}
+
+			chconf = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+			chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+			if (t->tx_buf == NULL)
+				chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
+			else if (t->rx_buf == NULL)
+				chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
+			mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, chconf);
+
+			if (t->len) {
+				unsigned	count;
+
+				/* RX_ONLY mode needs dummy data in TX reg */
+				if (t->tx_buf == NULL)
+					__raw_writel(0, cs->base
+							+ OMAP2_MCSPI_TX0);
+
+				if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
+					count = omap2_mcspi_txrx_dma(spi, t);
+				else
+					count = omap2_mcspi_txrx_pio(spi, t);
+				m->actual_length += count;
+
+				if (count != t->len) {
+					status = -EIO;
+					break;
+				}
+			}
+
+			if (t->delay_usecs)
+				udelay(t->delay_usecs);
+
+			/* ignore the "leave it on after last xfer" hint */
+			if (t->cs_change) {
+				omap2_mcspi_force_cs(spi, 0);
+				cs_active = 0;
+			}
+		}
+
+		/* Restore defaults if they were overridden */
+		if (par_override) {
+			par_override = 0;
+			status = omap2_mcspi_setup_transfer(spi, NULL);
+		}
+
+		if (cs_active)
+			omap2_mcspi_force_cs(spi, 0);
+
+		omap2_mcspi_set_enable(spi, 0);
+
+		m->status = status;
+		m->complete(m->context);
+
+		spin_lock_irq(&mcspi->lock);
+	}
+
+	clk_disable(mcspi->fck);
+	clk_disable(mcspi->ick);
+
+	spin_unlock_irq(&mcspi->lock);
+}
+
+static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+	struct omap2_mcspi	*mcspi;
+	unsigned long		flags;
+	struct spi_transfer	*t;
+
+	m->actual_length = 0;
+	m->status = 0;
+
+	/* reject invalid messages and transfers */
+	if (list_empty(&m->transfers) || !m->complete)
+		return -EINVAL;
+	list_for_each_entry(t, &m->transfers, transfer_list) {
+		const void	*tx_buf = t->tx_buf;
+		void		*rx_buf = t->rx_buf;
+		unsigned	len = t->len;
+
+		if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
+				|| (len && !(rx_buf || tx_buf))
+				|| (t->bits_per_word &&
+					(  t->bits_per_word < 4
+					|| t->bits_per_word > 32))) {
+			dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
+					t->speed_hz,
+					len,
+					tx_buf ? "tx" : "",
+					rx_buf ? "rx" : "",
+					t->bits_per_word);
+			return -EINVAL;
+		}
+		if (t->speed_hz && t->speed_hz < OMAP2_MCSPI_MAX_FREQ/(1<<16)) {
+			dev_dbg(&spi->dev, "speed %d Hz below minimum %d Hz\n",
+					t->speed_hz,
+					OMAP2_MCSPI_MAX_FREQ/(1<<16));
+			return -EINVAL;
+		}
+
+		if (m->is_dma_mapped || len < DMA_MIN_BYTES)
+			continue;
+
+		/* Do DMA mapping "early" for better error reporting and
+		 * dcache use.  Note that if dma_unmap_single() ever starts
+		 * to do real work on ARM, we'd need to clean up mappings
+		 * for previous transfers on *ALL* exits of this loop...
+		 */
+		if (tx_buf != NULL) {
+			t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
+					len, DMA_TO_DEVICE);
+			if (dma_mapping_error(t->tx_dma)) {
+				dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
+						'T', len);
+				return -EINVAL;
+			}
+		}
+		if (rx_buf != NULL) {
+			t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
+					DMA_FROM_DEVICE);
+			if (dma_mapping_error(t->rx_dma)) {
+				dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
+						'R', len);
+				if (tx_buf != NULL)
+					dma_unmap_single(NULL, t->tx_dma,
+							len, DMA_TO_DEVICE);
+				return -EINVAL;
+			}
+		}
+	}
+
+	mcspi = spi_master_get_devdata(spi->master);
+
+	spin_lock_irqsave(&mcspi->lock, flags);
+	list_add_tail(&m->queue, &mcspi->msg_queue);
+	queue_work(omap2_mcspi_wq, &mcspi->work);
+	spin_unlock_irqrestore(&mcspi->lock, flags);
+
+	return 0;
+}
+
+static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
+{
+	struct spi_master	*master = mcspi->master;
+	u32			tmp;
+
+	clk_enable(mcspi->ick);
+	clk_enable(mcspi->fck);
+
+	mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
+			OMAP2_MCSPI_SYSCONFIG_SOFTRESET);
+	do {
+		tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS);
+	} while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
+
+	mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
+			/* (3 << 8) | (2 << 3) | */
+			OMAP2_MCSPI_SYSCONFIG_AUTOIDLE);
+
+	omap2_mcspi_set_master_mode(master);
+
+	clk_disable(mcspi->fck);
+	clk_disable(mcspi->ick);
+	return 0;
+}
+
+static u8 __initdata spi1_rxdma_id[] = {
+	OMAP24XX_DMA_SPI1_RX0,
+	OMAP24XX_DMA_SPI1_RX1,
+	OMAP24XX_DMA_SPI1_RX2,
+	OMAP24XX_DMA_SPI1_RX3,
+};
+
+static u8 __initdata spi1_txdma_id[] = {
+	OMAP24XX_DMA_SPI1_TX0,
+	OMAP24XX_DMA_SPI1_TX1,
+	OMAP24XX_DMA_SPI1_TX2,
+	OMAP24XX_DMA_SPI1_TX3,
+};
+
+static u8 __initdata spi2_rxdma_id[] = {
+	OMAP24XX_DMA_SPI2_RX0,
+	OMAP24XX_DMA_SPI2_RX1,
+};
+
+static u8 __initdata spi2_txdma_id[] = {
+	OMAP24XX_DMA_SPI2_TX0,
+	OMAP24XX_DMA_SPI2_TX1,
+};
+
+static int __init omap2_mcspi_probe(struct platform_device *pdev)
+{
+	struct spi_master	*master;
+	struct omap2_mcspi	*mcspi;
+	struct resource		*r;
+	int			status = 0, i;
+	const u8		*rxdma_id, *txdma_id;
+	unsigned		num_chipselect;
+
+	switch (pdev->id) {
+	case 1:
+		rxdma_id = spi1_rxdma_id;
+		txdma_id = spi1_txdma_id;
+		num_chipselect = 4;
+		break;
+	case 2:
+		rxdma_id = spi2_rxdma_id;
+		txdma_id = spi2_txdma_id;
+		num_chipselect = 2;
+		break;
+	/* REVISIT omap2430 has a third McSPI ... */
+	default:
+		return -EINVAL;
+	}
+
+	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
+	if (master == NULL) {
+		dev_dbg(&pdev->dev, "master allocation failed\n");
+		return -ENOMEM;
+	}
+
+	if (pdev->id != -1)
+		master->bus_num = pdev->id;
+
+	master->setup = omap2_mcspi_setup;
+	master->transfer = omap2_mcspi_transfer;
+	master->cleanup = omap2_mcspi_cleanup;
+	master->num_chipselect = num_chipselect;
+
+	dev_set_drvdata(&pdev->dev, master);
+
+	mcspi = spi_master_get_devdata(master);
+	mcspi->master = master;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (r == NULL) {
+		status = -ENODEV;
+		goto err1;
+	}
+	if (!request_mem_region(r->start, (r->end - r->start) + 1,
+			pdev->dev.bus_id)) {
+		status = -EBUSY;
+		goto err1;
+	}
+
+	mcspi->base = (void __iomem *) io_p2v(r->start);
+
+	INIT_WORK(&mcspi->work, omap2_mcspi_work);
+
+	spin_lock_init(&mcspi->lock);
+	INIT_LIST_HEAD(&mcspi->msg_queue);
+
+	mcspi->ick = clk_get(&pdev->dev, "mcspi_ick");
+	if (IS_ERR(mcspi->ick)) {
+		dev_dbg(&pdev->dev, "can't get mcspi_ick\n");
+		status = PTR_ERR(mcspi->ick);
+		goto err1a;
+	}
+	mcspi->fck = clk_get(&pdev->dev, "mcspi_fck");
+	if (IS_ERR(mcspi->fck)) {
+		dev_dbg(&pdev->dev, "can't get mcspi_fck\n");
+		status = PTR_ERR(mcspi->fck);
+		goto err2;
+	}
+
+	mcspi->dma_channels = kcalloc(master->num_chipselect,
+			sizeof(struct omap2_mcspi_dma),
+			GFP_KERNEL);
+
+	if (mcspi->dma_channels == NULL) {
+		status = -ENOMEM;
+		goto err3;
+	}
+
+	for (i = 0; i < num_chipselect; i++) {
+		mcspi->dma_channels[i].dma_rx_channel = -1;
+		mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i];
+		mcspi->dma_channels[i].dma_tx_channel = -1;
+		mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i];
+	}
+
+	if (omap2_mcspi_reset(mcspi) < 0)
+		goto err4;
+
+	status = spi_register_master(master);
+	if (status < 0)
+		goto err4;
+
+	return status;
+
+err4:
+	kfree(mcspi->dma_channels);
+err3:
+	clk_put(mcspi->fck);
+err2:
+	clk_put(mcspi->ick);
+err1a:
+	release_mem_region(r->start, (r->end - r->start) + 1);
+err1:
+	spi_master_put(master);
+	return status;
+}
+
+static int __exit omap2_mcspi_remove(struct platform_device *pdev)
+{
+	struct spi_master	*master;
+	struct omap2_mcspi	*mcspi;
+	struct omap2_mcspi_dma	*dma_channels;
+	struct resource		*r;
+
+	master = dev_get_drvdata(&pdev->dev);
+	mcspi = spi_master_get_devdata(master);
+	dma_channels = mcspi->dma_channels;
+
+	clk_put(mcspi->fck);
+	clk_put(mcspi->ick);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(r->start, (r->end - r->start) + 1);
+
+	spi_unregister_master(master);
+	kfree(dma_channels);
+
+	return 0;
+}
+
+static struct platform_driver omap2_mcspi_driver = {
+	.driver = {
+		.name =		"omap2_mcspi",
+		.owner =	THIS_MODULE,
+	},
+	.remove =	__exit_p(omap2_mcspi_remove),
+};
+
+
+static int __init omap2_mcspi_init(void)
+{
+	omap2_mcspi_wq = create_singlethread_workqueue(
+				omap2_mcspi_driver.driver.name);
+	if (omap2_mcspi_wq == NULL)
+		return -ENOMEM;
+	return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
+}
+subsys_initcall(omap2_mcspi_init);
+
+static void __exit omap2_mcspi_exit(void)
+{
+	platform_driver_unregister(&omap2_mcspi_driver);
+
+	destroy_workqueue(omap2_mcspi_wq);
+}
+module_exit(omap2_mcspi_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index 95183e1..d275c61 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -445,10 +445,19 @@
 	return status;
 }
 
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
 static int uwire_setup(struct spi_device *spi)
 {
 	struct uwire_state *ust = spi->controller_state;
 
+	if (spi->mode & ~MODEBITS) {
+		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+			spi->mode & ~MODEBITS);
+		return -EINVAL;
+	}
+
 	if (ust == NULL) {
 		ust = kzalloc(sizeof(*ust), GFP_KERNEL);
 		if (ust == NULL)
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 9f2c887..e51311b 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1067,6 +1067,9 @@
 	return 0;
 }
 
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA)
+
 static int setup(struct spi_device *spi)
 {
 	struct pxa2xx_spi_chip *chip_info = NULL;
@@ -1093,6 +1096,12 @@
 		return -EINVAL;
 	}
 
+	if (spi->mode & ~MODEBITS) {
+		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+			spi->mode & ~MODEBITS);
+		return -EINVAL;
+	}
+
 	/* Only alloc on first setup */
 	chip = spi_get_ctldata(spi);
 	if (!chip) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4831edb..018884d 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -23,6 +23,7 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/cache.h>
+#include <linux/mutex.h>
 #include <linux/spi/spi.h>
 
 
@@ -185,7 +186,7 @@
 };
 
 static LIST_HEAD(board_list);
-static DECLARE_MUTEX(board_lock);
+static DEFINE_MUTEX(board_lock);
 
 
 /**
@@ -292,9 +293,9 @@
 	bi->n_board_info = n;
 	memcpy(bi->board_info, info, n * sizeof *info);
 
-	down(&board_lock);
+	mutex_lock(&board_lock);
 	list_add_tail(&bi->list, &board_list);
-	up(&board_lock);
+	mutex_unlock(&board_lock);
 	return 0;
 }
 
@@ -308,7 +309,7 @@
 	struct boardinfo	*bi;
 	struct device		*dev = master->cdev.dev;
 
-	down(&board_lock);
+	mutex_lock(&board_lock);
 	list_for_each_entry(bi, &board_list, list) {
 		struct spi_board_info	*chip = bi->board_info;
 		unsigned		n;
@@ -330,7 +331,7 @@
 			(void) spi_new_device(master, chip);
 		}
 	}
-	up(&board_lock);
+	mutex_unlock(&board_lock);
 }
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 88425e1..0c85c98 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -187,12 +187,10 @@
 
 	bitbang = spi_master_get_devdata(spi->master);
 
-	/* REVISIT: some systems will want to support devices using lsb-first
-	 * bit encodings on the wire.  In pure software that would be trivial,
-	 * just bitbang_txrx_le_cphaX() routines shifting the other way, and
-	 * some hardware controllers also have this support.
+	/* Bitbangers can support SPI_CS_HIGH, SPI_3WIRE, and so on;
+	 * add those to master->flags, and provide the other support.
 	 */
-	if ((spi->mode & SPI_LSB_FIRST) != 0)
+	if ((spi->mode & ~(SPI_CPOL|SPI_CPHA|bitbang->flags)) != 0)
 		return -EINVAL;
 
 	if (!cs) {
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 656be4a..aee9ad6 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -1163,6 +1163,9 @@
 	return -EINVAL;
 }
 
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
 /* On first setup bad values must free chip_data memory since will cause
    spi_new_device to fail. Bad value setup from protocol driver are simply not
    applied and notified to the calling driver. */
@@ -1174,6 +1177,12 @@
 	u32 tmp;
 	int status = 0;
 
+	if (spi->mode & ~MODEBITS) {
+		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+			spi->mode & ~MODEBITS);
+		return -EINVAL;
+	}
+
 	/* Get controller data */
 	chip_info = spi->controller_data;
 
@@ -1245,21 +1254,6 @@
 
 	/* SPI mode */
 	tmp = spi->mode;
-	if (tmp & SPI_LSB_FIRST) {
-		status = -EINVAL;
-		if (first_setup) {
-			dev_err(&spi->dev,
-				"setup - "
-				"HW doesn't support LSB first transfer\n");
-			goto err_first_setup;
-		} else {
-			dev_err(&spi->dev,
-				"setup - "
-				"HW doesn't support LSB first transfer, "
-				"default to MSB first\n");
-			spi->mode &= ~SPI_LSB_FIRST;
-		}
-	}
 	if (tmp & SPI_CS_HIGH) {
 		u32_EDIT(chip->control,
 				SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH);
diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi_lm70llp.c
new file mode 100644
index 0000000..4ea68ac
--- /dev/null
+++ b/drivers/spi/spi_lm70llp.c
@@ -0,0 +1,361 @@
+/*
+ * spi_lm70llp.c - driver for lm70llp eval board for the LM70 sensor
+ *
+ * Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/parport.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+
+/*
+ * The LM70 communicates with a host processor using a 3-wire variant of
+ * the SPI/Microwire bus interface. This driver specifically supports an
+ * NS LM70 LLP Evaluation Board, interfacing to a PC using its parallel
+ * port to bitbang an SPI-parport bridge.  Accordingly, this is an SPI
+ * master controller driver.  The hwmon/lm70 driver is a "SPI protocol
+ * driver", layered on top of this one and usable without the lm70llp.
+ *
+ * The LM70 is a temperature sensor chip from National Semiconductor; its
+ * datasheet is available at http://www.national.com/pf/LM/LM70.html
+ *
+ * Also see Documentation/spi/spi-lm70llp.  The SPI<->parport code here is
+ * (heavily) based on spi-butterfly by David Brownell.
+ *
+ * The LM70 LLP connects to the PC parallel port in the following manner:
+ *
+ *   Parallel                 LM70 LLP
+ *     Port      Direction   JP2 Header
+ *  -----------  ---------  ------------
+ *      D0    2      -         -
+ *      D1    3     -->      V+   5
+ *      D2    4     -->      V+   5
+ *      D3    5     -->      V+   5
+ *      D4    6     -->      V+   5
+ *      D5    7     -->      nCS  8
+ *      D6    8     -->      SCLK 3
+ *      D7    9     -->      SI/O 5
+ *     GND   25      -       GND  7
+ *    Select 13     <--      SI/O 1
+ *
+ * Note that parport pin 13 actually gets inverted by the transistor
+ * arrangement which lets either the parport or the LM70 drive the
+ * SI/SO signal.
+ */
+
+#define DRVNAME		"spi-lm70llp"
+
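+/* Parallel port data register bits driving the JP2 signals listed above;
+ * SI/O is read back through the status register (pin 13, Select).  lm70_INIT
+ * is the initial data value: V+ lines high, nCS deasserted, SCLK low.
+ */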
+#define lm70_INIT	0xBE
+#define SIO		0x10
+#define nCS		0x20
+#define SCLK		0x40
+
+/*-------------------------------------------------------------------------*/
+
+struct spi_lm70llp {
+	struct spi_bitbang	bitbang;
+	struct parport		*port;
+	struct pardevice	*pd;
+	struct spi_device	*spidev_lm70;
+	struct spi_board_info	info;
+	struct class_device	*cdev;
+};
+
+/* REVISIT: ugly global; provides "exclusive open" facility */
+static struct spi_lm70llp *lm70llp;
+
+
+/*-------------------------------------------------------------------*/
+
+static inline struct spi_lm70llp *spidev_to_pp(struct spi_device *spi)
+{
+	return spi->controller_data;
+}
+
+/*---------------------- LM70 LLP eval board-specific inlines follow */
+
+/* NOTE:  we don't actually need to reread the output values, since they'll
+ * still be what we wrote before.  Plus, going through parport builds in
+ * a ~1ms/operation delay; these SPI transfers could easily be faster.
+ */
+
+static inline void deassertCS(struct spi_lm70llp *pp)
+{
+	u8 data = parport_read_data(pp->port);
+	parport_write_data(pp->port, data | nCS);
+}
+
+static inline void assertCS(struct spi_lm70llp *pp)
+{
+	u8 data = parport_read_data(pp->port);
+	parport_write_data(pp->port, data & ~nCS);
+}
+
+static inline void clkHigh(struct spi_lm70llp *pp)
+{
+	u8 data = parport_read_data(pp->port);
+	parport_write_data(pp->port, data | SCLK);
+}
+
+static inline void clkLow(struct spi_lm70llp *pp)
+{
+	u8 data = parport_read_data(pp->port);
+	parport_write_data(pp->port, data & ~SCLK);
+}
+
+/*------------------------- SPI-LM70-specific inlines ----------------------*/
+
+static inline void spidelay(unsigned d)
+{
+	udelay(d);
+}
+
+static inline void setsck(struct spi_device *s, int is_on)
+{
+	struct spi_lm70llp *pp = spidev_to_pp(s);
+
+	if (is_on)
+		clkHigh(pp);
+	else
+		clkLow(pp);
+}
+
+static inline void setmosi(struct spi_device *s, int is_on)
+{
+	/* FIXME update D7 ... this way we can put the chip
+	 * into shutdown mode and read the manufacturer ID,
+	 * but we can't put it back into operational mode.
+	 */
+}
+
+/*
+ * getmiso:
+ * Why do we return 0 when the SIO line is high and vice-versa?
+ * The fact is, the lm70 eval board from NS (which this driver drives),
+ * is wired in just such a way: when the lm70's SIO goes high, a transistor
+ * switches it to low, reflecting this on the parport (pin 13), and vice versa.
+ */
+static inline int getmiso(struct spi_device *s)
+{
+	struct spi_lm70llp *pp = spidev_to_pp(s);
+	return (parport_read_status(pp->port) & SIO) ? 0 : 1;
+}
+/*--------------------------------------------------------------------*/
+
+#define EXPAND_BITBANG_TXRX 1
+#include <linux/spi/spi_bitbang.h>
+
+static void lm70_chipselect(struct spi_device *spi, int value)
+{
+	struct spi_lm70llp *pp = spidev_to_pp(spi);
+
+	if (value)
+		assertCS(pp);
+	else
+		deassertCS(pp);
+}
+
+/*
+ * Our actual bitbanger routine.
+ */
+static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits)
+{
+	static u32 sio;
+	static int first_time = 1;
+
+	/* First time: perform SPI bitbang and return the LSB of
+	 * the result of the SPI call.
+	 */
+	if (first_time) {
+		sio = bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
+		first_time = 0;
+		return (sio & 0x00ff);
+	}
+	/* Return the MSB of the result of the SPI call */
+	else {
+		first_time = 1;
+		return (sio >> 8);
+	}
+}
+
+static void spi_lm70llp_attach(struct parport *p)
+{
+	struct pardevice	*pd;
+	struct spi_lm70llp	*pp;
+	struct spi_master	*master;
+	int			status;
+
+	if (lm70llp) {
+		printk(KERN_WARNING
+			"%s: spi_lm70llp instance already loaded. Aborting.\n",
+			DRVNAME);
+		return;
+	}
+
+	/* TODO:  this just _assumes_ a lm70 is there ... no probe;
+	 * the lm70 driver could verify it, reading the manf ID.
+	 */
+
+	master = spi_alloc_master(p->physport->dev, sizeof *pp);
+	if (!master) {
+		status = -ENOMEM;
+		goto out_fail;
+	}
+	pp = spi_master_get_devdata(master);
+
+	master->bus_num = -1;	/* dynamic alloc of a bus number */
+	master->num_chipselect = 1;
+
+	/*
+	 * SPI and bitbang hookup.
+	 */
+	pp->bitbang.master = spi_master_get(master);
+	pp->bitbang.chipselect = lm70_chipselect;
+	pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx;
+	pp->bitbang.flags = SPI_3WIRE;
+
+	/*
+	 * Parport hookup
+	 */
+	pp->port = p;
+	pd = parport_register_device(p, DRVNAME,
+			NULL, NULL, NULL,
+			PARPORT_FLAG_EXCL, pp);
+	if (!pd) {
+		status = -ENOMEM;
+		goto out_free_master;
+	}
+	pp->pd = pd;
+
+	status = parport_claim(pd);
+	if (status < 0)
+		goto out_parport_unreg;
+
+	/*
+	 * Start SPI ...
+	 */
+	status = spi_bitbang_start(&pp->bitbang);
+	if (status < 0) {
+		printk(KERN_WARNING
+			"%s: spi_bitbang_start failed with status %d\n",
+			DRVNAME, status);
+		goto out_off_and_release;
+	}
+
+	/*
+	 * The modalias name MUST match the device_driver name
+	 * for the bus glue code to match and subsequently bind them.
+	 * We are binding to the generic drivers/hwmon/lm70.c device
+	 * driver.
+	 */
+	strcpy(pp->info.modalias, "lm70");
+	pp->info.max_speed_hz = 6 * 1000 * 1000;
+	pp->info.chip_select = 0;
+	pp->info.mode = SPI_3WIRE | SPI_MODE_0;
+
+	/* power up the chip, and let the LM70 control SI/SO */
+	parport_write_data(pp->port, lm70_INIT);
+
+	/* Enable access to our primary data structure via
+	 * the board info's (void *)controller_data.
+	 */
+	pp->info.controller_data = pp;
+	pp->spidev_lm70 = spi_new_device(pp->bitbang.master, &pp->info);
+	if (pp->spidev_lm70)
+		dev_dbg(&pp->spidev_lm70->dev, "spidev_lm70 at %s\n",
+				pp->spidev_lm70->dev.bus_id);
+	else {
+		printk(KERN_WARNING "%s: spi_new_device failed\n", DRVNAME);
+		status = -ENODEV;
+		goto out_bitbang_stop;
+	}
+	pp->spidev_lm70->bits_per_word = 16;
+
+	lm70llp = pp;
+
+	return;
+
+out_bitbang_stop:
+	spi_bitbang_stop(&pp->bitbang);
+out_off_and_release:
+	/* power down */
+	parport_write_data(pp->port, 0);
+	mdelay(10);
+	parport_release(pp->pd);
+out_parport_unreg:
+	parport_unregister_device(pd);
+out_free_master:
+	(void) spi_master_put(master);
+out_fail:
+	pr_info("%s: spi_lm70llp probe fail, status %d\n", DRVNAME, status);
+}
+
+static void spi_lm70llp_detach(struct parport *p)
+{
+	struct spi_lm70llp		*pp;
+
+	if (!lm70llp || lm70llp->port != p)
+		return;
+
+	pp = lm70llp;
+	spi_bitbang_stop(&pp->bitbang);
+
+	/* power down */
+	parport_write_data(pp->port, 0);
+	msleep(10);
+
+	parport_release(pp->pd);
+	parport_unregister_device(pp->pd);
+
+	(void) spi_master_put(pp->bitbang.master);
+
+	lm70llp = NULL;
+}
+
+
+static struct parport_driver spi_lm70llp_drv = {
+	.name =		DRVNAME,
+	.attach =	spi_lm70llp_attach,
+	.detach =	spi_lm70llp_detach,
+};
+
+static int __init init_spi_lm70llp(void)
+{
+	return parport_register_driver(&spi_lm70llp_drv);
+}
+module_init(init_spi_lm70llp);
+
+static void __exit cleanup_spi_lm70llp(void)
+{
+	parport_unregister_driver(&spi_lm70llp_drv);
+}
+module_exit(cleanup_spi_lm70llp);
+
+MODULE_AUTHOR("Kaiwan N Billimoria <kaiwan@designergraphix.com>");
+MODULE_DESCRIPTION(
+	"Parport adapter for the National Semiconductor LM70 LLP eval board");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
index e9798bf..3295cfc 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc83xx.c
@@ -47,6 +47,7 @@
 #define	SPMODE_ENABLE		(1 << 24)
 #define	SPMODE_LEN(x)		((x) << 20)
 #define	SPMODE_PM(x)		((x) << 16)
+#define	SPMODE_OP		(1 << 14)
 
 /*
  * Default for SPI Mode:
@@ -85,6 +86,11 @@
 	unsigned nsecs;		/* (clock cycle time)/2 */
 
 	u32 sysclk;
+	u32 rx_shift;		/* RX data reg shift when in qe mode */
+	u32 tx_shift;		/* TX data reg shift when in qe mode */
+
+	bool qe_mode;
+
 	void (*activate_cs) (u8 cs, u8 polarity);
 	void (*deactivate_cs) (u8 cs, u8 polarity);
 };
@@ -103,7 +109,7 @@
 void mpc83xx_spi_rx_buf_##type(u32 data, struct mpc83xx_spi *mpc83xx_spi) \
 {									  \
 	type * rx = mpc83xx_spi->rx;					  \
-	*rx++ = (type)data;						  \
+	*rx++ = (type)(data >> mpc83xx_spi->rx_shift);			  \
 	mpc83xx_spi->rx = rx;						  \
 }
 
@@ -114,7 +120,7 @@
 	const type * tx = mpc83xx_spi->tx;			\
 	if (!tx)						\
 		return 0;					\
-	data = *tx++;						\
+	data = *tx++ << mpc83xx_spi->tx_shift;			\
 	mpc83xx_spi->tx = tx;					\
 	return data;						\
 }
@@ -158,6 +164,12 @@
 
 		if ((mpc83xx_spi->sysclk / spi->max_speed_hz) >= 64) {
 			u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 64);
+			if (pm > 0x0f) {
+				printk(KERN_WARNING "MPC83xx SPI: SPICLK can't be less than SYSCLK/1024!\n"
+						"Requested SPICLK is %d Hz. Will use %d Hz instead.\n",
+						spi->max_speed_hz, mpc83xx_spi->sysclk / 1024);
+				pm = 0x0f;
+			}
 			regval |= SPMODE_PM(pm) | SPMODE_DIV16;
 		} else {
 			u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 4);
@@ -197,12 +209,22 @@
 	    || ((bits_per_word > 16) && (bits_per_word != 32)))
 		return -EINVAL;
 
+	mpc83xx_spi->rx_shift = 0;
+	mpc83xx_spi->tx_shift = 0;
 	if (bits_per_word <= 8) {
 		mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8;
 		mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8;
+		if (mpc83xx_spi->qe_mode) {
+			mpc83xx_spi->rx_shift = 16;
+			mpc83xx_spi->tx_shift = 24;
+		}
 	} else if (bits_per_word <= 16) {
 		mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u16;
 		mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u16;
+		if (mpc83xx_spi->qe_mode) {
+			mpc83xx_spi->rx_shift = 16;
+			mpc83xx_spi->tx_shift = 16;
+		}
 	} else if (bits_per_word <= 32) {
 		mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u32;
 		mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u32;
@@ -232,12 +254,21 @@
 	return 0;
 }
 
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
 static int mpc83xx_spi_setup(struct spi_device *spi)
 {
 	struct spi_bitbang *bitbang;
 	struct mpc83xx_spi *mpc83xx_spi;
 	int retval;
 
+	if (spi->mode & ~MODEBITS) {
+		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+			spi->mode & ~MODEBITS);
+		return -EINVAL;
+	}
+
 	if (!spi->max_speed_hz)
 		return -EINVAL;
 
@@ -371,7 +402,6 @@
 		ret = -ENODEV;
 		goto free_master;
 	}
-
 	mpc83xx_spi = spi_master_get_devdata(master);
 	mpc83xx_spi->bitbang.master = spi_master_get(master);
 	mpc83xx_spi->bitbang.chipselect = mpc83xx_spi_chipselect;
@@ -380,9 +410,17 @@
 	mpc83xx_spi->sysclk = pdata->sysclk;
 	mpc83xx_spi->activate_cs = pdata->activate_cs;
 	mpc83xx_spi->deactivate_cs = pdata->deactivate_cs;
+	mpc83xx_spi->qe_mode = pdata->qe_mode;
 	mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8;
 	mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8;
 
+	mpc83xx_spi->rx_shift = 0;
+	mpc83xx_spi->tx_shift = 0;
+	if (mpc83xx_spi->qe_mode) {
+		mpc83xx_spi->rx_shift = 16;
+		mpc83xx_spi->tx_shift = 24;
+	}
+
 	mpc83xx_spi->bitbang.master->setup = mpc83xx_spi_setup;
 	init_completion(&mpc83xx_spi->done);
 
@@ -417,6 +455,9 @@
 
 	/* Enable SPI interface */
 	regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+	if (pdata->qe_mode)
+		regval |= SPMODE_OP;
+
 	mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval);
 
 	ret = spi_bitbang_start(&mpc83xx_spi->bitbang);
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index d5a710f..7071ff8 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -146,6 +146,9 @@
 	return 0;
 }
 
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
 static int s3c24xx_spi_setup(struct spi_device *spi)
 {
 	int ret;
@@ -153,8 +156,11 @@
 	if (!spi->bits_per_word)
 		spi->bits_per_word = 8;
 
-	if ((spi->mode & SPI_LSB_FIRST) != 0)
+	if (spi->mode & ~MODEBITS) {
+		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+			spi->mode & ~MODEBITS);
 		return -EINVAL;
+	}
 
 	ret = s3c24xx_spi_setupxfer(spi, NULL);
 	if (ret < 0) {
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
new file mode 100644
index 0000000..08e981c
--- /dev/null
+++ b/drivers/spi/spi_txx9.c
@@ -0,0 +1,474 @@
+/*
+ * spi_txx9.c - TXx9 SPI controller driver.
+ *
+ * Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
+ *
+ * Convert to generic SPI framework - Atsushi Nemoto (anemo@mba.ocn.ne.jp)
+ */
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <asm/gpio.h>
+
+
+#define SPI_FIFO_SIZE 4
+
+#define TXx9_SPMCR		0x00
+#define TXx9_SPCR0		0x04
+#define TXx9_SPCR1		0x08
+#define TXx9_SPFS		0x0c
+#define TXx9_SPSR		0x14
+#define TXx9_SPDR		0x18
+
+/* SPMCR : SPI Master Control */
+#define TXx9_SPMCR_OPMODE	0xc0
+#define TXx9_SPMCR_CONFIG	0x40
+#define TXx9_SPMCR_ACTIVE	0x80
+#define TXx9_SPMCR_SPSTP	0x02
+#define TXx9_SPMCR_BCLR		0x01
+
+/* SPCR0 : SPI Control 0 */
+#define TXx9_SPCR0_TXIFL_MASK	0xc000
+#define TXx9_SPCR0_RXIFL_MASK	0x3000
+#define TXx9_SPCR0_SIDIE	0x0800
+#define TXx9_SPCR0_SOEIE	0x0400
+#define TXx9_SPCR0_RBSIE	0x0200
+#define TXx9_SPCR0_TBSIE	0x0100
+#define TXx9_SPCR0_IFSPSE	0x0010
+#define TXx9_SPCR0_SBOS		0x0004
+#define TXx9_SPCR0_SPHA		0x0002
+#define TXx9_SPCR0_SPOL		0x0001
+
+/* SPSR : SPI Status */
+#define TXx9_SPSR_TBSI		0x8000
+#define TXx9_SPSR_RBSI		0x4000
+#define TXx9_SPSR_TBS_MASK	0x3800
+#define TXx9_SPSR_RBS_MASK	0x0700
+#define TXx9_SPSR_SPOE		0x0080
+#define TXx9_SPSR_IFSD		0x0008
+#define TXx9_SPSR_SIDLE		0x0004
+#define TXx9_SPSR_STRDY		0x0002
+#define TXx9_SPSR_SRRDY		0x0001
+
+
+struct txx9spi {
+	struct workqueue_struct	*workqueue;
+	struct work_struct work;
+	spinlock_t lock;	/* protect 'queue' */
+	struct list_head queue;
+	wait_queue_head_t waitq;
+	void __iomem *membase;
+	int irq;
+	int baseclk;
+	struct clk *clk;
+	u32 max_speed_hz, min_speed_hz;
+	int last_chipselect;
+	int last_chipselect_val;
+};
+
+static u32 txx9spi_rd(struct txx9spi *c, int reg)
+{
+	return __raw_readl(c->membase + reg);
+}
+static void txx9spi_wr(struct txx9spi *c, u32 val, int reg)
+{
+	__raw_writel(val, c->membase + reg);
+}
+
+static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c,
+		int on, unsigned int cs_delay)
+{
+	int val = (spi->mode & SPI_CS_HIGH) ? on : !on;
+	if (on) {
+		/* deselect the chip with cs_change hint in last transfer */
+		if (c->last_chipselect >= 0)
+			gpio_set_value(c->last_chipselect,
+					!c->last_chipselect_val);
+		c->last_chipselect = spi->chip_select;
+		c->last_chipselect_val = val;
+	} else {
+		c->last_chipselect = -1;
+		ndelay(cs_delay);	/* CS Hold Time */
+	}
+	gpio_set_value(spi->chip_select, val);
+	ndelay(cs_delay);	/* CS Setup Time / CS Recovery Time */
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS	(SPI_CS_HIGH|SPI_CPOL|SPI_CPHA)
+
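+/* Per-device setup: validate the mode bits, clock range and word size,
+ * then configure the chip-select GPIO (spi->chip_select is a GPIO number
+ * here) as an output and leave the chip deselected.
+ */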
+static int txx9spi_setup(struct spi_device *spi)
+{
+	struct txx9spi *c = spi_master_get_devdata(spi->master);
+	u8 bits_per_word;
+
+	if (spi->mode & ~MODEBITS)
+		return -EINVAL;
+
+	if (!spi->max_speed_hz
+			|| spi->max_speed_hz > c->max_speed_hz
+			|| spi->max_speed_hz < c->min_speed_hz)
+		return -EINVAL;
+
+	bits_per_word = spi->bits_per_word ? : 8;
+	if (bits_per_word != 8 && bits_per_word != 16)
+		return -EINVAL;
+
+	if (gpio_direction_output(spi->chip_select,
+			!(spi->mode & SPI_CS_HIGH))) {
+		dev_err(&spi->dev, "Cannot setup GPIO for chipselect.\n");
+		return -EINVAL;
+	}
+
+	/* deselect chip */
+	spin_lock(&c->lock);
+	txx9spi_cs_func(spi, c, 0, (NSEC_PER_SEC / 2) / spi->max_speed_hz);
+	spin_unlock(&c->lock);
+
+	return 0;
+}
+
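+/* RX-threshold interrupt: mask further RX interrupts and wake the worker
+ * sleeping in wait_event() inside txx9spi_work_one().
+ */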
+static irqreturn_t txx9spi_interrupt(int irq, void *dev_id)
+{
+	struct txx9spi *c = dev_id;
+
+	/* disable rx intr */
+	txx9spi_wr(c, txx9spi_rd(c, TXx9_SPCR0) & ~TXx9_SPCR0_RBSIE,
+			TXx9_SPCR0);
+	wake_up(&c->waitq);
+	return IRQ_HANDLED;
+}
+
+static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
+{
+	struct spi_device *spi = m->spi;
+	struct spi_transfer *t;
+	unsigned int cs_delay;
+	unsigned int cs_change = 1;
+	int status = 0;
+	u32 mcr;
+	u32 prev_speed_hz = 0;
+	u8 prev_bits_per_word = 0;
+
+	/* CS setup/hold/recovery time in nsec */
+	cs_delay = 100 + (NSEC_PER_SEC / 2) / spi->max_speed_hz;
+
+	mcr = txx9spi_rd(c, TXx9_SPMCR);
+	if (unlikely((mcr & TXx9_SPMCR_OPMODE) == TXx9_SPMCR_ACTIVE)) {
+		dev_err(&spi->dev, "Bad mode.\n");
+		status = -EIO;
+		goto exit;
+	}
+	mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
+
+	/* enter config mode */
+	txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
+	txx9spi_wr(c, TXx9_SPCR0_SBOS
+			| ((spi->mode & SPI_CPOL) ? TXx9_SPCR0_SPOL : 0)
+			| ((spi->mode & SPI_CPHA) ? TXx9_SPCR0_SPHA : 0)
+			| 0x08,
+			TXx9_SPCR0);
+
+	list_for_each_entry (t, &m->transfers, transfer_list) {
+		const void *txbuf = t->tx_buf;
+		void *rxbuf = t->rx_buf;
+		u32 data;
+		unsigned int len = t->len;
+		unsigned int wsize;
+		u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
+		u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
+
+		bits_per_word = bits_per_word ? : 8;
+		wsize = bits_per_word >> 3; /* in bytes */
+
+		if (prev_speed_hz != speed_hz
+				|| prev_bits_per_word != bits_per_word) {
+			u32 n = (c->baseclk + speed_hz - 1) / speed_hz;
+			if (n < 1)
+				n = 1;
+			else if (n > 0xff)
+				n = 0xff;
+			/* enter config mode */
+			txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR,
+					TXx9_SPMCR);
+			txx9spi_wr(c, (n << 8) | bits_per_word, TXx9_SPCR1);
+			/* enter active mode */
+			txx9spi_wr(c, mcr | TXx9_SPMCR_ACTIVE, TXx9_SPMCR);
+
+			prev_speed_hz = speed_hz;
+			prev_bits_per_word = bits_per_word;
+		}
+
+		if (cs_change)
+			txx9spi_cs_func(spi, c, 1, cs_delay);
+		cs_change = t->cs_change;
+		while (len) {
+			unsigned int count = SPI_FIFO_SIZE;
+			int i;
+			u32 cr0;
+
+			if (len < count * wsize)
+				count = len / wsize;
+			/* now tx must be idle... */
+			while (!(txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_SIDLE))
+				cpu_relax();
+			cr0 = txx9spi_rd(c, TXx9_SPCR0);
+			cr0 &= ~TXx9_SPCR0_RXIFL_MASK;
+			cr0 |= (count - 1) << 12;
+			/* enable rx intr */
+			cr0 |= TXx9_SPCR0_RBSIE;
+			txx9spi_wr(c, cr0, TXx9_SPCR0);
+			/* send */
+			for (i = 0; i < count; i++) {
+				if (txbuf) {
+					data = (wsize == 1)
+						? *(const u8 *)txbuf
+						: *(const u16 *)txbuf;
+					txx9spi_wr(c, data, TXx9_SPDR);
+					txbuf += wsize;
+				} else
+					txx9spi_wr(c, 0, TXx9_SPDR);
+			}
+			/* wait all rx data */
+			wait_event(c->waitq,
+				txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_RBSI);
+			/* receive */
+			for (i = 0; i < count; i++) {
+				data = txx9spi_rd(c, TXx9_SPDR);
+				if (rxbuf) {
+					if (wsize == 1)
+						*(u8 *)rxbuf = data;
+					else
+						*(u16 *)rxbuf = data;
+					rxbuf += wsize;
+				}
+			}
+			len -= count * wsize;
+		}
+		m->actual_length += t->len;
+		if (t->delay_usecs)
+			udelay(t->delay_usecs);
+
+		if (!cs_change)
+			continue;
+		if (t->transfer_list.next == &m->transfers)
+			break;
+		/* sometimes a short mid-message deselect of the chip
+		 * may be needed to terminate a mode or command
+		 */
+		txx9spi_cs_func(spi, c, 0, cs_delay);
+	}
+
+exit:
+	m->status = status;
+	m->complete(m->context);
+
+	/* normally deactivate chipselect ... unless no error and
+	 * cs_change has hinted that the next message will probably
+	 * be for this chip too.
+	 */
+	if (!(status == 0 && cs_change))
+		txx9spi_cs_func(spi, c, 0, cs_delay);
+
+	/* enter config mode */
+	txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
+}
+
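+/* Workqueue handler: drain the message queue, dropping the lock while each
+ * message is handed to txx9spi_work_one() so new messages can keep arriving.
+ */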
+static void txx9spi_work(struct work_struct *work)
+{
+	struct txx9spi *c = container_of(work, struct txx9spi, work);
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->lock, flags);
+	while (!list_empty(&c->queue)) {
+		struct spi_message *m;
+
+		m = container_of(c->queue.next, struct spi_message, queue);
+		list_del_init(&m->queue);
+		spin_unlock_irqrestore(&c->lock, flags);
+
+		txx9spi_work_one(c, m);
+
+		spin_lock_irqsave(&c->lock, flags);
+	}
+	spin_unlock_irqrestore(&c->lock, flags);
+}
+
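+/* ->transfer hook: validate each transfer's parameters, queue the message
+ * and kick the workqueue; the transfer itself runs later in txx9spi_work().
+ */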
+static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+	struct spi_master *master = spi->master;
+	struct txx9spi *c = spi_master_get_devdata(master);
+	struct spi_transfer *t;
+	unsigned long flags;
+
+	m->actual_length = 0;
+
+	/* check each transfer's parameters */
+	list_for_each_entry (t, &m->transfers, transfer_list) {
+		u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
+		u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
+
+		bits_per_word = bits_per_word ? : 8;
+		if (!t->tx_buf && !t->rx_buf && t->len)
+			return -EINVAL;
+		if (bits_per_word != 8 && bits_per_word != 16)
+			return -EINVAL;
+		if (t->len & ((bits_per_word >> 3) - 1))
+			return -EINVAL;
+		if (speed_hz < c->min_speed_hz || speed_hz > c->max_speed_hz)
+			return -EINVAL;
+	}
+
+	spin_lock_irqsave(&c->lock, flags);
+	list_add_tail(&m->queue, &c->queue);
+	queue_work(c->workqueue, &c->work);
+	spin_unlock_irqrestore(&c->lock, flags);
+
+	return 0;
+}
+
+static int __init txx9spi_probe(struct platform_device *dev)
+{
+	struct spi_master *master;
+	struct txx9spi *c;
+	struct resource *res;
+	int ret = -ENODEV;
+	u32 mcr;
+
+	master = spi_alloc_master(&dev->dev, sizeof(*c));
+	if (!master)
+		return ret;
+	c = spi_master_get_devdata(master);
+	c->irq = -1;
+	platform_set_drvdata(dev, master);
+
+	INIT_WORK(&c->work, txx9spi_work);
+	spin_lock_init(&c->lock);
+	INIT_LIST_HEAD(&c->queue);
+	init_waitqueue_head(&c->waitq);
+
+	c->clk = clk_get(&dev->dev, "spi-baseclk");
+	if (IS_ERR(c->clk)) {
+		ret = PTR_ERR(c->clk);
+		c->clk = NULL;
+		goto exit;
+	}
+	ret = clk_enable(c->clk);
+	if (ret) {
+		clk_put(c->clk);
+		c->clk = NULL;
+		goto exit;
+	}
+	c->baseclk = clk_get_rate(c->clk);
+	c->min_speed_hz = (c->baseclk + 0xff - 1) / 0xff;
+	c->max_speed_hz = c->baseclk;
+
+	ret = -ENODEV;
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (!res)
+		goto exit;
+	c->membase = ioremap(res->start, res->end - res->start + 1);
+	if (!c->membase)
+		goto exit;
+
+	/* enter config mode */
+	mcr = txx9spi_rd(c, TXx9_SPMCR);
+	mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
+	txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
+
+	c->irq = platform_get_irq(dev, 0);
+	if (c->irq < 0)
+		goto exit;
+	ret = request_irq(c->irq, txx9spi_interrupt, 0, dev->name, c);
+	if (ret) {
+		c->irq = -1;
+		goto exit;
+	}
+
+	c->workqueue = create_singlethread_workqueue(master->cdev.dev->bus_id);
+	if (!c->workqueue)
+		goto exit;
+	c->last_chipselect = -1;
+
+	dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n",
+		 (unsigned long long)res->start, c->irq,
+		 (c->baseclk + 500000) / 1000000);
+
+	master->bus_num = dev->id;
+	master->setup = txx9spi_setup;
+	master->transfer = txx9spi_transfer;
+	master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */
+
+	ret = spi_register_master(master);
+	if (ret)
+		goto exit;
+	return 0;
+exit:
+	if (c->workqueue)
+		destroy_workqueue(c->workqueue);
+	if (c->irq >= 0)
+		free_irq(c->irq, c);
+	if (c->membase)
+		iounmap(c->membase);
+	if (c->clk) {
+		clk_disable(c->clk);
+		clk_put(c->clk);
+	}
+	platform_set_drvdata(dev, NULL);
+	spi_master_put(master);
+	return ret;
+}
+
+static int __exit txx9spi_remove(struct platform_device *dev)
+{
+	struct spi_master *master = spi_master_get(platform_get_drvdata(dev));
+	struct txx9spi *c = spi_master_get_devdata(master);
+
+	spi_unregister_master(master);
+	platform_set_drvdata(dev, NULL);
+	destroy_workqueue(c->workqueue);
+	free_irq(c->irq, c);
+	iounmap(c->membase);
+	clk_disable(c->clk);
+	clk_put(c->clk);
+	spi_master_put(master);
+	return 0;
+}
+
+static struct platform_driver txx9spi_driver = {
+	.remove = __exit_p(txx9spi_remove),
+	.driver = {
+		.name = "txx9spi",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init txx9spi_init(void)
+{
+	return platform_driver_probe(&txx9spi_driver, txx9spi_probe);
+}
+subsys_initcall(txx9spi_init);
+
+static void __exit txx9spi_exit(void)
+{
+	platform_driver_unregister(&txx9spi_driver);
+}
+module_exit(txx9spi_exit);
+
+MODULE_DESCRIPTION("TXx9 SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index d04242a..38b60ad 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -181,7 +181,8 @@
 		}
 		if (u_tmp->tx_buf) {
 			k_tmp->tx_buf = buf;
-			if (copy_from_user(buf, (const u8 __user *)u_tmp->tx_buf,
+			if (copy_from_user(buf, (const u8 __user *)
+						(ptrdiff_t) u_tmp->tx_buf,
 					u_tmp->len))
 				goto done;
 		}
@@ -213,7 +214,8 @@
 	buf = spidev->buffer;
 	for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
 		if (u_tmp->rx_buf) {
-			if (__copy_to_user((u8 __user *)u_tmp->rx_buf, buf,
+			if (__copy_to_user((u8 __user *)
+					(ptrdiff_t) u_tmp->rx_buf, buf,
 					u_tmp->len)) {
 				status = -EFAULT;
 				goto done;
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
new file mode 100644
index 0000000..6da58ca
--- /dev/null
+++ b/drivers/spi/tle62x0.c
@@ -0,0 +1,328 @@
+/*
+ * tle62x0.c -- support Infineon TLE62x0 driver chips
+ *
+ * Copyright (c) 2007 Simtec Electronics
+ *	Ben Dooks, <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/tle62x0.h>
+
+
+#define CMD_READ	0x00
+#define CMD_SET		0xff
+
+#define DIAG_NORMAL	0x03
+#define DIAG_OVERLOAD	0x02
+#define DIAG_OPEN	0x01
+#define DIAG_SHORTGND	0x00
+
+struct tle62x0_state {
+	struct spi_device	*us;
+	struct mutex		lock;
+	unsigned int		nr_gpio;
+	unsigned int		gpio_state;
+
+	unsigned char		tx_buff[4];
+	unsigned char		rx_buff[4];
+};
+
+static int to_gpio_num(struct device_attribute *attr);
+
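+/* Push the cached gpio_state out to the chip as a CMD_SET frame;
+ * 16-channel parts send two data bytes after the command, others send one.
+ */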
+static inline int tle62x0_write(struct tle62x0_state *st)
+{
+	unsigned char *buff = st->tx_buff;
+	unsigned int gpio_state = st->gpio_state;
+
+	buff[0] = CMD_SET;
+
+	if (st->nr_gpio == 16) {
+		buff[1] = gpio_state >> 8;
+		buff[2] = gpio_state;
+	} else {
+		buff[1] = gpio_state;
+	}
+
+	dev_dbg(&st->us->dev, "buff %02x,%02x,%02x\n",
+		buff[0], buff[1], buff[2]);
+
+	return spi_write(st->us, buff, (st->nr_gpio == 16) ? 3 : 2);
+}
+
+static inline int tle62x0_read(struct tle62x0_state *st)
+{
+	unsigned char *txbuff = st->tx_buff;
+	struct spi_transfer xfer = {
+		.tx_buf		= txbuff,
+		.rx_buf		= st->rx_buff,
+		.len		= (st->nr_gpio * 2) / 8,	/* 2 diagnosis bits per channel */
+	};
+	struct spi_message msg;
+
+	txbuff[0] = CMD_READ;
+	txbuff[1] = 0x00;
+	txbuff[2] = 0x00;
+	txbuff[3] = 0x00;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+
+	return spi_sync(st->us, &msg);
+}
+
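+/* Translate a 2-bit TLE62x0 diagnosis code into a status letter:
+ * N = normal, V = overload, O = open, G = short to ground.
+ */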
+static unsigned char *decode_fault(unsigned int fault_code)
+{
+	fault_code &= 3;
+
+	switch (fault_code) {
+	case DIAG_NORMAL:
+		return "N";
+	case DIAG_OVERLOAD:
+		return "V";
+	case DIAG_OPEN:
+		return "O";
+	case DIAG_SHORTGND:
+		return "G";
+	}
+
+	return "?";
+}
+
+static ssize_t tle62x0_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct tle62x0_state *st = dev_get_drvdata(dev);
+	char *bp = buf;
+	unsigned char *buff = st->rx_buff;
+	unsigned long fault = 0;
+	int ptr;
+	int ret;
+
+	mutex_lock(&st->lock);
+	ret = tle62x0_read(st);
+
+	dev_dbg(dev, "tle62x0_read() returned %d\n", ret);
+
+	for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) {
+		fault <<= 8;
+		fault  |= ((unsigned long)buff[ptr]);
+
+		dev_dbg(dev, "byte %d is %02x\n", ptr, buff[ptr]);
+	}
+
+	for (ptr = 0; ptr < st->nr_gpio; ptr++) {
+		bp += sprintf(bp, "%s ", decode_fault(fault >> (ptr * 2)));
+	}
+
+	*bp++ = '\n';
+
+	mutex_unlock(&st->lock);
+	return bp - buf;
+}
+
+static DEVICE_ATTR(status_show, S_IRUGO, tle62x0_status_show, NULL);
+
+static ssize_t tle62x0_gpio_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct tle62x0_state *st = dev_get_drvdata(dev);
+	int gpio_num = to_gpio_num(attr);
+	int value;
+
+	mutex_lock(&st->lock);
+	value = (st->gpio_state >> gpio_num) & 1;
+	mutex_unlock(&st->lock);
+
+	return snprintf(buf, PAGE_SIZE, "%d", value);
+}
+
+static ssize_t tle62x0_gpio_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t len)
+{
+	struct tle62x0_state *st = dev_get_drvdata(dev);
+	int gpio_num = to_gpio_num(attr);
+	unsigned long val;
+	char *endp;
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (buf == endp)
+		return -EINVAL;
+
+	dev_dbg(dev, "setting gpio %d to %ld\n", gpio_num, val);
+
+	mutex_lock(&st->lock);
+
+	if (val)
+		st->gpio_state |= 1 << gpio_num;
+	else
+		st->gpio_state &= ~(1 << gpio_num);
+
+	tle62x0_write(st);
+	mutex_unlock(&st->lock);
+
+	return len;
+}
+
+static DEVICE_ATTR(gpio1, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio2, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio3, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio4, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio5, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio6, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio7, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio8, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio9, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio10, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio11, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio12, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio13, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio14, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio15, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio16, S_IWUSR|S_IRUGO,
+		tle62x0_gpio_show, tle62x0_gpio_store);
+
+static struct device_attribute *gpio_attrs[] = {
+	[0]		= &dev_attr_gpio1,
+	[1]		= &dev_attr_gpio2,
+	[2]		= &dev_attr_gpio3,
+	[3]		= &dev_attr_gpio4,
+	[4]		= &dev_attr_gpio5,
+	[5]		= &dev_attr_gpio6,
+	[6]		= &dev_attr_gpio7,
+	[7]		= &dev_attr_gpio8,
+	[8]		= &dev_attr_gpio9,
+	[9]		= &dev_attr_gpio10,
+	[10]		= &dev_attr_gpio11,
+	[11]		= &dev_attr_gpio12,
+	[12]		= &dev_attr_gpio13,
+	[13]		= &dev_attr_gpio14,
+	[14]		= &dev_attr_gpio15,
+	[15]		= &dev_attr_gpio16
+};
+
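+/* Map a gpioN device attribute back to its zero-based index in gpio_attrs[],
+ * or return -1 if the attribute is not one of ours.
+ */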
+static int to_gpio_num(struct device_attribute *attr)
+{
+	int ptr;
+
+	for (ptr = 0; ptr < ARRAY_SIZE(gpio_attrs); ptr++) {
+		if (gpio_attrs[ptr] == attr)
+			return ptr;
+	}
+
+	return -1;
+}
+
+static int __devinit tle62x0_probe(struct spi_device *spi)
+{
+	struct tle62x0_state *st;
+	struct tle62x0_pdata *pdata;
+	int ptr;
+	int ret;
+
+	pdata = spi->dev.platform_data;
+	if (pdata == NULL) {
+		dev_err(&spi->dev, "no device data specified\n");
+		return -EINVAL;
+	}
+
+	st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL);
+	if (st == NULL) {
+		dev_err(&spi->dev, "no memory for device state\n");
+		return -ENOMEM;
+	}
+
+	st->us = spi;
+	st->nr_gpio = pdata->gpio_count;
+	st->gpio_state = pdata->init_state;
+
+	mutex_init(&st->lock);
+
+	ret = device_create_file(&spi->dev, &dev_attr_status_show);
+	if (ret) {
+		dev_err(&spi->dev, "cannot create status attribute\n");
+		goto err_status;
+	}
+
+	for (ptr = 0; ptr < pdata->gpio_count; ptr++) {
+		ret = device_create_file(&spi->dev, gpio_attrs[ptr]);
+		if (ret) {
+			dev_err(&spi->dev, "cannot create gpio attribute\n");
+			goto err_gpios;
+		}
+	}
+
+	/* tle62x0_write(st); */
+	spi_set_drvdata(spi, st);
+	return 0;
+
+ err_gpios:
+	for (; ptr > 0; ptr--)
+		device_remove_file(&spi->dev, gpio_attrs[ptr - 1]);
+
+	device_remove_file(&spi->dev, &dev_attr_status_show);
+
+ err_status:
+	kfree(st);
+	return ret;
+}
+
+static int __devexit tle62x0_remove(struct spi_device *spi)
+{
+	struct tle62x0_state *st = spi_get_drvdata(spi);
+	int ptr;
+
+	for (ptr = 0; ptr < st->nr_gpio; ptr++)
+		device_remove_file(&spi->dev, gpio_attrs[ptr]);
+
+	kfree(st);
+	return 0;
+}
+
+static struct spi_driver tle62x0_driver = {
+	.driver = {
+		.name	= "tle62x0",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= tle62x0_probe,
+	.remove		= __devexit_p(tle62x0_remove),
+};
+
+static __init int tle62x0_init(void)
+{
+	return spi_register_driver(&tle62x0_driver);
+}
+
+static __exit void tle62x0_exit(void)
+{
+	spi_unregister_driver(&tle62x0_driver);
+}
+
+module_init(tle62x0_init);
+module_exit(tle62x0_exit);
+
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("TLE62x0 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
new file mode 100644
index 0000000..f0bf9a6
--- /dev/null
+++ b/drivers/spi/xilinx_spi.c
@@ -0,0 +1,434 @@
+/*
+ * xilinx_spi.c
+ *
+ * Xilinx SPI controller driver (master mode only)
+ *
+ * Author: MontaVista Software, Inc.
+ *	source@mvista.com
+ *
+ * 2002-2007 (c) MontaVista Software, Inc.  This file is licensed under the
+ * terms of the GNU General Public License version 2.  This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/io.h>
+
+#include <syslib/virtex_devices.h>
+
+#define XILINX_SPI_NAME "xspi"
+
+/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
+ * Product Specification", DS464
+ */
+#define XSPI_CR_OFFSET		0x62	/* 16-bit Control Register */
+
+#define XSPI_CR_ENABLE		0x02
+#define XSPI_CR_MASTER_MODE	0x04
+#define XSPI_CR_CPOL		0x08
+#define XSPI_CR_CPHA		0x10
+#define XSPI_CR_MODE_MASK	(XSPI_CR_CPHA | XSPI_CR_CPOL)
+#define XSPI_CR_TXFIFO_RESET	0x20
+#define XSPI_CR_RXFIFO_RESET	0x40
+#define XSPI_CR_MANUAL_SSELECT	0x80
+#define XSPI_CR_TRANS_INHIBIT	0x100
+
+#define XSPI_SR_OFFSET		0x67	/* 8-bit Status Register */
+
+#define XSPI_SR_RX_EMPTY_MASK	0x01	/* Receive FIFO is empty */
+#define XSPI_SR_RX_FULL_MASK	0x02	/* Receive FIFO is full */
+#define XSPI_SR_TX_EMPTY_MASK	0x04	/* Transmit FIFO is empty */
+#define XSPI_SR_TX_FULL_MASK	0x08	/* Transmit FIFO is full */
+#define XSPI_SR_MODE_FAULT_MASK	0x10	/* Mode fault error */
+
+#define XSPI_TXD_OFFSET		0x6b	/* 8-bit Data Transmit Register */
+#define XSPI_RXD_OFFSET		0x6f	/* 8-bit Data Receive Register */
+
+#define XSPI_SSR_OFFSET		0x70	/* 32-bit Slave Select Register */
+
+/* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414
+ * IPIF registers are 32 bit
+ */
+#define XIPIF_V123B_DGIER_OFFSET	0x1c	/* IPIF global int enable reg */
+#define XIPIF_V123B_GINTR_ENABLE	0x80000000
+
+#define XIPIF_V123B_IISR_OFFSET		0x20	/* IPIF interrupt status reg */
+#define XIPIF_V123B_IIER_OFFSET		0x28	/* IPIF interrupt enable reg */
+
+#define XSPI_INTR_MODE_FAULT		0x01	/* Mode fault error */
+#define XSPI_INTR_SLAVE_MODE_FAULT	0x02	/* Selected as slave while
+						 * disabled */
+#define XSPI_INTR_TX_EMPTY		0x04	/* TxFIFO is empty */
+#define XSPI_INTR_TX_UNDERRUN		0x08	/* TxFIFO was underrun */
+#define XSPI_INTR_RX_FULL		0x10	/* RxFIFO is full */
+#define XSPI_INTR_RX_OVERRUN		0x20	/* RxFIFO was overrun */
+
+#define XIPIF_V123B_RESETR_OFFSET	0x40	/* IPIF reset register */
+#define XIPIF_V123B_RESET_MASK		0x0a	/* the value to write */
+
+struct xilinx_spi {
+	/* bitbang has to be first */
+	struct spi_bitbang bitbang;
+	struct completion done;
+
+	void __iomem	*regs;	/* virt. address of the control registers */
+
+	int		irq;
+
+	u32		speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
+
+	u8 *rx_ptr;		/* pointer in the Rx buffer */
+	const u8 *tx_ptr;	/* pointer in the Tx buffer */
+	int remaining_bytes;	/* the number of bytes left to transfer */
+};
+
+static void xspi_init_hw(void __iomem *regs_base)
+{
+	/* Reset the SPI device */
+	out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET,
+		 XIPIF_V123B_RESET_MASK);
+	/* Disable all the interrupts just in case */
+	out_be32(regs_base + XIPIF_V123B_IIER_OFFSET, 0);
+	/* Enable the global IPIF interrupt */
+	out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET,
+		 XIPIF_V123B_GINTR_ENABLE);
+	/* Deselect the slave on the SPI bus */
+	out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff);
+	/* Disable the transmitter, enable Manual Slave Select Assertion,
+	 * put SPI controller into master mode, and enable it */
+	out_be16(regs_base + XSPI_CR_OFFSET,
+		 XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT
+		 | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE);
+}
+
+static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
+{
+	struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+
+	if (is_on == BITBANG_CS_INACTIVE) {
+		/* Deselect the slave on the SPI bus */
+		out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff);
+	} else if (is_on == BITBANG_CS_ACTIVE) {
+		/* Set the SPI clock phase and polarity */
+		u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET)
+			 & ~XSPI_CR_MODE_MASK;
+		if (spi->mode & SPI_CPHA)
+			cr |= XSPI_CR_CPHA;
+		if (spi->mode & SPI_CPOL)
+			cr |= XSPI_CR_CPOL;
+		out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+
+		/* We do not check spi->max_speed_hz here as the SPI clock
+		 * frequency is not software programmable (it is fixed by an
+		 * IP block design parameter).
+		 */
+
+		/* Activate the chip select */
+		out_be32(xspi->regs + XSPI_SSR_OFFSET,
+			 ~(0x0001 << spi->chip_select));
+	}
+}
+
+/* spi_bitbang requires custom setup_transfer() to be defined if there is a
+ * custom txrx_bufs(). We have nothing to set up here as the SPI IP block
+ * supports just 8 bits per word, and SPI clock can't be changed in software.
+ * Check for 8 bits per word. Chip select delay calculations could be
+ * added here as soon as bitbang_work() can be made aware of the delay value.
+ */
+static int xilinx_spi_setup_transfer(struct spi_device *spi,
+		struct spi_transfer *t)
+{
+	u8 bits_per_word;
+	u32 hz;
+	struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+
+	bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
+	hz = (t) ? t->speed_hz : spi->max_speed_hz;
+	if (bits_per_word != 8) {
+		dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
+			__FUNCTION__, bits_per_word);
+		return -EINVAL;
+	}
+
+	if (hz && xspi->speed_hz > hz) {
+		dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n",
+			__FUNCTION__, hz);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA)
+
+static int xilinx_spi_setup(struct spi_device *spi)
+{
+	struct spi_bitbang *bitbang;
+	struct xilinx_spi *xspi;
+	int retval;
+
+	xspi = spi_master_get_devdata(spi->master);
+	bitbang = &xspi->bitbang;
+
+	if (!spi->bits_per_word)
+		spi->bits_per_word = 8;
+
+	if (spi->mode & ~MODEBITS) {
+		dev_err(&spi->dev, "%s, unsupported mode bits %x\n",
+			__FUNCTION__, spi->mode & ~MODEBITS);
+		return -EINVAL;
+	}
+
+	retval = xilinx_spi_setup_transfer(spi, NULL);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
+		__FUNCTION__, spi->mode & MODEBITS, spi->bits_per_word, 0);
+
+	return 0;
+}
+
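+/* Push bytes from the current transfer into the Tx FIFO until it is full
+ * or the transfer is exhausted; a NULL tx buffer sends zeroes.
+ */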
+static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
+{
+	u8 sr;
+
+	/* Fill the Tx FIFO with as many bytes as possible */
+	sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+	while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
+		if (xspi->tx_ptr) {
+			out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++);
+		} else {
+			out_8(xspi->regs + XSPI_TXD_OFFSET, 0);
+		}
+		xspi->remaining_bytes--;
+		sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+	}
+}
+
+static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+	struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+	u32 ipif_ier;
+	u16 cr;
+
+	/* We get here with transmitter inhibited */
+
+	xspi->tx_ptr = t->tx_buf;
+	xspi->rx_ptr = t->rx_buf;
+	xspi->remaining_bytes = t->len;
+	INIT_COMPLETION(xspi->done);
+
+	xilinx_spi_fill_tx_fifo(xspi);
+
+	/* Enable the transmit empty interrupt, which we use to determine
+	 * progress on the transmission.
+	 */
+	ipif_ier = in_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET);
+	out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET,
+		 ipif_ier | XSPI_INTR_TX_EMPTY);
+
+	/* Start the transfer by not inhibiting the transmitter any longer */
+	cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT;
+	out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+
+	wait_for_completion(&xspi->done);
+
+	/* Disable the transmit empty interrupt */
+	out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, ipif_ier);
+
+	return t->len - xspi->remaining_bytes;
+}
+
+
+/* This driver supports single master mode only. Hence Tx FIFO Empty
+ * is the only interrupt we care about.
+ * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
+ * Fault are not expected to happen.
+ */
+static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
+{
+	struct xilinx_spi *xspi = dev_id;
+	u32 ipif_isr;
+
+	/* Get the IPIF interrupts, and clear them immediately */
+	ipif_isr = in_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET);
+	out_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET, ipif_isr);
+
+	if (ipif_isr & XSPI_INTR_TX_EMPTY) {	/* Transmission completed */
+		u16 cr;
+		u8 sr;
+
+		/* A transmit has just completed. Process received data and
+		 * check for more data to transmit. Always inhibit the
+		 * transmitter while the ISR refills the transmit register/FIFO,
+		 * or make sure it is stopped if we're done.
+		 */
+		cr = in_be16(xspi->regs + XSPI_CR_OFFSET);
+		out_be16(xspi->regs + XSPI_CR_OFFSET,
+			 cr | XSPI_CR_TRANS_INHIBIT);
+
+		/* Read out all the data from the Rx FIFO */
+		sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+		while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
+			u8 data;
+
+			data = in_8(xspi->regs + XSPI_RXD_OFFSET);
+			if (xspi->rx_ptr) {
+				*xspi->rx_ptr++ = data;
+			}
+			sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+		}
+
+		/* See if there is more data to send */
+		if (xspi->remaining_bytes > 0) {
+			xilinx_spi_fill_tx_fifo(xspi);
+			/* Start the transfer by not inhibiting the
+			 * transmitter any longer
+			 */
+			out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+		} else {
+			/* No more data to send.
+			 * Indicate the transfer is completed.
+			 */
+			complete(&xspi->done);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int __init xilinx_spi_probe(struct platform_device *dev)
+{
+	int ret = 0;
+	struct spi_master *master;
+	struct xilinx_spi *xspi;
+	struct xspi_platform_data *pdata;
+	struct resource *r;
+
+	/* Get resources (memory, IRQ) associated with the device */
+	master = spi_alloc_master(&dev->dev, sizeof(struct xilinx_spi));
+
+	if (master == NULL) {
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(dev, master);
+	pdata = dev->dev.platform_data;
+
+	if (pdata == NULL) {
+		ret = -ENODEV;
+		goto put_master;
+	}
+
+	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (r == NULL) {
+		ret = -ENODEV;
+		goto put_master;
+	}
+
+	xspi = spi_master_get_devdata(master);
+	xspi->bitbang.master = spi_master_get(master);
+	xspi->bitbang.chipselect = xilinx_spi_chipselect;
+	xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
+	xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
+	xspi->bitbang.master->setup = xilinx_spi_setup;
+	init_completion(&xspi->done);
+
+	if (!request_mem_region(r->start,
+			r->end - r->start + 1, XILINX_SPI_NAME)) {
+		ret = -ENXIO;
+		goto put_master;
+	}
+
+	xspi->regs = ioremap(r->start, r->end - r->start + 1);
+	if (xspi->regs == NULL) {
+		ret = -ENOMEM;
+		goto put_master;
+	}
+
+	xspi->irq = platform_get_irq(dev, 0);
+	if (xspi->irq < 0) {
+		ret = -ENXIO;
+		goto unmap_io;
+	}
+
+	master->bus_num = pdata->bus_num;
+	master->num_chipselect = pdata->num_chipselect;
+	xspi->speed_hz = pdata->speed_hz;
+
+	/* SPI controller initializations */
+	xspi_init_hw(xspi->regs);
+
+	/* Register for SPI Interrupt */
+	ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
+	if (ret != 0)
+		goto unmap_io;
+
+	ret = spi_bitbang_start(&xspi->bitbang);
+	if (ret != 0) {
+		dev_err(&dev->dev, "spi_bitbang_start FAILED\n");
+		goto free_irq;
+	}
+
+	dev_info(&dev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
+			r->start, (u32)xspi->regs, xspi->irq);
+
+	return ret;
+
+free_irq:
+	free_irq(xspi->irq, xspi);
+unmap_io:
+	iounmap(xspi->regs);
+put_master:
+	spi_master_put(master);
+	return ret;
+}
+
+static int __devexit xilinx_spi_remove(struct platform_device *dev)
+{
+	struct xilinx_spi *xspi;
+	struct spi_master *master;
+
+	master = platform_get_drvdata(dev);
+	xspi = spi_master_get_devdata(master);
+
+	spi_bitbang_stop(&xspi->bitbang);
+	free_irq(xspi->irq, xspi);
+	iounmap(xspi->regs);
+	platform_set_drvdata(dev, NULL);
+	spi_master_put(xspi->bitbang.master);
+
+	return 0;
+}
+
+static struct platform_driver xilinx_spi_driver = {
+	.probe	= xilinx_spi_probe,
+	.remove	= __devexit_p(xilinx_spi_remove),
+	.driver = {
+		.name = XILINX_SPI_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init xilinx_spi_init(void)
+{
+	return platform_driver_register(&xilinx_spi_driver);
+}
+module_init(xilinx_spi_init);
+
+static void __exit xilinx_spi_exit(void)
+{
+	platform_driver_unregister(&xilinx_spi_driver);
+}
+module_exit(xilinx_spi_exit);
+
+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
+MODULE_DESCRIPTION("Xilinx SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tc/Makefile b/drivers/tc/Makefile
index 9673426..c899246 100644
--- a/drivers/tc/Makefile
+++ b/drivers/tc/Makefile
@@ -5,7 +5,6 @@
 # Object file lists.
 
 obj-$(CONFIG_TC) += tc.o tc-driver.o
-obj-$(CONFIG_ZS) += zs.o
 obj-$(CONFIG_VT) += lk201.o lk201-map.o lk201-remap.o
 
 $(obj)/lk201-map.o: $(obj)/lk201-map.c
diff --git a/drivers/tc/zs.c b/drivers/tc/zs.c
deleted file mode 100644
index ed979f1..0000000
--- a/drivers/tc/zs.c
+++ /dev/null
@@ -1,2203 +0,0 @@
-/*
- * decserial.c: Serial port driver for IOASIC DECstations.
- *
- * Derived from drivers/sbus/char/sunserial.c by Paul Mackerras.
- * Derived from drivers/macintosh/macserial.c by Harald Koerfgen.
- *
- * DECstation changes
- * Copyright (C) 1998-2000 Harald Koerfgen
- * Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005  Maciej W. Rozycki
- *
- * For the rest of the code the original Copyright applies:
- * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au)
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- *
- *
- * Note: for IOASIC systems the wiring is as follows:
- *
- * mouse/keyboard:
- * DIN-7 MJ-4  signal        SCC
- * 2     1     TxD       <-  A.TxD
- * 3     4     RxD       ->  A.RxD
- *
- * EIA-232/EIA-423:
- * DB-25 MMJ-6 signal        SCC
- * 2     2     TxD       <-  B.TxD
- * 3     5     RxD       ->  B.RxD
- * 4           RTS       <- ~A.RTS
- * 5           CTS       -> ~B.CTS
- * 6     6     DSR       -> ~A.SYNC
- * 8           CD        -> ~B.DCD
- * 12          DSRS(DCE) -> ~A.CTS  (*)
- * 15          TxC       ->  B.TxC
- * 17          RxC       ->  B.RxC
- * 20    1     DTR       <- ~A.DTR
- * 22          RI        -> ~A.DCD
- * 23          DSRS(DTE) <- ~B.RTS
- *
- * (*) EIA-232 defines the signal at this pin to be SCD, while DSRS(DCE)
- *     is shared with DSRS(DTE) at pin 23.
- */
-
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/major.h>
-#include <linux/string.h>
-#include <linux/fcntl.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/spinlock.h>
-#ifdef CONFIG_SERIAL_DEC_CONSOLE
-#include <linux/console.h>
-#endif
-
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-#include <asm/bootinfo.h>
-
-#include <asm/dec/interrupts.h>
-#include <asm/dec/ioasic_addrs.h>
-#include <asm/dec/machtype.h>
-#include <asm/dec/serial.h>
-#include <asm/dec/system.h>
-
-#ifdef CONFIG_KGDB
-#include <asm/kgdb.h>
-#endif
-#ifdef CONFIG_MAGIC_SYSRQ
-#include <linux/sysrq.h>
-#endif
-
-#include "zs.h"
-
-/*
- * It would be nice to dynamically allocate everything that
- * depends on NUM_SERIAL, so we could support any number of
- * Z8530s, but for now...
- */
-#define NUM_SERIAL	2		/* Max number of ZS chips supported */
-#define NUM_CHANNELS	(NUM_SERIAL * 2)	/* 2 channels per chip */
-#define CHANNEL_A_NR  (zs_parms->channel_a_offset > zs_parms->channel_b_offset)
-                                        /* Number of channel A in the chip */
-#define ZS_CHAN_IO_SIZE 8
-#define ZS_CLOCK        7372800 	/* Z8530 RTxC input clock rate */
-
-#define RECOVERY_DELAY  udelay(2)
-
-struct zs_parms {
-	unsigned long scc0;
-	unsigned long scc1;
-	int channel_a_offset;
-	int channel_b_offset;
-	int irq0;
-	int irq1;
-	int clock;
-};
-
-static struct zs_parms *zs_parms;
-
-#ifdef CONFIG_MACH_DECSTATION
-static struct zs_parms ds_parms = {
-	scc0 : IOASIC_SCC0,
-	scc1 : IOASIC_SCC1,
-	channel_a_offset : 1,
-	channel_b_offset : 9,
-	irq0 : -1,
-	irq1 : -1,
-	clock : ZS_CLOCK
-};
-#endif
-
-#ifdef CONFIG_MACH_DECSTATION
-#define DS_BUS_PRESENT (IOASIC)
-#else
-#define DS_BUS_PRESENT 0
-#endif
-
-#define BUS_PRESENT (DS_BUS_PRESENT)
-
-DEFINE_SPINLOCK(zs_lock);
-
-struct dec_zschannel zs_channels[NUM_CHANNELS];
-struct dec_serial zs_soft[NUM_CHANNELS];
-int zs_channels_found;
-struct dec_serial *zs_chain;	/* list of all channels */
-
-struct tty_struct zs_ttys[NUM_CHANNELS];
-
-#ifdef CONFIG_SERIAL_DEC_CONSOLE
-static struct console zs_console;
-#endif
-#if defined(CONFIG_SERIAL_DEC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) && \
-   !defined(MODULE)
-static unsigned long break_pressed; /* break, really ... */
-#endif
-
-static unsigned char zs_init_regs[16] __initdata = {
-	0,				/* write 0 */
-	0,				/* write 1 */
-	0,				/* write 2 */
-	0,				/* write 3 */
-	(X16CLK),			/* write 4 */
-	0,				/* write 5 */
-	0, 0, 0,			/* write 6, 7, 8 */
-	(MIE | DLC | NV),		/* write 9 */
-	(NRZ),				/* write 10 */
-	(TCBR | RCBR),			/* write 11 */
-	0, 0,				/* BRG time constant, write 12 + 13 */
-	(BRSRC | BRENABL),		/* write 14 */
-	0				/* write 15 */
-};
-
-static struct tty_driver *serial_driver;
-
-/* serial subtype definitions */
-#define SERIAL_TYPE_NORMAL	1
-
-/* number of characters left in xmit buffer before we ask for more */
-#define WAKEUP_CHARS 256
-
-/*
- * Debugging.
- */
-#undef SERIAL_DEBUG_OPEN
-#undef SERIAL_DEBUG_FLOW
-#undef SERIAL_DEBUG_THROTTLE
-#undef SERIAL_PARANOIA_CHECK
-
-#undef ZS_DEBUG_REGS
-
-#ifdef SERIAL_DEBUG_THROTTLE
-#define _tty_name(tty,buf) tty_name(tty,buf)
-#endif
-
-#define RS_STROBE_TIME 10
-#define RS_ISR_PASS_LIMIT 256
-
-static void probe_sccs(void);
-static void change_speed(struct dec_serial *info);
-static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
-
-static inline int serial_paranoia_check(struct dec_serial *info,
-					char *name, const char *routine)
-{
-#ifdef SERIAL_PARANOIA_CHECK
-	static const char *badmagic =
-		"Warning: bad magic number for serial struct %s in %s\n";
-	static const char *badinfo =
-		"Warning: null mac_serial for %s in %s\n";
-
-	if (!info) {
-		printk(badinfo, name, routine);
-		return 1;
-	}
-	if (info->magic != SERIAL_MAGIC) {
-		printk(badmagic, name, routine);
-		return 1;
-	}
-#endif
-	return 0;
-}
-
-/*
- * This is used to figure out the divisor speeds and the timeouts
- */
-static int baud_table[] = {
-	0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
-	9600, 19200, 38400, 57600, 115200, 0 };
-
-/*
- * Reading and writing Z8530 registers.
- */
-static inline unsigned char read_zsreg(struct dec_zschannel *channel,
-				       unsigned char reg)
-{
-	unsigned char retval;
-
-	if (reg != 0) {
-		*channel->control = reg & 0xf;
-		fast_iob(); RECOVERY_DELAY;
-	}
-	retval = *channel->control;
-	RECOVERY_DELAY;
-	return retval;
-}
-
-static inline void write_zsreg(struct dec_zschannel *channel,
-			       unsigned char reg, unsigned char value)
-{
-	if (reg != 0) {
-		*channel->control = reg & 0xf;
-		fast_iob(); RECOVERY_DELAY;
-	}
-	*channel->control = value;
-	fast_iob(); RECOVERY_DELAY;
-	return;
-}
-
-static inline unsigned char read_zsdata(struct dec_zschannel *channel)
-{
-	unsigned char retval;
-
-	retval = *channel->data;
-	RECOVERY_DELAY;
-	return retval;
-}
-
-static inline void write_zsdata(struct dec_zschannel *channel,
-				unsigned char value)
-{
-	*channel->data = value;
-	fast_iob(); RECOVERY_DELAY;
-	return;
-}
-
-static inline void load_zsregs(struct dec_zschannel *channel,
-			       unsigned char *regs)
-{
-/*	ZS_CLEARERR(channel);
-	ZS_CLEARFIFO(channel); */
-	/* Load 'em up */
-	write_zsreg(channel, R3, regs[R3] & ~RxENABLE);
-	write_zsreg(channel, R5, regs[R5] & ~TxENAB);
-	write_zsreg(channel, R4, regs[R4]);
-	write_zsreg(channel, R9, regs[R9]);
-	write_zsreg(channel, R1, regs[R1]);
-	write_zsreg(channel, R2, regs[R2]);
-	write_zsreg(channel, R10, regs[R10]);
-	write_zsreg(channel, R11, regs[R11]);
-	write_zsreg(channel, R12, regs[R12]);
-	write_zsreg(channel, R13, regs[R13]);
-	write_zsreg(channel, R14, regs[R14]);
-	write_zsreg(channel, R15, regs[R15]);
-	write_zsreg(channel, R3, regs[R3]);
-	write_zsreg(channel, R5, regs[R5]);
-	return;
-}
-
-/* Sets or clears DTR/RTS on the requested line */
-static inline void zs_rtsdtr(struct dec_serial *info, int which, int set)
-{
-        unsigned long flags;
-
-	spin_lock_irqsave(&zs_lock, flags);
-	if (info->zs_channel != info->zs_chan_a) {
-		if (set) {
-			info->zs_chan_a->curregs[5] |= (which & (RTS | DTR));
-		} else {
-			info->zs_chan_a->curregs[5] &= ~(which & (RTS | DTR));
-		}
-		write_zsreg(info->zs_chan_a, 5, info->zs_chan_a->curregs[5]);
-	}
-	spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-/* Utility routines for the Zilog */
-static inline int get_zsbaud(struct dec_serial *ss)
-{
-	struct dec_zschannel *channel = ss->zs_channel;
-	int brg;
-
-	/* The baud rate is split up between two 8-bit registers in
-	 * what is termed 'BRG time constant' format in my docs for
-	 * the chip, it is a function of the clk rate the chip is
-	 * receiving which happens to be constant.
-	 */
-	brg = (read_zsreg(channel, 13) << 8);
-	brg |= read_zsreg(channel, 12);
-	return BRG_TO_BPS(brg, (zs_parms->clock/(ss->clk_divisor)));
-}
-
-/* On receive, this clears errors and the receiver interrupts */
-static inline void rs_recv_clear(struct dec_zschannel *zsc)
-{
-	write_zsreg(zsc, 0, ERR_RES);
-	write_zsreg(zsc, 0, RES_H_IUS); /* XXX this is unnecessary */
-}
-
-/*
- * ----------------------------------------------------------------------
- *
- * Here starts the interrupt handling routines.  All of the following
- * subroutines are declared as inline and are folded into
- * rs_interrupt().  They were separated out for readability's sake.
- *
- * 				- Ted Ts'o (tytso@mit.edu), 7-Mar-93
- * -----------------------------------------------------------------------
- */
-
-/*
- * This routine is used by the interrupt handler to schedule
- * processing in the software interrupt portion of the driver.
- */
-static void rs_sched_event(struct dec_serial *info, int event)
-{
-	info->event |= 1 << event;
-	tasklet_schedule(&info->tlet);
-}
-
-static void receive_chars(struct dec_serial *info)
-{
-	struct tty_struct *tty = info->tty;
-	unsigned char ch, stat, flag;
-
-	while ((read_zsreg(info->zs_channel, R0) & Rx_CH_AV) != 0) {
-
-		stat = read_zsreg(info->zs_channel, R1);
-		ch = read_zsdata(info->zs_channel);
-
-		if (!tty && (!info->hook || !info->hook->rx_char))
-			continue;
-
-		flag = TTY_NORMAL;
-		if (info->tty_break) {
-			info->tty_break = 0;
-			flag = TTY_BREAK;
-			if (info->flags & ZILOG_SAK)
-				do_SAK(tty);
-			/* Ignore the null char got when BREAK is removed.  */
-			if (ch == 0)
-				continue;
-		} else {
-			if (stat & Rx_OVR) {
-				flag = TTY_OVERRUN;
-			} else if (stat & FRM_ERR) {
-				flag = TTY_FRAME;
-			} else if (stat & PAR_ERR) {
-				flag = TTY_PARITY;
-			}
-			if (flag != TTY_NORMAL)
-				/* reset the error indication */
-				write_zsreg(info->zs_channel, R0, ERR_RES);
-		}
-
-#if defined(CONFIG_SERIAL_DEC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) && \
-   !defined(MODULE)
-		if (break_pressed && info->line == zs_console.index) {
-			/* Ignore the null char got when BREAK is removed.  */
-			if (ch == 0)
-				continue;
-			if (time_before(jiffies, break_pressed + HZ * 5)) {
-				handle_sysrq(ch, NULL);
-				break_pressed = 0;
-				continue;
-			}
-			break_pressed = 0;
-		}
-#endif
-
-		if (info->hook && info->hook->rx_char) {
-			(*info->hook->rx_char)(ch, flag);
-			return;
-  		}
-
-		tty_insert_flip_char(tty, ch, flag);
-	}
-	if (tty)
-		tty_flip_buffer_push(tty);
-}
-
-static void transmit_chars(struct dec_serial *info)
-{
-	if ((read_zsreg(info->zs_channel, R0) & Tx_BUF_EMP) == 0)
-		return;
-	info->tx_active = 0;
-
-	if (info->x_char) {
-		/* Send next char */
-		write_zsdata(info->zs_channel, info->x_char);
-		info->x_char = 0;
-		info->tx_active = 1;
-		return;
-	}
-
-	if ((info->xmit_cnt <= 0) || (info->tty && info->tty->stopped)
-	    || info->tx_stopped) {
-		write_zsreg(info->zs_channel, R0, RES_Tx_P);
-		return;
-	}
-	/* Send char */
-	write_zsdata(info->zs_channel, info->xmit_buf[info->xmit_tail++]);
-	info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
-	info->xmit_cnt--;
-	info->tx_active = 1;
-
-	if (info->xmit_cnt < WAKEUP_CHARS)
-		rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
-}
-
-static void status_handle(struct dec_serial *info)
-{
-	unsigned char stat;
-
-	/* Get status from Read Register 0 */
-	stat = read_zsreg(info->zs_channel, R0);
-
-	if ((stat & BRK_ABRT) && !(info->read_reg_zero & BRK_ABRT)) {
-#if defined(CONFIG_SERIAL_DEC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) && \
-   !defined(MODULE)
-		if (info->line == zs_console.index) {
-			if (!break_pressed)
-				break_pressed = jiffies;
-		} else
-#endif
-			info->tty_break = 1;
-	}
-
-	if (info->zs_channel != info->zs_chan_a) {
-
-		/* Check for DCD transitions */
-		if (info->tty && !C_CLOCAL(info->tty) &&
-		    ((stat ^ info->read_reg_zero) & DCD) != 0 ) {
-			if (stat & DCD) {
-				wake_up_interruptible(&info->open_wait);
-			} else {
-				tty_hangup(info->tty);
-			}
-		}
-
-		/* Check for CTS transitions */
-		if (info->tty && C_CRTSCTS(info->tty)) {
-			if ((stat & CTS) != 0) {
-				if (info->tx_stopped) {
-					info->tx_stopped = 0;
-					if (!info->tx_active)
-						transmit_chars(info);
-				}
-			} else {
-				info->tx_stopped = 1;
-			}
-		}
-
-	}
-
-	/* Clear status condition... */
-	write_zsreg(info->zs_channel, R0, RES_EXT_INT);
-	info->read_reg_zero = stat;
-}
-
-/*
- * This is the serial driver's generic interrupt routine
- */
-static irqreturn_t rs_interrupt(int irq, void *dev_id)
-{
-	struct dec_serial *info = (struct dec_serial *) dev_id;
-	irqreturn_t status = IRQ_NONE;
-	unsigned char zs_intreg;
-	int shift;
-
-	/* NOTE: The read register 3, which holds the irq status,
-	 *       does so for both channels on each chip.  Although
-	 *       the status value itself must be read from the A
-	 *       channel and is only valid when read from channel A.
-	 *       Yes... broken hardware...
-	 */
-#define CHAN_IRQMASK (CHBRxIP | CHBTxIP | CHBEXT)
-
-	if (info->zs_chan_a == info->zs_channel)
-		shift = 3;	/* Channel A */
-	else
-		shift = 0;	/* Channel B */
-
-	for (;;) {
-		zs_intreg = read_zsreg(info->zs_chan_a, R3) >> shift;
-		if ((zs_intreg & CHAN_IRQMASK) == 0)
-			break;
-
-		status = IRQ_HANDLED;
-
-		if (zs_intreg & CHBRxIP) {
-			receive_chars(info);
-		}
-		if (zs_intreg & CHBTxIP) {
-			transmit_chars(info);
-		}
-		if (zs_intreg & CHBEXT) {
-			status_handle(info);
-		}
-	}
-
-	/* Why do we need this ? */
-	write_zsreg(info->zs_channel, 0, RES_H_IUS);
-
-	return status;
-}
-
-#ifdef ZS_DEBUG_REGS
-void zs_dump (void) {
-	int i, j;
-	for (i = 0; i < zs_channels_found; i++) {
-		struct dec_zschannel *ch = &zs_channels[i];
-		if ((long)ch->control == UNI_IO_BASE+UNI_SCC1A_CTRL) {
-			for (j = 0; j < 15; j++) {
-				printk("W%d = 0x%x\t",
-				       j, (int)ch->curregs[j]);
-			}
-			for (j = 0; j < 15; j++) {
-				printk("R%d = 0x%x\t",
-				       j, (int)read_zsreg(ch,j));
-			}
-			printk("\n\n");
-		}
-	}
-}
-#endif
-
-/*
- * -------------------------------------------------------------------
- * Here ends the serial interrupt routines.
- * -------------------------------------------------------------------
- */
-
-/*
- * ------------------------------------------------------------
- * rs_stop() and rs_start()
- *
- * This routines are called before setting or resetting tty->stopped.
- * ------------------------------------------------------------
- */
-static void rs_stop(struct tty_struct *tty)
-{
-	struct dec_serial *info = (struct dec_serial *)tty->driver_data;
-	unsigned long flags;
-
-	if (serial_paranoia_check(info, tty->name, "rs_stop"))
-		return;
-
-#if 1
-	spin_lock_irqsave(&zs_lock, flags);
-	if (info->zs_channel->curregs[5] & TxENAB) {
-		info->zs_channel->curregs[5] &= ~TxENAB;
-		write_zsreg(info->zs_channel, 5, info->zs_channel->curregs[5]);
-	}
-	spin_unlock_irqrestore(&zs_lock, flags);
-#endif
-}
-
-static void rs_start(struct tty_struct *tty)
-{
-	struct dec_serial *info = (struct dec_serial *)tty->driver_data;
-	unsigned long flags;
-
-	if (serial_paranoia_check(info, tty->name, "rs_start"))
-		return;
-
-	spin_lock_irqsave(&zs_lock, flags);
-#if 1
-	if (info->xmit_cnt && info->xmit_buf && !(info->zs_channel->curregs[5] & TxENAB)) {
-		info->zs_channel->curregs[5] |= TxENAB;
-		write_zsreg(info->zs_channel, 5, info->zs_channel->curregs[5]);
-	}
-#else
-	if (info->xmit_cnt && info->xmit_buf && !info->tx_active) {
-		transmit_chars(info);
-	}
-#endif
-	spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-/*
- * This routine is used to handle the "bottom half" processing for the
- * serial driver, known also the "software interrupt" processing.
- * This processing is done at the kernel interrupt level, after the
- * rs_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON.  This
- * is where time-consuming activities which can not be done in the
- * interrupt driver proper are done; the interrupt driver schedules
- * them using rs_sched_event(), and they get done here.
- */
-
-static void do_softint(unsigned long private_)
-{
-	struct dec_serial	*info = (struct dec_serial *) private_;
-	struct tty_struct	*tty;
-
-	tty = info->tty;
-	if (!tty)
-		return;
-
-	if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event))
-		tty_wakeup(tty);
-}
-
-static int zs_startup(struct dec_serial * info)
-{
-	unsigned long flags;
-
-	if (info->flags & ZILOG_INITIALIZED)
-		return 0;
-
-	if (!info->xmit_buf) {
-		info->xmit_buf = (unsigned char *) get_zeroed_page(GFP_KERNEL);
-		if (!info->xmit_buf)
-			return -ENOMEM;
-	}
-
-	spin_lock_irqsave(&zs_lock, flags);
-
-#ifdef SERIAL_DEBUG_OPEN
-	printk("starting up ttyS%d (irq %d)...", info->line, info->irq);
-#endif
-
-	/*
-	 * Clear the receive FIFO.
-	 */
-	ZS_CLEARFIFO(info->zs_channel);
-	info->xmit_fifo_size = 1;
-
-	/*
-	 * Clear the interrupt registers.
-	 */
-	write_zsreg(info->zs_channel, R0, ERR_RES);
-	write_zsreg(info->zs_channel, R0, RES_H_IUS);
-
-	/*
-	 * Set the speed of the serial port
-	 */
-	change_speed(info);
-
-	/*
-	 * Turn on RTS and DTR.
-	 */
-	zs_rtsdtr(info, RTS | DTR, 1);
-
-	/*
-	 * Finally, enable sequencing and interrupts
-	 */
-	info->zs_channel->curregs[R1] &= ~RxINT_MASK;
-	info->zs_channel->curregs[R1] |= (RxINT_ALL | TxINT_ENAB |
-					  EXT_INT_ENAB);
-	info->zs_channel->curregs[R3] |= RxENABLE;
-	info->zs_channel->curregs[R5] |= TxENAB;
-	info->zs_channel->curregs[R15] |= (DCDIE | CTSIE | TxUIE | BRKIE);
-	write_zsreg(info->zs_channel, R1, info->zs_channel->curregs[R1]);
-	write_zsreg(info->zs_channel, R3, info->zs_channel->curregs[R3]);
-	write_zsreg(info->zs_channel, R5, info->zs_channel->curregs[R5]);
-	write_zsreg(info->zs_channel, R15, info->zs_channel->curregs[R15]);
-
-	/*
-	 * And clear the interrupt registers again for luck.
-	 */
-	write_zsreg(info->zs_channel, R0, ERR_RES);
-	write_zsreg(info->zs_channel, R0, RES_H_IUS);
-
-	/* Save the current value of RR0 */
-	info->read_reg_zero = read_zsreg(info->zs_channel, R0);
-
-	if (info->tty)
-		clear_bit(TTY_IO_ERROR, &info->tty->flags);
-	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-
-	info->flags |= ZILOG_INITIALIZED;
-	spin_unlock_irqrestore(&zs_lock, flags);
-	return 0;
-}
-
-/*
- * This routine will shutdown a serial port; interrupts are disabled, and
- * DTR is dropped if the hangup on close termio flag is on.
- */
-static void shutdown(struct dec_serial * info)
-{
-	unsigned long	flags;
-
-	if (!(info->flags & ZILOG_INITIALIZED))
-		return;
-
-#ifdef SERIAL_DEBUG_OPEN
-	printk("Shutting down serial port %d (irq %d)....", info->line,
-	       info->irq);
-#endif
-
-	spin_lock_irqsave(&zs_lock, flags);
-
-	if (info->xmit_buf) {
-		free_page((unsigned long) info->xmit_buf);
-		info->xmit_buf = 0;
-	}
-
-	info->zs_channel->curregs[1] = 0;
-	write_zsreg(info->zs_channel, 1, info->zs_channel->curregs[1]);	/* no interrupts */
-
-	info->zs_channel->curregs[3] &= ~RxENABLE;
-	write_zsreg(info->zs_channel, 3, info->zs_channel->curregs[3]);
-
-	info->zs_channel->curregs[5] &= ~TxENAB;
-	write_zsreg(info->zs_channel, 5, info->zs_channel->curregs[5]);
-	if (!info->tty || C_HUPCL(info->tty)) {
-		zs_rtsdtr(info, RTS | DTR, 0);
-	}
-
-	if (info->tty)
-		set_bit(TTY_IO_ERROR, &info->tty->flags);
-
-	info->flags &= ~ZILOG_INITIALIZED;
-	spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-/*
- * This routine is called to set the UART divisor registers to match
- * the specified baud rate for a serial port.
- */
-static void change_speed(struct dec_serial *info)
-{
-	unsigned cflag;
-	int	i;
-	int	brg, bits;
-	unsigned long flags;
-
-	if (!info->hook) {
-		if (!info->tty || !info->tty->termios)
-			return;
-		cflag = info->tty->termios->c_cflag;
-		if (!info->port)
-			return;
-	} else {
-		cflag = info->hook->cflags;
-	}
-
-	i = cflag & CBAUD;
-	if (i & CBAUDEX) {
-		i &= ~CBAUDEX;
-		if (i < 1 || i > 2) {
-			if (!info->hook)
-				info->tty->termios->c_cflag &= ~CBAUDEX;
-			else
-				info->hook->cflags &= ~CBAUDEX;
-		} else
-			i += 15;
-	}
-
-	spin_lock_irqsave(&zs_lock, flags);
-	info->zs_baud = baud_table[i];
-	if (info->zs_baud) {
-		brg = BPS_TO_BRG(info->zs_baud, zs_parms->clock/info->clk_divisor);
-		info->zs_channel->curregs[12] = (brg & 255);
-		info->zs_channel->curregs[13] = ((brg >> 8) & 255);
-		zs_rtsdtr(info, DTR, 1);
-	} else {
-		zs_rtsdtr(info, RTS | DTR, 0);
-		return;
-	}
-
-	/* byte size and parity */
-	info->zs_channel->curregs[3] &= ~RxNBITS_MASK;
-	info->zs_channel->curregs[5] &= ~TxNBITS_MASK;
-	switch (cflag & CSIZE) {
-	case CS5:
-		bits = 7;
-		info->zs_channel->curregs[3] |= Rx5;
-		info->zs_channel->curregs[5] |= Tx5;
-		break;
-	case CS6:
-		bits = 8;
-		info->zs_channel->curregs[3] |= Rx6;
-		info->zs_channel->curregs[5] |= Tx6;
-		break;
-	case CS7:
-		bits = 9;
-		info->zs_channel->curregs[3] |= Rx7;
-		info->zs_channel->curregs[5] |= Tx7;
-		break;
-	case CS8:
-	default: /* defaults to 8 bits */
-		bits = 10;
-		info->zs_channel->curregs[3] |= Rx8;
-		info->zs_channel->curregs[5] |= Tx8;
-		break;
-	}
-
-	info->timeout = ((info->xmit_fifo_size*HZ*bits) / info->zs_baud);
-        info->timeout += HZ/50;         /* Add .02 seconds of slop */
-
-	info->zs_channel->curregs[4] &= ~(SB_MASK | PAR_ENA | PAR_EVEN);
-	if (cflag & CSTOPB) {
-		info->zs_channel->curregs[4] |= SB2;
-	} else {
-		info->zs_channel->curregs[4] |= SB1;
-	}
-	if (cflag & PARENB) {
-		info->zs_channel->curregs[4] |= PAR_ENA;
-	}
-	if (!(cflag & PARODD)) {
-		info->zs_channel->curregs[4] |= PAR_EVEN;
-	}
-
-	if (!(cflag & CLOCAL)) {
-		if (!(info->zs_channel->curregs[15] & DCDIE))
-			info->read_reg_zero = read_zsreg(info->zs_channel, 0);
-		info->zs_channel->curregs[15] |= DCDIE;
-	} else
-		info->zs_channel->curregs[15] &= ~DCDIE;
-	if (cflag & CRTSCTS) {
-		info->zs_channel->curregs[15] |= CTSIE;
-		if ((read_zsreg(info->zs_channel, 0) & CTS) == 0)
-			info->tx_stopped = 1;
-	} else {
-		info->zs_channel->curregs[15] &= ~CTSIE;
-		info->tx_stopped = 0;
-	}
-
-	/* Load up the new values */
-	load_zsregs(info->zs_channel, info->zs_channel->curregs);
-
-	spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-static void rs_flush_chars(struct tty_struct *tty)
-{
-	struct dec_serial *info = (struct dec_serial *)tty->driver_data;
-	unsigned long flags;
-
-	if (serial_paranoia_check(info, tty->name, "rs_flush_chars"))
-		return;
-
-	if (info->xmit_cnt <= 0 || tty->stopped || info->tx_stopped ||
-	    !info->xmit_buf)
-		return;
-
-	/* Enable transmitter */
-	spin_lock_irqsave(&zs_lock, flags);
-	transmit_chars(info);
-	spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-static int rs_write(struct tty_struct * tty,
-		    const unsigned char *buf, int count)
-{
-	int	c, total = 0;
-	struct dec_serial *info = (struct dec_serial *)tty->driver_data;
-	unsigned long flags;
-
-	if (serial_paranoia_check(info, tty->name, "rs_write"))
-		return 0;
-
-	if (!tty || !info->xmit_buf)
-		return 0;
-
-	while (1) {
-		spin_lock_irqsave(&zs_lock, flags);
-		c = min(count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
-				   SERIAL_XMIT_SIZE - info->xmit_head));
-		if (c <= 0)
-			break;
-
-		memcpy(info->xmit_buf + info->xmit_head, buf, c);
-		info->xmit_head = (info->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
-		info->xmit_cnt += c;
-		spin_unlock_irqrestore(&zs_lock, flags);
-		buf += c;
-		count -= c;
-		total += c;
-	}
-
-	if (info->xmit_cnt && !tty->stopped && !info->tx_stopped
-	    && !info->tx_active)
-		transmit_chars(info);
-	spin_unlock_irqrestore(&zs_lock, flags);
-	return total;
-}
-
-static int rs_write_room(struct tty_struct *tty)
-{
-	struct dec_serial *info = (struct dec_serial *)tty->driver_data;
-	int	ret;
-
-	if (serial_paranoia_check(info, tty->name, "rs_write_room"))
-		return 0;
-	ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
-	if (ret < 0)
-		ret = 0;
-	return ret;
-}
-
-static int rs_chars_in_buffer(struct tty_struct *tty)
-{
-	struct dec_serial *info = (struct dec_serial *)tty->driver_data;
-
-	if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
-		return 0;
-	return info->xmit_cnt;
-}
-
-static void rs_flush_buffer(struct tty_struct *tty)
-{
-	struct dec_serial *info = (struct dec_serial *)tty->driver_data;
-
-	if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
-		return;
-	spin_lock_irq(&zs_lock);
-	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-	spin_unlock_irq(&zs_lock);
-	tty_wakeup(tty);
-}
-
-/*
- * ------------------------------------------------------------
- * rs_throttle()
- *
- * This routine is called by the upper-layer tty layer to signal that
- * incoming characters should be throttled.
- * ------------------------------------------------------------
- */
-static void rs_throttle(struct tty_struct * tty)
-{
-	struct dec_serial *info = (struct dec_serial *)tty->driver_data;
-	unsigned long flags;
-
-#ifdef SERIAL_DEBUG_THROTTLE
-	char	buf[64];
-
-	printk("throttle %s: %d....\n", _tty_name(tty, buf),
-	       tty->ldisc.chars_in_buffer(tty));
-#endif
-
-	if (serial_paranoia_check(info, tty->name, "rs_throttle"))
-		return;
-
-	if (I_IXOFF(tty)) {
-		spin_lock_irqsave(&zs_lock, flags);
-		info->x_char = STOP_CHAR(tty);
-		if (!info->tx_active)
-			transmit_chars(info);
-		spin_unlock_irqrestore(&zs_lock, flags);
-	}
-
-	if (C_CRTSCTS(tty)) {
-		zs_rtsdtr(info, RTS, 0);
-	}
-}
-
-static void rs_unthrottle(struct tty_struct * tty)
-{
-	struct dec_serial *info = (struct dec_serial *)tty->driver_data;
-	unsigned long flags;
-
-#ifdef SERIAL_DEBUG_THROTTLE
-	char	buf[64];
-
-	printk("unthrottle %s: %d....\n", _tty_name(tty, buf),
-	       tty->ldisc.chars_in_buffer(tty));
-#endif
-
-	if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
-		return;
-
-	if (I_IXOFF(tty)) {
-		spin_lock_irqsave(&zs_lock, flags);
-		if (info->x_char)
-			info->x_char = 0;
-		else {
-			info->x_char = START_CHAR(tty);
-			if (!info->tx_active)
-				transmit_chars(info);
-		}
-		spin_unlock_irqrestore(&zs_lock, flags);
-	}
-
-	if (C_CRTSCTS(tty)) {
-		zs_rtsdtr(info, RTS, 1);
-	}
-}
-
-/*
- * ------------------------------------------------------------
- * rs_ioctl() and friends
- * ------------------------------------------------------------
- */
-
-static int get_serial_info(struct dec_serial * info,
-			   struct serial_struct * retinfo)
-{
-	struct serial_struct tmp;
-
-	if (!retinfo)
-		return -EFAULT;
-	memset(&tmp, 0, sizeof(tmp));
-	tmp.type = info->type;
-	tmp.line = info->line;
-	tmp.port = info->port;
-	tmp.irq = info->irq;
-	tmp.flags = info->flags;
-	tmp.baud_base = info->baud_base;
-	tmp.close_delay = info->close_delay;
-	tmp.closing_wait = info->closing_wait;
-	tmp.custom_divisor = info->custom_divisor;
-	return copy_to_user(retinfo,&tmp,sizeof(*retinfo)) ? -EFAULT : 0;
-}
-
-static int set_serial_info(struct dec_serial * info,
-			   struct serial_struct * new_info)
-{
-	struct serial_struct new_serial;
-	struct dec_serial old_info;
-	int 			retval = 0;
-
-	if (!new_info)
-		return -EFAULT;
-	copy_from_user(&new_serial,new_info,sizeof(new_serial));
-	old_info = *info;
-
-	if (!capable(CAP_SYS_ADMIN)) {
-		if ((new_serial.baud_base != info->baud_base) ||
-		    (new_serial.type != info->type) ||
-		    (new_serial.close_delay != info->close_delay) ||
-		    ((new_serial.flags & ~ZILOG_USR_MASK) !=
-		     (info->flags & ~ZILOG_USR_MASK)))
-			return -EPERM;
-		info->flags = ((info->flags & ~ZILOG_USR_MASK) |
-			       (new_serial.flags & ZILOG_USR_MASK));
-		info->custom_divisor = new_serial.custom_divisor;
-		goto check_and_exit;
-	}
-
-	if (info->count > 1)
-		return -EBUSY;
-
-	/*
-	 * OK, past this point, all the error checking has been done.
-	 * At this point, we start making changes.....
-	 */
-
-	info->baud_base = new_serial.baud_base;
-	info->flags = ((info->flags & ~ZILOG_FLAGS) |
-			(new_serial.flags & ZILOG_FLAGS));
-	info->type = new_serial.type;
-	info->close_delay = new_serial.close_delay;
-	info->closing_wait = new_serial.closing_wait;
-
-check_and_exit:
-	retval = zs_startup(info);
-	return retval;
-}
-
-/*
- * get_lsr_info - get line status register info
- *
- * Purpose: Let user call ioctl() to get info when the UART physically
- * 	    is emptied.  On bus types like RS485, the transmitter must
- * 	    release the bus after transmitting. This must be done when
- * 	    the transmit shift register is empty, not be done when the
- * 	    transmit holding register is empty.  This functionality
- * 	    allows an RS485 driver to be written in user space.
- */
-static int get_lsr_info(struct dec_serial * info, unsigned int *value)
-{
-	unsigned char status;
-
-	spin_lock(&zs_lock);
-	status = read_zsreg(info->zs_channel, 0);
-	spin_unlock_irq(&zs_lock);
-	put_user(status,value);
-	return 0;
-}
-
-static int rs_tiocmget(struct tty_struct *tty, struct file *file)
-{
-	struct dec_serial * info = (struct dec_serial *)tty->driver_data;
-	unsigned char control, status_a, status_b;
-	unsigned int result;
-
-	if (info->hook)
-		return -ENODEV;
-
-	if (serial_paranoia_check(info, tty->name, __FUNCTION__))
-		return -ENODEV;
-
-	if (tty->flags & (1 << TTY_IO_ERROR))
-		return -EIO;
-
-	if (info->zs_channel == info->zs_chan_a)
-		result = 0;
-	else {
-		spin_lock(&zs_lock);
-		control = info->zs_chan_a->curregs[5];
-		status_a = read_zsreg(info->zs_chan_a, 0);
-		status_b = read_zsreg(info->zs_channel, 0);
-		spin_unlock_irq(&zs_lock);
-		result =  ((control  & RTS) ? TIOCM_RTS: 0)
-			| ((control  & DTR) ? TIOCM_DTR: 0)
-			| ((status_b & DCD) ? TIOCM_CAR: 0)
-			| ((status_a & DCD) ? TIOCM_RNG: 0)
-			| ((status_a & SYNC_HUNT) ? TIOCM_DSR: 0)
-			| ((status_b & CTS) ? TIOCM_CTS: 0);
-	}
-	return result;
-}
-
-static int rs_tiocmset(struct tty_struct *tty, struct file *file,
-                       unsigned int set, unsigned int clear)
-{
-	struct dec_serial * info = (struct dec_serial *)tty->driver_data;
-
-	if (info->hook)
-		return -ENODEV;
-
-	if (serial_paranoia_check(info, tty->name, __FUNCTION__))
-		return -ENODEV;
-
-	if (tty->flags & (1 << TTY_IO_ERROR))
-		return -EIO;
-
-	if (info->zs_channel == info->zs_chan_a)
-		return 0;
-
-	spin_lock(&zs_lock);
-	if (set & TIOCM_RTS)
-		info->zs_chan_a->curregs[5] |= RTS;
-	if (set & TIOCM_DTR)
-		info->zs_chan_a->curregs[5] |= DTR;
-	if (clear & TIOCM_RTS)
-		info->zs_chan_a->curregs[5] &= ~RTS;
-	if (clear & TIOCM_DTR)
-		info->zs_chan_a->curregs[5] &= ~DTR;
-	write_zsreg(info->zs_chan_a, 5, info->zs_chan_a->curregs[5]);
-	spin_unlock_irq(&zs_lock);
-	return 0;
-}
-
-/*
- * rs_break - turn transmit break condition on/off
- */
-static void rs_break(struct tty_struct *tty, int break_state)
-{
-	struct dec_serial *info = (struct dec_serial *) tty->driver_data;
-	unsigned long flags;
-
-	if (serial_paranoia_check(info, tty->name, "rs_break"))
-		return;
-	if (!info->port)
-		return;
-
-	spin_lock_irqsave(&zs_lock, flags);
-	if (break_state == -1)
-		info->zs_channel->curregs[5] |= SND_BRK;
-	else
-		info->zs_channel->curregs[5] &= ~SND_BRK;
-	write_zsreg(info->zs_channel, 5, info->zs_channel->curregs[5]);
-	spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-static int rs_ioctl(struct tty_struct *tty, struct file * file,
-		    unsigned int cmd, unsigned long arg)
-{
-	struct dec_serial * info = (struct dec_serial *)tty->driver_data;
-
-	if (info->hook)
-		return -ENODEV;
-
-	if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
-		return -ENODEV;
-
-	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
-	    (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGWILD)  &&
-	    (cmd != TIOCSERSWILD) && (cmd != TIOCSERGSTRUCT)) {
-		if (tty->flags & (1 << TTY_IO_ERROR))
-		    return -EIO;
-	}
-
-	switch (cmd) {
-	case TIOCGSERIAL:
-		if (!access_ok(VERIFY_WRITE, (void *)arg,
-			       sizeof(struct serial_struct)))
-			return -EFAULT;
-		return get_serial_info(info, (struct serial_struct *)arg);
-
-	case TIOCSSERIAL:
-		return set_serial_info(info, (struct serial_struct *)arg);
-
-	case TIOCSERGETLSR:			/* Get line status register */
-		if (!access_ok(VERIFY_WRITE, (void *)arg,
-			       sizeof(unsigned int)))
-			return -EFAULT;
-		return get_lsr_info(info, (unsigned int *)arg);
-
-	case TIOCSERGSTRUCT:
-		if (!access_ok(VERIFY_WRITE, (void *)arg,
-			       sizeof(struct dec_serial)))
-			return -EFAULT;
-		copy_from_user((struct dec_serial *)arg, info,
-			       sizeof(struct dec_serial));
-		return 0;
-
-	default:
-		return -ENOIOCTLCMD;
-	}
-	return 0;
-}
-
-static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
-{
-	struct dec_serial *info = (struct dec_serial *)tty->driver_data;
-	int was_stopped;
-
-	if (tty->termios->c_cflag == old_termios->c_cflag)
-		return;
-	was_stopped = info->tx_stopped;
-
-	change_speed(info);
-
-	if (was_stopped && !info->tx_stopped)
-		rs_start(tty);
-}
-
-/*
- * ------------------------------------------------------------
- * rs_close()
- *
- * This routine is called when the serial port gets closed.
- * Wait for the last remaining data to be sent.
- * ------------------------------------------------------------
- */
-static void rs_close(struct tty_struct *tty, struct file * filp)
-{
-	struct dec_serial * info = (struct dec_serial *)tty->driver_data;
-	unsigned long flags;
-
-	if (!info || serial_paranoia_check(info, tty->name, "rs_close"))
-		return;
-
-	spin_lock_irqsave(&zs_lock, flags);
-
-	if (tty_hung_up_p(filp)) {
-		spin_unlock_irqrestore(&zs_lock, flags);
-		return;
-	}
-
-#ifdef SERIAL_DEBUG_OPEN
-	printk("rs_close ttyS%d, count = %d\n", info->line, info->count);
-#endif
-	if ((tty->count == 1) && (info->count != 1)) {
-		/*
-		 * Uh, oh.  tty->count is 1, which means that the tty
-		 * structure will be freed.  Info->count should always
-		 * be one in these conditions.  If it's greater than
-		 * one, we've got real problems, since it means the
-		 * serial port won't be shutdown.
-		 */
-		printk("rs_close: bad serial port count; tty->count is 1, "
-		       "info->count is %d\n", info->count);
-		info->count = 1;
-	}
-	if (--info->count < 0) {
-		printk("rs_close: bad serial port count for ttyS%d: %d\n",
-		       info->line, info->count);
-		info->count = 0;
-	}
-	if (info->count) {
-		spin_unlock_irqrestore(&zs_lock, flags);
-		return;
-	}
-	info->flags |= ZILOG_CLOSING;
-	/*
-	 * Now we wait for the transmit buffer to clear; and we notify
-	 * the line discipline to only process XON/XOFF characters.
-	 */
-	tty->closing = 1;
-	if (info->closing_wait != ZILOG_CLOSING_WAIT_NONE)
-		tty_wait_until_sent(tty, info->closing_wait);
-	/*
-	 * At this point we stop accepting input.  To do this, we
-	 * disable the receiver and receive interrupts.
-	 */
-	info->zs_channel->curregs[3] &= ~RxENABLE;
-	write_zsreg(info->zs_channel, 3, info->zs_channel->curregs[3]);
-	info->zs_channel->curregs[1] = 0;	/* disable any rx ints */
-	write_zsreg(info->zs_channel, 1, info->zs_channel->curregs[1]);
-	ZS_CLEARFIFO(info->zs_channel);
-	if (info->flags & ZILOG_INITIALIZED) {
-		/*
-		 * Before we drop DTR, make sure the SCC transmitter
-		 * has completely drained.
-		 */
-		rs_wait_until_sent(tty, info->timeout);
-	}
-
-	shutdown(info);
-	if (tty->driver->flush_buffer)
-		tty->driver->flush_buffer(tty);
-	tty_ldisc_flush(tty);
-	tty->closing = 0;
-	info->event = 0;
-	info->tty = 0;
-	if (info->blocked_open) {
-		if (info->close_delay) {
-			msleep_interruptible(jiffies_to_msecs(info->close_delay));
-		}
-		wake_up_interruptible(&info->open_wait);
-	}
-	info->flags &= ~(ZILOG_NORMAL_ACTIVE|ZILOG_CLOSING);
-	wake_up_interruptible(&info->close_wait);
-	spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-/*
- * rs_wait_until_sent() --- wait until the transmitter is empty
- */
-static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
-{
-	struct dec_serial *info = (struct dec_serial *) tty->driver_data;
-	unsigned long orig_jiffies;
-	int char_time;
-
-	if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent"))
-		return;
-
-	orig_jiffies = jiffies;
-	/*
-	 * Set the check interval to be 1/5 of the estimated time to
-	 * send a single character, and make it at least 1.  The check
-	 * interval should also be less than the timeout.
-	 */
-	char_time = (info->timeout - HZ/50) / info->xmit_fifo_size;
-	char_time = char_time / 5;
-	if (char_time == 0)
-		char_time = 1;
-	if (timeout)
-		char_time = min(char_time, timeout);
-	while ((read_zsreg(info->zs_channel, 1) & Tx_BUF_EMP) == 0) {
-		msleep_interruptible(jiffies_to_msecs(char_time));
-		if (signal_pending(current))
-			break;
-		if (timeout && time_after(jiffies, orig_jiffies + timeout))
-			break;
-	}
-	current->state = TASK_RUNNING;
-}
-
-/*
- * rs_hangup() --- called by tty_hangup() when a hangup is signaled.
- */
-static void rs_hangup(struct tty_struct *tty)
-{
-	struct dec_serial * info = (struct dec_serial *)tty->driver_data;
-
-	if (serial_paranoia_check(info, tty->name, "rs_hangup"))
-		return;
-
-	rs_flush_buffer(tty);
-	shutdown(info);
-	info->event = 0;
-	info->count = 0;
-	info->flags &= ~ZILOG_NORMAL_ACTIVE;
-	info->tty = 0;
-	wake_up_interruptible(&info->open_wait);
-}
-
-/*
- * ------------------------------------------------------------
- * rs_open() and friends
- * ------------------------------------------------------------
- */
-static int block_til_ready(struct tty_struct *tty, struct file * filp,
-			   struct dec_serial *info)
-{
-	DECLARE_WAITQUEUE(wait, current);
-	int		retval;
-	int		do_clocal = 0;
-
-	/*
-	 * If the device is in the middle of being closed, then block
-	 * until it's done, and then try again.
-	 */
-	if (info->flags & ZILOG_CLOSING) {
-		interruptible_sleep_on(&info->close_wait);
-#ifdef SERIAL_DO_RESTART
-		return ((info->flags & ZILOG_HUP_NOTIFY) ?
-			-EAGAIN : -ERESTARTSYS);
-#else
-		return -EAGAIN;
-#endif
-	}
-
-	/*
-	 * If non-blocking mode is set, or the port is not enabled,
-	 * then make the check up front and then exit.
-	 */
-	if ((filp->f_flags & O_NONBLOCK) ||
-	    (tty->flags & (1 << TTY_IO_ERROR))) {
-		info->flags |= ZILOG_NORMAL_ACTIVE;
-		return 0;
-	}
-
-	if (tty->termios->c_cflag & CLOCAL)
-		do_clocal = 1;
-
-	/*
-	 * Block waiting for the carrier detect and the line to become
-	 * free (i.e., not in use by the callout).  While we are in
-	 * this loop, info->count is dropped by one, so that
-	 * rs_close() knows when to free things.  We restore it upon
-	 * exit, either normal or abnormal.
-	 */
-	retval = 0;
-	add_wait_queue(&info->open_wait, &wait);
-#ifdef SERIAL_DEBUG_OPEN
-	printk("block_til_ready before block: ttyS%d, count = %d\n",
-	       info->line, info->count);
-#endif
-	spin_lock(&zs_lock);
-	if (!tty_hung_up_p(filp))
-		info->count--;
-	spin_unlock_irq(&zs_lock);
-	info->blocked_open++;
-	while (1) {
-		spin_lock(&zs_lock);
-		if (tty->termios->c_cflag & CBAUD)
-			zs_rtsdtr(info, RTS | DTR, 1);
-		spin_unlock_irq(&zs_lock);
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (tty_hung_up_p(filp) ||
-		    !(info->flags & ZILOG_INITIALIZED)) {
-#ifdef SERIAL_DO_RESTART
-			if (info->flags & ZILOG_HUP_NOTIFY)
-				retval = -EAGAIN;
-			else
-				retval = -ERESTARTSYS;
-#else
-			retval = -EAGAIN;
-#endif
-			break;
-		}
-		if (!(info->flags & ZILOG_CLOSING) &&
-		    (do_clocal || (read_zsreg(info->zs_channel, 0) & DCD)))
-			break;
-		if (signal_pending(current)) {
-			retval = -ERESTARTSYS;
-			break;
-		}
-#ifdef SERIAL_DEBUG_OPEN
-		printk("block_til_ready blocking: ttyS%d, count = %d\n",
-		       info->line, info->count);
-#endif
-		schedule();
-	}
-	current->state = TASK_RUNNING;
-	remove_wait_queue(&info->open_wait, &wait);
-	if (!tty_hung_up_p(filp))
-		info->count++;
-	info->blocked_open--;
-#ifdef SERIAL_DEBUG_OPEN
-	printk("block_til_ready after blocking: ttyS%d, count = %d\n",
-	       info->line, info->count);
-#endif
-	if (retval)
-		return retval;
-	info->flags |= ZILOG_NORMAL_ACTIVE;
-	return 0;
-}
-
-/*
- * This routine is called whenever a serial port is opened.  It
- * enables interrupts for a serial port, linking in its ZILOG structure into
- * the IRQ chain.   It also performs the serial-specific
- * initialization for the tty structure.
- */
-static int rs_open(struct tty_struct *tty, struct file * filp)
-{
-	struct dec_serial	*info;
-	int 			retval, line;
-
-	line = tty->index;
-	if ((line < 0) || (line >= zs_channels_found))
-		return -ENODEV;
-	info = zs_soft + line;
-
-	if (info->hook)
-		return -ENODEV;
-
-	if (serial_paranoia_check(info, tty->name, "rs_open"))
-		return -ENODEV;
-#ifdef SERIAL_DEBUG_OPEN
-	printk("rs_open %s, count = %d\n", tty->name, info->count);
-#endif
-
-	info->count++;
-	tty->driver_data = info;
-	info->tty = tty;
-
-	/*
-	 * If the port is the middle of closing, bail out now
-	 */
-	if (tty_hung_up_p(filp) ||
-	    (info->flags & ZILOG_CLOSING)) {
-		if (info->flags & ZILOG_CLOSING)
-			interruptible_sleep_on(&info->close_wait);
-#ifdef SERIAL_DO_RESTART
-		return ((info->flags & ZILOG_HUP_NOTIFY) ?
-			-EAGAIN : -ERESTARTSYS);
-#else
-		return -EAGAIN;
-#endif
-	}
-
-	/*
-	 * Start up serial port
-	 */
-	retval = zs_startup(info);
-	if (retval)
-		return retval;
-
-	retval = block_til_ready(tty, filp, info);
-	if (retval) {
-#ifdef SERIAL_DEBUG_OPEN
-		printk("rs_open returning after block_til_ready with %d\n",
-		       retval);
-#endif
-		return retval;
-	}
-
-#ifdef CONFIG_SERIAL_DEC_CONSOLE
-	if (zs_console.cflag && zs_console.index == line) {
-		tty->termios->c_cflag = zs_console.cflag;
-		zs_console.cflag = 0;
-		change_speed(info);
-	}
-#endif
-
-#ifdef SERIAL_DEBUG_OPEN
-	printk("rs_open %s successful...", tty->name);
-#endif
-/* tty->low_latency = 1; */
-	return 0;
-}
-
-/* Finally, routines used to initialize the serial driver. */
-
-static void __init show_serial_version(void)
-{
-	printk("DECstation Z8530 serial driver version 0.09\n");
-}
-
-/*  Initialize Z8530s zs_channels
- */
-
-static void __init probe_sccs(void)
-{
-	struct dec_serial **pp;
-	int i, n, n_chips = 0, n_channels, chip, channel;
-	unsigned long flags;
-
-	/*
-	 * did we get here by accident?
-	 */
-	if(!BUS_PRESENT) {
-		printk("Not on JUNKIO machine, skipping probe_sccs\n");
-		return;
-	}
-
-	switch(mips_machtype) {
-#ifdef CONFIG_MACH_DECSTATION
-	case MACH_DS5000_2X0:
-	case MACH_DS5900:
-		n_chips = 2;
-		zs_parms = &ds_parms;
-		zs_parms->irq0 = dec_interrupt[DEC_IRQ_SCC0];
-		zs_parms->irq1 = dec_interrupt[DEC_IRQ_SCC1];
-		break;
-	case MACH_DS5000_1XX:
-		n_chips = 2;
-		zs_parms = &ds_parms;
-		zs_parms->irq0 = dec_interrupt[DEC_IRQ_SCC0];
-		zs_parms->irq1 = dec_interrupt[DEC_IRQ_SCC1];
-		break;
-	case MACH_DS5000_XX:
-		n_chips = 1;
-		zs_parms = &ds_parms;
-		zs_parms->irq0 = dec_interrupt[DEC_IRQ_SCC0];
-		break;
-#endif
-	default:
-		panic("zs: unsupported bus");
-	}
-	if (!zs_parms)
-		panic("zs: uninitialized parms");
-
-	pp = &zs_chain;
-
-	n_channels = 0;
-
-	for (chip = 0; chip < n_chips; chip++) {
-		for (channel = 0; channel <= 1; channel++) {
-			/*
-			 * The sccs reside on the high byte of the 16 bit IOBUS
-			 */
-			zs_channels[n_channels].control =
-				(volatile void *)CKSEG1ADDR(dec_kn_slot_base +
-			  (0 == chip ? zs_parms->scc0 : zs_parms->scc1) +
-			  (0 == channel ? zs_parms->channel_a_offset :
-			                  zs_parms->channel_b_offset));
-			zs_channels[n_channels].data =
-				zs_channels[n_channels].control + 4;
-
-#ifndef CONFIG_SERIAL_DEC_CONSOLE
-			/*
-			 * We're called early and memory managment isn't up, yet.
-			 * Thus request_region would fail.
-			 */
-			if (!request_region((unsigned long)
-					 zs_channels[n_channels].control,
-					 ZS_CHAN_IO_SIZE, "SCC"))
-				panic("SCC I/O region is not free");
-#endif
-			zs_soft[n_channels].zs_channel = &zs_channels[n_channels];
-			/* HACK alert! */
-			if (!(chip & 1))
-				zs_soft[n_channels].irq = zs_parms->irq0;
-			else
-				zs_soft[n_channels].irq = zs_parms->irq1;
-
-			/*
-			 *  Identification of channel A. Location of channel A
-                         *  inside chip depends on mapping of internal address
-			 *  the chip decodes channels by.
-			 *  CHANNEL_A_NR returns either 0 (in case of
-			 *  DECstations) or 1 (in case of Baget).
-			 */
-			if (CHANNEL_A_NR == channel)
-				zs_soft[n_channels].zs_chan_a =
-				    &zs_channels[n_channels+1-2*CHANNEL_A_NR];
-			else
-				zs_soft[n_channels].zs_chan_a =
-				    &zs_channels[n_channels];
-
-			*pp = &zs_soft[n_channels];
-			pp = &zs_soft[n_channels].zs_next;
-			n_channels++;
-		}
-	}
-
-	*pp = 0;
-	zs_channels_found = n_channels;
-
-	for (n = 0; n < zs_channels_found; n++) {
-		for (i = 0; i < 16; i++) {
-			zs_soft[n].zs_channel->curregs[i] = zs_init_regs[i];
-		}
-	}
-
-	spin_lock_irqsave(&zs_lock, flags);
-	for (n = 0; n < zs_channels_found; n++) {
-		if (n % 2 == 0) {
-			write_zsreg(zs_soft[n].zs_chan_a, R9, FHWRES);
-			udelay(10);
-			write_zsreg(zs_soft[n].zs_chan_a, R9, 0);
-		}
-		load_zsregs(zs_soft[n].zs_channel,
-			    zs_soft[n].zs_channel->curregs);
-	}
-	spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-static const struct tty_operations serial_ops = {
-	.open = rs_open,
-	.close = rs_close,
-	.write = rs_write,
-	.flush_chars = rs_flush_chars,
-	.write_room = rs_write_room,
-	.chars_in_buffer = rs_chars_in_buffer,
-	.flush_buffer = rs_flush_buffer,
-	.ioctl = rs_ioctl,
-	.throttle = rs_throttle,
-	.unthrottle = rs_unthrottle,
-	.set_termios = rs_set_termios,
-	.stop = rs_stop,
-	.start = rs_start,
-	.hangup = rs_hangup,
-	.break_ctl = rs_break,
-	.wait_until_sent = rs_wait_until_sent,
-	.tiocmget = rs_tiocmget,
-	.tiocmset = rs_tiocmset,
-};
-
-/* zs_init inits the driver */
-int __init zs_init(void)
-{
-	int channel, i;
-	struct dec_serial *info;
-
-	if(!BUS_PRESENT)
-		return -ENODEV;
-
-	/* Find out how many Z8530 SCCs we have */
-	if (zs_chain == 0)
-		probe_sccs();
-	serial_driver = alloc_tty_driver(zs_channels_found);
-	if (!serial_driver)
-		return -ENOMEM;
-
-	show_serial_version();
-
-	/* Initialize the tty_driver structure */
-	/* Not all of this is exactly right for us. */
-
-	serial_driver->owner = THIS_MODULE;
-	serial_driver->name = "ttyS";
-	serial_driver->major = TTY_MAJOR;
-	serial_driver->minor_start = 64;
-	serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
-	serial_driver->subtype = SERIAL_TYPE_NORMAL;
-	serial_driver->init_termios = tty_std_termios;
-	serial_driver->init_termios.c_cflag =
-		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
-	serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
-	tty_set_operations(serial_driver, &serial_ops);
-
-	if (tty_register_driver(serial_driver))
-		panic("Couldn't register serial driver");
-
-	for (info = zs_chain, i = 0; info; info = info->zs_next, i++) {
-
-		/* Needed before interrupts are enabled. */
-		info->tty = 0;
-		info->x_char = 0;
-
-		if (info->hook && info->hook->init_info) {
-			(*info->hook->init_info)(info);
-			continue;
-		}
-
-		info->magic = SERIAL_MAGIC;
-		info->port = (int) info->zs_channel->control;
-		info->line = i;
-		info->custom_divisor = 16;
-		info->close_delay = 50;
-		info->closing_wait = 3000;
-		info->event = 0;
-		info->count = 0;
-		info->blocked_open = 0;
-		tasklet_init(&info->tlet, do_softint, (unsigned long)info);
-		init_waitqueue_head(&info->open_wait);
-		init_waitqueue_head(&info->close_wait);
-		printk("ttyS%02d at 0x%08x (irq = %d) is a Z85C30 SCC\n",
-		       info->line, info->port, info->irq);
-		tty_register_device(serial_driver, info->line, NULL);
-
-	}
-
-	for (channel = 0; channel < zs_channels_found; ++channel) {
-		zs_soft[channel].clk_divisor = 16;
-		zs_soft[channel].zs_baud = get_zsbaud(&zs_soft[channel]);
-
-		if (request_irq(zs_soft[channel].irq, rs_interrupt, IRQF_SHARED,
-				"scc", &zs_soft[channel]))
-			printk(KERN_ERR "decserial: can't get irq %d\n",
-			       zs_soft[channel].irq);
-
-		if (zs_soft[channel].hook) {
-			zs_startup(&zs_soft[channel]);
-			if (zs_soft[channel].hook->init_channel)
-				(*zs_soft[channel].hook->init_channel)
-					(&zs_soft[channel]);
-		}
-	}
-
-	return 0;
-}
-
-/*
- * polling I/O routines
- */
-static int zs_poll_tx_char(void *handle, unsigned char ch)
-{
-	struct dec_serial *info = handle;
-	struct dec_zschannel *chan = info->zs_channel;
-	int    ret;
-
-	if(chan) {
-		int loops = 10000;
-
-		while (loops && !(read_zsreg(chan, 0) & Tx_BUF_EMP))
-			loops--;
-
-		if (loops) {
-			write_zsdata(chan, ch);
-			ret = 0;
-		} else
-			ret = -EAGAIN;
-
-		return ret;
-	} else
-		return -ENODEV;
-}
-
-static int zs_poll_rx_char(void *handle)
-{
-	struct dec_serial *info = handle;
-        struct dec_zschannel *chan = info->zs_channel;
-        int    ret;
-
-	if(chan) {
-                int loops = 10000;
-
-		while (loops && !(read_zsreg(chan, 0) & Rx_CH_AV))
-			loops--;
-
-                if (loops)
-                        ret = read_zsdata(chan);
-                else
-                        ret = -EAGAIN;
-
-		return ret;
-	} else
-		return -ENODEV;
-}
-
-int register_zs_hook(unsigned int channel, struct dec_serial_hook *hook)
-{
-	struct dec_serial *info = &zs_soft[channel];
-
-	if (info->hook) {
-		printk("%s: line %d has already a hook registered\n",
-		       __FUNCTION__, channel);
-
-		return 0;
-	} else {
-		hook->poll_rx_char = zs_poll_rx_char;
-		hook->poll_tx_char = zs_poll_tx_char;
-		info->hook = hook;
-
-		return 1;
-	}
-}
-
-int unregister_zs_hook(unsigned int channel)
-{
-	struct dec_serial *info = &zs_soft[channel];
-
-        if (info->hook) {
-                info->hook = NULL;
-                return 1;
-        } else {
-                printk("%s: trying to unregister hook on line %d,"
-                       " but none is registered\n", __FUNCTION__, channel);
-                return 0;
-        }
-}
-
-/*
- * ------------------------------------------------------------
- * Serial console driver
- * ------------------------------------------------------------
- */
-#ifdef CONFIG_SERIAL_DEC_CONSOLE
-
-
-/*
- *	Print a string to the serial port trying not to disturb
- *	any possible real use of the port...
- */
-static void serial_console_write(struct console *co, const char *s,
-				 unsigned count)
-{
-	struct dec_serial *info;
-	int i;
-
-	info = zs_soft + co->index;
-
-	for (i = 0; i < count; i++, s++) {
-		if(*s == '\n')
-			zs_poll_tx_char(info, '\r');
-		zs_poll_tx_char(info, *s);
-	}
-}
-
-static struct tty_driver *serial_console_device(struct console *c, int *index)
-{
-	*index = c->index;
-	return serial_driver;
-}
-
-/*
- *	Setup initial baud/bits/parity. We do two things here:
- *	- construct a cflag setting for the first rs_open()
- *	- initialize the serial port
- *	Return non-zero if we didn't find a serial port.
- */
-static int __init serial_console_setup(struct console *co, char *options)
-{
-	struct dec_serial *info;
-	int baud = 9600;
-	int bits = 8;
-	int parity = 'n';
-	int cflag = CREAD | HUPCL | CLOCAL;
-	int clk_divisor = 16;
-	int brg;
-	char *s;
-	unsigned long flags;
-
-	if(!BUS_PRESENT)
-		return -ENODEV;
-
-	info = zs_soft + co->index;
-
-	if (zs_chain == 0)
-		probe_sccs();
-
-	info->is_cons = 1;
-
-	if (options) {
-		baud = simple_strtoul(options, NULL, 10);
-		s = options;
-		while(*s >= '0' && *s <= '9')
-			s++;
-		if (*s)
-			parity = *s++;
-		if (*s)
-			bits   = *s - '0';
-	}
-
-	/*
-	 *	Now construct a cflag setting.
-	 */
-	switch(baud) {
-	case 1200:
-		cflag |= B1200;
-		break;
-	case 2400:
-		cflag |= B2400;
-		break;
-	case 4800:
-		cflag |= B4800;
-		break;
-	case 19200:
-		cflag |= B19200;
-		break;
-	case 38400:
-		cflag |= B38400;
-		break;
-	case 57600:
-		cflag |= B57600;
-		break;
-	case 115200:
-		cflag |= B115200;
-		break;
-	case 9600:
-	default:
-		cflag |= B9600;
-		/*
-		 * Set this to a sane value to prevent a divide error.
-		 */
-		baud  = 9600;
-		break;
-	}
-	switch(bits) {
-	case 7:
-		cflag |= CS7;
-		break;
-	default:
-	case 8:
-		cflag |= CS8;
-		break;
-	}
-	switch(parity) {
-	case 'o': case 'O':
-		cflag |= PARODD;
-		break;
-	case 'e': case 'E':
-		cflag |= PARENB;
-		break;
-	}
-	co->cflag = cflag;
-
-	spin_lock_irqsave(&zs_lock, flags);
-
-	/*
-	 * Set up the baud rate generator.
-	 */
-	brg = BPS_TO_BRG(baud, zs_parms->clock / clk_divisor);
-	info->zs_channel->curregs[R12] = (brg & 255);
-	info->zs_channel->curregs[R13] = ((brg >> 8) & 255);
-
-	/*
-	 * Set byte size and parity.
-	 */
-	if (bits == 7) {
-		info->zs_channel->curregs[R3] |= Rx7;
-		info->zs_channel->curregs[R5] |= Tx7;
-	} else {
-		info->zs_channel->curregs[R3] |= Rx8;
-		info->zs_channel->curregs[R5] |= Tx8;
-	}
-	if (cflag & PARENB) {
-		info->zs_channel->curregs[R4] |= PAR_ENA;
-	}
-	if (!(cflag & PARODD)) {
-		info->zs_channel->curregs[R4] |= PAR_EVEN;
-	}
-	info->zs_channel->curregs[R4] |= SB1;
-
-	/*
-	 * Turn on RTS and DTR.
-	 */
-	zs_rtsdtr(info, RTS | DTR, 1);
-
-	/*
-	 * Finally, enable sequencing.
-	 */
-	info->zs_channel->curregs[R3] |= RxENABLE;
-	info->zs_channel->curregs[R5] |= TxENAB;
-
-	/*
-	 * Clear the interrupt registers.
-	 */
-	write_zsreg(info->zs_channel, R0, ERR_RES);
-	write_zsreg(info->zs_channel, R0, RES_H_IUS);
-
-	/*
-	 * Load up the new values.
-	 */
-	load_zsregs(info->zs_channel, info->zs_channel->curregs);
-
-	/* Save the current value of RR0 */
-	info->read_reg_zero = read_zsreg(info->zs_channel, R0);
-
-	zs_soft[co->index].clk_divisor = clk_divisor;
-	zs_soft[co->index].zs_baud = get_zsbaud(&zs_soft[co->index]);
-
-	spin_unlock_irqrestore(&zs_lock, flags);
-
-	return 0;
-}
-
-static struct console zs_console = {
-	.name		= "ttyS",
-	.write		= serial_console_write,
-	.device		= serial_console_device,
-	.setup		= serial_console_setup,
-	.flags		= CON_PRINTBUFFER,
-	.index		= -1,
-};
-
-/*
- *	Register console.
- */
-void __init zs_serial_console_init(void)
-{
-	register_console(&zs_console);
-}
-#endif /* ifdef CONFIG_SERIAL_DEC_CONSOLE */
-
-#ifdef CONFIG_KGDB
-struct dec_zschannel *zs_kgdbchan;
-static unsigned char scc_inittab[] = {
-	9,  0x80,	/* reset A side (CHRA) */
-	13, 0,		/* set baud rate divisor */
-	12, 1,
-	14, 1,		/* baud rate gen enable, src=rtxc (BRENABL) */
-	11, 0x50,	/* clocks = br gen (RCBR | TCBR) */
-	5,  0x6a,	/* tx 8 bits, assert RTS (Tx8 | TxENAB | RTS) */
-	4,  0x44,	/* x16 clock, 1 stop (SB1 | X16CLK)*/
-	3,  0xc1,	/* rx enable, 8 bits (RxENABLE | Rx8)*/
-};
-
-/* These are for receiving and sending characters under the kgdb
- * source level kernel debugger.
- */
-void putDebugChar(char kgdb_char)
-{
-	struct dec_zschannel *chan = zs_kgdbchan;
-	while ((read_zsreg(chan, 0) & Tx_BUF_EMP) == 0)
-		RECOVERY_DELAY;
-	write_zsdata(chan, kgdb_char);
-}
-char getDebugChar(void)
-{
-	struct dec_zschannel *chan = zs_kgdbchan;
-	while((read_zsreg(chan, 0) & Rx_CH_AV) == 0)
-		eieio(); /*barrier();*/
-	return read_zsdata(chan);
-}
-void kgdb_interruptible(int yes)
-{
-	struct dec_zschannel *chan = zs_kgdbchan;
-	int one, nine;
-	nine = read_zsreg(chan, 9);
-	if (yes == 1) {
-		one = EXT_INT_ENAB|RxINT_ALL;
-		nine |= MIE;
-		printk("turning serial ints on\n");
-	} else {
-		one = RxINT_DISAB;
-		nine &= ~MIE;
-		printk("turning serial ints off\n");
-	}
-	write_zsreg(chan, 1, one);
-	write_zsreg(chan, 9, nine);
-}
-
-static int kgdbhook_init_channel(void *handle)
-{
-	return 0;
-}
-
-static void kgdbhook_init_info(void *handle)
-{
-}
-
-static void kgdbhook_rx_char(void *handle, unsigned char ch, unsigned char fl)
-{
-	struct dec_serial *info = handle;
-
-	if (fl != TTY_NORMAL)
-		return;
-	if (ch == 0x03 || ch == '$')
-		breakpoint();
-}
-
-/* This sets up the serial port we're using, and turns on
- * interrupts for that channel, so kgdb is usable once we're done.
- */
-static inline void kgdb_chaninit(struct dec_zschannel *ms, int intson, int bps)
-{
-	int brg;
-	int i, x;
-	volatile char *sccc = ms->control;
-	brg = BPS_TO_BRG(bps, zs_parms->clock/16);
-	printk("setting bps on kgdb line to %d [brg=%x]\n", bps, brg);
-	for (i = 20000; i != 0; --i) {
-		x = *sccc; eieio();
-	}
-	for (i = 0; i < sizeof(scc_inittab); ++i) {
-		write_zsreg(ms, scc_inittab[i], scc_inittab[i+1]);
-		i++;
-	}
-}
-/* This is called at boot time to prime the kgdb serial debugging
- * serial line.  The 'tty_num' argument is 0 for /dev/ttya and 1
- * for /dev/ttyb which is determined in setup_arch() from the
- * boot command line flags.
- */
-struct dec_serial_hook zs_kgdbhook = {
-	.init_channel	= kgdbhook_init_channel,
-	.init_info	= kgdbhook_init_info,
-	.rx_char	= kgdbhook_rx_char,
-	.cflags		= B38400 | CS8 | CLOCAL,
-};
-
-void __init zs_kgdb_hook(int tty_num)
-{
-	/* Find out how many Z8530 SCCs we have */
-	if (zs_chain == 0)
-		probe_sccs();
-	zs_soft[tty_num].zs_channel = &zs_channels[tty_num];
-	zs_kgdbchan = zs_soft[tty_num].zs_channel;
-	zs_soft[tty_num].change_needed = 0;
-	zs_soft[tty_num].clk_divisor = 16;
-	zs_soft[tty_num].zs_baud = 38400;
- 	zs_soft[tty_num].hook = &zs_kgdbhook; /* This runs kgdb */
-	/* Turn on transmitter/receiver at 8-bits/char */
-        kgdb_chaninit(zs_soft[tty_num].zs_channel, 1, 38400);
-	printk("KGDB: on channel %d initialized\n", tty_num);
-	set_debug_traps(); /* init stub */
-}
-#endif /* ifdef CONFIG_KGDB */
diff --git a/drivers/tc/zs.h b/drivers/tc/zs.h
deleted file mode 100644
index 1351220..0000000
--- a/drivers/tc/zs.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * drivers/tc/zs.h: Definitions for the DECstation Z85C30 serial driver.
- *
- * Adapted from drivers/sbus/char/sunserial.h by Paul Mackerras.
- * Adapted from drivers/macintosh/macserial.h by Harald Koerfgen.
- *
- * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au)
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 2004, 2005  Maciej W. Rozycki
- */
-#ifndef _DECSERIAL_H
-#define _DECSERIAL_H
-
-#include <asm/dec/serial.h>
-
-#define NUM_ZSREGS 16
-
-struct serial_struct {
-	int	type;
-	int	line;
-	int	port;
-	int	irq;
-	int	flags;
-	int	xmit_fifo_size;
-	int	custom_divisor;
-	int	baud_base;
-	unsigned short	close_delay;
-	char	reserved_char[2];
-	int	hub6;
-	unsigned short	closing_wait; /* time to wait before closing */
-	unsigned short	closing_wait2; /* no longer used... */
-	int	reserved[4];
-};
-
-/*
- * For the close wait times, 0 means wait forever for serial port to
- * flush its output.  65535 means don't wait at all.
- */
-#define ZILOG_CLOSING_WAIT_INF	0
-#define ZILOG_CLOSING_WAIT_NONE	65535
-
-/*
- * Definitions for ZILOG_struct (and serial_struct) flags field
- */
-#define ZILOG_HUP_NOTIFY 0x0001 /* Notify getty on hangups and closes
-				   on the callout port */
-#define ZILOG_FOURPORT  0x0002	/* Set OU1, OUT2 per AST Fourport settings */
-#define ZILOG_SAK	0x0004	/* Secure Attention Key (Orange book) */
-#define ZILOG_SPLIT_TERMIOS 0x0008 /* Separate termios for dialin/callout */
-
-#define ZILOG_SPD_MASK	0x0030
-#define ZILOG_SPD_HI	0x0010	/* Use 56000 instead of 38400 bps */
-
-#define ZILOG_SPD_VHI	0x0020  /* Use 115200 instead of 38400 bps */
-#define ZILOG_SPD_CUST	0x0030  /* Use user-specified divisor */
-
-#define ZILOG_SKIP_TEST	0x0040 /* Skip UART test during autoconfiguration */
-#define ZILOG_AUTO_IRQ  0x0080 /* Do automatic IRQ during autoconfiguration */
-#define ZILOG_SESSION_LOCKOUT 0x0100 /* Lock out cua opens based on session */
-#define ZILOG_PGRP_LOCKOUT    0x0200 /* Lock out cua opens based on pgrp */
-#define ZILOG_CALLOUT_NOHUP   0x0400 /* Don't do hangups for cua device */
-
-#define ZILOG_FLAGS	0x0FFF	/* Possible legal ZILOG flags */
-#define ZILOG_USR_MASK 0x0430	/* Legal flags that non-privileged
-				 * users can set or reset */
-
-/* Internal flags used only by kernel/chr_drv/serial.c */
-#define ZILOG_INITIALIZED	0x80000000 /* Serial port was initialized */
-#define ZILOG_CALLOUT_ACTIVE	0x40000000 /* Call out device is active */
-#define ZILOG_NORMAL_ACTIVE	0x20000000 /* Normal device is active */
-#define ZILOG_BOOT_AUTOCONF	0x10000000 /* Autoconfigure port on bootup */
-#define ZILOG_CLOSING		0x08000000 /* Serial port is closing */
-#define ZILOG_CTS_FLOW		0x04000000 /* Do CTS flow control */
-#define ZILOG_CHECK_CD		0x02000000 /* i.e., CLOCAL */
-
-/* Software state per channel */
-
-#ifdef __KERNEL__
-/*
- * This is our internal structure for each serial port's state.
- *
- * Many fields are paralleled by the structure used by the serial_struct
- * structure.
- *
- * For definitions of the flags field, see tty.h
- */
-
-struct dec_zschannel {
-	volatile unsigned char *control;
-	volatile unsigned char *data;
-
-	/* Current write register values */
-	unsigned char curregs[NUM_ZSREGS];
-};
-
-struct dec_serial {
-	struct dec_serial	*zs_next;	/* For IRQ servicing chain.  */
-	struct dec_zschannel	*zs_channel;	/* Channel registers.  */
-	struct dec_zschannel	*zs_chan_a;	/* A side registers.  */
-	unsigned char		read_reg_zero;
-
-	struct dec_serial_hook	*hook;		/* Hook on this channel.  */
-	int			tty_break;	/* Set on BREAK condition.  */
-	int			is_cons;	/* Is this our console.  */
-	int			tx_active;	/* Char is being xmitted.  */
-	int			tx_stopped;	/* Output is suspended.  */
-
-	/*
-	 * We need to know the current clock divisor
-	 * to read the bps rate the chip has currently loaded.
-	 */
-	int			clk_divisor;	/* May be 1, 16, 32, or 64.  */
-	int			zs_baud;
-
-	char			change_needed;
-
-	int			magic;
-	int			baud_base;
-	int			port;
-	int			irq;
-	int			flags; 		/* Defined in tty.h.  */
-	int			type; 		/* UART type.  */
-	struct tty_struct 	*tty;
-	int			read_status_mask;
-	int			ignore_status_mask;
-	int			timeout;
-	int			xmit_fifo_size;
-	int			custom_divisor;
-	int			x_char;		/* XON/XOFF character.  */
-	int			close_delay;
-	unsigned short		closing_wait;
-	unsigned short		closing_wait2;
-	unsigned long		event;
-	unsigned long		last_active;
-	int			line;
-	int			count;		/* # of fds on device.  */
-	int			blocked_open;	/* # of blocked opens.  */
-	unsigned char 		*xmit_buf;
-	int			xmit_head;
-	int			xmit_tail;
-	int			xmit_cnt;
-	struct tasklet_struct	tlet;
-	wait_queue_head_t	open_wait;
-	wait_queue_head_t	close_wait;
-};
-
-
-#define SERIAL_MAGIC 0x5301
-
-/*
- * The size of the serial xmit buffer is 1 page, or 4096 bytes
- */
-#define SERIAL_XMIT_SIZE 4096
-
-/*
- * Events are used to schedule things to happen at timer-interrupt
- * time, instead of at rs interrupt time.
- */
-#define RS_EVENT_WRITE_WAKEUP	0
-
-#endif /* __KERNEL__ */
-
-/* Conversion routines to/from brg time constants from/to bits
- * per second.
- */
-#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
-#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
-
-/* The Zilog register set */
-
-#define	FLAG	0x7e
-
-/* Write Register 0 */
-#define	R0	0		/* Register selects */
-#define	R1	1
-#define	R2	2
-#define	R3	3
-#define	R4	4
-#define	R5	5
-#define	R6	6
-#define	R7	7
-#define	R8	8
-#define	R9	9
-#define	R10	10
-#define	R11	11
-#define	R12	12
-#define	R13	13
-#define	R14	14
-#define	R15	15
-
-#define	NULLCODE	0	/* Null Code */
-#define	POINT_HIGH	0x8	/* Select upper half of registers */
-#define	RES_EXT_INT	0x10	/* Reset Ext. Status Interrupts */
-#define	SEND_ABORT	0x18	/* HDLC Abort */
-#define	RES_RxINT_FC	0x20	/* Reset RxINT on First Character */
-#define	RES_Tx_P	0x28	/* Reset TxINT Pending */
-#define	ERR_RES		0x30	/* Error Reset */
-#define	RES_H_IUS	0x38	/* Reset highest IUS */
-
-#define	RES_Rx_CRC	0x40	/* Reset Rx CRC Checker */
-#define	RES_Tx_CRC	0x80	/* Reset Tx CRC Checker */
-#define	RES_EOM_L	0xC0	/* Reset EOM latch */
-
-/* Write Register 1 */
-
-#define	EXT_INT_ENAB	0x1	/* Ext Int Enable */
-#define	TxINT_ENAB	0x2	/* Tx Int Enable */
-#define	PAR_SPEC	0x4	/* Parity is special condition */
-
-#define	RxINT_DISAB	0	/* Rx Int Disable */
-#define	RxINT_FCERR	0x8	/* Rx Int on First Character Only or Error */
-#define	RxINT_ALL	0x10	/* Int on all Rx Characters or error */
-#define	RxINT_ERR	0x18	/* Int on error only */
-#define	RxINT_MASK	0x18
-
-#define	WT_RDY_RT	0x20	/* Wait/Ready on R/T */
-#define	WT_FN_RDYFN	0x40	/* Wait/FN/Ready FN */
-#define	WT_RDY_ENAB	0x80	/* Wait/Ready Enable */
-
-/* Write Register #2 (Interrupt Vector) */
-
-/* Write Register 3 */
-
-#define	RxENABLE	0x1	/* Rx Enable */
-#define	SYNC_L_INH	0x2	/* Sync Character Load Inhibit */
-#define	ADD_SM		0x4	/* Address Search Mode (SDLC) */
-#define	RxCRC_ENAB	0x8	/* Rx CRC Enable */
-#define	ENT_HM		0x10	/* Enter Hunt Mode */
-#define	AUTO_ENAB	0x20	/* Auto Enables */
-#define	Rx5		0x0	/* Rx 5 Bits/Character */
-#define	Rx7		0x40	/* Rx 7 Bits/Character */
-#define	Rx6		0x80	/* Rx 6 Bits/Character */
-#define	Rx8		0xc0	/* Rx 8 Bits/Character */
-#define RxNBITS_MASK	0xc0
-
-/* Write Register 4 */
-
-#define	PAR_ENA		0x1	/* Parity Enable */
-#define	PAR_EVEN	0x2	/* Parity Even/Odd* */
-
-#define	SYNC_ENAB	0	/* Sync Modes Enable */
-#define	SB1		0x4	/* 1 stop bit/char */
-#define	SB15		0x8	/* 1.5 stop bits/char */
-#define	SB2		0xc	/* 2 stop bits/char */
-#define SB_MASK		0xc
-
-#define	MONSYNC		0	/* 8 Bit Sync character */
-#define	BISYNC		0x10	/* 16 bit sync character */
-#define	SDLC		0x20	/* SDLC Mode (01111110 Sync Flag) */
-#define	EXTSYNC		0x30	/* External Sync Mode */
-
-#define	X1CLK		0x0	/* x1 clock mode */
-#define	X16CLK		0x40	/* x16 clock mode */
-#define	X32CLK		0x80	/* x32 clock mode */
-#define	X64CLK		0xC0	/* x64 clock mode */
-#define XCLK_MASK	0xC0
-
-/* Write Register 5 */
-
-#define	TxCRC_ENAB	0x1	/* Tx CRC Enable */
-#define	RTS		0x2	/* RTS */
-#define	SDLC_CRC	0x4	/* SDLC/CRC-16 */
-#define	TxENAB		0x8	/* Tx Enable */
-#define	SND_BRK		0x10	/* Send Break */
-#define	Tx5		0x0	/* Tx 5 bits (or less)/character */
-#define	Tx7		0x20	/* Tx 7 bits/character */
-#define	Tx6		0x40	/* Tx 6 bits/character */
-#define	Tx8		0x60	/* Tx 8 bits/character */
-#define TxNBITS_MASK	0x60
-#define	DTR		0x80	/* DTR */
-
-/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
-
-/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
-
-/* Write Register 8 (transmit buffer) */
-
-/* Write Register 9 (Master interrupt control) */
-#define	VIS	1	/* Vector Includes Status */
-#define	NV	2	/* No Vector */
-#define	DLC	4	/* Disable Lower Chain */
-#define	MIE	8	/* Master Interrupt Enable */
-#define	STATHI	0x10	/* Status high */
-#define	SOFTACK	0x20	/* Software Interrupt Acknowledge */
-#define	NORESET	0	/* No reset on write to R9 */
-#define	CHRB	0x40	/* Reset channel B */
-#define	CHRA	0x80	/* Reset channel A */
-#define	FHWRES	0xc0	/* Force hardware reset */
-
-/* Write Register 10 (misc control bits) */
-#define	BIT6	1	/* 6 bit/8bit sync */
-#define	LOOPMODE 2	/* SDLC Loop mode */
-#define	ABUNDER	4	/* Abort/flag on SDLC xmit underrun */
-#define	MARKIDLE 8	/* Mark/flag on idle */
-#define	GAOP	0x10	/* Go active on poll */
-#define	NRZ	0	/* NRZ mode */
-#define	NRZI	0x20	/* NRZI mode */
-#define	FM1	0x40	/* FM1 (transition = 1) */
-#define	FM0	0x60	/* FM0 (transition = 0) */
-#define	CRCPS	0x80	/* CRC Preset I/O */
-
-/* Write Register 11 (Clock Mode control) */
-#define	TRxCXT	0	/* TRxC = Xtal output */
-#define	TRxCTC	1	/* TRxC = Transmit clock */
-#define	TRxCBR	2	/* TRxC = BR Generator Output */
-#define	TRxCDP	3	/* TRxC = DPLL output */
-#define	TRxCOI	4	/* TRxC O/I */
-#define	TCRTxCP	0	/* Transmit clock = RTxC pin */
-#define	TCTRxCP	8	/* Transmit clock = TRxC pin */
-#define	TCBR	0x10	/* Transmit clock = BR Generator output */
-#define	TCDPLL	0x18	/* Transmit clock = DPLL output */
-#define	RCRTxCP	0	/* Receive clock = RTxC pin */
-#define	RCTRxCP	0x20	/* Receive clock = TRxC pin */
-#define	RCBR	0x40	/* Receive clock = BR Generator output */
-#define	RCDPLL	0x60	/* Receive clock = DPLL output */
-#define	RTxCX	0x80	/* RTxC Xtal/No Xtal */
-
-/* Write Register 12 (lower byte of baud rate generator time constant) */
-
-/* Write Register 13 (upper byte of baud rate generator time constant) */
-
-/* Write Register 14 (Misc control bits) */
-#define	BRENABL	1	/* Baud rate generator enable */
-#define	BRSRC	2	/* Baud rate generator source */
-#define	DTRREQ	4	/* DTR/Request function */
-#define	AUTOECHO 8	/* Auto Echo */
-#define	LOOPBAK	0x10	/* Local loopback */
-#define	SEARCH	0x20	/* Enter search mode */
-#define	RMC	0x40	/* Reset missing clock */
-#define	DISDPLL	0x60	/* Disable DPLL */
-#define	SSBR	0x80	/* Set DPLL source = BR generator */
-#define	SSRTxC	0xa0	/* Set DPLL source = RTxC */
-#define	SFMM	0xc0	/* Set FM mode */
-#define	SNRZI	0xe0	/* Set NRZI mode */
-
-/* Write Register 15 (external/status interrupt control) */
-#define	ZCIE	2	/* Zero count IE */
-#define	DCDIE	8	/* DCD IE */
-#define	SYNCIE	0x10	/* Sync/hunt IE */
-#define	CTSIE	0x20	/* CTS IE */
-#define	TxUIE	0x40	/* Tx Underrun/EOM IE */
-#define	BRKIE	0x80	/* Break/Abort IE */
-
-
-/* Read Register 0 */
-#define	Rx_CH_AV	0x1	/* Rx Character Available */
-#define	ZCOUNT		0x2	/* Zero count */
-#define	Tx_BUF_EMP	0x4	/* Tx Buffer empty */
-#define	DCD		0x8	/* DCD */
-#define	SYNC_HUNT	0x10	/* Sync/hunt */
-#define	CTS		0x20	/* CTS */
-#define	TxEOM		0x40	/* Tx underrun */
-#define	BRK_ABRT	0x80	/* Break/Abort */
-
-/* Read Register 1 */
-#define	ALL_SNT		0x1	/* All sent */
-/* Residue Data for 8 Rx bits/char programmed */
-#define	RES3		0x8	/* 0/3 */
-#define	RES4		0x4	/* 0/4 */
-#define	RES5		0xc	/* 0/5 */
-#define	RES6		0x2	/* 0/6 */
-#define	RES7		0xa	/* 0/7 */
-#define	RES8		0x6	/* 0/8 */
-#define	RES18		0xe	/* 1/8 */
-#define	RES28		0x0	/* 2/8 */
-/* Special Rx Condition Interrupts */
-#define	PAR_ERR		0x10	/* Parity error */
-#define	Rx_OVR		0x20	/* Rx Overrun Error */
-#define	FRM_ERR		0x40	/* CRC/Framing Error */
-#define	END_FR		0x80	/* End of Frame (SDLC) */
-
-/* Read Register 2 (channel b only) - Interrupt vector */
-
-/* Read Register 3 (interrupt pending register) ch a only */
-#define	CHBEXT	0x1		/* Channel B Ext/Stat IP */
-#define	CHBTxIP	0x2		/* Channel B Tx IP */
-#define	CHBRxIP	0x4		/* Channel B Rx IP */
-#define	CHAEXT	0x8		/* Channel A Ext/Stat IP */
-#define	CHATxIP	0x10		/* Channel A Tx IP */
-#define	CHARxIP	0x20		/* Channel A Rx IP */
-
-/* Read Register 8 (receive data register) */
-
-/* Read Register 10  (misc status bits) */
-#define	ONLOOP	2		/* On loop */
-#define	LOOPSEND 0x10		/* Loop sending */
-#define	CLK2MIS	0x40		/* Two clocks missing */
-#define	CLK1MIS	0x80		/* One clock missing */
-
-/* Read Register 12 (lower byte of baud rate generator constant) */
-
-/* Read Register 13 (upper byte of baud rate generator constant) */
-
-/* Read Register 15 (value of WR 15) */
-
-/* Misc macros */
-#define ZS_CLEARERR(channel)	(write_zsreg(channel, 0, ERR_RES))
-#define ZS_CLEARFIFO(channel)	do { volatile unsigned char garbage; \
-				     garbage = read_zsdata(channel); \
-				     garbage = read_zsdata(channel); \
-				     garbage = read_zsdata(channel); \
-				} while(0)
-
-#endif /* !(_DECSERIAL_H) */
diff --git a/drivers/telephony/Kconfig b/drivers/telephony/Kconfig
index 8f530e6..5f98f67 100644
--- a/drivers/telephony/Kconfig
+++ b/drivers/telephony/Kconfig
@@ -19,6 +19,7 @@
 
 config PHONE_IXJ
 	tristate "QuickNet Internet LineJack/PhoneJack support"
+	depends on ISA || PCI
 	---help---
 	  Say M if you have a telephony card manufactured by Quicknet
 	  Technologies, Inc.  These include the Internet PhoneJACK and
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index c7b0a35..49cd979 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -3453,7 +3453,6 @@
 {
 	int cnt, frame_count, dly;
 	IXJ_WORD dat;
-	BYTES blankword;
 
 	frame_count = 0;
 	if(j->flags.cidplay) {
@@ -3501,6 +3500,8 @@
 		}
 		if (frame_count >= 1) {
 			if (j->ver.low == 0x12 && j->play_mode && j->flags.play_first_frame) {
+				BYTES blankword;
+
 				switch (j->play_mode) {
 				case PLAYBACK_MODE_ULAW:
 				case PLAYBACK_MODE_ALAW:
@@ -3508,6 +3509,7 @@
 					break;
 				case PLAYBACK_MODE_8LINEAR:
 				case PLAYBACK_MODE_16LINEAR:
+				default:
 					blankword.low = blankword.high = 0x00;
 					break;
 				case PLAYBACK_MODE_8LINEAR_WSS:
@@ -3531,6 +3533,8 @@
 				j->flags.play_first_frame = 0;
 			} else	if (j->play_codec == G723_63 && j->flags.play_first_frame) {
 				for (cnt = 0; cnt < 24; cnt++) {
+					BYTES blankword;
+
 					if(cnt == 12) {
 						blankword.low = 0x02;
 						blankword.high = 0x00;
@@ -4868,6 +4872,7 @@
 		bytes.high = 0xB0 + cr;
 		break;
 	case SOP_PU_PULSEDIALING:
+	default:
 		bytes.high = 0xF0 + cr;
 		break;
 	}
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c
index 3e658dc..ff9a29b 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/telephony/ixj_pcmcia.c
@@ -45,11 +45,10 @@
 	p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
 	p_dev->io.IOAddrLines = 3;
 	p_dev->conf.IntType = INT_MEMORY_AND_IO;
-	p_dev->priv = kmalloc(sizeof(struct ixj_info_t), GFP_KERNEL);
+	p_dev->priv = kzalloc(sizeof(struct ixj_info_t), GFP_KERNEL);
 	if (!p_dev->priv) {
 		return -ENOMEM;
 	}
-	memset(p_dev->priv, 0, sizeof(struct ixj_info_t));
 
 	return ixj_config(p_dev);
 }
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
new file mode 100644
index 0000000..b778ed71
--- /dev/null
+++ b/drivers/uio/Kconfig
@@ -0,0 +1,29 @@
+menu "Userspace I/O"
+	depends on !S390
+
+config UIO
+	tristate "Userspace I/O drivers"
+	default n
+	help
+	  Enable this to allow the userspace driver core code to be
+	  built.  This code allows userspace programs easy access to
+	  kernel interrupts and memory locations, allowing some drivers
+	  to be written in userspace.  Note that a small kernel driver
+	  is also required for interrupt handling to work properly.
+
+	  If you don't know what to do here, say N.
+
+config UIO_CIF
+	tristate "generic Hilscher CIF Card driver"
+	depends on UIO && PCI
+	default n
+	help
+	  Driver for Hilscher CIF DeviceNet and Profibus cards.  This
+	  driver requires a userspace component that handles all of the
+	  heavy lifting and can be found at:
+	  	http://www.osadl.org/projects/downloads/UIO/user/cif-*
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called uio_cif.
+
+endmenu
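
The help text above describes the UIO split: a small in-kernel stub registers the device's interrupt and memory regions, and the real driver logic lives in userspace. Purely as an illustration, a minimal stub might look like the sketch below; the device name, register address and IRQ number are hypothetical placeholders, and it assumes the uio_register_device()/uio_unregister_device() wrappers declared in <linux/uio_driver.h> (the former is used the same way by uio_cif.c further down).

	/* Hypothetical minimal UIO stub; all hardware details are placeholders. */
	#include <linux/module.h>
	#include <linux/interrupt.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/uio_driver.h>

	#define MY_REGS_PHYS	0xd0000000	/* assumed register window */
	#define MY_REGS_SIZE	0x1000
	#define MY_IRQ		42

	static irqreturn_t my_handler(int irq, struct uio_info *info)
	{
		/* acknowledge/mask the interrupt in hardware here */
		return IRQ_HANDLED;
	}

	static struct uio_info my_uio_info = {
		.name		= "my_uio",
		.version	= "0.1",
		.irq		= MY_IRQ,
		.handler	= my_handler,
	};

	static struct platform_device *my_pdev;

	static int __init my_uio_init(void)
	{
		int ret;

		my_pdev = platform_device_register_simple("my_uio", -1, NULL, 0);
		if (IS_ERR(my_pdev))
			return PTR_ERR(my_pdev);

		my_uio_info.mem[0].addr = MY_REGS_PHYS;
		my_uio_info.mem[0].size = MY_REGS_SIZE;
		my_uio_info.mem[0].memtype = UIO_MEM_PHYS;

		ret = uio_register_device(&my_pdev->dev, &my_uio_info);
		if (ret)
			platform_device_unregister(my_pdev);
		return ret;
	}

	static void __exit my_uio_exit(void)
	{
		uio_unregister_device(&my_uio_info);
		platform_device_unregister(my_pdev);
	}

	module_init(my_uio_init);
	module_exit(my_uio_exit);
	MODULE_LICENSE("GPL");
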
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
new file mode 100644
index 0000000..7fecfb4
--- /dev/null
+++ b/drivers/uio/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_UIO)	+= uio.o
+obj-$(CONFIG_UIO_CIF)	+= uio_cif.o
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
new file mode 100644
index 0000000..865f32b6
--- /dev/null
+++ b/drivers/uio/uio.c
@@ -0,0 +1,701 @@
+/*
+ * drivers/uio/uio.c
+ *
+ * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
+ * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de>
+ * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
+ *
+ * Userspace IO
+ *
+ * Base Functions
+ *
+ * Licensed under the GPLv2 only.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <linux/kobject.h>
+#include <linux/uio_driver.h>
+
+#define UIO_MAX_DEVICES 255
+
+struct uio_device {
+	struct module		*owner;
+	struct device		*dev;
+	int			minor;
+	atomic_t		event;
+	struct fasync_struct	*async_queue;
+	wait_queue_head_t	wait;
+	int			vma_count;
+	struct uio_info		*info;
+	struct kset 		map_attr_kset;
+};
+
+static int uio_major;
+static DEFINE_IDR(uio_idr);
+static struct file_operations uio_fops;
+
+/* UIO class infrastructure */
+static struct uio_class {
+	struct kref kref;
+	struct class *class;
+} *uio_class;
+
+/*
+ * attributes
+ */
+
+static struct attribute attr_addr = {
+	.name  = "addr",
+	.mode  = S_IRUGO,
+};
+
+static struct attribute attr_size = {
+	.name  = "size",
+	.mode  = S_IRUGO,
+};
+
+static struct attribute* map_attrs[] = {
+	&attr_addr, &attr_size, NULL
+};
+
+static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
+			     char *buf)
+{
+	struct uio_mem *mem = container_of(kobj, struct uio_mem, kobj);
+
+	if (strncmp(attr->name,"addr",4) == 0)
+		return sprintf(buf, "0x%lx\n", mem->addr);
+
+	if (strncmp(attr->name,"size",4) == 0)
+		return sprintf(buf, "0x%lx\n", mem->size);
+
+	return -ENODEV;
+}
+
+static void map_attr_release(struct kobject *kobj)
+{
+	/* TODO ??? */
+}
+
+static struct sysfs_ops map_attr_ops = {
+	.show  = map_attr_show,
+};
+
+static struct kobj_type map_attr_type = {
+	.release	= map_attr_release,
+	.sysfs_ops	= &map_attr_ops,
+	.default_attrs	= map_attrs,
+};
+
+static ssize_t show_name(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct uio_device *idev = dev_get_drvdata(dev);
+	if (idev)
+		return sprintf(buf, "%s\n", idev->info->name);
+	else
+		return -ENODEV;
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
+static ssize_t show_version(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct uio_device *idev = dev_get_drvdata(dev);
+	if (idev)
+		return sprintf(buf, "%s\n", idev->info->version);
+	else
+		return -ENODEV;
+}
+static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
+
+static ssize_t show_event(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct uio_device *idev = dev_get_drvdata(dev);
+	if (idev)
+		return sprintf(buf, "%u\n",
+				(unsigned int)atomic_read(&idev->event));
+	else
+		return -ENODEV;
+}
+static DEVICE_ATTR(event, S_IRUGO, show_event, NULL);
+
+static struct attribute *uio_attrs[] = {
+	&dev_attr_name.attr,
+	&dev_attr_version.attr,
+	&dev_attr_event.attr,
+	NULL,
+};
+
+static struct attribute_group uio_attr_grp = {
+	.attrs = uio_attrs,
+};
+
+/*
+ * device functions
+ */
+static int uio_dev_add_attributes(struct uio_device *idev)
+{
+	int ret;
+	int mi;
+	int map_found = 0;
+	struct uio_mem *mem;
+
+	ret = sysfs_create_group(&idev->dev->kobj, &uio_attr_grp);
+	if (ret)
+		goto err_group;
+
+	for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
+		mem = &idev->info->mem[mi];
+		if (mem->size == 0)
+			break;
+		if (!map_found) {
+			map_found = 1;
+			kobject_set_name(&idev->map_attr_kset.kobj,"maps");
+			idev->map_attr_kset.ktype = &map_attr_type;
+			idev->map_attr_kset.kobj.parent = &idev->dev->kobj;
+			ret = kset_register(&idev->map_attr_kset);
+			if (ret)
+				goto err_remove_group;
+		}
+		kobject_init(&mem->kobj);
+		kobject_set_name(&mem->kobj,"map%d",mi);
+		mem->kobj.parent = &idev->map_attr_kset.kobj;
+		mem->kobj.kset = &idev->map_attr_kset;
+		ret = kobject_add(&mem->kobj);
+		if (ret)
+			goto err_remove_maps;
+	}
+
+	return 0;
+
+err_remove_maps:
+	for (mi--; mi>=0; mi--) {
+		mem = &idev->info->mem[mi];
+		kobject_unregister(&mem->kobj);
+	}
+	kset_unregister(&idev->map_attr_kset); /* Needed ? */
+err_remove_group:
+	sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
+err_group:
+	dev_err(idev->dev, "error creating sysfs files (%d)\n", ret);
+	return ret;
+}
+
+static void uio_dev_del_attributes(struct uio_device *idev)
+{
+	int mi;
+	struct uio_mem *mem;
+	for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
+		mem = &idev->info->mem[mi];
+		if (mem->size == 0)
+			break;
+		kobject_unregister(&mem->kobj);
+	}
+	kset_unregister(&idev->map_attr_kset);
+	sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
+}
+
+static int uio_get_minor(struct uio_device *idev)
+{
+	static DEFINE_MUTEX(minor_lock);
+	int retval = -ENOMEM;
+	int id;
+
+	mutex_lock(&minor_lock);
+	if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
+		goto exit;
+
+	retval = idr_get_new(&uio_idr, idev, &id);
+	if (retval < 0) {
+		if (retval == -EAGAIN)
+			retval = -ENOMEM;
+		goto exit;
+	}
+	idev->minor = id & MAX_ID_MASK;
+exit:
+	mutex_unlock(&minor_lock);
+	return retval;
+}
+
+static void uio_free_minor(struct uio_device *idev)
+{
+	idr_remove(&uio_idr, idev->minor);
+}
+
+/**
+ * uio_event_notify - trigger an interrupt event
+ * @info: UIO device capabilities
+ */
+void uio_event_notify(struct uio_info *info)
+{
+	struct uio_device *idev = info->uio_dev;
+
+	atomic_inc(&idev->event);
+	wake_up_interruptible(&idev->wait);
+	kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
+}
+EXPORT_SYMBOL_GPL(uio_event_notify);
+
+/**
+ * uio_interrupt - hardware interrupt handler
+ * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
+ * @dev_id: Pointer to the device's uio_device structure
+ */
+static irqreturn_t uio_interrupt(int irq, void *dev_id)
+{
+	struct uio_device *idev = (struct uio_device *)dev_id;
+	irqreturn_t ret = idev->info->handler(irq, idev->info);
+
+	if (ret == IRQ_HANDLED)
+		uio_event_notify(idev->info);
+
+	return ret;
+}
+
+struct uio_listener {
+	struct uio_device *dev;
+	s32 event_count;
+};
+
+static int uio_open(struct inode *inode, struct file *filep)
+{
+	struct uio_device *idev;
+	struct uio_listener *listener;
+	int ret = 0;
+
+	idev = idr_find(&uio_idr, iminor(inode));
+	if (!idev)
+		return -ENODEV;
+
+	listener = kmalloc(sizeof(*listener), GFP_KERNEL);
+	if (!listener)
+		return -ENOMEM;
+
+	listener->dev = idev;
+	listener->event_count = atomic_read(&idev->event);
+	filep->private_data = listener;
+
+	if (idev->info->open) {
+		if (!try_module_get(idev->owner))
+			return -ENODEV;
+		ret = idev->info->open(idev->info, inode);
+		module_put(idev->owner);
+	}
+
+	if (ret)
+		kfree(listener);
+
+	return ret;
+}
+
+static int uio_fasync(int fd, struct file *filep, int on)
+{
+	struct uio_listener *listener = filep->private_data;
+	struct uio_device *idev = listener->dev;
+
+	return fasync_helper(fd, filep, on, &idev->async_queue);
+}
+
+static int uio_release(struct inode *inode, struct file *filep)
+{
+	int ret = 0;
+	struct uio_listener *listener = filep->private_data;
+	struct uio_device *idev = listener->dev;
+
+	if (idev->info->release) {
+		if (!try_module_get(idev->owner))
+			return -ENODEV;
+		ret = idev->info->release(idev->info, inode);
+		module_put(idev->owner);
+	}
+	if (filep->f_flags & FASYNC)
+		ret = uio_fasync(-1, filep, 0);
+	kfree(listener);
+	return ret;
+}
+
+static unsigned int uio_poll(struct file *filep, poll_table *wait)
+{
+	struct uio_listener *listener = filep->private_data;
+	struct uio_device *idev = listener->dev;
+
+	if (idev->info->irq == UIO_IRQ_NONE)
+		return -EIO;
+
+	poll_wait(filep, &idev->wait, wait);
+	if (listener->event_count != atomic_read(&idev->event))
+		return POLLIN | POLLRDNORM;
+	return 0;
+}
+
+static ssize_t uio_read(struct file *filep, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	struct uio_listener *listener = filep->private_data;
+	struct uio_device *idev = listener->dev;
+	DECLARE_WAITQUEUE(wait, current);
+	ssize_t retval;
+	s32 event_count;
+
+	if (idev->info->irq == UIO_IRQ_NONE)
+		return -EIO;
+
+	if (count != sizeof(s32))
+		return -EINVAL;
+
+	add_wait_queue(&idev->wait, &wait);
+
+	do {
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		event_count = atomic_read(&idev->event);
+		if (event_count != listener->event_count) {
+			if (copy_to_user(buf, &event_count, count))
+				retval = -EFAULT;
+			else {
+				listener->event_count = event_count;
+				retval = count;
+			}
+			break;
+		}
+
+		if (filep->f_flags & O_NONBLOCK) {
+			retval = -EAGAIN;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			retval = -ERESTARTSYS;
+			break;
+		}
+		schedule();
+	} while (1);
+
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&idev->wait, &wait);
+
+	return retval;
+}
+
+static int uio_find_mem_index(struct vm_area_struct *vma)
+{
+	int mi;
+	struct uio_device *idev = vma->vm_private_data;
+
+	for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
+		if (idev->info->mem[mi].size == 0)
+			return -1;
+		if (vma->vm_pgoff == mi)
+			return mi;
+	}
+	return -1;
+}
+
+static void uio_vma_open(struct vm_area_struct *vma)
+{
+	struct uio_device *idev = vma->vm_private_data;
+	idev->vma_count++;
+}
+
+static void uio_vma_close(struct vm_area_struct *vma)
+{
+	struct uio_device *idev = vma->vm_private_data;
+	idev->vma_count--;
+}
+
+static struct page *uio_vma_nopage(struct vm_area_struct *vma,
+				   unsigned long address, int *type)
+{
+	struct uio_device *idev = vma->vm_private_data;
+	struct page* page = NOPAGE_SIGBUS;
+
+	int mi = uio_find_mem_index(vma);
+	if (mi < 0)
+		return page;
+
+	if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
+		page = virt_to_page(idev->info->mem[mi].addr);
+	else
+		page = vmalloc_to_page((void*)idev->info->mem[mi].addr);
+	get_page(page);
+	if (type)
+		*type = VM_FAULT_MINOR;
+	return page;
+}
+
+static struct vm_operations_struct uio_vm_ops = {
+	.open = uio_vma_open,
+	.close = uio_vma_close,
+	.nopage = uio_vma_nopage,
+};
+
+static int uio_mmap_physical(struct vm_area_struct *vma)
+{
+	struct uio_device *idev = vma->vm_private_data;
+	int mi = uio_find_mem_index(vma);
+	if (mi < 0)
+		return -EINVAL;
+
+	vma->vm_flags |= VM_IO | VM_RESERVED;
+
+	return remap_pfn_range(vma,
+			       vma->vm_start,
+			       idev->info->mem[mi].addr >> PAGE_SHIFT,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+
+static int uio_mmap_logical(struct vm_area_struct *vma)
+{
+	vma->vm_flags |= VM_RESERVED;
+	vma->vm_ops = &uio_vm_ops;
+	uio_vma_open(vma);
+	return 0;
+}
+
+static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+	struct uio_listener *listener = filep->private_data;
+	struct uio_device *idev = listener->dev;
+	int mi;
+	unsigned long requested_pages, actual_pages;
+	int ret = 0;
+
+	if (vma->vm_end < vma->vm_start)
+		return -EINVAL;
+
+	vma->vm_private_data = idev;
+
+	mi = uio_find_mem_index(vma);
+	if (mi < 0)
+		return -EINVAL;
+
+	requested_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	actual_pages = (idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
+	if (requested_pages > actual_pages)
+		return -EINVAL;
+
+	if (idev->info->mmap) {
+		if (!try_module_get(idev->owner))
+			return -ENODEV;
+		ret = idev->info->mmap(idev->info, vma);
+		module_put(idev->owner);
+		return ret;
+	}
+
+	switch (idev->info->mem[mi].memtype) {
+		case UIO_MEM_PHYS:
+			return uio_mmap_physical(vma);
+		case UIO_MEM_LOGICAL:
+		case UIO_MEM_VIRTUAL:
+			return uio_mmap_logical(vma);
+		default:
+			return -EINVAL;
+	}
+}
+
+static struct file_operations uio_fops = {
+	.owner		= THIS_MODULE,
+	.open		= uio_open,
+	.release	= uio_release,
+	.read		= uio_read,
+	.mmap		= uio_mmap,
+	.poll		= uio_poll,
+	.fasync		= uio_fasync,
+};
+
+static int uio_major_init(void)
+{
+	uio_major = register_chrdev(0, "uio", &uio_fops);
+	if (uio_major < 0)
+		return uio_major;
+	return 0;
+}
+
+static void uio_major_cleanup(void)
+{
+	unregister_chrdev(uio_major, "uio");
+}
+
+static int init_uio_class(void)
+{
+	int ret = 0;
+
+	if (uio_class != NULL) {
+		kref_get(&uio_class->kref);
+		goto exit;
+	}
+
+	/* This is the first time in here, set everything up properly */
+	ret = uio_major_init();
+	if (ret)
+		goto exit;
+
+	uio_class = kzalloc(sizeof(*uio_class), GFP_KERNEL);
+	if (!uio_class) {
+		ret = -ENOMEM;
+		goto err_kzalloc;
+	}
+
+	kref_init(&uio_class->kref);
+	uio_class->class = class_create(THIS_MODULE, "uio");
+	if (IS_ERR(uio_class->class)) {
+		ret = PTR_ERR(uio_class->class);
+		printk(KERN_ERR "class_create failed for uio\n");
+		goto err_class_create;
+	}
+	return 0;
+
+err_class_create:
+	kfree(uio_class);
+	uio_class = NULL;
+err_kzalloc:
+	uio_major_cleanup();
+exit:
+	return ret;
+}
+
+static void release_uio_class(struct kref *kref)
+{
+	/* Ok, we cheat as we know we only have one uio_class */
+	class_destroy(uio_class->class);
+	kfree(uio_class);
+	uio_major_cleanup();
+	uio_class = NULL;
+}
+
+static void uio_class_destroy(void)
+{
+	if (uio_class)
+		kref_put(&uio_class->kref, release_uio_class);
+}
+
+/**
+ * uio_register_device - register a new userspace IO device
+ * @owner:	module that creates the new device
+ * @parent:	parent device
+ * @info:	UIO device capabilities
+ *
+ * returns zero on success or a negative error code.
+ */
+int __uio_register_device(struct module *owner,
+			  struct device *parent,
+			  struct uio_info *info)
+{
+	struct uio_device *idev;
+	int ret = 0;
+
+	if (!parent || !info || !info->name || !info->version)
+		return -EINVAL;
+
+	info->uio_dev = NULL;
+
+	ret = init_uio_class();
+	if (ret)
+		return ret;
+
+	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
+	if (!idev) {
+		ret = -ENOMEM;
+		goto err_kzalloc;
+	}
+
+	idev->owner = owner;
+	idev->info = info;
+	init_waitqueue_head(&idev->wait);
+	atomic_set(&idev->event, 0);
+
+	ret = uio_get_minor(idev);
+	if (ret)
+		goto err_get_minor;
+
+	idev->dev = device_create(uio_class->class, parent,
+				  MKDEV(uio_major, idev->minor),
+				  "uio%d", idev->minor);
+	if (IS_ERR(idev->dev)) {
+		printk(KERN_ERR "UIO: device register failed\n");
+		ret = PTR_ERR(idev->dev);
+		goto err_device_create;
+	}
+	dev_set_drvdata(idev->dev, idev);
+
+	ret = uio_dev_add_attributes(idev);
+	if (ret)
+		goto err_uio_dev_add_attributes;
+
+	info->uio_dev = idev;
+
+	if (idev->info->irq >= 0) {
+		ret = request_irq(idev->info->irq, uio_interrupt,
+				  idev->info->irq_flags, idev->info->name, idev);
+		if (ret)
+			goto err_request_irq;
+	}
+
+	return 0;
+
+err_request_irq:
+	uio_dev_del_attributes(idev);
+err_uio_dev_add_attributes:
+	device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
+err_device_create:
+	uio_free_minor(idev);
+err_get_minor:
+	kfree(idev);
+err_kzalloc:
+	uio_class_destroy();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__uio_register_device);
+
+/**
+ * uio_unregister_device - unregister a userspace IO device
+ * @info:	UIO device capabilities
+ *
+ */
+void uio_unregister_device(struct uio_info *info)
+{
+	struct uio_device *idev;
+
+	if (!info || !info->uio_dev)
+		return;
+
+	idev = info->uio_dev;
+
+	uio_free_minor(idev);
+
+	if (info->irq >= 0)
+		free_irq(info->irq, idev);
+
+	uio_dev_del_attributes(idev);
+
+	dev_set_drvdata(idev->dev, NULL);
+	device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
+	kfree(idev);
+	uio_class_destroy();
+
+	return;
+}
+EXPORT_SYMBOL_GPL(uio_unregister_device);
+
+static int __init uio_init(void)
+{
+	return 0;
+}
+
+static void __exit uio_exit(void)
+{
+}
+
+module_init(uio_init)
+module_exit(uio_exit)
+MODULE_LICENSE("GPL v2");
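
The file_operations above define the whole userspace ABI of the core: uio_read() blocks until the interrupt counter changes and hands back the new count as a 4-byte value, uio_poll() reports readability on the same condition, and uio_mmap() selects memory region N when the mmap offset is N pages (uio_find_mem_index() matches vm_pgoff against the map index). A hedged userspace sketch of that usage; /dev/uio0, the use of map 0 and the assumption that map 0 is at least one page long are for illustration only.

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/mman.h>

	int main(void)
	{
		int32_t icount;
		void *regs;
		int fd = open("/dev/uio0", O_RDWR);	/* assumed device node */

		if (fd < 0)
			return 1;

		/* Offset 0 * pagesize selects mem[0] in uio_mmap(). */
		regs = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0 * (off_t)getpagesize());
		if (regs == MAP_FAILED)
			return 1;

		for (;;) {
			/* Blocks until uio_event_notify() bumps the counter. */
			if (read(fd, &icount, sizeof(icount)) != sizeof(icount))
				break;
			printf("interrupt #%d, regs mapped at %p\n", icount, regs);
		}
		return 0;
	}
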
diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c
new file mode 100644
index 0000000..838bae4
--- /dev/null
+++ b/drivers/uio/uio_cif.c
@@ -0,0 +1,156 @@
+/*
+ * UIO Hilscher CIF card driver
+ *
+ * (C) 2007 Hans J. Koch <hjk@linutronix.de>
+ * Original code (C) 2005 Benedikt Spranger <b.spranger@linutronix.de>
+ *
+ * Licensed under GPL version 2 only.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/uio_driver.h>
+
+#include <asm/io.h>
+
+#ifndef PCI_DEVICE_ID_PLX_9030
+#define PCI_DEVICE_ID_PLX_9030	0x9030
+#endif
+
+#define PLX9030_INTCSR		0x4C
+#define INTSCR_INT1_ENABLE	0x01
+#define INTSCR_INT1_STATUS	0x04
+#define INT1_ENABLED_AND_ACTIVE	(INTSCR_INT1_ENABLE | INTSCR_INT1_STATUS)
+
+#define PCI_SUBVENDOR_ID_PEP	0x1518
+#define CIF_SUBDEVICE_PROFIBUS	0x430
+#define CIF_SUBDEVICE_DEVICENET	0x432
+
+
+static irqreturn_t hilscher_handler(int irq, struct uio_info *dev_info)
+{
+	void __iomem *plx_intscr = dev_info->mem[0].internal_addr
+					+ PLX9030_INTCSR;
+
+	if ((ioread8(plx_intscr) & INT1_ENABLED_AND_ACTIVE)
+	    != INT1_ENABLED_AND_ACTIVE)
+		return IRQ_NONE;
+
+	/* Disable interrupt */
+	iowrite8(ioread8(plx_intscr) & ~INTSCR_INT1_ENABLE, plx_intscr);
+	return IRQ_HANDLED;
+}
+
+static int __devinit hilscher_pci_probe(struct pci_dev *dev,
+					const struct pci_device_id *id)
+{
+	struct uio_info *info;
+
+	info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	if (pci_enable_device(dev))
+		goto out_free;
+
+	if (pci_request_regions(dev, "hilscher"))
+		goto out_disable;
+
+	info->mem[0].addr = pci_resource_start(dev, 0);
+	if (!info->mem[0].addr)
+		goto out_release;
+	info->mem[0].internal_addr = ioremap(pci_resource_start(dev, 0),
+					     pci_resource_len(dev, 0));
+	if (!info->mem[0].internal_addr)
+		goto out_release;
+
+	info->mem[0].size = pci_resource_len(dev, 0);
+	info->mem[0].memtype = UIO_MEM_PHYS;
+	info->mem[1].addr = pci_resource_start(dev, 2);
+	info->mem[1].size = pci_resource_len(dev, 2);
+	info->mem[1].memtype = UIO_MEM_PHYS;
+	switch (id->subdevice) {
+		case CIF_SUBDEVICE_PROFIBUS:
+			info->name = "CIF_Profibus";
+			break;
+		case CIF_SUBDEVICE_DEVICENET:
+			info->name = "CIF_Devicenet";
+			break;
+		default:
+			info->name = "CIF_???";
+	}
+	info->version = "0.0.1";
+	info->irq = dev->irq;
+	info->irq_flags = IRQF_DISABLED | IRQF_SHARED;
+	info->handler = hilscher_handler;
+
+	if (uio_register_device(&dev->dev, info))
+		goto out_unmap;
+
+	pci_set_drvdata(dev, info);
+
+	return 0;
+out_unmap:
+	iounmap(info->mem[0].internal_addr);
+out_release:
+	pci_release_regions(dev);
+out_disable:
+	pci_disable_device(dev);
+out_free:
+	kfree (info);
+	return -ENODEV;
+}
+
+static void hilscher_pci_remove(struct pci_dev *dev)
+{
+	struct uio_info *info = pci_get_drvdata(dev);
+
+	uio_unregister_device(info);
+	pci_release_regions(dev);
+	pci_disable_device(dev);
+	pci_set_drvdata(dev, NULL);
+	iounmap(info->mem[0].internal_addr);
+
+	kfree (info);
+}
+
+static struct pci_device_id hilscher_pci_ids[] = {
+	{
+		.vendor =	PCI_VENDOR_ID_PLX,
+		.device =	PCI_DEVICE_ID_PLX_9030,
+		.subvendor =	PCI_SUBVENDOR_ID_PEP,
+		.subdevice =	CIF_SUBDEVICE_PROFIBUS,
+	},
+	{
+		.vendor =	PCI_VENDOR_ID_PLX,
+		.device =	PCI_DEVICE_ID_PLX_9030,
+		.subvendor =	PCI_SUBVENDOR_ID_PEP,
+		.subdevice =	CIF_SUBDEVICE_DEVICENET,
+	},
+	{ 0, }
+};
+
+static struct pci_driver hilscher_pci_driver = {
+	.name = "hilscher",
+	.id_table = hilscher_pci_ids,
+	.probe = hilscher_pci_probe,
+	.remove = hilscher_pci_remove,
+};
+
+static int __init hilscher_init_module(void)
+{
+	return pci_register_driver(&hilscher_pci_driver);
+}
+
+static void __exit hilscher_exit_module(void)
+{
+	pci_unregister_driver(&hilscher_pci_driver);
+}
+
+module_init(hilscher_init_module);
+module_exit(hilscher_exit_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Hans J. Koch, Benedikt Spranger");
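
The userspace component mentioned in the Kconfig help needs to know how large each mapping is before calling mmap(); uio_dev_add_attributes() in uio.c exports exactly that through the "maps" kset, so each region should appear as maps/map%d/{addr,size} under the uio%d device. Assuming the class device surfaces as /sys/class/uio/uio0 (the instance number depends on probe order), a small sketch:

	#include <stdio.h>

	/* Returns the size of UIO map 0, or 0 on error; the path is an assumption. */
	static unsigned long uio_map0_size(void)
	{
		unsigned long size = 0;
		FILE *f = fopen("/sys/class/uio/uio0/maps/map0/size", "r");

		if (!f)
			return 0;
		if (fscanf(f, "%lx", &size) != 1)	/* attribute prints "0x%lx\n" */
			size = 0;
		fclose(f);
		return size;
	}

	int main(void)
	{
		printf("map0 size: 0x%lx\n", uio_map0_size());
		return 0;
	}
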
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 071b967..7dd7354 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -16,7 +16,7 @@
 	boolean
 	default y if USB_ARCH_HAS_OHCI
 	default y if USB_ARCH_HAS_EHCI
-	default y if PCMCIA				# sl811_cs
+	default y if PCMCIA && !M32R			# sl811_cs
 	default y if ARM				# SL-811
 	default PCI
 
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 4973e14..8f04665 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -1168,6 +1168,7 @@
 	struct uea_softc *sc = data;
 	int ret = -EAGAIN;
 
+	set_freezable();
 	uea_enters(INS_TO_USBDEV(sc));
 	while (!kthread_should_stop()) {
 		if (ret < 0 || sc->reset)
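
This hunk, like the hub.c, file_storage.c and usb/storage changes further down, is part of the switch from the old opt-out freezer behaviour (PF_NOFREEZE) to an explicit opt-in: a kernel thread that is safe to freeze now calls set_freezable() once and then parks itself in try_to_freeze() (or a freezable wait) on each loop iteration. A hedged sketch of the resulting idiom, with a hypothetical thread function:

	#include <linux/kthread.h>
	#include <linux/freezer.h>
	#include <linux/sched.h>

	/* Hypothetical worker loop; real threads do useful work between checks. */
	static int my_kthread(void *data)
	{
		set_freezable();		/* opt in to the freezer */

		while (!kthread_should_stop()) {
			try_to_freeze();	/* park here across suspend/hibernate */

			/* ... wait for and service work ... */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}
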
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 73c4936..6548574 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -29,13 +29,6 @@
 #include "hcd.h"
 #include "usb.h"
 
-#define VERBOSE_DEBUG	0
-
-#if VERBOSE_DEBUG
-#define dev_vdbg	dev_dbg
-#else
-#define dev_vdbg(dev, fmt, args...)	do { } while (0)
-#endif
 
 #ifdef CONFIG_HOTPLUG
 
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 50e79010..fd74c50 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2728,6 +2728,7 @@
 
 static int hub_thread(void *__unused)
 {
+	set_freezable();
 	do {
 		hub_events();
 		wait_event_interruptible(khubd_wait,
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 8712ef9..be7a1bd 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -3434,6 +3434,9 @@
 	allow_signal(SIGKILL);
 	allow_signal(SIGUSR1);
 
+	/* Allow the thread to be frozen */
+	set_freezable();
+
 	/* Arrange for userspace references to be interpreted as kernel
 	 * pointers.  That way we can pass a kernel pointer to a routine
 	 * that expects a __user pointer and it will work okay. */
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index d6c5f11..349b816 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1777,14 +1777,13 @@
 	}
 
 	/* alloc, and start init */
-	dev = kmalloc (sizeof *dev, GFP_KERNEL);
+	dev = kzalloc (sizeof *dev, GFP_KERNEL);
 	if (dev == NULL){
 		pr_debug("enomem %s\n", pci_name(pdev));
 		retval = -ENOMEM;
 		goto done;
 	}
 
-	memset(dev, 0, sizeof *dev);
 	spin_lock_init(&dev->lock);
 	dev->pdev = pdev;
 	dev->gadget.ops = &goku_ops;
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index dd33ff0..38138bb 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -1427,7 +1427,7 @@
 		gs_acm_config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
 	}
 
-	gs_device = dev = kmalloc(sizeof(struct gs_dev), GFP_KERNEL);
+	gs_device = dev = kzalloc(sizeof(struct gs_dev), GFP_KERNEL);
 	if (dev == NULL)
 		return -ENOMEM;
 
@@ -1435,7 +1435,6 @@
 		init_utsname()->sysname, init_utsname()->release,
 		gadget->name);
 
-	memset(dev, 0, sizeof(struct gs_dev));
 	dev->dev_gadget = gadget;
 	spin_lock_init(&dev->dev_lock);
 	INIT_LIST_HEAD(&dev->dev_req_list);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 2038125..6edf409 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -171,11 +171,10 @@
 	}
 
 	/* allocate the private part of the URB */
-	urb_priv = kmalloc (sizeof (urb_priv_t) + size * sizeof (struct td *),
+	urb_priv = kzalloc (sizeof (urb_priv_t) + size * sizeof (struct td *),
 			mem_flags);
 	if (!urb_priv)
 		return -ENOMEM;
-	memset (urb_priv, 0, sizeof (urb_priv_t) + size * sizeof (struct td *));
 	INIT_LIST_HEAD (&urb_priv->pending);
 	urb_priv->length = size;
 	urb_priv->ed = ed;
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 2d0e73b..5da63f5 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -278,10 +278,9 @@
 {
 	local_info_t *local;
 
-	local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
+	local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
 	if (!local)
 		return -ENOMEM;
-	memset(local, 0, sizeof(local_info_t));
 	local->p_dev = link;
 	link->priv = local;
 
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
index 1fd5fc2..42d4e64 100644
--- a/drivers/usb/misc/auerswald.c
+++ b/drivers/usb/misc/auerswald.c
@@ -630,7 +630,7 @@
 	} else
 		status = urb->status;
 
-	if (actual_length)
+	if (status >= 0)
 		*actual_length = urb->actual_length;
 
   	return status;
@@ -664,7 +664,7 @@
 	int ret;
 	struct usb_ctrlrequest *dr;
 	struct urb *urb;
-        int length;
+        int uninitialized_var(length);
 
         dbg ("auerchain_control_msg");
         dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index bef8bcd..28842d2 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -311,8 +311,6 @@
 	struct Scsi_Host *host = us_to_host(us);
 	int autopm_rc;
 
-	current->flags |= PF_NOFREEZE;
-
 	for(;;) {
 		US_DEBUGP("*** thread sleeping.\n");
 		if(down_interruptible(&us->sema))
@@ -920,6 +918,7 @@
 	printk(KERN_DEBUG
 		"usb-storage: device found at %d\n", us->pusb_dev->devnum);
 
+	set_freezable();
 	/* Wait for the timeout to expire or for a disconnect */
 	if (delay_use > 0) {
 		printk(KERN_DEBUG "usb-storage: waiting for device "
diff --git a/drivers/video/68328fb.c b/drivers/video/68328fb.c
index 0dda73d..7f907fb 100644
--- a/drivers/video/68328fb.c
+++ b/drivers/video/68328fb.c
@@ -60,7 +60,7 @@
 static u_long videomemorysize;
 
 static struct fb_info fb_info;
-static u32 mc68x328fb_pseudo_palette[17];
+static u32 mc68x328fb_pseudo_palette[16];
 
 static struct fb_var_screeninfo mc68x328fb_default __initdata = {
 	.red =		{ 0, 8, 0 },
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 403dac7..0c5644bb 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -812,7 +812,7 @@
 
 config FB_EPSON1355
 	bool "Epson 1355 framebuffer support"
-	depends on (FB = y) && (SUPERH || ARCH_CEIVA)
+	depends on (FB = y) && ARCH_CEIVA
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
@@ -1790,8 +1790,8 @@
 	  adaptor, found on some IBM System P (pSeries) machines.
 
 config FB_PS3
-	bool "PS3 GPU framebuffer driver"
-	depends on (FB = y) && PS3_PS3AV
+	tristate "PS3 GPU framebuffer driver"
+	depends on FB && PS3_PS3AV
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
@@ -1820,6 +1820,10 @@
 	  framebuffer. ML300 carries a 640*480 LCD display on the board,
 	  ML403 uses a standard DB15 VGA connector.
 
+if ARCH_OMAP
+	source "drivers/video/omap/Kconfig"
+endif
+
 config FB_VIRTUAL
 	tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
 	depends on FB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index bd8b052..a562f9d 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -113,6 +113,7 @@
 obj-$(CONFIG_FB_PS3)		  += ps3fb.o
 obj-$(CONFIG_FB_SM501)            += sm501fb.o
 obj-$(CONFIG_FB_XILINX)           += xilinxfb.o
+obj-$(CONFIG_FB_OMAP)             += omap/
 
 # Platform or fallback drivers go here
 obj-$(CONFIG_FB_VESA)             += vesafb.o
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 6c9dc2e..a7a1c89 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -447,13 +447,12 @@
 		goto out;
 	}
 
-	fb = kmalloc(sizeof(struct clcd_fb), GFP_KERNEL);
+	fb = kzalloc(sizeof(struct clcd_fb), GFP_KERNEL);
 	if (!fb) {
 		printk(KERN_INFO "CLCD: could not allocate new clcd_fb struct\n");
 		ret = -ENOMEM;
 		goto free_region;
 	}
-	memset(fb, 0, sizeof(struct clcd_fb));
 
 	fb->dev = dev;
 	fb->board = board;
diff --git a/drivers/video/aty/ati_ids.h b/drivers/video/aty/ati_ids.h
index 90e7df2..685a754 100644
--- a/drivers/video/aty/ati_ids.h
+++ b/drivers/video/aty/ati_ids.h
@@ -204,6 +204,7 @@
 #define PCI_CHIP_RV280_5961		0x5961
 #define PCI_CHIP_RV280_5962		0x5962
 #define PCI_CHIP_RV280_5964		0x5964
+#define PCI_CHIP_RS485_5975		0x5975
 #define PCI_CHIP_RV280_5C61		0x5C61
 #define PCI_CHIP_RV280_5C63		0x5C63
 #define PCI_CHIP_R423_5D57              0x5D57
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 2fbff63..0c7bf75 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -541,7 +541,7 @@
 #endif /* CONFIG_FB_ATY_CT */
 
 
-static u32 pseudo_palette[17];
+static u32 pseudo_palette[16];
 
 #ifdef CONFIG_FB_ATY_GX
 static char *aty_gx_ram[8] __devinitdata = {
@@ -2937,12 +2937,11 @@
 		/* nothing */ ;
 	j = i + 4;
 
-	par->mmap_map = kmalloc(j * sizeof(*par->mmap_map), GFP_ATOMIC);
+	par->mmap_map = kcalloc(j, sizeof(*par->mmap_map), GFP_ATOMIC);
 	if (!par->mmap_map) {
 		PRINTKE("atyfb_setup_sparc() can't alloc mmap_map\n");
 		return -ENOMEM;
 	}
-	memset(par->mmap_map, 0, j * sizeof(*par->mmap_map));
 
 	for (i = 0, j = 2; i < 6 && pdev->resource[i].start; i++) {
 		struct resource *rp = &pdev->resource[i];
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 2349e71..47ca62f 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -153,6 +153,8 @@
 	/* Mobility 9200 (M9+) */
 	CHIP_DEF(PCI_CHIP_RV280_5C61,	RV280,	CHIP_HAS_CRTC2 | CHIP_IS_MOBILITY),
 	CHIP_DEF(PCI_CHIP_RV280_5C63,	RV280,	CHIP_HAS_CRTC2 | CHIP_IS_MOBILITY),
+	/* Mobility Xpress 200 */
+	CHIP_DEF(PCI_CHIP_RS485_5975,	R300,	CHIP_HAS_CRTC2 | CHIP_IS_IGP | CHIP_IS_MOBILITY),
 	/* 9200 */
 	CHIP_DEF(PCI_CHIP_RV280_5960,	RV280,	CHIP_HAS_CRTC2),
 	CHIP_DEF(PCI_CHIP_RV280_5961,	RV280,	CHIP_HAS_CRTC2),
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 7ebffcd..7c922c7 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -301,7 +301,7 @@
 	void __iomem		*bios_seg;
 	int			fp_bios_start;
 
-	u32			pseudo_palette[17];
+	u32			pseudo_palette[16];
 	struct { u8 red, green, blue, pad; }
 				palette[256];
 
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index dbf4ec3..03e57ef 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1589,11 +1589,10 @@
 		return -EFAULT;
 	}
 
-	fbi->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
+	fbi->pseudo_palette = kcalloc(16, sizeof(u32), GFP_KERNEL);
 	if (!fbi->pseudo_palette) {
 		return -ENOMEM;
 	}
-	memset(fbi->pseudo_palette, 0, sizeof(u32) * 16);
 
 	if (fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
 		print_err("Fail to allocate colormap (%d entries)",
diff --git a/drivers/video/clps711xfb.c b/drivers/video/clps711xfb.c
index 50b78af..dea6579 100644
--- a/drivers/video/clps711xfb.c
+++ b/drivers/video/clps711xfb.c
@@ -366,11 +366,10 @@
 	if (fb_get_options("clps711xfb", NULL))
 		return -ENODEV;
 
-	cfb = kmalloc(sizeof(*cfb), GFP_KERNEL);
+	cfb = kzalloc(sizeof(*cfb), GFP_KERNEL);
 	if (!cfb)
 		goto out;
 
-	memset(cfb, 0, sizeof(*cfb));
 	strcpy(cfb->fix.id, "clps711x");
 
 	cfb->fbops		= &clps7111fb_ops;
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index d3b8a6b..4964396 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -118,6 +118,22 @@
 	help
 	  Low-level framebuffer-based console driver.
 
+config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
+       bool "Map the console to the primary display device"
+       depends on FRAMEBUFFER_CONSOLE
+       default n
+       ---help---
+         If this option is selected, the framebuffer console will
+         automatically select the primary display device (if the architecture
+         supports this feature).  Otherwise, the framebuffer console will
+         always select the first framebuffer driver that is loaded. The latter
+         is the default behavior.
+
+         You can always override the automatic selection of the primary device
+         by using the fbcon=map: boot option.
+
+         If unsure, say N.
+
 config FRAMEBUFFER_CONSOLE_ROTATION
        bool "Framebuffer Console Rotation"
        depends on FRAMEBUFFER_CONSOLE
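
The fbcon=map: override mentioned in the new help text is handled by fbcon's boot-option parser (see the fbcon.c hunk below): each digit in the string is assigned to consecutive virtual consoles and the string is repeated cyclically, so booting with

	fbcon=map:1

binds every console to fb1, while fbcon=map:01 alternates consoles between fb0 and fb1. With this patch an explicit map: also sets map_override, which keeps the automatic primary-device remapping from overriding the user's choice.
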
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 73813c6..decfdc8 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -75,6 +75,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/crc32.h> /* For counting font checksums */
+#include <asm/fb.h>
 #include <asm/irq.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -125,6 +126,8 @@
 static int last_fb_vc = MAX_NR_CONSOLES - 1;
 static int fbcon_is_default = 1; 
 static int fbcon_has_exited;
+static int primary_device = -1;
+static int map_override;
 
 /* font data */
 static char fontname[40];
@@ -152,6 +155,7 @@
 #define DEFAULT_CURSOR_BLINK_RATE	(20)
 
 static int vbl_cursor_cnt;
+static int fbcon_cursor_noblink;
 
 #define divides(a, b)	((!(a) || (b)%(a)) ? 0 : 1)
 
@@ -188,16 +192,14 @@
 static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx,
 			    int dy, int dx, int height, int width, u_int y_break);
 static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
-			   struct vc_data *vc);
-static void fbcon_preset_disp(struct fb_info *info, struct fb_var_screeninfo *var,
-			      int unit);
+			   int unit);
 static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
 			      int line, int count, int dy);
 static void fbcon_modechanged(struct fb_info *info);
 static void fbcon_set_all_vcs(struct fb_info *info);
 static void fbcon_start(void);
 static void fbcon_exit(void);
-static struct class_device *fbcon_class_device;
+static struct device *fbcon_device;
 
 #ifdef CONFIG_MAC
 /*
@@ -441,7 +443,8 @@
 	struct fbcon_ops *ops = info->fbcon_par;
 
 	if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
-	    !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) {
+	    !(ops->flags & FBCON_FLAGS_CURSOR_TIMER) &&
+	    !fbcon_cursor_noblink) {
 		if (!info->queue.func)
 			INIT_WORK(&info->queue, fb_flashcursor);
 
@@ -495,13 +498,17 @@
 		
 		if (!strncmp(options, "map:", 4)) {
 			options += 4;
-			if (*options)
+			if (*options) {
 				for (i = 0, j = 0; i < MAX_NR_CONSOLES; i++) {
 					if (!options[j])
 						j = 0;
 					con2fb_map_boot[i] =
 						(options[j++]-'0') % FB_MAX;
 				}
+
+				map_override = 1;
+			}
+
 			return 1;
 		}
 
@@ -736,7 +743,9 @@
 
 	if (!err) {
 		info->fbcon_par = ops;
-		set_blitting_type(vc, info);
+
+		if (vc)
+			set_blitting_type(vc, info);
 	}
 
 	if (err) {
@@ -798,11 +807,7 @@
 
 	ops->flags |= FBCON_FLAGS_INIT;
 	ops->graphics = 0;
-
-	if (vc)
-		fbcon_set_disp(info, &info->var, vc);
-	else
-		fbcon_preset_disp(info, &info->var, unit);
+	fbcon_set_disp(info, &info->var, unit);
 
 	if (show_logo) {
 		struct vc_data *fg_vc = vc_cons[fg_console].d;
@@ -1107,6 +1112,9 @@
 	if (var_to_display(p, &info->var, info))
 		return;
 
+	if (!info->fbcon_par)
+		con2fb_acquire_newinfo(vc, info, vc->vc_num, -1);
+
 	/* If we are not the first console on this
 	   fb, copy the font from that console */
 	t = &fb_display[fg_console];
@@ -1349,6 +1357,11 @@
 	if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
 		return;
 
+	if (vc->vc_cursor_type & 0x10)
+		fbcon_del_cursor_timer(info);
+	else
+		fbcon_add_cursor_timer(info);
+
 	ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
 	if (mode & CM_SOFTBACK) {
 		mode &= ~CM_SOFTBACK;
@@ -1368,36 +1381,29 @@
 static int scrollback_max = 0;
 static int scrollback_current = 0;
 
-/*
- * If no vc is existent yet, just set struct display
- */
-static void fbcon_preset_disp(struct fb_info *info, struct fb_var_screeninfo *var,
-			      int unit)
-{
-	struct display *p = &fb_display[unit];
-	struct display *t = &fb_display[fg_console];
-
-	if (var_to_display(p, var, info))
-		return;
-
-	p->fontdata = t->fontdata;
-	p->userfont = t->userfont;
-	if (p->userfont)
-		REFCOUNT(p->fontdata)++;
-}
-
 static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
-			   struct vc_data *vc)
+			   int unit)
 {
-	struct display *p = &fb_display[vc->vc_num], *t;
-	struct vc_data **default_mode = vc->vc_display_fg;
-	struct vc_data *svc = *default_mode;
+	struct display *p, *t;
+	struct vc_data **default_mode, *vc;
+	struct vc_data *svc;
 	struct fbcon_ops *ops = info->fbcon_par;
 	int rows, cols, charcnt = 256;
 
+	p = &fb_display[unit];
+
 	if (var_to_display(p, var, info))
 		return;
+
+	vc = vc_cons[unit].d;
+
+	if (!vc)
+		return;
+
+	default_mode = vc->vc_display_fg;
+	svc = *default_mode;
 	t = &fb_display[svc->vc_num];
+
 	if (!vc->vc_font.data) {
 		vc->vc_font.data = (void *)(p->fontdata = t->fontdata);
 		vc->vc_font.width = (*default_mode)->vc_font.width;
@@ -1704,6 +1710,56 @@
 	}
 }
 
+static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
+			struct display *p, int line, int count, int ycount)
+{
+	int offset = ycount * vc->vc_cols;
+	unsigned short *d = (unsigned short *)
+	    (vc->vc_origin + vc->vc_size_row * line);
+	unsigned short *s = d + offset;
+	struct fbcon_ops *ops = info->fbcon_par;
+
+	while (count--) {
+		unsigned short *start = s;
+		unsigned short *le = advance_row(s, 1);
+		unsigned short c;
+		int x = 0;
+
+		do {
+			c = scr_readw(s);
+
+			if (c == scr_readw(d)) {
+				if (s > start) {
+					ops->bmove(vc, info, line + ycount, x,
+						   line, x, 1, s-start);
+					x += s - start + 1;
+					start = s + 1;
+				} else {
+					x++;
+					start++;
+				}
+			}
+
+			scr_writew(c, d);
+			console_conditional_schedule();
+			s++;
+			d++;
+		} while (s < le);
+		if (s > start)
+			ops->bmove(vc, info, line + ycount, x, line, x, 1,
+				   s-start);
+		console_conditional_schedule();
+		if (ycount > 0)
+			line++;
+		else {
+			line--;
+			/* NOTE: We subtract two lines from these pointers */
+			s -= vc->vc_size_row;
+			d -= vc->vc_size_row;
+		}
+	}
+}
+
 static void fbcon_redraw(struct vc_data *vc, struct display *p,
 			 int line, int count, int offset)
 {
@@ -1789,7 +1845,6 @@
 {
 	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
 	struct display *p = &fb_display[vc->vc_num];
-	struct fbcon_ops *ops = info->fbcon_par;
 	int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
 
 	if (fbcon_is_inactive(vc, info))
@@ -1813,10 +1868,15 @@
 			goto redraw_up;
 		switch (p->scrollmode) {
 		case SCROLL_MOVE:
-			ops->bmove(vc, info, t + count, 0, t, 0,
-				    b - t - count, vc->vc_cols);
-			ops->clear(vc, info, b - count, 0, count,
-				  vc->vc_cols);
+			fbcon_redraw_blit(vc, info, p, t, b - t - count,
+				     count);
+			fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+			scr_memsetw((unsigned short *) (vc->vc_origin +
+							vc->vc_size_row *
+							(b - count)),
+				    vc->vc_video_erase_char,
+				    vc->vc_size_row * count);
+			return 1;
 			break;
 
 		case SCROLL_WRAP_MOVE:
@@ -1899,9 +1959,15 @@
 			goto redraw_down;
 		switch (p->scrollmode) {
 		case SCROLL_MOVE:
-			ops->bmove(vc, info, t, 0, t + count, 0,
-				    b - t - count, vc->vc_cols);
-			ops->clear(vc, info, t, 0, count, vc->vc_cols);
+			fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+				     -count);
+			fbcon_clear(vc, t, 0, count, vc->vc_cols);
+			scr_memsetw((unsigned short *) (vc->vc_origin +
+							vc->vc_size_row *
+							t),
+				    vc->vc_video_erase_char,
+				    vc->vc_size_row * count);
+			return 1;
 			break;
 
 		case SCROLL_WRAP_MOVE:
@@ -2937,9 +3003,48 @@
 	return found;
 }
 
-static int fbcon_fb_unregistered(int idx)
+#ifdef CONFIG_VT_HW_CONSOLE_BINDING
+static int fbcon_unbind(void)
 {
-	int i;
+	int ret;
+
+	ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
+				fbcon_is_default);
+	return ret;
+}
+#else
+static inline int fbcon_unbind(void)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
+
+static int fbcon_fb_unbind(int idx)
+{
+	int i, new_idx = -1, ret = 0;
+
+	for (i = first_fb_vc; i <= last_fb_vc; i++) {
+		if (con2fb_map[i] != idx &&
+		    con2fb_map[i] != -1) {
+			new_idx = i;
+			break;
+		}
+	}
+
+	if (new_idx != -1) {
+		for (i = first_fb_vc; i <= last_fb_vc; i++) {
+			if (con2fb_map[i] == idx)
+				set_con2fb_map(i, new_idx, 0);
+		}
+	} else
+		ret = fbcon_unbind();
+
+	return ret;
+}
+
+static int fbcon_fb_unregistered(struct fb_info *info)
+{
+	int i, idx = info->node;
 
 	for (i = first_fb_vc; i <= last_fb_vc; i++) {
 		if (con2fb_map[i] == idx)
@@ -2967,12 +3072,48 @@
 	if (!num_registered_fb)
 		unregister_con_driver(&fb_con);
 
+
+	if (primary_device == idx)
+		primary_device = -1;
+
 	return 0;
 }
 
-static int fbcon_fb_registered(int idx)
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
+static void fbcon_select_primary(struct fb_info *info)
 {
-	int ret = 0, i;
+	if (!map_override && primary_device == -1 &&
+	    fb_is_primary_device(info)) {
+		int i;
+
+		printk(KERN_INFO "fbcon: %s (fb%i) is primary device\n",
+		       info->fix.id, info->node);
+		primary_device = info->node;
+
+		for (i = first_fb_vc; i <= last_fb_vc; i++)
+			con2fb_map_boot[i] = primary_device;
+
+		if (con_is_bound(&fb_con)) {
+			printk(KERN_INFO "fbcon: Remapping primary device, "
+			       "fb%i, to tty %i-%i\n", info->node,
+			       first_fb_vc + 1, last_fb_vc + 1);
+			info_idx = primary_device;
+		}
+	}
+
+}
+#else
+static inline void fbcon_select_primary(struct fb_info *info)
+{
+	return;
+}
+#endif /* CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY */
+
+static int fbcon_fb_registered(struct fb_info *info)
+{
+	int ret = 0, i, idx = info->node;
+
+	fbcon_select_primary(info);
 
 	if (info_idx == -1) {
 		for (i = first_fb_vc; i <= last_fb_vc; i++) {
@@ -2986,8 +3127,7 @@
 			ret = fbcon_takeover(1);
 	} else {
 		for (i = first_fb_vc; i <= last_fb_vc; i++) {
-			if (con2fb_map_boot[i] == idx &&
-			    con2fb_map[i] == -1)
+			if (con2fb_map_boot[i] == idx)
 				set_con2fb_map(i, idx, 0);
 		}
 	}
@@ -3034,12 +3174,7 @@
 		mode = fb_find_nearest_mode(fb_display[i].mode,
 					    &info->modelist);
 		fb_videomode_to_var(&var, mode);
-
-		if (vc)
-			fbcon_set_disp(info, &var, vc);
-		else
-			fbcon_preset_disp(info, &var, i);
-
+		fbcon_set_disp(info, &var, vc->vc_num);
 	}
 }
 
@@ -3114,11 +3249,14 @@
 		mode = event->data;
 		ret = fbcon_mode_deleted(info, mode);
 		break;
+	case FB_EVENT_FB_UNBIND:
+		ret = fbcon_fb_unbind(info->node);
+		break;
 	case FB_EVENT_FB_REGISTERED:
-		ret = fbcon_fb_registered(info->node);
+		ret = fbcon_fb_registered(info);
 		break;
 	case FB_EVENT_FB_UNREGISTERED:
-		ret = fbcon_fb_unregistered(info->node);
+		ret = fbcon_fb_unregistered(info);
 		break;
 	case FB_EVENT_SET_CONSOLE_MAP:
 		con2fb = event->data;
@@ -3179,8 +3317,9 @@
 	.notifier_call	= fbcon_event_notify,
 };
 
-static ssize_t store_rotate(struct class_device *class_device,
-			    const char *buf, size_t count)
+static ssize_t store_rotate(struct device *device,
+			    struct device_attribute *attr, const char *buf,
+			    size_t count)
 {
 	struct fb_info *info;
 	int rotate, idx;
@@ -3203,8 +3342,9 @@
 	return count;
 }
 
-static ssize_t store_rotate_all(struct class_device *class_device,
-				const char *buf, size_t count)
+static ssize_t store_rotate_all(struct device *device,
+				struct device_attribute *attr,const char *buf,
+				size_t count)
 {
 	struct fb_info *info;
 	int rotate, idx;
@@ -3227,7 +3367,8 @@
 	return count;
 }
 
-static ssize_t show_rotate(struct class_device *class_device, char *buf)
+static ssize_t show_rotate(struct device *device,
+			   struct device_attribute *attr,char *buf)
 {
 	struct fb_info *info;
 	int rotate = 0, idx;
@@ -3248,20 +3389,86 @@
 	return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
 }
 
-static struct class_device_attribute class_device_attrs[] = {
+static ssize_t show_cursor_blink(struct device *device,
+				 struct device_attribute *attr, char *buf)
+{
+	struct fb_info *info;
+	struct fbcon_ops *ops;
+	int idx, blink = -1;
+
+	if (fbcon_has_exited)
+		return 0;
+
+	acquire_console_sem();
+	idx = con2fb_map[fg_console];
+
+	if (idx == -1 || registered_fb[idx] == NULL)
+		goto err;
+
+	info = registered_fb[idx];
+	ops = info->fbcon_par;
+
+	if (!ops)
+		goto err;
+
+	blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0;
+err:
+	release_console_sem();
+	return snprintf(buf, PAGE_SIZE, "%d\n", blink);
+}
+
+static ssize_t store_cursor_blink(struct device *device,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct fb_info *info;
+	int blink, idx;
+	char **last = NULL;
+
+	if (fbcon_has_exited)
+		return count;
+
+	acquire_console_sem();
+	idx = con2fb_map[fg_console];
+
+	if (idx == -1 || registered_fb[idx] == NULL)
+		goto err;
+
+	info = registered_fb[idx];
+
+	if (!info->fbcon_par)
+		goto err;
+
+	blink = simple_strtoul(buf, last, 0);
+
+	if (blink) {
+		fbcon_cursor_noblink = 0;
+		fbcon_add_cursor_timer(info);
+	} else {
+		fbcon_cursor_noblink = 1;
+		fbcon_del_cursor_timer(info);
+	}
+
+err:
+	release_console_sem();
+	return count;
+}
+
+static struct device_attribute device_attrs[] = {
 	__ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate),
 	__ATTR(rotate_all, S_IWUSR, NULL, store_rotate_all),
+	__ATTR(cursor_blink, S_IRUGO|S_IWUSR, show_cursor_blink,
+	       store_cursor_blink),
 };
 
-static int fbcon_init_class_device(void)
+static int fbcon_init_device(void)
 {
 	int i, error = 0;
 
 	fbcon_has_sysfs = 1;
 
-	for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) {
-		error = class_device_create_file(fbcon_class_device,
-						 &class_device_attrs[i]);
+	for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+		error = device_create_file(fbcon_device, &device_attrs[i]);
 
 		if (error)
 			break;
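
The new cursor_blink attribute is created on the "fbcon" device registered in fb_class further down in this patch (device_create(fb_class, ..., "fbcon")), so it should surface as /sys/class/graphics/fbcon/cursor_blink; the "graphics" class name is an assumption based on the existing fb class. Writing 0 stops the blink timer for the foreground console's framebuffer, any non-zero value restarts it; a minimal sketch:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* Path is an assumption; see above. */
		int fd = open("/sys/class/graphics/fbcon/cursor_blink", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, "0", 1) != 1) {	/* "0" = stop blinking */
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}
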
@@ -3269,8 +3476,7 @@
 
 	if (error) {
 		while (--i >= 0)
-			class_device_remove_file(fbcon_class_device,
-						 &class_device_attrs[i]);
+			device_remove_file(fbcon_device, &device_attrs[i]);
 
 		fbcon_has_sysfs = 0;
 	}
@@ -3356,16 +3562,15 @@
 
 	acquire_console_sem();
 	fb_register_client(&fbcon_event_notifier);
-	fbcon_class_device =
-	    class_device_create(fb_class, NULL, MKDEV(0, 0), NULL, "fbcon");
+	fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), "fbcon");
 
-	if (IS_ERR(fbcon_class_device)) {
-		printk(KERN_WARNING "Unable to create class_device "
+	if (IS_ERR(fbcon_device)) {
+		printk(KERN_WARNING "Unable to create device "
 		       "for fbcon; errno = %ld\n",
-		       PTR_ERR(fbcon_class_device));
-		fbcon_class_device = NULL;
+		       PTR_ERR(fbcon_device));
+		fbcon_device = NULL;
 	} else
-		fbcon_init_class_device();
+		fbcon_init_device();
 
 	for (i = 0; i < MAX_NR_CONSOLES; i++)
 		con2fb_map[i] = -1;
@@ -3379,14 +3584,13 @@
 
 #ifdef MODULE
 
-static void __exit fbcon_deinit_class_device(void)
+static void __exit fbcon_deinit_device(void)
 {
 	int i;
 
 	if (fbcon_has_sysfs) {
-		for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
-			class_device_remove_file(fbcon_class_device,
-						 &class_device_attrs[i]);
+		for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
+			device_remove_file(fbcon_device, &device_attrs[i]);
 
 		fbcon_has_sysfs = 0;
 	}
@@ -3396,8 +3600,8 @@
 {
 	acquire_console_sem();
 	fb_unregister_client(&fbcon_event_notifier);
-	fbcon_deinit_class_device();
-	class_device_destroy(fb_class, MKDEV(0, 0));
+	fbcon_deinit_device();
+	device_destroy(fb_class, MKDEV(0, 0));
 	fbcon_exit();
 	release_console_sem();
 	unregister_con_driver(&fb_con);
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index 8b76273..b0be7ea 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -94,7 +94,7 @@
 struct fb_info_control {
 	struct fb_info		info;
 	struct fb_par_control	par;
-	u32			pseudo_palette[17];
+	u32			pseudo_palette[16];
 		
 	struct cmap_regs	__iomem *cmap_regs;
 	unsigned long		cmap_regs_phys;
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 7a6eeda..30ede6e 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -1221,11 +1221,10 @@
 {
 	struct cfb_info *cfb;
 
-	cfb = kmalloc(sizeof(struct cfb_info), GFP_KERNEL);
+	cfb = kzalloc(sizeof(struct cfb_info), GFP_KERNEL);
 	if (!cfb)
 		return NULL;
 
-	memset(cfb, 0, sizeof(struct cfb_info));
 
 	cfb->id			= id;
 
diff --git a/drivers/video/cyblafb.c b/drivers/video/cyblafb.c
index 94a66c2..e23324d 100644
--- a/drivers/video/cyblafb.c
+++ b/drivers/video/cyblafb.c
@@ -1068,15 +1068,18 @@
 		out8(0x3C9, green >> 10);
 		out8(0x3C9, blue >> 10);
 
-	} else if (bpp == 16)	// RGB 565
-		((u32 *) info->pseudo_palette)[regno] =
-		    (red & 0xF800) |
-		    ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
-	else if (bpp == 32)	// ARGB 8888
-		((u32 *) info->pseudo_palette)[regno] =
-		    ((transp & 0xFF00) << 16) |
-		    ((red & 0xFF00) << 8) |
-		    ((green & 0xFF00)) | ((blue & 0xFF00) >> 8);
+	} else if (regno < 16) {
+		if (bpp == 16)	// RGB 565
+			((u32 *) info->pseudo_palette)[regno] =
+				(red & 0xF800) |
+				((green & 0xFC00) >> 5) |
+				((blue & 0xF800) >> 11);
+		else if (bpp == 32)	// ARGB 8888
+			((u32 *) info->pseudo_palette)[regno] =
+				((transp & 0xFF00) << 16) |
+				((red & 0xFF00) << 8) |
+				((green & 0xFF00)) | ((blue & 0xFF00) >> 8);
+	}
 
 	return 0;
 }
diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c
index ca2c54c..33be46c 100644
--- a/drivers/video/epson1355fb.c
+++ b/drivers/video/epson1355fb.c
@@ -63,23 +63,12 @@
 
 struct epson1355_par {
 	unsigned long reg_addr;
+	u32 pseudo_palette[16];
 };
 
 /* ------------------------------------------------------------------------- */
 
-#ifdef CONFIG_SUPERH
-
-static inline u8 epson1355_read_reg(int index)
-{
-	return ctrl_inb(par.reg_addr + index);
-}
-
-static inline void epson1355_write_reg(u8 data, int index)
-{
-	ctrl_outb(data, par.reg_addr + index);
-}
-
-#elif defined(CONFIG_ARM)
+#if defined(CONFIG_ARM)
 
 # ifdef CONFIG_ARCH_CEIVA
 #  include <asm/arch/hardware.h>
@@ -289,7 +278,7 @@
 	struct epson1355_par *par = info->par;
 
 	switch (blank_mode) {
-	case FB_BLANK_UNBLANKING:
+	case FB_BLANK_UNBLANK:
 	case FB_BLANK_NORMAL:
 		lcd_enable(par, 1);
 		backlight_enable(1);
@@ -635,7 +624,7 @@
 		goto bail;
 	}
 
-	info = framebuffer_alloc(sizeof(struct epson1355_par) + sizeof(u32) * 256, &dev->dev);
+	info = framebuffer_alloc(sizeof(struct epson1355_par), &dev->dev);
 	if (!info) {
 		rc = -ENOMEM;
 		goto bail;
@@ -648,7 +637,7 @@
 		rc = -ENOMEM;
 		goto bail;
 	}
-	info->pseudo_palette = (void *)(default_par + 1);
+	info->pseudo_palette = default_par->pseudo_palette;
 
 	info->screen_base = ioremap(EPSON1355FB_FB_PHYS, EPSON1355FB_FB_LEN);
 	if (!info->screen_base) {
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 38c2e25..215ac57 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -33,18 +33,11 @@
 #include <linux/err.h>
 #include <linux/device.h>
 #include <linux/efi.h>
-
-#if defined(__mc68000__) || defined(CONFIG_APUS)
-#include <asm/setup.h>
-#endif
-
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-
 #include <linux/fb.h>
 
+#include <asm/fb.h>
+
+
     /*
      *  Frame buffer device initialization and setup routines
      */
@@ -411,10 +404,146 @@
 	}
 }
 
+static int fb_show_logo_line(struct fb_info *info, int rotate,
+			     const struct linux_logo *logo, int y,
+			     unsigned int n)
+{
+	u32 *palette = NULL, *saved_pseudo_palette = NULL;
+	unsigned char *logo_new = NULL, *logo_rotate = NULL;
+	struct fb_image image;
+
+	/* Return if the frame buffer is not mapped or suspended */
+	if (logo == NULL || info->state != FBINFO_STATE_RUNNING ||
+	    info->flags & FBINFO_MODULE)
+		return 0;
+
+	image.depth = 8;
+	image.data = logo->data;
+
+	if (fb_logo.needs_cmapreset)
+		fb_set_logocmap(info, logo);
+
+	if (fb_logo.needs_truepalette ||
+	    fb_logo.needs_directpalette) {
+		palette = kmalloc(256 * 4, GFP_KERNEL);
+		if (palette == NULL)
+			return 0;
+
+		if (fb_logo.needs_truepalette)
+			fb_set_logo_truepalette(info, logo, palette);
+		else
+			fb_set_logo_directpalette(info, logo, palette);
+
+		saved_pseudo_palette = info->pseudo_palette;
+		info->pseudo_palette = palette;
+	}
+
+	if (fb_logo.depth <= 4) {
+		logo_new = kmalloc(logo->width * logo->height, GFP_KERNEL);
+		if (logo_new == NULL) {
+			kfree(palette);
+			if (saved_pseudo_palette)
+				info->pseudo_palette = saved_pseudo_palette;
+			return 0;
+		}
+		image.data = logo_new;
+		fb_set_logo(info, logo, logo_new, fb_logo.depth);
+	}
+
+	image.dx = 0;
+	image.dy = y;
+	image.width = logo->width;
+	image.height = logo->height;
+
+	if (rotate) {
+		logo_rotate = kmalloc(logo->width *
+				      logo->height, GFP_KERNEL);
+		if (logo_rotate)
+			fb_rotate_logo(info, logo_rotate, &image, rotate);
+	}
+
+	fb_do_show_logo(info, &image, rotate, n);
+
+	kfree(palette);
+	if (saved_pseudo_palette != NULL)
+		info->pseudo_palette = saved_pseudo_palette;
+	kfree(logo_new);
+	kfree(logo_rotate);
+	return logo->height;
+}
+
+
+#ifdef CONFIG_FB_LOGO_EXTRA
+
+#define FB_LOGO_EX_NUM_MAX 10
+static struct logo_data_extra {
+	const struct linux_logo *logo;
+	unsigned int n;
+} fb_logo_ex[FB_LOGO_EX_NUM_MAX];
+static unsigned int fb_logo_ex_num;
+
+void fb_append_extra_logo(const struct linux_logo *logo, unsigned int n)
+{
+	if (!n || fb_logo_ex_num == FB_LOGO_EX_NUM_MAX)
+		return;
+
+	fb_logo_ex[fb_logo_ex_num].logo = logo;
+	fb_logo_ex[fb_logo_ex_num].n = n;
+	fb_logo_ex_num++;
+}
+
+static int fb_prepare_extra_logos(struct fb_info *info, unsigned int height,
+				  unsigned int yres)
+{
+	unsigned int i;
+
+	/* FIXME: logo_ex supports only truecolor fb. */
+	if (info->fix.visual != FB_VISUAL_TRUECOLOR)
+		fb_logo_ex_num = 0;
+
+	for (i = 0; i < fb_logo_ex_num; i++) {
+		height += fb_logo_ex[i].logo->height;
+		if (height > yres) {
+			height -= fb_logo_ex[i].logo->height;
+			fb_logo_ex_num = i;
+			break;
+		}
+	}
+	return height;
+}
+
+static int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
+{
+	unsigned int i;
+
+	for (i = 0; i < fb_logo_ex_num; i++)
+		y += fb_show_logo_line(info, rotate,
+				       fb_logo_ex[i].logo, y, fb_logo_ex[i].n);
+
+	return y;
+}
+
+#else /* !CONFIG_FB_LOGO_EXTRA */
+
+static inline int fb_prepare_extra_logos(struct fb_info *info,
+					 unsigned int height,
+					 unsigned int yres)
+{
+	return height;
+}
+
+static inline int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
+{
+	return y;
+}
+
+#endif /* CONFIG_FB_LOGO_EXTRA */
+
+
 int fb_prepare_logo(struct fb_info *info, int rotate)
 {
 	int depth = fb_get_color_depth(&info->var, &info->fix);
-	int yres;
+	unsigned int yres;
 
 	memset(&fb_logo, 0, sizeof(struct logo_data));
 
@@ -456,7 +585,7 @@
 	if (!fb_logo.logo) {
 		return 0;
 	}
-	
+
 	if (rotate == FB_ROTATE_UR || rotate == FB_ROTATE_UD)
 		yres = info->var.yres;
 	else
@@ -473,75 +602,20 @@
 	else if (fb_logo.logo->type == LINUX_LOGO_VGA16)
 		fb_logo.depth = 4;
 	else
-		fb_logo.depth = 1;		
-	return fb_logo.logo->height;
+		fb_logo.depth = 1;
+
+	return fb_prepare_extra_logos(info, fb_logo.logo->height, yres);
 }
 
 int fb_show_logo(struct fb_info *info, int rotate)
 {
-	u32 *palette = NULL, *saved_pseudo_palette = NULL;
-	unsigned char *logo_new = NULL, *logo_rotate = NULL;
-	struct fb_image image;
+	int y;
 
-	/* Return if the frame buffer is not mapped or suspended */
-	if (fb_logo.logo == NULL || info->state != FBINFO_STATE_RUNNING ||
-	    info->flags & FBINFO_MODULE)
-		return 0;
+	y = fb_show_logo_line(info, rotate, fb_logo.logo, 0,
+			      num_online_cpus());
+	y = fb_show_extra_logos(info, y, rotate);
 
-	image.depth = 8;
-	image.data = fb_logo.logo->data;
-
-	if (fb_logo.needs_cmapreset)
-		fb_set_logocmap(info, fb_logo.logo);
-
-	if (fb_logo.needs_truepalette || 
-	    fb_logo.needs_directpalette) {
-		palette = kmalloc(256 * 4, GFP_KERNEL);
-		if (palette == NULL)
-			return 0;
-
-		if (fb_logo.needs_truepalette)
-			fb_set_logo_truepalette(info, fb_logo.logo, palette);
-		else
-			fb_set_logo_directpalette(info, fb_logo.logo, palette);
-
-		saved_pseudo_palette = info->pseudo_palette;
-		info->pseudo_palette = palette;
-	}
-
-	if (fb_logo.depth <= 4) {
-		logo_new = kmalloc(fb_logo.logo->width * fb_logo.logo->height, 
-				   GFP_KERNEL);
-		if (logo_new == NULL) {
-			kfree(palette);
-			if (saved_pseudo_palette)
-				info->pseudo_palette = saved_pseudo_palette;
-			return 0;
-		}
-		image.data = logo_new;
-		fb_set_logo(info, fb_logo.logo, logo_new, fb_logo.depth);
-	}
-
-	image.dx = 0;
-	image.dy = 0;
-	image.width = fb_logo.logo->width;
-	image.height = fb_logo.logo->height;
-
-	if (rotate) {
-		logo_rotate = kmalloc(fb_logo.logo->width *
-				      fb_logo.logo->height, GFP_KERNEL);
-		if (logo_rotate)
-			fb_rotate_logo(info, logo_rotate, &image, rotate);
-	}
-
-	fb_do_show_logo(info, &image, rotate, num_online_cpus());
-
-	kfree(palette);
-	if (saved_pseudo_palette != NULL)
-		info->pseudo_palette = saved_pseudo_palette;
-	kfree(logo_new);
-	kfree(logo_rotate);
-	return fb_logo.logo->height;
+	return y;
 }
 #else
 int fb_prepare_logo(struct fb_info *info, int rotate) { return 0; }
@@ -1155,17 +1229,15 @@
 }
 #endif
 
-static int 
+static int
 fb_mmap(struct file *file, struct vm_area_struct * vma)
 {
 	int fbidx = iminor(file->f_path.dentry->d_inode);
 	struct fb_info *info = registered_fb[fbidx];
 	struct fb_ops *fb = info->fbops;
 	unsigned long off;
-#if !defined(__sparc__) || defined(__sparc_v9__)
 	unsigned long start;
 	u32 len;
-#endif
 
 	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
 		return -EINVAL;
@@ -1180,12 +1252,6 @@
 		return res;
 	}
 
-#if defined(__sparc__) && !defined(__sparc_v9__)
-	/* Should never get here, all fb drivers should have their own
-	   mmap routines */
-	return -EINVAL;
-#else
-	/* !sparc32... */
 	lock_kernel();
 
 	/* frame buffer memory */
@@ -1209,50 +1275,11 @@
 	vma->vm_pgoff = off >> PAGE_SHIFT;
 	/* This is an IO map - tell maydump to skip this VMA */
 	vma->vm_flags |= VM_IO | VM_RESERVED;
-#if defined(__mc68000__)
-#if defined(CONFIG_SUN3)
-	pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
-#elif defined(CONFIG_MMU)
-	if (CPU_IS_020_OR_030)
-		pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
-	if (CPU_IS_040_OR_060) {
-		pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
-		/* Use no-cache mode, serialized */
-		pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S;
-	}
-#endif
-#elif defined(__powerpc__)
-	vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
-						 vma->vm_end - vma->vm_start,
-						 vma->vm_page_prot);
-#elif defined(__alpha__)
-	/* Caching is off in the I/O space quadrant by design.  */
-#elif defined(__i386__) || defined(__x86_64__)
-	if (boot_cpu_data.x86 > 3)
-		pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
-#elif defined(__mips__) || defined(__sparc_v9__)
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#elif defined(__hppa__)
-	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-#elif defined(__arm__) || defined(__sh__) || defined(__m32r__)
-	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-#elif defined(__avr32__)
-	vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
-				      & ~_PAGE_CACHABLE)
-				     | (_PAGE_BUFFER | _PAGE_DIRTY));
-#elif defined(__ia64__)
-	if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-	else
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#else
-#warning What do we have to do here??
-#endif
+	fb_pgprotect(file, vma, off);
 	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
 			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
 		return -EAGAIN;
 	return 0;
-#endif /* !sparc32 */
 }
 
 static int
@@ -1388,17 +1415,34 @@
  *
  *	Returns negative errno on error, or zero for success.
  *
+ *      This function will also notify the framebuffer console
+ *      to release the driver.
+ *
+ *      This is meant to be called within a driver's module_exit()
+ *      function. If this is called outside module_exit(), ensure
+ *      that the driver implements fb_open() and fb_release() to
+ *      check that no processes are using the device.
  */
 
 int
 unregister_framebuffer(struct fb_info *fb_info)
 {
 	struct fb_event event;
-	int i;
+	int i, ret = 0;
 
 	i = fb_info->node;
-	if (!registered_fb[i])
-		return -EINVAL;
+	if (!registered_fb[i]) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	event.info = fb_info;
+	ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
+
+	if (ret) {
+		ret = -EINVAL;
+		goto done;
+	}
 
 	if (fb_info->pixmap.addr &&
 	    (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
@@ -1410,7 +1454,8 @@
 	device_destroy(fb_class, MKDEV(FB_MAJOR, i));
 	event.info = fb_info;
 	fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
-	return 0;
+done:
+	return ret;
 }
 
 /**
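A minimal usage sketch (not part of this patch): the teardown pattern the updated unregister_framebuffer() kerneldoc describes, called from a driver's module_exit(). With the FB_EVENT_FB_UNBIND notification added above, unregistration can now fail with -EINVAL when the console layer refuses to unbind; the "examplefb" names below are placeholders, not real driver code.

#include <linux/module.h>
#include <linux/fb.h>

static struct fb_info *examplefb_info;	/* assumed registered in the driver's probe */

static void __exit examplefb_exit(void)
{
	/* May return -EINVAL if the framebuffer console refuses to unbind. */
	if (unregister_framebuffer(examplefb_info) == 0)
		framebuffer_release(examplefb_info);
	else
		printk(KERN_WARNING "examplefb: still in use, not released\n");
}
module_exit(examplefb_exit);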
diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c
index 70ff55b..6c91c61 100644
--- a/drivers/video/fm2fb.c
+++ b/drivers/video/fm2fb.c
@@ -195,13 +195,15 @@
 static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
                          u_int transp, struct fb_info *info)
 {
-	if (regno > info->cmap.len)
-		return 1;
-	red >>= 8;
-	green >>= 8;
-	blue >>= 8;
+	if (regno < 16) {
+		red >>= 8;
+		green >>= 8;
+		blue >>= 8;
 
-	((u32*)(info->pseudo_palette))[regno] = (red << 16) | (green << 8) | blue;
+		((u32*)(info->pseudo_palette))[regno] = (red << 16) |
+			(green << 8) | blue;
+	}
+
 	return 0;
 }
 
@@ -237,7 +239,7 @@
 	if (!zorro_request_device(z,"fm2fb"))
 		return -ENXIO;
 
-	info = framebuffer_alloc(256 * sizeof(u32), &z->dev);
+	info = framebuffer_alloc(16 * sizeof(u32), &z->dev);
 	if (!info) {
 		zorro_release_device(z);
 		return -ENOMEM;
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index bf0e60b..b9b572b 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -86,7 +86,7 @@
 
 static int ypan, ywrap;
 
-static uint32_t pseudo_palette[256];
+static uint32_t pseudo_palette[16];
 
 static char *mode_option __initdata = NULL;
 
@@ -854,8 +854,7 @@
 	green >>= 8;
 	blue >>= 8;
 
-	switch (info->var.bits_per_pixel) {
-	case 8:
+	if (info->var.bits_per_pixel <= 8) {
 		/* wait for the color map FIFO to have a free entry */
 		for (i = 0; i < 1000 && gbe->cm_fifo >= 63; i++)
 			udelay(10);
@@ -864,23 +863,25 @@
 			return 1;
 		}
 		gbe->cmap[regno] = (red << 24) | (green << 16) | (blue << 8);
-		break;
-	case 15:
-	case 16:
-		red >>= 3;
-		green >>= 3;
-		blue >>= 3;
-		pseudo_palette[regno] =
-			(red << info->var.red.offset) |
-			(green << info->var.green.offset) |
-			(blue << info->var.blue.offset);
-		break;
-	case 32:
-		pseudo_palette[regno] =
-			(red << info->var.red.offset) |
-			(green << info->var.green.offset) |
-			(blue << info->var.blue.offset);
-		break;
+	} else if (regno < 16) {
+		switch (info->var.bits_per_pixel) {
+		case 15:
+		case 16:
+			red >>= 3;
+			green >>= 3;
+			blue >>= 3;
+			pseudo_palette[regno] =
+				(red << info->var.red.offset) |
+				(green << info->var.green.offset) |
+				(blue << info->var.blue.offset);
+			break;
+		case 32:
+			pseudo_palette[regno] =
+				(red << info->var.red.offset) |
+				(green << info->var.green.offset) |
+				(blue << info->var.blue.offset);
+			break;
+		}
 	}
 
 	return 0;
diff --git a/drivers/video/i810/i810.h b/drivers/video/i810/i810.h
index 889e4ea..328ae6c 100644
--- a/drivers/video/i810/i810.h
+++ b/drivers/video/i810/i810.h
@@ -266,7 +266,7 @@
 	struct i810fb_i2c_chan   chan[3];
 	struct mutex		 open_lock;
 	unsigned int		 use_count;
-	u32 pseudo_palette[17];
+	u32 pseudo_palette[16];
 	unsigned long mmio_start_phys;
 	u8 __iomem *mmio_start_virtual;
 	u8 *edid;
diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h
index 80b94c1..6148300 100644
--- a/drivers/video/intelfb/intelfb.h
+++ b/drivers/video/intelfb/intelfb.h
@@ -302,7 +302,7 @@
 	u32 ring_lockup;
 
 	/* palette */
-	u32 pseudo_palette[17];
+	u32 pseudo_palette[16];
 
 	/* chip info */
 	int pci_chipset;
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig
index 9397bce..9de1c11 100644
--- a/drivers/video/logo/Kconfig
+++ b/drivers/video/logo/Kconfig
@@ -10,6 +10,11 @@
 
 if LOGO
 
+config FB_LOGO_EXTRA
+	bool
+	depends on FB=y
+	default y if SPU_BASE
+
 config LOGO_LINUX_MONO
 	bool "Standard black and white Linux logo"
 	default y
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index b985dfa..a5fc4ed 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -14,6 +14,8 @@
 obj-$(CONFIG_LOGO_SUPERH_CLUT224)	+= logo_superh_clut224.o
 obj-$(CONFIG_LOGO_M32R_CLUT224)		+= logo_m32r_clut224.o
 
+obj-$(CONFIG_SPU_BASE)			+= logo_spe_clut224.o
+
 # How to generate logo's
 
 # Use logo-cfiles to retrieve list of .c files to be built
diff --git a/drivers/video/logo/logo_spe_clut224.ppm b/drivers/video/logo/logo_spe_clut224.ppm
new file mode 100644
index 0000000..d36ad62
--- /dev/null
+++ b/drivers/video/logo/logo_spe_clut224.ppm
@@ -0,0 +1,283 @@
+P3
+40 40
+255
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  2 2 2  6 6 6
+15 15 15  21 21 21  19 19 19  14 14 14  6 6 6  2 2 2
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  2 2 2  21 21 21  55 55 55
+56 56 56  54 54 54  53 53 53  60 60 60  56 56 56  25 25 25
+6 6 6  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  2 2 2  27 27 27  62 62 62  17 17 19
+2 2 6  2 2 6  2 2 6  2 2 6  16 16 18  57 57 57
+45 45 45  8 8 8  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 1  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  16 16 16  62 62 62  8 8 10  2 2 6
+2 2 6  2 2 6  2 2 6  12 12 14  67 67 67  16 16 17
+45 45 45  41 41 41  4 4 4  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  2 2 2  35 35 35  40 40 40  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  15 15 17  70 70 70  27 27 27
+3 3 6  62 62 62  20 20 20  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  4 4 4  58 58 58  12 12 14  2 2 6  2 2 6
+2 2 6  2 2 6  2 2 6  4 4 7  4 4 7  2 2 6
+2 2 6  34 34 36  40 40 40  3 3 3  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  7 7 7  64 64 64  2 2 6  5 5 5  17 17 17
+3 3 6  2 2 6  2 2 6  15 15 15  21 21 21  7 7 10
+2 2 6  8 8 10  62 62 62  6 6 6  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  7 7 7  66 66 66  5 5 8  122 122 122  122 122 122
+9 9 11  3 3 6  104 96 81  179 179 179  122 122 122  13 13 13
+2 2 6  2 2 6  67 67 67  10 10 10  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  7 7 7  65 65 65  41 41 43  152 149 142  192 191 189
+48 48 49  23 23 24  228 210 210  86 86 86  192 191 189  59 59 61
+2 2 6  2 2 6  64 64 64  14 14 14  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 1  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  7 7 7  66 66 66  59 59 59  59 59 61  86 86 86
+99 84 50  78 66 28  152 149 142  5 5 8  122 122 122  104 96 81
+2 2 6  2 2 6  67 67 67  14 14 14  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  5 5 5  63 63 63  24 24 24  152 149 142  175 122 13
+238 184 12  220 170 13  226 181 52  112 86 32  194 165 151  46 46 47
+2 2 6  2 2 6  65 65 65  17 17 17  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  5 5 5  59 59 59  21 21 21  175 122 13  231 174 11
+240 192 13  237 183 61  240 192 13  240 192 13  234 179 16  81 64 9
+2 2 6  2 2 6  63 63 63  25 25 25  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  5 5 5  54 54 54  51 48 39  189 138 9  238 184 12
+240 192 13  240 192 13  240 192 13  215 161 11  207 152 19  81 64 9
+16 16 18  5 5 8  40 40 40  44 44 44  4 4 4  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  5 5 5  59 59 59  27 27 27  126 107 64  187 136 12
+220 170 13  201 147 20  189 138 9  198 154 46  199 182 125  70 70 70
+27 27 27  104 96 81  12 12 14  70 70 70  16 16 16  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  17 17 17  70 70 70  12 12 12  168 168 168  174 135 135
+175 122 13  175 122 13  178 151 83  192 191 189  233 233 233  179 179 179
+3 3 6  29 29 31  3 3 6  41 41 41  44 44 44  5 5 5
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+8 8 8  53 53 53  44 44 44  59 59 59  238 238 238  192 191 189
+192 191 189  192 191 189  221 205 205  240 240 240  253 253 253  253 253 253
+70 70 70  2 2 6  2 2 6  5 5 8  67 67 67  22 22 22
+2 2 2  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  5 5 5
+38 38 38  56 56 56  7 7 9  221 205 205  253 253 253  233 233 233
+221 205 205  233 233 233  251 251 251  253 253 253  253 253 253  253 253 253
+192 191 189  2 2 6  2 2 6  2 2 6  25 25 25  64 64 64
+15 15 15  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  2 2 2  27 27 27
+66 66 66  7 7 9  86 86 86  252 252 252  253 253 253  253 253 253
+252 252 252  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+244 244 244  19 19 21  2 2 6  2 2 6  2 2 6  38 38 38
+54 54 54  10 10 10  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  14 14 14  62 62 62
+10 10 12  3 3 6  122 122 122  235 235 235  251 251 251  248 248 248
+235 235 235  248 248 248  252 252 252  246 246 246  233 233 233  237 228 228
+223 207 207  70 70 70  2 2 6  2 2 6  2 2 6  2 2 6
+46 46 47  38 38 38  4 4 4  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  2 2 2  33 33 33  44 44 44
+4 4 7  9 9 11  168 168 168  240 240 240  252 252 252  252 252 252
+246 246 246  253 253 253  253 253 253  251 251 251  245 241 241  233 233 233
+221 205 205  192 191 189  29 29 31  27 27 27  9 9 12  2 2 6
+3 3 6  65 65 65  15 15 15  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  6 6 6  59 59 59  19 19 21
+24 24 24  86 86 86  249 249 249  253 253 253  253 253 253  253 253 253
+253 253 253  228 210 210  241 230 230  253 253 253  253 253 253  253 253 253
+251 251 251  228 210 210  152 149 142  5 5 8  27 27 27  4 4 7
+2 2 6  46 46 47  34 34 34  2 2 2  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  16 16 16  67 67 67  19 19 21
+12 12 14  223 207 207  254 20 20  254 20 20  253 127 127  242 223 223
+254 20 20  253 127 127  254 48 48  242 223 223  254 86 86  254 20 20
+254 20 20  253 137 137  233 233 233  32 32 32  35 35 35  23 23 24
+2 2 6  15 15 15  60 60 60  6 6 6  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  4 4 4  38 38 38  48 48 49  22 22 22
+86 86 86  253 253 253  254 20 20  241 230 230  227 216 186  253 137 137
+253 137 137  253 253 253  253 137 137  253 137 137  254 48 48  253 253 253
+253 253 253  253 253 253  253 253 253  62 62 62  2 2 6  23 23 24
+2 2 6  2 2 6  62 62 62  17 17 17  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  14 14 14  70 70 70  14 14 14  16 16 18
+179 179 179  253 253 253  227 216 186  254 48 48  240 219 160  253 127 127
+254 20 20  253 137 137  254 86 86  231 203 141  254 20 20  254 20 20
+253 137 137  253 253 253  253 253 253  104 96 81  2 2 6  23 23 24
+2 2 6  2 2 6  46 46 47  27 27 27  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  4 4 4  39 39 39  42 42 43  19 19 21  13 13 13
+228 210 210  242 223 223  253 253 253  242 223 223  253 127 127  253 127 127
+253 127 127  253 127 127  253 137 137  253 253 253  254 48 48  253 253 253
+228 210 210  253 253 253  253 253 253  122 122 122  2 2 6  19 19 19
+2 2 6  2 2 6  39 39 39  38 38 38  3 3 3  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  8 8 8  60 60 60  3 3 6  33 33 33  38 38 38
+253 137 137  254 86 86  253 137 137  254 86 86  253 137 137  209 197 168
+253 127 127  253 253 253  253 253 253  253 253 253  253 127 127  254 86 86
+254 86 86  253 137 137  253 253 253  122 122 122  2 2 6  17 17 17
+2 2 6  2 2 6  34 34 36  42 42 43  3 3 3  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  13 13 13  59 59 59  2 2 6  9 9 12  56 56 56
+252 252 252  240 219 160  253 137 137  240 219 160  253 253 253  237 228 228
+254 86 86  253 253 253  253 253 253  253 253 253  253 253 253  242 223 223
+227 216 186  249 249 249  253 253 253  122 122 122  16 16 17  17 17 17
+12 12 14  3 3 6  39 39 39  38 38 38  3 3 3  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  2 2 2
+5 5 5  22 22 22  104 96 81  187 136 12  207 152 19  51 48 39
+221 205 205  253 253 253  253 253 253  253 253 253  253 253 253  240 240 240
+250 247 243  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  250 247 243  240 219 160  99 84 50  5 5 8  2 2 6
+7 7 9  46 46 47  58 58 58  35 35 35  3 3 3  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  8 8 8  33 33 33
+58 58 58  86 86 86  170 136 53  239 182 13  246 190 14  220 170 13
+44 38 29  179 179 179  253 253 253  253 253 253  253 253 253  240 240 240
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+253 253 253  240 219 160  240 192 13  112 86 32  2 2 6  2 2 6
+3 3 6  41 33 20  220 170 13  53 53 53  4 4 4  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  2 2 2  32 32 32  150 116 44
+215 161 11  215 161 11  228 170 11  245 188 14  246 190 14  246 190 14
+187 136 12  9 9 11  122 122 122  251 251 251  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+248 248 248  211 196 135  239 182 13  175 122 13  6 5 6  2 2 6
+16 14 12  187 136 12  238 184 12  84 78 65  10 10 10  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  4 4 4  53 53 53  207 152 19
+242 185 13  245 188 14  246 190 14  246 190 14  246 190 14  246 190 14
+240 192 13  81 64 9  2 2 6  86 86 86  244 244 244  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+233 233 233  199 182 125  231 174 11  207 152 19  175 122 13  175 122 13
+201 147 20  239 182 13  244 187 14  150 116 44  35 35 35  6 6 6
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  5 5 5  53 53 53  201 147 20
+242 185 13  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  220 170 13  13 11 10  2 2 6  152 149 142  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+235 235 235  199 182 125  228 170 11  234 177 12  226 168 11  226 168 11
+234 177 12  246 190 14  246 190 14  234 179 16  126 107 64  36 36 36
+6 6 6  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  3 3 3  48 48 49  189 142 35
+242 185 13  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  140 112 39  36 36 36  192 191 189  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  253 253 253  253 253 253
+192 191 189  112 86 32  226 168 11  244 187 14  244 187 14  244 187 14
+245 188 14  246 190 14  246 190 14  246 190 14  242 185 13  150 116 44
+27 27 27  2 2 2  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  6 6 6  58 58 58  189 142 35
+239 182 13  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  239 188 14  209 197 168  253 253 253  253 253 253
+253 253 253  253 253 253  253 253 253  253 253 253  252 252 252  168 168 168
+16 16 18  97 67 8  228 170 11  245 188 14  246 190 14  246 190 14
+246 190 14  246 190 14  246 190 14  246 190 14  244 187 14  198 154 46
+35 35 35  3 3 3  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  13 13 13  84 78 65  215 161 11
+244 187 14  246 190 14  246 190 14  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  238 184 12  187 136 12  168 168 168  244 244 244
+253 253 253  252 252 252  240 240 240  179 179 179  67 67 67  2 2 6
+2 2 6  97 67 8  228 170 11  246 190 14  246 190 14  246 190 14
+246 190 14  246 190 14  245 188 14  234 177 12  189 142 35  86 77 61
+16 16 16  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  13 13 13  103 92 56  207 152 19
+228 170 11  234 177 12  239 182 13  242 186 14  245 188 14  246 190 14
+246 190 14  246 190 14  239 182 13  189 138 9  41 33 20  10 10 12
+30 30 31  23 23 24  5 5 8  2 2 6  2 2 6  2 2 6
+4 4 6  112 86 32  215 161 11  245 188 14  246 190 14  245 188 14
+239 182 13  228 170 11  189 142 35  104 96 81  48 48 49  17 17 17
+2 2 2  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  5 5 5  39 39 39  103 92 56
+141 109 44  175 122 13  187 136 12  189 138 9  207 152 19  228 170 11
+239 182 13  239 182 13  215 161 11  175 122 13  41 33 20  2 2 6
+15 15 17  20 20 22  20 20 22  20 20 22  20 20 22  8 8 10
+4 4 6  97 67 8  189 138 9  231 174 11  239 182 13  226 168 11
+189 138 9  126 107 64  59 59 59  21 21 21  5 5 5  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  5 5 5  17 17 17
+34 34 34  57 57 57  84 78 65  103 92 56  125 101 41  140 112 39
+175 122 13  175 122 13  175 122 13  97 67 8  72 67 58  84 78 65
+60 60 60  56 56 56  56 56 56  56 56 56  57 57 57  65 65 65
+86 86 86  95 73 34  175 122 13  187 136 12  187 136 12  175 122 13
+103 92 56  41 41 41  10 10 10  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+2 2 2  4 4 4  12 12 12  24 24 24  40 40 40  70 70 70
+86 77 61  95 73 34  88 72 41  72 67 58  36 36 36  10 10 10
+5 5 5  5 5 5  5 5 5  4 4 4  5 5 5  6 6 6
+22 22 22  61 61 59  88 72 41  112 86 32  112 86 32  84 78 65
+32 32 32  6 6 6  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  3 3 3  10 10 10
+21 21 21  33 33 33  31 31 31  16 16 16  2 2 2  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+2 2 2  12 12 12  30 30 31  40 40 40  32 32 32  16 16 16
+2 2 2  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index f7d647d..aa8c714 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -170,7 +170,7 @@
 };
 
 static struct fb_info fb_info;
-static u32 pseudo_palette[17];
+static u32 pseudo_palette[16];
 static int inverse   = 0;
 static int vidtest   = 0;
 
@@ -529,56 +529,63 @@
 	if (regno >= fb_info->cmap.len)
 		return 1;
 
-	switch (fb_info->var.bits_per_pixel) {
-	case 1:
-		/* We shouldn't get here */
-		break;
-	case 2:
-	case 4:
-	case 8:
-		if (macfb_setpalette)
-			macfb_setpalette(regno, red, green, blue, fb_info);
-		else
-			return 1;
-		break;
-	case 16:
-		if (fb_info->var.red.offset == 10) {
-			/* 1:5:5:5 */
-			((u32*) (fb_info->pseudo_palette))[regno] =
+	if (fb_info->var.bits_per_pixel <= 8) {
+		switch (fb_info->var.bits_per_pixel) {
+		case 1:
+			/* We shouldn't get here */
+			break;
+		case 2:
+		case 4:
+		case 8:
+			if (macfb_setpalette)
+				macfb_setpalette(regno, red, green, blue,
+						 fb_info);
+			else
+				return 1;
+			break;
+		}
+	} else if (regno < 16) {
+		switch (fb_info->var.bits_per_pixel) {
+		case 16:
+			if (fb_info->var.red.offset == 10) {
+				/* 1:5:5:5 */
+				((u32*) (fb_info->pseudo_palette))[regno] =
 					((red   & 0xf800) >>  1) |
 					((green & 0xf800) >>  6) |
 					((blue  & 0xf800) >> 11) |
 					((transp != 0) << 15);
-		} else {
-			/* 0:5:6:5 */
-			((u32*) (fb_info->pseudo_palette))[regno] =
+			} else {
+				/* 0:5:6:5 */
+				((u32*) (fb_info->pseudo_palette))[regno] =
 					((red   & 0xf800)      ) |
 					((green & 0xfc00) >>  5) |
 					((blue  & 0xf800) >> 11);
+			}
+			break;
+			/* I'm pretty sure that one or the other of these
+			   doesn't exist on 68k Macs */
+		case 24:
+			red   >>= 8;
+			green >>= 8;
+			blue  >>= 8;
+			((u32 *)(fb_info->pseudo_palette))[regno] =
+				(red   << fb_info->var.red.offset)   |
+				(green << fb_info->var.green.offset) |
+				(blue  << fb_info->var.blue.offset);
+			break;
+		case 32:
+			red   >>= 8;
+			green >>= 8;
+			blue  >>= 8;
+			((u32 *)(fb_info->pseudo_palette))[regno] =
+				(red   << fb_info->var.red.offset)   |
+				(green << fb_info->var.green.offset) |
+				(blue  << fb_info->var.blue.offset);
+			break;
 		}
-		break;	
-		/* I'm pretty sure that one or the other of these
-		   doesn't exist on 68k Macs */
-	case 24:
-		red   >>= 8;
-		green >>= 8;
-		blue  >>= 8;
-		((u32 *)(fb_info->pseudo_palette))[regno] =
-			(red   << fb_info->var.red.offset)   |
-			(green << fb_info->var.green.offset) |
-			(blue  << fb_info->var.blue.offset);
-		break;
-	case 32:
-		red   >>= 8;
-		green >>= 8;
-		blue  >>= 8;
-		((u32 *)(fb_info->pseudo_palette))[regno] =
-			(red   << fb_info->var.red.offset)   |
-			(green << fb_info->var.green.offset) |
-			(blue  << fb_info->var.blue.offset);
-		break;
-    }
-    return 0;
+	}
+
+	return 0;
 }
 
 static struct fb_ops macfb_ops = {
diff --git a/drivers/video/macmodes.c b/drivers/video/macmodes.c
index ab21495..083f603 100644
--- a/drivers/video/macmodes.c
+++ b/drivers/video/macmodes.c
@@ -369,9 +369,8 @@
  *
  */
 
-int __devinit mac_find_mode(struct fb_var_screeninfo *var,
-			    struct fb_info *info, const char *mode_option,
-			    unsigned int default_bpp)
+int mac_find_mode(struct fb_var_screeninfo *var, struct fb_info *info,
+		  const char *mode_option, unsigned int default_bpp)
 {
     const struct fb_videomode *db = NULL;
     unsigned int dbsize = 0;
diff --git a/drivers/video/macmodes.h b/drivers/video/macmodes.h
index babeb81..b86ba08 100644
--- a/drivers/video/macmodes.h
+++ b/drivers/video/macmodes.h
@@ -55,10 +55,10 @@
 extern int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode,
 			    int *cmode);
 extern int mac_map_monitor_sense(int sense);
-extern int __devinit mac_find_mode(struct fb_var_screeninfo *var,
-				   struct fb_info *info,
-				   const char *mode_option,
-				   unsigned int default_bpp);
+extern int mac_find_mode(struct fb_var_screeninfo *var,
+			 struct fb_info *info,
+			 const char *mode_option,
+			 unsigned int default_bpp);
 
 
     /*
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
index c57aaad..3660d26 100644
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/matrox/matroxfb_accel.c
@@ -91,7 +91,6 @@
 	for (i = 0; i < 16; i++) {
 		pal[i] = i * 0x11111111U;
 	}
-	pal[i] = 0xFFFFFFFF;
 }
 
 static inline void matrox_cfb8_pal(u_int32_t* pal) {
@@ -100,7 +99,6 @@
 	for (i = 0; i < 16; i++) {
 		pal[i] = i * 0x01010101U;
 	}
-	pal[i] = 0x0F0F0F0F;
 }
 
 static void matroxfb_copyarea(struct fb_info* info, const struct fb_copyarea* area);
@@ -145,13 +143,10 @@
 					ACCESS_FBINFO(fbops).fb_imageblit = matroxfb_imageblit;
 				}
 				break;
-		case 16:	if (ACCESS_FBINFO(fbcon).var.green.length == 5) {
+		case 16:	if (ACCESS_FBINFO(fbcon).var.green.length == 5)
 					maccess = 0xC0000001;
-					ACCESS_FBINFO(cmap[16]) = 0x7FFF7FFF;
-				} else {
+				else
 					maccess = 0x40000001;
-					ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
-				}
 				mopmode = M_OPMODE_16BPP;
 				if (accel) {
 					ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
@@ -161,7 +156,6 @@
 				break;
 		case 24:	maccess = 0x00000003;
 				mopmode = M_OPMODE_24BPP;
-				ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
 				if (accel) {
 					ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
 					ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect;
@@ -170,7 +164,6 @@
 				break;
 		case 32:	maccess = 0x00000002;
 				mopmode = M_OPMODE_32BPP;
-				ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
 				if (accel) {
 					ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
 					ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect;
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 886e475..86ca7b1 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -679,6 +679,8 @@
 		mga_outb(M_DAC_VAL, blue);
 		break;
 	case 16:
+		if (regno >= 16)
+			break;
 		{
 			u_int16_t col =
 				(red << ACCESS_FBINFO(fbcon).var.red.offset)     |
@@ -690,6 +692,8 @@
 		break;
 	case 24:
 	case 32:
+		if (regno >= 16)
+			break;
 		ACCESS_FBINFO(cmap[regno]) =
 			(red   << ACCESS_FBINFO(fbcon).var.red.offset)   |
 			(green << ACCESS_FBINFO(fbcon).var.green.offset) |
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index 9c25c2f..d59577c 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -518,7 +518,7 @@
 					dll:1;
 				      } memory;
 			      } values;
-	u_int32_t cmap[17];
+	u_int32_t cmap[16];
 };
 
 #define info2minfo(info) container_of(info, struct matrox_fb_info, fbcon)
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 03ae55b..4b3344e 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -163,11 +163,6 @@
 	ACCESS_FBINFO(hw).crtc2.ctl = 0x00000004;
 }
 
-static void matroxfb_dh_cfbX_init(struct matroxfb_dh_fb_info* m2info) {
-	/* no acceleration for secondary head... */
-	m2info->cmap[16] = 0xFFFFFFFF;
-}
-
 static void matroxfb_dh_pan_var(struct matroxfb_dh_fb_info* m2info,
 		struct fb_var_screeninfo* var) {
 	unsigned int pos;
@@ -385,7 +380,6 @@
 			}
 		}
 		up_read(&ACCESS_FBINFO(altout).lock);
-		matroxfb_dh_cfbX_init(m2info);
 	}
 	m2info->initialized = 1;
 	return 0;
diff --git a/drivers/video/matrox/matroxfb_crtc2.h b/drivers/video/matrox/matroxfb_crtc2.h
index 1771776..1005582 100644
--- a/drivers/video/matrox/matroxfb_crtc2.h
+++ b/drivers/video/matrox/matroxfb_crtc2.h
@@ -28,7 +28,7 @@
 
 	unsigned int		interlaced:1;
 
-	u_int32_t cmap[17];
+	u_int32_t cmap[16];
 };
 
 #endif /* __MATROXFB_CRTC2_H__ */
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 5d29a26..de0d755 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -273,8 +273,11 @@
 			}
 		}
 	}
+
+	/* if h2/post/in/feed have not been assigned, return zero (error) */
 	if (besth2 < 2)
 		return 0;
+
 	dprintk(KERN_ERR "clk: %02X %02X %02X %d %d\n", *in, *feed, *post, fxtal, fwant);
 	return fxtal * (*feed) / (*in) * ctl->den;
 }
@@ -284,7 +287,7 @@
 		unsigned int* in, unsigned int* feed, unsigned int* post,
 		unsigned int* htotal2) {
 	unsigned int fvco;
-	unsigned int p;
+	unsigned int uninitialized_var(p);
 
 	fvco = matroxfb_PLL_mavenclock(&maven1000_pll, ctl, htotal, vtotal, in, feed, &p, htotal2);
 	if (!fvco)
@@ -715,7 +718,9 @@
 	m->regs[0x82] = 0x81;
 
 	for (x = 0; x < 8; x++) {
-		unsigned int a, b, c, h2;
+		unsigned int c;
+		unsigned int uninitialized_var(a), uninitialized_var(b),
+			     uninitialized_var(h2);
 		unsigned int h = ht + 2 + x;
 
 		if (!matroxfb_mavenclock((m->mode == MATROXFB_OUTPUT_MODE_PAL) ? &maven_PAL : &maven_NTSC, h, vt, &a, &b, &c, &h2)) {
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index aff11bb..d1a1054 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -150,8 +150,7 @@
 		M = pll & 0xFF;
 		N = (pll >> 8) & 0xFF;
 		if (((par->Chipset & 0xfff0) == 0x0290) ||
-			((par->Chipset & 0xfff0) == 0x0390) ||
-			((par->Chipset & 0xfff0) == 0x02E0)) {
+		    ((par->Chipset & 0xfff0) == 0x0390)) {
 			MB = 1;
 			NB = 1;
 		} else {
@@ -161,7 +160,7 @@
 		*MClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
 
 		pll = NV_RD32(par->PMC, 0x4000);
-		P = (pll >> 16) & 0x03;
+		P = (pll >> 16) & 0x07;
 		pll = NV_RD32(par->PMC, 0x4004);
 		M = pll & 0xFF;
 		N = (pll >> 8) & 0xFF;
@@ -892,11 +891,17 @@
 		state->general = bpp == 16 ? 0x00101100 : 0x00100100;
 		state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00;
 		break;
+	case NV_ARCH_40:
+		if (!par->FlatPanel)
+			state->control = NV_RD32(par->PRAMDAC0, 0x0580) &
+				0xeffffeff;
+		/* fallthrough */
 	case NV_ARCH_10:
 	case NV_ARCH_20:
 	case NV_ARCH_30:
 	default:
-		if ((par->Chipset & 0xfff0) == 0x0240) {
+		if ((par->Chipset & 0xfff0) == 0x0240 ||
+		    (par->Chipset & 0xfff0) == 0x03d0) {
 			state->arbitration0 = 256;
 			state->arbitration1 = 0x0480;
 		} else if (((par->Chipset & 0xffff) == 0x01A0) ||
@@ -939,7 +944,7 @@
 
 void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
 {
-	int i;
+	int i, j;
 
 	NV_WR32(par->PMC, 0x0140, 0x00000000);
 	NV_WR32(par->PMC, 0x0200, 0xFFFF00FF);
@@ -951,7 +956,8 @@
 	NV_WR32(par->PTIMER, 0x0100 * 4, 0xFFFFFFFF);
 
 	if (par->Architecture == NV_ARCH_04) {
-		NV_WR32(par->PFB, 0x0200, state->config);
+		if (state)
+			NV_WR32(par->PFB, 0x0200, state->config);
 	} else if ((par->Architecture < NV_ARCH_40) ||
 		   (par->Chipset & 0xfff0) == 0x0040) {
 		for (i = 0; i < 8; i++) {
@@ -964,8 +970,9 @@
 
 		if (((par->Chipset & 0xfff0) == 0x0090) ||
 		    ((par->Chipset & 0xfff0) == 0x01D0) ||
-		    ((par->Chipset & 0xfff0) == 0x02E0) ||
-		    ((par->Chipset & 0xfff0) == 0x0290))
+		    ((par->Chipset & 0xfff0) == 0x0290) ||
+		    ((par->Chipset & 0xfff0) == 0x0390) ||
+		    ((par->Chipset & 0xfff0) == 0x03D0))
 			regions = 15;
 		for(i = 0; i < regions; i++) {
 			NV_WR32(par->PFB, 0x0600 + (i * 0x10), 0);
@@ -1206,16 +1213,20 @@
 			NV_WR32(par->PGRAPH, 0x0608, 0xFFFFFFFF);
 		} else {
 			if (par->Architecture >= NV_ARCH_40) {
-				u32 tmp;
-
 				NV_WR32(par->PGRAPH, 0x0084, 0x401287c0);
 				NV_WR32(par->PGRAPH, 0x008C, 0x60de8051);
 				NV_WR32(par->PGRAPH, 0x0090, 0x00008000);
 				NV_WR32(par->PGRAPH, 0x0610, 0x00be3c5f);
+				NV_WR32(par->PGRAPH, 0x0bc4,
+					NV_RD32(par->PGRAPH, 0x0bc4) |
+					0x00008000);
 
-				tmp = NV_RD32(par->REGS, 0x1540) & 0xff;
-				for(i = 0; tmp && !(tmp & 1); tmp >>= 1, i++);
-				NV_WR32(par->PGRAPH, 0x5000, i);
+				j = NV_RD32(par->REGS, 0x1540) & 0xff;
+
+				if (j) {
+					for (i = 0; !(j & 1); j >>= 1, i++);
+					NV_WR32(par->PGRAPH, 0x5000, i);
+				}
 
 				if ((par->Chipset & 0xfff0) == 0x0040) {
 					NV_WR32(par->PGRAPH, 0x09b0,
@@ -1250,6 +1261,7 @@
 				case 0x0160:
 				case 0x01D0:
 				case 0x0240:
+				case 0x03D0:
 					NV_WR32(par->PMC, 0x1700,
 						NV_RD32(par->PFB, 0x020C));
 					NV_WR32(par->PMC, 0x1704, 0);
@@ -1269,7 +1281,6 @@
 						0x00000108);
 					break;
 				case 0x0220:
-				case 0x0230:
 					NV_WR32(par->PGRAPH, 0x0860, 0);
 					NV_WR32(par->PGRAPH, 0x0864, 0);
 					NV_WR32(par->PRAMDAC, 0x0608,
@@ -1277,8 +1288,8 @@
 						0x00100000);
 					break;
 				case 0x0090:
-				case 0x02E0:
 				case 0x0290:
+				case 0x0390:
 					NV_WR32(par->PRAMDAC, 0x0608,
 						NV_RD32(par->PRAMDAC, 0x0608) |
 						0x00100000);
@@ -1355,8 +1366,9 @@
 			} else {
 				if (((par->Chipset & 0xfff0) == 0x0090) ||
 				    ((par->Chipset & 0xfff0) == 0x01D0) ||
-				    ((par->Chipset & 0xfff0) == 0x02E0) ||
-				    ((par->Chipset & 0xfff0) == 0x0290)) {
+				    ((par->Chipset & 0xfff0) == 0x0290) ||
+				    ((par->Chipset & 0xfff0) == 0x0390) ||
+				    ((par->Chipset & 0xfff0) == 0x03D0)) {
 					for (i = 0; i < 60; i++) {
 						NV_WR32(par->PGRAPH,
 							0x0D00 + i*4,
@@ -1407,8 +1419,8 @@
 				} else {
 					if ((par->Chipset & 0xfff0) == 0x0090 ||
 					    (par->Chipset & 0xfff0) == 0x01D0 ||
-					    (par->Chipset & 0xfff0) == 0x02E0 ||
-					    (par->Chipset & 0xfff0) == 0x0290) {
+					    (par->Chipset & 0xfff0) == 0x0290 ||
+					    (par->Chipset & 0xfff0) == 0x0390) {
 						NV_WR32(par->PGRAPH, 0x0DF0,
 							NV_RD32(par->PFB, 0x0200));
 						NV_WR32(par->PGRAPH, 0x0DF4,
@@ -1495,6 +1507,12 @@
 	NV_WR32(par->PFIFO, 0x0494 * 4, 0x00000001);
 	NV_WR32(par->PFIFO, 0x0495 * 4, 0x00000001);
 	NV_WR32(par->PFIFO, 0x0140 * 4, 0x00000001);
+
+	if (!state) {
+		par->CurrentState = NULL;
+		return;
+	}
+
 	if (par->Architecture >= NV_ARCH_10) {
 		if (par->twoHeads) {
 			NV_WR32(par->PCRTC0, 0x0860, state->head);
@@ -1566,6 +1584,9 @@
 	VGA_WR08(par->PCIO, 0x03D5, state->interlace);
 
 	if (!par->FlatPanel) {
+		if (par->Architecture >= NV_ARCH_40)
+			NV_WR32(par->PRAMDAC0, 0x0580, state->control);
+
 		NV_WR32(par->PRAMDAC0, 0x050C, state->pllsel);
 		NV_WR32(par->PRAMDAC0, 0x0508, state->vpll);
 		if (par->twoHeads)
@@ -1631,6 +1652,9 @@
 	state->scale = NV_RD32(par->PRAMDAC, 0x0848);
 	state->config = NV_RD32(par->PFB, 0x0200);
 
+	if (par->Architecture >= NV_ARCH_40 && !par->FlatPanel)
+		state->control  = NV_RD32(par->PRAMDAC0, 0x0580);
+
 	if (par->Architecture >= NV_ARCH_10) {
 		if (par->twoHeads) {
 			state->head = NV_RD32(par->PCRTC0, 0x0860);
diff --git a/drivers/video/nvidia/nv_setup.c b/drivers/video/nvidia/nv_setup.c
index 707e2c8..82579d3 100644
--- a/drivers/video/nvidia/nv_setup.c
+++ b/drivers/video/nvidia/nv_setup.c
@@ -166,11 +166,13 @@
 static int NVIsConnected(struct nvidia_par *par, int output)
 {
 	volatile u32 __iomem *PRAMDAC = par->PRAMDAC0;
-	u32 reg52C, reg608;
+	u32 reg52C, reg608, dac0_reg608 = 0;
 	int present;
 
-	if (output)
-		PRAMDAC += 0x800;
+	if (output) {
+		dac0_reg608 = NV_RD32(PRAMDAC, 0x0608);
+		PRAMDAC += 0x800;
+	}
 
 	reg52C = NV_RD32(PRAMDAC, 0x052C);
 	reg608 = NV_RD32(PRAMDAC, 0x0608);
@@ -194,8 +196,8 @@
 	else
 		printk("nvidiafb: CRTC%i analog not found\n", output);
 
-	NV_WR32(par->PRAMDAC0, 0x0608, NV_RD32(par->PRAMDAC0, 0x0608) &
-		0x0000EFFF);
+	if (output)
+		NV_WR32(par->PRAMDAC0, 0x0608, dac0_reg608);
 
 	NV_WR32(PRAMDAC, 0x052C, reg52C);
 	NV_WR32(PRAMDAC, 0x0608, reg608);
diff --git a/drivers/video/nvidia/nv_type.h b/drivers/video/nvidia/nv_type.h
index 38f7cc0..2fdf77ec3 100644
--- a/drivers/video/nvidia/nv_type.h
+++ b/drivers/video/nvidia/nv_type.h
@@ -86,6 +86,7 @@
 	u32 timingV;
 	u32 displayV;
 	u32 crtcSync;
+	u32 control;
 } RIVA_HW_STATE;
 
 struct riva_regs {
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 41f63658..a7fe214 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -674,6 +674,7 @@
 		info->fbops->fb_sync = nvidiafb_sync;
 		info->pixmap.scan_align = 4;
 		info->flags &= ~FBINFO_HWACCEL_DISABLED;
+		info->flags |= FBINFO_READS_FAST;
 		NVResetGraphics(info);
 	} else {
 		info->fbops->fb_imageblit = cfb_imageblit;
@@ -682,6 +683,7 @@
 		info->fbops->fb_sync = NULL;
 		info->pixmap.scan_align = 1;
 		info->flags |= FBINFO_HWACCEL_DISABLED;
+		info->flags &= ~FBINFO_READS_FAST;
 	}
 
 	par->cursor_reset = 1;
@@ -1193,7 +1195,8 @@
 
 	printk(KERN_INFO PFX "Device ID: %x \n", id);
 
-	if ((id & 0xfff0) == 0x00f0) {
+	if ((id & 0xfff0) == 0x00f0 ||
+	    (id & 0xfff0) == 0x02e0) {
 		/* pci-e */
 		id = NV_RD32(par->REGS, 0x1800);
 
@@ -1238,18 +1241,16 @@
 	case 0x0040:		/* GeForce 6800 */
 	case 0x00C0:		/* GeForce 6800 */
 	case 0x0120:		/* GeForce 6800 */
-	case 0x0130:
 	case 0x0140:		/* GeForce 6600 */
 	case 0x0160:		/* GeForce 6200 */
 	case 0x01D0:		/* GeForce 7200, 7300, 7400 */
-	case 0x02E0:		/* GeForce 7300 GT */
 	case 0x0090:		/* GeForce 7800 */
 	case 0x0210:		/* GeForce 6800 */
 	case 0x0220:		/* GeForce 6200 */
-	case 0x0230:
 	case 0x0240:		/* GeForce 6100 */
 	case 0x0290:		/* GeForce 7900 */
 	case 0x0390:		/* GeForce 7600 */
+	case 0x03D0:
 		arch = NV_ARCH_40;
 		break;
 	case 0x0020:		/* TNT, TNT2 */
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 885b428..452433d 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -271,7 +271,7 @@
 		return;
 	}
 
-	size = sizeof(struct fb_info) + sizeof(u32) * 17;
+	size = sizeof(struct fb_info) + sizeof(u32) * 16;
 
 	info = kmalloc(size, GFP_ATOMIC);
 	
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
new file mode 100644
index 0000000..7f4d25b
--- /dev/null
+++ b/drivers/video/omap/Kconfig
@@ -0,0 +1,58 @@
+config FB_OMAP
+	tristate "OMAP frame buffer support (EXPERIMENTAL)"
+	depends on FB
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	help
+	  Frame buffer driver for OMAP-based boards.
+
+config FB_OMAP_BOOTLOADER_INIT
+	bool "Check bootloader initialization"
+	depends on FB_OMAP
+	help
+	  Say Y here if you want to enable checking whether the bootloader has
+	  already initialized the display controller. In this case the
+	  driver will skip the initialization.
+
+config FB_OMAP_CONSISTENT_DMA_SIZE
+	int "Consistent DMA memory size (MB)"
+	depends on FB_OMAP
+	range 1 14
+	default 2
+	help
+	  Increase the DMA consistent memory size according to your video
+	  memory needs, for example if you want to use multiple planes.
+	  The size must be 2MB aligned.
+	  If unsure say 1.
+
+config FB_OMAP_DMA_TUNE
+	bool "Set DMA SDRAM access priority high"
+	depends on FB_OMAP && ARCH_OMAP1
+	help
+	  On systems in which video memory is in system memory
+	  (SDRAM) this will speed up graphics DMA operations.
+	  If you have such a system and want to use rotation,
+	  answer yes. Answer no if you have dedicated video
+	  memory, or don't use any of the accelerated features.
+
+config FB_OMAP_LCDC_EXTERNAL
+	bool "External LCD controller support"
+	depends on FB_OMAP
+	help
+	  Say Y here if you want to have support for boards with an
+	  external LCD controller connected to the SoSSI/RFBI interface.
+
+config FB_OMAP_LCDC_HWA742
+	bool "Epson HWA742 LCD controller support"
+	depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
+	help
+	  Say Y here if you want to have support for the external
+	  Epson HWA742 LCD controller.
+
+config FB_OMAP_LCDC_BLIZZARD
+	bool "Epson Blizzard LCD controller support"
+	depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
+	help
+	  Say Y here if you want to have support for the external
+	  Epson Blizzard LCD controller.
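The options above are consumed by the driver at build time. A rough, hypothetical sketch (not taken from the omapfb sources) of how a probe path typically honors FB_OMAP_BOOTLOADER_INIT by skipping controller setup when the bootloader already configured the display; both example_ctrl_* helpers are invented placeholders:

/* Hypothetical illustration only -- not actual omapfb code. */
#include <asm/arch/omapfb.h>	/* struct omapfb_device, per this patch */

static int example_ctrl_is_enabled(struct omapfb_device *fbdev);	/* invented */
static int example_ctrl_full_init(struct omapfb_device *fbdev);	/* invented */

static int example_ctrl_init(struct omapfb_device *fbdev)
{
#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
	/* Trust the bootloader's setup if it left the controller running. */
	if (example_ctrl_is_enabled(fbdev))
		return 0;
#endif
	return example_ctrl_full_init(fbdev);
}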
diff --git a/drivers/video/omap/Makefile b/drivers/video/omap/Makefile
new file mode 100644
index 0000000..99da8b6
--- /dev/null
+++ b/drivers/video/omap/Makefile
@@ -0,0 +1,29 @@
+#
+# Makefile for the new OMAP framebuffer device driver
+#
+
+obj-$(CONFIG_FB_OMAP) += omapfb.o
+
+objs-yy := omapfb_main.o
+
+objs-y$(CONFIG_ARCH_OMAP1) += lcdc.o
+objs-y$(CONFIG_ARCH_OMAP2) += dispc.o
+
+objs-$(CONFIG_ARCH_OMAP1)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += sossi.o
+objs-$(CONFIG_ARCH_OMAP2)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += rfbi.o
+
+objs-y$(CONFIG_FB_OMAP_LCDC_HWA742) += hwa742.o
+objs-y$(CONFIG_FB_OMAP_LCDC_BLIZZARD) += blizzard.o
+
+objs-y$(CONFIG_MACH_OMAP_H4) += lcd_h4.o
+objs-y$(CONFIG_MACH_OMAP_H3) += lcd_h3.o
+objs-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o
+objs-y$(CONFIG_MACH_OMAP_PALMTT) += lcd_palmtt.o
+objs-y$(CONFIG_MACH_OMAP_PALMZ71) += lcd_palmz71.o
+objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o
+objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o
+objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
+objs-y$(CONFIG_MACH_SX1) += lcd_sx1.o
+
+omapfb-objs := $(objs-yy)
+
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c
new file mode 100644
index 0000000..e682940
--- /dev/null
+++ b/drivers/video/omap/blizzard.c
@@ -0,0 +1,1568 @@
+/*
+ * Epson Blizzard LCD controller driver
+ *
+ * Copyright (C) 2004-2005 Nokia Corporation
+ * Authors:     Juha Yrjola   <juha.yrjola@nokia.com>
+ *	        Imre Deak     <imre.deak@nokia.com>
+ * YUV support: Jussi Laako   <jussi.laako@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+#include <asm/arch/blizzard.h>
+
+#include "dispc.h"
+
+#define MODULE_NAME				"blizzard"
+
+#define BLIZZARD_REV_CODE			0x00
+#define BLIZZARD_CONFIG				0x02
+#define BLIZZARD_PLL_DIV			0x04
+#define BLIZZARD_PLL_LOCK_RANGE			0x06
+#define BLIZZARD_PLL_CLOCK_SYNTH_0		0x08
+#define BLIZZARD_PLL_CLOCK_SYNTH_1		0x0a
+#define BLIZZARD_PLL_MODE			0x0c
+#define BLIZZARD_CLK_SRC			0x0e
+#define BLIZZARD_MEM_BANK0_ACTIVATE		0x10
+#define BLIZZARD_MEM_BANK0_STATUS		0x14
+#define BLIZZARD_HDISP				0x2a
+#define BLIZZARD_HNDP				0x2c
+#define BLIZZARD_VDISP0				0x2e
+#define BLIZZARD_VDISP1				0x30
+#define BLIZZARD_VNDP				0x32
+#define BLIZZARD_HSW				0x34
+#define BLIZZARD_VSW				0x38
+#define BLIZZARD_DISPLAY_MODE			0x68
+#define BLIZZARD_INPUT_WIN_X_START_0		0x6c
+#define BLIZZARD_DATA_SOURCE_SELECT		0x8e
+#define BLIZZARD_DISP_MEM_DATA_PORT		0x90
+#define BLIZZARD_DISP_MEM_READ_ADDR0		0x92
+#define BLIZZARD_POWER_SAVE			0xE6
+#define BLIZZARD_NDISP_CTRL_STATUS		0xE8
+
+/* Data source select */
+/* For S1D13745 */
+#define BLIZZARD_SRC_WRITE_LCD_BACKGROUND	0x00
+#define BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE	0x01
+#define BLIZZARD_SRC_WRITE_OVERLAY_ENABLE	0x04
+#define BLIZZARD_SRC_DISABLE_OVERLAY		0x05
+/* For S1D13744 */
+#define BLIZZARD_SRC_WRITE_LCD			0x00
+#define BLIZZARD_SRC_BLT_LCD			0x06
+
+#define BLIZZARD_COLOR_RGB565			0x01
+#define BLIZZARD_COLOR_YUV420			0x09
+
+#define BLIZZARD_VERSION_S1D13745		0x01	/* Hailstorm */
+#define BLIZZARD_VERSION_S1D13744		0x02	/* Blizzard */
+
+#define BLIZZARD_AUTO_UPDATE_TIME		(HZ / 20)
+
+/* Reserve 4 request slots for requests in irq context */
+#define REQ_POOL_SIZE			24
+#define IRQ_REQ_POOL_SIZE		4
+
+#define REQ_FROM_IRQ_POOL 0x01
+
+#define REQ_COMPLETE	0
+#define REQ_PENDING	1
+
+struct blizzard_reg_list {
+	int	start;
+	int	end;
+};
+
+/* These need to be saved / restored separately from the rest. */
+static struct blizzard_reg_list blizzard_pll_regs[] = {
+	{
+		.start	= 0x04,		/* Don't save PLL ctrl (0x0C) */
+		.end	= 0x0a,
+	},
+	{
+		.start	= 0x0e,		/* Clock configuration */
+		.end	= 0x0e,
+	},
+};
+
+static struct blizzard_reg_list blizzard_gen_regs[] = {
+	{
+		.start	= 0x18,		/* SDRAM control */
+		.end	= 0x20,
+	},
+	{
+		.start	= 0x28,		/* LCD Panel configuration */
+		.end	= 0x5a,		/* HSSI interface, TV configuration */
+	},
+};
+
+static u8 blizzard_reg_cache[0x5a / 2];
+
+struct update_param {
+	int	plane;
+	int	x, y, width, height;
+	int	out_x, out_y;
+	int	out_width, out_height;
+	int	color_mode;
+	int	bpp;
+	int	flags;
+};
+
+struct blizzard_request {
+	struct list_head entry;
+	unsigned int	 flags;
+
+	int		 (*handler)(struct blizzard_request *req);
+	void		 (*complete)(void *data);
+	void		 *complete_data;
+
+	union {
+		struct update_param	update;
+		struct completion	*sync;
+	} par;
+};
+
+struct plane_info {
+	unsigned long offset;
+	int pos_x, pos_y;
+	int width, height;
+	int out_width, out_height;
+	int scr_width;
+	int color_mode;
+	int bpp;
+};
+
+struct blizzard_struct {
+	enum omapfb_update_mode	update_mode;
+	enum omapfb_update_mode	update_mode_before_suspend;
+
+	struct timer_list	auto_update_timer;
+	int			stop_auto_update;
+	struct omapfb_update_window	auto_update_window;
+	int			enabled_planes;
+	int			vid_nonstd_color;
+	int			vid_scaled;
+	int			last_color_mode;
+	int			zoom_on;
+	int			screen_width;
+	int			screen_height;
+	unsigned		te_connected:1;
+	unsigned		vsync_only:1;
+
+	struct plane_info	plane[OMAPFB_PLANE_NUM];
+
+	struct blizzard_request	req_pool[REQ_POOL_SIZE];
+	struct list_head	pending_req_list;
+	struct list_head	free_req_list;
+	struct semaphore	req_sema;
+	spinlock_t		req_lock;
+
+	unsigned long		sys_ck_rate;
+	struct extif_timings	reg_timings, lut_timings;
+
+	u32			max_transmit_size;
+	u32			extif_clk_period;
+	int			extif_clk_div;
+	unsigned long		pix_tx_time;
+	unsigned long		line_upd_time;
+
+	struct omapfb_device	*fbdev;
+	struct lcd_ctrl_extif	*extif;
+	struct lcd_ctrl		*int_ctrl;
+
+	void			(*power_up)(struct device *dev);
+	void			(*power_down)(struct device *dev);
+
+	int			version;
+} blizzard;
+
+struct lcd_ctrl blizzard_ctrl;
+
+static u8 blizzard_read_reg(u8 reg)
+{
+	u8 data;
+
+	blizzard.extif->set_bits_per_cycle(8);
+	blizzard.extif->write_command(&reg, 1);
+	blizzard.extif->read_data(&data, 1);
+
+	return data;
+}
+
+static void blizzard_write_reg(u8 reg, u8 val)
+{
+	blizzard.extif->set_bits_per_cycle(8);
+	blizzard.extif->write_command(&reg, 1);
+	blizzard.extif->write_data(&val, 1);
+}
+
+static void blizzard_restart_sdram(void)
+{
+	unsigned long tmo;
+
+	blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 0);
+	udelay(50);
+	blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 1);
+	tmo = jiffies + msecs_to_jiffies(200);
+	while (!(blizzard_read_reg(BLIZZARD_MEM_BANK0_STATUS) & 0x01)) {
+		if (time_after(jiffies, tmo)) {
+			dev_err(blizzard.fbdev->dev,
+					"s1d1374x: SDRAM not ready\n");
+			break;
+		}
+		msleep(1);
+	}
+}
+
+static void blizzard_stop_sdram(void)
+{
+	blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 0);
+}
+
+/* Wait until the last window was completely written into the controller's
+ * SDRAM and we can start transferring the next window.
+ */
+static void blizzard_wait_line_buffer(void)
+{
+	unsigned long tmo = jiffies + msecs_to_jiffies(30);
+
+	while (blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS) & (1 << 7)) {
+		if (time_after(jiffies, tmo)) {
+			if (printk_ratelimit())
+				dev_err(blizzard.fbdev->dev,
+					"s1d1374x: line buffer not ready\n");
+			break;
+		}
+	}
+}
+
+/* Wait until the YYC color space converter is idle. */
+static void blizzard_wait_yyc(void)
+{
+	unsigned long tmo = jiffies + msecs_to_jiffies(30);
+
+	while (blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS) & (1 << 4)) {
+		if (time_after(jiffies, tmo)) {
+			if (printk_ratelimit())
+				dev_err(blizzard.fbdev->dev,
+					"s1d1374x: YYC not ready\n");
+			break;
+		}
+	}
+}
+
+static void disable_overlay(void)
+{
+	blizzard_write_reg(BLIZZARD_DATA_SOURCE_SELECT,
+				BLIZZARD_SRC_DISABLE_OVERLAY);
+}
+
+static void set_window_regs(int x_start, int y_start, int x_end, int y_end,
+			    int x_out_start, int y_out_start,
+			    int x_out_end, int y_out_end, int color_mode,
+			    int zoom_off, int flags)
+{
+	u8 tmp[18];
+	u8 cmd;
+
+	x_end--;
+	y_end--;
+	tmp[0] = x_start;
+	tmp[1] = x_start >> 8;
+	tmp[2] = y_start;
+	tmp[3] = y_start >> 8;
+	tmp[4] = x_end;
+	tmp[5] = x_end >> 8;
+	tmp[6] = y_end;
+	tmp[7] = y_end >> 8;
+
+	x_out_end--;
+	y_out_end--;
+	tmp[8]  = x_out_start;
+	tmp[9]  = x_out_start >> 8;
+	tmp[10] = y_out_start;
+	tmp[11] = y_out_start >> 8;
+	tmp[12] = x_out_end;
+	tmp[13] = x_out_end >> 8;
+	tmp[14] = y_out_end;
+	tmp[15] = y_out_end >> 8;
+
+	tmp[16] = color_mode;
+	if (zoom_off && blizzard.version == BLIZZARD_VERSION_S1D13745)
+		tmp[17] = BLIZZARD_SRC_WRITE_LCD_BACKGROUND;
+	else if (flags & OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY)
+		tmp[17] = BLIZZARD_SRC_WRITE_OVERLAY_ENABLE;
+	else
+		tmp[17] = blizzard.version == BLIZZARD_VERSION_S1D13744 ?
+				BLIZZARD_SRC_WRITE_LCD :
+				BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE;
+
+	blizzard.extif->set_bits_per_cycle(8);
+	cmd = BLIZZARD_INPUT_WIN_X_START_0;
+	blizzard.extif->write_command(&cmd, 1);
+	blizzard.extif->write_data(tmp, 18);
+}
+
+static void enable_tearsync(int y, int width, int height, int screen_height,
+			    int out_height, int force_vsync)
+{
+	u8 b;
+
+	b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
+	b |= 1 << 3;
+	blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
+
+	if (likely(blizzard.vsync_only || force_vsync)) {
+		blizzard.extif->enable_tearsync(1, 0);
+		return;
+	}
+
+	if (width * blizzard.pix_tx_time < blizzard.line_upd_time) {
+		blizzard.extif->enable_tearsync(1, 0);
+		return;
+	}
+
+	if ((width * blizzard.pix_tx_time / 1000) * height <
+	    (y + out_height) * (blizzard.line_upd_time / 1000)) {
+		blizzard.extif->enable_tearsync(1, 0);
+		return;
+	}
+
+	blizzard.extif->enable_tearsync(1, y + 1);
+}
+
+static void disable_tearsync(void)
+{
+	u8 b;
+
+	blizzard.extif->enable_tearsync(0, 0);
+	b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
+	b &= ~(1 << 3);
+	blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
+	b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
+}
+
+static inline void set_extif_timings(const struct extif_timings *t);
+
+static inline struct blizzard_request *alloc_req(void)
+{
+	unsigned long flags;
+	struct blizzard_request *req;
+	int req_flags = 0;
+
+	if (!in_interrupt())
+		down(&blizzard.req_sema);
+	else
+		req_flags = REQ_FROM_IRQ_POOL;
+
+	spin_lock_irqsave(&blizzard.req_lock, flags);
+	BUG_ON(list_empty(&blizzard.free_req_list));
+	req = list_entry(blizzard.free_req_list.next,
+			 struct blizzard_request, entry);
+	list_del(&req->entry);
+	spin_unlock_irqrestore(&blizzard.req_lock, flags);
+
+	INIT_LIST_HEAD(&req->entry);
+	req->flags = req_flags;
+
+	return req;
+}
+
+static inline void free_req(struct blizzard_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&blizzard.req_lock, flags);
+
+	list_del(&req->entry);
+	list_add(&req->entry, &blizzard.free_req_list);
+	if (!(req->flags & REQ_FROM_IRQ_POOL))
+		up(&blizzard.req_sema);
+
+	spin_unlock_irqrestore(&blizzard.req_lock, flags);
+}
+
+static void process_pending_requests(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&blizzard.req_lock, flags);
+
+	while (!list_empty(&blizzard.pending_req_list)) {
+		struct blizzard_request *req;
+		void (*complete)(void *);
+		void *complete_data;
+
+		req = list_entry(blizzard.pending_req_list.next,
+				 struct blizzard_request, entry);
+		spin_unlock_irqrestore(&blizzard.req_lock, flags);
+
+		if (req->handler(req) == REQ_PENDING)
+			return;
+
+		complete = req->complete;
+		complete_data = req->complete_data;
+		free_req(req);
+
+		if (complete)
+			complete(complete_data);
+
+		spin_lock_irqsave(&blizzard.req_lock, flags);
+	}
+
+	spin_unlock_irqrestore(&blizzard.req_lock, flags);
+}
+
+static void submit_req_list(struct list_head *head)
+{
+	unsigned long flags;
+	int process = 1;
+
+	spin_lock_irqsave(&blizzard.req_lock, flags);
+	if (likely(!list_empty(&blizzard.pending_req_list)))
+		process = 0;
+	list_splice_init(head, blizzard.pending_req_list.prev);
+	spin_unlock_irqrestore(&blizzard.req_lock, flags);
+
+	if (process)
+		process_pending_requests();
+}
+
+static void request_complete(void *data)
+{
+	struct blizzard_request	*req = (struct blizzard_request *)data;
+	void			(*complete)(void *);
+	void			*complete_data;
+
+	complete = req->complete;
+	complete_data = req->complete_data;
+
+	free_req(req);
+
+	if (complete)
+		complete(complete_data);
+
+	process_pending_requests();
+}
+
+
+static int do_full_screen_update(struct blizzard_request *req)
+{
+	int i;
+	int flags;
+
+	for (i = 0; i < 3; i++) {
+		struct plane_info *p = &blizzard.plane[i];
+		if (!(blizzard.enabled_planes & (1 << i))) {
+			blizzard.int_ctrl->enable_plane(i, 0);
+			continue;
+		}
+		dev_dbg(blizzard.fbdev->dev, "pw %d ph %d\n",
+			p->width, p->height);
+		blizzard.int_ctrl->setup_plane(i,
+				OMAPFB_CHANNEL_OUT_LCD, p->offset,
+				p->scr_width, p->pos_x, p->pos_y,
+				p->width, p->height,
+				p->color_mode);
+		blizzard.int_ctrl->enable_plane(i, 1);
+	}
+
+	dev_dbg(blizzard.fbdev->dev, "sw %d sh %d\n",
+		blizzard.screen_width, blizzard.screen_height);
+	blizzard_wait_line_buffer();
+	flags = req->par.update.flags;
+	if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
+		enable_tearsync(0, blizzard.screen_width,
+				blizzard.screen_height,
+				blizzard.screen_height,
+				blizzard.screen_height,
+				flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
+	else
+		disable_tearsync();
+
+	set_window_regs(0, 0, blizzard.screen_width, blizzard.screen_height,
+			0, 0, blizzard.screen_width, blizzard.screen_height,
+			BLIZZARD_COLOR_RGB565, blizzard.zoom_on, flags);
+	blizzard.zoom_on = 0;
+
+	blizzard.extif->set_bits_per_cycle(16);
+	/* set_window_regs has left the register index at the right
+	 * place, so no need to set it here.
+	 */
+	blizzard.extif->transfer_area(blizzard.screen_width,
+				      blizzard.screen_height,
+				      request_complete, req);
+	return REQ_PENDING;
+}
+
+/* Set up all planes that have an area overlapping the update window. */
+static int do_partial_update(struct blizzard_request *req, int plane,
+			     int x, int y, int w, int h,
+			     int x_out, int y_out, int w_out, int h_out,
+			     int wnd_color_mode, int bpp)
+{
+	int i;
+	int gx1, gy1, gx2, gy2;
+	int gx1_out, gy1_out, gx2_out, gy2_out;
+	int color_mode;
+	int flags;
+	int zoom_off;
+
+	/* Global coordinates, relative to pixel 0,0 of the LCD */
+	gx1 = x + blizzard.plane[plane].pos_x;
+	gy1 = y + blizzard.plane[plane].pos_y;
+	gx2 = gx1 + w;
+	gy2 = gy1 + h;
+
+	flags = req->par.update.flags;
+	if (flags & OMAPFB_FORMAT_FLAG_DOUBLE) {
+		gx1_out = gx1;
+		gy1_out = gy1;
+		gx2_out = gx1 + w * 2;
+		gy2_out = gy1 + h * 2;
+	} else {
+		gx1_out = x_out + blizzard.plane[plane].pos_x;
+		gy1_out = y_out + blizzard.plane[plane].pos_y;
+		gx2_out = gx1_out + w_out;
+		gy2_out = gy1_out + h_out;
+	}
+	zoom_off = blizzard.zoom_on && gx1 == 0 && gy1 == 0 &&
+		w == blizzard.screen_width && h == blizzard.screen_height;
+	blizzard.zoom_on = (!zoom_off && blizzard.zoom_on) ||
+			   (w < w_out || h < h_out);
+
+	for (i = 0; i < OMAPFB_PLANE_NUM; i++) {
+		struct plane_info *p = &blizzard.plane[i];
+		int px1, py1;
+		int px2, py2;
+		int pw, ph;
+		int pposx, pposy;
+		unsigned long offset;
+
+		if (!(blizzard.enabled_planes & (1 << i))  ||
+		    (wnd_color_mode && i != plane)) {
+			blizzard.int_ctrl->enable_plane(i, 0);
+			continue;
+		}
+		/* Plane coordinates */
+		if (i == plane) {
+			/* Plane in which we are doing the update.
+			 * Local coordinates are the ones in the update
+			 * request.
+			 */
+			px1 = x;
+			py1 = y;
+			px2 = x + w;
+			py2 = y + h;
+			pposx = 0;
+			pposy = 0;
+		} else {
+			/* Check if this plane has an overlapping part */
+			px1 = gx1 - p->pos_x;
+			py1 = gy1 - p->pos_y;
+			px2 = gx2 - p->pos_x;
+			py2 = gy2 - p->pos_y;
+			if (px1 >= p->width || py1 >= p->height ||
+			    px2 <= 0 || py2 <= 0) {
+				blizzard.int_ctrl->enable_plane(i, 0);
+				continue;
+			}
+			/* Calculate the coordinates for the overlapping
+			 * part in the plane's local coordinates.
+			 */
+			pposx = -px1;
+			pposy = -py1;
+			if (px1 < 0)
+				px1 = 0;
+			if (py1 < 0)
+				py1 = 0;
+			if (px2 > p->width)
+				px2 = p->width;
+			if (py2 > p->height)
+				py2 = p->height;
+			if (pposx < 0)
+				pposx = 0;
+			if (pposy < 0)
+				pposy = 0;
+		}
+		pw = px2 - px1;
+		ph = py2 - py1;
+		offset = p->offset + (p->scr_width * py1 + px1) * p->bpp / 8;
+		if (wnd_color_mode)
+			/* Window embedded in the plane with a differing
+			 * color mode / bpp. Calculate the number of DMA
+			 * transfer elements in terms of the plane's bpp.
+			 */
+			pw = (pw + 1) * bpp / p->bpp;
+#ifdef VERBOSE
+		dev_dbg(blizzard.fbdev->dev,
+			"plane %d offset %#08lx pposx %d pposy %d "
+			"px1 %d py1 %d pw %d ph %d\n",
+			i, offset, pposx, pposy, px1, py1, pw, ph);
+#endif
+		blizzard.int_ctrl->setup_plane(i,
+				OMAPFB_CHANNEL_OUT_LCD, offset,
+				p->scr_width,
+				pposx, pposy, pw, ph,
+				p->color_mode);
+
+		blizzard.int_ctrl->enable_plane(i, 1);
+	}
+
+	switch (wnd_color_mode) {
+	case OMAPFB_COLOR_YUV420:
+		color_mode = BLIZZARD_COLOR_YUV420;
+		/* Currently only the 16 bits/pixel cycle format is
+		 * supported on the external interface. Adjust the number
+		 * of transfer elements per line for 12bpp format.
+		 */
+		w = (w + 1) * 3 / 4;
+		break;
+	default:
+		color_mode = BLIZZARD_COLOR_RGB565;
+		break;
+	}
+
+	blizzard_wait_line_buffer();
+	if (blizzard.last_color_mode == BLIZZARD_COLOR_YUV420)
+		blizzard_wait_yyc();
+	blizzard.last_color_mode = color_mode;
+	if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
+		enable_tearsync(gy1, w, h,
+				blizzard.screen_height,
+				h_out,
+				flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
+	else
+		disable_tearsync();
+
+	set_window_regs(gx1, gy1, gx2, gy2, gx1_out, gy1_out, gx2_out, gy2_out,
+			color_mode, zoom_off, flags);
+
+	blizzard.extif->set_bits_per_cycle(16);
+	/* set_window_regs has left the register index at the right
+	 * place, so no need to set it here.
+	 */
+	blizzard.extif->transfer_area(w, h, request_complete, req);
+
+	return REQ_PENDING;
+}
+
+static int send_frame_handler(struct blizzard_request *req)
+{
+	struct update_param *par = &req->par.update;
+	int plane = par->plane;
+
+#ifdef VERBOSE
+	dev_dbg(blizzard.fbdev->dev,
+		"send_frame: x %d y %d w %d h %d "
+		"x_out %d y_out %d w_out %d h_out %d "
+		"color_mode %04x flags %04x planes %01x\n",
+		par->x, par->y, par->width, par->height,
+		par->out_x, par->out_y, par->out_width, par->out_height,
+		par->color_mode, par->flags, blizzard.enabled_planes);
+#endif
+	if (par->flags & OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY)
+		disable_overlay();
+
+	if ((blizzard.enabled_planes & blizzard.vid_nonstd_color) ||
+	     (blizzard.enabled_planes & blizzard.vid_scaled))
+		return do_full_screen_update(req);
+
+	return do_partial_update(req, plane, par->x, par->y,
+				 par->width, par->height,
+				 par->out_x, par->out_y,
+				 par->out_width, par->out_height,
+				 par->color_mode, par->bpp);
+}
+
+static void send_frame_complete(void *data)
+{
+}
+
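+/* Allocate and queue one partial update request.  Relies on the local
+ * variables of create_req_list() (plane_idx, bpp, color_mode, flags,
+ * req_head) being in scope at the point of use.
+ */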
+#define ADD_PREQ(_x, _y, _w, _h, _x_out, _y_out, _w_out, _h_out) do {	\
+	req = alloc_req();			\
+	req->handler	= send_frame_handler;	\
+	req->complete	= send_frame_complete;	\
+	req->par.update.plane = plane_idx;	\
+	req->par.update.x = _x;			\
+	req->par.update.y = _y;			\
+	req->par.update.width  = _w;		\
+	req->par.update.height = _h;		\
+	req->par.update.out_x = _x_out;		\
+	req->par.update.out_y = _y_out;		\
+	req->par.update.out_width = _w_out;	\
+	req->par.update.out_height = _h_out;	\
+	req->par.update.bpp = bpp;		\
+	req->par.update.color_mode = color_mode;\
+	req->par.update.flags	  = flags;	\
+	list_add_tail(&req->entry, req_head);	\
+} while(0)
+
+static void create_req_list(int plane_idx,
+			    struct omapfb_update_window *win,
+			    struct list_head *req_head)
+{
+	struct blizzard_request *req;
+	int x = win->x;
+	int y = win->y;
+	int width = win->width;
+	int height = win->height;
+	int x_out = win->out_x;
+	int y_out = win->out_y;
+	int width_out = win->out_width;
+	int height_out = win->out_height;
+	int color_mode;
+	int bpp;
+	int flags;
+	unsigned int ystart = y;
+	unsigned int yspan = height;
+	unsigned int ystart_out = y_out;
+	unsigned int yspan_out = height_out;
+
+	flags = win->format & ~OMAPFB_FORMAT_MASK;
+	color_mode = win->format & OMAPFB_FORMAT_MASK;
+	switch (color_mode) {
+	case OMAPFB_COLOR_YUV420:
+		/* Embedded window with different color mode */
+		bpp = 12;
+		/* X, Y and height must be aligned to 2 pixels, width to 4 */
+		x &= ~1;
+		y &= ~1;
+		height = yspan = height & ~1;
+		width = width & ~3;
+		break;
+	default:
+		/* Same as the plane color mode */
+		bpp = blizzard.plane[plane_idx].bpp;
+		break;
+	}
+	if (width * height * bpp / 8 > blizzard.max_transmit_size) {
+		yspan = blizzard.max_transmit_size / (width * bpp / 8);
+		yspan_out = yspan * height_out / height;
+		ADD_PREQ(x, ystart, width, yspan, x_out, ystart_out,
+			 width_out, yspan_out);
+		ystart += yspan;
+		ystart_out += yspan_out;
+		yspan = height - yspan;
+		yspan_out = height_out - yspan_out;
+		flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
+	}
+
+	ADD_PREQ(x, ystart, width, yspan, x_out, ystart_out,
+		 width_out, yspan_out);
+}
+
+static void auto_update_complete(void *data)
+{
+	if (!blizzard.stop_auto_update)
+		mod_timer(&blizzard.auto_update_timer,
+			  jiffies + BLIZZARD_AUTO_UPDATE_TIME);
+}
+
+static void blizzard_update_window_auto(unsigned long arg)
+{
+	LIST_HEAD(req_list);
+	struct blizzard_request *last;
+	struct omapfb_plane_struct *plane;
+
+	plane = blizzard.fbdev->fb_info[0]->par;
+	create_req_list(plane->idx,
+			&blizzard.auto_update_window, &req_list);
+	last = list_entry(req_list.prev, struct blizzard_request, entry);
+
+	last->complete = auto_update_complete;
+	last->complete_data = NULL;
+
+	submit_req_list(&req_list);
+}
+
+int blizzard_update_window_async(struct fb_info *fbi,
+				 struct omapfb_update_window *win,
+				 void (*complete_callback)(void *arg),
+				 void *complete_callback_data)
+{
+	LIST_HEAD(req_list);
+	struct blizzard_request *last;
+	struct omapfb_plane_struct *plane = fbi->par;
+
+	if (unlikely(blizzard.update_mode != OMAPFB_MANUAL_UPDATE))
+		return -EINVAL;
+	if (unlikely(!blizzard.te_connected &&
+		     (win->format & OMAPFB_FORMAT_FLAG_TEARSYNC)))
+		return -EINVAL;
+
+	create_req_list(plane->idx, win, &req_list);
+	last = list_entry(req_list.prev, struct blizzard_request, entry);
+
+	last->complete = complete_callback;
+	last->complete_data = (void *)complete_callback_data;
+
+	submit_req_list(&req_list);
+
+	return 0;
+}
+EXPORT_SYMBOL(blizzard_update_window_async);
+
+static int update_full_screen(void)
+{
+	return blizzard_update_window_async(blizzard.fbdev->fb_info[0],
+				     &blizzard.auto_update_window, NULL, NULL);
+
+}
+
+static int blizzard_setup_plane(int plane, int channel_out,
+				  unsigned long offset, int screen_width,
+				  int pos_x, int pos_y, int width, int height,
+				  int color_mode)
+{
+	struct plane_info *p;
+
+#ifdef VERBOSE
+	dev_dbg(blizzard.fbdev->dev,
+		    "plane %d ch_out %d offset %#08lx scr_width %d "
+		    "pos_x %d pos_y %d width %d height %d color_mode %d\n",
+		    plane, channel_out, offset, screen_width,
+		    pos_x, pos_y, width, height, color_mode);
+#endif
+	if ((unsigned)plane >= OMAPFB_PLANE_NUM)
+		return -EINVAL;
+	p = &blizzard.plane[plane];
+
+	switch (color_mode) {
+	case OMAPFB_COLOR_YUV422:
+	case OMAPFB_COLOR_YUY422:
+		p->bpp = 16;
+		blizzard.vid_nonstd_color &= ~(1 << plane);
+		break;
+	case OMAPFB_COLOR_YUV420:
+		p->bpp = 12;
+		blizzard.vid_nonstd_color |= 1 << plane;
+		break;
+	case OMAPFB_COLOR_RGB565:
+		p->bpp = 16;
+		blizzard.vid_nonstd_color &= ~(1 << plane);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	p->offset = offset;
+	p->pos_x = pos_x;
+	p->pos_y = pos_y;
+	p->width = width;
+	p->height = height;
+	p->scr_width = screen_width;
+	if (!p->out_width)
+		p->out_width = width;
+	if (!p->out_height)
+		p->out_height = height;
+
+	p->color_mode = color_mode;
+
+	return 0;
+}
+
+static int blizzard_set_scale(int plane, int orig_w, int orig_h,
+			      int out_w, int out_h)
+{
+	struct plane_info *p = &blizzard.plane[plane];
+	int r;
+
+	dev_dbg(blizzard.fbdev->dev,
+		"plane %d orig_w %d orig_h %d out_w %d out_h %d\n",
+		plane, orig_w, orig_h, out_w, out_h);
+	if ((unsigned)plane >= OMAPFB_PLANE_NUM)
+		return -ENODEV;
+
+	r = blizzard.int_ctrl->set_scale(plane, orig_w, orig_h, out_w, out_h);
+	if (r < 0)
+		return r;
+
+	p->width = orig_w;
+	p->height = orig_h;
+	p->out_width = out_w;
+	p->out_height = out_h;
+	if (orig_w == out_w && orig_h == out_h)
+		blizzard.vid_scaled &= ~(1 << plane);
+	else
+		blizzard.vid_scaled |= 1 << plane;
+
+	return 0;
+}
+
+static int blizzard_enable_plane(int plane, int enable)
+{
+	if (enable)
+		blizzard.enabled_planes |= 1 << plane;
+	else
+		blizzard.enabled_planes &= ~(1 << plane);
+
+	return 0;
+}
+
+static int sync_handler(struct blizzard_request *req)
+{
+	complete(req->par.sync);
+	return REQ_COMPLETE;
+}
+
+static void blizzard_sync(void)
+{
+	LIST_HEAD(req_list);
+	struct blizzard_request *req;
+	struct completion comp;
+
+	req = alloc_req();
+
+	req->handler = sync_handler;
+	req->complete = NULL;
+	init_completion(&comp);
+	req->par.sync = &comp;
+
+	list_add(&req->entry, &req_list);
+	submit_req_list(&req_list);
+
+	wait_for_completion(&comp);
+}
+
+
+static void blizzard_bind_client(struct omapfb_notifier_block *nb)
+{
+	if (blizzard.update_mode == OMAPFB_MANUAL_UPDATE) {
+		omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_READY);
+	}
+}
+
+static int blizzard_set_update_mode(enum omapfb_update_mode mode)
+{
+	if (unlikely(mode != OMAPFB_MANUAL_UPDATE &&
+		     mode != OMAPFB_AUTO_UPDATE &&
+		     mode != OMAPFB_UPDATE_DISABLED))
+		return -EINVAL;
+
+	if (mode == blizzard.update_mode)
+		return 0;
+
+	dev_info(blizzard.fbdev->dev, "s1d1374x: setting update mode to %s\n",
+			mode == OMAPFB_UPDATE_DISABLED ? "disabled" :
+			(mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual"));
+
+	switch (blizzard.update_mode) {
+	case OMAPFB_MANUAL_UPDATE:
+		omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_DISABLED);
+		break;
+	case OMAPFB_AUTO_UPDATE:
+		blizzard.stop_auto_update = 1;
+		del_timer_sync(&blizzard.auto_update_timer);
+		break;
+	case OMAPFB_UPDATE_DISABLED:
+		break;
+	}
+
+	blizzard.update_mode = mode;
+	blizzard_sync();
+	blizzard.stop_auto_update = 0;
+
+	switch (mode) {
+	case OMAPFB_MANUAL_UPDATE:
+		omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_READY);
+		break;
+	case OMAPFB_AUTO_UPDATE:
+		blizzard_update_window_auto(0);
+		break;
+	case OMAPFB_UPDATE_DISABLED:
+		break;
+	}
+
+	return 0;
+}
+
+static enum omapfb_update_mode blizzard_get_update_mode(void)
+{
+	return blizzard.update_mode;
+}
+
+static inline void set_extif_timings(const struct extif_timings *t)
+{
+	blizzard.extif->set_timings(t);
+}
+
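+/* Round a delay given in picoseconds up to the next multiple of the
+ * external interface bus tick (interface clock period * divider).
+ */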
+static inline unsigned long round_to_extif_ticks(unsigned long ps, int div)
+{
+	int bus_tick = blizzard.extif_clk_period * div;
+	return (ps + bus_tick - 1) / bus_tick * bus_tick;
+}
+
+static int calc_reg_timing(unsigned long sysclk, int div)
+{
+	struct extif_timings *t;
+	unsigned long systim;
+
+	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
+	 * AccessTime 2 ns + 12.2 ns (regs),
+	 * WEOffTime = WEOnTime + 1 ns,
+	 * REOffTime = REOnTime + 12 ns (regs),
+	 * CSOffTime = REOffTime + 1 ns
+	 * ReadCycle = 2ns + 2*SYSCLK  (regs),
+	 * WriteCycle = 2*SYSCLK + 2 ns,
+	 * CSPulseWidth = 10 ns */
+
+	systim = 1000000000 / (sysclk / 1000);
+	dev_dbg(blizzard.fbdev->dev,
+		  "Blizzard systim %lu ps extif_clk_period %u div %d\n",
+		  systim, blizzard.extif_clk_period, div);
+
+	t = &blizzard.reg_timings;
+	memset(t, 0, sizeof(*t));
+
+	t->clk_div = div;
+
+	t->cs_on_time = 0;
+	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+	t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
+	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
+	t->re_off_time = round_to_extif_ticks(t->re_on_time + 13000, div);
+	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
+	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+	if (t->we_cycle_time < t->we_off_time)
+		t->we_cycle_time = t->we_off_time;
+	t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+	if (t->re_cycle_time < t->re_off_time)
+		t->re_cycle_time = t->re_off_time;
+	t->cs_pulse_width = 0;
+
+	dev_dbg(blizzard.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n",
+		 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
+	dev_dbg(blizzard.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n",
+		 t->we_on_time, t->we_off_time, t->re_cycle_time,
+		 t->we_cycle_time);
+	dev_dbg(blizzard.fbdev->dev, "[reg]rdaccess %d cspulse %d\n",
+		 t->access_time, t->cs_pulse_width);
+
+	return blizzard.extif->convert_timings(t);
+}
+
+static int calc_lut_timing(unsigned long sysclk, int div)
+{
+	struct extif_timings *t;
+	unsigned long systim;
+
+	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
+	 * AccessTime 2 ns + 4 * SYSCLK + 26 (lut),
+	 * WEOffTime = WEOnTime + 1 ns,
+	 * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut),
+	 * CSOffTime = REOffTime + 1 ns
+	 * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut),
+	 * WriteCycle = 2*SYSCLK + 2 ns,
+	 * CSPulseWidth = 10 ns */
+
+	systim = 1000000000 / (sysclk / 1000);
+	dev_dbg(blizzard.fbdev->dev,
+		"Blizzard systim %lu ps extif_clk_period %u div %d\n",
+		systim, blizzard.extif_clk_period, div);
+
+	t = &blizzard.lut_timings;
+	memset(t, 0, sizeof(*t));
+
+	t->clk_div = div;
+
+	t->cs_on_time = 0;
+	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+	t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
+					      26000, div);
+	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
+	t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
+					      26000, div);
+	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
+	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+	if (t->we_cycle_time < t->we_off_time)
+		t->we_cycle_time = t->we_off_time;
+	t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div);
+	if (t->re_cycle_time < t->re_off_time)
+		t->re_cycle_time = t->re_off_time;
+	t->cs_pulse_width = 0;
+
+	dev_dbg(blizzard.fbdev->dev,
+		 "[lut]cson %d csoff %d reon %d reoff %d\n",
+		 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
+	dev_dbg(blizzard.fbdev->dev,
+		 "[lut]weon %d weoff %d recyc %d wecyc %d\n",
+		 t->we_on_time, t->we_off_time, t->re_cycle_time,
+		 t->we_cycle_time);
+	dev_dbg(blizzard.fbdev->dev, "[lut]rdaccess %d cspulse %d\n",
+		 t->access_time, t->cs_pulse_width);
+
+	return blizzard.extif->convert_timings(t);
+}
+
+static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div)
+{
+	int max_clk_div;
+	int div;
+
+	blizzard.extif->get_clk_info(&blizzard.extif_clk_period, &max_clk_div);
+	for (div = 1; div <= max_clk_div; div++) {
+		if (calc_reg_timing(sysclk, div) == 0)
+			break;
+	}
+	if (div > max_clk_div) {
+		dev_dbg(blizzard.fbdev->dev, "reg timing failed\n");
+		goto err;
+	}
+	*extif_mem_div = div;
+
+	for (div = 1; div <= max_clk_div; div++) {
+		if (calc_lut_timing(sysclk, div) == 0)
+			break;
+	}
+
+	if (div > max_clk_div)
+		goto err;
+
+	blizzard.extif_clk_div = div;
+
+	return 0;
+err:
+	dev_err(blizzard.fbdev->dev, "can't setup timings\n");
+	return -1;
+}
+
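+/* Derive the controller's system and pixel clock rates from the external
+ * clock and the PLL multiplier/divider registers.  If the PLL is not the
+ * pixel clock source, the system clock equals the external clock.
+ */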
+static void calc_blizzard_clk_rates(unsigned long ext_clk,
+				unsigned long *sys_clk, unsigned long *pix_clk)
+{
+	int pix_clk_src;
+	int sys_div = 0, sys_mul = 0;
+	int pix_div;
+
+	pix_clk_src = blizzard_read_reg(BLIZZARD_CLK_SRC);
+	pix_div = ((pix_clk_src >> 3) & 0x1f) + 1;
+	if ((pix_clk_src & (0x3 << 1)) == 0) {
+		/* Source is the PLL */
+		sys_div = (blizzard_read_reg(BLIZZARD_PLL_DIV) & 0x3f) + 1;
+		sys_mul = blizzard_read_reg(BLIZZARD_PLL_CLOCK_SYNTH_0);
+		sys_mul |= ((blizzard_read_reg(BLIZZARD_PLL_CLOCK_SYNTH_1)
+				& 0x0f)	<< 11);
+		*sys_clk = ext_clk * sys_mul / sys_div;
+	} else	/* source is the external clock or oscillator */
+		*sys_clk = ext_clk;
+
+	*pix_clk = *sys_clk / pix_div;			/* HZ */
+	dev_dbg(blizzard.fbdev->dev,
+		"ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n",
+		ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul);
+	dev_dbg(blizzard.fbdev->dev, "sys_clk %ld pix_clk %ld\n",
+		*sys_clk, *pix_clk);
+}
+
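+/* Configure the tearing-effect signal.  If a line can be transferred faster
+ * than the panel scans it out, syncing to VS alone is enough; otherwise both
+ * HS and VS are used, falling back to the active-high HNDP/VNDP based TE
+ * signal when either sync polarity is inverted.
+ */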
+static int setup_tearsync(unsigned long pix_clk, int extif_div)
+{
+	int hdisp, vdisp;
+	int hndp, vndp;
+	int hsw, vsw;
+	int hs, vs;
+	int hs_pol_inv, vs_pol_inv;
+	int use_hsvs, use_ndp;
+	u8  b;
+
+	hsw = blizzard_read_reg(BLIZZARD_HSW);
+	vsw = blizzard_read_reg(BLIZZARD_VSW);
+	hs_pol_inv = !(hsw & 0x80);
+	vs_pol_inv = !(vsw & 0x80);
+	hsw = hsw & 0x7f;
+	vsw = vsw & 0x3f;
+
+	hdisp = blizzard_read_reg(BLIZZARD_HDISP) * 8;
+	vdisp = blizzard_read_reg(BLIZZARD_VDISP0) +
+		((blizzard_read_reg(BLIZZARD_VDISP1) & 0x3) << 8);
+
+	hndp = blizzard_read_reg(BLIZZARD_HNDP) & 0x3f;
+	vndp = blizzard_read_reg(BLIZZARD_VNDP);
+
+	/* time to transfer one pixel (16bpp) in ps */
+	blizzard.pix_tx_time = blizzard.reg_timings.we_cycle_time;
+	if (blizzard.extif->get_max_tx_rate != NULL) {
+		/* The external interface might have a rate limit; if so, our
+		 * per-pixel transfer time must not drop below what it allows.
+		 */
+		unsigned long min_tx_time;
+		unsigned long max_tx_rate = blizzard.extif->get_max_tx_rate();
+
+		dev_dbg(blizzard.fbdev->dev, "max_tx_rate %ld HZ\n",
+			max_tx_rate);
+		min_tx_time = 1000000000 / (max_tx_rate / 1000);  /* ps */
+		if (blizzard.pix_tx_time < min_tx_time)
+			blizzard.pix_tx_time = min_tx_time;
+	}
+
+	/* time to update one line in ps */
+	blizzard.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000);
+	blizzard.line_upd_time *= 1000;
+	if (hdisp * blizzard.pix_tx_time > blizzard.line_upd_time)
+		/* transfer speed too low, we might have to use both
+		 * HS and VS */
+		use_hsvs = 1;
+	else
+		/* decent transfer speed, we'll always use only VS */
+		use_hsvs = 0;
+
+	if (use_hsvs && (hs_pol_inv || vs_pol_inv)) {
+		/* HS or'ed with VS doesn't work, use the active high
+		 * TE signal based on HNDP / VNDP */
+		use_ndp = 1;
+		hs_pol_inv = 0;
+		vs_pol_inv = 0;
+		hs = hndp;
+		vs = vndp;
+	} else {
+		/* Use HS or'ed with VS as a TE signal if both are needed
+		 * or VNDP if only vsync is needed. */
+		use_ndp = 0;
+		hs = hsw;
+		vs = vsw;
+		if (!use_hsvs) {
+			hs_pol_inv = 0;
+			vs_pol_inv = 0;
+		}
+	}
+
+	hs = hs * 1000000 / (pix_clk / 1000);		  /* ps */
+	hs *= 1000;
+
+	vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000); /* ps */
+	vs *= 1000;
+
+	if (vs <= hs)
+		return -EDOM;
+	/* set VS to 120% of HS to minimize VS detection time */
+	vs = hs * 12 / 10;
+	/* minimize HS too */
+	if (hs > 10000)
+		hs = 10000;
+
+	b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
+	b &= ~0x3;
+	b |= use_hsvs ? 1 : 0;
+	b |= (use_ndp && use_hsvs) ? 0 : 2;
+	blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
+
+	blizzard.vsync_only = !use_hsvs;
+
+	dev_dbg(blizzard.fbdev->dev,
+		"pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n",
+		pix_clk, blizzard.pix_tx_time, blizzard.line_upd_time);
+	dev_dbg(blizzard.fbdev->dev,
+		"hs %d ps vs %d ps mode %d vsync_only %d\n",
+		hs, vs, b & 0x3, !use_hsvs);
+
+	return blizzard.extif->setup_tearsync(1, hs, vs,
+					      hs_pol_inv, vs_pol_inv,
+					      extif_div);
+}
+
+static void blizzard_get_caps(int plane, struct omapfb_caps *caps)
+{
+	blizzard.int_ctrl->get_caps(plane, caps);
+	caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE |
+		OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE |
+		OMAPFB_CAPS_WINDOW_SCALE |
+		OMAPFB_CAPS_WINDOW_OVERLAY;
+	if (blizzard.te_connected)
+		caps->ctrl |= OMAPFB_CAPS_TEARSYNC;
+	caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) |
+			   (1 << OMAPFB_COLOR_YUV420);
+}
+
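+/* Cache and restore ranges of controller registers for suspend/resume.
+ * Only even register addresses are used, hence the stride of two.
+ */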
+static void _save_regs(struct blizzard_reg_list *list, int cnt)
+{
+	int i;
+
+	for (i = 0; i < cnt; i++, list++) {
+		int reg;
+		for (reg = list->start; reg <= list->end; reg += 2)
+			blizzard_reg_cache[reg / 2] = blizzard_read_reg(reg);
+	}
+}
+
+static void _restore_regs(struct blizzard_reg_list *list, int cnt)
+{
+	int i;
+
+	for (i = 0; i < cnt; i++, list++) {
+		int reg;
+		for (reg = list->start; reg <= list->end; reg += 2)
+			blizzard_write_reg(reg, blizzard_reg_cache[reg / 2]);
+	}
+}
+
+static void blizzard_save_all_regs(void)
+{
+	_save_regs(blizzard_pll_regs, ARRAY_SIZE(blizzard_pll_regs));
+	_save_regs(blizzard_gen_regs, ARRAY_SIZE(blizzard_gen_regs));
+}
+
+static void blizzard_restore_pll_regs(void)
+{
+	_restore_regs(blizzard_pll_regs, ARRAY_SIZE(blizzard_pll_regs));
+}
+
+static void blizzard_restore_gen_regs(void)
+{
+	_restore_regs(blizzard_gen_regs, ARRAY_SIZE(blizzard_gen_regs));
+}
+
+static void blizzard_suspend(void)
+{
+	u32 l;
+	unsigned long tmo;
+
+	if (blizzard.last_color_mode) {
+		update_full_screen();
+		blizzard_sync();
+	}
+	blizzard.update_mode_before_suspend = blizzard.update_mode;
+	/* the following will disable clocks as well */
+	blizzard_set_update_mode(OMAPFB_UPDATE_DISABLED);
+
+	blizzard_save_all_regs();
+
+	blizzard_stop_sdram();
+
+	l = blizzard_read_reg(BLIZZARD_POWER_SAVE);
+	/* Standby, Sleep. We assume we use an external clock. */
+	l |= 0x03;
+	blizzard_write_reg(BLIZZARD_POWER_SAVE, l);
+
+	tmo = jiffies + msecs_to_jiffies(100);
+	while (!(blizzard_read_reg(BLIZZARD_PLL_MODE) & (1 << 1))) {
+		if (time_after(jiffies, tmo)) {
+			dev_err(blizzard.fbdev->dev,
+				"s1d1374x: sleep timeout, stopping PLL manually\n");
+			l = blizzard_read_reg(BLIZZARD_PLL_MODE);
+			l &= ~0x03;
+			/* Disable PLL, counter function */
+			l |= 0x2;
+			blizzard_write_reg(BLIZZARD_PLL_MODE, l);
+			break;
+		}
+		msleep(1);
+	}
+
+	if (blizzard.power_down != NULL)
+		blizzard.power_down(blizzard.fbdev->dev);
+}
+
+static void blizzard_resume(void)
+{
+	u32 l;
+
+	if (blizzard.power_up != NULL)
+		blizzard.power_up(blizzard.fbdev->dev);
+
+	l = blizzard_read_reg(BLIZZARD_POWER_SAVE);
+	/* Standby, Sleep */
+	l &= ~0x03;
+	blizzard_write_reg(BLIZZARD_POWER_SAVE, l);
+
+	blizzard_restore_pll_regs();
+	l = blizzard_read_reg(BLIZZARD_PLL_MODE);
+	l &= ~0x03;
+	/* Enable PLL, counter function */
+	l |= 0x1;
+	blizzard_write_reg(BLIZZARD_PLL_MODE, l);
+
+	while (!(blizzard_read_reg(BLIZZARD_PLL_DIV) & (1 << 7)))
+		msleep(1);
+
+	blizzard_restart_sdram();
+
+	blizzard_restore_gen_regs();
+
+	/* Enable display */
+	blizzard_write_reg(BLIZZARD_DISPLAY_MODE, 0x01);
+
+	/* the following will enable clocks as necessary */
+	blizzard_set_update_mode(blizzard.update_mode_before_suspend);
+
+	/* Force a background update */
+	blizzard.zoom_on = 1;
+	update_full_screen();
+	blizzard_sync();
+}
+
+static int blizzard_init(struct omapfb_device *fbdev, int ext_mode,
+			 struct omapfb_mem_desc *req_vram)
+{
+	int r = 0, i;
+	u8 rev, conf;
+	unsigned long ext_clk;
+	int extif_div;
+	unsigned long sys_clk, pix_clk;
+	struct omapfb_platform_data *omapfb_conf;
+	struct blizzard_platform_data *ctrl_conf;
+
+	BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl);
+
+	blizzard.fbdev = fbdev;
+	blizzard.extif = fbdev->ext_if;
+	blizzard.int_ctrl = fbdev->int_ctrl;
+
+	omapfb_conf = fbdev->dev->platform_data;
+	ctrl_conf = omapfb_conf->ctrl_platform_data;
+	if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) {
+		dev_err(fbdev->dev, "s1d1374x: missing platform data\n");
+		r = -ENOENT;
+		goto err1;
+	}
+
+	blizzard.power_down = ctrl_conf->power_down;
+	blizzard.power_up = ctrl_conf->power_up;
+
+	spin_lock_init(&blizzard.req_lock);
+
+	if ((r = blizzard.int_ctrl->init(fbdev, 1, req_vram)) < 0)
+		goto err1;
+
+	if ((r = blizzard.extif->init(fbdev)) < 0)
+		goto err2;
+
+	blizzard_ctrl.set_color_key = blizzard.int_ctrl->set_color_key;
+	blizzard_ctrl.get_color_key = blizzard.int_ctrl->get_color_key;
+	blizzard_ctrl.setup_mem = blizzard.int_ctrl->setup_mem;
+	blizzard_ctrl.mmap = blizzard.int_ctrl->mmap;
+
+	ext_clk = ctrl_conf->get_clock_rate(fbdev->dev);
+	if ((r = calc_extif_timings(ext_clk, &extif_div)) < 0)
+		goto err3;
+
+	set_extif_timings(&blizzard.reg_timings);
+
+	if (blizzard.power_up != NULL)
+		blizzard.power_up(fbdev->dev);
+
+	calc_blizzard_clk_rates(ext_clk, &sys_clk, &pix_clk);
+
+	if ((r = calc_extif_timings(sys_clk, &extif_div)) < 0)
+		goto err3;
+	set_extif_timings(&blizzard.reg_timings);
+
+	if (!(blizzard_read_reg(BLIZZARD_PLL_DIV) & 0x80)) {
+		dev_err(fbdev->dev,
+			"controller not initialized by the bootloader\n");
+		r = -ENODEV;
+		goto err3;
+	}
+
+	if (ctrl_conf->te_connected) {
+		if ((r = setup_tearsync(pix_clk, extif_div)) < 0)
+			goto err3;
+		blizzard.te_connected = 1;
+	}
+
+	rev = blizzard_read_reg(BLIZZARD_REV_CODE);
+	conf = blizzard_read_reg(BLIZZARD_CONFIG);
+
+	switch (rev & 0xfc) {
+	case 0x9c:
+		blizzard.version = BLIZZARD_VERSION_S1D13744;
+		pr_info("omapfb: s1d13744 LCD controller rev %d "
+			"initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
+		break;
+	case 0xa4:
+		blizzard.version = BLIZZARD_VERSION_S1D13745;
+		pr_info("omapfb: s1d13745 LCD controller rev %d "
+			"initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
+		break;
+	default:
+		dev_err(fbdev->dev, "invalid s1d1374x revision %02x\n",
+			rev);
+		r = -ENODEV;
+		goto err3;
+	}
+
+	blizzard.max_transmit_size = blizzard.extif->max_transmit_size;
+
+	blizzard.update_mode = OMAPFB_UPDATE_DISABLED;
+
+	blizzard.auto_update_window.x = 0;
+	blizzard.auto_update_window.y = 0;
+	blizzard.auto_update_window.width = fbdev->panel->x_res;
+	blizzard.auto_update_window.height = fbdev->panel->y_res;
+	blizzard.auto_update_window.out_x = 0;
+	blizzard.auto_update_window.out_y = 0;
+	blizzard.auto_update_window.out_width = fbdev->panel->x_res;
+	blizzard.auto_update_window.out_height = fbdev->panel->y_res;
+	blizzard.auto_update_window.format = 0;
+
+	blizzard.screen_width = fbdev->panel->x_res;
+	blizzard.screen_height = fbdev->panel->y_res;
+
+	init_timer(&blizzard.auto_update_timer);
+	blizzard.auto_update_timer.function = blizzard_update_window_auto;
+	blizzard.auto_update_timer.data = 0;
+
+	INIT_LIST_HEAD(&blizzard.free_req_list);
+	INIT_LIST_HEAD(&blizzard.pending_req_list);
+	for (i = 0; i < ARRAY_SIZE(blizzard.req_pool); i++)
+		list_add(&blizzard.req_pool[i].entry, &blizzard.free_req_list);
+	BUG_ON(i <= IRQ_REQ_POOL_SIZE);
+	sema_init(&blizzard.req_sema, i - IRQ_REQ_POOL_SIZE);
+
+	return 0;
+err3:
+	if (blizzard.power_down != NULL)
+		blizzard.power_down(fbdev->dev);
+	blizzard.extif->cleanup();
+err2:
+	blizzard.int_ctrl->cleanup();
+err1:
+	return r;
+}
+
+static void blizzard_cleanup(void)
+{
+	blizzard_set_update_mode(OMAPFB_UPDATE_DISABLED);
+	blizzard.extif->cleanup();
+	blizzard.int_ctrl->cleanup();
+	if (blizzard.power_down != NULL)
+		blizzard.power_down(blizzard.fbdev->dev);
+}
+
+struct lcd_ctrl blizzard_ctrl = {
+	.name			= "blizzard",
+	.init			= blizzard_init,
+	.cleanup		= blizzard_cleanup,
+	.bind_client		= blizzard_bind_client,
+	.get_caps		= blizzard_get_caps,
+	.set_update_mode	= blizzard_set_update_mode,
+	.get_update_mode	= blizzard_get_update_mode,
+	.setup_plane		= blizzard_setup_plane,
+	.set_scale		= blizzard_set_scale,
+	.enable_plane		= blizzard_enable_plane,
+	.update_window		= blizzard_update_window_async,
+	.sync			= blizzard_sync,
+	.suspend		= blizzard_suspend,
+	.resume			= blizzard_resume,
+};
+
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
new file mode 100644
index 0000000..f4c2343
--- /dev/null
+++ b/drivers/video/omap/dispc.c
@@ -0,0 +1,1502 @@
+/*
+ * OMAP2 display controller support
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/arch/sram.h>
+#include <asm/arch/omapfb.h>
+#include <asm/arch/board.h>
+
+#include "dispc.h"
+
+#define MODULE_NAME			"dispc"
+
+#define DSS_BASE			0x48050000
+#define DSS_SYSCONFIG			0x0010
+
+#define DISPC_BASE			0x48050400
+
+/* DISPC common */
+#define DISPC_REVISION			0x0000
+#define DISPC_SYSCONFIG			0x0010
+#define DISPC_SYSSTATUS			0x0014
+#define DISPC_IRQSTATUS			0x0018
+#define DISPC_IRQENABLE			0x001C
+#define DISPC_CONTROL			0x0040
+#define DISPC_CONFIG			0x0044
+#define DISPC_CAPABLE			0x0048
+#define DISPC_DEFAULT_COLOR0		0x004C
+#define DISPC_DEFAULT_COLOR1		0x0050
+#define DISPC_TRANS_COLOR0		0x0054
+#define DISPC_TRANS_COLOR1		0x0058
+#define DISPC_LINE_STATUS		0x005C
+#define DISPC_LINE_NUMBER		0x0060
+#define DISPC_TIMING_H			0x0064
+#define DISPC_TIMING_V			0x0068
+#define DISPC_POL_FREQ			0x006C
+#define DISPC_DIVISOR			0x0070
+#define DISPC_SIZE_DIG			0x0078
+#define DISPC_SIZE_LCD			0x007C
+
+#define DISPC_DATA_CYCLE1		0x01D4
+#define DISPC_DATA_CYCLE2		0x01D8
+#define DISPC_DATA_CYCLE3		0x01DC
+
+/* DISPC GFX plane */
+#define DISPC_GFX_BA0			0x0080
+#define DISPC_GFX_BA1			0x0084
+#define DISPC_GFX_POSITION		0x0088
+#define DISPC_GFX_SIZE			0x008C
+#define DISPC_GFX_ATTRIBUTES		0x00A0
+#define DISPC_GFX_FIFO_THRESHOLD	0x00A4
+#define DISPC_GFX_FIFO_SIZE_STATUS	0x00A8
+#define DISPC_GFX_ROW_INC		0x00AC
+#define DISPC_GFX_PIXEL_INC		0x00B0
+#define DISPC_GFX_WINDOW_SKIP		0x00B4
+#define DISPC_GFX_TABLE_BA		0x00B8
+
+/* DISPC Video plane 1/2 */
+#define DISPC_VID1_BASE			0x00BC
+#define DISPC_VID2_BASE			0x014C
+
+/* Offsets into DISPC_VID1/2_BASE */
+#define DISPC_VID_BA0			0x0000
+#define DISPC_VID_BA1			0x0004
+#define DISPC_VID_POSITION		0x0008
+#define DISPC_VID_SIZE			0x000C
+#define DISPC_VID_ATTRIBUTES		0x0010
+#define DISPC_VID_FIFO_THRESHOLD	0x0014
+#define DISPC_VID_FIFO_SIZE_STATUS	0x0018
+#define DISPC_VID_ROW_INC		0x001C
+#define DISPC_VID_PIXEL_INC		0x0020
+#define DISPC_VID_FIR			0x0024
+#define DISPC_VID_PICTURE_SIZE		0x0028
+#define DISPC_VID_ACCU0			0x002C
+#define DISPC_VID_ACCU1			0x0030
+
+/* 8 elements in 8 byte increments */
+#define DISPC_VID_FIR_COEF_H0		0x0034
+/* 8 elements in 8 byte increments */
+#define DISPC_VID_FIR_COEF_HV0		0x0038
+/* 5 elements in 4 byte increments */
+#define DISPC_VID_CONV_COEF0		0x0074
+
+#define DISPC_IRQ_FRAMEMASK		0x0001
+#define DISPC_IRQ_VSYNC			0x0002
+#define DISPC_IRQ_EVSYNC_EVEN		0x0004
+#define DISPC_IRQ_EVSYNC_ODD		0x0008
+#define DISPC_IRQ_ACBIAS_COUNT_STAT	0x0010
+#define DISPC_IRQ_PROG_LINE_NUM		0x0020
+#define DISPC_IRQ_GFX_FIFO_UNDERFLOW	0x0040
+#define DISPC_IRQ_GFX_END_WIN		0x0080
+#define DISPC_IRQ_PAL_GAMMA_MASK	0x0100
+#define DISPC_IRQ_OCP_ERR		0x0200
+#define DISPC_IRQ_VID1_FIFO_UNDERFLOW	0x0400
+#define DISPC_IRQ_VID1_END_WIN		0x0800
+#define DISPC_IRQ_VID2_FIFO_UNDERFLOW	0x1000
+#define DISPC_IRQ_VID2_END_WIN		0x2000
+#define DISPC_IRQ_SYNC_LOST		0x4000
+
+#define DISPC_IRQ_MASK_ALL		0x7fff
+
+#define DISPC_IRQ_MASK_ERROR		(DISPC_IRQ_GFX_FIFO_UNDERFLOW |	\
+					     DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
+					     DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
+					     DISPC_IRQ_SYNC_LOST)
+
+#define RFBI_CONTROL			0x48050040
+
+#define MAX_PALETTE_SIZE		(256 * 16)
+
+#define FLD_MASK(pos, len)	(((1 << len) - 1) << pos)
+
+#define MOD_REG_FLD(reg, mask, val) \
+	dispc_write_reg((reg), (dispc_read_reg(reg) & ~(mask)) | (val));
+
+#define OMAP2_SRAM_START		0x40200000
+/* Maximum size; in reality this is smaller if the SRAM is partially locked. */
+#define OMAP2_SRAM_SIZE			0xa0000		/* 640k */
+
+/* We support the SDRAM / SRAM types. See OMAPFB_PLANE_MEMTYPE_* in omapfb.h */
+#define DISPC_MEMTYPE_NUM		2
+
+#define RESMAP_SIZE(_page_cnt)						\
+	((_page_cnt + (sizeof(unsigned long) * 8) - 1) / 8)
+#define RESMAP_PTR(_res_map, _page_nr)					\
+	(((_res_map)->map) + (_page_nr) / (sizeof(unsigned long) * 8))
+#define RESMAP_MASK(_page_nr)						\
+	(1 << ((_page_nr) & (sizeof(unsigned long) * 8 - 1)))
+
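+/* Page-granularity allocation bitmap used to track which parts of the
+ * SDRAM and SRAM framebuffer memory regions are reserved.
+ */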
+struct resmap {
+	unsigned long	start;
+	unsigned	page_cnt;
+	unsigned long	*map;
+};
+
+static struct {
+	u32		base;
+
+	struct omapfb_mem_desc	mem_desc;
+	struct resmap		*res_map[DISPC_MEMTYPE_NUM];
+	atomic_t		map_count[OMAPFB_PLANE_NUM];
+
+	dma_addr_t	palette_paddr;
+	void		*palette_vaddr;
+
+	int		ext_mode;
+
+	unsigned long	enabled_irqs;
+	void		(*irq_callback)(void *);
+	void		*irq_callback_data;
+	struct completion	frame_done;
+
+	int		fir_hinc[OMAPFB_PLANE_NUM];
+	int		fir_vinc[OMAPFB_PLANE_NUM];
+
+	struct clk	*dss_ick, *dss1_fck;
+	struct clk	*dss_54m_fck;
+
+	enum omapfb_update_mode	update_mode;
+	struct omapfb_device	*fbdev;
+
+	struct omapfb_color_key	color_key;
+} dispc;
+
+static void enable_lcd_clocks(int enable);
+
+static inline void dispc_write_reg(int idx, u32 val)
+{
+	__raw_writel(val, dispc.base + idx);
+}
+
+static inline u32 dispc_read_reg(int idx)
+{
+	u32 l = __raw_readl(dispc.base + idx);
+	return l;
+}
+
+/* Select RFBI or bypass mode */
+static void enable_rfbi_mode(int enable)
+{
+	u32 l;
+
+	l = dispc_read_reg(DISPC_CONTROL);
+	/* Enable RFBI, GPIO0/1 */
+	l &= ~((1 << 11) | (1 << 15) | (1 << 16));
+	l |= enable ? (1 << 11) : 0;
+	/* RFBI En: GPIO0/1=10  RFBI Dis: GPIO0/1=11 */
+	l |= 1 << 15;
+	l |= enable ? 0 : (1 << 16);
+	dispc_write_reg(DISPC_CONTROL, l);
+
+	/* Set bypass mode in RFBI module */
+	l = __raw_readl(io_p2v(RFBI_CONTROL));
+	l |= enable ? 0 : (1 << 1);
+	__raw_writel(l, io_p2v(RFBI_CONTROL));
+}
+
+static void set_lcd_data_lines(int data_lines)
+{
+	u32 l;
+	int code = 0;
+
+	switch (data_lines) {
+	case 12:
+		code = 0;
+		break;
+	case 16:
+		code = 1;
+		break;
+	case 18:
+		code = 2;
+		break;
+	case 24:
+		code = 3;
+		break;
+	default:
+		BUG();
+	}
+
+	l = dispc_read_reg(DISPC_CONTROL);
+	l &= ~(0x03 << 8);
+	l |= code << 8;
+	dispc_write_reg(DISPC_CONTROL, l);
+}
+
+static void set_load_mode(int mode)
+{
+	BUG_ON(mode & ~(DISPC_LOAD_CLUT_ONLY | DISPC_LOAD_FRAME_ONLY |
+			DISPC_LOAD_CLUT_ONCE_FRAME));
+	MOD_REG_FLD(DISPC_CONFIG, 0x03 << 1, mode << 1);
+}
+
+void omap_dispc_set_lcd_size(int x, int y)
+{
+	BUG_ON((x > (1 << 11)) || (y > (1 << 11)));
+	enable_lcd_clocks(1);
+	MOD_REG_FLD(DISPC_SIZE_LCD, FLD_MASK(16, 11) | FLD_MASK(0, 11),
+			((y - 1) << 16) | (x - 1));
+	enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_set_lcd_size);
+
+void omap_dispc_set_digit_size(int x, int y)
+{
+	BUG_ON((x > (1 << 11)) || (y > (1 << 11)));
+	enable_lcd_clocks(1);
+	MOD_REG_FLD(DISPC_SIZE_DIG, FLD_MASK(16, 11) | FLD_MASK(0, 11),
+			((y - 1) << 16) | (x - 1));
+	enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_set_digit_size);
+
+static void setup_plane_fifo(int plane, int ext_mode)
+{
+	const u32 ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD,
+				DISPC_VID1_BASE + DISPC_VID_FIFO_THRESHOLD,
+			        DISPC_VID2_BASE + DISPC_VID_FIFO_THRESHOLD };
+	const u32 fsz_reg[] = { DISPC_GFX_FIFO_SIZE_STATUS,
+				DISPC_VID1_BASE + DISPC_VID_FIFO_SIZE_STATUS,
+				DISPC_VID2_BASE + DISPC_VID_FIFO_SIZE_STATUS };
+	int low, high;
+	u32 l;
+
+	BUG_ON(plane > 2);
+
+	l = dispc_read_reg(fsz_reg[plane]);
+	l &= FLD_MASK(0, 9);
+	if (ext_mode) {
+		low = l * 3 / 4;
+		high = l;
+	} else {
+		low = l / 4;
+		high = l * 3 / 4;
+	}
+	MOD_REG_FLD(ftrs_reg[plane], FLD_MASK(16, 9) | FLD_MASK(0, 9),
+			(high << 16) | low);
+}
+
+void omap_dispc_enable_lcd_out(int enable)
+{
+	enable_lcd_clocks(1);
+	MOD_REG_FLD(DISPC_CONTROL, 1, enable ? 1 : 0);
+	enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_enable_lcd_out);
+
+void omap_dispc_enable_digit_out(int enable)
+{
+	enable_lcd_clocks(1);
+	MOD_REG_FLD(DISPC_CONTROL, 1 << 1, enable ? 1 << 1 : 0);
+	enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_enable_digit_out);
+
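+/* Program one DISPC plane: color format, burst size, output channel, base
+ * address, position, size and row increment.  Returns the size in bytes of
+ * the framebuffer area spanned by the plane (full scanlines).
+ */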
+static inline int _setup_plane(int plane, int channel_out,
+				  u32 paddr, int screen_width,
+				  int pos_x, int pos_y, int width, int height,
+				  int color_mode)
+{
+	const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES,
+				DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
+			        DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
+	const u32 ba_reg[] = { DISPC_GFX_BA0, DISPC_VID1_BASE + DISPC_VID_BA0,
+				DISPC_VID2_BASE + DISPC_VID_BA0 };
+	const u32 ps_reg[] = { DISPC_GFX_POSITION,
+				DISPC_VID1_BASE + DISPC_VID_POSITION,
+				DISPC_VID2_BASE + DISPC_VID_POSITION };
+	const u32 sz_reg[] = { DISPC_GFX_SIZE,
+				DISPC_VID1_BASE + DISPC_VID_PICTURE_SIZE,
+				DISPC_VID2_BASE + DISPC_VID_PICTURE_SIZE };
+	const u32 ri_reg[] = { DISPC_GFX_ROW_INC,
+				DISPC_VID1_BASE + DISPC_VID_ROW_INC,
+			        DISPC_VID2_BASE + DISPC_VID_ROW_INC };
+	const u32 vs_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_SIZE,
+				DISPC_VID2_BASE + DISPC_VID_SIZE };
+
+	int chout_shift, burst_shift;
+	int chout_val;
+	int color_code;
+	int bpp;
+	int cconv_en;
+	int set_vsize;
+	u32 l;
+
+#ifdef VERBOSE
+	dev_dbg(dispc.fbdev->dev, "plane %d channel %d paddr %#08x scr_width %d"
+		    " pos_x %d pos_y %d width %d height %d color_mode %d\n",
+		    plane, channel_out, paddr, screen_width, pos_x, pos_y,
+		    width, height, color_mode);
+#endif
+
+	set_vsize = 0;
+	switch (plane) {
+	case OMAPFB_PLANE_GFX:
+		burst_shift = 6;
+		chout_shift = 8;
+		break;
+	case OMAPFB_PLANE_VID1:
+	case OMAPFB_PLANE_VID2:
+		burst_shift = 14;
+		chout_shift = 16;
+		set_vsize = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (channel_out) {
+	case OMAPFB_CHANNEL_OUT_LCD:
+		chout_val = 0;
+		break;
+	case OMAPFB_CHANNEL_OUT_DIGIT:
+		chout_val = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	cconv_en = 0;
+	switch (color_mode) {
+	case OMAPFB_COLOR_RGB565:
+		color_code = DISPC_RGB_16_BPP;
+		bpp = 16;
+		break;
+	case OMAPFB_COLOR_YUV422:
+		if (plane == 0)
+			return -EINVAL;
+		color_code = DISPC_UYVY_422;
+		cconv_en = 1;
+		bpp = 16;
+		break;
+	case OMAPFB_COLOR_YUY422:
+		if (plane == 0)
+			return -EINVAL;
+		color_code = DISPC_YUV2_422;
+		cconv_en = 1;
+		bpp = 16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	l = dispc_read_reg(at_reg[plane]);
+
+	l &= ~(0x0f << 1);
+	l |= color_code << 1;
+	l &= ~(1 << 9);
+	l |= cconv_en << 9;
+
+	l &= ~(0x03 << burst_shift);
+	l |= DISPC_BURST_8x32 << burst_shift;
+
+	l &= ~(1 << chout_shift);
+	l |= chout_val << chout_shift;
+
+	dispc_write_reg(at_reg[plane], l);
+
+	dispc_write_reg(ba_reg[plane], paddr);
+	MOD_REG_FLD(ps_reg[plane],
+		    FLD_MASK(16, 11) | FLD_MASK(0, 11), (pos_y << 16) | pos_x);
+
+	MOD_REG_FLD(sz_reg[plane], FLD_MASK(16, 11) | FLD_MASK(0, 11),
+			((height - 1) << 16) | (width - 1));
+
+	if (set_vsize) {
+		/* Set video size if set_scale hasn't set it */
+		if (!dispc.fir_vinc[plane])
+			MOD_REG_FLD(vs_reg[plane],
+				FLD_MASK(16, 11), (height - 1) << 16);
+		if (!dispc.fir_hinc[plane])
+			MOD_REG_FLD(vs_reg[plane],
+				FLD_MASK(0, 11), width - 1);
+	}
+
+	dispc_write_reg(ri_reg[plane], (screen_width - width) * bpp / 8 + 1);
+
+	return height * screen_width * bpp / 8;
+}
+
+static int omap_dispc_setup_plane(int plane, int channel_out,
+				  unsigned long offset,
+				  int screen_width,
+				  int pos_x, int pos_y, int width, int height,
+				  int color_mode)
+{
+	u32 paddr;
+	int r;
+
+	if ((unsigned)plane >= dispc.mem_desc.region_cnt)
+		return -EINVAL;
+	paddr = dispc.mem_desc.region[plane].paddr + offset;
+	enable_lcd_clocks(1);
+	r = _setup_plane(plane, channel_out, paddr,
+			screen_width,
+			pos_x, pos_y, width, height, color_mode);
+	enable_lcd_clocks(0);
+	return r;
+}
+
+static void write_firh_reg(int plane, int reg, u32 value)
+{
+	u32 base;
+
+	if (plane == 1)
+		base = DISPC_VID1_BASE + DISPC_VID_FIR_COEF_H0;
+	else
+		base = DISPC_VID2_BASE + DISPC_VID_FIR_COEF_H0;
+	dispc_write_reg(base + reg * 8,	value);
+}
+
+static void write_firhv_reg(int plane, int reg, u32 value)
+{
+	u32 base;
+
+	if (plane == 1)
+		base = DISPC_VID1_BASE + DISPC_VID_FIR_COEF_HV0;
+	else
+		base = DISPC_VID2_BASE + DISPC_VID_FIR_COEF_HV0;
+	dispc_write_reg(base + reg * 8,	value);
+}
+
+static void set_upsampling_coef_table(int plane)
+{
+	const u32 coef[][2] = {
+		{ 0x00800000, 0x00800000 },
+		{ 0x0D7CF800, 0x037B02FF },
+		{ 0x1E70F5FF, 0x0C6F05FE },
+		{ 0x335FF5FE, 0x205907FB },
+		{ 0xF74949F7, 0x00404000 },
+		{ 0xF55F33FB, 0x075920FE },
+		{ 0xF5701EFE, 0x056F0CFF },
+		{ 0xF87C0DFF, 0x027B0300 },
+	};
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		write_firh_reg(plane, i, coef[i][0]);
+		write_firhv_reg(plane, i, coef[i][1]);
+	}
+}
+
+static int omap_dispc_set_scale(int plane,
+				int orig_width, int orig_height,
+				int out_width, int out_height)
+{
+	const u32 at_reg[]  = { 0, DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
+				DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
+	const u32 vs_reg[]  = { 0, DISPC_VID1_BASE + DISPC_VID_SIZE,
+				DISPC_VID2_BASE + DISPC_VID_SIZE };
+	const u32 fir_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_FIR,
+				DISPC_VID2_BASE + DISPC_VID_FIR };
+
+	u32 l;
+	int fir_hinc;
+	int fir_vinc;
+
+	if ((unsigned)plane >= OMAPFB_PLANE_NUM)
+		return -ENODEV;
+
+	if (plane == OMAPFB_PLANE_GFX &&
+	    (out_width != orig_width || out_height != orig_height))
+		return -EINVAL;
+
+	enable_lcd_clocks(1);
+	if (orig_width < out_width) {
+		/*
+		 * Upsampling.
+		 * Currently both dimensions can only be scaled the same way.
+		 */
+		if (orig_height > out_height ||
+		    orig_width * 8 < out_width ||
+		    orig_height * 8 < out_height) {
+			enable_lcd_clocks(0);
+			return -EINVAL;
+		}
+		set_upsampling_coef_table(plane);
+	} else if (orig_width > out_width) {
+		/* Downsampling isn't supported yet. */
+		enable_lcd_clocks(0);
+		return -EINVAL;
+	}
+	if (!orig_width || orig_width == out_width)
+		fir_hinc = 0;
+	else
+		fir_hinc = 1024 * orig_width / out_width;
+	if (!orig_height || orig_height == out_height)
+		fir_vinc = 0;
+	else
+		fir_vinc = 1024 * orig_height / out_height;
+	dispc.fir_hinc[plane] = fir_hinc;
+	dispc.fir_vinc[plane] = fir_vinc;
+
+	MOD_REG_FLD(fir_reg[plane],
+		    FLD_MASK(16, 12) | FLD_MASK(0, 12),
+		    ((fir_vinc & 4095) << 16) |
+		    (fir_hinc & 4095));
+
+	dev_dbg(dispc.fbdev->dev, "out_width %d out_height %d orig_width %d "
+		"orig_height %d fir_hinc  %d fir_vinc %d\n",
+		out_width, out_height, orig_width, orig_height,
+		fir_hinc, fir_vinc);
+
+	MOD_REG_FLD(vs_reg[plane],
+		    FLD_MASK(16, 11) | FLD_MASK(0, 11),
+		    ((out_height - 1) << 16) | (out_width - 1));
+
+	l = dispc_read_reg(at_reg[plane]);
+	l &= ~(0x03 << 5);
+	l |= fir_hinc ? (1 << 5) : 0;
+	l |= fir_vinc ? (1 << 6) : 0;
+	dispc_write_reg(at_reg[plane], l);
+
+	enable_lcd_clocks(0);
+	return 0;
+}
+
+static int omap_dispc_enable_plane(int plane, int enable)
+{
+	const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES,
+				DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
+				DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
+	if ((unsigned int)plane >= dispc.mem_desc.region_cnt)
+		return -EINVAL;
+
+	enable_lcd_clocks(1);
+	MOD_REG_FLD(at_reg[plane], 1, enable ? 1 : 0);
+	enable_lcd_clocks(0);
+
+	return 0;
+}
+
+static int omap_dispc_set_color_key(struct omapfb_color_key *ck)
+{
+	u32 df_reg, tr_reg;
+	int shift, val;
+
+	switch (ck->channel_out) {
+	case OMAPFB_CHANNEL_OUT_LCD:
+		df_reg = DISPC_DEFAULT_COLOR0;
+		tr_reg = DISPC_TRANS_COLOR0;
+		shift = 10;
+		break;
+	case OMAPFB_CHANNEL_OUT_DIGIT:
+		df_reg = DISPC_DEFAULT_COLOR1;
+		tr_reg = DISPC_TRANS_COLOR1;
+		shift = 12;
+		break;
+	default:
+		return -EINVAL;
+	}
+	switch (ck->key_type) {
+	case OMAPFB_COLOR_KEY_DISABLED:
+		val = 0;
+		break;
+	case OMAPFB_COLOR_KEY_GFX_DST:
+		val = 1;
+		break;
+	case OMAPFB_COLOR_KEY_VID_SRC:
+		val = 3;
+		break;
+	default:
+		return -EINVAL;
+	}
+	enable_lcd_clocks(1);
+	MOD_REG_FLD(DISPC_CONFIG, FLD_MASK(shift, 2), val << shift);
+
+	if (val != 0)
+		dispc_write_reg(tr_reg, ck->trans_key);
+	dispc_write_reg(df_reg, ck->background);
+	enable_lcd_clocks(0);
+
+	dispc.color_key = *ck;
+
+	return 0;
+}
+
+static int omap_dispc_get_color_key(struct omapfb_color_key *ck)
+{
+	*ck = dispc.color_key;
+	return 0;
+}
+
+static void load_palette(void)
+{
+}
+
+static int omap_dispc_set_update_mode(enum omapfb_update_mode mode)
+{
+	int r = 0;
+
+	if (mode != dispc.update_mode) {
+		switch (mode) {
+		case OMAPFB_AUTO_UPDATE:
+		case OMAPFB_MANUAL_UPDATE:
+			enable_lcd_clocks(1);
+			omap_dispc_enable_lcd_out(1);
+			dispc.update_mode = mode;
+			break;
+		case OMAPFB_UPDATE_DISABLED:
+			init_completion(&dispc.frame_done);
+			omap_dispc_enable_lcd_out(0);
+			if (!wait_for_completion_timeout(&dispc.frame_done,
+					msecs_to_jiffies(500))) {
+				dev_err(dispc.fbdev->dev,
+					 "timeout waiting for FRAME DONE\n");
+			}
+			dispc.update_mode = mode;
+			enable_lcd_clocks(0);
+			break;
+		default:
+			r = -EINVAL;
+		}
+	}
+
+	return r;
+}
+
+static void omap_dispc_get_caps(int plane, struct omapfb_caps *caps)
+{
+	caps->ctrl |= OMAPFB_CAPS_PLANE_RELOCATE_MEM;
+	if (plane > 0)
+		caps->ctrl |= OMAPFB_CAPS_PLANE_SCALE;
+	caps->plane_color |= (1 << OMAPFB_COLOR_RGB565) |
+			     (1 << OMAPFB_COLOR_YUV422) |
+			     (1 << OMAPFB_COLOR_YUY422);
+	if (plane == 0)
+		caps->plane_color |= (1 << OMAPFB_COLOR_CLUT_8BPP) |
+				     (1 << OMAPFB_COLOR_CLUT_4BPP) |
+				     (1 << OMAPFB_COLOR_CLUT_2BPP) |
+				     (1 << OMAPFB_COLOR_CLUT_1BPP) |
+				     (1 << OMAPFB_COLOR_RGB444);
+}
+
+static enum omapfb_update_mode omap_dispc_get_update_mode(void)
+{
+	return dispc.update_mode;
+}
+
+static void setup_color_conv_coef(void)
+{
+	u32 mask = FLD_MASK(16, 11) | FLD_MASK(0, 11);
+	int cf1_reg = DISPC_VID1_BASE + DISPC_VID_CONV_COEF0;
+	int cf2_reg = DISPC_VID2_BASE + DISPC_VID_CONV_COEF0;
+	int at1_reg = DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES;
+	int at2_reg = DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES;
+	const struct color_conv_coef {
+		int  ry,  rcr,  rcb,   gy,  gcr,  gcb,   by,  bcr,  bcb;
+		int  full_range;
+	}  ctbl_bt601_5 = {
+		    298,  409,    0,  298, -208, -100,  298,    0,  517, 0,
+	};
+	const struct color_conv_coef *ct;
+#define CVAL(x, y)	(((x & 2047) << 16) | (y & 2047))
+
+	ct = &ctbl_bt601_5;
+
+	MOD_REG_FLD(cf1_reg,		mask,	CVAL(ct->rcr, ct->ry));
+	MOD_REG_FLD(cf1_reg + 4,	mask,	CVAL(ct->gy,  ct->rcb));
+	MOD_REG_FLD(cf1_reg + 8,	mask,	CVAL(ct->gcb, ct->gcr));
+	MOD_REG_FLD(cf1_reg + 12,	mask,	CVAL(ct->bcr, ct->by));
+	MOD_REG_FLD(cf1_reg + 16,	mask,	CVAL(0,	      ct->bcb));
+
+	MOD_REG_FLD(cf2_reg,		mask,	CVAL(ct->rcr, ct->ry));
+	MOD_REG_FLD(cf2_reg + 4,	mask,	CVAL(ct->gy,  ct->rcb));
+	MOD_REG_FLD(cf2_reg + 8,	mask,	CVAL(ct->gcb, ct->gcr));
+	MOD_REG_FLD(cf2_reg + 12,	mask,	CVAL(ct->bcr, ct->by));
+	MOD_REG_FLD(cf2_reg + 16,	mask,	CVAL(0,	      ct->bcb));
+#undef CVAL
+
+	MOD_REG_FLD(at1_reg, (1 << 11), ct->full_range);
+	MOD_REG_FLD(at2_reg, (1 << 11), ct->full_range);
+}
+
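+/* Pick LCD logical and pixel clock dividers for the requested pixel clock,
+ * enforcing a minimum pixel clock divider of 2 for TFT and 3 for passive
+ * panels and clamping both dividers to 255.
+ */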
+static void calc_ck_div(int is_tft, int pck, int *lck_div, int *pck_div)
+{
+	unsigned long fck, lck;
+
+	*lck_div = 1;
+	pck = max(1, pck);
+	fck = clk_get_rate(dispc.dss1_fck);
+	lck = fck;
+	*pck_div = (lck + pck - 1) / pck;
+	if (is_tft)
+		*pck_div = max(2, *pck_div);
+	else
+		*pck_div = max(3, *pck_div);
+	if (*pck_div > 255) {
+		*pck_div = 255;
+		lck = pck * *pck_div;
+		*lck_div = fck / lck;
+		BUG_ON(*lck_div < 1);
+		if (*lck_div > 255) {
+			*lck_div = 255;
+			dev_warn(dispc.fbdev->dev, "pixclock %d kHz too low.\n",
+				 pck / 1000);
+		}
+	}
+}
+
+static void set_lcd_tft_mode(int enable)
+{
+	u32 mask;
+
+	mask = 1 << 3;
+	MOD_REG_FLD(DISPC_CONTROL, mask, enable ? mask : 0);
+}
+
+static void set_lcd_timings(void)
+{
+	u32 l;
+	int lck_div, pck_div;
+	struct lcd_panel *panel = dispc.fbdev->panel;
+	int is_tft = panel->config & OMAP_LCDC_PANEL_TFT;
+	unsigned long fck;
+
+	l = dispc_read_reg(DISPC_TIMING_H);
+	l &= ~(FLD_MASK(0, 6) | FLD_MASK(8, 8) | FLD_MASK(20, 8));
+	l |= ( max(1, (min(64,  panel->hsw))) - 1 ) << 0;
+	l |= ( max(1, (min(256, panel->hfp))) - 1 ) << 8;
+	l |= ( max(1, (min(256, panel->hbp))) - 1 ) << 20;
+	dispc_write_reg(DISPC_TIMING_H, l);
+
+	l = dispc_read_reg(DISPC_TIMING_V);
+	l &= ~(FLD_MASK(0, 6) | FLD_MASK(8, 8) | FLD_MASK(20, 8));
+	l |= ( max(1, (min(64,  panel->vsw))) - 1 ) << 0;
+	l |= ( max(0, (min(255, panel->vfp))) - 0 ) << 8;
+	l |= ( max(0, (min(255, panel->vbp))) - 0 ) << 20;
+	dispc_write_reg(DISPC_TIMING_V, l);
+
+	l = dispc_read_reg(DISPC_POL_FREQ);
+	l &= ~FLD_MASK(12, 6);
+	l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 12;
+	l |= panel->acb & 0xff;
+	dispc_write_reg(DISPC_POL_FREQ, l);
+
+	calc_ck_div(is_tft, panel->pixel_clock * 1000, &lck_div, &pck_div);
+
+	l = dispc_read_reg(DISPC_DIVISOR);
+	l &= ~(FLD_MASK(16, 8) | FLD_MASK(0, 8));
+	l |= (lck_div << 16) | (pck_div << 0);
+	dispc_write_reg(DISPC_DIVISOR, l);
+
+	/* update panel info with the exact clock */
+	fck = clk_get_rate(dispc.dss1_fck);
+	panel->pixel_clock = fck / lck_div / pck_div / 1000;
+}
+
+int omap_dispc_request_irq(void (*callback)(void *data), void *data)
+{
+	int r = 0;
+
+	BUG_ON(callback == NULL);
+
+	if (dispc.irq_callback)
+		r = -EBUSY;
+	else {
+		dispc.irq_callback = callback;
+		dispc.irq_callback_data = data;
+	}
+
+	return r;
+}
+EXPORT_SYMBOL(omap_dispc_request_irq);
+
+void omap_dispc_enable_irqs(int irq_mask)
+{
+	enable_lcd_clocks(1);
+	dispc.enabled_irqs = irq_mask;
+	irq_mask |= DISPC_IRQ_MASK_ERROR;
+	MOD_REG_FLD(DISPC_IRQENABLE, 0x7fff, irq_mask);
+	enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_enable_irqs);
+
+void omap_dispc_disable_irqs(int irq_mask)
+{
+	enable_lcd_clocks(1);
+	dispc.enabled_irqs &= ~irq_mask;
+	irq_mask &= ~DISPC_IRQ_MASK_ERROR;
+	MOD_REG_FLD(DISPC_IRQENABLE, 0x7fff, irq_mask);
+	enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_disable_irqs);
+
+void omap_dispc_free_irq(void)
+{
+	enable_lcd_clocks(1);
+	omap_dispc_disable_irqs(DISPC_IRQ_MASK_ALL);
+	dispc.irq_callback = NULL;
+	dispc.irq_callback_data = NULL;
+	enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_free_irq);
+
+static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
+{
+	u32 stat = dispc_read_reg(DISPC_IRQSTATUS);
+
+	if (stat & DISPC_IRQ_FRAMEMASK)
+		complete(&dispc.frame_done);
+
+	if (stat & DISPC_IRQ_MASK_ERROR) {
+		if (printk_ratelimit()) {
+			dev_err(dispc.fbdev->dev, "irq error status %04x\n",
+				stat & 0x7fff);
+		}
+	}
+
+	if ((stat & dispc.enabled_irqs) && dispc.irq_callback)
+		dispc.irq_callback(dispc.irq_callback_data);
+
+	dispc_write_reg(DISPC_IRQSTATUS, stat);
+
+	return IRQ_HANDLED;
+}
+
+static int get_dss_clocks(void)
+{
+	if (IS_ERR((dispc.dss_ick = clk_get(dispc.fbdev->dev, "dss_ick")))) {
+		dev_err(dispc.fbdev->dev, "can't get dss_ick");
+		return PTR_ERR(dispc.dss_ick);
+	}
+
+	if (IS_ERR((dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck")))) {
+		dev_err(dispc.fbdev->dev, "can't get dss1_fck");
+		clk_put(dispc.dss_ick);
+		return PTR_ERR(dispc.dss1_fck);
+	}
+
+	if (IS_ERR((dispc.dss_54m_fck =
+				clk_get(dispc.fbdev->dev, "dss_54m_fck")))) {
+		dev_err(dispc.fbdev->dev, "can't get dss_54m_fck");
+		clk_put(dispc.dss_ick);
+		clk_put(dispc.dss1_fck);
+		return PTR_ERR(dispc.dss_54m_fck);
+	}
+
+	return 0;
+}
+
+static void put_dss_clocks(void)
+{
+	clk_put(dispc.dss_54m_fck);
+	clk_put(dispc.dss1_fck);
+	clk_put(dispc.dss_ick);
+}
+
+static void enable_lcd_clocks(int enable)
+{
+	if (enable)
+		clk_enable(dispc.dss1_fck);
+	else
+		clk_disable(dispc.dss1_fck);
+}
+
+static void enable_interface_clocks(int enable)
+{
+	if (enable)
+		clk_enable(dispc.dss_ick);
+	else
+		clk_disable(dispc.dss_ick);
+}
+
+static void enable_digit_clocks(int enable)
+{
+	if (enable)
+		clk_enable(dispc.dss_54m_fck);
+	else
+		clk_disable(dispc.dss_54m_fck);
+}
+
+static void omap_dispc_suspend(void)
+{
+	if (dispc.update_mode == OMAPFB_AUTO_UPDATE) {
+		init_completion(&dispc.frame_done);
+		omap_dispc_enable_lcd_out(0);
+		if (!wait_for_completion_timeout(&dispc.frame_done,
+				msecs_to_jiffies(500))) {
+			dev_err(dispc.fbdev->dev,
+				"timeout waiting for FRAME DONE\n");
+		}
+		enable_lcd_clocks(0);
+	}
+}
+
+static void omap_dispc_resume(void)
+{
+	if (dispc.update_mode == OMAPFB_AUTO_UPDATE) {
+		enable_lcd_clocks(1);
+		if (!dispc.ext_mode) {
+			set_lcd_timings();
+			load_palette();
+		}
+		omap_dispc_enable_lcd_out(1);
+	}
+}
+
+
+static int omap_dispc_update_window(struct fb_info *fbi,
+				 struct omapfb_update_window *win,
+				 void (*complete_callback)(void *arg),
+				 void *complete_callback_data)
+{
+	return dispc.update_mode == OMAPFB_UPDATE_DISABLED ? -ENODEV : 0;
+}
+
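+/* Map a framebuffer memory region into the kernel's virtual address space
+ * with write-combining attributes.
+ */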
+static int mmap_kern(struct omapfb_mem_region *region)
+{
+	struct vm_struct	*kvma;
+	struct vm_area_struct	vma;
+	pgprot_t		pgprot;
+	unsigned long		vaddr;
+
+	kvma = get_vm_area(region->size, VM_IOREMAP);
+	if (kvma == NULL) {
+		dev_err(dispc.fbdev->dev, "can't get kernel vm area\n");
+		return -ENOMEM;
+	}
+	vma.vm_mm = &init_mm;
+
+	vaddr = (unsigned long)kvma->addr;
+
+	pgprot = pgprot_writecombine(pgprot_kernel);
+	vma.vm_start = vaddr;
+	vma.vm_end = vaddr + region->size;
+	if (io_remap_pfn_range(&vma, vaddr, region->paddr >> PAGE_SHIFT,
+			   region->size, pgprot) < 0) {
+		dev_err(dispc.fbdev->dev, "kernel mmap for FBMEM failed\n");
+		return -EAGAIN;
+	}
+	region->vaddr = (void *)vaddr;
+
+	return 0;
+}
+
+static void mmap_user_open(struct vm_area_struct *vma)
+{
+	int plane = (int)vma->vm_private_data;
+
+	atomic_inc(&dispc.map_count[plane]);
+}
+
+static void mmap_user_close(struct vm_area_struct *vma)
+{
+	int plane = (int)vma->vm_private_data;
+
+	atomic_dec(&dispc.map_count[plane]);
+}
+
+static struct vm_operations_struct mmap_user_ops = {
+	.open = mmap_user_open,
+	.close = mmap_user_close,
+};
+
+static int omap_dispc_mmap_user(struct fb_info *info,
+				struct vm_area_struct *vma)
+{
+	struct omapfb_plane_struct *plane = info->par;
+	unsigned long off;
+	unsigned long start;
+	u32 len;
+
+	if (vma->vm_end - vma->vm_start == 0)
+		return 0;
+	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+		return -EINVAL;
+	off = vma->vm_pgoff << PAGE_SHIFT;
+
+	start = info->fix.smem_start;
+	len = info->fix.smem_len;
+	if (off >= len)
+		return -EINVAL;
+	if ((vma->vm_end - vma->vm_start + off) > len)
+		return -EINVAL;
+	off += start;
+	vma->vm_pgoff = off >> PAGE_SHIFT;
+	vma->vm_flags |= VM_IO | VM_RESERVED;
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	vma->vm_ops = &mmap_user_ops;
+	vma->vm_private_data = (void *)plane->idx;
+	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
+		return -EAGAIN;
+	/* vm_ops.open won't be called for mmap itself. */
+	atomic_inc(&dispc.map_count[plane->idx]);
+	return 0;
+}
+
+static void unmap_kern(struct omapfb_mem_region *region)
+{
+	vunmap(region->vaddr);
+}
+
+static int alloc_palette_ram(void)
+{
+	dispc.palette_vaddr = dma_alloc_writecombine(dispc.fbdev->dev,
+		MAX_PALETTE_SIZE, &dispc.palette_paddr, GFP_KERNEL);
+	if (dispc.palette_vaddr == NULL) {
+		dev_err(dispc.fbdev->dev, "failed to alloc palette memory\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void free_palette_ram(void)
+{
+	dma_free_writecombine(dispc.fbdev->dev, MAX_PALETTE_SIZE,
+			dispc.palette_vaddr, dispc.palette_paddr);
+}
+
+static int alloc_fbmem(struct omapfb_mem_region *region)
+{
+	region->vaddr = dma_alloc_writecombine(dispc.fbdev->dev,
+			region->size, &region->paddr, GFP_KERNEL);
+
+	if (region->vaddr == NULL) {
+		dev_err(dispc.fbdev->dev, "unable to allocate FB DMA memory\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void free_fbmem(struct omapfb_mem_region *region)
+{
+	dma_free_writecombine(dispc.fbdev->dev, region->size,
+			      region->vaddr, region->paddr);
+}
+
+static struct resmap *init_resmap(unsigned long start, size_t size)
+{
+	unsigned page_cnt;
+	struct resmap *res_map;
+
+	page_cnt = PAGE_ALIGN(size) / PAGE_SIZE;
+	res_map =
+	    kzalloc(sizeof(struct resmap) + RESMAP_SIZE(page_cnt), GFP_KERNEL);
+	if (res_map == NULL)
+		return NULL;
+	res_map->start = start;
+	res_map->page_cnt = page_cnt;
+	res_map->map = (unsigned long *)(res_map + 1);
+	return res_map;
+}
+
+static void cleanup_resmap(struct resmap *res_map)
+{
+	kfree(res_map);
+}
+
+static inline int resmap_mem_type(unsigned long start)
+{
+	if (start >= OMAP2_SRAM_START &&
+	    start < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
+		return OMAPFB_MEMTYPE_SRAM;
+	else
+		return OMAPFB_MEMTYPE_SDRAM;
+}
+
+static inline int resmap_page_reserved(struct resmap *res_map, unsigned page_nr)
+{
+	return *RESMAP_PTR(res_map, page_nr) & RESMAP_MASK(page_nr) ? 1 : 0;
+}
+
+static inline void resmap_reserve_page(struct resmap *res_map, unsigned page_nr)
+{
+	BUG_ON(resmap_page_reserved(res_map, page_nr));
+	*RESMAP_PTR(res_map, page_nr) |= RESMAP_MASK(page_nr);
+}
+
+static inline void resmap_free_page(struct resmap *res_map, unsigned page_nr)
+{
+	BUG_ON(!resmap_page_reserved(res_map, page_nr));
+	*RESMAP_PTR(res_map, page_nr) &= ~RESMAP_MASK(page_nr);
+}
+
+static void resmap_reserve_region(unsigned long start, size_t size)
+{
+
+	struct resmap	*res_map;
+	unsigned	start_page;
+	unsigned	end_page;
+	int		mtype;
+	unsigned	i;
+
+	mtype = resmap_mem_type(start);
+	res_map = dispc.res_map[mtype];
+	dev_dbg(dispc.fbdev->dev, "reserve mem type %d start %08lx size %d\n",
+		mtype, start, size);
+	start_page = (start - res_map->start) / PAGE_SIZE;
+	end_page = start_page + PAGE_ALIGN(size) / PAGE_SIZE;
+	for (i = start_page; i < end_page; i++)
+		resmap_reserve_page(res_map, i);
+}
+
+static void resmap_free_region(unsigned long start, size_t size)
+{
+	struct resmap	*res_map;
+	unsigned	start_page;
+	unsigned	end_page;
+	unsigned	i;
+	int		mtype;
+
+	mtype = resmap_mem_type(start);
+	res_map = dispc.res_map[mtype];
+	dev_dbg(dispc.fbdev->dev, "free mem type %d start %08lx size %d\n",
+		mtype, start, size);
+	start_page = (start - res_map->start) / PAGE_SIZE;
+	end_page = start_page + PAGE_ALIGN(size) / PAGE_SIZE;
+	for (i = start_page; i < end_page; i++)
+		resmap_free_page(res_map, i);
+}
+
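+/*
+ * First-fit scan over the per-memory-type page bitmap: walk the pages and
+ * restart the candidate window after every reserved page until a run of
+ * free pages large enough for the request is found.  Returns the physical
+ * start address of the reserved run, or 0 if nothing fits.
+ */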
+static unsigned long resmap_alloc_region(int mtype, size_t size)
+{
+	unsigned i;
+	unsigned total;
+	unsigned start_page;
+	unsigned long start;
+	struct resmap *res_map = dispc.res_map[mtype];
+
+	BUG_ON(mtype >= DISPC_MEMTYPE_NUM || res_map == NULL || !size);
+
+	size = PAGE_ALIGN(size) / PAGE_SIZE;
+	start_page = 0;
+	total = 0;
+	for (i = 0; i < res_map->page_cnt; i++) {
+		if (resmap_page_reserved(res_map, i)) {
+			start_page = i + 1;
+			total = 0;
+		} else if (++total == size)
+			break;
+	}
+	if (total < size)
+		return 0;
+
+	start = res_map->start + start_page * PAGE_SIZE;
+	resmap_reserve_region(start, size * PAGE_SIZE);
+
+	return start;
+}
+
+/* Note that this will only work for user mappings, we don't deal with
+ * kernel mappings here, so fbcon will keep using the old region.
+ */
+static int omap_dispc_setup_mem(int plane, size_t size, int mem_type,
+				unsigned long *paddr)
+{
+	struct omapfb_mem_region *rg;
+	unsigned long new_addr = 0;
+
+	if ((unsigned)plane >= dispc.mem_desc.region_cnt)
+		return -EINVAL;
+	if (mem_type >= DISPC_MEMTYPE_NUM)
+		return -EINVAL;
+	if (dispc.res_map[mem_type] == NULL)
+		return -ENOMEM;
+	rg = &dispc.mem_desc.region[plane];
+	if (size == rg->size && mem_type == rg->type)
+		return 0;
+	if (atomic_read(&dispc.map_count[plane]))
+		return -EBUSY;
+	if (rg->size != 0)
+		resmap_free_region(rg->paddr, rg->size);
+	if (size != 0) {
+		new_addr = resmap_alloc_region(mem_type, size);
+		if (!new_addr) {
+			/* Reallocate old region. */
+			resmap_reserve_region(rg->paddr, rg->size);
+			return -ENOMEM;
+		}
+	}
+	rg->paddr = new_addr;
+	rg->size = size;
+	rg->type = mem_type;
+
+	*paddr = new_addr;
+
+	return 0;
+}
+
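+/*
+ * Map or allocate each requested framebuffer region, then build one
+ * reservation bitmap per memory type covering the span from the lowest to
+ * the highest region address.  The whole span starts out reserved, so any
+ * holes between the initial regions are never handed out later.
+ */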
+static int setup_fbmem(struct omapfb_mem_desc *req_md)
+{
+	struct omapfb_mem_region	*rg;
+	int i;
+	int r;
+	unsigned long			mem_start[DISPC_MEMTYPE_NUM];
+	unsigned long			mem_end[DISPC_MEMTYPE_NUM];
+
+	if (!req_md->region_cnt) {
+		dev_err(dispc.fbdev->dev, "no memory regions defined\n");
+		return -ENOENT;
+	}
+
+	rg = &req_md->region[0];
+	memset(mem_start, 0xff, sizeof(mem_start));
+	memset(mem_end, 0, sizeof(mem_end));
+
+	for (i = 0; i < req_md->region_cnt; i++, rg++) {
+		int mtype;
+		if (rg->paddr) {
+			rg->alloc = 0;
+			if (rg->vaddr == NULL) {
+				rg->map = 1;
+				if ((r = mmap_kern(rg)) < 0)
+					return r;
+			}
+		} else {
+			if (rg->type != OMAPFB_MEMTYPE_SDRAM) {
+				dev_err(dispc.fbdev->dev,
+					"unsupported memory type\n");
+				return -EINVAL;
+			}
+			rg->alloc = rg->map = 1;
+			if ((r = alloc_fbmem(rg)) < 0)
+				return r;
+		}
+		mtype = rg->type;
+
+		if (rg->paddr < mem_start[mtype])
+			mem_start[mtype] = rg->paddr;
+		if (rg->paddr + rg->size > mem_end[mtype])
+			mem_end[mtype] = rg->paddr + rg->size;
+	}
+
+	for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
+		unsigned long start;
+		size_t size;
+		if (mem_end[i] == 0)
+			continue;
+		start = mem_start[i];
+		size = mem_end[i] - start;
+		dispc.res_map[i] = init_resmap(start, size);
+		r = -ENOMEM;
+		if (dispc.res_map[i] == NULL)
+			goto fail;
+		/* Initial state is that everything is reserved. This
+		 * includes possible holes as well, which will never be
+		 * freed.
+		 */
+		resmap_reserve_region(start, size);
+	}
+
+	dispc.mem_desc = *req_md;
+
+	return 0;
+fail:
+	for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
+		if (dispc.res_map[i] != NULL)
+			cleanup_resmap(dispc.res_map[i]);
+	}
+	return r;
+}
+
+static void cleanup_fbmem(void)
+{
+	struct omapfb_mem_region *rg;
+	int i;
+
+	for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
+		if (dispc.res_map[i] != NULL)
+			cleanup_resmap(dispc.res_map[i]);
+	}
+	rg = &dispc.mem_desc.region[0];
+	for (i = 0; i < dispc.mem_desc.region_cnt; i++, rg++) {
+		if (rg->alloc)
+			free_fbmem(rg);
+		else {
+			if (rg->map)
+				unmap_kern(rg);
+		}
+	}
+}
+
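+/*
+ * Controller bring-up: claim the DSS clocks, skip the soft reset if the
+ * bootloader already enabled the LCD output (CONFIG_FB_OMAP_BOOTLOADER_INIT),
+ * program the idle and clock autogating modes, hook up the DSS interrupt,
+ * and set up palette and framebuffer memory before reporting the DISPC
+ * revision.
+ */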
+static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode,
+			   struct omapfb_mem_desc *req_vram)
+{
+	int r;
+	u32 l;
+	struct lcd_panel *panel = fbdev->panel;
+	int tmo = 10000;
+	int skip_init = 0;
+	int i;
+
+	memset(&dispc, 0, sizeof(dispc));
+
+	dispc.base = io_p2v(DISPC_BASE);
+	dispc.fbdev = fbdev;
+	dispc.ext_mode = ext_mode;
+
+	init_completion(&dispc.frame_done);
+
+	if ((r = get_dss_clocks()) < 0)
+		return r;
+
+	enable_interface_clocks(1);
+	enable_lcd_clocks(1);
+
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+	l = dispc_read_reg(DISPC_CONTROL);
+	/* LCD enabled ? */
+	if (l & 1) {
+		pr_info("omapfb: skipping hardware initialization\n");
+		skip_init = 1;
+	}
+#endif
+
+	if (!skip_init) {
+		/* Reset monitoring works only w/ the 54M clk */
+		enable_digit_clocks(1);
+
+		/* Soft reset */
+		MOD_REG_FLD(DISPC_SYSCONFIG, 1 << 1, 1 << 1);
+
+		while (!(dispc_read_reg(DISPC_SYSSTATUS) & 1)) {
+			if (!--tmo) {
+				dev_err(dispc.fbdev->dev, "soft reset failed\n");
+				r = -ENODEV;
+				enable_digit_clocks(0);
+				goto fail1;
+			}
+		}
+
+		enable_digit_clocks(0);
+	}
+
+	/* Enable smart idle and autoidle */
+	l = dispc_read_reg(DISPC_CONTROL);
+	l &= ~((3 << 12) | (3 << 3));
+	l |= (2 << 12) | (2 << 3) | (1 << 0);
+	dispc_write_reg(DISPC_SYSCONFIG, l);
+	omap_writel(1 << 0, DSS_BASE + DSS_SYSCONFIG);
+
+	/* Set functional clock autogating */
+	l = dispc_read_reg(DISPC_CONFIG);
+	l |= 1 << 9;
+	dispc_write_reg(DISPC_CONFIG, l);
+
+	/* Acknowledge any pending interrupts */
+	l = dispc_read_reg(DISPC_IRQSTATUS);
+	dispc_write_reg(DISPC_IRQSTATUS, l);
+
+	/* Enable those that we handle always */
+	omap_dispc_enable_irqs(DISPC_IRQ_FRAMEMASK);
+
+	if ((r = request_irq(INT_24XX_DSS_IRQ, omap_dispc_irq_handler,
+			   0, MODULE_NAME, fbdev)) < 0) {
+		dev_err(dispc.fbdev->dev, "can't get DSS IRQ\n");
+		goto fail1;
+	}
+
+	/* L3 firewall setting: enable access to OCM RAM */
+	__raw_writel(0x402000b0, io_p2v(0x680050a0));
+
+	if ((r = alloc_palette_ram()) < 0)
+		goto fail2;
+
+	if ((r = setup_fbmem(req_vram)) < 0)
+		goto fail3;
+
+	if (!skip_init) {
+		for (i = 0; i < dispc.mem_desc.region_cnt; i++) {
+			memset(dispc.mem_desc.region[i].vaddr, 0,
+				dispc.mem_desc.region[i].size);
+		}
+
+		/* Set logic clock to fck, pixel clock to fck/2 for now */
+		MOD_REG_FLD(DISPC_DIVISOR, FLD_MASK(16, 8), 1 << 16);
+		MOD_REG_FLD(DISPC_DIVISOR, FLD_MASK(0, 8), 2 << 0);
+
+		setup_plane_fifo(0, ext_mode);
+		setup_plane_fifo(1, ext_mode);
+		setup_plane_fifo(2, ext_mode);
+
+		setup_color_conv_coef();
+
+		set_lcd_tft_mode(panel->config & OMAP_LCDC_PANEL_TFT);
+		set_load_mode(DISPC_LOAD_FRAME_ONLY);
+
+		if (!ext_mode) {
+			set_lcd_data_lines(panel->data_lines);
+			omap_dispc_set_lcd_size(panel->x_res, panel->y_res);
+			set_lcd_timings();
+		} else
+			set_lcd_data_lines(panel->bpp);
+		enable_rfbi_mode(ext_mode);
+	}
+
+	l = dispc_read_reg(DISPC_REVISION);
+	pr_info("omapfb: DISPC version %d.%d initialized\n",
+		 l >> 4 & 0x0f, l & 0x0f);
+	enable_lcd_clocks(0);
+
+	return 0;
+fail3:
+	free_palette_ram();
+fail2:
+	free_irq(INT_24XX_DSS_IRQ, fbdev);
+fail1:
+	enable_lcd_clocks(0);
+	enable_interface_clocks(0);
+	put_dss_clocks();
+
+	return r;
+}
+
+static void omap_dispc_cleanup(void)
+{
+	int i;
+
+	omap_dispc_set_update_mode(OMAPFB_UPDATE_DISABLED);
+	/* This will also disable clocks that are on */
+	for (i = 0; i < dispc.mem_desc.region_cnt; i++)
+		omap_dispc_enable_plane(i, 0);
+	cleanup_fbmem();
+	free_palette_ram();
+	free_irq(INT_24XX_DSS_IRQ, dispc.fbdev);
+	enable_interface_clocks(0);
+	put_dss_clocks();
+}
+
+const struct lcd_ctrl omap2_int_ctrl = {
+	.name			= "internal",
+	.init			= omap_dispc_init,
+	.cleanup		= omap_dispc_cleanup,
+	.get_caps		= omap_dispc_get_caps,
+	.set_update_mode	= omap_dispc_set_update_mode,
+	.get_update_mode	= omap_dispc_get_update_mode,
+	.update_window		= omap_dispc_update_window,
+	.suspend		= omap_dispc_suspend,
+	.resume			= omap_dispc_resume,
+	.setup_plane		= omap_dispc_setup_plane,
+	.setup_mem		= omap_dispc_setup_mem,
+	.set_scale		= omap_dispc_set_scale,
+	.enable_plane		= omap_dispc_enable_plane,
+	.set_color_key		= omap_dispc_set_color_key,
+	.get_color_key		= omap_dispc_get_color_key,
+	.mmap			= omap_dispc_mmap_user,
+};
diff --git a/drivers/video/omap/dispc.h b/drivers/video/omap/dispc.h
new file mode 100644
index 0000000..eb1512b
--- /dev/null
+++ b/drivers/video/omap/dispc.h
@@ -0,0 +1,43 @@
+#ifndef _DISPC_H
+#define _DISPC_H
+
+#include <linux/interrupt.h>
+
+#define DISPC_PLANE_GFX			0
+#define DISPC_PLANE_VID1		1
+#define DISPC_PLANE_VID2		2
+
+#define DISPC_RGB_1_BPP			0x00
+#define DISPC_RGB_2_BPP			0x01
+#define DISPC_RGB_4_BPP			0x02
+#define DISPC_RGB_8_BPP			0x03
+#define DISPC_RGB_12_BPP		0x04
+#define DISPC_RGB_16_BPP		0x06
+#define DISPC_RGB_24_BPP		0x08
+#define DISPC_RGB_24_BPP_UNPACK_32	0x09
+#define DISPC_YUV2_422			0x0a
+#define DISPC_UYVY_422			0x0b
+
+#define DISPC_BURST_4x32		0
+#define DISPC_BURST_8x32		1
+#define DISPC_BURST_16x32		2
+
+#define DISPC_LOAD_CLUT_AND_FRAME	0x00
+#define DISPC_LOAD_CLUT_ONLY		0x01
+#define DISPC_LOAD_FRAME_ONLY		0x02
+#define DISPC_LOAD_CLUT_ONCE_FRAME	0x03
+
+#define DISPC_TFT_DATA_LINES_12		0
+#define DISPC_TFT_DATA_LINES_16		1
+#define DISPC_TFT_DATA_LINES_18		2
+#define DISPC_TFT_DATA_LINES_24		3
+
+extern void omap_dispc_set_lcd_size(int width, int height);
+
+extern void omap_dispc_enable_lcd_out(int enable);
+extern void omap_dispc_enable_digit_out(int enable);
+
+extern int  omap_dispc_request_irq(void (*callback)(void *data), void *data);
+extern void omap_dispc_free_irq(void);
+
+#endif
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
new file mode 100644
index 0000000..dc48e02
--- /dev/null
+++ b/drivers/video/omap/hwa742.c
@@ -0,0 +1,1077 @@
+/*
+ * Epson HWA742 LCD controller driver
+ *
+ * Copyright (C) 2004-2005 Nokia Corporation
+ * Authors:     Juha Yrjölä   <juha.yrjola@nokia.com>
+ *	        Imre Deak     <imre.deak@nokia.com>
+ * YUV support: Jussi Laako   <jussi.laako@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+#include <asm/arch/hwa742.h>
+
+#define HWA742_REV_CODE_REG       0x0
+#define HWA742_CONFIG_REG         0x2
+#define HWA742_PLL_DIV_REG        0x4
+#define HWA742_PLL_0_REG          0x6
+#define HWA742_PLL_1_REG          0x8
+#define HWA742_PLL_2_REG          0xa
+#define HWA742_PLL_3_REG          0xc
+#define HWA742_PLL_4_REG          0xe
+#define HWA742_CLK_SRC_REG        0x12
+#define HWA742_PANEL_TYPE_REG     0x14
+#define HWA742_H_DISP_REG         0x16
+#define HWA742_H_NDP_REG          0x18
+#define HWA742_V_DISP_1_REG       0x1a
+#define HWA742_V_DISP_2_REG       0x1c
+#define HWA742_V_NDP_REG          0x1e
+#define HWA742_HS_W_REG           0x20
+#define HWA742_HP_S_REG           0x22
+#define HWA742_VS_W_REG           0x24
+#define HWA742_VP_S_REG           0x26
+#define HWA742_PCLK_POL_REG       0x28
+#define HWA742_INPUT_MODE_REG     0x2a
+#define HWA742_TRANSL_MODE_REG1   0x2e
+#define HWA742_DISP_MODE_REG      0x34
+#define HWA742_WINDOW_TYPE        0x36
+#define HWA742_WINDOW_X_START_0   0x38
+#define HWA742_WINDOW_X_START_1   0x3a
+#define HWA742_WINDOW_Y_START_0   0x3c
+#define HWA742_WINDOW_Y_START_1   0x3e
+#define HWA742_WINDOW_X_END_0     0x40
+#define HWA742_WINDOW_X_END_1     0x42
+#define HWA742_WINDOW_Y_END_0     0x44
+#define HWA742_WINDOW_Y_END_1     0x46
+#define HWA742_MEMORY_WRITE_LSB   0x48
+#define HWA742_MEMORY_WRITE_MSB   0x49
+#define HWA742_MEMORY_READ_0      0x4a
+#define HWA742_MEMORY_READ_1      0x4c
+#define HWA742_MEMORY_READ_2      0x4e
+#define HWA742_POWER_SAVE         0x56
+#define HWA742_NDP_CTRL           0x58
+
+#define HWA742_AUTO_UPDATE_TIME		(HZ / 20)
+
+/* Reserve 4 request slots for requests in irq context */
+#define REQ_POOL_SIZE			24
+#define IRQ_REQ_POOL_SIZE		4
+
+#define REQ_FROM_IRQ_POOL 0x01
+
+#define REQ_COMPLETE	0
+#define REQ_PENDING	1
+
+struct update_param {
+	int	x, y, width, height;
+	int	color_mode;
+	int	flags;
+};
+
+struct hwa742_request {
+	struct list_head entry;
+	unsigned int	 flags;
+
+	int		 (*handler)(struct hwa742_request *req);
+	void		 (*complete)(void *data);
+	void		 *complete_data;
+
+	union {
+		struct update_param	update;
+		struct completion	*sync;
+	} par;
+};
+
+struct {
+	enum omapfb_update_mode	update_mode;
+	enum omapfb_update_mode	update_mode_before_suspend;
+
+	struct timer_list	auto_update_timer;
+	int			stop_auto_update;
+	struct omapfb_update_window	auto_update_window;
+	unsigned		te_connected:1;
+	unsigned		vsync_only:1;
+
+	struct hwa742_request	req_pool[REQ_POOL_SIZE];
+	struct list_head	pending_req_list;
+	struct list_head	free_req_list;
+	struct semaphore	req_sema;
+	spinlock_t		req_lock;
+
+	struct extif_timings	reg_timings, lut_timings;
+
+	int			prev_color_mode;
+	int			prev_flags;
+	int			window_type;
+
+	u32			max_transmit_size;
+	u32			extif_clk_period;
+	unsigned long		pix_tx_time;
+	unsigned long		line_upd_time;
+
+	struct omapfb_device	*fbdev;
+	struct lcd_ctrl_extif	*extif;
+	struct lcd_ctrl		*int_ctrl;
+
+	void			(*power_up)(struct device *dev);
+	void			(*power_down)(struct device *dev);
+} hwa742;
+
+struct lcd_ctrl hwa742_ctrl;
+
+static u8 hwa742_read_reg(u8 reg)
+{
+	u8 data;
+
+	hwa742.extif->set_bits_per_cycle(8);
+	hwa742.extif->write_command(&reg, 1);
+	hwa742.extif->read_data(&data, 1);
+
+	return data;
+}
+
+static void hwa742_write_reg(u8 reg, u8 data)
+{
+	hwa742.extif->set_bits_per_cycle(8);
+	hwa742.extif->write_command(&reg, 1);
+	hwa742.extif->write_data(&data, 1);
+}
+
+static void set_window_regs(int x_start, int y_start, int x_end, int y_end)
+{
+	u8 tmp[8];
+	u8 cmd;
+
+	x_end--;
+	y_end--;
+	tmp[0] = x_start;
+	tmp[1] = x_start >> 8;
+	tmp[2] = y_start;
+	tmp[3] = y_start >> 8;
+	tmp[4] = x_end;
+	tmp[5] = x_end >> 8;
+	tmp[6] = y_end;
+	tmp[7] = y_end >> 8;
+
+	hwa742.extif->set_bits_per_cycle(8);
+	cmd = HWA742_WINDOW_X_START_0;
+
+	hwa742.extif->write_command(&cmd, 1);
+
+	hwa742.extif->write_data(tmp, 8);
+}
+
+static void set_format_regs(int conv, int transl, int flags)
+{
+	if (flags & OMAPFB_FORMAT_FLAG_DOUBLE) {
+		hwa742.window_type = ((hwa742.window_type & 0xfc) | 0x01);
+#ifdef VERBOSE
+		dev_dbg(hwa742.fbdev->dev, "hwa742: enabled pixel doubling\n");
+#endif
+	} else {
+		hwa742.window_type = (hwa742.window_type & 0xfc);
+#ifdef VERBOSE
+		dev_dbg(hwa742.fbdev->dev, "hwa742: disabled pixel doubling\n");
+#endif
+	}
+
+	hwa742_write_reg(HWA742_INPUT_MODE_REG, conv);
+	hwa742_write_reg(HWA742_TRANSL_MODE_REG1, transl);
+	hwa742_write_reg(HWA742_WINDOW_TYPE, hwa742.window_type);
+}
+
+static void enable_tearsync(int y, int width, int height, int screen_height,
+			    int force_vsync)
+{
+	u8 b;
+
+	b = hwa742_read_reg(HWA742_NDP_CTRL);
+	b |= 1 << 2;
+	hwa742_write_reg(HWA742_NDP_CTRL, b);
+
+	if (likely(hwa742.vsync_only || force_vsync)) {
+		hwa742.extif->enable_tearsync(1, 0);
+		return;
+	}
+
+	if (width * hwa742.pix_tx_time < hwa742.line_upd_time) {
+		hwa742.extif->enable_tearsync(1, 0);
+		return;
+	}
+
+	if ((width * hwa742.pix_tx_time / 1000) * height <
+	    (y + height) * (hwa742.line_upd_time / 1000)) {
+		hwa742.extif->enable_tearsync(1, 0);
+		return;
+	}
+
+	hwa742.extif->enable_tearsync(1, y + 1);
+}
+
+static void disable_tearsync(void)
+{
+	u8 b;
+
+	hwa742.extif->enable_tearsync(0, 0);
+
+	b = hwa742_read_reg(HWA742_NDP_CTRL);
+	b &= ~(1 << 2);
+	hwa742_write_reg(HWA742_NDP_CTRL, b);
+}
+
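+/*
+ * Request allocation: process-context callers take the counting semaphore,
+ * which is sized to leave IRQ_REQ_POOL_SIZE pool entries in reserve, so
+ * that interrupt-context callers can still pull a request from the free
+ * list without sleeping.
+ */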
+static inline struct hwa742_request *alloc_req(void)
+{
+	unsigned long flags;
+	struct hwa742_request *req;
+	int req_flags = 0;
+
+	if (!in_interrupt())
+		down(&hwa742.req_sema);
+	else
+		req_flags = REQ_FROM_IRQ_POOL;
+
+	spin_lock_irqsave(&hwa742.req_lock, flags);
+	BUG_ON(list_empty(&hwa742.free_req_list));
+	req = list_entry(hwa742.free_req_list.next,
+			 struct hwa742_request, entry);
+	list_del(&req->entry);
+	spin_unlock_irqrestore(&hwa742.req_lock, flags);
+
+	INIT_LIST_HEAD(&req->entry);
+	req->flags = req_flags;
+
+	return req;
+}
+
+static inline void free_req(struct hwa742_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hwa742.req_lock, flags);
+
+	list_del(&req->entry);
+	list_add(&req->entry, &hwa742.free_req_list);
+	if (!(req->flags & REQ_FROM_IRQ_POOL))
+		up(&hwa742.req_sema);
+
+	spin_unlock_irqrestore(&hwa742.req_lock, flags);
+}
+
+static void process_pending_requests(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hwa742.req_lock, flags);
+
+	while (!list_empty(&hwa742.pending_req_list)) {
+		struct hwa742_request *req;
+		void (*complete)(void *);
+		void *complete_data;
+
+		req = list_entry(hwa742.pending_req_list.next,
+				 struct hwa742_request, entry);
+		spin_unlock_irqrestore(&hwa742.req_lock, flags);
+
+		if (req->handler(req) == REQ_PENDING)
+			return;
+
+		complete = req->complete;
+		complete_data = req->complete_data;
+		free_req(req);
+
+		if (complete)
+			complete(complete_data);
+
+		spin_lock_irqsave(&hwa742.req_lock, flags);
+	}
+
+	spin_unlock_irqrestore(&hwa742.req_lock, flags);
+}
+
+static void submit_req_list(struct list_head *head)
+{
+	unsigned long flags;
+	int process = 1;
+
+	spin_lock_irqsave(&hwa742.req_lock, flags);
+	if (likely(!list_empty(&hwa742.pending_req_list)))
+		process = 0;
+	list_splice_init(head, hwa742.pending_req_list.prev);
+	spin_unlock_irqrestore(&hwa742.req_lock, flags);
+
+	if (process)
+		process_pending_requests();
+}
+
+static void request_complete(void *data)
+{
+	struct hwa742_request	*req = (struct hwa742_request *)data;
+	void			(*complete)(void *);
+	void			*complete_data;
+
+	complete = req->complete;
+	complete_data = req->complete_data;
+
+	free_req(req);
+
+	if (complete)
+		complete(complete_data);
+
+	process_pending_requests();
+}
+
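+/*
+ * Per-request frame transfer: program the pixel format and window
+ * registers, point the internal controller's GFX plane at the right
+ * offset in the framebuffer, and start an asynchronous transfer over the
+ * external interface; completion is signalled through request_complete().
+ */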
+static int send_frame_handler(struct hwa742_request *req)
+{
+	struct update_param *par = &req->par.update;
+	int x = par->x;
+	int y = par->y;
+	int w = par->width;
+	int h = par->height;
+	int bpp;
+	int conv, transl;
+	unsigned long offset;
+	int color_mode = par->color_mode;
+	int flags = par->flags;
+	int scr_width = hwa742.fbdev->panel->x_res;
+	int scr_height = hwa742.fbdev->panel->y_res;
+
+#ifdef VERBOSE
+	dev_dbg(hwa742.fbdev->dev, "x %d y %d w %d h %d scr_width %d "
+		"color_mode %d flags %d\n",
+		x, y, w, h, scr_width, color_mode, flags);
+#endif
+
+	switch (color_mode) {
+	case OMAPFB_COLOR_YUV422:
+		bpp = 16;
+		conv = 0x08;
+		transl = 0x25;
+		break;
+	case OMAPFB_COLOR_YUV420:
+		bpp = 12;
+		conv = 0x09;
+		transl = 0x25;
+		break;
+	case OMAPFB_COLOR_RGB565:
+		bpp = 16;
+		conv = 0x01;
+		transl = 0x05;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (hwa742.prev_flags != flags ||
+	    hwa742.prev_color_mode != color_mode) {
+		set_format_regs(conv, transl, flags);
+		hwa742.prev_color_mode = color_mode;
+		hwa742.prev_flags = flags;
+	}
+	flags = req->par.update.flags;
+	if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
+		enable_tearsync(y, scr_width, h, scr_height,
+				flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
+	else
+		disable_tearsync();
+
+	set_window_regs(x, y, x + w, y + h);
+
+	offset = (scr_width * y + x) * bpp / 8;
+
+	hwa742.int_ctrl->setup_plane(OMAPFB_PLANE_GFX,
+			OMAPFB_CHANNEL_OUT_LCD, offset, scr_width, 0, 0, w, h,
+			color_mode);
+
+	hwa742.extif->set_bits_per_cycle(16);
+
+	hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 1);
+	hwa742.extif->transfer_area(w, h, request_complete, req);
+
+	return REQ_PENDING;
+}
+
+static void send_frame_complete(void *data)
+{
+	hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 0);
+}
+
+#define ADD_PREQ(_x, _y, _w, _h) do {		\
+	req = alloc_req();			\
+	req->handler	= send_frame_handler;	\
+	req->complete	= send_frame_complete;	\
+	req->par.update.x = _x;			\
+	req->par.update.y = _y;			\
+	req->par.update.width  = _w;		\
+	req->par.update.height = _h;		\
+	req->par.update.color_mode = color_mode;\
+	req->par.update.flags	  = flags;	\
+	list_add_tail(&req->entry, req_head);	\
+} while (0)
+
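+/*
+ * Split an update window into transfer requests: unaligned single-pixel
+ * columns at the left and right edges are sent separately, and the aligned
+ * middle span is split in two if it would exceed the external interface's
+ * maximum transmit size.  Only the first request of such a split keeps the
+ * TEARSYNC flag.
+ */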
+static void create_req_list(struct omapfb_update_window *win,
+			    struct list_head *req_head)
+{
+	struct hwa742_request *req;
+	int x = win->x;
+	int y = win->y;
+	int width = win->width;
+	int height = win->height;
+	int color_mode;
+	int flags;
+
+	flags = win->format & ~OMAPFB_FORMAT_MASK;
+	color_mode = win->format & OMAPFB_FORMAT_MASK;
+
+	if (x & 1) {
+		ADD_PREQ(x, y, 1, height);
+		width--;
+		x++;
+		flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
+	}
+	if (width & ~1) {
+		unsigned int xspan = width & ~1;
+		unsigned int ystart = y;
+		unsigned int yspan = height;
+
+		if (xspan * height * 2 > hwa742.max_transmit_size) {
+			yspan = hwa742.max_transmit_size / (xspan * 2);
+			ADD_PREQ(x, ystart, xspan, yspan);
+			ystart += yspan;
+			yspan = height - yspan;
+			flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
+		}
+
+		ADD_PREQ(x, ystart, xspan, yspan);
+		x += xspan;
+		width -= xspan;
+		flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
+	}
+	if (width)
+		ADD_PREQ(x, y, 1, height);
+}
+
+static void auto_update_complete(void *data)
+{
+	if (!hwa742.stop_auto_update)
+		mod_timer(&hwa742.auto_update_timer,
+			  jiffies + HWA742_AUTO_UPDATE_TIME);
+}
+
+static void hwa742_update_window_auto(unsigned long arg)
+{
+	LIST_HEAD(req_list);
+	struct hwa742_request *last;
+
+	create_req_list(&hwa742.auto_update_window, &req_list);
+	last = list_entry(req_list.prev, struct hwa742_request, entry);
+
+	last->complete = auto_update_complete;
+	last->complete_data = NULL;
+
+	submit_req_list(&req_list);
+}
+
+int hwa742_update_window_async(struct fb_info *fbi,
+				 struct omapfb_update_window *win,
+				 void (*complete_callback)(void *arg),
+				 void *complete_callback_data)
+{
+	LIST_HEAD(req_list);
+	struct hwa742_request *last;
+	int r = 0;
+
+	if (hwa742.update_mode != OMAPFB_MANUAL_UPDATE) {
+		dev_dbg(hwa742.fbdev->dev, "invalid update mode\n");
+		r = -EINVAL;
+		goto out;
+	}
+	if (unlikely(win->format &
+	    ~(0x03 | OMAPFB_FORMAT_FLAG_DOUBLE |
+	    OMAPFB_FORMAT_FLAG_TEARSYNC | OMAPFB_FORMAT_FLAG_FORCE_VSYNC))) {
+		dev_dbg(hwa742.fbdev->dev, "invalid window flag");
+		r = -EINVAL;
+		goto out;
+	}
+
+	create_req_list(win, &req_list);
+	last = list_entry(req_list.prev, struct hwa742_request, entry);
+
+	last->complete = complete_callback;
+	last->complete_data = (void *)complete_callback_data;
+
+	submit_req_list(&req_list);
+
+out:
+	return r;
+}
+EXPORT_SYMBOL(hwa742_update_window_async);
+
+static int hwa742_setup_plane(int plane, int channel_out,
+				  unsigned long offset, int screen_width,
+				  int pos_x, int pos_y, int width, int height,
+				  int color_mode)
+{
+	if (plane != OMAPFB_PLANE_GFX ||
+	    channel_out != OMAPFB_CHANNEL_OUT_LCD)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int hwa742_enable_plane(int plane, int enable)
+{
+	if (plane != 0)
+		return -EINVAL;
+
+	hwa742.int_ctrl->enable_plane(plane, enable);
+
+	return 0;
+}
+
+static int sync_handler(struct hwa742_request *req)
+{
+	complete(req->par.sync);
+	return REQ_COMPLETE;
+}
+
+static void hwa742_sync(void)
+{
+	LIST_HEAD(req_list);
+	struct hwa742_request *req;
+	struct completion comp;
+
+	req = alloc_req();
+
+	req->handler = sync_handler;
+	req->complete = NULL;
+	init_completion(&comp);
+	req->par.sync = &comp;
+
+	list_add(&req->entry, &req_list);
+	submit_req_list(&req_list);
+
+	wait_for_completion(&comp);
+}
+
+static void hwa742_bind_client(struct omapfb_notifier_block *nb)
+{
+	dev_dbg(hwa742.fbdev->dev, "update_mode %d\n", hwa742.update_mode);
+	if (hwa742.update_mode == OMAPFB_MANUAL_UPDATE) {
+		omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
+	}
+}
+
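+/*
+ * Switching update modes: tear down the old mode first (notify clients or
+ * stop the auto-update timer), drain any queued requests with
+ * hwa742_sync(), and only then arm the new mode.
+ */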
+static int hwa742_set_update_mode(enum omapfb_update_mode mode)
+{
+	if (mode != OMAPFB_MANUAL_UPDATE && mode != OMAPFB_AUTO_UPDATE &&
+	    mode != OMAPFB_UPDATE_DISABLED)
+		return -EINVAL;
+
+	if (mode == hwa742.update_mode)
+		return 0;
+
+	dev_info(hwa742.fbdev->dev, "HWA742: setting update mode to %s\n",
+			mode == OMAPFB_UPDATE_DISABLED ? "disabled" :
+			(mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual"));
+
+	switch (hwa742.update_mode) {
+	case OMAPFB_MANUAL_UPDATE:
+		omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_DISABLED);
+		break;
+	case OMAPFB_AUTO_UPDATE:
+		hwa742.stop_auto_update = 1;
+		del_timer_sync(&hwa742.auto_update_timer);
+		break;
+	case OMAPFB_UPDATE_DISABLED:
+		break;
+	}
+
+	hwa742.update_mode = mode;
+	hwa742_sync();
+	hwa742.stop_auto_update = 0;
+
+	switch (mode) {
+	case OMAPFB_MANUAL_UPDATE:
+		omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
+		break;
+	case OMAPFB_AUTO_UPDATE:
+		hwa742_update_window_auto(0);
+		break;
+	case OMAPFB_UPDATE_DISABLED:
+		break;
+	}
+
+	return 0;
+}
+
+static enum omapfb_update_mode hwa742_get_update_mode(void)
+{
+	return hwa742.update_mode;
+}
+
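+/*
+ * Round a time given in picoseconds up to a whole number of external
+ * interface clock ticks (one tick = extif_clk_period * div).  For example,
+ * assuming a 7500 ps interface clock and div 1, 12200 ps rounds up to
+ * 15000 ps, i.e. two ticks.
+ */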
+static unsigned long round_to_extif_ticks(unsigned long ps, int div)
+{
+	int bus_tick = hwa742.extif_clk_period * div;
+	return (ps + bus_tick - 1) / bus_tick * bus_tick;
+}
+
+static int calc_reg_timing(unsigned long sysclk, int div)
+{
+	struct extif_timings *t;
+	unsigned long systim;
+
+	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
+	 * AccessTime 2 ns + 12.2 ns (regs),
+	 * WEOffTime = WEOnTime + 1 ns,
+	 * REOffTime = REOnTime + 16 ns (regs),
+	 * CSOffTime = REOffTime + 1 ns
+	 * ReadCycle = 2ns + 2*SYSCLK  (regs),
+	 * WriteCycle = 2*SYSCLK + 2 ns,
+	 * CSPulseWidth = 10 ns */
+	systim = 1000000000 / (sysclk / 1000);
+	dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps"
+		  "extif_clk_div %d\n", systim, hwa742.extif_clk_period, div);
+
+	t = &hwa742.reg_timings;
+	memset(t, 0, sizeof(*t));
+	t->clk_div = div;
+	t->cs_on_time = 0;
+	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+	t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
+	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
+	t->re_off_time = round_to_extif_ticks(t->re_on_time + 16000, div);
+	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
+	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+	if (t->we_cycle_time < t->we_off_time)
+		t->we_cycle_time = t->we_off_time;
+	t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+	if (t->re_cycle_time < t->re_off_time)
+		t->re_cycle_time = t->re_off_time;
+	t->cs_pulse_width = 0;
+
+	dev_dbg(hwa742.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n",
+		 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
+	dev_dbg(hwa742.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n",
+		 t->we_on_time, t->we_off_time, t->re_cycle_time,
+		 t->we_cycle_time);
+	dev_dbg(hwa742.fbdev->dev, "[reg]rdaccess %d cspulse %d\n",
+		 t->access_time, t->cs_pulse_width);
+
+	return hwa742.extif->convert_timings(t);
+}
+
+static int calc_lut_timing(unsigned long sysclk, int div)
+{
+	struct extif_timings *t;
+	unsigned long systim;
+
+	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
+	 * AccessTime 2 ns + 4 * SYSCLK + 26 (lut),
+	 * WEOffTime = WEOnTime + 1 ns,
+	 * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut),
+	 * CSOffTime = REOffTime + 1 ns
+	 * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut),
+	 * WriteCycle = 2*SYSCLK + 2 ns,
+	 * CSPulseWidth = 10 ns
+	 */
+	systim = 1000000000 / (sysclk / 1000);
+	dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps"
+		  "extif_clk_div %d\n", systim, hwa742.extif_clk_period, div);
+
+	t = &hwa742.lut_timings;
+	memset(t, 0, sizeof(*t));
+
+	t->clk_div = div;
+
+	t->cs_on_time = 0;
+	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+	t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
+					      26000, div);
+	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
+	t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
+					      26000, div);
+	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
+	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+	if (t->we_cycle_time < t->we_off_time)
+		t->we_cycle_time = t->we_off_time;
+	t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div);
+	if (t->re_cycle_time < t->re_off_time)
+		t->re_cycle_time = t->re_off_time;
+	t->cs_pulse_width = 0;
+
+	dev_dbg(hwa742.fbdev->dev, "[lut]cson %d csoff %d reon %d reoff %d\n",
+		 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
+	dev_dbg(hwa742.fbdev->dev, "[lut]weon %d weoff %d recyc %d wecyc %d\n",
+		 t->we_on_time, t->we_off_time, t->re_cycle_time,
+		 t->we_cycle_time);
+	dev_dbg(hwa742.fbdev->dev, "[lut]rdaccess %d cspulse %d\n",
+		 t->access_time, t->cs_pulse_width);
+
+	return hwa742.extif->convert_timings(t);
+}
+
+static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div)
+{
+	int max_clk_div;
+	int div;
+
+	hwa742.extif->get_clk_info(&hwa742.extif_clk_period, &max_clk_div);
+	for (div = 1; div <= max_clk_div; div++) {
+		if (calc_reg_timing(sysclk, div) == 0)
+			break;
+	}
+	if (div > max_clk_div)
+		goto err;
+
+	*extif_mem_div = div;
+
+	for (div = 1; div <= max_clk_div; div++) {
+		if (calc_lut_timing(sysclk, div) == 0)
+			break;
+	}
+
+	if (div > max_clk_div)
+		goto err;
+
+	return 0;
+
+err:
+	dev_err(hwa742.fbdev->dev, "can't setup timings\n");
+	return -1;
+}
+
+static void calc_hwa742_clk_rates(unsigned long ext_clk,
+				unsigned long *sys_clk, unsigned long *pix_clk)
+{
+	int pix_clk_src;
+	int sys_div = 0, sys_mul = 0;
+	int pix_div;
+
+	pix_clk_src = hwa742_read_reg(HWA742_CLK_SRC_REG);
+	pix_div = ((pix_clk_src >> 3) & 0x1f) + 1;
+	if ((pix_clk_src & (0x3 << 1)) == 0) {
+		/* Source is the PLL */
+		sys_div = (hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x3f) + 1;
+		sys_mul = (hwa742_read_reg(HWA742_PLL_4_REG) & 0x7f) + 1;
+		*sys_clk = ext_clk * sys_mul / sys_div;
+	} else	/* else source is ext clk, or oscillator */
+		*sys_clk = ext_clk;
+
+	*pix_clk = *sys_clk / pix_div;			/* HZ */
+	dev_dbg(hwa742.fbdev->dev,
+		"ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n",
+		ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul);
+	dev_dbg(hwa742.fbdev->dev, "sys_clk %ld pix_clk %ld\n",
+		*sys_clk, *pix_clk);
+}
+
+
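+/*
+ * Derive the tearing-sync configuration from the panel timings read back
+ * from the controller: if a line can be transferred faster than the panel
+ * scans it out, synchronizing to VSYNC alone is enough; otherwise HSYNC is
+ * OR'ed in, or the active-high NDP-based TE signal is used when the sync
+ * polarities are inverted.
+ */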
+static int setup_tearsync(unsigned long pix_clk, int extif_div)
+{
+	int hdisp, vdisp;
+	int hndp, vndp;
+	int hsw, vsw;
+	int hs, vs;
+	int hs_pol_inv, vs_pol_inv;
+	int use_hsvs, use_ndp;
+	u8  b;
+
+	hsw = hwa742_read_reg(HWA742_HS_W_REG);
+	vsw = hwa742_read_reg(HWA742_VS_W_REG);
+	hs_pol_inv = !(hsw & 0x80);
+	vs_pol_inv = !(vsw & 0x80);
+	hsw = hsw & 0x7f;
+	vsw = vsw & 0x3f;
+
+	hdisp = (hwa742_read_reg(HWA742_H_DISP_REG) & 0x7f) * 8;
+	vdisp = hwa742_read_reg(HWA742_V_DISP_1_REG) +
+		((hwa742_read_reg(HWA742_V_DISP_2_REG) & 0x3) << 8);
+
+	hndp = hwa742_read_reg(HWA742_H_NDP_REG) & 0x7f;
+	vndp = hwa742_read_reg(HWA742_V_NDP_REG);
+
+	/* time to transfer one pixel (16bpp) in ps */
+	hwa742.pix_tx_time = hwa742.reg_timings.we_cycle_time;
+	if (hwa742.extif->get_max_tx_rate != NULL) {
+		/*
+		 * The external interface might have a rate limitation,
+		 * if so, we have to maximize our transfer rate.
+		 */
+		unsigned long min_tx_time;
+		unsigned long max_tx_rate = hwa742.extif->get_max_tx_rate();
+
+		dev_dbg(hwa742.fbdev->dev, "max_tx_rate %ld HZ\n",
+			max_tx_rate);
+		min_tx_time = 1000000000 / (max_tx_rate / 1000);  /* ps */
+		if (hwa742.pix_tx_time < min_tx_time)
+			hwa742.pix_tx_time = min_tx_time;
+	}
+
+	/* time to update one line in ps */
+	hwa742.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000);
+	hwa742.line_upd_time *= 1000;
+	if (hdisp * hwa742.pix_tx_time > hwa742.line_upd_time)
+		/*
+		 * transfer speed too low, we might have to use both
+		 * HS and VS
+		 */
+		use_hsvs = 1;
+	else
+		/* decent transfer speed, we'll always use only VS */
+		use_hsvs = 0;
+
+	if (use_hsvs && (hs_pol_inv || vs_pol_inv)) {
+		/*
+		 * HS or'ed with VS doesn't work, use the active high
+		 * TE signal based on HNDP / VNDP
+		 */
+		use_ndp = 1;
+		hs_pol_inv = 0;
+		vs_pol_inv = 0;
+		hs = hndp;
+		vs = vndp;
+	} else {
+		/*
+		 * Use HS or'ed with VS as a TE signal if both are needed
+		 * or VNDP if only vsync is needed.
+		 */
+		use_ndp = 0;
+		hs = hsw;
+		vs = vsw;
+		if (!use_hsvs) {
+			hs_pol_inv = 0;
+			vs_pol_inv = 0;
+		}
+	}
+
+	hs = hs * 1000000 / (pix_clk / 1000);			/* ps */
+	hs *= 1000;
+
+	vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000);	/* ps */
+	vs *= 1000;
+
+	if (vs <= hs)
+		return -EDOM;
+	/* set VS to 120% of HS to minimize VS detection time */
+	vs = hs * 12 / 10;
+	/* minimize HS too */
+	hs = 10000;
+
+	b = hwa742_read_reg(HWA742_NDP_CTRL);
+	b &= ~0x3;
+	b |= use_hsvs ? 1 : 0;
+	b |= (use_ndp && use_hsvs) ? 0 : 2;
+	hwa742_write_reg(HWA742_NDP_CTRL, b);
+
+	hwa742.vsync_only = !use_hsvs;
+
+	dev_dbg(hwa742.fbdev->dev,
+		"pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n",
+		pix_clk, hwa742.pix_tx_time, hwa742.line_upd_time);
+	dev_dbg(hwa742.fbdev->dev,
+		"hs %d ps vs %d ps mode %d vsync_only %d\n",
+		hs, vs, (b & 0x3), !use_hsvs);
+
+	return hwa742.extif->setup_tearsync(1, hs, vs,
+					    hs_pol_inv, vs_pol_inv, extif_div);
+}
+
+static void hwa742_get_caps(int plane, struct omapfb_caps *caps)
+{
+	hwa742.int_ctrl->get_caps(plane, caps);
+	caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE |
+		      OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE;
+	if (hwa742.te_connected)
+		caps->ctrl |= OMAPFB_CAPS_TEARSYNC;
+	caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) |
+			   (1 << OMAPFB_COLOR_YUV420);
+}
+
+static void hwa742_suspend(void)
+{
+	hwa742.update_mode_before_suspend = hwa742.update_mode;
+	hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
+	/* Enable sleep mode */
+	hwa742_write_reg(HWA742_POWER_SAVE, 1 << 1);
+	if (hwa742.power_down != NULL)
+		hwa742.power_down(hwa742.fbdev->dev);
+}
+
+static void hwa742_resume(void)
+{
+	if (hwa742.power_up != NULL)
+		hwa742.power_up(hwa742.fbdev->dev);
+	/* Disable sleep mode */
+	hwa742_write_reg(HWA742_POWER_SAVE, 0);
+	while (1) {
+		/* Loop until PLL output is stabilized */
+		if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
+			break;
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(msecs_to_jiffies(5));
+	}
+	hwa742_set_update_mode(hwa742.update_mode_before_suspend);
+}
+
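+/*
+ * Probe-time initialization: bring up the internal controller and the
+ * external interface, compute interface timings first from the external
+ * clock and then from the PLL-derived system clock, verify the chip
+ * revision and bootloader setup, and optionally enable tearing sync
+ * before building the request pool.
+ */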
+static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
+		       struct omapfb_mem_desc *req_vram)
+{
+	int r = 0, i;
+	u8 rev, conf;
+	unsigned long ext_clk;
+	unsigned long sys_clk, pix_clk;
+	int extif_mem_div;
+	struct omapfb_platform_data *omapfb_conf;
+	struct hwa742_platform_data *ctrl_conf;
+
+	BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl);
+
+	hwa742.fbdev = fbdev;
+	hwa742.extif = fbdev->ext_if;
+	hwa742.int_ctrl = fbdev->int_ctrl;
+
+	omapfb_conf = fbdev->dev->platform_data;
+	ctrl_conf = omapfb_conf->ctrl_platform_data;
+
+	if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) {
+		dev_err(fbdev->dev, "HWA742: missing platform data\n");
+		r = -ENOENT;
+		goto err1;
+	}
+
+	hwa742.power_down = ctrl_conf->power_down;
+	hwa742.power_up = ctrl_conf->power_up;
+
+	spin_lock_init(&hwa742.req_lock);
+
+	if ((r = hwa742.int_ctrl->init(fbdev, 1, req_vram)) < 0)
+		goto err1;
+
+	if ((r = hwa742.extif->init(fbdev)) < 0)
+		goto err2;
+
+	ext_clk = ctrl_conf->get_clock_rate(fbdev->dev);
+	if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0)
+		goto err3;
+	hwa742.extif->set_timings(&hwa742.reg_timings);
+	if (hwa742.power_up != NULL)
+		hwa742.power_up(fbdev->dev);
+
+	calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk);
+	if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0)
+		goto err4;
+	hwa742.extif->set_timings(&hwa742.reg_timings);
+
+	rev = hwa742_read_reg(HWA742_REV_CODE_REG);
+	if ((rev & 0xfc) != 0x80) {
+		dev_err(fbdev->dev, "HWA742: invalid revision %02x\n", rev);
+		r = -ENODEV;
+		goto err4;
+	}
+
+	if (!(hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x80)) {
+		dev_err(fbdev->dev,
+		      "HWA742: controller not initialized by the bootloader\n");
+		r = -ENODEV;
+		goto err4;
+	}
+
+	if (ctrl_conf->te_connected) {
+		if ((r = setup_tearsync(pix_clk, extif_mem_div)) < 0) {
+			dev_err(hwa742.fbdev->dev,
+			       "HWA742: can't setup tearing synchronization\n");
+			goto err4;
+		}
+		hwa742.te_connected = 1;
+	}
+
+	hwa742.max_transmit_size = hwa742.extif->max_transmit_size;
+
+	hwa742.update_mode = OMAPFB_UPDATE_DISABLED;
+
+	hwa742.auto_update_window.x = 0;
+	hwa742.auto_update_window.y = 0;
+	hwa742.auto_update_window.width = fbdev->panel->x_res;
+	hwa742.auto_update_window.height = fbdev->panel->y_res;
+	hwa742.auto_update_window.format = 0;
+
+	init_timer(&hwa742.auto_update_timer);
+	hwa742.auto_update_timer.function = hwa742_update_window_auto;
+	hwa742.auto_update_timer.data = 0;
+
+	hwa742.prev_color_mode = -1;
+	hwa742.prev_flags = 0;
+
+	hwa742.fbdev = fbdev;
+
+	INIT_LIST_HEAD(&hwa742.free_req_list);
+	INIT_LIST_HEAD(&hwa742.pending_req_list);
+	for (i = 0; i < ARRAY_SIZE(hwa742.req_pool); i++)
+		list_add(&hwa742.req_pool[i].entry, &hwa742.free_req_list);
+	BUG_ON(i <= IRQ_REQ_POOL_SIZE);
+	sema_init(&hwa742.req_sema, i - IRQ_REQ_POOL_SIZE);
+
+	conf = hwa742_read_reg(HWA742_CONFIG_REG);
+	dev_info(fbdev->dev, ": Epson HWA742 LCD controller rev %d "
+			"initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
+
+	return 0;
+err4:
+	if (hwa742.power_down != NULL)
+		hwa742.power_down(fbdev->dev);
+err3:
+	hwa742.extif->cleanup();
+err2:
+	hwa742.int_ctrl->cleanup();
+err1:
+	return r;
+}
+
+static void hwa742_cleanup(void)
+{
+	hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
+	hwa742.extif->cleanup();
+	hwa742.int_ctrl->cleanup();
+	if (hwa742.power_down != NULL)
+		hwa742.power_down(hwa742.fbdev->dev);
+}
+
+struct lcd_ctrl hwa742_ctrl = {
+	.name			= "hwa742",
+	.init			= hwa742_init,
+	.cleanup		= hwa742_cleanup,
+	.bind_client		= hwa742_bind_client,
+	.get_caps		= hwa742_get_caps,
+	.set_update_mode	= hwa742_set_update_mode,
+	.get_update_mode	= hwa742_get_update_mode,
+	.setup_plane		= hwa742_setup_plane,
+	.enable_plane		= hwa742_enable_plane,
+	.update_window		= hwa742_update_window_async,
+	.sync			= hwa742_sync,
+	.suspend		= hwa742_suspend,
+	.resume			= hwa742_resume,
+};
+
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c
new file mode 100644
index 0000000..51807b4
--- /dev/null
+++ b/drivers/video/omap/lcd_h3.c
@@ -0,0 +1,141 @@
+/*
+ * LCD panel support for the TI OMAP H3 board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/tps65010.h>
+#include <asm/arch/omapfb.h>
+
+#define MODULE_NAME	"omapfb-lcd_h3"
+
+#define pr_err(fmt, args...) printk(KERN_ERR MODULE_NAME ": " fmt, ## args)
+
+static int h3_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
+{
+	return 0;
+}
+
+static void h3_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int h3_panel_enable(struct lcd_panel *panel)
+{
+	int r = 0;
+
+	/* GPIO1 and GPIO2 of TPS65010 send LCD_ENBKL and LCD_ENVDD signals */
+	r = tps65010_set_gpio_out_value(GPIO1, HIGH);
+	if (!r)
+		r = tps65010_set_gpio_out_value(GPIO2, HIGH);
+	if (r)
+		pr_err("Unable to turn on LCD panel\n");
+
+	return r;
+}
+
+static void h3_panel_disable(struct lcd_panel *panel)
+{
+	int r = 0;
+
+	/* GPIO1 and GPIO2 of TPS65010 send LCD_ENBKL and LCD_ENVDD signals */
+	r = tps65010_set_gpio_out_value(GPIO1, LOW);
+	if (!r)
+		r = tps65010_set_gpio_out_value(GPIO2, LOW);
+	if (r)
+		pr_err("Unable to turn off LCD panel\n");
+}
+
+static unsigned long h3_panel_get_caps(struct lcd_panel *panel)
+{
+	return 0;
+}
+
+struct lcd_panel h3_panel = {
+	.name		= "h3",
+	.config		= OMAP_LCDC_PANEL_TFT,
+
+	.data_lines	= 16,
+	.bpp		= 16,
+	.x_res		= 240,
+	.y_res		= 320,
+	.pixel_clock	= 12000,
+	.hsw		= 12,
+	.hfp		= 14,
+	.hbp		= 72 - 12,
+	.vsw		= 1,
+	.vfp		= 1,
+	.vbp		= 0,
+	.pcd		= 0,
+
+	.init		= h3_panel_init,
+	.cleanup	= h3_panel_cleanup,
+	.enable		= h3_panel_enable,
+	.disable	= h3_panel_disable,
+	.get_caps	= h3_panel_get_caps,
+};
+
+static int h3_panel_probe(struct platform_device *pdev)
+{
+	omapfb_register_panel(&h3_panel);
+	return 0;
+}
+
+static int h3_panel_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int h3_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+	return 0;
+}
+
+static int h3_panel_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+struct platform_driver h3_panel_driver = {
+	.probe		= h3_panel_probe,
+	.remove		= h3_panel_remove,
+	.suspend	= h3_panel_suspend,
+	.resume		= h3_panel_resume,
+	.driver		= {
+		.name	= "lcd_h3",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int h3_panel_drv_init(void)
+{
+	return platform_driver_register(&h3_panel_driver);
+}
+
+static void h3_panel_drv_cleanup(void)
+{
+	platform_driver_unregister(&h3_panel_driver);
+}
+
+module_init(h3_panel_drv_init);
+module_exit(h3_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_h4.c b/drivers/video/omap/lcd_h4.c
new file mode 100644
index 0000000..fd6f0eb
--- /dev/null
+++ b/drivers/video/omap/lcd_h4.c
@@ -0,0 +1,117 @@
+/*
+ * LCD panel support for the TI OMAP H4 board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/omapfb.h>
+
+static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
+{
+	return 0;
+}
+
+static void h4_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int h4_panel_enable(struct lcd_panel *panel)
+{
+	return 0;
+}
+
+static void h4_panel_disable(struct lcd_panel *panel)
+{
+}
+
+static unsigned long h4_panel_get_caps(struct lcd_panel *panel)
+{
+	return 0;
+}
+
+struct lcd_panel h4_panel = {
+	.name		= "h4",
+	.config		= OMAP_LCDC_PANEL_TFT,
+
+	.bpp		= 16,
+	.data_lines	= 16,
+	.x_res		= 240,
+	.y_res		= 320,
+	.pixel_clock	= 6250,
+	.hsw		= 15,
+	.hfp		= 15,
+	.hbp		= 60,
+	.vsw		= 1,
+	.vfp		= 1,
+	.vbp		= 1,
+
+	.init		= h4_panel_init,
+	.cleanup	= h4_panel_cleanup,
+	.enable		= h4_panel_enable,
+	.disable	= h4_panel_disable,
+	.get_caps	= h4_panel_get_caps,
+};
+
+static int h4_panel_probe(struct platform_device *pdev)
+{
+	omapfb_register_panel(&h4_panel);
+	return 0;
+}
+
+static int h4_panel_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int h4_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+	return 0;
+}
+
+static int h4_panel_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+struct platform_driver h4_panel_driver = {
+	.probe		= h4_panel_probe,
+	.remove		= h4_panel_remove,
+	.suspend	= h4_panel_suspend,
+	.resume		= h4_panel_resume,
+	.driver		= {
+		.name	= "lcd_h4",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int h4_panel_drv_init(void)
+{
+	return platform_driver_register(&h4_panel_driver);
+}
+
+static void h4_panel_drv_cleanup(void)
+{
+	platform_driver_unregister(&h4_panel_driver);
+}
+
+module_init(h4_panel_drv_init);
+module_exit(h4_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_inn1510.c b/drivers/video/omap/lcd_inn1510.c
new file mode 100644
index 0000000..551f385
--- /dev/null
+++ b/drivers/video/omap/lcd_inn1510.c
@@ -0,0 +1,124 @@
+/*
+ * LCD panel support for the TI OMAP1510 Innovator board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/arch/fpga.h>
+#include <asm/arch/omapfb.h>
+
+static int innovator1510_panel_init(struct lcd_panel *panel,
+				    struct omapfb_device *fbdev)
+{
+	return 0;
+}
+
+static void innovator1510_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int innovator1510_panel_enable(struct lcd_panel *panel)
+{
+	fpga_write(0x7, OMAP1510_FPGA_LCD_PANEL_CONTROL);
+	return 0;
+}
+
+static void innovator1510_panel_disable(struct lcd_panel *panel)
+{
+	fpga_write(0x0, OMAP1510_FPGA_LCD_PANEL_CONTROL);
+}
+
+static unsigned long innovator1510_panel_get_caps(struct lcd_panel *panel)
+{
+	return 0;
+}
+
+struct lcd_panel innovator1510_panel = {
+	.name		= "inn1510",
+	.config		= OMAP_LCDC_PANEL_TFT,
+
+	.bpp		= 16,
+	.data_lines	= 16,
+	.x_res		= 240,
+	.y_res		= 320,
+	.pixel_clock	= 12500,
+	.hsw		= 40,
+	.hfp		= 40,
+	.hbp		= 72,
+	.vsw		= 1,
+	.vfp		= 1,
+	.vbp		= 0,
+	.pcd		= 12,
+
+	.init		= innovator1510_panel_init,
+	.cleanup	= innovator1510_panel_cleanup,
+	.enable		= innovator1510_panel_enable,
+	.disable	= innovator1510_panel_disable,
+	.get_caps	= innovator1510_panel_get_caps,
+};
+
+static int innovator1510_panel_probe(struct platform_device *pdev)
+{
+	omapfb_register_panel(&innovator1510_panel);
+	return 0;
+}
+
+static int innovator1510_panel_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int innovator1510_panel_suspend(struct platform_device *pdev,
+				       pm_message_t mesg)
+{
+	return 0;
+}
+
+static int innovator1510_panel_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+struct platform_driver innovator1510_panel_driver = {
+	.probe		= innovator1510_panel_probe,
+	.remove		= innovator1510_panel_remove,
+	.suspend	= innovator1510_panel_suspend,
+	.resume		= innovator1510_panel_resume,
+	.driver		= {
+		.name	= "lcd_inn1510",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int innovator1510_panel_drv_init(void)
+{
+	return platform_driver_register(&innovator1510_panel_driver);
+}
+
+static void innovator1510_panel_drv_cleanup(void)
+{
+	platform_driver_unregister(&innovator1510_panel_driver);
+}
+
+module_init(innovator1510_panel_drv_init);
+module_exit(innovator1510_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c
new file mode 100644
index 0000000..95604ca
--- /dev/null
+++ b/drivers/video/omap/lcd_inn1610.c
@@ -0,0 +1,150 @@
+/*
+ * LCD panel support for the TI OMAP1610 Innovator board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/omapfb.h>
+
+#define MODULE_NAME	"omapfb-lcd_inn1610"
+
+#define pr_err(fmt, args...) printk(KERN_ERR MODULE_NAME ": " fmt, ## args)
+
+static int innovator1610_panel_init(struct lcd_panel *panel,
+				    struct omapfb_device *fbdev)
+{
+	int r = 0;
+
+	if (omap_request_gpio(14)) {
+		pr_err("can't request GPIO 14\n");
+		r = -1;
+		goto exit;
+	}
+	if (omap_request_gpio(15)) {
+		pr_err("can't request GPIO 15\n");
+		omap_free_gpio(14);
+		r = -1;
+		goto exit;
+	}
+	/* configure GPIO(14, 15) as outputs */
+	omap_set_gpio_direction(14, 0);
+	omap_set_gpio_direction(15, 0);
+exit:
+	return r;
+}
+
+static void innovator1610_panel_cleanup(struct lcd_panel *panel)
+{
+	omap_free_gpio(15);
+	omap_free_gpio(14);
+}
+
+static int innovator1610_panel_enable(struct lcd_panel *panel)
+{
+	/* set GPIO14 and GPIO15 high */
+	omap_set_gpio_dataout(14, 1);
+	omap_set_gpio_dataout(15, 1);
+	return 0;
+}
+
+static void innovator1610_panel_disable(struct lcd_panel *panel)
+{
+	/* set GPIO14 and GPIO15 low */
+	omap_set_gpio_dataout(14, 0);
+	omap_set_gpio_dataout(15, 0);
+}
+
+static unsigned long innovator1610_panel_get_caps(struct lcd_panel *panel)
+{
+	return 0;
+}
+
+struct lcd_panel innovator1610_panel = {
+	.name		= "inn1610",
+	.config		= OMAP_LCDC_PANEL_TFT,
+
+	.bpp		= 16,
+	.data_lines	= 16,
+	.x_res		= 320,
+	.y_res		= 240,
+	.pixel_clock	= 12500,
+	.hsw		= 40,
+	.hfp		= 40,
+	.hbp		= 72,
+	.vsw		= 1,
+	.vfp		= 1,
+	.vbp		= 0,
+	.pcd		= 12,
+
+	.init		= innovator1610_panel_init,
+	.cleanup	= innovator1610_panel_cleanup,
+	.enable		= innovator1610_panel_enable,
+	.disable	= innovator1610_panel_disable,
+	.get_caps	= innovator1610_panel_get_caps,
+};
+
+static int innovator1610_panel_probe(struct platform_device *pdev)
+{
+	omapfb_register_panel(&innovator1610_panel);
+	return 0;
+}
+
+static int innovator1610_panel_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int innovator1610_panel_suspend(struct platform_device *pdev,
+				       pm_message_t mesg)
+{
+	return 0;
+}
+
+static int innovator1610_panel_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+struct platform_driver innovator1610_panel_driver = {
+	.probe		= innovator1610_panel_probe,
+	.remove		= innovator1610_panel_remove,
+	.suspend	= innovator1610_panel_suspend,
+	.resume		= innovator1610_panel_resume,
+	.driver		= {
+		.name	= "lcd_inn1610",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int innovator1610_panel_drv_init(void)
+{
+	return platform_driver_register(&innovator1610_panel_driver);
+}
+
+static void innovator1610_panel_drv_cleanup(void)
+{
+	platform_driver_unregister(&innovator1610_panel_driver);
+}
+
+module_init(innovator1610_panel_drv_init);
+module_exit(innovator1610_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c
new file mode 100644
index 0000000..a380388
--- /dev/null
+++ b/drivers/video/omap/lcd_osk.c
@@ -0,0 +1,144 @@
+/*
+ * LCD panel support for the TI OMAP OSK board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ * Adapted for OSK by <dirk.behme@de.bosch.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/omapfb.h>
+
+static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
+{
+	return 0;
+}
+
+static void osk_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int osk_panel_enable(struct lcd_panel *panel)
+{
+	/* configure PWL pin */
+	omap_cfg_reg(PWL);
+
+	/* Enable PWL unit */
+	omap_writeb(0x01, OMAP_PWL_CLK_ENABLE);
+
+	/* Set PWL level */
+	omap_writeb(0xFF, OMAP_PWL_ENABLE);
+
+	/* configure GPIO2 as output */
+	omap_set_gpio_direction(2, 0);
+
+	/* set GPIO2 high */
+	omap_set_gpio_dataout(2, 1);
+
+	return 0;
+}
+
+static void osk_panel_disable(struct lcd_panel *panel)
+{
+	/* Set PWL level to zero */
+	omap_writeb(0x00, OMAP_PWL_ENABLE);
+
+	/* Disable PWL unit */
+	omap_writeb(0x00, OMAP_PWL_CLK_ENABLE);
+
+	/* set GPIO2 low */
+	omap_set_gpio_dataout(2, 0);
+}
+
+static unsigned long osk_panel_get_caps(struct lcd_panel *panel)
+{
+	return 0;
+}
+
+struct lcd_panel osk_panel = {
+	.name		= "osk",
+	.config		= OMAP_LCDC_PANEL_TFT,
+
+	.bpp		= 16,
+	.data_lines	= 16,
+	.x_res		= 240,
+	.y_res		= 320,
+	.pixel_clock	= 12500,
+	.hsw		= 40,
+	.hfp		= 40,
+	.hbp		= 72,
+	.vsw		= 1,
+	.vfp		= 1,
+	.vbp		= 0,
+	.pcd		= 12,
+
+	.init		= osk_panel_init,
+	.cleanup	= osk_panel_cleanup,
+	.enable		= osk_panel_enable,
+	.disable	= osk_panel_disable,
+	.get_caps	= osk_panel_get_caps,
+};
+
+static int osk_panel_probe(struct platform_device *pdev)
+{
+	omapfb_register_panel(&osk_panel);
+	return 0;
+}
+
+static int osk_panel_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int osk_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+	return 0;
+}
+
+static int osk_panel_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+struct platform_driver osk_panel_driver = {
+	.probe		= osk_panel_probe,
+	.remove		= osk_panel_remove,
+	.suspend	= osk_panel_suspend,
+	.resume		= osk_panel_resume,
+	.driver		= {
+		.name	= "lcd_osk",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int osk_panel_drv_init(void)
+{
+	return platform_driver_register(&osk_panel_driver);
+}
+
+static void osk_panel_drv_cleanup(void)
+{
+	platform_driver_unregister(&osk_panel_driver);
+}
+
+module_init(osk_panel_drv_init);
+module_exit(osk_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/omap/lcd_palmte.c
new file mode 100644
index 0000000..52bdfda
--- /dev/null
+++ b/drivers/video/omap/lcd_palmte.c
@@ -0,0 +1,123 @@
+/*
+ * LCD panel support for the Palm Tungsten E
+ *
+ * Original version : Romain Goyet <r.goyet@gmail.com>
+ * Current version : Laurent Gonzalez <palmte.linux@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/arch/fpga.h>
+#include <asm/arch/omapfb.h>
+
+static int palmte_panel_init(struct lcd_panel *panel,
+				struct omapfb_device *fbdev)
+{
+	return 0;
+}
+
+static void palmte_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int palmte_panel_enable(struct lcd_panel *panel)
+{
+	return 0;
+}
+
+static void palmte_panel_disable(struct lcd_panel *panel)
+{
+}
+
+static unsigned long palmte_panel_get_caps(struct lcd_panel *panel)
+{
+	return 0;
+}
+
+struct lcd_panel palmte_panel = {
+	.name		= "palmte",
+	.config		= OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+			  OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
+			  OMAP_LCDC_HSVS_OPPOSITE,
+
+	.data_lines	= 16,
+	.bpp		= 8,
+	.pixel_clock	= 12000,
+	.x_res		= 320,
+	.y_res		= 320,
+	.hsw		= 4,
+	.hfp		= 8,
+	.hbp		= 28,
+	.vsw		= 1,
+	.vfp		= 8,
+	.vbp		= 7,
+	.pcd		= 0,
+
+	.init		= palmte_panel_init,
+	.cleanup	= palmte_panel_cleanup,
+	.enable		= palmte_panel_enable,
+	.disable	= palmte_panel_disable,
+	.get_caps	= palmte_panel_get_caps,
+};
+
+static int palmte_panel_probe(struct platform_device *pdev)
+{
+	omapfb_register_panel(&palmte_panel);
+	return 0;
+}
+
+static int palmte_panel_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int palmte_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+	return 0;
+}
+
+static int palmte_panel_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+struct platform_driver palmte_panel_driver = {
+	.probe		= palmte_panel_probe,
+	.remove		= palmte_panel_remove,
+	.suspend	= palmte_panel_suspend,
+	.resume		= palmte_panel_resume,
+	.driver		= {
+		.name	= "lcd_palmte",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int palmte_panel_drv_init(void)
+{
+	return platform_driver_register(&palmte_panel_driver);
+}
+
+static void palmte_panel_drv_cleanup(void)
+{
+	platform_driver_unregister(&palmte_panel_driver);
+}
+
+module_init(palmte_panel_drv_init);
+module_exit(palmte_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c
new file mode 100644
index 0000000..4bb349f
--- /dev/null
+++ b/drivers/video/omap/lcd_palmtt.c
@@ -0,0 +1,127 @@
+/*
+ * LCD panel support for Palm Tungsten|T
+ * Current version : Marek Vasut <marek.vasut@gmail.com>
+ *
+ * Modified from lcd_inn1510.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+/*
+ * GPIO11 - backlight
+ * GPIO12 - screen blanking
+ * GPIO13 - screen blanking
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/io.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/omapfb.h>
+
+static int palmtt_panel_init(struct lcd_panel *panel,
+	struct omapfb_device *fbdev)
+{
+	return 0;
+}
+
+static void palmtt_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int palmtt_panel_enable(struct lcd_panel *panel)
+{
+	return 0;
+}
+
+static void palmtt_panel_disable(struct lcd_panel *panel)
+{
+}
+
+static unsigned long palmtt_panel_get_caps(struct lcd_panel *panel)
+{
+	return OMAPFB_CAPS_SET_BACKLIGHT;
+}
+
+struct lcd_panel palmtt_panel = {
+	.name		= "palmtt",
+	.config		= OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+			OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
+			OMAP_LCDC_HSVS_OPPOSITE,
+	.bpp		= 16,
+	.data_lines	= 16,
+	.x_res		= 320,
+	.y_res		= 320,
+	.pixel_clock	= 10000,
+	.hsw		= 4,
+	.hfp		= 8,
+	.hbp		= 28,
+	.vsw		= 1,
+	.vfp		= 8,
+	.vbp		= 7,
+	.pcd		= 0,
+
+	.init		= palmtt_panel_init,
+	.cleanup	= palmtt_panel_cleanup,
+	.enable		= palmtt_panel_enable,
+	.disable	= palmtt_panel_disable,
+	.get_caps	= palmtt_panel_get_caps,
+};
+
+static int palmtt_panel_probe(struct platform_device *pdev)
+{
+	omapfb_register_panel(&palmtt_panel);
+	return 0;
+}
+
+static int palmtt_panel_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int palmtt_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+	return 0;
+}
+
+static int palmtt_panel_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+struct platform_driver palmtt_panel_driver = {
+	.probe		= palmtt_panel_probe,
+	.remove		= palmtt_panel_remove,
+	.suspend	= palmtt_panel_suspend,
+	.resume		= palmtt_panel_resume,
+	.driver		= {
+		.name	= "lcd_palmtt",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int palmtt_panel_drv_init(void)
+{
+	return platform_driver_register(&palmtt_panel_driver);
+}
+
+static void palmtt_panel_drv_cleanup(void)
+{
+	platform_driver_unregister(&palmtt_panel_driver);
+}
+
+module_init(palmtt_panel_drv_init);
+module_exit(palmtt_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcd_palmz71.c b/drivers/video/omap/lcd_palmz71.c
new file mode 100644
index 0000000..ea6170d
--- /dev/null
+++ b/drivers/video/omap/lcd_palmz71.c
@@ -0,0 +1,123 @@
+/*
+ * LCD panel support for the Palm Zire71
+ *
+ * Original version : Romain Goyet
+ * Current version : Laurent Gonzalez
+ * Modified for zire71 : Marek Vasut
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/arch/omapfb.h>
+
+static int palmz71_panel_init(struct lcd_panel *panel,
+			      struct omapfb_device *fbdev)
+{
+	return 0;
+}
+
+static void palmz71_panel_cleanup(struct lcd_panel *panel)
+{
+
+}
+
+static int palmz71_panel_enable(struct lcd_panel *panel)
+{
+	return 0;
+}
+
+static void palmz71_panel_disable(struct lcd_panel *panel)
+{
+}
+
+static unsigned long palmz71_panel_get_caps(struct lcd_panel *panel)
+{
+	return OMAPFB_CAPS_SET_BACKLIGHT;
+}
+
+struct lcd_panel palmz71_panel = {
+	.name		= "palmz71",
+	.config		= OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+			  OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
+			  OMAP_LCDC_HSVS_OPPOSITE,
+	.data_lines	= 16,
+	.bpp		= 16,
+	.pixel_clock	= 24000,
+	.x_res		= 320,
+	.y_res		= 320,
+	.hsw		= 4,
+	.hfp		= 8,
+	.hbp		= 28,
+	.vsw		= 1,
+	.vfp		= 8,
+	.vbp		= 7,
+	.pcd		= 0,
+
+	.init		= palmz71_panel_init,
+	.cleanup	= palmz71_panel_cleanup,
+	.enable		= palmz71_panel_enable,
+	.disable	= palmz71_panel_disable,
+	.get_caps	= palmz71_panel_get_caps,
+};
+
+static int palmz71_panel_probe(struct platform_device *pdev)
+{
+	omapfb_register_panel(&palmz71_panel);
+	return 0;
+}
+
+static int palmz71_panel_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int palmz71_panel_suspend(struct platform_device *pdev,
+				 pm_message_t mesg)
+{
+	return 0;
+}
+
+static int palmz71_panel_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+struct platform_driver palmz71_panel_driver = {
+	.probe		= palmz71_panel_probe,
+	.remove		= palmz71_panel_remove,
+	.suspend	= palmz71_panel_suspend,
+	.resume		= palmz71_panel_resume,
+	.driver		= {
+		.name	= "lcd_palmz71",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int palmz71_panel_drv_init(void)
+{
+	return platform_driver_register(&palmz71_panel_driver);
+}
+
+static void palmz71_panel_drv_cleanup(void)
+{
+	platform_driver_unregister(&palmz71_panel_driver);
+}
+
+module_init(palmz71_panel_drv_init);
+module_exit(palmz71_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcd_sx1.c b/drivers/video/omap/lcd_sx1.c
new file mode 100644
index 0000000..c4f306a
--- /dev/null
+++ b/drivers/video/omap/lcd_sx1.c
@@ -0,0 +1,334 @@
+/*
+ * LCD panel support for the Siemens SX1 mobile phone
+ *
+ * Current version : Vovan888@gmail.com, great help from FCA00000
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/omapfb.h>
+#include <asm/arch/mcbsp.h>
+#include <asm/arch/mux.h>
+
+/*
+ * OMAP310 GPIO registers
+ */
+#define GPIO_DATA_INPUT		0xfffce000
+#define GPIO_DATA_OUTPUT	0xfffce004
+#define GPIO_DIR_CONTROL	0xfffce008
+#define GPIO_INT_CONTROL	0xfffce00c
+#define GPIO_INT_MASK		0xfffce010
+#define GPIO_INT_STATUS		0xfffce014
+#define GPIO_PIN_CONTROL	0xfffce018
+
+
+#define A_LCD_SSC_RD	3
+#define A_LCD_SSC_SD	7
+#define _A_LCD_RESET	9
+#define _A_LCD_SSC_CS	12
+#define _A_LCD_SSC_A0	13
+
+#define DSP_REG		0xE1017024
+
+const unsigned char INIT_1[12] = {
+	0x1C, 0x02, 0x88, 0x00, 0x1E, 0xE0, 0x00, 0xDC, 0x00, 0x02, 0x00
+};
+
+const unsigned char INIT_2[127] = {
+	0x15, 0x00, 0x29, 0x00, 0x3E, 0x00, 0x51, 0x00,
+	0x65, 0x00, 0x7A, 0x00, 0x8D, 0x00, 0xA1, 0x00,
+	0xB6, 0x00, 0xC7, 0x00, 0xD8, 0x00, 0xEB, 0x00,
+	0xFB, 0x00, 0x0B, 0x01, 0x1B, 0x01, 0x27, 0x01,
+	0x34, 0x01, 0x41, 0x01, 0x4C, 0x01, 0x55, 0x01,
+	0x5F, 0x01, 0x68, 0x01, 0x70, 0x01, 0x78, 0x01,
+	0x7E, 0x01, 0x86, 0x01, 0x8C, 0x01, 0x94, 0x01,
+	0x9B, 0x01, 0xA1, 0x01, 0xA4, 0x01, 0xA9, 0x01,
+	0xAD, 0x01, 0xB2, 0x01, 0xB7, 0x01, 0xBC, 0x01,
+	0xC0, 0x01, 0xC4, 0x01, 0xC8, 0x01, 0xCB, 0x01,
+	0xCF, 0x01, 0xD2, 0x01, 0xD5, 0x01, 0xD8, 0x01,
+	0xDB, 0x01, 0xE0, 0x01, 0xE3, 0x01, 0xE6, 0x01,
+	0xE8, 0x01, 0xEB, 0x01, 0xEE, 0x01, 0xF1, 0x01,
+	0xF3, 0x01, 0xF8, 0x01, 0xF9, 0x01, 0xFC, 0x01,
+	0x00, 0x02, 0x03, 0x02, 0x07, 0x02, 0x09, 0x02,
+	0x0E, 0x02, 0x13, 0x02, 0x1C, 0x02, 0x00
+};
+
+const unsigned char INIT_3[15] = {
+	0x14, 0x26, 0x33, 0x3D, 0x45, 0x4D, 0x53, 0x59,
+	0x5E, 0x63, 0x67, 0x6D, 0x71, 0x78, 0xFF
+};
+
+static void epson_sendbyte(int flag, unsigned char byte)
+{
+	int i, shifter = 0x80;
+
+	if (!flag)
+		omap_set_gpio_dataout(_A_LCD_SSC_A0, 0);
+	mdelay(2);
+	omap_set_gpio_dataout(A_LCD_SSC_RD, 1);
+
+	omap_set_gpio_dataout(A_LCD_SSC_SD, flag);
+
+	OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
+	OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
+	for (i = 0; i < 8; i++) {
+		OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
+		omap_set_gpio_dataout(A_LCD_SSC_SD, shifter & byte);
+		OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
+		shifter >>= 1;
+	}
+	omap_set_gpio_dataout(_A_LCD_SSC_A0, 1);
+}
+
+static void init_system(void)
+{
+	omap_mcbsp_request(OMAP_MCBSP3);
+	omap_mcbsp_stop(OMAP_MCBSP3);
+}
+
+static void setup_GPIO(void)
+{
+	/* new wave */
+	omap_request_gpio(A_LCD_SSC_RD);
+	omap_request_gpio(A_LCD_SSC_SD);
+	omap_request_gpio(_A_LCD_RESET);
+	omap_request_gpio(_A_LCD_SSC_CS);
+	omap_request_gpio(_A_LCD_SSC_A0);
+
+	/* set all GPIOs to output */
+	omap_set_gpio_direction(A_LCD_SSC_RD, 0);
+	omap_set_gpio_direction(A_LCD_SSC_SD, 0);
+	omap_set_gpio_direction(_A_LCD_RESET, 0);
+	omap_set_gpio_direction(_A_LCD_SSC_CS, 0);
+	omap_set_gpio_direction(_A_LCD_SSC_A0, 0);
+
+	/* set GPIO data */
+	omap_set_gpio_dataout(A_LCD_SSC_RD, 1);
+	omap_set_gpio_dataout(A_LCD_SSC_SD, 0);
+	omap_set_gpio_dataout(_A_LCD_RESET, 0);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+	omap_set_gpio_dataout(_A_LCD_SSC_A0, 1);
+}
+
+static void display_init(void)
+{
+	int i;
+
+	omap_cfg_reg(MCBSP3_CLKX);
+
+	mdelay(2);
+	setup_GPIO();
+	mdelay(2);
+
+	/* reset LCD */
+	omap_set_gpio_dataout(A_LCD_SSC_SD, 1);
+	epson_sendbyte(0, 0x25);
+
+	omap_set_gpio_dataout(_A_LCD_RESET, 0);
+	mdelay(10);
+	omap_set_gpio_dataout(_A_LCD_RESET, 1);
+
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+	mdelay(2);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+	/* init LCD, phase 1 */
+	epson_sendbyte(0, 0xCA);
+	for (i = 0; i < 10; i++)
+		epson_sendbyte(1, INIT_1[i]);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+	/* init LCD phase 2 */
+	epson_sendbyte(0, 0xCB);
+	for (i = 0; i < 125; i++)
+		epson_sendbyte(1, INIT_2[i]);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+	/* init LCD phase 2a */
+	epson_sendbyte(0, 0xCC);
+	for (i = 0; i < 14; i++)
+		epson_sendbyte(1, INIT_3[i]);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+	/* init LCD phase 3 */
+	epson_sendbyte(0, 0xBC);
+	epson_sendbyte(1, 0x08);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+	/* init LCD phase 4 */
+	epson_sendbyte(0, 0x07);
+	epson_sendbyte(1, 0x05);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+	/* init LCD phase 5 */
+	epson_sendbyte(0, 0x94);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+	/* init LCD phase 6 */
+	epson_sendbyte(0, 0xC6);
+	epson_sendbyte(1, 0x80);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+	mdelay(100); /* used to be 1000 */
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+	/* init LCD phase 7 */
+	epson_sendbyte(0, 0x16);
+	epson_sendbyte(1, 0x02);
+	epson_sendbyte(1, 0x00);
+	epson_sendbyte(1, 0xB1);
+	epson_sendbyte(1, 0x00);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+	/* init LCD phase 8 */
+	epson_sendbyte(0, 0x76);
+	epson_sendbyte(1, 0x00);
+	epson_sendbyte(1, 0x00);
+	epson_sendbyte(1, 0xDB);
+	epson_sendbyte(1, 0x00);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+	/* init LCD phase 9 */
+	epson_sendbyte(0, 0xAF);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+}
+
+static int sx1_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
+{
+	return 0;
+}
+
+static void sx1_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static void sx1_panel_disable(struct lcd_panel *panel)
+{
+	pr_info("SX1: LCD panel disable\n");
+	sx1_setmmipower(0);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+
+	epson_sendbyte(0, 0x25);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+	epson_sendbyte(0, 0xAE);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+	mdelay(100);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+	epson_sendbyte(0, 0x95);
+	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+}
+
+static int sx1_panel_enable(struct lcd_panel *panel)
+{
+	pr_info("SX1: LCD panel enable\n");
+	init_system();
+	display_init();
+
+	sx1_setmmipower(1);
+	sx1_setbacklight(0x18);
+	sx1_setkeylight(0x06);
+	return 0;
+}
+
+
+static unsigned long sx1_panel_get_caps(struct lcd_panel *panel)
+{
+	return 0;
+}
+
+struct lcd_panel sx1_panel = {
+	.name		= "sx1",
+	.config		= OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+			  OMAP_LCDC_INV_HSYNC | OMAP_LCDC_INV_PIX_CLOCK |
+			  OMAP_LCDC_INV_OUTPUT_EN,
+
+	.x_res		= 176,
+	.y_res		= 220,
+	.data_lines	= 16,
+	.bpp		= 16,
+	.hsw		= 5,
+	.hfp		= 5,
+	.hbp		= 5,
+	.vsw		= 2,
+	.vfp		= 1,
+	.vbp		= 1,
+	.pixel_clock	= 1500,
+
+	.init		= sx1_panel_init,
+	.cleanup	= sx1_panel_cleanup,
+	.enable		= sx1_panel_enable,
+	.disable	= sx1_panel_disable,
+	.get_caps	= sx1_panel_get_caps,
+};
+
+static int sx1_panel_probe(struct platform_device *pdev)
+{
+	omapfb_register_panel(&sx1_panel);
+	return 0;
+}
+
+static int sx1_panel_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int sx1_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+	return 0;
+}
+
+static int sx1_panel_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+struct platform_driver sx1_panel_driver = {
+	.probe		= sx1_panel_probe,
+	.remove		= sx1_panel_remove,
+	.suspend	= sx1_panel_suspend,
+	.resume		= sx1_panel_resume,
+	.driver	= {
+		.name	= "lcd_sx1",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int sx1_panel_drv_init(void)
+{
+	return platform_driver_register(&sx1_panel_driver);
+}
+
+static void sx1_panel_drv_cleanup(void)
+{
+	platform_driver_unregister(&sx1_panel_driver);
+}
+
+module_init(sx1_panel_drv_init);
+module_exit(sx1_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcdc.c b/drivers/video/omap/lcdc.c
new file mode 100644
index 0000000..9085188
--- /dev/null
+++ b/drivers/video/omap/lcdc.c
@@ -0,0 +1,893 @@
+/*
+ * OMAP1 internal LCD controller
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/clk.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+
+#include <asm/mach-types.h>
+
+#define MODULE_NAME			"lcdc"
+
+#define OMAP_LCDC_BASE			0xfffec000
+#define OMAP_LCDC_SIZE			256
+#define OMAP_LCDC_IRQ			INT_LCD_CTRL
+
+#define OMAP_LCDC_CONTROL		(OMAP_LCDC_BASE + 0x00)
+#define OMAP_LCDC_TIMING0		(OMAP_LCDC_BASE + 0x04)
+#define OMAP_LCDC_TIMING1		(OMAP_LCDC_BASE + 0x08)
+#define OMAP_LCDC_TIMING2		(OMAP_LCDC_BASE + 0x0c)
+#define OMAP_LCDC_STATUS		(OMAP_LCDC_BASE + 0x10)
+#define OMAP_LCDC_SUBPANEL		(OMAP_LCDC_BASE + 0x14)
+#define OMAP_LCDC_LINE_INT		(OMAP_LCDC_BASE + 0x18)
+#define OMAP_LCDC_DISPLAY_STATUS	(OMAP_LCDC_BASE + 0x1c)
+
+#define OMAP_LCDC_STAT_DONE		(1 << 0)
+#define OMAP_LCDC_STAT_VSYNC		(1 << 1)
+#define OMAP_LCDC_STAT_SYNC_LOST	(1 << 2)
+#define OMAP_LCDC_STAT_ABC		(1 << 3)
+#define OMAP_LCDC_STAT_LINE_INT		(1 << 4)
+#define OMAP_LCDC_STAT_FUF		(1 << 5)
+#define OMAP_LCDC_STAT_LOADED_PALETTE	(1 << 6)
+
+#define OMAP_LCDC_CTRL_LCD_EN		(1 << 0)
+#define OMAP_LCDC_CTRL_LCD_TFT		(1 << 7)
+#define OMAP_LCDC_CTRL_LINE_IRQ_CLR_SEL	(1 << 10)
+
+#define OMAP_LCDC_IRQ_VSYNC		(1 << 2)
+#define OMAP_LCDC_IRQ_DONE		(1 << 3)
+#define OMAP_LCDC_IRQ_LOADED_PALETTE	(1 << 4)
+#define OMAP_LCDC_IRQ_LINE_NIRQ		(1 << 5)
+#define OMAP_LCDC_IRQ_LINE		(1 << 6)
+#define OMAP_LCDC_IRQ_MASK		(((1 << 5) - 1) << 2)
+
+#define MAX_PALETTE_SIZE		PAGE_SIZE
+
+enum lcdc_load_mode {
+	OMAP_LCDC_LOAD_PALETTE,
+	OMAP_LCDC_LOAD_FRAME,
+	OMAP_LCDC_LOAD_PALETTE_AND_FRAME
+};
+
+static struct omap_lcd_controller {
+	enum omapfb_update_mode	update_mode;
+	int			ext_mode;
+
+	unsigned long		frame_offset;
+	int			screen_width;
+	int			xres;
+	int			yres;
+
+	enum omapfb_color_format	color_mode;
+	int			bpp;
+	void			*palette_virt;
+	dma_addr_t		palette_phys;
+	int			palette_code;
+	int			palette_size;
+
+	unsigned int		irq_mask;
+	struct completion	last_frame_complete;
+	struct completion	palette_load_complete;
+	struct clk		*lcd_ck;
+	struct omapfb_device	*fbdev;
+
+	void			(*dma_callback)(void *data);
+	void			*dma_callback_data;
+
+	int			fbmem_allocated;
+	dma_addr_t		vram_phys;
+	void			*vram_virt;
+	unsigned long		vram_size;
+} lcdc;
+
+static inline void enable_irqs(int mask)
+{
+	lcdc.irq_mask |= mask;
+}
+
+static inline void disable_irqs(int mask)
+{
+	lcdc.irq_mask &= ~mask;
+}
+
+static void set_load_mode(enum lcdc_load_mode mode)
+{
+	u32 l;
+
+	l = omap_readl(OMAP_LCDC_CONTROL);
+	l &= ~(3 << 20);
+	switch (mode) {
+	case OMAP_LCDC_LOAD_PALETTE:
+		l |= 1 << 20;
+		break;
+	case OMAP_LCDC_LOAD_FRAME:
+		l |= 2 << 20;
+		break;
+	case OMAP_LCDC_LOAD_PALETTE_AND_FRAME:
+		break;
+	default:
+		BUG();
+	}
+	omap_writel(l, OMAP_LCDC_CONTROL);
+}
+
+static void enable_controller(void)
+{
+	u32 l;
+
+	l = omap_readl(OMAP_LCDC_CONTROL);
+	l |= OMAP_LCDC_CTRL_LCD_EN;
+	l &= ~OMAP_LCDC_IRQ_MASK;
+	l |= lcdc.irq_mask | OMAP_LCDC_IRQ_DONE;	/* enabled IRQs */
+	omap_writel(l, OMAP_LCDC_CONTROL);
+}
+
+static void disable_controller_async(void)
+{
+	u32 l;
+	u32 mask;
+
+	l = omap_readl(OMAP_LCDC_CONTROL);
+	mask = OMAP_LCDC_CTRL_LCD_EN | OMAP_LCDC_IRQ_MASK;
+	/*
+	 * Preserve the DONE mask, since we still want to get the
+	 * final DONE irq. It will be disabled in the IRQ handler.
+	 */
+	mask &= ~OMAP_LCDC_IRQ_DONE;
+	l &= ~mask;
+	omap_writel(l, OMAP_LCDC_CONTROL);
+}
+
+static void disable_controller(void)
+{
+	init_completion(&lcdc.last_frame_complete);
+	disable_controller_async();
+	if (!wait_for_completion_timeout(&lcdc.last_frame_complete,
+				msecs_to_jiffies(500)))
+		dev_err(lcdc.fbdev->dev, "timeout waiting for FRAME DONE\n");
+}
+
+static void reset_controller(u32 status)
+{
+	static unsigned long reset_count;
+	static unsigned long last_jiffies;
+
+	disable_controller_async();
+	reset_count++;
+	if (reset_count == 1 || time_after(jiffies, last_jiffies + HZ)) {
+		dev_err(lcdc.fbdev->dev,
+			  "resetting (status %#010x, reset count %lu)\n",
+			  status, reset_count);
+		last_jiffies = jiffies;
+	}
+	if (reset_count < 100) {
+		enable_controller();
+	} else {
+		reset_count = 0;
+		dev_err(lcdc.fbdev->dev,
+			"too many reset attempts, giving up.\n");
+	}
+}
+
+/*
+ * Configure the LCD DMA according to the current mode specified by parameters
+ * in lcdc.fbdev and fbdev->var.
+ */
+static void setup_lcd_dma(void)
+{
+	static const int dma_elem_type[] = {
+		0,
+		OMAP_DMA_DATA_TYPE_S8,
+		OMAP_DMA_DATA_TYPE_S16,
+		0,
+		OMAP_DMA_DATA_TYPE_S32,
+	};
+	struct omapfb_plane_struct *plane = lcdc.fbdev->fb_info[0]->par;
+	struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var;
+	unsigned long	src;
+	int		esize, xelem, yelem;
+
+	src = lcdc.vram_phys + lcdc.frame_offset;
+
+	switch (var->rotate) {
+	case 0:
+		if (plane->info.mirror || (src & 3) ||
+		    lcdc.color_mode == OMAPFB_COLOR_YUV420 ||
+		    (lcdc.xres & 1))
+			esize = 2;
+		else
+			esize = 4;
+		xelem = lcdc.xres * lcdc.bpp / 8 / esize;
+		yelem = lcdc.yres;
+		break;
+	case 90:
+	case 180:
+	case 270:
+		if (cpu_is_omap15xx()) {
+			BUG();
+		}
+		esize = 2;
+		xelem = lcdc.yres * lcdc.bpp / 16;
+		yelem = lcdc.xres;
+		break;
+	default:
+		BUG();
+		return;
+	}
+#ifdef VERBOSE
+	dev_dbg(lcdc.fbdev->dev,
+		 "setup_dma: src %#010lx esize %d xelem %d yelem %d\n",
+		 src, esize, xelem, yelem);
+#endif
+	omap_set_lcd_dma_b1(src, xelem, yelem, dma_elem_type[esize]);
+	if (!cpu_is_omap15xx()) {
+		int bpp = lcdc.bpp;
+
+		/*
+		 * YUV support is only for external mode when we have the
+		 * YUV window embedded in a 16bpp frame buffer.
+		 */
+		if (lcdc.color_mode == OMAPFB_COLOR_YUV420)
+			bpp = 16;
+		/* Set virtual xres elem size */
+		omap_set_lcd_dma_b1_vxres(
+			lcdc.screen_width * bpp / 8 / esize);
+		/* Setup transformations */
+		omap_set_lcd_dma_b1_rotation(var->rotate);
+		omap_set_lcd_dma_b1_mirror(plane->info.mirror);
+	}
+	omap_setup_lcd_dma();
+}
+
+static irqreturn_t lcdc_irq_handler(int irq, void *dev_id)
+{
+	u32 status;
+
+	status = omap_readl(OMAP_LCDC_STATUS);
+
+	if (status & (OMAP_LCDC_STAT_FUF | OMAP_LCDC_STAT_SYNC_LOST))
+		reset_controller(status);
+	else {
+		if (status & OMAP_LCDC_STAT_DONE) {
+			u32 l;
+
+			/*
+			 * Disable IRQ_DONE. The status bit will be cleared
+			 * only when the controller is reenabled and we don't
+			 * want to get more interrupts.
+			 */
+			l = omap_readl(OMAP_LCDC_CONTROL);
+			l &= ~OMAP_LCDC_IRQ_DONE;
+			omap_writel(l, OMAP_LCDC_CONTROL);
+			complete(&lcdc.last_frame_complete);
+		}
+		if (status & OMAP_LCDC_STAT_LOADED_PALETTE) {
+			disable_controller_async();
+			complete(&lcdc.palette_load_complete);
+		}
+	}
+
+	/*
+	 * Clear these interrupt status bits.
+	 * Sync_lost and FUF bits were cleared by disabling the LCD controller.
+	 * LOADED_PALETTE can be cleared this way only in palette-only
+	 * load mode. In other load modes it's cleared by disabling the
+	 * controller.
+	 */
+	status &= ~(OMAP_LCDC_STAT_VSYNC |
+		    OMAP_LCDC_STAT_LOADED_PALETTE |
+		    OMAP_LCDC_STAT_ABC |
+		    OMAP_LCDC_STAT_LINE_INT);
+	omap_writel(status, OMAP_LCDC_STATUS);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Change to a new video mode. We defer this to a later time to avoid any
+ * flicker and not to mess up the current LCD DMA context. For this we disable
+ * the LCD controller, which will generate a DONE irq after the last frame has
+ * been transferred. Then it'll be safe to reconfigure both the LCD controller
+ * as well as the LCD DMA.
+ */
+static int omap_lcdc_setup_plane(int plane, int channel_out,
+				 unsigned long offset, int screen_width,
+				 int pos_x, int pos_y, int width, int height,
+				 int color_mode)
+{
+	struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var;
+	struct lcd_panel *panel = lcdc.fbdev->panel;
+	int rot_x, rot_y;
+
+	if (var->rotate == 0) {
+		rot_x = panel->x_res;
+		rot_y = panel->y_res;
+	} else {
+		rot_x = panel->y_res;
+		rot_y = panel->x_res;
+	}
+	if (plane != 0 || channel_out != 0 || pos_x != 0 || pos_y != 0 ||
+	    width > rot_x || height > rot_y) {
+#ifdef VERBOSE
+		dev_dbg(lcdc.fbdev->dev,
+			"invalid plane params plane %d pos_x %d pos_y %d "
+			"w %d h %d\n", plane, pos_x, pos_y, width, height);
+#endif
+		return -EINVAL;
+	}
+
+	lcdc.frame_offset = offset;
+	lcdc.xres = width;
+	lcdc.yres = height;
+	lcdc.screen_width = screen_width;
+	lcdc.color_mode = color_mode;
+
+	switch (color_mode) {
+	case OMAPFB_COLOR_CLUT_8BPP:
+		lcdc.bpp = 8;
+		lcdc.palette_code = 0x3000;
+		lcdc.palette_size = 512;
+		break;
+	case OMAPFB_COLOR_RGB565:
+		lcdc.bpp = 16;
+		lcdc.palette_code = 0x4000;
+		lcdc.palette_size = 32;
+		break;
+	case OMAPFB_COLOR_RGB444:
+		lcdc.bpp = 16;
+		lcdc.palette_code = 0x4000;
+		lcdc.palette_size = 32;
+		break;
+	case OMAPFB_COLOR_YUV420:
+		if (lcdc.ext_mode) {
+			lcdc.bpp = 12;
+			break;
+		}
+		/* fallthrough */
+	case OMAPFB_COLOR_YUV422:
+		if (lcdc.ext_mode) {
+			lcdc.bpp = 16;
+			break;
+		}
+		/* fallthrough */
+	default:
+		/* FIXME: other BPPs.
+		 * bpp1: code  0,     size 256
+		 * bpp2: code  0x1000 size 256
+		 * bpp4: code  0x2000 size 256
+		 * bpp12: code 0x4000 size 32
+		 */
+		dev_dbg(lcdc.fbdev->dev, "invalid color mode %d\n", color_mode);
+		BUG();
+		return -1;
+	}
+
+	if (lcdc.ext_mode) {
+		setup_lcd_dma();
+		return 0;
+	}
+
+	if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
+		disable_controller();
+		omap_stop_lcd_dma();
+		setup_lcd_dma();
+		enable_controller();
+	}
+
+	return 0;
+}
+
+static int omap_lcdc_enable_plane(int plane, int enable)
+{
+	dev_dbg(lcdc.fbdev->dev,
+		"plane %d enable %d update_mode %d ext_mode %d\n",
+		plane, enable, lcdc.update_mode, lcdc.ext_mode);
+	if (plane != OMAPFB_PLANE_GFX)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Configure the LCD DMA for a palette load operation and do the palette
+ * downloading synchronously. We don't use the frame+palette load mode of
+ * the controller, since the palette can always be downloaded separately.
+ */
+static void load_palette(void)
+{
+	u16	*palette;
+
+	palette = (u16 *)lcdc.palette_virt;
+
+	*(u16 *)palette &= 0x0fff;
+	*(u16 *)palette |= lcdc.palette_code;
+
+	omap_set_lcd_dma_b1(lcdc.palette_phys,
+		lcdc.palette_size / 4 + 1, 1, OMAP_DMA_DATA_TYPE_S32);
+
+	omap_set_lcd_dma_single_transfer(1);
+	omap_setup_lcd_dma();
+
+	init_completion(&lcdc.palette_load_complete);
+	enable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE);
+	set_load_mode(OMAP_LCDC_LOAD_PALETTE);
+	enable_controller();
+	if (!wait_for_completion_timeout(&lcdc.palette_load_complete,
+				msecs_to_jiffies(500)))
+		dev_err(lcdc.fbdev->dev, "timeout waiting for palette load\n");
+	/* The controller gets disabled in the irq handler */
+	disable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE);
+	omap_stop_lcd_dma();
+
+	omap_set_lcd_dma_single_transfer(lcdc.ext_mode);
+}
+
+/* Used only in internal controller mode */
+static int omap_lcdc_setcolreg(u_int regno, u16 red, u16 green, u16 blue,
+			       u16 transp, int update_hw_pal)
+{
+	u16 *palette;
+
+	if (lcdc.color_mode != OMAPFB_COLOR_CLUT_8BPP || regno > 255)
+		return -EINVAL;
+
+	palette = (u16 *)lcdc.palette_virt;
+
+	palette[regno] &= ~0x0fff;
+	palette[regno] |= ((red >> 12) << 8) | ((green >> 12) << 4) |
+			   (blue >> 12);
+
+	if (update_hw_pal) {
+		disable_controller();
+		omap_stop_lcd_dma();
+		load_palette();
+		setup_lcd_dma();
+		set_load_mode(OMAP_LCDC_LOAD_FRAME);
+		enable_controller();
+	}
+
+	return 0;
+}
+
+static void calc_ck_div(int is_tft, int pck, int *pck_div)
+{
+	unsigned long lck;
+
+	pck = max(1, pck);
+	lck = clk_get_rate(lcdc.lcd_ck);
+	*pck_div = (lck + pck - 1) / pck;
+	if (is_tft)
+		*pck_div = max(2, *pck_div);
+	else
+		*pck_div = max(3, *pck_div);
+	if (*pck_div > 255) {
+		/* FIXME: try to adjust logic clock divider as well */
+		*pck_div = 255;
+		dev_warn(lcdc.fbdev->dev, "pixclock %d kHz too low.\n",
+			 pck / 1000);
+	}
+}
+
+static inline void setup_regs(void)
+{
+	u32 l;
+	struct lcd_panel *panel = lcdc.fbdev->panel;
+	int is_tft = panel->config & OMAP_LCDC_PANEL_TFT;
+	unsigned long lck;
+	int pcd;
+
+	l = omap_readl(OMAP_LCDC_CONTROL);
+	l &= ~OMAP_LCDC_CTRL_LCD_TFT;
+	l |= is_tft ? OMAP_LCDC_CTRL_LCD_TFT : 0;
+#ifdef CONFIG_MACH_OMAP_PALMTE
+/* FIXME: if (machine_is_omap_palmte()) { */
+		/* PalmTE uses alternate TFT setting in 8BPP mode */
+		l |= (is_tft && panel->bpp == 8) ? 0x810000 : 0;
+/*	} */
+#endif
+	omap_writel(l, OMAP_LCDC_CONTROL);
+
+	l = omap_readl(OMAP_LCDC_TIMING2);
+	l &= ~(((1 << 6) - 1) << 20);
+	l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 20;
+	omap_writel(l, OMAP_LCDC_TIMING2);
+
+	l = panel->x_res - 1;
+	l |= (panel->hsw - 1) << 10;
+	l |= (panel->hfp - 1) << 16;
+	l |= (panel->hbp - 1) << 24;
+	omap_writel(l, OMAP_LCDC_TIMING0);
+
+	l = panel->y_res - 1;
+	l |= (panel->vsw - 1) << 10;
+	l |= panel->vfp << 16;
+	l |= panel->vbp << 24;
+	omap_writel(l, OMAP_LCDC_TIMING1);
+
+	l = omap_readl(OMAP_LCDC_TIMING2);
+	l &= ~0xff;
+
+	lck = clk_get_rate(lcdc.lcd_ck);
+
+	if (!panel->pcd)
+		calc_ck_div(is_tft, panel->pixel_clock * 1000, &pcd);
+	else {
+		dev_warn(lcdc.fbdev->dev,
+		    "Pixel clock divider value is obsolete.\n"
+		    "Try to set pixel_clock to %lu and pcd to 0 "
+		    "in drivers/video/omap/lcd_%s.c and submit a patch.\n",
+			lck / panel->pcd / 1000, panel->name);
+
+		pcd = panel->pcd;
+	}
+	l |= pcd & 0xff;
+	l |= panel->acb << 8;
+	omap_writel(l, OMAP_LCDC_TIMING2);
+
+	/* update panel info with the exact clock */
+	panel->pixel_clock = lck / pcd / 1000;
+}
+
+/*
+ * Configure the LCD controller, download the color palette and start a looped
+ * DMA transfer of the frame image data. Called only in internal
+ * controller mode.
+ */
+static int omap_lcdc_set_update_mode(enum omapfb_update_mode mode)
+{
+	int r = 0;
+
+	if (mode != lcdc.update_mode) {
+		switch (mode) {
+		case OMAPFB_AUTO_UPDATE:
+			setup_regs();
+			load_palette();
+
+			/* Setup and start LCD DMA */
+			setup_lcd_dma();
+
+			set_load_mode(OMAP_LCDC_LOAD_FRAME);
+			enable_irqs(OMAP_LCDC_IRQ_DONE);
+			/* This will start the actual DMA transfer */
+			enable_controller();
+			lcdc.update_mode = mode;
+			break;
+		case OMAPFB_UPDATE_DISABLED:
+			disable_controller();
+			omap_stop_lcd_dma();
+			lcdc.update_mode = mode;
+			break;
+		default:
+			r = -EINVAL;
+		}
+	}
+
+	return r;
+}
+
+static enum omapfb_update_mode omap_lcdc_get_update_mode(void)
+{
+	return lcdc.update_mode;
+}
+
+/* PM code called only in internal controller mode */
+static void omap_lcdc_suspend(void)
+{
+	if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
+		disable_controller();
+		omap_stop_lcd_dma();
+	}
+}
+
+static void omap_lcdc_resume(void)
+{
+	if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
+		setup_regs();
+		load_palette();
+		setup_lcd_dma();
+		set_load_mode(OMAP_LCDC_LOAD_FRAME);
+		enable_irqs(OMAP_LCDC_IRQ_DONE);
+		enable_controller();
+	}
+}
+
+static void omap_lcdc_get_caps(int plane, struct omapfb_caps *caps)
+{
+	return;
+}
+
+int omap_lcdc_set_dma_callback(void (*callback)(void *data), void *data)
+{
+	BUG_ON(callback == NULL);
+
+	if (lcdc.dma_callback)
+		return -EBUSY;
+	else {
+		lcdc.dma_callback = callback;
+		lcdc.dma_callback_data = data;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(omap_lcdc_set_dma_callback);
+
+void omap_lcdc_free_dma_callback(void)
+{
+	lcdc.dma_callback = NULL;
+}
+EXPORT_SYMBOL(omap_lcdc_free_dma_callback);
+
+static void lcdc_dma_handler(u16 status, void *data)
+{
+	if (lcdc.dma_callback)
+		lcdc.dma_callback(lcdc.dma_callback_data);
+}
+
+static int mmap_kern(void)
+{
+	struct vm_struct	*kvma;
+	struct vm_area_struct	vma;
+	pgprot_t		pgprot;
+	unsigned long		vaddr;
+
+	kvma = get_vm_area(lcdc.vram_size, VM_IOREMAP);
+	if (kvma == NULL) {
+		dev_err(lcdc.fbdev->dev, "can't get kernel vm area\n");
+		return -ENOMEM;
+	}
+	vma.vm_mm = &init_mm;
+
+	vaddr = (unsigned long)kvma->addr;
+	vma.vm_start = vaddr;
+	vma.vm_end = vaddr + lcdc.vram_size;
+
+	pgprot = pgprot_writecombine(pgprot_kernel);
+	if (io_remap_pfn_range(&vma, vaddr,
+			   lcdc.vram_phys >> PAGE_SHIFT,
+			   lcdc.vram_size, pgprot) < 0) {
+		dev_err(lcdc.fbdev->dev, "kernel mmap for FB memory failed\n");
+		return -EAGAIN;
+	}
+
+	lcdc.vram_virt = (void *)vaddr;
+
+	return 0;
+}
+
+static void unmap_kern(void)
+{
+	vunmap(lcdc.vram_virt);
+}
+
+static int alloc_palette_ram(void)
+{
+	lcdc.palette_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
+		MAX_PALETTE_SIZE, &lcdc.palette_phys, GFP_KERNEL);
+	if (lcdc.palette_virt == NULL) {
+		dev_err(lcdc.fbdev->dev, "failed to alloc palette memory\n");
+		return -ENOMEM;
+	}
+	memset(lcdc.palette_virt, 0, MAX_PALETTE_SIZE);
+
+	return 0;
+}
+
+static void free_palette_ram(void)
+{
+	dma_free_writecombine(lcdc.fbdev->dev, MAX_PALETTE_SIZE,
+			lcdc.palette_virt, lcdc.palette_phys);
+}
+
+static int alloc_fbmem(struct omapfb_mem_region *region)
+{
+	int bpp;
+	int frame_size;
+	struct lcd_panel *panel = lcdc.fbdev->panel;
+
+	bpp = panel->bpp;
+	if (bpp == 12)
+		bpp = 16;
+	frame_size = PAGE_ALIGN(panel->x_res * bpp / 8 * panel->y_res);
+	if (region->size > frame_size)
+		frame_size = region->size;
+	lcdc.vram_size = frame_size;
+	lcdc.vram_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
+			lcdc.vram_size, &lcdc.vram_phys, GFP_KERNEL);
+	if (lcdc.vram_virt == NULL) {
+		dev_err(lcdc.fbdev->dev, "unable to allocate FB DMA memory\n");
+		return -ENOMEM;
+	}
+	region->size = frame_size;
+	region->paddr = lcdc.vram_phys;
+	region->vaddr = lcdc.vram_virt;
+	region->alloc = 1;
+
+	memset(lcdc.vram_virt, 0, lcdc.vram_size);
+
+	return 0;
+}
+
+static void free_fbmem(void)
+{
+	dma_free_writecombine(lcdc.fbdev->dev, lcdc.vram_size,
+			      lcdc.vram_virt, lcdc.vram_phys);
+}
+
+static int setup_fbmem(struct omapfb_mem_desc *req_md)
+{
+	int r;
+
+	if (!req_md->region_cnt) {
+		dev_err(lcdc.fbdev->dev, "no memory regions defined\n");
+		return -EINVAL;
+	}
+
+	if (req_md->region_cnt > 1) {
+		dev_err(lcdc.fbdev->dev, "only one plane is supported\n");
+		req_md->region_cnt = 1;
+	}
+
+	if (req_md->region[0].paddr == 0) {
+		lcdc.fbmem_allocated = 1;
+		if ((r = alloc_fbmem(&req_md->region[0])) < 0)
+			return r;
+		return 0;
+	}
+
+	lcdc.vram_phys = req_md->region[0].paddr;
+	lcdc.vram_size = req_md->region[0].size;
+
+	if ((r = mmap_kern()) < 0)
+		return r;
+
+	dev_dbg(lcdc.fbdev->dev, "vram at %08x size %08lx mapped to 0x%p\n",
+		 lcdc.vram_phys, lcdc.vram_size, lcdc.vram_virt);
+
+	return 0;
+}
+
+static void cleanup_fbmem(void)
+{
+	if (lcdc.fbmem_allocated)
+		free_fbmem();
+	else
+		unmap_kern();
+}
+
+static int omap_lcdc_init(struct omapfb_device *fbdev, int ext_mode,
+			  struct omapfb_mem_desc *req_vram)
+{
+	int r;
+	u32 l;
+	int rate;
+	struct clk *tc_ck;
+
+	lcdc.irq_mask = 0;
+
+	lcdc.fbdev = fbdev;
+	lcdc.ext_mode = ext_mode;
+
+	l = 0;
+	omap_writel(l, OMAP_LCDC_CONTROL);
+
+	/* FIXME:
+	 * According to errata, some platforms have a clock rate limitation
+	 */
+	lcdc.lcd_ck = clk_get(NULL, "lcd_ck");
+	if (IS_ERR(lcdc.lcd_ck)) {
+		dev_err(fbdev->dev, "unable to access LCD clock\n");
+		r = PTR_ERR(lcdc.lcd_ck);
+		goto fail0;
+	}
+
+	tc_ck = clk_get(NULL, "tc_ck");
+	if (IS_ERR(tc_ck)) {
+		dev_err(fbdev->dev, "unable to access TC clock\n");
+		r = PTR_ERR(tc_ck);
+		goto fail1;
+	}
+
+	rate = clk_get_rate(tc_ck);
+	clk_put(tc_ck);
+
+	if (machine_is_ams_delta())
+		rate /= 4;
+	if (machine_is_omap_h3())
+		rate /= 3;
+	r = clk_set_rate(lcdc.lcd_ck, rate);
+	if (r) {
+		dev_err(fbdev->dev, "failed to adjust LCD rate\n");
+		goto fail1;
+	}
+	clk_enable(lcdc.lcd_ck);
+
+	r = request_irq(OMAP_LCDC_IRQ, lcdc_irq_handler, 0, MODULE_NAME, fbdev);
+	if (r) {
+		dev_err(fbdev->dev, "unable to get IRQ\n");
+		goto fail2;
+	}
+
+	r = omap_request_lcd_dma(lcdc_dma_handler, NULL);
+	if (r) {
+		dev_err(fbdev->dev, "unable to get LCD DMA\n");
+		goto fail3;
+	}
+
+	omap_set_lcd_dma_single_transfer(ext_mode);
+	omap_set_lcd_dma_ext_controller(ext_mode);
+
+	if (!ext_mode)
+		if ((r = alloc_palette_ram()) < 0)
+			goto fail4;
+
+	if ((r = setup_fbmem(req_vram)) < 0)
+		goto fail5;
+
+	pr_info("omapfb: LCDC initialized\n");
+
+	return 0;
+fail5:
+	if (!ext_mode)
+		free_palette_ram();
+fail4:
+	omap_free_lcd_dma();
+fail3:
+	free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
+fail2:
+	clk_disable(lcdc.lcd_ck);
+fail1:
+	clk_put(lcdc.lcd_ck);
+fail0:
+	return r;
+}
+
+static void omap_lcdc_cleanup(void)
+{
+	if (!lcdc.ext_mode)
+		free_palette_ram();
+	cleanup_fbmem();
+	omap_free_lcd_dma();
+	free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
+	clk_disable(lcdc.lcd_ck);
+	clk_put(lcdc.lcd_ck);
+}
+
+const struct lcd_ctrl omap1_int_ctrl = {
+	.name			= "internal",
+	.init			= omap_lcdc_init,
+	.cleanup		= omap_lcdc_cleanup,
+	.get_caps		= omap_lcdc_get_caps,
+	.set_update_mode	= omap_lcdc_set_update_mode,
+	.get_update_mode	= omap_lcdc_get_update_mode,
+	.update_window		= NULL,
+	.suspend		= omap_lcdc_suspend,
+	.resume			= omap_lcdc_resume,
+	.setup_plane		= omap_lcdc_setup_plane,
+	.enable_plane		= omap_lcdc_enable_plane,
+	.setcolreg		= omap_lcdc_setcolreg,
+};
diff --git a/drivers/video/omap/lcdc.h b/drivers/video/omap/lcdc.h
new file mode 100644
index 0000000..adb731e5
--- /dev/null
+++ b/drivers/video/omap/lcdc.h
@@ -0,0 +1,7 @@
+#ifndef LCDC_H
+#define LCDC_H
+
+int omap_lcdc_set_dma_callback(void (*callback)(void *data), void *data);
+void omap_lcdc_free_dma_callback(void);
+
+#endif
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
new file mode 100644
index 0000000..14d0f7a
--- /dev/null
+++ b/drivers/video/omap/omapfb_main.c
@@ -0,0 +1,1941 @@
+/*
+ * Framebuffer driver for TI OMAP boards
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * Acknowledgements:
+ *   Alex McMains <aam@ridgerun.com>       - Original driver
+ *   Juha Yrjola <juha.yrjola@nokia.com>   - Original driver and improvements
+ *   Dirk Behme <dirk.behme@de.bosch.com>  - changes for 2.6 kernel API
+ *   Texas Instruments                     - H3 support
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+
+#include <asm/mach-types.h>
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+
+#define MODULE_NAME	"omapfb"
+
+static unsigned int	def_accel;
+static unsigned long	def_vram[OMAPFB_PLANE_NUM];
+static int		def_vram_cnt;
+static unsigned long	def_vxres;
+static unsigned long	def_vyres;
+static unsigned int	def_rotate;
+static unsigned int	def_mirror;
+
+#ifdef CONFIG_FB_OMAP_MANUAL_UPDATE
+static int		manual_update = 1;
+#else
+static int		manual_update;
+#endif
+
+static struct platform_device	*fbdev_pdev;
+static struct lcd_panel		*fbdev_panel;
+static struct omapfb_device	*omapfb_dev;
+
+struct caps_table_struct {
+	unsigned long flag;
+	const char *name;
+};
+
+static struct caps_table_struct ctrl_caps[] = {
+	{ OMAPFB_CAPS_MANUAL_UPDATE,  "manual update" },
+	{ OMAPFB_CAPS_TEARSYNC,       "tearing synchronization" },
+	{ OMAPFB_CAPS_PLANE_RELOCATE_MEM, "relocate plane memory" },
+	{ OMAPFB_CAPS_PLANE_SCALE,    "scale plane" },
+	{ OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE, "pixel double window" },
+	{ OMAPFB_CAPS_WINDOW_SCALE,   "scale window" },
+	{ OMAPFB_CAPS_WINDOW_OVERLAY, "overlay window" },
+	{ OMAPFB_CAPS_SET_BACKLIGHT,  "backlight setting" },
+};
+
+static struct caps_table_struct color_caps[] = {
+	{ 1 << OMAPFB_COLOR_RGB565,	"RGB565", },
+	{ 1 << OMAPFB_COLOR_YUV422,	"YUV422", },
+	{ 1 << OMAPFB_COLOR_YUV420,	"YUV420", },
+	{ 1 << OMAPFB_COLOR_CLUT_8BPP,	"CLUT8", },
+	{ 1 << OMAPFB_COLOR_CLUT_4BPP,	"CLUT4", },
+	{ 1 << OMAPFB_COLOR_CLUT_2BPP,	"CLUT2", },
+	{ 1 << OMAPFB_COLOR_CLUT_1BPP,	"CLUT1", },
+	{ 1 << OMAPFB_COLOR_RGB444,	"RGB444", },
+	{ 1 << OMAPFB_COLOR_YUY422,	"YUY422", },
+};
+
+/*
+ * ---------------------------------------------------------------------------
+ * LCD panel
+ * ---------------------------------------------------------------------------
+ */
+extern struct lcd_ctrl omap1_int_ctrl;
+extern struct lcd_ctrl omap2_int_ctrl;
+extern struct lcd_ctrl hwa742_ctrl;
+extern struct lcd_ctrl blizzard_ctrl;
+
+static struct lcd_ctrl *ctrls[] = {
+#ifdef CONFIG_ARCH_OMAP1
+	&omap1_int_ctrl,
+#else
+	&omap2_int_ctrl,
+#endif
+
+#ifdef CONFIG_FB_OMAP_LCDC_HWA742
+	&hwa742_ctrl,
+#endif
+#ifdef CONFIG_FB_OMAP_LCDC_BLIZZARD
+	&blizzard_ctrl,
+#endif
+};
+
+#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
+#ifdef CONFIG_ARCH_OMAP1
+extern struct lcd_ctrl_extif omap1_ext_if;
+#else
+extern struct lcd_ctrl_extif omap2_ext_if;
+#endif
+#endif
+
+static void omapfb_rqueue_lock(struct omapfb_device *fbdev)
+{
+	mutex_lock(&fbdev->rqueue_mutex);
+}
+
+static void omapfb_rqueue_unlock(struct omapfb_device *fbdev)
+{
+	mutex_unlock(&fbdev->rqueue_mutex);
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * LCD controller and LCD DMA
+ * ---------------------------------------------------------------------------
+ */
+/* Lookup table to map elem size to elem type. */
+static const int dma_elem_type[] = {
+	0,
+	OMAP_DMA_DATA_TYPE_S8,
+	OMAP_DMA_DATA_TYPE_S16,
+	0,
+	OMAP_DMA_DATA_TYPE_S32,
+};
+
+/*
+ * Allocate resources needed for LCD controller and LCD DMA operations. Video
+ * memory is allocated from system memory according to the virtual display
+ * size, except if a bigger memory size is specified explicitly as a kernel
+ * parameter.
+ */
+static int ctrl_init(struct omapfb_device *fbdev)
+{
+	int r;
+	int i;
+
+	/* kernel/module vram parameters override boot tags/board config */
+	if (def_vram_cnt) {
+		for (i = 0; i < def_vram_cnt; i++)
+			fbdev->mem_desc.region[i].size =
+				PAGE_ALIGN(def_vram[i]);
+		fbdev->mem_desc.region_cnt = i;
+	} else {
+		struct omapfb_platform_data *conf;
+
+		conf = fbdev->dev->platform_data;
+		fbdev->mem_desc = conf->mem_desc;
+	}
+
+	if (!fbdev->mem_desc.region_cnt) {
+		struct lcd_panel *panel = fbdev->panel;
+		int def_size;
+		int bpp = panel->bpp;
+
+		/* 12 bpp is packed in 16 bits */
+		if (bpp == 12)
+			bpp = 16;
+		def_size = def_vxres * def_vyres * bpp / 8;
+		fbdev->mem_desc.region_cnt = 1;
+		fbdev->mem_desc.region[0].size = PAGE_ALIGN(def_size);
+	}
+	r = fbdev->ctrl->init(fbdev, 0, &fbdev->mem_desc);
+	if (r < 0) {
+		dev_err(fbdev->dev, "controller initialization failed (%d)\n",
+			r);
+		return r;
+	}
+
+#ifdef DEBUG
+	for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
+		dev_dbg(fbdev->dev, "region%d phys %08x virt %p size=%lu\n",
+			 i,
+			 fbdev->mem_desc.region[i].paddr,
+			 fbdev->mem_desc.region[i].vaddr,
+			 fbdev->mem_desc.region[i].size);
+	}
+#endif
+	return 0;
+}
+
+static void ctrl_cleanup(struct omapfb_device *fbdev)
+{
+	fbdev->ctrl->cleanup();
+}
+
+/* Must be called with fbdev->rqueue_mutex held. */
+static int ctrl_change_mode(struct fb_info *fbi)
+{
+	int r;
+	unsigned long offset;
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+	struct fb_var_screeninfo *var = &fbi->var;
+
+	offset = var->yoffset * fbi->fix.line_length +
+		 var->xoffset * var->bits_per_pixel / 8;
+
+	if (fbdev->ctrl->sync)
+		fbdev->ctrl->sync();
+	r = fbdev->ctrl->setup_plane(plane->idx, plane->info.channel_out,
+				 offset, var->xres_virtual,
+				 plane->info.pos_x, plane->info.pos_y,
+				 var->xres, var->yres, plane->color_mode);
+	if (fbdev->ctrl->set_scale != NULL)
+		r = fbdev->ctrl->set_scale(plane->idx,
+				   var->xres, var->yres,
+				   plane->info.out_width,
+				   plane->info.out_height);
+
+	return r;
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * fbdev framework callbacks and the ioctl interface
+ * ---------------------------------------------------------------------------
+ */
+/* Called each time the omapfb device is opened */
+static int omapfb_open(struct fb_info *info, int user)
+{
+	return 0;
+}
+
+static void omapfb_sync(struct fb_info *info);
+
+/* Called when the omapfb device is closed. We make sure that any pending
+ * gfx DMA operations are ended, before we return. */
+static int omapfb_release(struct fb_info *info, int user)
+{
+	omapfb_sync(info);
+	return 0;
+}
+
+/* Store a single color palette entry into a pseudo palette or the hardware
+ * palette if one is available. For now we support only 16bpp and thus store
+ * the entry only to the pseudo palette.
+ */
+static int _setcolreg(struct fb_info *info, u_int regno, u_int red, u_int green,
+			u_int blue, u_int transp, int update_hw_pal)
+{
+	struct omapfb_plane_struct *plane = info->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+	struct fb_var_screeninfo *var = &info->var;
+	int r = 0;
+
+	switch (plane->color_mode) {
+	case OMAPFB_COLOR_YUV422:
+	case OMAPFB_COLOR_YUV420:
+	case OMAPFB_COLOR_YUY422:
+		r = -EINVAL;
+		break;
+	case OMAPFB_COLOR_CLUT_8BPP:
+	case OMAPFB_COLOR_CLUT_4BPP:
+	case OMAPFB_COLOR_CLUT_2BPP:
+	case OMAPFB_COLOR_CLUT_1BPP:
+		if (fbdev->ctrl->setcolreg)
+			r = fbdev->ctrl->setcolreg(regno, red, green, blue,
+							transp, update_hw_pal);
+		/* Fallthrough */
+	case OMAPFB_COLOR_RGB565:
+	case OMAPFB_COLOR_RGB444:
+		if (r != 0)
+			break;
+
+		if (regno < 0) {
+			r = -EINVAL;
+			break;
+		}
+
+		if (regno < 16) {
+			u16 pal;
+			pal = ((red >> (16 - var->red.length)) <<
+					var->red.offset) |
+			      ((green >> (16 - var->green.length)) <<
+					var->green.offset) |
+			      (blue >> (16 - var->blue.length));
+			((u32 *)(info->pseudo_palette))[regno] = pal;
+		}
+		break;
+	default:
+		BUG();
+	}
+	return r;
+}
+
+static int omapfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+			    u_int transp, struct fb_info *info)
+{
+	return _setcolreg(info, regno, red, green, blue, transp, 1);
+}
+
+static int omapfb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+{
+	int count, index, r;
+	u16 *red, *green, *blue, *transp;
+	u16 trans = 0xffff;
+
+	red     = cmap->red;
+	green   = cmap->green;
+	blue    = cmap->blue;
+	transp  = cmap->transp;
+	index   = cmap->start;
+
+	for (count = 0; count < cmap->len; count++) {
+		if (transp)
+			trans = *transp++;
+		r = _setcolreg(info, index++, *red++, *green++, *blue++, trans,
+				count == cmap->len - 1);
+		if (r != 0)
+			return r;
+	}
+
+	return 0;
+}
+
+static int omapfb_update_full_screen(struct fb_info *fbi);
+
+static int omapfb_blank(int blank, struct fb_info *fbi)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+	int do_update = 0;
+	int r = 0;
+
+	omapfb_rqueue_lock(fbdev);
+	switch (blank) {
+	case VESA_NO_BLANKING:
+		if (fbdev->state == OMAPFB_SUSPENDED) {
+			if (fbdev->ctrl->resume)
+				fbdev->ctrl->resume();
+			fbdev->panel->enable(fbdev->panel);
+			fbdev->state = OMAPFB_ACTIVE;
+			if (fbdev->ctrl->get_update_mode() ==
+					OMAPFB_MANUAL_UPDATE)
+				do_update = 1;
+		}
+		break;
+	case VESA_POWERDOWN:
+		if (fbdev->state == OMAPFB_ACTIVE) {
+			fbdev->panel->disable(fbdev->panel);
+			if (fbdev->ctrl->suspend)
+				fbdev->ctrl->suspend();
+			fbdev->state = OMAPFB_SUSPENDED;
+		}
+		break;
+	default:
+		r = -EINVAL;
+	}
+	omapfb_rqueue_unlock(fbdev);
+
+	if (r == 0 && do_update)
+		r = omapfb_update_full_screen(fbi);
+
+	return r;
+}
+
+static void omapfb_sync(struct fb_info *fbi)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+
+	omapfb_rqueue_lock(fbdev);
+	if (fbdev->ctrl->sync)
+		fbdev->ctrl->sync();
+	omapfb_rqueue_unlock(fbdev);
+}
+
+/*
+ * Set fb_info.fix fields and also updates fbdev.
+ * When calling this fb_info.var must be set up already.
+ */
+static void set_fb_fix(struct fb_info *fbi)
+{
+	struct fb_fix_screeninfo *fix = &fbi->fix;
+	struct fb_var_screeninfo *var = &fbi->var;
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_mem_region *rg;
+	int bpp;
+
+	rg = &plane->fbdev->mem_desc.region[plane->idx];
+	fbi->screen_base	= (char __iomem *)rg->vaddr;
+	fix->smem_start		= rg->paddr;
+	fix->smem_len		= rg->size;
+
+	fix->type = FB_TYPE_PACKED_PIXELS;
+	bpp = var->bits_per_pixel;
+	if (var->nonstd)
+		fix->visual = FB_VISUAL_PSEUDOCOLOR;
+	else switch (var->bits_per_pixel) {
+	case 16:
+	case 12:
+		fix->visual = FB_VISUAL_TRUECOLOR;
+		/* 12bpp is stored in 16 bits */
+		bpp = 16;
+		break;
+	case 1:
+	case 2:
+	case 4:
+	case 8:
+		fix->visual = FB_VISUAL_PSEUDOCOLOR;
+		break;
+	}
+	fix->accel		= FB_ACCEL_OMAP1610;
+	fix->line_length	= var->xres_virtual * bpp / 8;
+}
+
+static int set_color_mode(struct omapfb_plane_struct *plane,
+			  struct fb_var_screeninfo *var)
+{
+	switch (var->nonstd) {
+	case 0:
+		break;
+	case OMAPFB_COLOR_YUV422:
+		var->bits_per_pixel = 16;
+		plane->color_mode = var->nonstd;
+		return 0;
+	case OMAPFB_COLOR_YUV420:
+		var->bits_per_pixel = 12;
+		plane->color_mode = var->nonstd;
+		return 0;
+	case OMAPFB_COLOR_YUY422:
+		var->bits_per_pixel = 16;
+		plane->color_mode = var->nonstd;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	switch (var->bits_per_pixel) {
+	case 1:
+		plane->color_mode = OMAPFB_COLOR_CLUT_1BPP;
+		return 0;
+	case 2:
+		plane->color_mode = OMAPFB_COLOR_CLUT_2BPP;
+		return 0;
+	case 4:
+		plane->color_mode = OMAPFB_COLOR_CLUT_4BPP;
+		return 0;
+	case 8:
+		plane->color_mode = OMAPFB_COLOR_CLUT_8BPP;
+		return 0;
+	case 12:
+		var->bits_per_pixel = 16;
+		plane->color_mode = OMAPFB_COLOR_RGB444;
+		return 0;
+	case 16:
+		plane->color_mode = OMAPFB_COLOR_RGB565;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Check the values in var against our capabilities and in case of out of
+ * bound values try to adjust them.
+ */
+static int set_fb_var(struct fb_info *fbi,
+		      struct fb_var_screeninfo *var)
+{
+	int		bpp;
+	unsigned long	max_frame_size;
+	unsigned long	line_size;
+	int		xres_min, xres_max;
+	int		yres_min, yres_max;
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+	struct lcd_panel *panel = fbdev->panel;
+
+	if (set_color_mode(plane, var) < 0)
+		return -EINVAL;
+
+	bpp = var->bits_per_pixel;
+	if (plane->color_mode == OMAPFB_COLOR_RGB444)
+		bpp = 16;
+
+	switch (var->rotate) {
+	case 0:
+	case 180:
+		xres_min = OMAPFB_PLANE_XRES_MIN;
+		xres_max = panel->x_res;
+		yres_min = OMAPFB_PLANE_YRES_MIN;
+		yres_max = panel->y_res;
+		if (cpu_is_omap15xx()) {
+			var->xres = panel->x_res;
+			var->yres = panel->y_res;
+		}
+		break;
+	case 90:
+	case 270:
+		xres_min = OMAPFB_PLANE_YRES_MIN;
+		xres_max = panel->y_res;
+		yres_min = OMAPFB_PLANE_XRES_MIN;
+		yres_max = panel->x_res;
+		if (cpu_is_omap15xx()) {
+			var->xres = panel->y_res;
+			var->yres = panel->x_res;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (var->xres < xres_min)
+		var->xres = xres_min;
+	if (var->yres < yres_min)
+		var->yres = yres_min;
+	if (var->xres > xres_max)
+		var->xres = xres_max;
+	if (var->yres > yres_max)
+		var->yres = yres_max;
+
+	if (var->xres_virtual < var->xres)
+		var->xres_virtual = var->xres;
+	if (var->yres_virtual < var->yres)
+		var->yres_virtual = var->yres;
+	max_frame_size = fbdev->mem_desc.region[plane->idx].size;
+	line_size = var->xres_virtual * bpp / 8;
+	if (line_size * var->yres_virtual > max_frame_size) {
+		/* Try to keep yres_virtual first */
+		line_size = max_frame_size / var->yres_virtual;
+		var->xres_virtual = line_size * 8 / bpp;
+		if (var->xres_virtual < var->xres) {
+			/* Still doesn't fit. Shrink yres_virtual too */
+			var->xres_virtual = var->xres;
+			line_size = var->xres * bpp / 8;
+			var->yres_virtual = max_frame_size / line_size;
+		}
+		/* Recheck this, as the virtual size changed. */
+		if (var->xres_virtual < var->xres)
+			var->xres = var->xres_virtual;
+		if (var->yres_virtual < var->yres)
+			var->yres = var->yres_virtual;
+		if (var->xres < xres_min || var->yres < yres_min)
+			return -EINVAL;
+	}
+	if (var->xres + var->xoffset > var->xres_virtual)
+		var->xoffset = var->xres_virtual - var->xres;
+	if (var->yres + var->yoffset > var->yres_virtual)
+		var->yoffset = var->yres_virtual - var->yres;
+	line_size = var->xres * bpp / 8;
+
+	if (plane->color_mode == OMAPFB_COLOR_RGB444) {
+		var->red.offset	  = 8; var->red.length	 = 4;
+						var->red.msb_right   = 0;
+		var->green.offset = 4; var->green.length = 4;
+						var->green.msb_right = 0;
+		var->blue.offset  = 0; var->blue.length  = 4;
+						var->blue.msb_right  = 0;
+	} else {
+		var->red.offset	 = 11; var->red.length	 = 5;
+						var->red.msb_right   = 0;
+		var->green.offset = 5;  var->green.length = 6;
+						var->green.msb_right = 0;
+		var->blue.offset = 0;  var->blue.length  = 5;
+						var->blue.msb_right  = 0;
+	}
+
+	var->height		= -1;
+	var->width		= -1;
+	var->grayscale		= 0;
+
+	/* pixclock in ps, the rest in pixclocks */
+	var->pixclock		= 10000000 / (panel->pixel_clock / 100);
+	var->left_margin	= panel->hfp;
+	var->right_margin	= panel->hbp;
+	var->upper_margin	= panel->vfp;
+	var->lower_margin	= panel->vbp;
+	var->hsync_len		= panel->hsw;
+	var->vsync_len		= panel->vsw;
+
+	/* TODO: get these from panel->config */
+	var->vmode		= FB_VMODE_NONINTERLACED;
+	var->sync		= 0;
+
+	return 0;
+}
+
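+/*
+ * A rough worked example of the pixclock conversion done in set_fb_var(),
+ * assuming a hypothetical panel with pixel_clock = 6000 (kHz):
+ *
+ *	pixclock = 10000000 / (6000 / 100) = 10000000 / 60 = 166666 ps
+ *
+ * i.e. roughly the 166.7 ns period of a 6 MHz pixel clock; the fbdev API
+ * expects pixclock in picoseconds.
+ */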
+
+/* Set rotation (0, 90, 180, 270 degree), and switch to the new mode. */
+static void omapfb_rotate(struct fb_info *fbi, int rotate)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+
+	omapfb_rqueue_lock(fbdev);
+	if (cpu_is_omap15xx() && rotate != fbi->var.rotate) {
+		struct fb_var_screeninfo *new_var = &fbdev->new_var;
+
+		memcpy(new_var, &fbi->var, sizeof(*new_var));
+		new_var->rotate = rotate;
+		if (set_fb_var(fbi, new_var) == 0 &&
+		    memcmp(new_var, &fbi->var, sizeof(*new_var))) {
+			memcpy(&fbi->var, new_var, sizeof(*new_var));
+			ctrl_change_mode(fbi);
+		}
+	}
+	omapfb_rqueue_unlock(fbdev);
+}
+
+/*
+ * Set new x,y offsets in the virtual display for the visible area and switch
+ * to the new mode.
+ */
+static int omapfb_pan_display(struct fb_var_screeninfo *var,
+			       struct fb_info *fbi)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+	int r = 0;
+
+	omapfb_rqueue_lock(fbdev);
+	if (var->xoffset != fbi->var.xoffset ||
+	    var->yoffset != fbi->var.yoffset) {
+		struct fb_var_screeninfo *new_var = &fbdev->new_var;
+
+		memcpy(new_var, &fbi->var, sizeof(*new_var));
+		new_var->xoffset = var->xoffset;
+		new_var->yoffset = var->yoffset;
+		if (set_fb_var(fbi, new_var))
+			r = -EINVAL;
+		else {
+			memcpy(&fbi->var, new_var, sizeof(*new_var));
+			ctrl_change_mode(fbi);
+		}
+	}
+	omapfb_rqueue_unlock(fbdev);
+
+	return r;
+}
+
+/* Set mirroring about the vertical axis and switch to the new mode. */
+static int omapfb_mirror(struct fb_info *fbi, int mirror)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+	int r = 0;
+
+	omapfb_rqueue_lock(fbdev);
+	mirror = mirror ? 1 : 0;
+	if (cpu_is_omap15xx())
+		r = -EINVAL;
+	else if (mirror != plane->info.mirror) {
+		plane->info.mirror = mirror;
+		r = ctrl_change_mode(fbi);
+	}
+	omapfb_rqueue_unlock(fbdev);
+
+	return r;
+}
+
+/*
+ * Check the values in var and, if possible, adjust any out-of-bound values;
+ * otherwise return an error.
+ */
+static int omapfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+	int r;
+
+	omapfb_rqueue_lock(fbdev);
+	if (fbdev->ctrl->sync != NULL)
+		fbdev->ctrl->sync();
+	r = set_fb_var(fbi, var);
+	omapfb_rqueue_unlock(fbdev);
+
+	return r;
+}
+
+/*
+ * Switch to a new mode. The parameters for it have already been checked by
+ * omapfb_check_var.
+ */
+static int omapfb_set_par(struct fb_info *fbi)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+	int r = 0;
+
+	omapfb_rqueue_lock(fbdev);
+	set_fb_fix(fbi);
+	r = ctrl_change_mode(fbi);
+	omapfb_rqueue_unlock(fbdev);
+
+	return r;
+}
+
+int omapfb_update_window_async(struct fb_info *fbi,
+				struct omapfb_update_window *win,
+				void (*callback)(void *),
+				void *callback_data)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+	struct fb_var_screeninfo *var;
+
+	var = &fbi->var;
+	if (win->x >= var->xres || win->y >= var->yres ||
+	    win->out_x > var->xres || win->out_y >= var->yres)
+		return -EINVAL;
+
+	if (!fbdev->ctrl->update_window ||
+	    fbdev->ctrl->get_update_mode() != OMAPFB_MANUAL_UPDATE)
+		return -ENODEV;
+
+	if (win->x + win->width >= var->xres)
+		win->width = var->xres - win->x;
+	if (win->y + win->height >= var->yres)
+		win->height = var->yres - win->y;
+	/* The out sizes should be cropped to the LCD size */
+	if (win->out_x + win->out_width > fbdev->panel->x_res)
+		win->out_width = fbdev->panel->x_res - win->out_x;
+	if (win->out_y + win->out_height > fbdev->panel->y_res)
+		win->out_height = fbdev->panel->y_res - win->out_y;
+	if (!win->width || !win->height || !win->out_width || !win->out_height)
+		return 0;
+
+	return fbdev->ctrl->update_window(fbi, win, callback, callback_data);
+}
+EXPORT_SYMBOL(omapfb_update_window_async);
+
+static int omapfb_update_win(struct fb_info *fbi,
+				struct omapfb_update_window *win)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+	int ret;
+
+	omapfb_rqueue_lock(plane->fbdev);
+	ret = omapfb_update_window_async(fbi, win, NULL, 0);
+	omapfb_rqueue_unlock(plane->fbdev);
+
+	return ret;
+}
+
+static int omapfb_update_full_screen(struct fb_info *fbi)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+	struct omapfb_update_window win;
+	int r;
+
+	if (!fbdev->ctrl->update_window ||
+	    fbdev->ctrl->get_update_mode() != OMAPFB_MANUAL_UPDATE)
+		return -ENODEV;
+
+	win.x = 0;
+	win.y = 0;
+	win.width = fbi->var.xres;
+	win.height = fbi->var.yres;
+	win.out_x = 0;
+	win.out_y = 0;
+	win.out_width = fbi->var.xres;
+	win.out_height = fbi->var.yres;
+	win.format = 0;
+
+	omapfb_rqueue_lock(fbdev);
+	r = fbdev->ctrl->update_window(fbi, &win, NULL, 0);
+	omapfb_rqueue_unlock(fbdev);
+
+	return r;
+}
+
+static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+	struct lcd_panel *panel = fbdev->panel;
+	struct omapfb_plane_info old_info;
+	int r = 0;
+
+	if (pi->pos_x + pi->out_width > panel->x_res ||
+	    pi->pos_y + pi->out_height > panel->y_res)
+		return -EINVAL;
+
+	omapfb_rqueue_lock(fbdev);
+	if (pi->enabled && !fbdev->mem_desc.region[plane->idx].size) {
+		/*
+		 * This plane's memory was freed, can't enable it
+		 * until it's reallocated.
+		 */
+		r = -EINVAL;
+		goto out;
+	}
+	old_info = plane->info;
+	plane->info = *pi;
+	if (pi->enabled) {
+		r = ctrl_change_mode(fbi);
+		if (r < 0) {
+			plane->info = old_info;
+			goto out;
+		}
+	}
+	r = fbdev->ctrl->enable_plane(plane->idx, pi->enabled);
+	if (r < 0) {
+		plane->info = old_info;
+		goto out;
+	}
+out:
+	omapfb_rqueue_unlock(fbdev);
+	return r;
+}
+
+static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+
+	*pi = plane->info;
+	return 0;
+}
+
+static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+	struct omapfb_mem_region *rg = &fbdev->mem_desc.region[plane->idx];
+	size_t size;
+	int r = 0;
+
+	if (fbdev->ctrl->setup_mem == NULL)
+		return -ENODEV;
+	if (mi->type > OMAPFB_MEMTYPE_MAX)
+		return -EINVAL;
+
+	size = PAGE_ALIGN(mi->size);
+	omapfb_rqueue_lock(fbdev);
+	if (plane->info.enabled) {
+		r = -EBUSY;
+		goto out;
+	}
+	if (rg->size != size || rg->type != mi->type) {
+		struct fb_var_screeninfo *new_var = &fbdev->new_var;
+		unsigned long old_size = rg->size;
+		u8	      old_type = rg->type;
+		unsigned long paddr;
+
+		rg->size = size;
+		rg->type = mi->type;
+		/*
+		 * size == 0 is a special case, for which we
+		 * don't check / adjust the screen parameters.
+		 * This isn't a problem since the plane can't
+		 * be reenabled unless its size is > 0.
+		 */
+		if (old_size != size && size) {
+			memcpy(new_var, &fbi->var, sizeof(*new_var));
+			r = set_fb_var(fbi, new_var);
+			if (r < 0)
+				goto out;
+		}
+
+		if (fbdev->ctrl->sync)
+			fbdev->ctrl->sync();
+		r = fbdev->ctrl->setup_mem(plane->idx, size, mi->type, &paddr);
+		if (r < 0) {
+			/* Revert changes. */
+			rg->size = old_size;
+			rg->type = old_type;
+			goto out;
+		}
+		rg->paddr = paddr;
+
+		if (old_size != size) {
+			if (size) {
+				memcpy(&fbi->var, new_var, sizeof(fbi->var));
+				set_fb_fix(fbi);
+			} else {
+				/*
+				 * Set these explicitly to indicate that the
+				 * plane memory is deallocated; the other
+				 * screen parameters in var / fix are invalid.
+				 */
+				fbi->fix.smem_start = 0;
+				fbi->fix.smem_len = 0;
+			}
+		}
+	}
+out:
+	omapfb_rqueue_unlock(fbdev);
+
+	return r;
+}
+
+static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+	struct omapfb_mem_region *rg;
+
+	rg = &fbdev->mem_desc.region[plane->idx];
+	memset(mi, 0, sizeof(*mi));
+	mi->size = rg->size;
+	mi->type = rg->type;
+
+	return 0;
+}
+
+static int omapfb_set_color_key(struct omapfb_device *fbdev,
+				struct omapfb_color_key *ck)
+{
+	int r;
+
+	if (!fbdev->ctrl->set_color_key)
+		return -ENODEV;
+
+	omapfb_rqueue_lock(fbdev);
+	r = fbdev->ctrl->set_color_key(ck);
+	omapfb_rqueue_unlock(fbdev);
+
+	return r;
+}
+
+static int omapfb_get_color_key(struct omapfb_device *fbdev,
+				struct omapfb_color_key *ck)
+{
+	int r;
+
+	if (!fbdev->ctrl->get_color_key)
+		return -ENODEV;
+
+	omapfb_rqueue_lock(fbdev);
+	r = fbdev->ctrl->get_color_key(ck);
+	omapfb_rqueue_unlock(fbdev);
+
+	return r;
+}
+
+static struct blocking_notifier_head omapfb_client_list[OMAPFB_PLANE_NUM];
+static int notifier_inited;
+
+static void omapfb_init_notifier(void)
+{
+	int i;
+
+	for (i = 0; i < OMAPFB_PLANE_NUM; i++)
+		BLOCKING_INIT_NOTIFIER_HEAD(&omapfb_client_list[i]);
+}
+
+int omapfb_register_client(struct omapfb_notifier_block *omapfb_nb,
+				omapfb_notifier_callback_t callback,
+				void *callback_data)
+{
+	int r;
+
+	if ((unsigned)omapfb_nb->plane_idx >= OMAPFB_PLANE_NUM)
+		return -EINVAL;
+
+	if (!notifier_inited) {
+		omapfb_init_notifier();
+		notifier_inited = 1;
+	}
+
+	omapfb_nb->nb.notifier_call = (int (*)(struct notifier_block *,
+					unsigned long, void *))callback;
+	omapfb_nb->data = callback_data;
+	r = blocking_notifier_chain_register(
+				&omapfb_client_list[omapfb_nb->plane_idx],
+				&omapfb_nb->nb);
+	if (r)
+		return r;
+	if (omapfb_dev != NULL &&
+	    omapfb_dev->ctrl && omapfb_dev->ctrl->bind_client) {
+		omapfb_dev->ctrl->bind_client(omapfb_nb);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(omapfb_register_client);
+
+int omapfb_unregister_client(struct omapfb_notifier_block *omapfb_nb)
+{
+	return blocking_notifier_chain_unregister(
+		&omapfb_client_list[omapfb_nb->plane_idx], &omapfb_nb->nb);
+}
+EXPORT_SYMBOL(omapfb_unregister_client);
+
+void omapfb_notify_clients(struct omapfb_device *fbdev, unsigned long event)
+{
+	int i;
+
+	if (!notifier_inited)
+		/* no client registered yet */
+		return;
+
+	for (i = 0; i < OMAPFB_PLANE_NUM; i++)
+		blocking_notifier_call_chain(&omapfb_client_list[i], event,
+				    fbdev->fb_info[i]);
+}
+EXPORT_SYMBOL(omapfb_notify_clients);
+
+static int omapfb_set_update_mode(struct omapfb_device *fbdev,
+				   enum omapfb_update_mode mode)
+{
+	int r;
+
+	omapfb_rqueue_lock(fbdev);
+	r = fbdev->ctrl->set_update_mode(mode);
+	omapfb_rqueue_unlock(fbdev);
+
+	return r;
+}
+
+static enum omapfb_update_mode omapfb_get_update_mode(struct omapfb_device *fbdev)
+{
+	int r;
+
+	omapfb_rqueue_lock(fbdev);
+	r = fbdev->ctrl->get_update_mode();
+	omapfb_rqueue_unlock(fbdev);
+
+	return r;
+}
+
+static void omapfb_get_caps(struct omapfb_device *fbdev, int plane,
+				     struct omapfb_caps *caps)
+{
+	memset(caps, 0, sizeof(*caps));
+	fbdev->ctrl->get_caps(plane, caps);
+	caps->ctrl |= fbdev->panel->get_caps(fbdev->panel);
+}
+
+/* For lcd testing */
+void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval)
+{
+	omapfb_rqueue_lock(fbdev);
+	*(u16 *)fbdev->mem_desc.region[0].vaddr = pixval;
+	if (fbdev->ctrl->get_update_mode() == OMAPFB_MANUAL_UPDATE) {
+		struct omapfb_update_window win;
+
+		memset(&win, 0, sizeof(win));
+		win.width = 2;
+		win.height = 2;
+		win.out_width = 2;
+		win.out_height = 2;
+		fbdev->ctrl->update_window(fbdev->fb_info[0], &win, NULL, 0);
+	}
+	omapfb_rqueue_unlock(fbdev);
+}
+EXPORT_SYMBOL(omapfb_write_first_pixel);
+
+/*
+ * Ioctl interface. Part of the kernel mode frame buffer API is duplicated
+ * here to be accessible by user mode code.
+ */
+static int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd,
+			unsigned long arg)
+{
+	struct omapfb_plane_struct *plane = fbi->par;
+	struct omapfb_device	*fbdev = plane->fbdev;
+	struct fb_ops		*ops = fbi->fbops;
+	union {
+		struct omapfb_update_window	update_window;
+		struct omapfb_plane_info	plane_info;
+		struct omapfb_mem_info		mem_info;
+		struct omapfb_color_key		color_key;
+		enum omapfb_update_mode		update_mode;
+		struct omapfb_caps		caps;
+		unsigned int		mirror;
+		int			plane_out;
+		int			enable_plane;
+	} p;
+	int r = 0;
+
+	BUG_ON(!ops);
+	switch (cmd) {
+	case OMAPFB_MIRROR:
+		if (get_user(p.mirror, (int __user *)arg))
+			r = -EFAULT;
+		else
+			omapfb_mirror(fbi, p.mirror);
+		break;
+	case OMAPFB_SYNC_GFX:
+		omapfb_sync(fbi);
+		break;
+	case OMAPFB_VSYNC:
+		break;
+	case OMAPFB_SET_UPDATE_MODE:
+		if (get_user(p.update_mode, (int __user *)arg))
+			r = -EFAULT;
+		else
+			r = omapfb_set_update_mode(fbdev, p.update_mode);
+		break;
+	case OMAPFB_GET_UPDATE_MODE:
+		p.update_mode = omapfb_get_update_mode(fbdev);
+		if (put_user(p.update_mode,
+					(enum omapfb_update_mode __user *)arg))
+			r = -EFAULT;
+		break;
+	case OMAPFB_UPDATE_WINDOW_OLD:
+		if (copy_from_user(&p.update_window, (void __user *)arg,
+				   sizeof(struct omapfb_update_window_old)))
+			r = -EFAULT;
+		else {
+			struct omapfb_update_window *u = &p.update_window;
+			u->out_x = u->x;
+			u->out_y = u->y;
+			u->out_width = u->width;
+			u->out_height = u->height;
+			memset(u->reserved, 0, sizeof(u->reserved));
+			r = omapfb_update_win(fbi, u);
+		}
+		break;
+	case OMAPFB_UPDATE_WINDOW:
+		if (copy_from_user(&p.update_window, (void __user *)arg,
+				   sizeof(p.update_window)))
+			r = -EFAULT;
+		else
+			r = omapfb_update_win(fbi, &p.update_window);
+		break;
+	case OMAPFB_SETUP_PLANE:
+		if (copy_from_user(&p.plane_info, (void __user *)arg,
+				   sizeof(p.plane_info)))
+			r = -EFAULT;
+		else
+			r = omapfb_setup_plane(fbi, &p.plane_info);
+		break;
+	case OMAPFB_QUERY_PLANE:
+		if ((r = omapfb_query_plane(fbi, &p.plane_info)) < 0)
+			break;
+		if (copy_to_user((void __user *)arg, &p.plane_info,
+				   sizeof(p.plane_info)))
+			r = -EFAULT;
+		break;
+	case OMAPFB_SETUP_MEM:
+		if (copy_from_user(&p.mem_info, (void __user *)arg,
+				   sizeof(p.mem_info)))
+			r = -EFAULT;
+		else
+			r = omapfb_setup_mem(fbi, &p.mem_info);
+		break;
+	case OMAPFB_QUERY_MEM:
+		if ((r = omapfb_query_mem(fbi, &p.mem_info)) < 0)
+			break;
+		if (copy_to_user((void __user *)arg, &p.mem_info,
+				   sizeof(p.mem_info)))
+			r = -EFAULT;
+		break;
+	case OMAPFB_SET_COLOR_KEY:
+		if (copy_from_user(&p.color_key, (void __user *)arg,
+				   sizeof(p.color_key)))
+			r = -EFAULT;
+		else
+			r = omapfb_set_color_key(fbdev, &p.color_key);
+		break;
+	case OMAPFB_GET_COLOR_KEY:
+		if ((r = omapfb_get_color_key(fbdev, &p.color_key)) < 0)
+			break;
+		if (copy_to_user((void __user *)arg, &p.color_key,
+				 sizeof(p.color_key)))
+			r = -EFAULT;
+		break;
+	case OMAPFB_GET_CAPS:
+		omapfb_get_caps(fbdev, plane->idx, &p.caps);
+		if (copy_to_user((void __user *)arg, &p.caps, sizeof(p.caps)))
+			r = -EFAULT;
+		break;
+	case OMAPFB_LCD_TEST:
+		{
+			int test_num;
+
+			if (get_user(test_num, (int __user *)arg)) {
+				r = -EFAULT;
+				break;
+			}
+			if (!fbdev->panel->run_test) {
+				r = -EINVAL;
+				break;
+			}
+			r = fbdev->panel->run_test(fbdev->panel, test_num);
+			break;
+		}
+	case OMAPFB_CTRL_TEST:
+		{
+			int test_num;
+
+			if (get_user(test_num, (int __user *)arg)) {
+				r = -EFAULT;
+				break;
+			}
+			if (!fbdev->ctrl->run_test) {
+				r = -EINVAL;
+				break;
+			}
+			r = fbdev->ctrl->run_test(test_num);
+			break;
+		}
+	default:
+		r = -EINVAL;
+	}
+
+	return r;
+}
+
+static int omapfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct omapfb_plane_struct *plane = info->par;
+	struct omapfb_device *fbdev = plane->fbdev;
+	int r;
+
+	omapfb_rqueue_lock(fbdev);
+	r = fbdev->ctrl->mmap(info, vma);
+	omapfb_rqueue_unlock(fbdev);
+
+	return r;
+}
+
+/*
+ * Callback table for the frame buffer framework. Some of these pointers
+ * will be changed according to the current setting of fb_info->accel_flags.
+ */
+static struct fb_ops omapfb_ops = {
+	.owner		= THIS_MODULE,
+	.fb_open        = omapfb_open,
+	.fb_release     = omapfb_release,
+	.fb_setcolreg	= omapfb_setcolreg,
+	.fb_setcmap	= omapfb_setcmap,
+	.fb_fillrect	= cfb_fillrect,
+	.fb_copyarea	= cfb_copyarea,
+	.fb_imageblit	= cfb_imageblit,
+	.fb_blank       = omapfb_blank,
+	.fb_ioctl	= omapfb_ioctl,
+	.fb_check_var	= omapfb_check_var,
+	.fb_set_par	= omapfb_set_par,
+	.fb_rotate	= omapfb_rotate,
+	.fb_pan_display = omapfb_pan_display,
+};
+
+/*
+ * ---------------------------------------------------------------------------
+ * Sysfs interface
+ * ---------------------------------------------------------------------------
+ */
+/* omapfbX sysfs entries */
+static ssize_t omapfb_show_caps_num(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+	int plane;
+	size_t size;
+	struct omapfb_caps caps;
+
+	plane = 0;
+	size = 0;
+	while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
+		omapfb_get_caps(fbdev, plane, &caps);
+		size += snprintf(&buf[size], PAGE_SIZE - size,
+			"plane#%d %#010x %#010x %#010x\n",
+			plane, caps.ctrl, caps.plane_color, caps.wnd_color);
+		plane++;
+	}
+	return size;
+}
+
+static ssize_t omapfb_show_caps_text(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+	int i;
+	struct omapfb_caps caps;
+	int plane;
+	size_t size;
+
+	plane = 0;
+	size = 0;
+	while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
+		omapfb_get_caps(fbdev, plane, &caps);
+		size += snprintf(&buf[size], PAGE_SIZE - size,
+				 "plane#%d:\n", plane);
+		for (i = 0; i < ARRAY_SIZE(ctrl_caps) &&
+		     size < PAGE_SIZE; i++) {
+			if (ctrl_caps[i].flag & caps.ctrl)
+				size += snprintf(&buf[size], PAGE_SIZE - size,
+					" %s\n", ctrl_caps[i].name);
+		}
+		size += snprintf(&buf[size], PAGE_SIZE - size,
+				 " plane colors:\n");
+		for (i = 0; i < ARRAY_SIZE(color_caps) &&
+		     size < PAGE_SIZE; i++) {
+			if (color_caps[i].flag & caps.plane_color)
+				size += snprintf(&buf[size], PAGE_SIZE - size,
+					"  %s\n", color_caps[i].name);
+		}
+		size += snprintf(&buf[size], PAGE_SIZE - size,
+				 " window colors:\n");
+		for (i = 0; i < ARRAY_SIZE(color_caps) &&
+		     size < PAGE_SIZE; i++) {
+			if (color_caps[i].flag & caps.wnd_color)
+				size += snprintf(&buf[size], PAGE_SIZE - size,
+					"  %s\n", color_caps[i].name);
+		}
+
+		plane++;
+	}
+	return size;
+}
+
+static DEVICE_ATTR(caps_num, 0444, omapfb_show_caps_num, NULL);
+static DEVICE_ATTR(caps_text, 0444, omapfb_show_caps_text, NULL);
+
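+/*
+ * A sketch of what reading caps_num might produce; the sysfs path and the
+ * capability values below are illustrative only and depend on the actual
+ * controller and panel:
+ *
+ *	# cat /sys/devices/platform/omapfb/caps_num
+ *	plane#0 0x00000011 0x000000fc 0x000000fc
+ *	plane#1 0x00000011 0x000000fc 0x000000fc
+ *	plane#2 0x00000011 0x000000fc 0x000000fc
+ */
+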
+/* panel sysfs entries */
+static ssize_t omapfb_show_panel_name(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->panel->name);
+}
+
+static ssize_t omapfb_show_bklight_level(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+	int r;
+
+	if (fbdev->panel->get_bklight_level) {
+		r = snprintf(buf, PAGE_SIZE, "%d\n",
+			     fbdev->panel->get_bklight_level(fbdev->panel));
+	} else
+		r = -ENODEV;
+	return r;
+}
+
+static ssize_t omapfb_store_bklight_level(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t size)
+{
+	struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+	int r;
+
+	if (fbdev->panel->set_bklight_level) {
+		unsigned int level;
+
+		if (sscanf(buf, "%10d", &level) == 1) {
+			r = fbdev->panel->set_bklight_level(fbdev->panel,
+							    level);
+		} else
+			r = -EINVAL;
+	} else
+		r = -ENODEV;
+	return r ? r : size;
+}
+
+static ssize_t omapfb_show_bklight_max(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+	int r;
+
+	if (fbdev->panel->get_bklight_max) {
+		r = snprintf(buf, PAGE_SIZE, "%d\n",
+			     fbdev->panel->get_bklight_max(fbdev->panel));
+	} else
+		r = -ENODEV;
+	return r;
+}
+
+static struct device_attribute dev_attr_panel_name =
+	__ATTR(name, 0444, omapfb_show_panel_name, NULL);
+static DEVICE_ATTR(backlight_level, 0664,
+		   omapfb_show_bklight_level, omapfb_store_bklight_level);
+static DEVICE_ATTR(backlight_max, 0444, omapfb_show_bklight_max, NULL);
+
+static struct attribute *panel_attrs[] = {
+	&dev_attr_panel_name.attr,
+	&dev_attr_backlight_level.attr,
+	&dev_attr_backlight_max.attr,
+	NULL,
+};
+
+static struct attribute_group panel_attr_grp = {
+	.name  = "panel",
+	.attrs = panel_attrs,
+};
+
+/* ctrl sysfs entries */
+static ssize_t omapfb_show_ctrl_name(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->ctrl->name);
+}
+
+static struct device_attribute dev_attr_ctrl_name =
+	__ATTR(name, 0444, omapfb_show_ctrl_name, NULL);
+
+static struct attribute *ctrl_attrs[] = {
+	&dev_attr_ctrl_name.attr,
+	NULL,
+};
+
+static struct attribute_group ctrl_attr_grp = {
+	.name  = "ctrl",
+	.attrs = ctrl_attrs,
+};
+
+static int omapfb_register_sysfs(struct omapfb_device *fbdev)
+{
+	int r;
+
+	if ((r = device_create_file(fbdev->dev, &dev_attr_caps_num)))
+		goto fail0;
+
+	if ((r = device_create_file(fbdev->dev, &dev_attr_caps_text)))
+		goto fail1;
+
+	if ((r = sysfs_create_group(&fbdev->dev->kobj, &panel_attr_grp)))
+		goto fail2;
+
+	if ((r = sysfs_create_group(&fbdev->dev->kobj, &ctrl_attr_grp)))
+		goto fail3;
+
+	return 0;
+fail3:
+	sysfs_remove_group(&fbdev->dev->kobj, &panel_attr_grp);
+fail2:
+	device_remove_file(fbdev->dev, &dev_attr_caps_text);
+fail1:
+	device_remove_file(fbdev->dev, &dev_attr_caps_num);
+fail0:
+	dev_err(fbdev->dev, "unable to register sysfs interface\n");
+	return r;
+}
+
+static void omapfb_unregister_sysfs(struct omapfb_device *fbdev)
+{
+	sysfs_remove_group(&fbdev->dev->kobj, &ctrl_attr_grp);
+	sysfs_remove_group(&fbdev->dev->kobj, &panel_attr_grp);
+	device_remove_file(fbdev->dev, &dev_attr_caps_num);
+	device_remove_file(fbdev->dev, &dev_attr_caps_text);
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * LDM callbacks
+ * ---------------------------------------------------------------------------
+ */
+/* Initialize the system fb_info object and set the default video mode.
+ * The frame buffer memory has already been allocated by lcddma_init.
+ */
+static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info)
+{
+	struct fb_var_screeninfo	*var = &info->var;
+	struct fb_fix_screeninfo	*fix = &info->fix;
+	int				r = 0;
+
+	info->fbops = &omapfb_ops;
+	info->flags = FBINFO_FLAG_DEFAULT;
+
+	strncpy(fix->id, MODULE_NAME, sizeof(fix->id));
+
+	info->pseudo_palette = fbdev->pseudo_palette;
+
+	var->accel_flags  = def_accel ? FB_ACCELF_TEXT : 0;
+	var->xres = def_vxres;
+	var->yres = def_vyres;
+	var->xres_virtual = def_vxres;
+	var->yres_virtual = def_vyres;
+	var->rotate	  = def_rotate;
+	var->bits_per_pixel = fbdev->panel->bpp;
+
+	set_fb_var(info, var);
+	set_fb_fix(info);
+
+	r = fb_alloc_cmap(&info->cmap, 16, 0);
+	if (r != 0)
+		dev_err(fbdev->dev, "unable to allocate color map memory\n");
+
+	return r;
+}
+
+/* Release the fb_info object */
+static void fbinfo_cleanup(struct omapfb_device *fbdev, struct fb_info *fbi)
+{
+	fb_dealloc_cmap(&fbi->cmap);
+}
+
+static void planes_cleanup(struct omapfb_device *fbdev)
+{
+	int i;
+
+	for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
+		if (fbdev->fb_info[i] == NULL)
+			break;
+		fbinfo_cleanup(fbdev, fbdev->fb_info[i]);
+		framebuffer_release(fbdev->fb_info[i]);
+	}
+}
+
+static int planes_init(struct omapfb_device *fbdev)
+{
+	struct fb_info *fbi;
+	int i;
+	int r;
+
+	for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
+		struct omapfb_plane_struct *plane;
+		fbi = framebuffer_alloc(sizeof(struct omapfb_plane_struct),
+					fbdev->dev);
+		if (fbi == NULL) {
+			dev_err(fbdev->dev,
+				"unable to allocate memory for plane info\n");
+			planes_cleanup(fbdev);
+			return -ENOMEM;
+		}
+		plane = fbi->par;
+		plane->idx = i;
+		plane->fbdev = fbdev;
+		plane->info.mirror = def_mirror;
+		fbdev->fb_info[i] = fbi;
+
+		if ((r = fbinfo_init(fbdev, fbi)) < 0) {
+			framebuffer_release(fbi);
+			planes_cleanup(fbdev);
+			return r;
+		}
+		plane->info.out_width = fbi->var.xres;
+		plane->info.out_height = fbi->var.yres;
+	}
+	return 0;
+}
+
+/*
+ * Free driver resources. Can be called to roll back an aborted initialization
+ * sequence.
+ */
+static void omapfb_free_resources(struct omapfb_device *fbdev, int state)
+{
+	int i;
+
+	switch (state) {
+	case OMAPFB_ACTIVE:
+		for (i = 0; i < fbdev->mem_desc.region_cnt; i++)
+			unregister_framebuffer(fbdev->fb_info[i]);
+	case 7:
+		omapfb_unregister_sysfs(fbdev);
+	case 6:
+		fbdev->panel->disable(fbdev->panel);
+	case 5:
+		omapfb_set_update_mode(fbdev, OMAPFB_UPDATE_DISABLED);
+	case 4:
+		planes_cleanup(fbdev);
+	case 3:
+		ctrl_cleanup(fbdev);
+	case 2:
+		fbdev->panel->cleanup(fbdev->panel);
+	case 1:
+		dev_set_drvdata(fbdev->dev, NULL);
+		kfree(fbdev);
+	case 0:
+		/* nothing to free */
+		break;
+	default:
+		BUG();
+	}
+}
+
+static int omapfb_find_ctrl(struct omapfb_device *fbdev)
+{
+	struct omapfb_platform_data *conf;
+	char name[17];
+	int i;
+
+	conf = fbdev->dev->platform_data;
+
+	fbdev->ctrl = NULL;
+
+	strncpy(name, conf->lcd.ctrl_name, sizeof(name) - 1);
+	name[sizeof(name) - 1] = '\0';
+
+	if (strcmp(name, "internal") == 0) {
+		fbdev->ctrl = fbdev->int_ctrl;
+		return 0;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ctrls); i++) {
+		dev_dbg(fbdev->dev, "ctrl %s\n", ctrls[i]->name);
+		if (strcmp(ctrls[i]->name, name) == 0) {
+			fbdev->ctrl = ctrls[i];
+			break;
+		}
+	}
+
+	if (fbdev->ctrl == NULL) {
+		dev_dbg(fbdev->dev, "ctrl %s not supported\n", name);
+		return -1;
+	}
+
+	return 0;
+}
+
+static void check_required_callbacks(struct omapfb_device *fbdev)
+{
+#define _C(x) (fbdev->ctrl->x != NULL)
+#define _P(x) (fbdev->panel->x != NULL)
+	BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL);
+	BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) &&
+		 _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) &&
+		 _P(init) && _P(cleanup) && _P(enable) && _P(disable) &&
+		 _P(get_caps)));
+#undef _P
+#undef _C
+}
+
+/*
+ * Called by LDM binding to probe and attach a new device.
+ * Initialization sequence:
+ *   1. allocate system omapfb_device structure
+ *   2. select controller type according to platform configuration
+ *      init LCD panel
+ *   3. init LCD controller and LCD DMA
+ *   4. init system fb_info structure for all planes
+ *   5. setup video mode for first plane and enable it
+ *   6. enable LCD panel
+ *   7. register sysfs attributes
+ *   OMAPFB_ACTIVE: register system fb_info structure for all planes
+ */
+static int omapfb_do_probe(struct platform_device *pdev,
+				struct lcd_panel *panel)
+{
+	struct omapfb_device	*fbdev = NULL;
+	int			init_state;
+	unsigned long		phz, hhz, vhz;
+	unsigned long		vram;
+	int			i;
+	int			r = 0;
+
+	init_state = 0;
+
+	if (pdev->num_resources != 0) {
+		dev_err(&pdev->dev, "probed for an unknown device\n");
+		r = -ENODEV;
+		goto cleanup;
+	}
+
+	if (pdev->dev.platform_data == NULL) {
+		dev_err(&pdev->dev, "missing platform data\n");
+		r = -ENOENT;
+		goto cleanup;
+	}
+
+	fbdev = kzalloc(sizeof(struct omapfb_device), GFP_KERNEL);
+	if (fbdev == NULL) {
+		dev_err(&pdev->dev,
+			"unable to allocate memory for device info\n");
+		r = -ENOMEM;
+		goto cleanup;
+	}
+	init_state++;
+
+	fbdev->dev = &pdev->dev;
+	fbdev->panel = panel;
+	platform_set_drvdata(pdev, fbdev);
+
+	mutex_init(&fbdev->rqueue_mutex);
+
+#ifdef CONFIG_ARCH_OMAP1
+	fbdev->int_ctrl = &omap1_int_ctrl;
+#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
+	fbdev->ext_if = &omap1_ext_if;
+#endif
+#else	/* OMAP2 */
+	fbdev->int_ctrl = &omap2_int_ctrl;
+#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
+	fbdev->ext_if = &omap2_ext_if;
+#endif
+#endif
+	if (omapfb_find_ctrl(fbdev) < 0) {
+		dev_err(fbdev->dev,
+			"LCD controller not found, board not supported\n");
+		r = -ENODEV;
+		goto cleanup;
+	}
+
+	r = fbdev->panel->init(fbdev->panel, fbdev);
+	if (r)
+		goto cleanup;
+
+	pr_info("omapfb: configured for panel %s\n", fbdev->panel->name);
+
+	def_vxres = def_vxres ? : fbdev->panel->x_res;
+	def_vyres = def_vyres ? : fbdev->panel->y_res;
+
+	init_state++;
+
+	r = ctrl_init(fbdev);
+	if (r)
+		goto cleanup;
+	if (fbdev->ctrl->mmap != NULL)
+		omapfb_ops.fb_mmap = omapfb_mmap;
+	init_state++;
+
+	check_required_callbacks(fbdev);
+
+	r = planes_init(fbdev);
+	if (r)
+		goto cleanup;
+	init_state++;
+
+#ifdef CONFIG_FB_OMAP_DMA_TUNE
+	/* Set DMA priority for EMIFF access to highest */
+	if (cpu_class_is_omap1())
+		omap_set_dma_priority(0, OMAP_DMA_PORT_EMIFF, 15);
+#endif
+
+	r = ctrl_change_mode(fbdev->fb_info[0]);
+	if (r) {
+		dev_err(fbdev->dev, "mode setting failed\n");
+		goto cleanup;
+	}
+
+	/* GFX plane is enabled by default */
+	r = fbdev->ctrl->enable_plane(OMAPFB_PLANE_GFX, 1);
+	if (r)
+		goto cleanup;
+
+	omapfb_set_update_mode(fbdev, manual_update ?
+				   OMAPFB_MANUAL_UPDATE : OMAPFB_AUTO_UPDATE);
+	init_state++;
+
+	r = fbdev->panel->enable(fbdev->panel);
+	if (r)
+		goto cleanup;
+	init_state++;
+
+	r = omapfb_register_sysfs(fbdev);
+	if (r)
+		goto cleanup;
+	init_state++;
+
+	vram = 0;
+	for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
+		r = register_framebuffer(fbdev->fb_info[i]);
+		if (r != 0) {
+			dev_err(fbdev->dev,
+				"registering framebuffer %d failed\n", i);
+			goto cleanup;
+		}
+		vram += fbdev->mem_desc.region[i].size;
+	}
+
+	fbdev->state = OMAPFB_ACTIVE;
+
+	panel = fbdev->panel;
+	phz = panel->pixel_clock * 1000;
+	hhz = phz * 10 / (panel->hfp + panel->x_res + panel->hbp + panel->hsw);
+	vhz = hhz / (panel->vfp + panel->y_res + panel->vbp + panel->vsw);
+
+	omapfb_dev = fbdev;
+
+	pr_info("omapfb: Framebuffer initialized. Total vram %lu planes %d\n",
+			vram, fbdev->mem_desc.region_cnt);
+	pr_info("omapfb: Pixclock %lu kHz hfreq %lu.%lu kHz "
+			"vfreq %lu.%lu Hz\n",
+			phz / 1000, hhz / 10000, hhz % 10, vhz / 10, vhz % 10);
+
+	return 0;
+
+cleanup:
+	omapfb_free_resources(fbdev, init_state);
+
+	return r;
+}
+
+static int omapfb_probe(struct platform_device *pdev)
+{
+	BUG_ON(fbdev_pdev != NULL);
+
+	/* Delay actual initialization until the LCD is registered */
+	fbdev_pdev = pdev;
+	if (fbdev_panel != NULL)
+		omapfb_do_probe(fbdev_pdev, fbdev_panel);
+	return 0;
+}
+
+void omapfb_register_panel(struct lcd_panel *panel)
+{
+	BUG_ON(fbdev_panel != NULL);
+
+	fbdev_panel = panel;
+	if (fbdev_pdev != NULL)
+		omapfb_do_probe(fbdev_pdev, fbdev_panel);
+}
+
+/* Called when the device is being detached from the driver */
+static int omapfb_remove(struct platform_device *pdev)
+{
+	struct omapfb_device *fbdev = platform_get_drvdata(pdev);
+	enum omapfb_state saved_state = fbdev->state;
+
+	/* FIXME: wait till completion of pending events */
+
+	fbdev->state = OMAPFB_DISABLED;
+	omapfb_free_resources(fbdev, saved_state);
+
+	return 0;
+}
+
+/* PM suspend */
+static int omapfb_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+	struct omapfb_device *fbdev = platform_get_drvdata(pdev);
+
+	omapfb_blank(VESA_POWERDOWN, fbdev->fb_info[0]);
+
+	return 0;
+}
+
+/* PM resume */
+static int omapfb_resume(struct platform_device *pdev)
+{
+	struct omapfb_device *fbdev = platform_get_drvdata(pdev);
+
+	omapfb_blank(VESA_NO_BLANKING, fbdev->fb_info[0]);
+	return 0;
+}
+
+static struct platform_driver omapfb_driver = {
+	.probe		= omapfb_probe,
+	.remove		= omapfb_remove,
+	.suspend	= omapfb_suspend,
+	.resume		= omapfb_resume,
+	.driver		= {
+		.name	= MODULE_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+#ifndef MODULE
+
+/* Process kernel command line parameters */
+static int __init omapfb_setup(char *options)
+{
+	char *this_opt = NULL;
+	int r = 0;
+
+	pr_debug("omapfb: options %s\n", options);
+
+	if (!options || !*options)
+		return 0;
+
+	while (!r && (this_opt = strsep(&options, ",")) != NULL) {
+		if (!strncmp(this_opt, "accel", 5))
+			def_accel = 1;
+		else if (!strncmp(this_opt, "vram:", 5)) {
+			char *suffix;
+			unsigned long vram;
+			vram = (simple_strtoul(this_opt + 5, &suffix, 0));
+			switch (suffix[0]) {
+			case '\0':
+				break;
+			case 'm':
+			case 'M':
+				vram *= 1024;
+				/* Fall through */
+			case 'k':
+			case 'K':
+				vram *= 1024;
+				break;
+			default:
+				pr_debug("omapfb: invalid vram suffix %c\n",
+					 suffix[0]);
+				r = -1;
+			}
+			def_vram[def_vram_cnt++] = vram;
+		}
+		else if (!strncmp(this_opt, "vxres:", 6))
+			def_vxres = simple_strtoul(this_opt + 6, NULL, 0);
+		else if (!strncmp(this_opt, "vyres:", 6))
+			def_vyres = simple_strtoul(this_opt + 6, NULL, 0);
+		else if (!strncmp(this_opt, "rotate:", 7))
+			def_rotate = (simple_strtoul(this_opt + 7, NULL, 0));
+		else if (!strncmp(this_opt, "mirror:", 7))
+			def_mirror = (simple_strtoul(this_opt + 7, NULL, 0));
+		else if (!strncmp(this_opt, "manual_update", 13))
+			manual_update = 1;
+		else {
+			pr_debug("omapfb: invalid option\n");
+			r = -1;
+		}
+	}
+
+	return r;
+}
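+
+/*
+ * A hypothetical kernel command line exercising the options parsed above
+ * (fb_get_options() hands over everything after "video=omapfb:"):
+ *
+ *	video=omapfb:accel,vram:2M,vxres:240,vyres:640,rotate:90,manual_update
+ *
+ * The option names match the strncmp() checks in omapfb_setup(); the values
+ * are examples only.
+ */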
+
+#endif
+
+/* Register both the driver and the device */
+static int __init omapfb_init(void)
+{
+#ifndef MODULE
+	char *option;
+
+	if (fb_get_options("omapfb", &option))
+		return -ENODEV;
+	omapfb_setup(option);
+#endif
+	/* Register the driver with LDM */
+	if (platform_driver_register(&omapfb_driver)) {
+		pr_debug("failed to register omapfb driver\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void __exit omapfb_cleanup(void)
+{
+	platform_driver_unregister(&omapfb_driver);
+}
+
+module_param_named(accel, def_accel, uint, 0664);
+module_param_array_named(vram, def_vram, ulong, &def_vram_cnt, 0664);
+module_param_named(vxres, def_vxres, long, 0664);
+module_param_named(vyres, def_vyres, long, 0664);
+module_param_named(rotate, def_rotate, uint, 0664);
+module_param_named(mirror, def_mirror, uint, 0664);
+module_param_named(manual_update, manual_update, bool, 0664);
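+
+/*
+ * When built as a module, the same defaults can be given as module
+ * parameters, for example (values illustrative only):
+ *
+ *	modprobe omapfb vram=1048576,1048576 vxres=240 vyres=320 rotate=90
+ *
+ * Unlike the vram: boot option, the vram module parameter takes plain byte
+ * counts, one entry per plane.
+ */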
+
+module_init(omapfb_init);
+module_exit(omapfb_cleanup);
+
+MODULE_DESCRIPTION("TI OMAP framebuffer driver");
+MODULE_AUTHOR("Imre Deak <imre.deak@nokia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
new file mode 100644
index 0000000..2b42698
--- /dev/null
+++ b/drivers/video/omap/rfbi.c
@@ -0,0 +1,588 @@
+/*
+ * OMAP2 Remote Frame Buffer Interface support
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ *	   Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/arch/omapfb.h>
+
+#include "dispc.h"
+
+/* To work around an RFBI transfer rate limitation */
+#define OMAP_RFBI_RATE_LIMIT	1
+
+#define RFBI_BASE		0x48050800
+#define RFBI_REVISION		0x0000
+#define RFBI_SYSCONFIG		0x0010
+#define RFBI_SYSSTATUS		0x0014
+#define RFBI_CONTROL		0x0040
+#define RFBI_PIXEL_CNT		0x0044
+#define RFBI_LINE_NUMBER	0x0048
+#define RFBI_CMD		0x004c
+#define RFBI_PARAM		0x0050
+#define RFBI_DATA		0x0054
+#define RFBI_READ		0x0058
+#define RFBI_STATUS		0x005c
+#define RFBI_CONFIG0		0x0060
+#define RFBI_ONOFF_TIME0	0x0064
+#define RFBI_CYCLE_TIME0	0x0068
+#define RFBI_DATA_CYCLE1_0	0x006c
+#define RFBI_DATA_CYCLE2_0	0x0070
+#define RFBI_DATA_CYCLE3_0	0x0074
+#define RFBI_VSYNC_WIDTH	0x0090
+#define RFBI_HSYNC_WIDTH	0x0094
+
+#define DISPC_BASE		0x48050400
+#define DISPC_CONTROL		0x0040
+
+static struct {
+	u32		base;
+	void		(*lcdc_callback)(void *data);
+	void		*lcdc_callback_data;
+	unsigned long	l4_khz;
+	int		bits_per_cycle;
+	struct omapfb_device *fbdev;
+	struct clk	*dss_ick;
+	struct clk	*dss1_fck;
+	unsigned	tearsync_pin_cnt;
+	unsigned	tearsync_mode;
+} rfbi;
+
+static inline void rfbi_write_reg(int idx, u32 val)
+{
+	__raw_writel(val, rfbi.base + idx);
+}
+
+static inline u32 rfbi_read_reg(int idx)
+{
+	return __raw_readl(rfbi.base + idx);
+}
+
+static int rfbi_get_clocks(void)
+{
+	if (IS_ERR((rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "dss_ick")))) {
+		dev_err(rfbi.fbdev->dev, "can't get dss_ick");
+		return PTR_ERR(rfbi.dss_ick);
+	}
+
+	if (IS_ERR((rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck")))) {
+		dev_err(rfbi.fbdev->dev, "can't get dss1_fck");
+		clk_put(rfbi.dss_ick);
+		return PTR_ERR(rfbi.dss1_fck);
+	}
+
+	return 0;
+}
+
+static void rfbi_put_clocks(void)
+{
+	clk_put(rfbi.dss1_fck);
+	clk_put(rfbi.dss_ick);
+}
+
+static void rfbi_enable_clocks(int enable)
+{
+	if (enable) {
+		clk_enable(rfbi.dss_ick);
+		clk_enable(rfbi.dss1_fck);
+	} else {
+		clk_disable(rfbi.dss1_fck);
+		clk_disable(rfbi.dss_ick);
+	}
+}
+
+
+#ifdef VERBOSE
+static void rfbi_print_timings(void)
+{
+	u32 l;
+	u32 time;
+
+	l = rfbi_read_reg(RFBI_CONFIG0);
+	time = 1000000000 / rfbi.l4_khz;
+	if (l & (1 << 4))
+		time *= 2;
+
+	dev_dbg(rfbi.fbdev->dev, "Tick time %u ps\n", time);
+	l = rfbi_read_reg(RFBI_ONOFF_TIME0);
+	dev_dbg(rfbi.fbdev->dev,
+		"CSONTIME %d, CSOFFTIME %d, WEONTIME %d, WEOFFTIME %d, "
+		"REONTIME %d, REOFFTIME %d\n",
+		l & 0x0f, (l >> 4) & 0x3f, (l >> 10) & 0x0f, (l >> 14) & 0x3f,
+		(l >> 20) & 0x0f, (l >> 24) & 0x3f);
+
+	l = rfbi_read_reg(RFBI_CYCLE_TIME0);
+	dev_dbg(rfbi.fbdev->dev,
+		"WECYCLETIME %d, RECYCLETIME %d, CSPULSEWIDTH %d, "
+		"ACCESSTIME %d\n",
+		(l & 0x3f), (l >> 6) & 0x3f, (l >> 12) & 0x3f,
+		(l >> 22) & 0x3f);
+}
+#else
+static void rfbi_print_timings(void) {}
+#endif
+
+static void rfbi_set_timings(const struct extif_timings *t)
+{
+	u32 l;
+
+	BUG_ON(!t->converted);
+
+	rfbi_enable_clocks(1);
+	rfbi_write_reg(RFBI_ONOFF_TIME0, t->tim[0]);
+	rfbi_write_reg(RFBI_CYCLE_TIME0, t->tim[1]);
+
+	l = rfbi_read_reg(RFBI_CONFIG0);
+	l &= ~(1 << 4);
+	l |= (t->tim[2] ? 1 : 0) << 4;
+	rfbi_write_reg(RFBI_CONFIG0, l);
+
+	rfbi_print_timings();
+	rfbi_enable_clocks(0);
+}
+
+static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
+{
+	*clk_period = 1000000000 / rfbi.l4_khz;
+	*max_clk_div = 2;
+}
+
+static int ps_to_rfbi_ticks(int time, int div)
+{
+	unsigned long tick_ps;
+	int ret;
+
+	/* Calculate in picosecs to yield more exact results */
+	tick_ps = 1000000000 / (rfbi.l4_khz) * div;
+
+	ret = (time + tick_ps - 1) / tick_ps;
+
+	return ret;
+}
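+
+/*
+ * A rough example of the rounding above, assuming a hypothetical 100 MHz L4
+ * clock (l4_khz = 100000): one tick is 1000000000 / 100000 = 10000 ps, so a
+ * requested 25000 ps with div = 1 becomes (25000 + 10000 - 1) / 10000 = 3
+ * ticks, i.e. times are always rounded up to whole interface clock ticks.
+ */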
+
+#ifdef OMAP_RFBI_RATE_LIMIT
+static unsigned long rfbi_get_max_tx_rate(void)
+{
+	unsigned long	l4_rate, dss1_rate;
+	int		min_l4_ticks = 0;
+	int		i;
+
+	/* According to TI this can't be calculated so make the
+	 * adjustments for a couple of known frequencies and warn for
+	 * others.
+	 */
+	static const struct {
+		unsigned long l4_clk;		/* HZ */
+		unsigned long dss1_clk;		/* HZ */
+		unsigned long min_l4_ticks;
+	} ftab[] = {
+		{ 55,	132,	7, },		/* 7.86 MPix/s */
+		{ 110,	110,	12, },		/* 9.16 MPix/s */
+		{ 110,	132,	10, },		/* 11   Mpix/s */
+		{ 120,	120,	10, },		/* 12   Mpix/s */
+		{ 133,	133,	10, },		/* 13.3 Mpix/s */
+	};
+
+	l4_rate = rfbi.l4_khz / 1000;
+	dss1_rate = clk_get_rate(rfbi.dss1_fck) / 1000000;
+
+	for (i = 0; i < ARRAY_SIZE(ftab); i++) {
+		/* Use a window instead of an exact match, to account
+		 * for different DPLL multiplier / divider pairs.
+		 */
+		if (abs(ftab[i].l4_clk - l4_rate) < 3 &&
+		    abs(ftab[i].dss1_clk - dss1_rate) < 3) {
+			min_l4_ticks = ftab[i].min_l4_ticks;
+			break;
+		}
+	}
+	if (i == ARRAY_SIZE(ftab)) {
+		/* Can't be sure; return the maximum, non-rate-limited value
+		 * anyway. This might cause a problem only for the tearing
+		 * synchronisation.
+		 */
+		dev_err(rfbi.fbdev->dev,
+			"can't determine maximum RFBI transfer rate\n");
+		return rfbi.l4_khz * 1000;
+	}
+	return rfbi.l4_khz * 1000 / min_l4_ticks;
+}
+#else
+static int rfbi_get_max_tx_rate(void)
+{
+	return rfbi.l4_khz * 1000;
+}
+#endif
+
+
+static int rfbi_convert_timings(struct extif_timings *t)
+{
+	u32 l;
+	int reon, reoff, weon, weoff, cson, csoff, cs_pulse;
+	int actim, recyc, wecyc;
+	int div = t->clk_div;
+
+	if (div <= 0 || div > 2)
+		return -1;
+
+	/* Make sure that after conversion it still holds that:
+	 * weoff > weon, reoff > reon, recyc >= reoff, wecyc >= weoff,
+	 * csoff > cson, csoff >= max(weoff, reoff), actim > reon
+	 */
+	weon = ps_to_rfbi_ticks(t->we_on_time, div);
+	weoff = ps_to_rfbi_ticks(t->we_off_time, div);
+	if (weoff <= weon)
+		weoff = weon + 1;
+	if (weon > 0x0f)
+		return -1;
+	if (weoff > 0x3f)
+		return -1;
+
+	reon = ps_to_rfbi_ticks(t->re_on_time, div);
+	reoff = ps_to_rfbi_ticks(t->re_off_time, div);
+	if (reoff <= reon)
+		reoff = reon + 1;
+	if (reon > 0x0f)
+		return -1;
+	if (reoff > 0x3f)
+		return -1;
+
+	cson = ps_to_rfbi_ticks(t->cs_on_time, div);
+	csoff = ps_to_rfbi_ticks(t->cs_off_time, div);
+	if (csoff <= cson)
+		csoff = cson + 1;
+	if (csoff < max(weoff, reoff))
+		csoff = max(weoff, reoff);
+	if (cson > 0x0f)
+		return -1;
+	if (csoff > 0x3f)
+		return -1;
+
+	l =  cson;
+	l |= csoff << 4;
+	l |= weon  << 10;
+	l |= weoff << 14;
+	l |= reon  << 20;
+	l |= reoff << 24;
+
+	t->tim[0] = l;
+
+	actim = ps_to_rfbi_ticks(t->access_time, div);
+	if (actim <= reon)
+		actim = reon + 1;
+	if (actim > 0x3f)
+		return -1;
+
+	wecyc = ps_to_rfbi_ticks(t->we_cycle_time, div);
+	if (wecyc < weoff)
+		wecyc = weoff;
+	if (wecyc > 0x3f)
+		return -1;
+
+	recyc = ps_to_rfbi_ticks(t->re_cycle_time, div);
+	if (recyc < reoff)
+		recyc = reoff;
+	if (recyc > 0x3f)
+		return -1;
+
+	cs_pulse = ps_to_rfbi_ticks(t->cs_pulse_width, div);
+	if (cs_pulse > 0x3f)
+		return -1;
+
+	l =  wecyc;
+	l |= recyc    << 6;
+	l |= cs_pulse << 12;
+	l |= actim    << 22;
+
+	t->tim[1] = l;
+
+	t->tim[2] = div - 1;
+
+	t->converted = 1;
+
+	return 0;
+}
+
+static int rfbi_setup_tearsync(unsigned pin_cnt,
+			       unsigned hs_pulse_time, unsigned vs_pulse_time,
+			       int hs_pol_inv, int vs_pol_inv, int extif_div)
+{
+	int hs, vs;
+	int min;
+	u32 l;
+
+	if (pin_cnt != 1 && pin_cnt != 2)
+		return -EINVAL;
+
+	hs = ps_to_rfbi_ticks(hs_pulse_time, 1);
+	vs = ps_to_rfbi_ticks(vs_pulse_time, 1);
+	if (hs < 2)
+		return -EDOM;
+	if (pin_cnt == 2)
+		min = 2;
+	else
+		min = 4;
+	if (vs < min)
+		return -EDOM;
+	if (vs == hs)
+		return -EINVAL;
+	rfbi.tearsync_pin_cnt = pin_cnt;
+	dev_dbg(rfbi.fbdev->dev,
+		"setup_tearsync: pins %d hs %d vs %d hs_inv %d vs_inv %d\n",
+		pin_cnt, hs, vs, hs_pol_inv, vs_pol_inv);
+
+	rfbi_enable_clocks(1);
+	rfbi_write_reg(RFBI_HSYNC_WIDTH, hs);
+	rfbi_write_reg(RFBI_VSYNC_WIDTH, vs);
+
+	l = rfbi_read_reg(RFBI_CONFIG0);
+	if (hs_pol_inv)
+		l &= ~(1 << 21);
+	else
+		l |= 1 << 21;
+	if (vs_pol_inv)
+		l &= ~(1 << 20);
+	else
+		l |= 1 << 20;
+	/* commit the polarity configuration */
+	rfbi_write_reg(RFBI_CONFIG0, l);
+	rfbi_enable_clocks(0);
+
+	return 0;
+}
+
+static int rfbi_enable_tearsync(int enable, unsigned line)
+{
+	u32 l;
+
+	dev_dbg(rfbi.fbdev->dev, "tearsync %d line %d mode %d\n",
+		enable, line, rfbi.tearsync_mode);
+	if (line > (1 << 11) - 1)
+		return -EINVAL;
+
+	rfbi_enable_clocks(1);
+	l = rfbi_read_reg(RFBI_CONFIG0);
+	l &= ~(0x3 << 2);
+	if (enable) {
+		rfbi.tearsync_mode = rfbi.tearsync_pin_cnt;
+		l |= rfbi.tearsync_mode << 2;
+	} else
+		rfbi.tearsync_mode = 0;
+	rfbi_write_reg(RFBI_CONFIG0, l);
+	rfbi_write_reg(RFBI_LINE_NUMBER, line);
+	rfbi_enable_clocks(0);
+
+	return 0;
+}
+
+static void rfbi_write_command(const void *buf, unsigned int len)
+{
+	rfbi_enable_clocks(1);
+	if (rfbi.bits_per_cycle == 16) {
+		const u16 *w = buf;
+		BUG_ON(len & 1);
+		for (; len; len -= 2)
+			rfbi_write_reg(RFBI_CMD, *w++);
+	} else {
+		const u8 *b = buf;
+		BUG_ON(rfbi.bits_per_cycle != 8);
+		for (; len; len--)
+			rfbi_write_reg(RFBI_CMD, *b++);
+	}
+	rfbi_enable_clocks(0);
+}
+
+static void rfbi_read_data(void *buf, unsigned int len)
+{
+	rfbi_enable_clocks(1);
+	if (rfbi.bits_per_cycle == 16) {
+		u16 *w = buf;
+		BUG_ON(len & 1);
+		for (; len; len -= 2) {
+			rfbi_write_reg(RFBI_READ, 0);
+			*w++ = rfbi_read_reg(RFBI_READ);
+		}
+	} else {
+		u8 *b = buf;
+		BUG_ON(rfbi.bits_per_cycle != 8);
+		for (; len; len--) {
+			rfbi_write_reg(RFBI_READ, 0);
+			*b++ = rfbi_read_reg(RFBI_READ);
+		}
+	}
+	rfbi_enable_clocks(0);
+}
+
+static void rfbi_write_data(const void *buf, unsigned int len)
+{
+	rfbi_enable_clocks(1);
+	if (rfbi.bits_per_cycle == 16) {
+		const u16 *w = buf;
+		BUG_ON(len & 1);
+		for (; len; len -= 2)
+			rfbi_write_reg(RFBI_PARAM, *w++);
+	} else {
+		const u8 *b = buf;
+		BUG_ON(rfbi.bits_per_cycle != 8);
+		for (; len; len--)
+			rfbi_write_reg(RFBI_PARAM, *b++);
+	}
+	rfbi_enable_clocks(0);
+}
+
+static void rfbi_transfer_area(int width, int height,
+				void (callback)(void * data), void *data)
+{
+	u32 w;
+
+	BUG_ON(callback == NULL);
+
+	rfbi_enable_clocks(1);
+	omap_dispc_set_lcd_size(width, height);
+
+	rfbi.lcdc_callback = callback;
+	rfbi.lcdc_callback_data = data;
+
+	rfbi_write_reg(RFBI_PIXEL_CNT, width * height);
+
+	w = rfbi_read_reg(RFBI_CONTROL);
+	w |= 1;				/* enable */
+	if (!rfbi.tearsync_mode)
+		w |= 1 << 4;		/* internal trigger, reset by HW */
+	rfbi_write_reg(RFBI_CONTROL, w);
+
+	omap_dispc_enable_lcd_out(1);
+}
+
+static inline void _stop_transfer(void)
+{
+	u32 w;
+
+	w = rfbi_read_reg(RFBI_CONTROL);
+	rfbi_write_reg(RFBI_CONTROL, w & ~(1 << 0));
+	rfbi_enable_clocks(0);
+}
+
+static void rfbi_dma_callback(void *data)
+{
+	_stop_transfer();
+	rfbi.lcdc_callback(rfbi.lcdc_callback_data);
+}
+
+static void rfbi_set_bits_per_cycle(int bpc)
+{
+	u32 l;
+
+	rfbi_enable_clocks(1);
+	l = rfbi_read_reg(RFBI_CONFIG0);
+	l &= ~(0x03 << 0);
+
+	switch (bpc) {
+	case 8:
+		break;
+	case 16:
+		l |= 3;
+		break;
+	default:
+		BUG();
+	}
+	rfbi_write_reg(RFBI_CONFIG0, l);
+	rfbi.bits_per_cycle = bpc;
+	rfbi_enable_clocks(0);
+}
+
+static int rfbi_init(struct omapfb_device *fbdev)
+{
+	u32 l;
+	int r;
+
+	rfbi.fbdev = fbdev;
+	rfbi.base = io_p2v(RFBI_BASE);
+
+	if ((r = rfbi_get_clocks()) < 0)
+		return r;
+	rfbi_enable_clocks(1);
+
+	rfbi.l4_khz = clk_get_rate(rfbi.dss_ick) / 1000;
+
+	/* Reset */
+	rfbi_write_reg(RFBI_SYSCONFIG, 1 << 1);
+	while (!(rfbi_read_reg(RFBI_SYSSTATUS) & (1 << 0)));
+
+	l = rfbi_read_reg(RFBI_SYSCONFIG);
+	/* Enable autoidle and smart-idle */
+	l |= (1 << 0) | (2 << 3);
+	rfbi_write_reg(RFBI_SYSCONFIG, l);
+
+	/* 16-bit interface, ITE trigger mode, 16-bit data */
+	l = (0x03 << 0) | (0x00 << 2) | (0x01 << 5) | (0x02 << 7);
+	l |= (0 << 9) | (1 << 20) | (1 << 21);
+	rfbi_write_reg(RFBI_CONFIG0, l);
+
+	rfbi_write_reg(RFBI_DATA_CYCLE1_0, 0x00000010);
+
+	l = rfbi_read_reg(RFBI_CONTROL);
+	/* Select CS0, clear bypass mode */
+	l = (0x01 << 2);
+	rfbi_write_reg(RFBI_CONTROL, l);
+
+	if ((r = omap_dispc_request_irq(rfbi_dma_callback, NULL)) < 0) {
+		dev_err(fbdev->dev, "can't get DISPC irq\n");
+		rfbi_enable_clocks(0);
+		return r;
+	}
+
+	l = rfbi_read_reg(RFBI_REVISION);
+	pr_info("omapfb: RFBI version %d.%d initialized\n",
+		(l >> 4) & 0x0f, l & 0x0f);
+
+	rfbi_enable_clocks(0);
+
+	return 0;
+}
+
+static void rfbi_cleanup(void)
+{
+	omap_dispc_free_irq();
+	rfbi_put_clocks();
+}
+
+const struct lcd_ctrl_extif omap2_ext_if = {
+	.init			= rfbi_init,
+	.cleanup		= rfbi_cleanup,
+	.get_clk_info		= rfbi_get_clk_info,
+	.get_max_tx_rate	= rfbi_get_max_tx_rate,
+	.set_bits_per_cycle	= rfbi_set_bits_per_cycle,
+	.convert_timings	= rfbi_convert_timings,
+	.set_timings		= rfbi_set_timings,
+	.write_command		= rfbi_write_command,
+	.read_data		= rfbi_read_data,
+	.write_data		= rfbi_write_data,
+	.transfer_area		= rfbi_transfer_area,
+	.setup_tearsync		= rfbi_setup_tearsync,
+	.enable_tearsync	= rfbi_enable_tearsync,
+
+	.max_transmit_size	= (u32) ~0,
+};
+
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
new file mode 100644
index 0000000..81dbcf5
--- /dev/null
+++ b/drivers/video/omap/sossi.c
@@ -0,0 +1,686 @@
+/*
+ * OMAP1 Special OptimiSed Screen Interface support
+ *
+ * Copyright (C) 2004-2005 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+
+#include "lcdc.h"
+
+#define MODULE_NAME		"omapfb-sossi"
+
+#define OMAP_SOSSI_BASE         0xfffbac00
+#define SOSSI_ID_REG		0x00
+#define SOSSI_INIT1_REG		0x04
+#define SOSSI_INIT2_REG		0x08
+#define SOSSI_INIT3_REG		0x0c
+#define SOSSI_FIFO_REG		0x10
+#define SOSSI_REOTABLE_REG	0x14
+#define SOSSI_TEARING_REG	0x18
+#define SOSSI_INIT1B_REG	0x1c
+#define SOSSI_FIFOB_REG		0x20
+
+#define DMA_GSCR          0xfffedc04
+#define DMA_LCD_CCR       0xfffee3c2
+#define DMA_LCD_CTRL      0xfffee3c4
+#define DMA_LCD_LCH_CTRL  0xfffee3ea
+
+#define CONF_SOSSI_RESET_R      (1 << 23)
+
+#define RD_ACCESS		0
+#define WR_ACCESS		1
+
+#define SOSSI_MAX_XMIT_BYTES	(512 * 1024)
+
+static struct {
+	void __iomem	*base;
+	struct clk	*fck;
+	unsigned long	fck_hz;
+	spinlock_t	lock;
+	int		bus_pick_count;
+	int		bus_pick_width;
+	int		tearsync_mode;
+	int		tearsync_line;
+	void		(*lcdc_callback)(void *data);
+	void		*lcdc_callback_data;
+	int		vsync_dma_pending;
+	/* timing for read and write access */
+	int		clk_div;
+	u8		clk_tw0[2];
+	u8		clk_tw1[2];
+	/*
+	 * if last_access is the same as current we don't have to change
+	 * the timings
+	 */
+	int		last_access;
+
+	struct omapfb_device	*fbdev;
+} sossi;
+
+static inline u32 sossi_read_reg(int reg)
+{
+	return readl(sossi.base + reg);
+}
+
+static inline u16 sossi_read_reg16(int reg)
+{
+	return readw(sossi.base + reg);
+}
+
+static inline u8 sossi_read_reg8(int reg)
+{
+	return readb(sossi.base + reg);
+}
+
+static inline void sossi_write_reg(int reg, u32 value)
+{
+	writel(value, sossi.base + reg);
+}
+
+static inline void sossi_write_reg16(int reg, u16 value)
+{
+	writew(value, sossi.base + reg);
+}
+
+static inline void sossi_write_reg8(int reg, u8 value)
+{
+	writeb(value, sossi.base + reg);
+}
+
+static void sossi_set_bits(int reg, u32 bits)
+{
+	sossi_write_reg(reg, sossi_read_reg(reg) | bits);
+}
+
+static void sossi_clear_bits(int reg, u32 bits)
+{
+	sossi_write_reg(reg, sossi_read_reg(reg) & ~bits);
+}
+
+#define HZ_TO_PS(x)	(1000000000 / (x / 1000))
+
+static u32 ps_to_sossi_ticks(u32 ps, int div)
+{
+	u32 clk_period = HZ_TO_PS(sossi.fck_hz) * div;
+	return (clk_period + ps - 1) / clk_period;
+}
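+
+/*
+ * For a hypothetical 48 MHz SOSSI functional clock, HZ_TO_PS(48000000) =
+ * 1000000000 / 48000 = 20833 ps per tick, so e.g. a 50000 ps access time
+ * with div = 1 converts to (20833 + 50000 - 1) / 20833 = 3 SOSSI ticks.
+ */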
+
+static int calc_rd_timings(struct extif_timings *t)
+{
+	u32 tw0, tw1;
+	int reon, reoff, recyc, actim;
+	int div = t->clk_div;
+
+	/*
+	 * Make sure that after conversion it still holds that:
+	 * reoff > reon, recyc >= reoff, actim > reon
+	 */
+	reon = ps_to_sossi_ticks(t->re_on_time, div);
+	/* reon will be exactly one sossi tick */
+	if (reon > 1)
+		return -1;
+
+	reoff = ps_to_sossi_ticks(t->re_off_time, div);
+
+	if (reoff <= reon)
+		reoff = reon + 1;
+
+	tw0 = reoff - reon;
+	if (tw0 > 0x10)
+		return -1;
+
+	recyc = ps_to_sossi_ticks(t->re_cycle_time, div);
+	if (recyc <= reoff)
+		recyc = reoff + 1;
+
+	tw1 = recyc - tw0;
+	/* values less than 3 result in the SOSSI block resetting itself */
+	if (tw1 < 3)
+		tw1 = 3;
+	if (tw1 > 0x40)
+		return -1;
+
+	actim = ps_to_sossi_ticks(t->access_time, div);
+	if (actim < reoff)
+		actim++;
+	/*
+	 * access time (data hold time) will be exactly one sossi
+	 * tick
+	 */
+	if (actim - reoff > 1)
+		return -1;
+
+	t->tim[0] = tw0 - 1;
+	t->tim[1] = tw1 - 1;
+
+	return 0;
+}
+
+static int calc_wr_timings(struct extif_timings *t)
+{
+	u32 tw0, tw1;
+	int weon, weoff, wecyc;
+	int div = t->clk_div;
+
+	/*
+	 * Make sure that after conversion it still holds that:
+	 * weoff > weon, wecyc >= weoff
+	 */
+	weon = ps_to_sossi_ticks(t->we_on_time, div);
+	/* weon will be exactly one sossi tick */
+	if (weon > 1)
+		return -1;
+
+	weoff = ps_to_sossi_ticks(t->we_off_time, div);
+	if (weoff <= weon)
+		weoff = weon + 1;
+	tw0 = weoff - weon;
+	if (tw0 > 0x10)
+		return -1;
+
+	wecyc = ps_to_sossi_ticks(t->we_cycle_time, div);
+	if (wecyc <= weoff)
+		wecyc = weoff + 1;
+
+	tw1 = wecyc - tw0;
+	/* values less than 3 result in the SOSSI block resetting itself */
+	if (tw1 < 3)
+		tw1 = 3;
+	if (tw1 > 0x40)
+		return -1;
+
+	t->tim[2] = tw0 - 1;
+	t->tim[3] = tw1 - 1;
+
+	return 0;
+}
+
+static void _set_timing(int div, int tw0, int tw1)
+{
+	u32 l;
+
+#ifdef VERBOSE
+	dev_dbg(sossi.fbdev->dev, "Using TW0 = %d, TW1 = %d, div = %d\n",
+		 tw0 + 1, tw1 + 1, div);
+#endif
+
+	clk_set_rate(sossi.fck, sossi.fck_hz / div);
+	clk_enable(sossi.fck);
+	l = sossi_read_reg(SOSSI_INIT1_REG);
+	l &= ~((0x0f << 20) | (0x3f << 24));
+	l |= (tw0 << 20) | (tw1 << 24);
+	sossi_write_reg(SOSSI_INIT1_REG, l);
+	clk_disable(sossi.fck);
+}
+
+static void _set_bits_per_cycle(int bus_pick_count, int bus_pick_width)
+{
+	u32 l;
+
+	l = sossi_read_reg(SOSSI_INIT3_REG);
+	l &= ~0x3ff;
+	l |= ((bus_pick_count - 1) << 5) | ((bus_pick_width - 1) & 0x1f);
+	sossi_write_reg(SOSSI_INIT3_REG, l);
+}
+
+static void _set_tearsync_mode(int mode, unsigned line)
+{
+	u32 l;
+
+	l = sossi_read_reg(SOSSI_TEARING_REG);
+	l &= ~(((1 << 11) - 1) << 15);
+	l |= line << 15;
+	l &= ~(0x3 << 26);
+	l |= mode << 26;
+	sossi_write_reg(SOSSI_TEARING_REG, l);
+	if (mode)
+		sossi_set_bits(SOSSI_INIT2_REG, 1 << 6);	/* TE logic */
+	else
+		sossi_clear_bits(SOSSI_INIT2_REG, 1 << 6);
+}
+
+static inline void set_timing(int access)
+{
+	if (access != sossi.last_access) {
+		sossi.last_access = access;
+		_set_timing(sossi.clk_div,
+			    sossi.clk_tw0[access], sossi.clk_tw1[access]);
+	}
+}
+
+static void sossi_start_transfer(void)
+{
+	/* WE */
+	sossi_clear_bits(SOSSI_INIT2_REG, 1 << 4);
+	/* CS active low */
+	sossi_clear_bits(SOSSI_INIT1_REG, 1 << 30);
+}
+
+static void sossi_stop_transfer(void)
+{
+	/* WE */
+	sossi_set_bits(SOSSI_INIT2_REG, 1 << 4);
+	/* CS active low */
+	sossi_set_bits(SOSSI_INIT1_REG, 1 << 30);
+}
+
+static void wait_end_of_write(void)
+{
+	/* Make sure any pending writes have completed before proceeding */
+	while (!(sossi_read_reg(SOSSI_INIT2_REG) & (1 << 3)));
+}
+
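+/* Feed the transfer FIFO, using the widest accesses the length allows. */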
+static void send_data(const void *data, unsigned int len)
+{
+	while (len >= 4) {
+		sossi_write_reg(SOSSI_FIFO_REG, *(const u32 *) data);
+		len -= 4;
+		data += 4;
+	}
+	while (len >= 2) {
+		sossi_write_reg16(SOSSI_FIFO_REG, *(const u16 *) data);
+		len -= 2;
+		data += 2;
+	}
+	while (len) {
+		sossi_write_reg8(SOSSI_FIFO_REG, *(const u8 *) data);
+		len--;
+		data++;
+	}
+}
+
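+/*
+ * Program the number of bus cycles needed to transfer 'len' bytes with
+ * the currently configured bus pick width.
+ */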
+static void set_cycles(unsigned int len)
+{
+	unsigned long nr_cycles = len / (sossi.bus_pick_width / 8);
+
+	BUG_ON((nr_cycles - 1) & ~0x3ffff);
+
+	sossi_clear_bits(SOSSI_INIT1_REG, 0x3ffff);
+	sossi_set_bits(SOSSI_INIT1_REG, (nr_cycles - 1) & 0x3ffff);
+}
+
+static int sossi_convert_timings(struct extif_timings *t)
+{
+	int r = 0;
+	int div = t->clk_div;
+
+	t->converted = 0;
+
+	if (div <= 0 || div > 8)
+		return -1;
+
+	/* no CS on SOSSI, so ignore cson, csoff, cs_pulsewidth */
+	if ((r = calc_rd_timings(t)) < 0)
+		return r;
+
+	if ((r = calc_wr_timings(t)) < 0)
+		return r;
+
+	t->tim[4] = div;
+
+	t->converted = 1;
+
+	return 0;
+}
+
+static void sossi_set_timings(const struct extif_timings *t)
+{
+	BUG_ON(!t->converted);
+
+	sossi.clk_tw0[RD_ACCESS] = t->tim[0];
+	sossi.clk_tw1[RD_ACCESS] = t->tim[1];
+
+	sossi.clk_tw0[WR_ACCESS] = t->tim[2];
+	sossi.clk_tw1[WR_ACCESS] = t->tim[3];
+
+	sossi.clk_div = t->tim[4];
+}
+
+static void sossi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
+{
+	*clk_period = HZ_TO_PS(sossi.fck_hz);
+	*max_clk_div = 8;
+}
+
+static void sossi_set_bits_per_cycle(int bpc)
+{
+	int bus_pick_count, bus_pick_width;
+
+	/*
+	 * We explicitly set the bus_pick_count as well, although
+	 * with remapping/reordering disabled it will be calculated by HW
+	 * as (32 / bus_pick_width).
+	 */
+	switch (bpc) {
+	case 8:
+		bus_pick_count = 4;
+		bus_pick_width = 8;
+		break;
+	case 16:
+		bus_pick_count = 2;
+		bus_pick_width = 16;
+		break;
+	default:
+		BUG();
+		return;
+	}
+	sossi.bus_pick_width = bus_pick_width;
+	sossi.bus_pick_count = bus_pick_count;
+}
+
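+/*
+ * Program the tearing-effect sync pulse widths.  HS is stored directly
+ * in ticks, VS in units of 8 ticks; both go into the TEARING register
+ * together with the requested polarities.
+ */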
+static int sossi_setup_tearsync(unsigned pin_cnt,
+				unsigned hs_pulse_time, unsigned vs_pulse_time,
+				int hs_pol_inv, int vs_pol_inv, int div)
+{
+	int hs, vs;
+	u32 l;
+
+	if (pin_cnt != 1 || div < 1 || div > 8)
+		return -EINVAL;
+
+	hs = ps_to_sossi_ticks(hs_pulse_time, div);
+	vs = ps_to_sossi_ticks(vs_pulse_time, div);
+	if (vs < 8 || vs <= hs || vs >= (1 << 12))
+		return -EDOM;
+	vs /= 8;
+	vs--;
+	if (hs > 8)
+		hs = 8;
+	if (hs)
+		hs--;
+
+	dev_dbg(sossi.fbdev->dev,
+		"setup_tearsync: hs %d vs %d hs_inv %d vs_inv %d\n",
+		hs, vs, hs_pol_inv, vs_pol_inv);
+
+	clk_enable(sossi.fck);
+	l = sossi_read_reg(SOSSI_TEARING_REG);
+	l &= ~((1 << 15) - 1);
+	l |= vs << 3;
+	l |= hs;
+	if (hs_pol_inv)
+		l |= 1 << 29;
+	else
+		l &= ~(1 << 29);
+	if (vs_pol_inv)
+		l |= 1 << 28;
+	else
+		l &= ~(1 << 28);
+	sossi_write_reg(SOSSI_TEARING_REG, l);
+	clk_disable(sossi.fck);
+
+	return 0;
+}
+
+static int sossi_enable_tearsync(int enable, unsigned line)
+{
+	int mode;
+
+	dev_dbg(sossi.fbdev->dev, "tearsync %d line %d\n", enable, line);
+	if (line >= 1 << 11)
+		return -EINVAL;
+	if (enable) {
+		if (line)
+			mode = 2;		/* HS or VS */
+		else
+			mode = 3;		/* VS only */
+	} else
+		mode = 0;
+	sossi.tearsync_line = line;
+	sossi.tearsync_mode = mode;
+
+	return 0;
+}
+
+static void sossi_write_command(const void *data, unsigned int len)
+{
+	clk_enable(sossi.fck);
+	set_timing(WR_ACCESS);
+	_set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
+	/* CMD#/DATA */
+	sossi_clear_bits(SOSSI_INIT1_REG, 1 << 18);
+	set_cycles(len);
+	sossi_start_transfer();
+	send_data(data, len);
+	sossi_stop_transfer();
+	wait_end_of_write();
+	clk_disable(sossi.fck);
+}
+
+static void sossi_write_data(const void *data, unsigned int len)
+{
+	clk_enable(sossi.fck);
+	set_timing(WR_ACCESS);
+	_set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
+	/* CMD#/DATA */
+	sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
+	set_cycles(len);
+	sossi_start_transfer();
+	send_data(data, len);
+	sossi_stop_transfer();
+	wait_end_of_write();
+	clk_disable(sossi.fck);
+}
+
+static void sossi_transfer_area(int width, int height,
+				void (callback)(void *data), void *data)
+{
+	BUG_ON(callback == NULL);
+
+	sossi.lcdc_callback = callback;
+	sossi.lcdc_callback_data = data;
+
+	clk_enable(sossi.fck);
+	set_timing(WR_ACCESS);
+	_set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
+	_set_tearsync_mode(sossi.tearsync_mode, sossi.tearsync_line);
+	/* CMD#/DATA */
+	sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
+	set_cycles(width * height * sossi.bus_pick_width / 8);
+
+	sossi_start_transfer();
+	if (sossi.tearsync_mode) {
+		/*
+		 * Wait for the sync signal and only then start the
+		 * transfer. We don't seem to be able to use HW-synchronized
+		 * DMA for this, since the LCD DMA shows huge latencies,
+		 * as if it ignored some of the DMA requests from SoSSI.
+		 */
+		unsigned long flags;
+
+		spin_lock_irqsave(&sossi.lock, flags);
+		sossi.vsync_dma_pending++;
+		spin_unlock_irqrestore(&sossi.lock, flags);
+	} else
+		/* Just start the transfer right away. */
+		omap_enable_lcd_dma();
+}
+
+static void sossi_dma_callback(void *data)
+{
+	omap_stop_lcd_dma();
+	sossi_stop_transfer();
+	clk_disable(sossi.fck);
+	sossi.lcdc_callback(sossi.lcdc_callback_data);
+}
+
+static void sossi_read_data(void *data, unsigned int len)
+{
+	clk_enable(sossi.fck);
+	set_timing(RD_ACCESS);
+	_set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
+	/* CMD#/DATA */
+	sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
+	set_cycles(len);
+	sossi_start_transfer();
+	while (len >= 4) {
+		*(u32 *) data = sossi_read_reg(SOSSI_FIFO_REG);
+		len -= 4;
+		data += 4;
+	}
+	while (len >= 2) {
+		*(u16 *) data = sossi_read_reg16(SOSSI_FIFO_REG);
+		len -= 2;
+		data += 2;
+	}
+	while (len) {
+		*(u8 *) data = sossi_read_reg8(SOSSI_FIFO_REG);
+		len--;
+		data++;
+	}
+	sossi_stop_transfer();
+	clk_disable(sossi.fck);
+}
+
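+/*
+ * Tearsync match interrupt: kick off the LCD DMA transfer that
+ * sossi_transfer_area() deferred until the sync signal arrived.
+ */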
+static irqreturn_t sossi_match_irq(int irq, void *data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sossi.lock, flags);
+	if (sossi.vsync_dma_pending) {
+		sossi.vsync_dma_pending--;
+		omap_enable_lcd_dma();
+	}
+	spin_unlock_irqrestore(&sossi.lock, flags);
+	return IRQ_HANDLED;
+}
+
+static int sossi_init(struct omapfb_device *fbdev)
+{
+	u32 l, k;
+	struct clk *fck;
+	struct clk *dpll1out_ck;
+	int r;
+
+	sossi.base = (void __iomem *)IO_ADDRESS(OMAP_SOSSI_BASE);
+	sossi.fbdev = fbdev;
+	spin_lock_init(&sossi.lock);
+
+	dpll1out_ck = clk_get(fbdev->dev, "ck_dpll1out");
+	if (IS_ERR(dpll1out_ck)) {
+		dev_err(fbdev->dev, "can't get DPLL1OUT clock\n");
+		return PTR_ERR(dpll1out_ck);
+	}
+	/*
+	 * We need the parent clock rate, which we might divide further
+	 * depending on the timing requirements of the controller. See
+	 * _set_timing().
+	 */
+	sossi.fck_hz = clk_get_rate(dpll1out_ck);
+	clk_put(dpll1out_ck);
+
+	fck = clk_get(fbdev->dev, "ck_sossi");
+	if (IS_ERR(fck)) {
+		dev_err(fbdev->dev, "can't get SoSSI functional clock\n");
+		return PTR_ERR(fck);
+	}
+	sossi.fck = fck;
+
+	/* Reset and enable the SoSSI module */
+	l = omap_readl(MOD_CONF_CTRL_1);
+	l |= CONF_SOSSI_RESET_R;
+	omap_writel(l, MOD_CONF_CTRL_1);
+	l &= ~CONF_SOSSI_RESET_R;
+	omap_writel(l, MOD_CONF_CTRL_1);
+
+	clk_enable(sossi.fck);
+	l = omap_readl(ARM_IDLECT2);
+	l &= ~(1 << 8);			/* DMACK_REQ */
+	omap_writel(l, ARM_IDLECT2);
+
+	l = sossi_read_reg(SOSSI_INIT2_REG);
+	/* Enable and reset the SoSSI block */
+	l |= (1 << 0) | (1 << 1);
+	sossi_write_reg(SOSSI_INIT2_REG, l);
+	/* Take SoSSI out of reset */
+	l &= ~(1 << 1);
+	sossi_write_reg(SOSSI_INIT2_REG, l);
+
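+	/*
+	 * Reset the ID register read pointer; the first two reads must
+	 * return the fixed sync pattern before the component code and
+	 * version become readable.
+	 */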
+	sossi_write_reg(SOSSI_ID_REG, 0);
+	l = sossi_read_reg(SOSSI_ID_REG);
+	k = sossi_read_reg(SOSSI_ID_REG);
+
+	if (l != 0x55555555 || k != 0xaaaaaaaa) {
+		dev_err(fbdev->dev,
+			"invalid SoSSI sync pattern: %08x, %08x\n", l, k);
+		r = -ENODEV;
+		goto err;
+	}
+
+	if ((r = omap_lcdc_set_dma_callback(sossi_dma_callback, NULL)) < 0) {
+		dev_err(fbdev->dev, "can't get LCDC IRQ\n");
+		r = -ENODEV;
+		goto err;
+	}
+
+	l = sossi_read_reg(SOSSI_ID_REG); /* Component code */
+	l = sossi_read_reg(SOSSI_ID_REG);
+	dev_info(fbdev->dev, "SoSSI version %d.%d initialized\n",
+		l >> 16, l & 0xffff);
+
+	l = sossi_read_reg(SOSSI_INIT1_REG);
+	l |= (1 << 19); /* DMA_MODE */
+	l &= ~(1 << 31); /* REORDERING */
+	sossi_write_reg(SOSSI_INIT1_REG, l);
+
+	if ((r = request_irq(INT_1610_SoSSI_MATCH, sossi_match_irq,
+			     IRQT_FALLING, "sossi_match",
+			     sossi.fbdev->dev)) < 0) {
+		dev_err(sossi.fbdev->dev, "can't get SoSSI match IRQ\n");
+		goto err;
+	}
+
+	clk_disable(sossi.fck);
+	return 0;
+
+err:
+	clk_disable(sossi.fck);
+	clk_put(sossi.fck);
+	return r;
+}
+
+static void sossi_cleanup(void)
+{
+	omap_lcdc_free_dma_callback();
+	clk_put(sossi.fck);
+}
+
+struct lcd_ctrl_extif omap1_ext_if = {
+	.init			= sossi_init,
+	.cleanup		= sossi_cleanup,
+	.get_clk_info		= sossi_get_clk_info,
+	.convert_timings	= sossi_convert_timings,
+	.set_timings		= sossi_set_timings,
+	.set_bits_per_cycle	= sossi_set_bits_per_cycle,
+	.setup_tearsync		= sossi_setup_tearsync,
+	.enable_tearsync	= sossi_enable_tearsync,
+	.write_command		= sossi_write_command,
+	.read_data		= sossi_read_data,
+	.write_data		= sossi_write_data,
+	.transfer_area		= sossi_transfer_area,
+
+	.max_transmit_size	= SOSSI_MAX_XMIT_BYTES,
+};
+
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index e64f8b5..8503e73 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -52,7 +52,7 @@
 	struct {
 		__u8 red, green, blue;
 	}				palette[256];
-	u32				pseudo_palette[17];
+	u32				pseudo_palette[16];
 	
 	volatile struct cmap_regs	__iomem *cmap_regs;
 	unsigned long			cmap_regs_phys;
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 0a04483a..10c0cc6 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -24,7 +24,7 @@
  * License. See the file COPYING in the main directory of this archive for
  * more details.
  *
- * 
+ *
  */
 
 #include <linux/module.h>
@@ -58,7 +58,7 @@
 #endif
 
 /*
- * Driver data 
+ * Driver data
  */
 static char *mode __devinitdata = NULL;
 
@@ -82,12 +82,12 @@
 {
 	pm2type_t	type;		/* Board type */
 	unsigned char	__iomem *v_regs;/* virtual address of p_regs */
-	u32 	   	memclock;	/* memclock */
+	u32		memclock;	/* memclock */
 	u32		video;		/* video flags before blanking */
 	u32		mem_config;	/* MemConfig reg at probe */
 	u32		mem_control;	/* MemControl reg at probe */
 	u32		boot_address;	/* BootAddress reg at probe */
-	u32             palette[16];
+	u32		palette[16];
 };
 
 /*
@@ -95,12 +95,12 @@
  * if we don't use modedb.
  */
 static struct fb_fix_screeninfo pm2fb_fix __devinitdata = {
-	.id =		"", 
+	.id =		"",
 	.type =		FB_TYPE_PACKED_PIXELS,
 	.visual =	FB_VISUAL_PSEUDOCOLOR,
 	.xpanstep =	1,
 	.ypanstep =	1,
-	.ywrapstep =	0, 
+	.ywrapstep =	0,
 	.accel =	FB_ACCEL_3DLABS_PERMEDIA2,
 };
 
@@ -109,26 +109,26 @@
  */
 static struct fb_var_screeninfo pm2fb_var __devinitdata = {
 	/* "640x480, 8 bpp @ 60 Hz */
-	.xres =		640,
-	.yres =		480,
-	.xres_virtual =	640,
-	.yres_virtual =	480,
-	.bits_per_pixel =8,
-	.red =		{0, 8, 0},
-	.blue =		{0, 8, 0},
-	.green =	{0, 8, 0},
-	.activate =	FB_ACTIVATE_NOW,
-	.height =	-1,
-	.width =	-1,
-	.accel_flags =	0,
-	.pixclock =	39721,
-	.left_margin =	40,
-	.right_margin =	24,
-	.upper_margin =	32,
-	.lower_margin =	11,
-	.hsync_len =	96,
-	.vsync_len =	2,
-	.vmode =	FB_VMODE_NONINTERLACED
+	.xres =			640,
+	.yres =			480,
+	.xres_virtual =		640,
+	.yres_virtual =		480,
+	.bits_per_pixel =	8,
+	.red =			{0, 8, 0},
+	.blue =			{0, 8, 0},
+	.green =		{0, 8, 0},
+	.activate =		FB_ACTIVATE_NOW,
+	.height =		-1,
+	.width =		-1,
+	.accel_flags =		0,
+	.pixclock =		39721,
+	.left_margin =		40,
+	.right_margin =		24,
+	.upper_margin =		32,
+	.lower_margin =		11,
+	.hsync_len =		96,
+	.vsync_len =		2,
+	.vmode =		FB_VMODE_NONINTERLACED
 };
 
 /*
@@ -166,7 +166,7 @@
 		pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
 		index = PM2VR_RD_INDEXED_DATA;
 		break;
-	}	
+	}
 	mb();
 	return pm2_RD(p, index);
 }
@@ -182,7 +182,7 @@
 		pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
 		index = PM2VR_RD_INDEXED_DATA;
 		break;
-	}	
+	}
 	wmb();
 	pm2_WR(p, index, v);
 	wmb();
@@ -197,7 +197,7 @@
 }
 
 #ifdef CONFIG_FB_PM2_FIFO_DISCONNECT
-#define WAIT_FIFO(p,a)
+#define WAIT_FIFO(p, a)
 #else
 static inline void WAIT_FIFO(struct pm2fb_par* p, u32 a)
 {
@@ -209,7 +209,7 @@
 /*
  * partial products for the supported horizontal resolutions.
  */
-#define PACKPP(p0,p1,p2)	(((p2) << 6) | ((p1) << 3) | (p0))
+#define PACKPP(p0, p1, p2)	(((p2) << 6) | ((p1) << 3) | (p0))
 static const struct {
 	u16 width;
 	u16 pp;
@@ -357,7 +357,7 @@
 static void reset_config(struct pm2fb_par* p)
 {
 	WAIT_FIFO(p, 52);
-	pm2_WR(p, PM2R_CHIP_CONFIG, pm2_RD(p, PM2R_CHIP_CONFIG)&
+	pm2_WR(p, PM2R_CHIP_CONFIG, pm2_RD(p, PM2R_CHIP_CONFIG) &
 	       ~(PM2F_VGA_ENABLE|PM2F_VGA_FIXED));
 	pm2_WR(p, PM2R_BYPASS_WRITE_MASK, ~(0L));
 	pm2_WR(p, PM2R_FRAMEBUFFER_WRITE_MASK, ~(0L));
@@ -367,7 +367,7 @@
 	pm2_WR(p, PM2R_RASTERIZER_MODE, 0);
 	pm2_WR(p, PM2R_DELTA_MODE, PM2F_DELTA_ORDER_RGB);
 	pm2_WR(p, PM2R_LB_READ_FORMAT, 0);
-	pm2_WR(p, PM2R_LB_WRITE_FORMAT, 0); 
+	pm2_WR(p, PM2R_LB_WRITE_FORMAT, 0);
 	pm2_WR(p, PM2R_LB_READ_MODE, 0);
 	pm2_WR(p, PM2R_LB_SOURCE_OFFSET, 0);
 	pm2_WR(p, PM2R_FB_SOURCE_OFFSET, 0);
@@ -535,7 +535,7 @@
 	vsync = video;
 
 	DPRINTK("video = 0x%x\n", video);
-	
+
 	/*
 	 * The hardware cursor needs +vsync to recognise vert retrace.
 	 * We may not be using the hardware cursor, but the X Glint
@@ -574,9 +574,9 @@
  */
 
 /**
- *      pm2fb_check_var - Optional function. Validates a var passed in. 
- *      @var: frame buffer variable screen structure
- *      @info: frame buffer structure that represents a single frame buffer 
+ *	pm2fb_check_var - Optional function. Validates a var passed in.
+ *	@var: frame buffer variable screen structure
+ *	@info: frame buffer structure that represents a single frame buffer
  *
  *	Checks to see if the hardware supports the state requested by
  *	var passed in.
@@ -615,23 +615,23 @@
 
 	var->xres = (var->xres + 15) & ~15; /* could sometimes be 8 */
 	lpitch = var->xres * ((var->bits_per_pixel + 7)>>3);
-  
+
 	if (var->xres < 320 || var->xres > 1600) {
 		DPRINTK("width not supported: %u\n", var->xres);
 		return -EINVAL;
 	}
-  
+
 	if (var->yres < 200 || var->yres > 1200) {
 		DPRINTK("height not supported: %u\n", var->yres);
 		return -EINVAL;
 	}
-  
+
 	if (lpitch * var->yres_virtual > info->fix.smem_len) {
 		DPRINTK("no memory for screen (%ux%ux%u)\n",
 			var->xres, var->yres_virtual, var->bits_per_pixel);
 		return -EINVAL;
 	}
-  
+
 	if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
 		DPRINTK("pixclock too high (%ldKHz)\n", PICOS2KHZ(var->pixclock));
 		return -EINVAL;
@@ -672,17 +672,17 @@
 		break;
 	}
 	var->height = var->width = -1;
-  
+
 	var->accel_flags = 0;	/* Can't mmap if this is on */
-	
+
 	DPRINTK("Checking graphics mode at %dx%d depth %d\n",
 		var->xres, var->yres, var->bits_per_pixel);
 	return 0;
 }
 
 /**
- *      pm2fb_set_par - Alters the hardware state.
- *      @info: frame buffer structure that represents a single frame buffer
+ *	pm2fb_set_par - Alters the hardware state.
+ *	@info: frame buffer structure that represents a single frame buffer
  *
  *	Using the fb_var_screeninfo in fb_info we set the resolution of the
  *	this particular framebuffer.
@@ -709,7 +709,7 @@
 	clear_palette(par);
 	if ( par->memclock )
 		set_memclock(par, par->memclock);
-    
+
 	width = (info->var.xres_virtual + 7) & ~7;
 	height = info->var.yres_virtual;
 	depth = (info->var.bits_per_pixel + 7) & ~7;
@@ -722,7 +722,7 @@
 		DPRINTK("pixclock too high (%uKHz)\n", pixclock);
 		return -EINVAL;
 	}
-    
+
 	hsstart = to3264(info->var.right_margin, depth, data64);
 	hsend = hsstart + to3264(info->var.hsync_len, depth, data64);
 	hbend = hsend + to3264(info->var.left_margin, depth, data64);
@@ -737,7 +737,7 @@
 	base = to3264(info->var.yoffset * xres + info->var.xoffset, depth, 1);
 	if (data64)
 		video |= PM2F_DATA_64_ENABLE;
-    
+
 	if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) {
 		if (lowhsync) {
 			DPRINTK("ignoring +hsync, using -hsync.\n");
@@ -778,9 +778,9 @@
 		WAIT_FIFO(par, 1);
 		pm2_WR(par, PM2VR_RD_INDEX_HIGH, 0);
 	}
-    
+
 	set_aperture(par, depth);
-    
+
 	mb();
 	WAIT_FIFO(par, 19);
 	pm2_RDAC_WR(par, PM2I_RD_COLOR_KEY_CONTROL,
@@ -847,22 +847,22 @@
 	set_pixclock(par, pixclock);
 	DPRINTK("Setting graphics mode at %dx%d depth %d\n",
 		info->var.xres, info->var.yres, info->var.bits_per_pixel);
-	return 0;	
+	return 0;
 }
 
 /**
- *  	pm2fb_setcolreg - Sets a color register.
- *      @regno: boolean, 0 copy local, 1 get_user() function
- *      @red: frame buffer colormap structure
- *	@green: The green value which can be up to 16 bits wide 
+ *	pm2fb_setcolreg - Sets a color register.
+ *	@regno: index of the color register to set
+ *	@red: The red value which can be up to 16 bits wide
+ *	@green: The green value which can be up to 16 bits wide
  *	@blue:  The blue value which can be up to 16 bits wide.
- *	@transp: If supported the alpha value which can be up to 16 bits wide.	
- *      @info: frame buffer info structure
- * 
- *  	Set a single color register. The values supplied have a 16 bit
- *  	magnitude which needs to be scaled in this function for the hardware.
+ *	@transp: If supported the alpha value which can be up to 16 bits wide.
+ *	@info: frame buffer info structure
+ *
+ *	Set a single color register. The values supplied have a 16 bit
+ *	magnitude which needs to be scaled in this function for the hardware.
  *	Pretty much a direct lift from tdfxfb.c.
- * 
+ *
  *	Returns negative errno on error, or zero on success.
  */
 static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -906,7 +906,7 @@
 	 *    (blue << blue.offset) | (transp << transp.offset)
 	 *    RAMDAC does not exist
 	 */
-#define CNVT_TOHW(val,width) ((((val)<<(width))+0x7FFF-(val))>>16)
+#define CNVT_TOHW(val, width) ((((val) << (width)) + 0x7FFF - (val)) >> 16)
 	switch (info->fix.visual) {
 	case FB_VISUAL_TRUECOLOR:
 	case FB_VISUAL_PSEUDOCOLOR:
@@ -916,9 +916,9 @@
 		transp = CNVT_TOHW(transp, info->var.transp.length);
 		break;
 	case FB_VISUAL_DIRECTCOLOR:
-		/* example here assumes 8 bit DAC. Might be different 
-		 * for your hardware */	
-		red = CNVT_TOHW(red, 8);       
+		/* example here assumes 8 bit DAC. Might be different
+		 * for your hardware */
+		red = CNVT_TOHW(red, 8);
 		green = CNVT_TOHW(green, 8);
 		blue = CNVT_TOHW(blue, 8);
 		/* hey, there is bug in transp handling... */
@@ -940,11 +940,11 @@
 
 		switch (info->var.bits_per_pixel) {
 		case 8:
-			break;	
-   		case 16:
+			break;
+		case 16:
 		case 24:
-		case 32:	
-           		par->palette[regno] = v;
+		case 32:
+			par->palette[regno] = v;
 			break;
 		}
 		return 0;
@@ -956,15 +956,15 @@
 }
 
 /**
- *      pm2fb_pan_display - Pans the display.
- *      @var: frame buffer variable screen structure
- *      @info: frame buffer structure that represents a single frame buffer
+ *	pm2fb_pan_display - Pans the display.
+ *	@var: frame buffer variable screen structure
+ *	@info: frame buffer structure that represents a single frame buffer
  *
  *	Pan (or wrap, depending on the `vmode' field) the display using the
- *  	`xoffset' and `yoffset' fields of the `var' structure.
- *  	If the values don't fit, return -EINVAL.
+ *	`xoffset' and `yoffset' fields of the `var' structure.
+ *	If the values don't fit, return -EINVAL.
  *
- *      Returns negative errno on error, or zero on success.
+ *	Returns negative errno on error, or zero on success.
  *
  */
 static int pm2fb_pan_display(struct fb_var_screeninfo *var,
@@ -980,24 +980,24 @@
 	depth = (depth > 32) ? 32 : depth;
 	base = to3264(var->yoffset * xres + var->xoffset, depth, 1);
 	WAIT_FIFO(p, 1);
-	pm2_WR(p, PM2R_SCREEN_BASE, base);    
+	pm2_WR(p, PM2R_SCREEN_BASE, base);
 	return 0;
 }
 
 /**
- *      pm2fb_blank - Blanks the display.
- *      @blank_mode: the blank mode we want. 
- *      @info: frame buffer structure that represents a single frame buffer
+ *	pm2fb_blank - Blanks the display.
+ *	@blank_mode: the blank mode we want.
+ *	@info: frame buffer structure that represents a single frame buffer
  *
- *      Blank the screen if blank_mode != 0, else unblank. Return 0 if
- *      blanking succeeded, != 0 if un-/blanking failed due to e.g. a 
- *      video mode which doesn't support it. Implements VESA suspend
- *      and powerdown modes on hardware that supports disabling hsync/vsync:
- *      blank_mode == 2: suspend vsync
- *      blank_mode == 3: suspend hsync
- *      blank_mode == 4: powerdown
+ *	Blank the screen if blank_mode != 0, else unblank. Return 0 if
+ *	blanking succeeded, != 0 if un-/blanking failed due to e.g. a
+ *	video mode which doesn't support it. Implements VESA suspend
+ *	and powerdown modes on hardware that supports disabling hsync/vsync:
+ *	blank_mode == 2: suspend vsync
+ *	blank_mode == 3: suspend hsync
+ *	blank_mode == 4: powerdown
  *
- *      Returns negative errno on error, or zero on success.
+ *	Returns negative errno on error, or zero on success.
  *
  */
 static int pm2fb_blank(int blank_mode, struct fb_info *info)
@@ -1071,7 +1071,7 @@
 	pm2_WR(par, PM2R_RECTANGLE_ORIGIN, (y << 16) | x);
 	pm2_WR(par, PM2R_RECTANGLE_SIZE, (h << 16) | w);
 	wmb();
-	pm2_WR(par, PM2R_RENDER,PM2F_RENDER_RECTANGLE |
+	pm2_WR(par, PM2R_RENDER, PM2F_RENDER_RECTANGLE |
 				(x<xsrc ? PM2F_INCREASE_X : 0) |
 				(y<ysrc ? PM2F_INCREASE_Y : 0) |
 				(copy ? 0 : PM2F_RENDER_FASTFILL));
@@ -1234,7 +1234,7 @@
 	DPRINTK("Adjusting register base for big-endian.\n");
 #endif
 	DPRINTK("Register base at 0x%lx\n", pm2fb_fix.mmio_start);
-    
+
 	/* Registers - request region and map it. */
 	if ( !request_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len,
 				 "pm2fb regbase") ) {
@@ -1317,17 +1317,17 @@
 	}
 
 	info->fbops		= &pm2fb_ops;
-	info->fix		= pm2fb_fix; 	
+	info->fix		= pm2fb_fix;
 	info->pseudo_palette	= default_par->palette;
 	info->flags		= FBINFO_DEFAULT |
-                                  FBINFO_HWACCEL_YPAN |
-	                          FBINFO_HWACCEL_COPYAREA |
-	                          FBINFO_HWACCEL_FILLRECT;
+				  FBINFO_HWACCEL_YPAN |
+				  FBINFO_HWACCEL_COPYAREA |
+				  FBINFO_HWACCEL_FILLRECT;
 
 	if (!mode)
 		mode = "640x480@60";
-	 
-	err = fb_find_mode(&info->var, info, mode, NULL, 0, NULL, 8); 
+
+	err = fb_find_mode(&info->var, info, mode, NULL, 0, NULL, 8);
 	if (!err || err == 4)
 		info->var = pm2fb_var;
 
@@ -1348,8 +1348,8 @@
 	return 0;
 
  err_exit_all:
-	fb_dealloc_cmap(&info->cmap);	
- err_exit_both:    
+	fb_dealloc_cmap(&info->cmap);
+ err_exit_both:
 	iounmap(info->screen_base);
 	release_mem_region(pm2fb_fix.smem_start, pm2fb_fix.smem_len);
  err_exit_mmio:
@@ -1374,7 +1374,7 @@
 	struct pm2fb_par *par = info->par;
 
 	unregister_framebuffer(info);
-    
+
 	iounmap(info->screen_base);
 	release_mem_region(fix->smem_start, fix->smem_len);
 	iounmap(par->v_regs);
@@ -1402,9 +1402,9 @@
 
 static struct pci_driver pm2fb_driver = {
 	.name		= "pm2fb",
-	.id_table 	= pm2fb_id_table,
-	.probe 		= pm2fb_probe,
-	.remove 	= __devexit_p(pm2fb_remove),
+	.id_table	= pm2fb_id_table,
+	.probe		= pm2fb_probe,
+	.remove		= __devexit_p(pm2fb_remove),
 };
 
 MODULE_DEVICE_TABLE(pci, pm2fb_id_table);
@@ -1423,7 +1423,7 @@
 	if (!options || !*options)
 		return 0;
 
-	while ((this_opt = strsep(&options, ",")) != NULL) {	
+	while ((this_opt = strsep(&options, ",")) != NULL) {
 		if (!*this_opt)
 			continue;
 		if(!strcmp(this_opt, "lowhsync")) {
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index b52e883..5b3f54c 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -77,7 +77,7 @@
 	.xpanstep =	1,
 	.ypanstep =	1,
 	.ywrapstep =	0,
-	.accel =	FB_ACCEL_NONE,
+	.accel =	FB_ACCEL_3DLABS_PERMEDIA3,
 };
 
 /*
@@ -185,6 +185,238 @@
 	return 0;
 }
 
+/* acceleration */
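+/*
+ * Flush the graphics engine: emit a Sync command and busy-wait until its
+ * tag appears in the output FIFO, which guarantees all previously issued
+ * operations have completed.
+ */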
+static int pm3fb_sync(struct fb_info *info)
+{
+	struct pm3_par *par = info->par;
+
+	PM3_WAIT(par, 2);
+	PM3_WRITE_REG(par, PM3FilterMode, PM3FilterModeSync);
+	PM3_WRITE_REG(par, PM3Sync, 0);
+	mb();
+	do {
+		while ((PM3_READ_REG(par, PM3OutFIFOWords)) == 0);
+		rmb();
+	} while ((PM3_READ_REG(par, PM3OutputFifo)) != PM3Sync_Tag);
+
+	return 0;
+}
+
+static void pm3fb_init_engine(struct fb_info *info)
+{
+	struct pm3_par *par = info->par;
+	const u32 width = (info->var.xres_virtual + 7) & ~7;
+
+	PM3_WAIT(par, 50);
+	PM3_WRITE_REG(par, PM3FilterMode, PM3FilterModeSync);
+	PM3_WRITE_REG(par, PM3StatisticMode, 0x0);
+	PM3_WRITE_REG(par, PM3DeltaMode, 0x0);
+	PM3_WRITE_REG(par, PM3RasterizerMode, 0x0);
+	PM3_WRITE_REG(par, PM3ScissorMode, 0x0);
+	PM3_WRITE_REG(par, PM3LineStippleMode, 0x0);
+	PM3_WRITE_REG(par, PM3AreaStippleMode, 0x0);
+	PM3_WRITE_REG(par, PM3GIDMode, 0x0);
+	PM3_WRITE_REG(par, PM3DepthMode, 0x0);
+	PM3_WRITE_REG(par, PM3StencilMode, 0x0);
+	PM3_WRITE_REG(par, PM3StencilData, 0x0);
+	PM3_WRITE_REG(par, PM3ColorDDAMode, 0x0);
+	PM3_WRITE_REG(par, PM3TextureCoordMode, 0x0);
+	PM3_WRITE_REG(par, PM3TextureIndexMode0, 0x0);
+	PM3_WRITE_REG(par, PM3TextureIndexMode1, 0x0);
+	PM3_WRITE_REG(par, PM3TextureReadMode, 0x0);
+	PM3_WRITE_REG(par, PM3LUTMode, 0x0);
+	PM3_WRITE_REG(par, PM3TextureFilterMode, 0x0);
+	PM3_WRITE_REG(par, PM3TextureCompositeMode, 0x0);
+	PM3_WRITE_REG(par, PM3TextureApplicationMode, 0x0);
+	PM3_WRITE_REG(par, PM3TextureCompositeColorMode1, 0x0);
+	PM3_WRITE_REG(par, PM3TextureCompositeAlphaMode1, 0x0);
+	PM3_WRITE_REG(par, PM3TextureCompositeColorMode0, 0x0);
+	PM3_WRITE_REG(par, PM3TextureCompositeAlphaMode0, 0x0);
+	PM3_WRITE_REG(par, PM3FogMode, 0x0);
+	PM3_WRITE_REG(par, PM3ChromaTestMode, 0x0);
+	PM3_WRITE_REG(par, PM3AlphaTestMode, 0x0);
+	PM3_WRITE_REG(par, PM3AntialiasMode, 0x0);
+	PM3_WRITE_REG(par, PM3YUVMode, 0x0);
+	PM3_WRITE_REG(par, PM3AlphaBlendColorMode, 0x0);
+	PM3_WRITE_REG(par, PM3AlphaBlendAlphaMode, 0x0);
+	PM3_WRITE_REG(par, PM3DitherMode, 0x0);
+	PM3_WRITE_REG(par, PM3LogicalOpMode, 0x0);
+	PM3_WRITE_REG(par, PM3RouterMode, 0x0);
+	PM3_WRITE_REG(par, PM3Window, 0x0);
+
+	PM3_WRITE_REG(par, PM3Config2D, 0x0);
+
+	PM3_WRITE_REG(par, PM3SpanColorMask, 0xffffffff);
+
+	PM3_WRITE_REG(par, PM3XBias, 0x0);
+	PM3_WRITE_REG(par, PM3YBias, 0x0);
+	PM3_WRITE_REG(par, PM3DeltaControl, 0x0);
+
+	PM3_WRITE_REG(par, PM3BitMaskPattern, 0xffffffff);
+
+	PM3_WRITE_REG(par, PM3FBDestReadEnables,
+			   PM3FBDestReadEnables_E(0xff) |
+			   PM3FBDestReadEnables_R(0xff) |
+			   PM3FBDestReadEnables_ReferenceAlpha(0xff));
+	PM3_WRITE_REG(par, PM3FBDestReadBufferAddr0, 0x0);
+	PM3_WRITE_REG(par, PM3FBDestReadBufferOffset0, 0x0);
+	PM3_WRITE_REG(par, PM3FBDestReadBufferWidth0,
+			   PM3FBDestReadBufferWidth_Width(width));
+
+	PM3_WRITE_REG(par, PM3FBDestReadMode,
+			   PM3FBDestReadMode_ReadEnable |
+			   PM3FBDestReadMode_Enable0);
+	PM3_WRITE_REG(par, PM3FBSourceReadBufferAddr, 0x0);
+	PM3_WRITE_REG(par, PM3FBSourceReadBufferOffset, 0x0);
+	PM3_WRITE_REG(par, PM3FBSourceReadBufferWidth,
+			   PM3FBSourceReadBufferWidth_Width(width));
+	PM3_WRITE_REG(par, PM3FBSourceReadMode,
+			   PM3FBSourceReadMode_Blocking |
+			   PM3FBSourceReadMode_ReadEnable);
+
+	PM3_WAIT(par, 2);
+	{
+		unsigned long rm = 1;
+		switch (info->var.bits_per_pixel) {
+		case 8:
+			PM3_WRITE_REG(par, PM3PixelSize,
+					   PM3PixelSize_GLOBAL_8BIT);
+			break;
+		case 16:
+			PM3_WRITE_REG(par, PM3PixelSize,
+					   PM3PixelSize_GLOBAL_16BIT);
+			break;
+		case 32:
+			PM3_WRITE_REG(par, PM3PixelSize,
+					   PM3PixelSize_GLOBAL_32BIT);
+			break;
+		default:
+			DPRINTK("Unsupported depth %d\n",
+				info->var.bits_per_pixel);
+			break;
+		}
+		PM3_WRITE_REG(par, PM3RasterizerMode, rm);
+	}
+
+	PM3_WAIT(par, 20);
+	PM3_WRITE_REG(par, PM3FBSoftwareWriteMask, 0xffffffff);
+	PM3_WRITE_REG(par, PM3FBHardwareWriteMask, 0xffffffff);
+	PM3_WRITE_REG(par, PM3FBWriteMode,
+			   PM3FBWriteMode_WriteEnable |
+			   PM3FBWriteMode_OpaqueSpan |
+			   PM3FBWriteMode_Enable0);
+	PM3_WRITE_REG(par, PM3FBWriteBufferAddr0, 0x0);
+	PM3_WRITE_REG(par, PM3FBWriteBufferOffset0, 0x0);
+	PM3_WRITE_REG(par, PM3FBWriteBufferWidth0,
+			   PM3FBWriteBufferWidth_Width(width));
+
+	PM3_WRITE_REG(par, PM3SizeOfFramebuffer, 0x0);
+	{
+		/* size in lines of FB */
+		unsigned long sofb = info->screen_size /
+			info->fix.line_length;
+		if (sofb > 4095)
+			PM3_WRITE_REG(par, PM3SizeOfFramebuffer, 4095);
+		else
+			PM3_WRITE_REG(par, PM3SizeOfFramebuffer, sofb);
+
+		switch (info->var.bits_per_pixel) {
+		case 8:
+			PM3_WRITE_REG(par, PM3DitherMode,
+					   (1 << 10) | (2 << 3));
+			break;
+		case 16:
+			PM3_WRITE_REG(par, PM3DitherMode,
+					   (1 << 10) | (1 << 3));
+			break;
+		case 32:
+			PM3_WRITE_REG(par, PM3DitherMode,
+					   (1 << 10) | (0 << 3));
+			break;
+		default:
+			DPRINTK("Unsupported depth %d\n",
+				info->var.bits_per_pixel);
+			break;
+		}
+	}
+
+	PM3_WRITE_REG(par, PM3dXDom, 0x0);
+	PM3_WRITE_REG(par, PM3dXSub, 0x0);
+	PM3_WRITE_REG(par, PM3dY, (1 << 16));
+	PM3_WRITE_REG(par, PM3StartXDom, 0x0);
+	PM3_WRITE_REG(par, PM3StartXSub, 0x0);
+	PM3_WRITE_REG(par, PM3StartY, 0x0);
+	PM3_WRITE_REG(par, PM3Count, 0x0);
+
+	/* Disable the LocalBuffer; better safe than sorry */
+	PM3_WRITE_REG(par, PM3LBDestReadMode, 0x0);
+	PM3_WRITE_REG(par, PM3LBDestReadEnables, 0x0);
+	PM3_WRITE_REG(par, PM3LBSourceReadMode, 0x0);
+	PM3_WRITE_REG(par, PM3LBWriteMode, 0x0);
+
+	pm3fb_sync(info);
+}
+
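+/*
+ * Accelerated solid fill.  Falls back to cfb_fillrect() for non-COPY
+ * raster ops or when acceleration is disabled; otherwise the rectangle
+ * is clipped to the virtual screen, the colour replicated to 32 bits and
+ * a 2D span fill is issued.
+ */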
+static void pm3fb_fillrect(struct fb_info *info,
+			   const struct fb_fillrect *region)
+{
+	struct pm3_par *par = info->par;
+	struct fb_fillrect modded;
+	int vxres, vyres;
+	u32 color = (info->fix.visual == FB_VISUAL_TRUECOLOR) ?
+		((u32 *)info->pseudo_palette)[region->color] : region->color;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+	if ((info->flags & FBINFO_HWACCEL_DISABLED) ||
+	    region->rop != ROP_COPY) {
+		cfb_fillrect(info, region);
+		return;
+	}
+
+	vxres = info->var.xres_virtual;
+	vyres = info->var.yres_virtual;
+
+	memcpy(&modded, region, sizeof(struct fb_fillrect));
+
+	if (!modded.width || !modded.height ||
+	    modded.dx >= vxres || modded.dy >= vyres)
+		return;
+
+	if (modded.dx + modded.width > vxres)
+		modded.width = vxres - modded.dx;
+	if (modded.dy + modded.height > vyres)
+		modded.height = vyres - modded.dy;
+
+	if (info->var.bits_per_pixel == 8)
+		color |= color << 8;
+	if (info->var.bits_per_pixel <= 16)
+		color |= color << 16;
+
+	PM3_WAIT(par, 4);
+
+	PM3_WRITE_REG(par, PM3Config2D,
+				  PM3Config2D_UseConstantSource |
+				  PM3Config2D_ForegroundROPEnable |
+				  (PM3Config2D_ForegroundROP(0x3)) |	/* 0x3 is GXcopy */
+				  PM3Config2D_FBWriteEnable);
+
+	PM3_WRITE_REG(par, PM3ForegroundColor, color);
+
+	PM3_WRITE_REG(par, PM3RectanglePosition,
+		      (PM3RectanglePosition_XOffset(modded.dx)) |
+		      (PM3RectanglePosition_YOffset(modded.dy)));
+
+	PM3_WRITE_REG(par, PM3Render2D,
+		      PM3Render2D_XPositive |
+		      PM3Render2D_YPositive |
+		      PM3Render2D_Operation_Normal |
+		      PM3Render2D_SpanOperation |
+		      (PM3Render2D_Width(modded.width)) |
+		      (PM3Render2D_Height(modded.height)));
+}
+/* end of acceleration functions */
+
 /* write the mode to registers */
 static void pm3fb_write_mode(struct fb_info *info)
 {
@@ -380,8 +612,6 @@
 /*
  * hardware independent functions
  */
-int pm3fb_init(void);
-
 static int pm3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 {
 	u32 lpitch;
@@ -528,6 +758,7 @@
 	pm3fb_clear_colormap(par, 0, 0, 0);
 	PM3_WRITE_DAC_REG(par, PM3RD_CursorMode,
 			  PM3RD_CursorMode_CURSOR_DISABLE);
+	pm3fb_init_engine(info);
 	pm3fb_write_mode(info);
 	return 0;
 }
@@ -675,10 +906,11 @@
 	.fb_set_par	= pm3fb_set_par,
 	.fb_setcolreg	= pm3fb_setcolreg,
 	.fb_pan_display	= pm3fb_pan_display,
-	.fb_fillrect	= cfb_fillrect,
+	.fb_fillrect	= pm3fb_fillrect,
 	.fb_copyarea	= cfb_copyarea,
 	.fb_imageblit	= cfb_imageblit,
 	.fb_blank	= pm3fb_blank,
+	.fb_sync	= pm3fb_sync,
 };
 
 /* ------------------------------------------------------------------------- */
@@ -847,7 +1079,8 @@
 
 	info->fix = pm3fb_fix;
 	info->pseudo_palette = par->palette;
-	info->flags = FBINFO_DEFAULT;/* | FBINFO_HWACCEL_YPAN;*/
+	info->flags = FBINFO_DEFAULT |
+			FBINFO_HWACCEL_FILLRECT;/* | FBINFO_HWACCEL_YPAN;*/
 
 	/*
 	 * This should give a reasonable default video mode. The following is
@@ -935,35 +1168,12 @@
 
 MODULE_DEVICE_TABLE(pci, pm3fb_id_table);
 
-#ifndef MODULE
-	/*
-	 *  Setup
-	 */
-
-/*
- * Only necessary if your driver takes special options,
- * otherwise we fall back on the generic fb_setup().
- */
-static int __init pm3fb_setup(char *options)
+static int __init pm3fb_init(void)
 {
-	/* Parse user speficied options (`video=pm3fb:') */
-	return 0;
-}
-#endif /* MODULE */
-
-int __init pm3fb_init(void)
-{
-	/*
-	 *  For kernel boot options (in 'video=pm3fb:<options>' format)
-	 */
 #ifndef MODULE
-	char *option = NULL;
-
-	if (fb_get_options("pm3fb", &option))
+	if (fb_get_options("pm3fb", NULL))
 		return -ENODEV;
-	pm3fb_setup(option);
 #endif
-
 	return pci_register_driver(&pm3fb_driver);
 }
 
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 9cf92ba..3972aa8 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -27,7 +27,6 @@
 #include <linux/vmalloc.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/platform_device.h>
 #include <linux/console.h>
 #include <linux/ioctl.h>
 #include <linux/notifier.h>
@@ -46,6 +45,9 @@
 #include <asm/ps3fb.h>
 #include <asm/ps3.h>
 
+
+#define DEVICE_NAME		"ps3fb"
+
 #ifdef PS3FB_DEBUG
 #define DPRINTK(fmt, args...) printk("%s: " fmt, __func__ , ##args)
 #else
@@ -126,7 +128,6 @@
 
 struct ps3fb_priv {
 	unsigned int irq_no;
-	void *dev;
 
 	u64 context_handle, memory_handle;
 	void *xdr_ea;
@@ -171,7 +172,7 @@
 	{    0,    0,   0,   0 , 0} };
 
 /* default resolution */
-#define GPU_RES_INDEX 0		/* 720 x 480 */
+#define GPU_RES_INDEX	0		/* 720 x 480 */
 
 static const struct fb_videomode ps3fb_modedb[] = {
     /* 60 Hz broadcast modes (modes "1" to "5") */
@@ -298,10 +299,9 @@
 #define FB_OFF(i)	(GPU_OFFSET - VP_OFF(i) % GPU_OFFSET)
 
 static int ps3fb_mode;
-module_param(ps3fb_mode, bool, 0);
+module_param(ps3fb_mode, int, 0);
 
-static char *mode_option __initdata;
-
+static char *mode_option __devinitdata;
 
 static int ps3fb_get_res_table(u32 xres, u32 yres)
 {
@@ -681,15 +681,15 @@
 
 EXPORT_SYMBOL_GPL(ps3fb_wait_for_vsync);
 
-void ps3fb_flip_ctl(int on)
+void ps3fb_flip_ctl(int on, void *data)
 {
+	struct ps3fb_priv *priv = data;
 	if (on)
-		atomic_dec_if_positive(&ps3fb.ext_flip);
+		atomic_dec_if_positive(&priv->ext_flip);
 	else
-		atomic_inc(&ps3fb.ext_flip);
+		atomic_inc(&priv->ext_flip);
 }
 
-EXPORT_SYMBOL_GPL(ps3fb_flip_ctl);
 
     /*
      * ioctl
@@ -812,6 +812,7 @@
 
 static int ps3fbd(void *arg)
 {
+	set_freezable();
 	while (!kthread_should_stop()) {
 		try_to_freeze();
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -851,37 +852,9 @@
 	return IRQ_HANDLED;
 }
 
-#ifndef MODULE
-static int __init ps3fb_setup(char *options)
-{
-	char *this_opt;
-	int mode = 0;
 
-	if (!options || !*options)
-		return 0;	/* no options */
-
-	while ((this_opt = strsep(&options, ",")) != NULL) {
-		if (!*this_opt)
-			continue;
-		if (!strncmp(this_opt, "mode:", 5))
-			mode = simple_strtoul(this_opt + 5, NULL, 0);
-		else
-			mode_option = this_opt;
-	}
-	return mode;
-}
-#endif	/* MODULE */
-
-    /*
-     *  Initialisation
-     */
-
-static void ps3fb_platform_release(struct device *device)
-{
-	/* This is called when the reference count goes to zero. */
-}
-
-static int ps3fb_vsync_settings(struct gpu_driver_info *dinfo, void *dev)
+static int ps3fb_vsync_settings(struct gpu_driver_info *dinfo,
+				struct ps3_system_bus_device *dev)
 {
 	int error;
 
@@ -897,7 +870,6 @@
 		return -EINVAL;
 	}
 
-	ps3fb.dev = dev;
 	error = ps3_irq_plug_setup(PS3_BINDING_CPU_ANY, dinfo->irq.irq_outlet,
 				   &ps3fb.irq_no);
 	if (error) {
@@ -907,7 +879,7 @@
 	}
 
 	error = request_irq(ps3fb.irq_no, ps3fb_vsync_interrupt, IRQF_DISABLED,
-			    "ps3fb vsync", ps3fb.dev);
+			    DEVICE_NAME, dev);
 	if (error) {
 		printk(KERN_ERR "%s: request_irq failed %d\n", __func__,
 		       error);
@@ -966,16 +938,45 @@
 };
 
 static struct fb_fix_screeninfo ps3fb_fix __initdata = {
-	.id =		"PS3 FB",
+	.id =		DEVICE_NAME,
 	.type =		FB_TYPE_PACKED_PIXELS,
 	.visual =	FB_VISUAL_TRUECOLOR,
 	.accel =	FB_ACCEL_NONE,
 };
 
-static int __init ps3fb_probe(struct platform_device *dev)
+static int ps3fb_set_sync(void)
+{
+	int status;
+
+#ifdef HEAD_A
+	status = lv1_gpu_context_attribute(0x0,
+					   L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
+					   0, L1GPU_DISPLAY_SYNC_VSYNC, 0, 0);
+	if (status) {
+		printk(KERN_ERR "%s: lv1_gpu_context_attribute DISPLAY_SYNC "
+		       "failed: %d\n", __func__, status);
+		return -1;
+	}
+#endif
+#ifdef HEAD_B
+	status = lv1_gpu_context_attribute(0x0,
+					   L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
+					   1, L1GPU_DISPLAY_SYNC_VSYNC, 0, 0);
+
+	if (status) {
+		printk(KERN_ERR "%s: lv1_gpu_context_attribute DISPLAY_MODE "
+		       "failed: %d\n", __func__, status);
+		return -1;
+	}
+#endif
+	return 0;
+}
+
+static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
 {
 	struct fb_info *info;
 	int retval = -ENOMEM;
+	u32 xres, yres;
 	u64 ddr_lpar = 0;
 	u64 lpar_dma_control = 0;
 	u64 lpar_driver_info = 0;
@@ -986,6 +987,30 @@
 	unsigned long offset;
 	struct task_struct *task;
 
+	status = ps3_open_hv_device(dev);
+	if (status) {
+		printk(KERN_ERR "%s: ps3_open_hv_device failed\n", __func__);
+		goto err;
+	}
+
+	if (!ps3fb_mode)
+		ps3fb_mode = ps3av_get_mode();
+	DPRINTK("ps3av_mode:%d\n", ps3fb_mode);
+
+	if (ps3fb_mode > 0 &&
+	    !ps3av_video_mode2res(ps3fb_mode, &xres, &yres)) {
+		ps3fb.res_index = ps3fb_get_res_table(xres, yres);
+		DPRINTK("res_index:%d\n", ps3fb.res_index);
+	} else
+		ps3fb.res_index = GPU_RES_INDEX;
+
+	atomic_set(&ps3fb.f_count, -1);	/* fbcon opens ps3fb */
+	atomic_set(&ps3fb.ext_flip, 0);	/* for flip with vsync */
+	init_waitqueue_head(&ps3fb.wait_vsync);
+	ps3fb.num_frames = 1;
+
+	ps3fb_set_sync();
+
 	/* get gpu context handle */
 	status = lv1_gpu_memory_allocate(DDR_SIZE, 0, 0, 0, 0,
 					 &ps3fb.memory_handle, &ddr_lpar);
@@ -1029,7 +1054,7 @@
 	 * leakage into userspace
 	 */
 	memset(ps3fb.xdr_ea, 0, ps3fb_videomemory.size);
-	info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
+	info = framebuffer_alloc(sizeof(u32) * 16, &dev->core);
 	if (!info)
 		goto err_free_irq;
 
@@ -1061,19 +1086,20 @@
 	if (retval < 0)
 		goto err_fb_dealloc;
 
-	platform_set_drvdata(dev, info);
+	dev->core.driver_data = info;
 
 	printk(KERN_INFO
 	       "fb%d: PS3 frame buffer device, using %ld KiB of video memory\n",
 	       info->node, ps3fb_videomemory.size >> 10);
 
-	task = kthread_run(ps3fbd, info, "ps3fbd");
+	task = kthread_run(ps3fbd, info, DEVICE_NAME);
 	if (IS_ERR(task)) {
 		retval = PTR_ERR(task);
 		goto err_unregister_framebuffer;
 	}
 
 	ps3fb.task = task;
+	ps3av_register_flip_ctl(ps3fb_flip_ctl, &ps3fb);
 
 	return 0;
 
@@ -1084,7 +1110,7 @@
 err_framebuffer_release:
 	framebuffer_release(info);
 err_free_irq:
-	free_irq(ps3fb.irq_no, ps3fb.dev);
+	free_irq(ps3fb.irq_no, dev);
 	ps3_irq_plug_destroy(ps3fb.irq_no);
 err_iounmap_dinfo:
 	iounmap((u8 __iomem *)ps3fb.dinfo);
@@ -1096,26 +1122,30 @@
 	return retval;
 }
 
-static void ps3fb_shutdown(struct platform_device *dev)
-{
-	ps3fb_flip_ctl(0);	/* flip off */
-	ps3fb.dinfo->irq.mask = 0;
-	free_irq(ps3fb.irq_no, ps3fb.dev);
-	ps3_irq_plug_destroy(ps3fb.irq_no);
-	iounmap((u8 __iomem *)ps3fb.dinfo);
-}
-
-void ps3fb_cleanup(void)
+static int ps3fb_shutdown(struct ps3_system_bus_device *dev)
 {
 	int status;
+	struct fb_info *info = dev->core.driver_data;
 
+	DPRINTK(" -> %s:%d\n", __func__, __LINE__);
+
+	ps3fb_flip_ctl(0, &ps3fb);	/* flip off */
+	ps3fb.dinfo->irq.mask = 0;
+
+	if (info) {
+		unregister_framebuffer(info);
+		fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	ps3av_register_flip_ctl(NULL, NULL);
 	if (ps3fb.task) {
 		struct task_struct *task = ps3fb.task;
 		ps3fb.task = NULL;
 		kthread_stop(task);
 	}
 	if (ps3fb.irq_no) {
-		free_irq(ps3fb.irq_no, ps3fb.dev);
+		free_irq(ps3fb.irq_no, dev);
 		ps3_irq_plug_destroy(ps3fb.irq_no);
 	}
 	iounmap((u8 __iomem *)ps3fb.dinfo);
@@ -1128,134 +1158,69 @@
 	if (status)
 		DPRINTK("lv1_gpu_memory_free failed: %d\n", status);
 
-	ps3av_dev_close();
-}
+	ps3_close_hv_device(dev);
+	DPRINTK(" <- %s:%d\n", __func__, __LINE__);
 
-EXPORT_SYMBOL_GPL(ps3fb_cleanup);
-
-static int ps3fb_remove(struct platform_device *dev)
-{
-	struct fb_info *info = platform_get_drvdata(dev);
-
-	if (info) {
-		unregister_framebuffer(info);
-		fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
-	ps3fb_cleanup();
 	return 0;
 }
 
-static struct platform_driver ps3fb_driver = {
-	.probe	= ps3fb_probe,
-	.remove = ps3fb_remove,
-	.shutdown = ps3fb_shutdown,
-	.driver = { .name = "ps3fb" }
+static struct ps3_system_bus_driver ps3fb_driver = {
+	.match_id	= PS3_MATCH_ID_GRAPHICS,
+	.core.name	= DEVICE_NAME,
+	.core.owner	= THIS_MODULE,
+	.probe		= ps3fb_probe,
+	.remove		= ps3fb_shutdown,
+	.shutdown	= ps3fb_shutdown,
 };
 
-static struct platform_device ps3fb_device = {
-	.name	= "ps3fb",
-	.id	= 0,
-	.dev	= { .release = ps3fb_platform_release }
-};
-
-int ps3fb_set_sync(void)
+static int __init ps3fb_setup(void)
 {
-	int status;
+	char *options;
 
-#ifdef HEAD_A
-	status = lv1_gpu_context_attribute(0x0,
-					   L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
-					   0, L1GPU_DISPLAY_SYNC_VSYNC, 0, 0);
-	if (status) {
-		printk(KERN_ERR
-		       "%s: lv1_gpu_context_attribute DISPLAY_SYNC failed: %d\n",
-		       __func__, status);
-		return -1;
-	}
+#ifdef MODULE
+	return 0;
 #endif
-#ifdef HEAD_B
-	status = lv1_gpu_context_attribute(0x0,
-					   L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
-					   1, L1GPU_DISPLAY_SYNC_VSYNC, 0, 0);
 
-	if (status) {
-		printk(KERN_ERR
-		       "%s: lv1_gpu_context_attribute DISPLAY_MODE failed: %d\n",
-		       __func__, status);
-		return -1;
+	if (fb_get_options(DEVICE_NAME, &options))
+		return -ENXIO;
+
+	if (!options || !*options)
+		return 0;
+
+	while (1) {
+		char *this_opt = strsep(&options, ",");
+
+		if (!this_opt)
+			break;
+		if (!*this_opt)
+			continue;
+		if (!strncmp(this_opt, "mode:", 5))
+			ps3fb_mode = simple_strtoul(this_opt + 5, NULL, 0);
+		else
+			mode_option = this_opt;
 	}
-#endif
 	return 0;
 }
 
-EXPORT_SYMBOL_GPL(ps3fb_set_sync);
-
 static int __init ps3fb_init(void)
 {
-	int error;
-#ifndef MODULE
-	int mode;
-	char *option = NULL;
+	if (!ps3fb_videomemory.address || ps3fb_setup())
+		return -ENXIO;
 
-	if (fb_get_options("ps3fb", &option))
-		goto err;
-#endif
+	return ps3_system_bus_driver_register(&ps3fb_driver);
+}
 
-	if (!ps3fb_videomemory.address)
-		goto err;
-
-	error = ps3av_dev_open();
-	if (error) {
-		printk(KERN_ERR "%s: ps3av_dev_open failed\n", __func__);
-		goto err;
-	}
-
-	ps3fb_mode = ps3av_get_mode();
-	DPRINTK("ps3av_mode:%d\n", ps3fb_mode);
-#ifndef MODULE
-	mode = ps3fb_setup(option);	/* check boot option */
-	if (mode)
-		ps3fb_mode = mode;
-#endif
-	if (ps3fb_mode > 0) {
-		u32 xres, yres;
-		ps3av_video_mode2res(ps3fb_mode, &xres, &yres);
-		ps3fb.res_index = ps3fb_get_res_table(xres, yres);
-		DPRINTK("res_index:%d\n", ps3fb.res_index);
-	} else
-		ps3fb.res_index = GPU_RES_INDEX;
-
-	atomic_set(&ps3fb.f_count, -1);	/* fbcon opens ps3fb */
-	atomic_set(&ps3fb.ext_flip, 0);	/* for flip with vsync */
-	init_waitqueue_head(&ps3fb.wait_vsync);
-	ps3fb.num_frames = 1;
-
-	error = platform_driver_register(&ps3fb_driver);
-	if (!error) {
-		error = platform_device_register(&ps3fb_device);
-		if (error)
-			platform_driver_unregister(&ps3fb_driver);
-	}
-
-	ps3fb_set_sync();
-
-	return error;
-
-err:
-	return -ENXIO;
+static void __exit ps3fb_exit(void)
+{
+	DPRINTK(" -> %s:%d\n", __func__, __LINE__);
+	ps3_system_bus_driver_unregister(&ps3fb_driver);
+	DPRINTK(" <- %s:%d\n", __func__, __LINE__);
 }
 
 module_init(ps3fb_init);
-
-#ifdef MODULE
-static void __exit ps3fb_exit(void)
-{
-	platform_device_unregister(&ps3fb_device);
-	platform_driver_unregister(&ps3fb_driver);
-}
-
 module_exit(ps3fb_exit);
 
 MODULE_LICENSE("GPL");
-#endif				/* MODULE */
+MODULE_DESCRIPTION("PS3 GPU Frame Buffer Driver");
+MODULE_AUTHOR("Sony Computer Entertainment Inc.");
+MODULE_ALIAS(PS3_MODULE_ALIAS_GRAPHICS);
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index 2ba959a..f930026 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -333,24 +333,25 @@
 		      ((blue  & 0xf800) >> 11);
 
 		pvr2fb_set_pal_entry(par, regno, tmp);
-		((u16*)(info->pseudo_palette))[regno] = tmp;
 		break;
 	    case 24: /* RGB 888 */
 		red >>= 8; green >>= 8; blue >>= 8;
-		((u32*)(info->pseudo_palette))[regno] = (red << 16) | (green << 8) | blue;
+		tmp = (red << 16) | (green << 8) | blue;
 		break;
 	    case 32: /* ARGB 8888 */
 		red >>= 8; green >>= 8; blue >>= 8;
 		tmp = (transp << 24) | (red << 16) | (green << 8) | blue;
 
 		pvr2fb_set_pal_entry(par, regno, tmp);
-		((u32*)(info->pseudo_palette))[regno] = tmp;
 		break;
 	    default:
 		pr_debug("Invalid bit depth %d?!?\n", info->var.bits_per_pixel);
 		return 1;
 	}
 
+	if (regno < 16)
+		((u32 *)(info->pseudo_palette))[regno] = tmp;
+
 	return 0;
 }
 
@@ -1081,13 +1082,12 @@
 #endif
 	size = sizeof(struct fb_info) + sizeof(struct pvr2fb_par) + 16 * sizeof(u32);
 
-	fb_info = kmalloc(size, GFP_KERNEL);
+	fb_info = kzalloc(size, GFP_KERNEL);
 	if (!fb_info) {
 		printk(KERN_ERR "Failed to allocate memory for fb_info\n");
 		return -ENOMEM;
 	}
 
-	memset(fb_info, 0, size);
 
 	currentpar = (struct pvr2fb_par *)(fb_info + 1);
 
diff --git a/drivers/video/q40fb.c b/drivers/video/q40fb.c
index 48536c3..4beac1d 100644
--- a/drivers/video/q40fb.c
+++ b/drivers/video/q40fb.c
@@ -95,7 +95,7 @@
 	/* mapped in q40/config.c */
 	q40fb_fix.smem_start = Q40_PHYS_SCREEN_ADDR;
 
-	info = framebuffer_alloc(sizeof(u32) * 256, &dev->dev);
+	info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
 	if (!info)
 		return -ENOMEM;
 
diff --git a/drivers/video/riva/riva_hw.c b/drivers/video/riva/riva_hw.c
index 70bfd78..1330770 100644
--- a/drivers/video/riva/riva_hw.c
+++ b/drivers/video/riva/riva_hw.c
@@ -1223,6 +1223,8 @@
         }
     }
     }
+
+    /* Non-zero: M/N/P/clock values assigned.  Zero: error (values not set). */
     return (DeltaOld != 0xFFFFFFFF);
 }
 /*
@@ -1240,7 +1242,10 @@
     int            dotClock
 )
 {
-    int pixelDepth, VClk, m, n, p;
+    int pixelDepth;
+    int uninitialized_var(VClk), uninitialized_var(m),
+        uninitialized_var(n), uninitialized_var(p);
+
     /*
      * Save mode parameters.
      */
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 3d7507a..b855f4a 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2174,11 +2174,10 @@
 
 #if defined(CONFIG_FB_SAVAGE_ACCEL)
 	/* FIFO size + padding for commands */
-	info->pixmap.addr = kmalloc(8*1024, GFP_KERNEL);
+	info->pixmap.addr = kcalloc(8, 1024, GFP_KERNEL);
 
 	err = -ENOMEM;
 	if (info->pixmap.addr) {
-		memset(info->pixmap.addr, 0, 8*1024);
 		info->pixmap.size = 8*1024;
 		info->pixmap.scan_align = 4;
 		info->pixmap.buf_align = 4;
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index ebb6756..4fb1624 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -752,7 +752,7 @@
 	struct fb_info *info;
 	char *monitor;
 
-	info = framebuffer_alloc(sizeof(struct sgivw_par) + sizeof(u32) * 256, &dev->dev);
+	info = framebuffer_alloc(sizeof(struct sgivw_par) + sizeof(u32) * 16, &dev->dev);
 	if (!info)
 		return -ENOMEM;
 	par = info->par;
diff --git a/drivers/video/sis/sis.h b/drivers/video/sis/sis.h
index d5e2d9c..d53bf69 100644
--- a/drivers/video/sis/sis.h
+++ b/drivers/video/sis/sis.h
@@ -479,7 +479,7 @@
 	struct fb_var_screeninfo default_var;
 
 	struct fb_fix_screeninfo sisfb_fix;
-	u32		pseudo_palette[17];
+	u32		pseudo_palette[16];
 
 	struct sisfb_monitor {
 		u16 hmin;
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 93d07ef..e8ccace 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1405,12 +1405,18 @@
 		}
 		break;
 	case 16:
+		if (regno >= 16)
+			break;
+
 		((u32 *)(info->pseudo_palette))[regno] =
 				(red & 0xf800)          |
 				((green & 0xfc00) >> 5) |
 				((blue & 0xf800) >> 11);
 		break;
 	case 32:
+		if (regno >= 16)
+			break;
+
 		red >>= 8;
 		green >>= 8;
 		blue >>= 8;
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index 5c0dab6..89facb7 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -1634,7 +1634,7 @@
 		      FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT;
 	info->fbops = &tgafb_ops;
 	info->screen_base = par->tga_fb_base;
-	info->pseudo_palette = (void *)(par + 1);
+	info->pseudo_palette = par->palette;
 
 	/* This should give a reasonable default video mode.  */
 	if (tga_bus_pci) {
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index 55e8aa4..c699864 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -976,7 +976,7 @@
 		return 1;
 
 
-	if (bpp==8) {
+	if (bpp == 8) {
 		t_outb(0xFF,0x3C6);
 		t_outb(regno,0x3C8);
 
@@ -984,19 +984,21 @@
 		t_outb(green>>10,0x3C9);
 		t_outb(blue>>10,0x3C9);
 
-	} else if (bpp == 16) {	/* RGB 565 */
-		u32 col;
+	} else if (regno < 16) {
+		if (bpp == 16) {	/* RGB 565 */
+			u32 col;
 
-		col = (red & 0xF800) | ((green & 0xFC00) >> 5) |
-			((blue & 0xF800) >> 11);
-		col |= col << 16;	
-		((u32 *)(info->pseudo_palette))[regno] = col;
-	} else if (bpp == 32)		/* ARGB 8888 */
-		((u32*)info->pseudo_palette)[regno] =
-			((transp & 0xFF00) <<16) 	|
-			((red & 0xFF00) << 8) 		|
-			((green & 0xFF00))		|
-			((blue & 0xFF00)>>8);
+			col = (red & 0xF800) | ((green & 0xFC00) >> 5) |
+				((blue & 0xF800) >> 11);
+			col |= col << 16;
+			((u32 *)(info->pseudo_palette))[regno] = col;
+		} else if (bpp == 32)		/* ARGB 8888 */
+			((u32 *)info->pseudo_palette)[regno] =
+				((transp & 0xFF00) << 16)	|
+				((red & 0xFF00) << 8)		|
+				((green & 0xFF00))		|
+				((blue & 0xFF00) >> 8);
+	}
 
 //	debug("exit\n");
 	return 0;
diff --git a/drivers/video/tx3912fb.c b/drivers/video/tx3912fb.c
index 07389ba..e6f7c78 100644
--- a/drivers/video/tx3912fb.c
+++ b/drivers/video/tx3912fb.c
@@ -291,7 +291,7 @@
 	fb_info.fbops = &tx3912fb_ops;
 	fb_info.var = tx3912fb_var;
 	fb_info.fix = tx3912fb_fix;
-	fb_info.pseudo_palette = pseudo_palette;
+	fb_info.pseudo_palette = cfb8;
 	fb_info.flags = FBINFO_DEFAULT;
 
 	/* Clear the framebuffer */
diff --git a/drivers/video/valkyriefb.c b/drivers/video/valkyriefb.c
index ad66f07..7b0cef9 100644
--- a/drivers/video/valkyriefb.c
+++ b/drivers/video/valkyriefb.c
@@ -356,10 +356,9 @@
 	}
 #endif /* ppc (!CONFIG_MAC) */
 
-	p = kmalloc(sizeof(*p), GFP_ATOMIC);
+	p = kzalloc(sizeof(*p), GFP_ATOMIC);
 	if (p == 0)
 		return -ENOMEM;
-	memset(p, 0, sizeof(*p));
 
 	/* Map in frame buffer and registers */
 	if (!request_mem_region(frame_buffer_phys, 0x100000, "valkyriefb")) {
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index 30c0b94..4c3a633 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -68,26 +68,26 @@
 
 /* CRT timing register sets */
 
-struct vga_regset vt8623_h_total_regs[]       = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
-struct vga_regset vt8623_h_display_regs[]     = {{0x01, 0, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_h_blank_end_regs[]   = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
-struct vga_regset vt8623_h_sync_start_regs[]  = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
-struct vga_regset vt8623_h_sync_end_regs[]    = {{0x05, 0, 4}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_total_regs[]       = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_display_regs[]     = {{0x01, 0, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_blank_end_regs[]   = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_sync_start_regs[]  = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_sync_end_regs[]    = {{0x05, 0, 4}, VGA_REGSET_END};
 
-struct vga_regset vt8623_v_total_regs[]       = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
-struct vga_regset vt8623_v_display_regs[]     = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
-struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
-struct vga_regset vt8623_v_blank_end_regs[]   = {{0x16, 0, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_v_sync_start_regs[]  = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
-struct vga_regset vt8623_v_sync_end_regs[]    = {{0x11, 0, 3}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_total_regs[]       = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_display_regs[]     = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_blank_end_regs[]   = {{0x16, 0, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_sync_start_regs[]  = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_sync_end_regs[]    = {{0x11, 0, 3}, VGA_REGSET_END};
 
-struct vga_regset vt8623_offset_regs[]        = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_line_compare_regs[]  = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
-struct vga_regset vt8623_fetch_count_regs[]   = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
-struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
+static struct vga_regset vt8623_offset_regs[]        = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_line_compare_regs[]  = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
+static struct vga_regset vt8623_fetch_count_regs[]   = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
+static struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
 
-struct svga_timing_regs vt8623_timing_regs     = {
+static struct svga_timing_regs vt8623_timing_regs     = {
 	vt8623_h_total_regs, vt8623_h_display_regs, vt8623_h_blank_start_regs,
 	vt8623_h_blank_end_regs, vt8623_h_sync_start_regs, vt8623_h_sync_end_regs,
 	vt8623_v_total_regs, vt8623_v_display_regs, vt8623_v_blank_start_regs,
@@ -903,7 +903,7 @@
 
 /* Driver Initialisation */
 
-int __init vt8623fb_init(void)
+static int __init vt8623fb_init(void)
 {
 
 #ifndef MODULE
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 6f9d880..d356da5 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -164,7 +164,7 @@
 	if (pdev->vendor != PCI_VENDOR_ID_MATROX || pdev->device != PCI_DEVICE_ID_MATROX_G400)
 		return -ENODEV;
 
-	dev = kmalloc(sizeof(struct matrox_device) +
+	dev = kzalloc(sizeof(struct matrox_device) +
 		       sizeof(struct w1_bus_master), GFP_KERNEL);
 	if (!dev) {
 		dev_err(&pdev->dev,
@@ -173,7 +173,6 @@
 		return -ENOMEM;
 	}
 
-	memset(dev, 0, sizeof(struct matrox_device) + sizeof(struct w1_bus_master));
 
 	dev->bus_master = (struct w1_bus_master *)(dev + 1);
 
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index cab5600..858c16a 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -266,10 +266,9 @@
 #ifdef CONFIG_W1_SLAVE_DS2433_CRC
 	struct w1_f23_data *data;
 
-	data = kmalloc(sizeof(struct w1_f23_data), GFP_KERNEL);
+	data = kzalloc(sizeof(struct w1_f23_data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
-	memset(data, 0, sizeof(struct w1_f23_data));
 	sl->family_data = data;
 
 #endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index f5c5b76..8d7ab74 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -520,7 +520,7 @@
 	int err;
 	struct w1_netlink_msg msg;
 
-	sl = kmalloc(sizeof(struct w1_slave), GFP_KERNEL);
+	sl = kzalloc(sizeof(struct w1_slave), GFP_KERNEL);
 	if (!sl) {
 		dev_err(&dev->dev,
 			 "%s: failed to allocate new slave device.\n",
@@ -528,7 +528,6 @@
 		return -ENOMEM;
 	}
 
-	memset(sl, 0, sizeof(*sl));
 
 	sl->owner = THIS_MODULE;
 	sl->master = dev;
@@ -805,6 +804,7 @@
 	struct w1_master *dev, *n;
 	int have_to_wait = 0;
 
+	set_freezable();
 	while (!kthread_should_stop() || have_to_wait) {
 		have_to_wait = 0;
 
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 258defd..2fbd8dd 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -41,7 +41,7 @@
 	/*
 	 * We are in process context(kernel thread), so can sleep.
 	 */
-	dev = kmalloc(sizeof(struct w1_master) + sizeof(struct w1_bus_master), GFP_KERNEL);
+	dev = kzalloc(sizeof(struct w1_master) + sizeof(struct w1_bus_master), GFP_KERNEL);
 	if (!dev) {
 		printk(KERN_ERR
 			"Failed to allocate %zd bytes for new w1 device.\n",
@@ -49,7 +49,6 @@
 		return NULL;
 	}
 
-	memset(dev, 0, sizeof(struct w1_master) + sizeof(struct w1_bus_master));
 
 	dev->bus_master = (struct w1_bus_master *)(dev + 1);
 
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
new file mode 100644
index 0000000..56592f0
--- /dev/null
+++ b/drivers/xen/Makefile
@@ -0,0 +1,2 @@
+obj-y	+= grant-table.o
+obj-y	+= xenbus/
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
new file mode 100644
index 0000000..ea94dba
--- /dev/null
+++ b/drivers/xen/grant-table.c
@@ -0,0 +1,582 @@
+/******************************************************************************
+ * grant_table.c
+ *
+ * Granting foreign access to our memory reservation.
+ *
+ * Copyright (c) 2005-2006, Christopher Clark
+ * Copyright (c) 2004-2005, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+
+#include <xen/interface/xen.h>
+#include <xen/page.h>
+#include <xen/grant_table.h>
+
+#include <asm/pgtable.h>
+#include <asm/sync_bitops.h>
+
+
+/* External tools reserve first few grant table entries. */
+#define NR_RESERVED_ENTRIES 8
+#define GNTTAB_LIST_END 0xffffffff
+#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(struct grant_entry))
+
+static grant_ref_t **gnttab_list;
+static unsigned int nr_grant_frames;
+static unsigned int boot_max_nr_grant_frames;
+static int gnttab_free_count;
+static grant_ref_t gnttab_free_head;
+static DEFINE_SPINLOCK(gnttab_list_lock);
+
+static struct grant_entry *shared;
+
+static struct gnttab_free_callback *gnttab_free_callback_list;
+
+static int gnttab_expand(unsigned int req_entries);
+
+#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
+
+static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
+{
+	return &gnttab_list[(entry) / RPP][(entry) % RPP];
+}
+/* This can be used as an l-value */
+#define gnttab_entry(entry) (*__gnttab_entry(entry))
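The free list above is a two-level array: each page of grant_ref_t slots covers RPP entries, and an entry index splits into a page index and an offset within that page. A standalone sketch of the arithmetic (the 4 KiB page size and 4-byte grant_ref_t width are assumptions made only for this example):

#include <stdio.h>

/* Standalone illustration of the two-level free-list indexing above.
 * The page size and grant_ref_t width are assumed for this example. */
#define EX_PAGE_SIZE	4096u
#define EX_RPP		(EX_PAGE_SIZE / 4u)	/* refs per list page: 1024 */

int main(void)
{
	unsigned int entry = 2500;

	/* gnttab_entry(2500) resolves to gnttab_list[2][452] */
	printf("entry %u -> gnttab_list[%u][%u]\n",
	       entry, entry / EX_RPP, entry % EX_RPP);
	return 0;
}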
+
+static int get_free_entries(unsigned count)
+{
+	unsigned long flags;
+	int ref, rc;
+	grant_ref_t head;
+
+	spin_lock_irqsave(&gnttab_list_lock, flags);
+
+	if ((gnttab_free_count < count) &&
+	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
+		spin_unlock_irqrestore(&gnttab_list_lock, flags);
+		return rc;
+	}
+
+	ref = head = gnttab_free_head;
+	gnttab_free_count -= count;
+	while (count-- > 1)
+		head = gnttab_entry(head);
+	gnttab_free_head = gnttab_entry(head);
+	gnttab_entry(head) = GNTTAB_LIST_END;
+
+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
+
+	return ref;
+}
+
+static void do_free_callbacks(void)
+{
+	struct gnttab_free_callback *callback, *next;
+
+	callback = gnttab_free_callback_list;
+	gnttab_free_callback_list = NULL;
+
+	while (callback != NULL) {
+		next = callback->next;
+		if (gnttab_free_count >= callback->count) {
+			callback->next = NULL;
+			callback->fn(callback->arg);
+		} else {
+			callback->next = gnttab_free_callback_list;
+			gnttab_free_callback_list = callback;
+		}
+		callback = next;
+	}
+}
+
+static inline void check_free_callbacks(void)
+{
+	if (unlikely(gnttab_free_callback_list))
+		do_free_callbacks();
+}
+
+static void put_free_entry(grant_ref_t ref)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&gnttab_list_lock, flags);
+	gnttab_entry(ref) = gnttab_free_head;
+	gnttab_free_head = ref;
+	gnttab_free_count++;
+	check_free_callbacks();
+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+
+static void update_grant_entry(grant_ref_t ref, domid_t domid,
+			       unsigned long frame, unsigned flags)
+{
+	/*
+	 * Introducing a valid entry into the grant table:
+	 *  1. Write ent->domid.
+	 *  2. Write ent->frame:
+	 *      GTF_permit_access:   Frame to which access is permitted.
+	 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
+	 *                           frame, or zero if none.
+	 *  3. Write memory barrier (WMB).
+	 *  4. Write ent->flags, inc. valid type.
+	 */
+	shared[ref].frame = frame;
+	shared[ref].domid = domid;
+	wmb();
+	shared[ref].flags = flags;
+}
+
+/*
+ * Public grant-issuing interface functions
+ */
+void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
+				     unsigned long frame, int readonly)
+{
+	update_grant_entry(ref, domid, frame,
+			   GTF_permit_access | (readonly ? GTF_readonly : 0));
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
+
+int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
+				int readonly)
+{
+	int ref;
+
+	ref = get_free_entries(1);
+	if (unlikely(ref < 0))
+		return -ENOSPC;
+
+	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
+
+	return ref;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
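A hedged usage sketch, not part of the patch: how a frontend might call gnttab_grant_foreign_access() to share one ring page with its backend. The helper virt_to_mfn() and the surrounding driver are assumptions.

#include <xen/grant_table.h>
#include <xen/page.h>

/* Hypothetical frontend helper: grant the backend read/write access to
 * one shared page.  virt_to_mfn() is assumed to be available here. */
static int share_ring_page(domid_t backend_id, void *ring_page)
{
	int gref = gnttab_grant_foreign_access(backend_id,
					       virt_to_mfn(ring_page), 0);

	if (gref < 0)
		return gref;	/* -ENOSPC: no free grant entries */

	/* The gref would normally be written to XenStore for the backend;
	 * revoke it later with gnttab_end_foreign_access(gref, 0, ...). */
	return gref;
}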
+
+int gnttab_query_foreign_access(grant_ref_t ref)
+{
+	u16 nflags;
+
+	nflags = shared[ref].flags;
+
+	return (nflags & (GTF_reading|GTF_writing));
+}
+EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
+
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+{
+	u16 flags, nflags;
+
+	nflags = shared[ref].flags;
+	do {
+		flags = nflags;
+		if (flags & (GTF_reading|GTF_writing)) {
+			printk(KERN_ALERT "WARNING: g.e. still in use!\n");
+			return 0;
+		}
+	} while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
+
+void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
+			       unsigned long page)
+{
+	if (gnttab_end_foreign_access_ref(ref, readonly)) {
+		put_free_entry(ref);
+		if (page != 0)
+			free_page(page);
+	} else {
+		/* XXX This needs to be fixed so that the ref and page are
+		   placed on a list to be freed up later. */
+		printk(KERN_WARNING
+		       "WARNING: leaking g.e. and page still in use!\n");
+	}
+}
+EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
+
+int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
+{
+	int ref;
+
+	ref = get_free_entries(1);
+	if (unlikely(ref < 0))
+		return -ENOSPC;
+	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
+
+	return ref;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
+
+void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
+				       unsigned long pfn)
+{
+	update_grant_entry(ref, domid, pfn, GTF_accept_transfer);
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
+
+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
+{
+	unsigned long frame;
+	u16           flags;
+
+	/*
+	 * If a transfer is not even yet started, try to reclaim the grant
+	 * reference and return failure (== 0).
+	 */
+	while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
+		if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags)
+			return 0;
+		cpu_relax();
+	}
+
+	/* If a transfer is in progress then wait until it is completed. */
+	while (!(flags & GTF_transfer_completed)) {
+		flags = shared[ref].flags;
+		cpu_relax();
+	}
+
+	rmb();	/* Read the frame number /after/ reading completion status. */
+	frame = shared[ref].frame;
+	BUG_ON(frame == 0);
+
+	return frame;
+}
+EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
+
+unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
+{
+	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
+	put_free_entry(ref);
+	return frame;
+}
+EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
+
+void gnttab_free_grant_reference(grant_ref_t ref)
+{
+	put_free_entry(ref);
+}
+EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
+
+void gnttab_free_grant_references(grant_ref_t head)
+{
+	grant_ref_t ref;
+	unsigned long flags;
+	int count = 1;
+	if (head == GNTTAB_LIST_END)
+		return;
+	spin_lock_irqsave(&gnttab_list_lock, flags);
+	ref = head;
+	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
+		ref = gnttab_entry(ref);
+		count++;
+	}
+	gnttab_entry(ref) = gnttab_free_head;
+	gnttab_free_head = head;
+	gnttab_free_count += count;
+	check_free_callbacks();
+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
+
+int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
+{
+	int h = get_free_entries(count);
+
+	if (h < 0)
+		return -ENOSPC;
+
+	*head = h;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
+
+int gnttab_empty_grant_references(const grant_ref_t *private_head)
+{
+	return (*private_head == GNTTAB_LIST_END);
+}
+EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
+
+int gnttab_claim_grant_reference(grant_ref_t *private_head)
+{
+	grant_ref_t g = *private_head;
+	if (unlikely(g == GNTTAB_LIST_END))
+		return -ENOSPC;
+	*private_head = gnttab_entry(g);
+	return g;
+}
+EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
+
+void gnttab_release_grant_reference(grant_ref_t *private_head,
+				    grant_ref_t release)
+{
+	gnttab_entry(release) = *private_head;
+	*private_head = release;
+}
+EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
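An illustrative sketch (hypothetical driver code) of the batch pattern these helpers support: reserve a private chain of references, claim and release individual entries from it, then hand the whole chain back.

#include <xen/grant_table.h>

/* Illustrative batch usage; the count of 16 and the surrounding driver
 * are hypothetical. */
static int use_grant_batch(void)
{
	grant_ref_t head;
	int ref, err;

	err = gnttab_alloc_grant_references(16, &head);
	if (err)
		return err;

	ref = gnttab_claim_grant_reference(&head);	/* take one entry */
	if (ref >= 0) {
		/* ... use it with gnttab_grant_foreign_access_ref() ... */
		gnttab_release_grant_reference(&head, ref);	/* put it back */
	}

	gnttab_free_grant_references(head);	/* return the whole chain */
	return 0;
}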
+
+void gnttab_request_free_callback(struct gnttab_free_callback *callback,
+				  void (*fn)(void *), void *arg, u16 count)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&gnttab_list_lock, flags);
+	if (callback->next)
+		goto out;
+	callback->fn = fn;
+	callback->arg = arg;
+	callback->count = count;
+	callback->next = gnttab_free_callback_list;
+	gnttab_free_callback_list = callback;
+	check_free_callbacks();
+out:
+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
+
+void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
+{
+	struct gnttab_free_callback **pcb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gnttab_list_lock, flags);
+	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
+		if (*pcb == callback) {
+			*pcb = callback->next;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
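A sketch, under stated assumptions, of how a caller might use the free callback when get_free_entries() has run dry; the retry function and how it reschedules work are hypothetical. Note that the callback is invoked with the grant-table spinlock held and interrupts off, so it must not sleep.

#include <xen/grant_table.h>

/* Hypothetical retry hook: ask to be called back once at least 8 grant
 * references are free again. */
static struct gnttab_free_callback retry_cb;

static void retry_ring_setup(void *arg)
{
	/* runs in atomic context; e.g. schedule a work item that
	 * re-attempts the allocation */
}

static void wait_for_free_grants(void *arg)
{
	gnttab_request_free_callback(&retry_cb, retry_ring_setup, arg, 8);
}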
+
+static int grow_gnttab_list(unsigned int more_frames)
+{
+	unsigned int new_nr_grant_frames, extra_entries, i;
+
+	new_nr_grant_frames = nr_grant_frames + more_frames;
+	extra_entries       = more_frames * GREFS_PER_GRANT_FRAME;
+
+	for (i = nr_grant_frames; i < new_nr_grant_frames; i++) {
+		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
+		if (!gnttab_list[i])
+			goto grow_nomem;
+	}
+
+	for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
+	     i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
+		gnttab_entry(i) = i + 1;
+
+	gnttab_entry(i) = gnttab_free_head;
+	gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
+	gnttab_free_count += extra_entries;
+
+	nr_grant_frames = new_nr_grant_frames;
+
+	check_free_callbacks();
+
+	return 0;
+
+grow_nomem:
+	for ( ; i >= nr_grant_frames; i--)
+		free_page((unsigned long) gnttab_list[i]);
+	return -ENOMEM;
+}
+
+static unsigned int __max_nr_grant_frames(void)
+{
+	struct gnttab_query_size query;
+	int rc;
+
+	query.dom = DOMID_SELF;
+
+	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
+	if ((rc < 0) || (query.status != GNTST_okay))
+		return 4; /* Legacy max supported number of frames */
+
+	return query.max_nr_frames;
+}
+
+static inline unsigned int max_nr_grant_frames(void)
+{
+	unsigned int xen_max = __max_nr_grant_frames();
+
+	if (xen_max > boot_max_nr_grant_frames)
+		return boot_max_nr_grant_frames;
+	return xen_max;
+}
+
+static int map_pte_fn(pte_t *pte, struct page *pmd_page,
+		      unsigned long addr, void *data)
+{
+	unsigned long **frames = (unsigned long **)data;
+
+	set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
+	(*frames)++;
+	return 0;
+}
+
+static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
+			unsigned long addr, void *data)
+{
+
+	set_pte_at(&init_mm, addr, pte, __pte(0));
+	return 0;
+}
+
+static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
+{
+	struct gnttab_setup_table setup;
+	unsigned long *frames;
+	unsigned int nr_gframes = end_idx + 1;
+	int rc;
+
+	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
+	if (!frames)
+		return -ENOMEM;
+
+	setup.dom        = DOMID_SELF;
+	setup.nr_frames  = nr_gframes;
+	setup.frame_list = frames;
+
+	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
+	if (rc == -ENOSYS) {
+		kfree(frames);
+		return -ENOSYS;
+	}
+
+	BUG_ON(rc || setup.status);
+
+	if (shared == NULL) {
+		struct vm_struct *area;
+		area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
+		BUG_ON(area == NULL);
+		shared = area->addr;
+	}
+	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
+				 PAGE_SIZE * nr_gframes,
+				 map_pte_fn, &frames);
+	BUG_ON(rc);
+	frames -= nr_gframes; /* adjust after map_pte_fn() */
+
+	kfree(frames);
+
+	return 0;
+}
+
+static int gnttab_resume(void)
+{
+	if (max_nr_grant_frames() < nr_grant_frames)
+		return -ENOSYS;
+	return gnttab_map(0, nr_grant_frames - 1);
+}
+
+static int gnttab_suspend(void)
+{
+	apply_to_page_range(&init_mm, (unsigned long)shared,
+			    PAGE_SIZE * nr_grant_frames,
+			    unmap_pte_fn, NULL);
+
+	return 0;
+}
+
+static int gnttab_expand(unsigned int req_entries)
+{
+	int rc;
+	unsigned int cur, extra;
+
+	cur = nr_grant_frames;
+	extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
+		 GREFS_PER_GRANT_FRAME);
+	if (cur + extra > max_nr_grant_frames())
+		return -ENOSPC;
+
+	rc = gnttab_map(cur, cur + extra - 1);
+	if (rc == 0)
+		rc = grow_gnttab_list(extra);
+
+	return rc;
+}
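Worked example of the sizing above, assuming 4 KiB pages and the 8-byte struct grant_entry (so GREFS_PER_GRANT_FRAME is 512): a request for 600 additional entries rounds up to extra = (600 + 511) / 512 = 2 frames, i.e. room for 1024 more grant references.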
+
+static int __devinit gnttab_init(void)
+{
+	int i;
+	unsigned int max_nr_glist_frames;
+	unsigned int nr_init_grefs;
+
+	if (!is_running_on_xen())
+		return -ENODEV;
+
+	nr_grant_frames = 1;
+	boot_max_nr_grant_frames = __max_nr_grant_frames();
+
+	/* Determine the maximum number of frames required for the
+	 * grant reference free list on the current hypervisor.
+	 */
+	max_nr_glist_frames = (boot_max_nr_grant_frames *
+			       GREFS_PER_GRANT_FRAME /
+			       (PAGE_SIZE / sizeof(grant_ref_t)));
+
+	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
+			      GFP_KERNEL);
+	if (gnttab_list == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < nr_grant_frames; i++) {
+		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
+		if (gnttab_list[i] == NULL)
+			goto ini_nomem;
+	}
+
+	if (gnttab_resume() < 0)
+		return -ENODEV;
+
+	nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
+
+	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
+		gnttab_entry(i) = i + 1;
+
+	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
+	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
+	gnttab_free_head  = NR_RESERVED_ENTRIES;
+
+	printk(KERN_INFO "Grant table initialized\n");
+	return 0;
+
+ ini_nomem:
+	for (i--; i >= 0; i--)
+		free_page((unsigned long)gnttab_list[i]);
+	kfree(gnttab_list);
+	return -ENOMEM;
+}
+
+core_initcall(gnttab_init);
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
new file mode 100644
index 0000000..5571f5b
--- /dev/null
+++ b/drivers/xen/xenbus/Makefile
@@ -0,0 +1,7 @@
+obj-y	+= xenbus.o
+
+xenbus-objs =
+xenbus-objs += xenbus_client.o
+xenbus-objs += xenbus_comms.o
+xenbus-objs += xenbus_xs.o
+xenbus-objs += xenbus_probe.o
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
new file mode 100644
index 0000000..9fd2f70
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -0,0 +1,569 @@
+/******************************************************************************
+ * Client-facing interface for the Xenbus driver.  In other words, the
+ * interface between the Xenbus and the device-specific code, be it the
+ * frontend or the backend of that driver.
+ *
+ * Copyright (C) 2005 XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/event_channel.h>
+#include <xen/events.h>
+#include <xen/grant_table.h>
+#include <xen/xenbus.h>
+
+const char *xenbus_strstate(enum xenbus_state state)
+{
+	static const char *const name[] = {
+		[ XenbusStateUnknown      ] = "Unknown",
+		[ XenbusStateInitialising ] = "Initialising",
+		[ XenbusStateInitWait     ] = "InitWait",
+		[ XenbusStateInitialised  ] = "Initialised",
+		[ XenbusStateConnected    ] = "Connected",
+		[ XenbusStateClosing      ] = "Closing",
+		[ XenbusStateClosed	  ] = "Closed",
+	};
+	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
+}
+EXPORT_SYMBOL_GPL(xenbus_strstate);
+
+/**
+ * xenbus_watch_path - register a watch
+ * @dev: xenbus device
+ * @path: path to watch
+ * @watch: watch to register
+ * @callback: callback to register
+ *
+ * Register a @watch on the given path, using the given xenbus_watch structure
+ * for storage, and the given @callback function as the callback.  Return 0 on
+ * success, or -errno on error.  On success, the given @path will be saved as
+ * @watch->node, and remains the caller's to free.  On error, @watch->node will
+ * be NULL, the device will switch to %XenbusStateClosing, and the error will
+ * be saved in the store.
+ */
+int xenbus_watch_path(struct xenbus_device *dev, const char *path,
+		      struct xenbus_watch *watch,
+		      void (*callback)(struct xenbus_watch *,
+				       const char **, unsigned int))
+{
+	int err;
+
+	watch->node = path;
+	watch->callback = callback;
+
+	err = register_xenbus_watch(watch);
+
+	if (err) {
+		watch->node = NULL;
+		watch->callback = NULL;
+		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_watch_path);
+
+
+/**
+ * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
+ * @dev: xenbus device
+ * @watch: watch to register
+ * @callback: callback to register
+ * @pathfmt: format of path to watch
+ *
+ * Register a watch on the path constructed from @pathfmt and its arguments,
+ * using the given xenbus_watch structure for storage, and the given @callback
+ * function as the callback.  Return 0 on success, or -errno on error.  On
+ * success, the constructed path will be saved as @watch->node, and becomes
+ * the caller's to
+ * kfree().  On error, watch->node will be NULL, so the caller has nothing to
+ * free, the device will switch to %XenbusStateClosing, and the error will be
+ * saved in the store.
+ */
+int xenbus_watch_pathfmt(struct xenbus_device *dev,
+			 struct xenbus_watch *watch,
+			 void (*callback)(struct xenbus_watch *,
+					const char **, unsigned int),
+			 const char *pathfmt, ...)
+{
+	int err;
+	va_list ap;
+	char *path;
+
+	va_start(ap, pathfmt);
+	path = kvasprintf(GFP_KERNEL, pathfmt, ap);
+	va_end(ap);
+
+	if (!path) {
+		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
+		return -ENOMEM;
+	}
+	err = xenbus_watch_path(dev, path, watch, callback);
+
+	if (err)
+		kfree(path);
+	return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
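A minimal sketch of registering such a watch from a hypothetical frontend driver; watch_otherend() later in xenbus_probe.c follows the same shape.

#include <xen/xenbus.h>

/* The callback and its driver context are hypothetical. */
static void backend_state_changed(struct xenbus_watch *watch,
				  const char **vec, unsigned int len)
{
	/* vec[XS_WATCH_PATH] holds the path that triggered the watch */
}

static int watch_backend_state(struct xenbus_device *dev,
			       struct xenbus_watch *watch)
{
	return xenbus_watch_pathfmt(dev, watch, backend_state_changed,
				    "%s/state", dev->otherend);
}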
+
+
+/**
+ * xenbus_switch_state
+ * @dev: xenbus device
+ * @state: new state
+ *
+ * Advertise in the store a change of the given driver to the given @state.
+ * Return 0 on success, or -errno on error.  On error, the device will switch
+ * to XenbusStateClosing, and the error will be saved in the store.
+ */
+int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
+{
+	/* We check whether the state is currently set to the given value, and
+	   if not, then the state is set.  We don't want to unconditionally
+	   write the given state, because we don't want to fire watches
+	   unnecessarily.  Furthermore, if the node has gone, we don't write
+	   to it, as the device will be tearing down, and we don't want to
+	   resurrect that directory.
+
+	   Note that, because of this cached value of our state, this function
+	   will not work inside a Xenstore transaction (something it attempted
+	   to do in the past), because dev->state would not get reset if
+	   the transaction was aborted.
+
+	 */
+
+	int current_state;
+	int err;
+
+	if (state == dev->state)
+		return 0;
+
+	err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d",
+			   &current_state);
+	if (err != 1)
+		return 0;
+
+	err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state);
+	if (err) {
+		if (state != XenbusStateClosing) /* Avoid looping */
+			xenbus_dev_fatal(dev, err, "writing new state");
+		return err;
+	}
+
+	dev->state = state;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_switch_state);
+
+int xenbus_frontend_closed(struct xenbus_device *dev)
+{
+	xenbus_switch_state(dev, XenbusStateClosed);
+	complete(&dev->down);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
+
+/**
+ * Return the path to the error node for the given device, or NULL on failure.
+ * If the value returned is non-NULL, then it is the caller's to kfree.
+ */
+static char *error_path(struct xenbus_device *dev)
+{
+	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
+}
+
+
+static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
+				const char *fmt, va_list ap)
+{
+	int ret;
+	unsigned int len;
+	char *printf_buffer = NULL;
+	char *path_buffer = NULL;
+
+#define PRINTF_BUFFER_SIZE 4096
+	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
+	if (printf_buffer == NULL)
+		goto fail;
+
+	len = sprintf(printf_buffer, "%i ", -err);
+	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
+
+	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
+
+	dev_err(&dev->dev, "%s\n", printf_buffer);
+
+	path_buffer = error_path(dev);
+
+	if (path_buffer == NULL) {
+		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
+		       dev->nodename, printf_buffer);
+		goto fail;
+	}
+
+	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
+		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
+		       dev->nodename, printf_buffer);
+		goto fail;
+	}
+
+fail:
+	kfree(printf_buffer);
+	kfree(path_buffer);
+}
+
+
+/**
+ * xenbus_dev_error
+ * @dev: xenbus device
+ * @err: error to report
+ * @fmt: error message format
+ *
+ * Report the given negative errno into the store, along with the given
+ * formatted message.
+ */
+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	xenbus_va_dev_error(dev, err, fmt, ap);
+	va_end(ap);
+}
+EXPORT_SYMBOL_GPL(xenbus_dev_error);
+
+/**
+ * xenbus_dev_fatal
+ * @dev: xenbus device
+ * @err: error to report
+ * @fmt: error message format
+ *
+ * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
+ * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
+ * closedown of this driver and its peer.
+ */
+
+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	xenbus_va_dev_error(dev, err, fmt, ap);
+	va_end(ap);
+
+	xenbus_switch_state(dev, XenbusStateClosing);
+}
+EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
+
+/**
+ * xenbus_grant_ring
+ * @dev: xenbus device
+ * @ring_mfn: mfn of ring to grant
+ *
+ * Grant access to the given @ring_mfn to the peer of the given device.  Return
+ * 0 on success, or -errno on error.  On error, the device will switch to
+ * XenbusStateClosing, and the error will be saved in the store.
+ */
+int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
+{
+	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
+	if (err < 0)
+		xenbus_dev_fatal(dev, err, "granting access to ring page");
+	return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_grant_ring);
+
+
+/**
+ * Allocate an event channel for the given xenbus_device, assigning the newly
+ * created local port to *port.  Return 0 on success, or -errno on error.  On
+ * error, the device will switch to XenbusStateClosing, and the error will be
+ * saved in the store.
+ */
+int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
+{
+	struct evtchn_alloc_unbound alloc_unbound;
+	int err;
+
+	alloc_unbound.dom = DOMID_SELF;
+	alloc_unbound.remote_dom = dev->otherend_id;
+
+	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+					  &alloc_unbound);
+	if (err)
+		xenbus_dev_fatal(dev, err, "allocating event channel");
+	else
+		*port = alloc_unbound.port;
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
+
+
+/**
+ * Bind to an existing interdomain event channel in another domain. Returns 0
+ * on success and stores the local port in *port. On error, returns -errno,
+ * switches the device to XenbusStateClosing, and saves the error in XenStore.
+ */
+int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
+{
+	struct evtchn_bind_interdomain bind_interdomain;
+	int err;
+
+	bind_interdomain.remote_dom = dev->otherend_id;
+	bind_interdomain.remote_port = remote_port;
+
+	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
+					  &bind_interdomain);
+	if (err)
+		xenbus_dev_fatal(dev, err,
+				 "binding to event channel %d from domain %d",
+				 remote_port, dev->otherend_id);
+	else
+		*port = bind_interdomain.local_port;
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
+
+
+/**
+ * Free an existing event channel. Returns 0 on success or -errno on error.
+ */
+int xenbus_free_evtchn(struct xenbus_device *dev, int port)
+{
+	struct evtchn_close close;
+	int err;
+
+	close.port = port;
+
+	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
+	if (err)
+		xenbus_dev_error(dev, err, "freeing event channel %d", port);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
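A hedged sketch of the event-channel round trip from a hypothetical frontend: allocate an unbound channel, bind it to a handler, and release it again on failure. The handler and the "ring" name are assumptions; only the xenbus and bind_evtchn_to_irqhandler() calls mirror this file and xenbus_comms.c.

#include <linux/interrupt.h>
#include <xen/xenbus.h>
#include <xen/events.h>

static irqreturn_t ring_interrupt(int irq, void *dev_id)
{
	/* consume ring responses here */
	return IRQ_HANDLED;
}

static int setup_ring_evtchn(struct xenbus_device *dev, int *evtchn, int *irq)
{
	int err = xenbus_alloc_evtchn(dev, evtchn);

	if (err)
		return err;

	err = bind_evtchn_to_irqhandler(*evtchn, ring_interrupt, 0,
					"ring", dev);
	if (err < 0) {
		xenbus_free_evtchn(dev, *evtchn);
		return err;
	}
	*irq = err;
	return 0;
}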
+
+
+/**
+ * xenbus_map_ring_valloc
+ * @dev: xenbus device
+ * @gnt_ref: grant reference
+ * @vaddr: pointer to address to be filled out by mapping
+ *
+ * Based on Rusty Russell's skeleton driver's map_page.
+ * Map a page of memory into this domain from another domain's grant table.
+ * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
+ * page to that address, and sets *vaddr to that address.
+ * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
+ * or -ENOMEM on error. If an error is returned, device will switch to
+ * XenbusStateClosing and the error message will be saved in XenStore.
+ */
+int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
+{
+	struct gnttab_map_grant_ref op = {
+		.flags = GNTMAP_host_map,
+		.ref   = gnt_ref,
+		.dom   = dev->otherend_id,
+	};
+	struct vm_struct *area;
+
+	*vaddr = NULL;
+
+	area = alloc_vm_area(PAGE_SIZE);
+	if (!area)
+		return -ENOMEM;
+
+	op.host_addr = (unsigned long)area->addr;
+
+	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
+		BUG();
+
+	if (op.status != GNTST_okay) {
+		free_vm_area(area);
+		xenbus_dev_fatal(dev, op.status,
+				 "mapping in shared page %d from domain %d",
+				 gnt_ref, dev->otherend_id);
+		return op.status;
+	}
+
+	/* Stuff the handle in an unused field */
+	area->phys_addr = (unsigned long)op.handle;
+
+	*vaddr = area->addr;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+
+
+/**
+ * xenbus_map_ring
+ * @dev: xenbus device
+ * @gnt_ref: grant reference
+ * @handle: pointer to grant handle to be filled
+ * @vaddr: address to be mapped to
+ *
+ * Map a page of memory into this domain from another domain's grant table.
+ * xenbus_map_ring does not allocate the virtual address space (you must do
+ * this yourself!). It only maps in the page to the specified address.
+ * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
+ * or -ENOMEM on error. If an error is returned, device will switch to
+ * XenbusStateClosing and the error message will be saved in XenStore.
+ */
+int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
+		    grant_handle_t *handle, void *vaddr)
+{
+	struct gnttab_map_grant_ref op = {
+		.host_addr = (unsigned long)vaddr,
+		.flags     = GNTMAP_host_map,
+		.ref       = gnt_ref,
+		.dom       = dev->otherend_id,
+	};
+
+	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
+		BUG();
+
+	if (op.status != GNTST_okay) {
+		xenbus_dev_fatal(dev, op.status,
+				 "mapping in shared page %d from domain %d",
+				 gnt_ref, dev->otherend_id);
+	} else
+		*handle = op.handle;
+
+	return op.status;
+}
+EXPORT_SYMBOL_GPL(xenbus_map_ring);
+
+
+/**
+ * xenbus_unmap_ring_vfree
+ * @dev: xenbus device
+ * @vaddr: addr to unmap
+ *
+ * Based on Rusty Russell's skeleton driver's unmap_page.
+ * Unmap a page of memory in this domain that was imported from another domain.
+ * Use xenbus_unmap_ring_vfree if you mapped in your memory with
+ * xenbus_map_ring_valloc (it will free the virtual address space).
+ * Returns 0 on success and returns GNTST_* on error
+ * (see xen/include/interface/grant_table.h).
+ */
+int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
+{
+	struct vm_struct *area;
+	struct gnttab_unmap_grant_ref op = {
+		.host_addr = (unsigned long)vaddr,
+	};
+
+	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
+	 * method so that we don't have to muck with vmalloc internals here.
+	 * We could force the user to hang on to their struct vm_struct from
+	 * xenbus_map_ring_valloc, but these 6 lines considerably simplify
+	 * this API.
+	 */
+	read_lock(&vmlist_lock);
+	for (area = vmlist; area != NULL; area = area->next) {
+		if (area->addr == vaddr)
+			break;
+	}
+	read_unlock(&vmlist_lock);
+
+	if (!area) {
+		xenbus_dev_error(dev, -ENOENT,
+				 "can't find mapped virtual address %p", vaddr);
+		return GNTST_bad_virt_addr;
+	}
+
+	op.handle = (grant_handle_t)area->phys_addr;
+
+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+		BUG();
+
+	if (op.status == GNTST_okay)
+		free_vm_area(area);
+	else
+		xenbus_dev_error(dev, op.status,
+				 "unmapping page at handle %d error %d",
+				 (int16_t)area->phys_addr, op.status);
+
+	return op.status;
+}
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
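A hypothetical backend-side pairing of xenbus_map_ring_valloc() and xenbus_unmap_ring_vfree(); the grant reference would normally have been read from the frontend's XenStore directory.

#include <xen/xenbus.h>

static void *map_frontend_ring(struct xenbus_device *dev, int gnt_ref)
{
	void *ring;

	if (xenbus_map_ring_valloc(dev, gnt_ref, &ring))
		return NULL;	/* the helper already reported the error */
	return ring;
}

static void unmap_frontend_ring(struct xenbus_device *dev, void *ring)
{
	if (ring)
		xenbus_unmap_ring_vfree(dev, ring);
}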
+
+
+/**
+ * xenbus_unmap_ring
+ * @dev: xenbus device
+ * @handle: grant handle
+ * @vaddr: addr to unmap
+ *
+ * Unmap a page of memory in this domain that was imported from another domain.
+ * Returns 0 on success and returns GNTST_* on error
+ * (see xen/include/interface/grant_table.h).
+ */
+int xenbus_unmap_ring(struct xenbus_device *dev,
+		      grant_handle_t handle, void *vaddr)
+{
+	struct gnttab_unmap_grant_ref op = {
+		.host_addr = (unsigned long)vaddr,
+		.handle    = handle,
+	};
+
+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+		BUG();
+
+	if (op.status != GNTST_okay)
+		xenbus_dev_error(dev, op.status,
+				 "unmapping page at handle %d error %d",
+				 handle, op.status);
+
+	return op.status;
+}
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
+
+
+/**
+ * xenbus_read_driver_state
+ * @path: path for driver
+ *
+ * Return the state of the driver rooted at the given store path, or
+ * XenbusStateUnknown if no state can be read.
+ */
+enum xenbus_state xenbus_read_driver_state(const char *path)
+{
+	enum xenbus_state result;
+	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
+	if (err)
+		result = XenbusStateUnknown;
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
new file mode 100644
index 0000000..6efbe3f
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -0,0 +1,233 @@
+/******************************************************************************
+ * xenbus_comms.c
+ *
+ * Low-level code for talking to the Xen Store: ring buffer and event channel.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <xen/xenbus.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/events.h>
+#include <xen/page.h>
+#include "xenbus_comms.h"
+
+static int xenbus_irq;
+
+static DECLARE_WORK(probe_work, xenbus_probe);
+
+static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
+
+static irqreturn_t wake_waiting(int irq, void *unused)
+{
+	if (unlikely(xenstored_ready == 0)) {
+		xenstored_ready = 1;
+		schedule_work(&probe_work);
+	}
+
+	wake_up(&xb_waitq);
+	return IRQ_HANDLED;
+}
+
+static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
+{
+	return ((prod - cons) <= XENSTORE_RING_SIZE);
+}
+
+static void *get_output_chunk(XENSTORE_RING_IDX cons,
+			      XENSTORE_RING_IDX prod,
+			      char *buf, uint32_t *len)
+{
+	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
+	if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
+		*len = XENSTORE_RING_SIZE - (prod - cons);
+	return buf + MASK_XENSTORE_IDX(prod);
+}
+
+static const void *get_input_chunk(XENSTORE_RING_IDX cons,
+				   XENSTORE_RING_IDX prod,
+				   const char *buf, uint32_t *len)
+{
+	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
+	if ((prod - cons) < *len)
+		*len = prod - cons;
+	return buf + MASK_XENSTORE_IDX(cons);
+}
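A worked example of the chunking, assuming the interface's XENSTORE_RING_SIZE of 1024: the indices run free and are only masked when addressing the buffer, so with rsp_cons = 2040 and rsp_prod = 2070 there are 30 unread bytes; MASK_XENSTORE_IDX(2040) = 1016, so get_input_chunk() first yields the 8 bytes up to the end of the ring, and the next pass through xb_read() picks up the remaining 22 bytes from offset 0.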
+
+/**
+ * xb_write - low level write
+ * @data: buffer to send
+ * @len: length of buffer
+ *
+ * Returns 0 on success, error otherwise.
+ */
+int xb_write(const void *data, unsigned len)
+{
+	struct xenstore_domain_interface *intf = xen_store_interface;
+	XENSTORE_RING_IDX cons, prod;
+	int rc;
+
+	while (len != 0) {
+		void *dst;
+		unsigned int avail;
+
+		rc = wait_event_interruptible(
+			xb_waitq,
+			(intf->req_prod - intf->req_cons) !=
+			XENSTORE_RING_SIZE);
+		if (rc < 0)
+			return rc;
+
+		/* Read indexes, then verify. */
+		cons = intf->req_cons;
+		prod = intf->req_prod;
+		if (!check_indexes(cons, prod)) {
+			intf->req_cons = intf->req_prod = 0;
+			return -EIO;
+		}
+
+		dst = get_output_chunk(cons, prod, intf->req, &avail);
+		if (avail == 0)
+			continue;
+		if (avail > len)
+			avail = len;
+
+		/* Must write data /after/ reading the consumer index. */
+		mb();
+
+		memcpy(dst, data, avail);
+		data += avail;
+		len -= avail;
+
+		/* Other side must not see new producer until data is there. */
+		wmb();
+		intf->req_prod += avail;
+
+		/* Implies mb(): other side will see the updated producer. */
+		notify_remote_via_evtchn(xen_store_evtchn);
+	}
+
+	return 0;
+}
+
+int xb_data_to_read(void)
+{
+	struct xenstore_domain_interface *intf = xen_store_interface;
+	return (intf->rsp_cons != intf->rsp_prod);
+}
+
+int xb_wait_for_data_to_read(void)
+{
+	return wait_event_interruptible(xb_waitq, xb_data_to_read());
+}
+
+int xb_read(void *data, unsigned len)
+{
+	struct xenstore_domain_interface *intf = xen_store_interface;
+	XENSTORE_RING_IDX cons, prod;
+	int rc;
+
+	while (len != 0) {
+		unsigned int avail;
+		const char *src;
+
+		rc = xb_wait_for_data_to_read();
+		if (rc < 0)
+			return rc;
+
+		/* Read indexes, then verify. */
+		cons = intf->rsp_cons;
+		prod = intf->rsp_prod;
+		if (!check_indexes(cons, prod)) {
+			intf->rsp_cons = intf->rsp_prod = 0;
+			return -EIO;
+		}
+
+		src = get_input_chunk(cons, prod, intf->rsp, &avail);
+		if (avail == 0)
+			continue;
+		if (avail > len)
+			avail = len;
+
+		/* Must read data /after/ reading the producer index. */
+		rmb();
+
+		memcpy(data, src, avail);
+		data += avail;
+		len -= avail;
+
+		/* Other side must not see free space until we've copied out */
+		mb();
+		intf->rsp_cons += avail;
+
+		pr_debug("Finished read of %u bytes (%u to go)\n", avail, len);
+
+		/* Implies mb(): other side will see the updated consumer. */
+		notify_remote_via_evtchn(xen_store_evtchn);
+	}
+
+	return 0;
+}
+
+/**
+ * xb_init_comms - Set up the interrupt handler for the store event channel.
+ */
+int xb_init_comms(void)
+{
+	struct xenstore_domain_interface *intf = xen_store_interface;
+	int err;
+
+	if (intf->req_prod != intf->req_cons)
+		printk(KERN_ERR "XENBUS request ring is not quiescent "
+		       "(%08x:%08x)!\n", intf->req_cons, intf->req_prod);
+
+	if (intf->rsp_prod != intf->rsp_cons) {
+		printk(KERN_WARNING "XENBUS response ring is not quiescent "
+		       "(%08x:%08x): fixing up\n",
+		       intf->rsp_cons, intf->rsp_prod);
+		intf->rsp_cons = intf->rsp_prod;
+	}
+
+	if (xenbus_irq)
+		unbind_from_irqhandler(xenbus_irq, &xb_waitq);
+
+	err = bind_evtchn_to_irqhandler(
+		xen_store_evtchn, wake_waiting,
+		0, "xenbus", &xb_waitq);
+	if (err <= 0) {
+		printk(KERN_ERR "XENBUS request irq failed %i\n", err);
+		return err;
+	}
+
+	xenbus_irq = err;
+
+	return 0;
+}
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
new file mode 100644
index 0000000..c21db75
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -0,0 +1,46 @@
+/*
+ * Private include for xenbus communications.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _XENBUS_COMMS_H
+#define _XENBUS_COMMS_H
+
+int xs_init(void);
+int xb_init_comms(void);
+
+/* Low level routines. */
+int xb_write(const void *data, unsigned len);
+int xb_read(void *data, unsigned len);
+int xb_data_to_read(void);
+int xb_wait_for_data_to_read(void);
+int xs_input_avail(void);
+extern struct xenstore_domain_interface *xen_store_interface;
+extern int xen_store_evtchn;
+
+#endif /* _XENBUS_COMMS_H */
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
new file mode 100644
index 0000000..0b769f7
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -0,0 +1,935 @@
+/******************************************************************************
+ * Talks to Xen Store to figure out what devices we have.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
+ * Copyright (C) 2005, 2006 XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define DPRINTK(fmt, args...)				\
+	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
+		 __func__, __LINE__, ##args)
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include "xenbus_comms.h"
+#include "xenbus_probe.h"
+
+int xen_store_evtchn;
+struct xenstore_domain_interface *xen_store_interface;
+static unsigned long xen_store_mfn;
+
+static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
+
+static void wait_for_devices(struct xenbus_driver *xendrv);
+
+static int xenbus_probe_frontend(const char *type, const char *name);
+
+static void xenbus_dev_shutdown(struct device *_dev);
+
+/* If something in array of ids matches this device, return it. */
+static const struct xenbus_device_id *
+match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
+{
+	for (; *arr->devicetype != '\0'; arr++) {
+		if (!strcmp(arr->devicetype, dev->devicetype))
+			return arr;
+	}
+	return NULL;
+}
+
+int xenbus_match(struct device *_dev, struct device_driver *_drv)
+{
+	struct xenbus_driver *drv = to_xenbus_driver(_drv);
+
+	if (!drv->ids)
+		return 0;
+
+	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
+}
+
+/* device/<type>/<id> => <type>-<id> */
+static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
+{
+	nodename = strchr(nodename, '/');
+	if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) {
+		printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
+		return -EINVAL;
+	}
+
+	strlcpy(bus_id, nodename + 1, BUS_ID_SIZE);
+	if (!strchr(bus_id, '/')) {
+		printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
+		return -EINVAL;
+	}
+	*strchr(bus_id, '/') = '-';
+	return 0;
+}
+
+
+static void free_otherend_details(struct xenbus_device *dev)
+{
+	kfree(dev->otherend);
+	dev->otherend = NULL;
+}
+
+
+static void free_otherend_watch(struct xenbus_device *dev)
+{
+	if (dev->otherend_watch.node) {
+		unregister_xenbus_watch(&dev->otherend_watch);
+		kfree(dev->otherend_watch.node);
+		dev->otherend_watch.node = NULL;
+	}
+}
+
+
+int read_otherend_details(struct xenbus_device *xendev,
+				 char *id_node, char *path_node)
+{
+	int err = xenbus_gather(XBT_NIL, xendev->nodename,
+				id_node, "%i", &xendev->otherend_id,
+				path_node, NULL, &xendev->otherend,
+				NULL);
+	if (err) {
+		xenbus_dev_fatal(xendev, err,
+				 "reading other end details from %s",
+				 xendev->nodename);
+		return err;
+	}
+	if (strlen(xendev->otherend) == 0 ||
+	    !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
+		xenbus_dev_fatal(xendev, -ENOENT,
+				 "unable to read other end from %s.  "
+				 "missing or inaccessible.",
+				 xendev->nodename);
+		free_otherend_details(xendev);
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+
+static int read_backend_details(struct xenbus_device *xendev)
+{
+	return read_otherend_details(xendev, "backend-id", "backend");
+}
+
+
+/* Bus type for frontend drivers. */
+static struct xen_bus_type xenbus_frontend = {
+	.root = "device",
+	.levels = 2, 		/* device/type/<id> */
+	.get_bus_id = frontend_bus_id,
+	.probe = xenbus_probe_frontend,
+	.bus = {
+		.name     = "xen",
+		.match    = xenbus_match,
+		.probe    = xenbus_dev_probe,
+		.remove   = xenbus_dev_remove,
+		.shutdown = xenbus_dev_shutdown,
+	},
+};
+
+static void otherend_changed(struct xenbus_watch *watch,
+			     const char **vec, unsigned int len)
+{
+	struct xenbus_device *dev =
+		container_of(watch, struct xenbus_device, otherend_watch);
+	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
+	enum xenbus_state state;
+
+	/* Protect us against watches firing on old details when the otherend
+	   details change, say immediately after a resume. */
+	if (!dev->otherend ||
+	    strncmp(dev->otherend, vec[XS_WATCH_PATH],
+		    strlen(dev->otherend))) {
+		dev_dbg(&dev->dev, "Ignoring watch at %s", vec[XS_WATCH_PATH]);
+		return;
+	}
+
+	state = xenbus_read_driver_state(dev->otherend);
+
+	dev_dbg(&dev->dev, "state is %d, (%s), %s, %s",
+		state, xenbus_strstate(state), dev->otherend_watch.node,
+		vec[XS_WATCH_PATH]);
+
+	/*
+	 * Ignore xenbus transitions during shutdown. This prevents us doing
+	 * work that can fail e.g., when the rootfs is gone.
+	 */
+	if (system_state > SYSTEM_RUNNING) {
+		struct xen_bus_type *bus =
+			container_of(dev->dev.bus, struct xen_bus_type, bus);
+		/* If we're frontend, drive the state machine to Closed. */
+		/* This should cause the backend to release our resources. */
+		if ((bus == &xenbus_frontend) && (state == XenbusStateClosing))
+			xenbus_frontend_closed(dev);
+		return;
+	}
+
+	if (drv->otherend_changed)
+		drv->otherend_changed(dev, state);
+}
+
+
+static int talk_to_otherend(struct xenbus_device *dev)
+{
+	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
+
+	free_otherend_watch(dev);
+	free_otherend_details(dev);
+
+	return drv->read_otherend_details(dev);
+}
+
+
+static int watch_otherend(struct xenbus_device *dev)
+{
+	return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
+				    "%s/%s", dev->otherend, "state");
+}
+
+
+int xenbus_dev_probe(struct device *_dev)
+{
+	struct xenbus_device *dev = to_xenbus_device(_dev);
+	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
+	const struct xenbus_device_id *id;
+	int err;
+
+	DPRINTK("%s", dev->nodename);
+
+	if (!drv->probe) {
+		err = -ENODEV;
+		goto fail;
+	}
+
+	id = match_device(drv->ids, dev);
+	if (!id) {
+		err = -ENODEV;
+		goto fail;
+	}
+
+	err = talk_to_otherend(dev);
+	if (err) {
+		dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
+			 dev->nodename);
+		return err;
+	}
+
+	err = drv->probe(dev, id);
+	if (err)
+		goto fail;
+
+	err = watch_otherend(dev);
+	if (err) {
+		dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
+		       dev->nodename);
+		return err;
+	}
+
+	return 0;
+fail:
+	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
+	xenbus_switch_state(dev, XenbusStateClosed);
+	return -ENODEV;
+}
+
+int xenbus_dev_remove(struct device *_dev)
+{
+	struct xenbus_device *dev = to_xenbus_device(_dev);
+	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
+
+	DPRINTK("%s", dev->nodename);
+
+	free_otherend_watch(dev);
+	free_otherend_details(dev);
+
+	if (drv->remove)
+		drv->remove(dev);
+
+	xenbus_switch_state(dev, XenbusStateClosed);
+	return 0;
+}
+
+static void xenbus_dev_shutdown(struct device *_dev)
+{
+	struct xenbus_device *dev = to_xenbus_device(_dev);
+	unsigned long timeout = 5*HZ;
+
+	DPRINTK("%s", dev->nodename);
+
+	get_device(&dev->dev);
+	if (dev->state != XenbusStateConnected) {
+		printk(KERN_INFO "%s: %s: %s != Connected, skipping\n", __func__,
+		       dev->nodename, xenbus_strstate(dev->state));
+		goto out;
+	}
+	xenbus_switch_state(dev, XenbusStateClosing);
+	timeout = wait_for_completion_timeout(&dev->down, timeout);
+	if (!timeout)
+		printk(KERN_INFO "%s: %s timeout closing device\n",
+		       __func__, dev->nodename);
+ out:
+	put_device(&dev->dev);
+}
+
+int xenbus_register_driver_common(struct xenbus_driver *drv,
+				  struct xen_bus_type *bus,
+				  struct module *owner,
+				  const char *mod_name)
+{
+	drv->driver.name = drv->name;
+	drv->driver.bus = &bus->bus;
+	drv->driver.owner = owner;
+	drv->driver.mod_name = mod_name;
+
+	return driver_register(&drv->driver);
+}
+
+int __xenbus_register_frontend(struct xenbus_driver *drv,
+			       struct module *owner, const char *mod_name)
+{
+	int ret;
+
+	drv->read_otherend_details = read_backend_details;
+
+	ret = xenbus_register_driver_common(drv, &xenbus_frontend,
+					    owner, mod_name);
+	if (ret)
+		return ret;
+
+	/* If this driver is loaded as a module wait for devices to attach. */
+	wait_for_devices(drv);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
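A sketch of a frontend driver plugging into this bus; "vfront" and its callbacks are hypothetical, but the fields shown are the ones xenbus_dev_probe() and otherend_changed() above dereference, and the id-table terminator matches what match_device() expects.

#include <linux/module.h>
#include <xen/xenbus.h>

static const struct xenbus_device_id vfront_ids[] = {
	{ "vfront" },
	{ "" }
};

static int vfront_probe(struct xenbus_device *dev,
			const struct xenbus_device_id *id)
{
	/* allocate rings, grant pages, set up the event channel, then: */
	return xenbus_switch_state(dev, XenbusStateInitialised);
}

static void vfront_otherend_changed(struct xenbus_device *dev,
				    enum xenbus_state backend_state)
{
	if (backend_state == XenbusStateConnected)
		xenbus_switch_state(dev, XenbusStateConnected);
}

static struct xenbus_driver vfront_driver = {
	.name             = "vfront",
	.ids              = vfront_ids,
	.probe            = vfront_probe,
	.otherend_changed = vfront_otherend_changed,
};

static int __init vfront_init(void)
{
	return __xenbus_register_frontend(&vfront_driver, THIS_MODULE,
					  KBUILD_MODNAME);
}
module_init(vfront_init);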
+
+void xenbus_unregister_driver(struct xenbus_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
+
+struct xb_find_info
+{
+	struct xenbus_device *dev;
+	const char *nodename;
+};
+
+static int cmp_dev(struct device *dev, void *data)
+{
+	struct xenbus_device *xendev = to_xenbus_device(dev);
+	struct xb_find_info *info = data;
+
+	if (!strcmp(xendev->nodename, info->nodename)) {
+		info->dev = xendev;
+		get_device(dev);
+		return 1;
+	}
+	return 0;
+}
+
+struct xenbus_device *xenbus_device_find(const char *nodename,
+					 struct bus_type *bus)
+{
+	struct xb_find_info info = { .dev = NULL, .nodename = nodename };
+
+	bus_for_each_dev(bus, NULL, &info, cmp_dev);
+	return info.dev;
+}
+
+static int cleanup_dev(struct device *dev, void *data)
+{
+	struct xenbus_device *xendev = to_xenbus_device(dev);
+	struct xb_find_info *info = data;
+	int len = strlen(info->nodename);
+
+	DPRINTK("%s", info->nodename);
+
+	/* Match the info->nodename path, or any subdirectory of that path. */
+	if (strncmp(xendev->nodename, info->nodename, len))
+		return 0;
+
+	/* If the node name is longer, ensure it really is a subdirectory. */
+	if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
+		return 0;
+
+	info->dev = xendev;
+	get_device(dev);
+	return 1;
+}
+
+static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
+{
+	struct xb_find_info info = { .nodename = path };
+
+	do {
+		info.dev = NULL;
+		bus_for_each_dev(bus, NULL, &info, cleanup_dev);
+		if (info.dev) {
+			device_unregister(&info.dev->dev);
+			put_device(&info.dev->dev);
+		}
+	} while (info.dev);
+}
+
+static void xenbus_dev_release(struct device *dev)
+{
+	if (dev)
+		kfree(to_xenbus_device(dev));
+}
+
+static ssize_t xendev_show_nodename(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
+}
+DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
+
+static ssize_t xendev_show_devtype(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
+}
+DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
+
+
+int xenbus_probe_node(struct xen_bus_type *bus,
+		      const char *type,
+		      const char *nodename)
+{
+	int err;
+	struct xenbus_device *xendev;
+	size_t stringlen;
+	char *tmpstring;
+
+	enum xenbus_state state = xenbus_read_driver_state(nodename);
+
+	if (state != XenbusStateInitialising) {
+		/* Device is not new, so ignore it.  This can happen if a
+		   device is going away after switching to Closed.  */
+		return 0;
+	}
+
+	stringlen = strlen(nodename) + 1 + strlen(type) + 1;
+	xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
+	if (!xendev)
+		return -ENOMEM;
+
+	xendev->state = XenbusStateInitialising;
+
+	/* Copy the strings into the extra space. */
+
+	tmpstring = (char *)(xendev + 1);
+	strcpy(tmpstring, nodename);
+	xendev->nodename = tmpstring;
+
+	tmpstring += strlen(tmpstring) + 1;
+	strcpy(tmpstring, type);
+	xendev->devicetype = tmpstring;
+	init_completion(&xendev->down);
+
+	xendev->dev.bus = &bus->bus;
+	xendev->dev.release = xenbus_dev_release;
+
+	err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename);
+	if (err)
+		goto fail;
+
+	/* Register with generic device framework. */
+	err = device_register(&xendev->dev);
+	if (err)
+		goto fail;
+
+	err = device_create_file(&xendev->dev, &dev_attr_nodename);
+	if (err)
+		goto fail_unregister;
+
+	err = device_create_file(&xendev->dev, &dev_attr_devtype);
+	if (err)
+		goto fail_remove_file;
+
+	return 0;
+fail_remove_file:
+	device_remove_file(&xendev->dev, &dev_attr_nodename);
+fail_unregister:
+	device_unregister(&xendev->dev);
+fail:
+	kfree(xendev);
+	return err;
+}
+
+/* device/<typename>/<name> */
+static int xenbus_probe_frontend(const char *type, const char *name)
+{
+	char *nodename;
+	int err;
+
+	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s",
+			     xenbus_frontend.root, type, name);
+	if (!nodename)
+		return -ENOMEM;
+
+	DPRINTK("%s", nodename);
+
+	err = xenbus_probe_node(&xenbus_frontend, type, nodename);
+	kfree(nodename);
+	return err;
+}
+
+static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
+{
+	int err = 0;
+	char **dir;
+	unsigned int dir_n = 0;
+	int i;
+
+	dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
+	if (IS_ERR(dir))
+		return PTR_ERR(dir);
+
+	for (i = 0; i < dir_n; i++) {
+		err = bus->probe(type, dir[i]);
+		if (err)
+			break;
+	}
+	kfree(dir);
+	return err;
+}
+
+int xenbus_probe_devices(struct xen_bus_type *bus)
+{
+	int err = 0;
+	char **dir;
+	unsigned int i, dir_n;
+
+	dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
+	if (IS_ERR(dir))
+		return PTR_ERR(dir);
+
+	for (i = 0; i < dir_n; i++) {
+		err = xenbus_probe_device_type(bus, dir[i]);
+		if (err)
+			break;
+	}
+	kfree(dir);
+	return err;
+}
+
+static unsigned int char_count(const char *str, char c)
+{
+	unsigned int i, ret = 0;
+
+	for (i = 0; str[i]; i++)
+		if (str[i] == c)
+			ret++;
+	return ret;
+}
+
+static int strsep_len(const char *str, char c, unsigned int len)
+{
+	unsigned int i;
+
+	for (i = 0; str[i]; i++)
+		if (str[i] == c) {
+			if (len == 0)
+				return i;
+			len--;
+		}
+	return (len == 0) ? i : -ERANGE;
+}
+
+void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
+{
+	int exists, rootlen;
+	struct xenbus_device *dev;
+	char type[BUS_ID_SIZE];
+	const char *p, *root;
+
+	if (char_count(node, '/') < 2)
+		return;
+
+	exists = xenbus_exists(XBT_NIL, node, "");
+	if (!exists) {
+		xenbus_cleanup_devices(node, &bus->bus);
+		return;
+	}
+
+	/* backend/<type>/... or device/<type>/... */
+	p = strchr(node, '/') + 1;
+	snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
+	type[BUS_ID_SIZE-1] = '\0';
+
+	rootlen = strsep_len(node, '/', bus->levels);
+	if (rootlen < 0)
+		return;
+	root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
+	if (!root)
+		return;
+
+	dev = xenbus_device_find(root, &bus->bus);
+	if (!dev)
+		xenbus_probe_node(bus, type, root);
+	else
+		put_device(&dev->dev);
+
+	kfree(root);
+}
+
+static void frontend_changed(struct xenbus_watch *watch,
+			     const char **vec, unsigned int len)
+{
+	DPRINTK("");
+
+	xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
+}
+
+/* We watch for devices appearing and vanishing. */
+static struct xenbus_watch fe_watch = {
+	.node = "device",
+	.callback = frontend_changed,
+};
+
+static int suspend_dev(struct device *dev, void *data)
+{
+	int err = 0;
+	struct xenbus_driver *drv;
+	struct xenbus_device *xdev;
+
+	DPRINTK("");
+
+	if (dev->driver == NULL)
+		return 0;
+	drv = to_xenbus_driver(dev->driver);
+	xdev = container_of(dev, struct xenbus_device, dev);
+	if (drv->suspend)
+		err = drv->suspend(xdev);
+	if (err)
+		printk(KERN_WARNING
+		       "xenbus: suspend %s failed: %i\n", dev->bus_id, err);
+	return 0;
+}
+
+static int suspend_cancel_dev(struct device *dev, void *data)
+{
+	int err = 0;
+	struct xenbus_driver *drv;
+	struct xenbus_device *xdev;
+
+	DPRINTK("");
+
+	if (dev->driver == NULL)
+		return 0;
+	drv = to_xenbus_driver(dev->driver);
+	xdev = container_of(dev, struct xenbus_device, dev);
+	if (drv->suspend_cancel)
+		err = drv->suspend_cancel(xdev);
+	if (err)
+		printk(KERN_WARNING
+		       "xenbus: suspend_cancel %s failed: %i\n",
+		       dev->bus_id, err);
+	return 0;
+}
+
+static int resume_dev(struct device *dev, void *data)
+{
+	int err;
+	struct xenbus_driver *drv;
+	struct xenbus_device *xdev;
+
+	DPRINTK("");
+
+	if (dev->driver == NULL)
+		return 0;
+
+	drv = to_xenbus_driver(dev->driver);
+	xdev = container_of(dev, struct xenbus_device, dev);
+
+	err = talk_to_otherend(xdev);
+	if (err) {
+		printk(KERN_WARNING
+		       "xenbus: resume (talk_to_otherend) %s failed: %i\n",
+		       dev->bus_id, err);
+		return err;
+	}
+
+	xdev->state = XenbusStateInitialising;
+
+	if (drv->resume) {
+		err = drv->resume(xdev);
+		if (err) {
+			printk(KERN_WARNING
+			       "xenbus: resume %s failed: %i\n",
+			       dev->bus_id, err);
+			return err;
+		}
+	}
+
+	err = watch_otherend(xdev);
+	if (err) {
+		printk(KERN_WARNING
+		       "xenbus_probe: resume (watch_otherend) %s failed: "
+		       "%d.\n", dev->bus_id, err);
+		return err;
+	}
+
+	return 0;
+}
+
+void xenbus_suspend(void)
+{
+	DPRINTK("");
+
+	bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
+	xenbus_backend_suspend(suspend_dev);
+	xs_suspend();
+}
+EXPORT_SYMBOL_GPL(xenbus_suspend);
+
+void xenbus_resume(void)
+{
+	xb_init_comms();
+	xs_resume();
+	bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
+	xenbus_backend_resume(resume_dev);
+}
+EXPORT_SYMBOL_GPL(xenbus_resume);
+
+void xenbus_suspend_cancel(void)
+{
+	xs_suspend_cancel();
+	bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
+	xenbus_backend_resume(suspend_cancel_dev);
+}
+EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
+
+/* A flag to determine if xenstored is 'ready' (i.e. has started) */
+int xenstored_ready = 0;
+
+
+int register_xenstore_notifier(struct notifier_block *nb)
+{
+	int ret = 0;
+
+	if (xenstored_ready > 0)
+		ret = nb->notifier_call(nb, 0, NULL);
+	else
+		blocking_notifier_chain_register(&xenstore_chain, nb);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(register_xenstore_notifier);
+
+void unregister_xenstore_notifier(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&xenstore_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
+
+void xenbus_probe(struct work_struct *unused)
+{
+	BUG_ON((xenstored_ready <= 0));
+
+	/* Enumerate devices in xenstore and watch for changes. */
+	xenbus_probe_devices(&xenbus_frontend);
+	register_xenbus_watch(&fe_watch);
+	xenbus_backend_probe_and_watch();
+
+	/* Notify others that xenstore is up */
+	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
+}
+
+static int __init xenbus_probe_init(void)
+{
+	int err = 0;
+
+	DPRINTK("");
+
+	err = -ENODEV;
+	if (!is_running_on_xen())
+		goto out_error;
+
+	/* Register ourselves with the kernel bus subsystem */
+	err = bus_register(&xenbus_frontend.bus);
+	if (err)
+		goto out_error;
+
+	err = xenbus_backend_bus_register();
+	if (err)
+		goto out_unreg_front;
+
+	/*
+	 * Domain0 doesn't have a store_evtchn or store_mfn yet.
+	 */
+	if (is_initial_xendomain()) {
+		/* dom0 not yet supported */
+	} else {
+		xenstored_ready = 1;
+		xen_store_evtchn = xen_start_info->store_evtchn;
+		xen_store_mfn = xen_start_info->store_mfn;
+	}
+	xen_store_interface = mfn_to_virt(xen_store_mfn);
+
+	/* Initialize the interface to xenstore. */
+	err = xs_init();
+	if (err) {
+		printk(KERN_WARNING
+		       "XENBUS: Error initializing xenstore comms: %i\n", err);
+		goto out_unreg_back;
+	}
+
+	if (!is_initial_xendomain())
+		xenbus_probe(NULL);
+
+	return 0;
+
+  out_unreg_back:
+	xenbus_backend_bus_unregister();
+
+  out_unreg_front:
+	bus_unregister(&xenbus_frontend.bus);
+
+  out_error:
+	return err;
+}
+
+postcore_initcall(xenbus_probe_init);
+
+MODULE_LICENSE("GPL");
+
+static int is_disconnected_device(struct device *dev, void *data)
+{
+	struct xenbus_device *xendev = to_xenbus_device(dev);
+	struct device_driver *drv = data;
+
+	/*
+	 * A device with no driver will never connect. We care only about
+	 * devices which should currently be in the process of connecting.
+	 */
+	if (!dev->driver)
+		return 0;
+
+	/* Is this search limited to a particular driver? */
+	if (drv && (dev->driver != drv))
+		return 0;
+
+	return (xendev->state != XenbusStateConnected);
+}
+
+static int exists_disconnected_device(struct device_driver *drv)
+{
+	return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+				is_disconnected_device);
+}
+
+static int print_device_status(struct device *dev, void *data)
+{
+	struct xenbus_device *xendev = to_xenbus_device(dev);
+	struct device_driver *drv = data;
+
+	/* Is this operation limited to a particular driver? */
+	if (drv && (dev->driver != drv))
+		return 0;
+
+	if (!dev->driver) {
+		/* Information only: is this too noisy? */
+		printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
+		       xendev->nodename);
+	} else if (xendev->state != XenbusStateConnected) {
+		printk(KERN_WARNING "XENBUS: Timeout connecting "
+		       "to device: %s (state %d)\n",
+		       xendev->nodename, xendev->state);
+	}
+
+	return 0;
+}
+
+/* We only wait for device setup after most initcalls have run. */
+static int ready_to_wait_for_devices;
+
+/*
+ * Wait, with a 10 second timeout, for all currently configured devices.  We
+ * need to do this to guarantee that the filesystems and/or network devices
+ * needed for boot are available before we can allow the boot to proceed.
+ *
+ * This needs to be on a late_initcall, to happen after the frontend device
+ * drivers have been initialised, but before the root fs is mounted.
+ *
+ * A possible improvement here would be to have the tools add a per-device
+ * flag to the store entry, indicating whether it is needed at boot time.
+ * This would allow people who knew what they were doing to accelerate their
+ * boot slightly, but of course needs tools or manual intervention to set up
+ * those flags correctly.
+ */
+static void wait_for_devices(struct xenbus_driver *xendrv)
+{
+	unsigned long timeout = jiffies + 10*HZ;
+	struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
+
+	if (!ready_to_wait_for_devices || !is_running_on_xen())
+		return;
+
+	while (exists_disconnected_device(drv)) {
+		if (time_after(jiffies, timeout))
+			break;
+		schedule_timeout_interruptible(HZ/10);
+	}
+
+	bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+			 print_device_status);
+}
+
+#ifndef MODULE
+static int __init boot_wait_for_devices(void)
+{
+	ready_to_wait_for_devices = 1;
+	wait_for_devices(NULL);
+	return 0;
+}
+
+late_initcall(boot_wait_for_devices);
+#endif
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
new file mode 100644
index 0000000..e09b194
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -0,0 +1,74 @@
+/******************************************************************************
+ * xenbus_probe.h
+ *
+ * Talks to Xen Store to figure out what devices we have.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ * Copyright (C) 2005 XenSource Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _XENBUS_PROBE_H
+#define _XENBUS_PROBE_H
+
+#ifdef CONFIG_XEN_BACKEND
+extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
+extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
+extern void xenbus_backend_probe_and_watch(void);
+extern int xenbus_backend_bus_register(void);
+extern void xenbus_backend_bus_unregister(void);
+#else
+static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
+static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
+static inline void xenbus_backend_probe_and_watch(void) {}
+static inline int xenbus_backend_bus_register(void) { return 0; }
+static inline void xenbus_backend_bus_unregister(void) {}
+#endif
+
+struct xen_bus_type
+{
+	char *root;
+	unsigned int levels;
+	int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
+	int (*probe)(const char *type, const char *dir);
+	struct bus_type bus;
+};
+
+extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
+extern int xenbus_dev_probe(struct device *_dev);
+extern int xenbus_dev_remove(struct device *_dev);
+extern int xenbus_register_driver_common(struct xenbus_driver *drv,
+					 struct xen_bus_type *bus,
+					 struct module *owner,
+					 const char *mod_name);
+extern int xenbus_probe_node(struct xen_bus_type *bus,
+			     const char *type,
+			     const char *nodename);
+extern int xenbus_probe_devices(struct xen_bus_type *bus);
+
+extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
+
+#endif
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
new file mode 100644
index 0000000..9e943fb
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -0,0 +1,861 @@
+/******************************************************************************
+ * xenbus_xs.c
+ *
+ * This is the kernel equivalent of the "xs" library.  We don't need everything
+ * and we use xenbus_comms for communication.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/unistd.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/uio.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/fcntl.h>
+#include <linux/kthread.h>
+#include <linux/rwsem.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <xen/xenbus.h>
+#include "xenbus_comms.h"
+
+struct xs_stored_msg {
+	struct list_head list;
+
+	struct xsd_sockmsg hdr;
+
+	union {
+		/* Queued replies. */
+		struct {
+			char *body;
+		} reply;
+
+		/* Queued watch events. */
+		struct {
+			struct xenbus_watch *handle;
+			char **vec;
+			unsigned int vec_size;
+		} watch;
+	} u;
+};
+
+struct xs_handle {
+	/* A list of replies. Currently only one will ever be outstanding. */
+	struct list_head reply_list;
+	spinlock_t reply_lock;
+	wait_queue_head_t reply_waitq;
+
+	/*
+	 * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
+	 * response_mutex is never taken simultaneously with the other three.
+	 */
+
+	/* One request at a time. */
+	struct mutex request_mutex;
+
+	/* Protect xenbus reader thread against save/restore. */
+	struct mutex response_mutex;
+
+	/* Protect transactions against save/restore. */
+	struct rw_semaphore transaction_mutex;
+
+	/* Protect watch (de)register against save/restore. */
+	struct rw_semaphore watch_mutex;
+};
+
+static struct xs_handle xs_state;
+
+/* List of registered watches, and a lock to protect it. */
+static LIST_HEAD(watches);
+static DEFINE_SPINLOCK(watches_lock);
+
+/* List of pending watch callback events, and a lock to protect it. */
+static LIST_HEAD(watch_events);
+static DEFINE_SPINLOCK(watch_events_lock);
+
+/*
+ * Details of the xenwatch callback kernel thread. The thread waits on the
+ * watch_events_waitq for work to do (queued on watch_events list). When it
+ * wakes up it acquires the xenwatch_mutex before reading the list and
+ * carrying out work.
+ */
+static pid_t xenwatch_pid;
+static DEFINE_MUTEX(xenwatch_mutex);
+static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
+
+static int get_error(const char *errorstring)
+{
+	unsigned int i;
+
+	for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
+		if (i == ARRAY_SIZE(xsd_errors) - 1) {
+			printk(KERN_WARNING
+			       "XENBUS xen store gave: unknown error %s",
+			       errorstring);
+			return EINVAL;
+		}
+	}
+	return xsd_errors[i].errnum;
+}
+
+static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
+{
+	struct xs_stored_msg *msg;
+	char *body;
+
+	spin_lock(&xs_state.reply_lock);
+
+	while (list_empty(&xs_state.reply_list)) {
+		spin_unlock(&xs_state.reply_lock);
+		/* XXX FIXME: Avoid synchronous wait for response here. */
+		wait_event(xs_state.reply_waitq,
+			   !list_empty(&xs_state.reply_list));
+		spin_lock(&xs_state.reply_lock);
+	}
+
+	msg = list_entry(xs_state.reply_list.next,
+			 struct xs_stored_msg, list);
+	list_del(&msg->list);
+
+	spin_unlock(&xs_state.reply_lock);
+
+	*type = msg->hdr.type;
+	if (len)
+		*len = msg->hdr.len;
+	body = msg->u.reply.body;
+
+	kfree(msg);
+
+	return body;
+}
+
+void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
+{
+	void *ret;
+	struct xsd_sockmsg req_msg = *msg;
+	int err;
+
+	if (req_msg.type == XS_TRANSACTION_START)
+		down_read(&xs_state.transaction_mutex);
+
+	mutex_lock(&xs_state.request_mutex);
+
+	err = xb_write(msg, sizeof(*msg) + msg->len);
+	if (err) {
+		msg->type = XS_ERROR;
+		ret = ERR_PTR(err);
+	} else
+		ret = read_reply(&msg->type, &msg->len);
+
+	mutex_unlock(&xs_state.request_mutex);
+
+	if ((msg->type == XS_TRANSACTION_END) ||
+	    ((req_msg.type == XS_TRANSACTION_START) &&
+	     (msg->type == XS_ERROR)))
+		up_read(&xs_state.transaction_mutex);
+
+	return ret;
+}
+
+/* Send message to xs, get kmalloc'ed reply.  ERR_PTR() on error. */
+static void *xs_talkv(struct xenbus_transaction t,
+		      enum xsd_sockmsg_type type,
+		      const struct kvec *iovec,
+		      unsigned int num_vecs,
+		      unsigned int *len)
+{
+	struct xsd_sockmsg msg;
+	void *ret = NULL;
+	unsigned int i;
+	int err;
+
+	msg.tx_id = t.id;
+	msg.req_id = 0;
+	msg.type = type;
+	msg.len = 0;
+	for (i = 0; i < num_vecs; i++)
+		msg.len += iovec[i].iov_len;
+
+	mutex_lock(&xs_state.request_mutex);
+
+	err = xb_write(&msg, sizeof(msg));
+	if (err) {
+		mutex_unlock(&xs_state.request_mutex);
+		return ERR_PTR(err);
+	}
+
+	for (i = 0; i < num_vecs; i++) {
+		err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
+		if (err) {
+			mutex_unlock(&xs_state.request_mutex);
+			return ERR_PTR(err);
+		}
+	}
+
+	ret = read_reply(&msg.type, len);
+
+	mutex_unlock(&xs_state.request_mutex);
+
+	if (IS_ERR(ret))
+		return ret;
+
+	if (msg.type == XS_ERROR) {
+		err = get_error(ret);
+		kfree(ret);
+		return ERR_PTR(-err);
+	}
+
+	if (msg.type != type) {
+		if (printk_ratelimit())
+			printk(KERN_WARNING
+			       "XENBUS unexpected type [%d], expected [%d]\n",
+			       msg.type, type);
+		kfree(ret);
+		return ERR_PTR(-EINVAL);
+	}
+	return ret;
+}
+
+/* Simplified version of xs_talkv: single message. */
+static void *xs_single(struct xenbus_transaction t,
+		       enum xsd_sockmsg_type type,
+		       const char *string,
+		       unsigned int *len)
+{
+	struct kvec iovec;
+
+	iovec.iov_base = (void *)string;
+	iovec.iov_len = strlen(string) + 1;
+	return xs_talkv(t, type, &iovec, 1, len);
+}
+
+/* Many commands only need an ack; we don't care what it says. */
+static int xs_error(char *reply)
+{
+	if (IS_ERR(reply))
+		return PTR_ERR(reply);
+	kfree(reply);
+	return 0;
+}
+
+static unsigned int count_strings(const char *strings, unsigned int len)
+{
+	unsigned int num;
+	const char *p;
+
+	for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
+		num++;
+
+	return num;
+}
+
+/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
+static char *join(const char *dir, const char *name)
+{
+	char *buffer;
+
+	if (strlen(name) == 0)
+		buffer = kasprintf(GFP_KERNEL, "%s", dir);
+	else
+		buffer = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
+	return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
+}
+
+static char **split(char *strings, unsigned int len, unsigned int *num)
+{
+	char *p, **ret;
+
+	/* Count the strings. */
+	*num = count_strings(strings, len);
+
+	/* Transfer to one big alloc for easy freeing. */
+	ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL);
+	if (!ret) {
+		kfree(strings);
+		return ERR_PTR(-ENOMEM);
+	}
+	memcpy(&ret[*num], strings, len);
+	kfree(strings);
+
+	strings = (char *)&ret[*num];
+	for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
+		ret[(*num)++] = p;
+
+	return ret;
+}
+
+char **xenbus_directory(struct xenbus_transaction t,
+			const char *dir, const char *node, unsigned int *num)
+{
+	char *strings, *path;
+	unsigned int len;
+
+	path = join(dir, node);
+	if (IS_ERR(path))
+		return (char **)path;
+
+	strings = xs_single(t, XS_DIRECTORY, path, &len);
+	kfree(path);
+	if (IS_ERR(strings))
+		return (char **)strings;
+
+	return split(strings, len, num);
+}
+EXPORT_SYMBOL_GPL(xenbus_directory);
+
+/* Check if a path exists. Return 1 if it does. */
+int xenbus_exists(struct xenbus_transaction t,
+		  const char *dir, const char *node)
+{
+	char **d;
+	int dir_n;
+
+	d = xenbus_directory(t, dir, node, &dir_n);
+	if (IS_ERR(d))
+		return 0;
+	kfree(d);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(xenbus_exists);
+
+/* Get the value of a single file.
+ * Returns a kmalloced value: call kfree() on it after use.
+ * len indicates length in bytes.
+ */
+void *xenbus_read(struct xenbus_transaction t,
+		  const char *dir, const char *node, unsigned int *len)
+{
+	char *path;
+	void *ret;
+
+	path = join(dir, node);
+	if (IS_ERR(path))
+		return (void *)path;
+
+	ret = xs_single(t, XS_READ, path, len);
+	kfree(path);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_read);
+
+/* Write the value of a single file.
+ * Returns -err on failure.
+ */
+int xenbus_write(struct xenbus_transaction t,
+		 const char *dir, const char *node, const char *string)
+{
+	const char *path;
+	struct kvec iovec[2];
+	int ret;
+
+	path = join(dir, node);
+	if (IS_ERR(path))
+		return PTR_ERR(path);
+
+	iovec[0].iov_base = (void *)path;
+	iovec[0].iov_len = strlen(path) + 1;
+	iovec[1].iov_base = (void *)string;
+	iovec[1].iov_len = strlen(string);
+
+	ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
+	kfree(path);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_write);
+
+/* Create a new directory. */
+int xenbus_mkdir(struct xenbus_transaction t,
+		 const char *dir, const char *node)
+{
+	char *path;
+	int ret;
+
+	path = join(dir, node);
+	if (IS_ERR(path))
+		return PTR_ERR(path);
+
+	ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
+	kfree(path);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_mkdir);
+
+/* Destroy a file or directory (directories must be empty). */
+int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
+{
+	char *path;
+	int ret;
+
+	path = join(dir, node);
+	if (IS_ERR(path))
+		return PTR_ERR(path);
+
+	ret = xs_error(xs_single(t, XS_RM, path, NULL));
+	kfree(path);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_rm);
+
+/* Start a transaction: changes by others will not be seen during this
+ * transaction, and changes will not be visible to others until end.
+ */
+int xenbus_transaction_start(struct xenbus_transaction *t)
+{
+	char *id_str;
+
+	down_read(&xs_state.transaction_mutex);
+
+	id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
+	if (IS_ERR(id_str)) {
+		up_read(&xs_state.transaction_mutex);
+		return PTR_ERR(id_str);
+	}
+
+	t->id = simple_strtoul(id_str, NULL, 0);
+	kfree(id_str);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_transaction_start);
+
+/* End a transaction.
+ * If abort is true, the transaction is discarded instead of committed.
+ */
+int xenbus_transaction_end(struct xenbus_transaction t, int abort)
+{
+	char abortstr[2];
+	int err;
+
+	if (abort)
+		strcpy(abortstr, "F");
+	else
+		strcpy(abortstr, "T");
+
+	err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
+
+	up_read(&xs_state.transaction_mutex);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_transaction_end);
+
+/* Single read and scanf: returns -errno or num scanned. */
+int xenbus_scanf(struct xenbus_transaction t,
+		 const char *dir, const char *node, const char *fmt, ...)
+{
+	va_list ap;
+	int ret;
+	char *val;
+
+	val = xenbus_read(t, dir, node, NULL);
+	if (IS_ERR(val))
+		return PTR_ERR(val);
+
+	va_start(ap, fmt);
+	ret = vsscanf(val, fmt, ap);
+	va_end(ap);
+	kfree(val);
+	/* Distinctive errno. */
+	if (ret == 0)
+		return -ERANGE;
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_scanf);
+
+/* Single printf and write: returns -errno or 0. */
+int xenbus_printf(struct xenbus_transaction t,
+		  const char *dir, const char *node, const char *fmt, ...)
+{
+	va_list ap;
+	int ret;
+#define PRINTF_BUFFER_SIZE 4096
+	char *printf_buffer;
+
+	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
+	if (printf_buffer == NULL)
+		return -ENOMEM;
+
+	va_start(ap, fmt);
+	ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
+	va_end(ap);
+
+	BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
+	ret = xenbus_write(t, dir, node, printf_buffer);
+
+	kfree(printf_buffer);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_printf);
+
+/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
+int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
+{
+	va_list ap;
+	const char *name;
+	int ret = 0;
+
+	va_start(ap, dir);
+	while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
+		const char *fmt = va_arg(ap, char *);
+		void *result = va_arg(ap, void *);
+		char *p;
+
+		p = xenbus_read(t, dir, name, NULL);
+		if (IS_ERR(p)) {
+			ret = PTR_ERR(p);
+			break;
+		}
+		if (fmt) {
+			if (sscanf(p, fmt, result) == 0)
+				ret = -EINVAL;
+			kfree(p);
+		} else
+			*(char **)result = p;
+	}
+	va_end(ap);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_gather);
+
+static int xs_watch(const char *path, const char *token)
+{
+	struct kvec iov[2];
+
+	iov[0].iov_base = (void *)path;
+	iov[0].iov_len = strlen(path) + 1;
+	iov[1].iov_base = (void *)token;
+	iov[1].iov_len = strlen(token) + 1;
+
+	return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov,
+				 ARRAY_SIZE(iov), NULL));
+}
+
+static int xs_unwatch(const char *path, const char *token)
+{
+	struct kvec iov[2];
+
+	iov[0].iov_base = (char *)path;
+	iov[0].iov_len = strlen(path) + 1;
+	iov[1].iov_base = (char *)token;
+	iov[1].iov_len = strlen(token) + 1;
+
+	return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov,
+				 ARRAY_SIZE(iov), NULL));
+}
+
+static struct xenbus_watch *find_watch(const char *token)
+{
+	struct xenbus_watch *i, *cmp;
+
+	cmp = (void *)simple_strtoul(token, NULL, 16);
+
+	list_for_each_entry(i, &watches, list)
+		if (i == cmp)
+			return i;
+
+	return NULL;
+}
+
+/* Register callback to watch this node. */
+int register_xenbus_watch(struct xenbus_watch *watch)
+{
+	/* Pointer in ascii is the token. */
+	char token[sizeof(watch) * 2 + 1];
+	int err;
+
+	sprintf(token, "%lX", (long)watch);
+
+	down_read(&xs_state.watch_mutex);
+
+	spin_lock(&watches_lock);
+	BUG_ON(find_watch(token));
+	list_add(&watch->list, &watches);
+	spin_unlock(&watches_lock);
+
+	err = xs_watch(watch->node, token);
+
+	/* Ignore errors due to multiple registration. */
+	if ((err != 0) && (err != -EEXIST)) {
+		spin_lock(&watches_lock);
+		list_del(&watch->list);
+		spin_unlock(&watches_lock);
+	}
+
+	up_read(&xs_state.watch_mutex);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(register_xenbus_watch);
+
+void unregister_xenbus_watch(struct xenbus_watch *watch)
+{
+	struct xs_stored_msg *msg, *tmp;
+	char token[sizeof(watch) * 2 + 1];
+	int err;
+
+	sprintf(token, "%lX", (long)watch);
+
+	down_read(&xs_state.watch_mutex);
+
+	spin_lock(&watches_lock);
+	BUG_ON(!find_watch(token));
+	list_del(&watch->list);
+	spin_unlock(&watches_lock);
+
+	err = xs_unwatch(watch->node, token);
+	if (err)
+		printk(KERN_WARNING
+		       "XENBUS Failed to release watch %s: %i\n",
+		       watch->node, err);
+
+	up_read(&xs_state.watch_mutex);
+
+	/* Make sure there are no callbacks running currently (unless
+	   it's us) */
+	if (current->pid != xenwatch_pid)
+		mutex_lock(&xenwatch_mutex);
+
+	/* Cancel pending watch events. */
+	spin_lock(&watch_events_lock);
+	list_for_each_entry_safe(msg, tmp, &watch_events, list) {
+		if (msg->u.watch.handle != watch)
+			continue;
+		list_del(&msg->list);
+		kfree(msg->u.watch.vec);
+		kfree(msg);
+	}
+	spin_unlock(&watch_events_lock);
+
+	if (current->pid != xenwatch_pid)
+		mutex_unlock(&xenwatch_mutex);
+}
+EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
+
+void xs_suspend(void)
+{
+	down_write(&xs_state.transaction_mutex);
+	down_write(&xs_state.watch_mutex);
+	mutex_lock(&xs_state.request_mutex);
+	mutex_lock(&xs_state.response_mutex);
+}
+
+void xs_resume(void)
+{
+	struct xenbus_watch *watch;
+	char token[sizeof(watch) * 2 + 1];
+
+	mutex_unlock(&xs_state.response_mutex);
+	mutex_unlock(&xs_state.request_mutex);
+	up_write(&xs_state.transaction_mutex);
+
+	/* No need for watches_lock: the watch_mutex is sufficient. */
+	list_for_each_entry(watch, &watches, list) {
+		sprintf(token, "%lX", (long)watch);
+		xs_watch(watch->node, token);
+	}
+
+	up_write(&xs_state.watch_mutex);
+}
+
+void xs_suspend_cancel(void)
+{
+	mutex_unlock(&xs_state.response_mutex);
+	mutex_unlock(&xs_state.request_mutex);
+	up_write(&xs_state.watch_mutex);
+	up_write(&xs_state.transaction_mutex);
+}
+
+static int xenwatch_thread(void *unused)
+{
+	struct list_head *ent;
+	struct xs_stored_msg *msg;
+
+	for (;;) {
+		wait_event_interruptible(watch_events_waitq,
+					 !list_empty(&watch_events));
+
+		if (kthread_should_stop())
+			break;
+
+		mutex_lock(&xenwatch_mutex);
+
+		spin_lock(&watch_events_lock);
+		ent = watch_events.next;
+		if (ent != &watch_events)
+			list_del(ent);
+		spin_unlock(&watch_events_lock);
+
+		if (ent != &watch_events) {
+			msg = list_entry(ent, struct xs_stored_msg, list);
+			msg->u.watch.handle->callback(
+				msg->u.watch.handle,
+				(const char **)msg->u.watch.vec,
+				msg->u.watch.vec_size);
+			kfree(msg->u.watch.vec);
+			kfree(msg);
+		}
+
+		mutex_unlock(&xenwatch_mutex);
+	}
+
+	return 0;
+}
+
+static int process_msg(void)
+{
+	struct xs_stored_msg *msg;
+	char *body;
+	int err;
+
+	/*
+	 * We must disallow save/restore while reading a xenstore message.
+	 * A partial read across s/r leaves us out of sync with xenstored.
+	 */
+	for (;;) {
+		err = xb_wait_for_data_to_read();
+		if (err)
+			return err;
+		mutex_lock(&xs_state.response_mutex);
+		if (xb_data_to_read())
+			break;
+		/* We raced with save/restore: pending data 'disappeared'. */
+		mutex_unlock(&xs_state.response_mutex);
+	}
+
+
+	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+	if (msg == NULL) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = xb_read(&msg->hdr, sizeof(msg->hdr));
+	if (err) {
+		kfree(msg);
+		goto out;
+	}
+
+	body = kmalloc(msg->hdr.len + 1, GFP_KERNEL);
+	if (body == NULL) {
+		kfree(msg);
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = xb_read(body, msg->hdr.len);
+	if (err) {
+		kfree(body);
+		kfree(msg);
+		goto out;
+	}
+	body[msg->hdr.len] = '\0';
+
+	if (msg->hdr.type == XS_WATCH_EVENT) {
+		msg->u.watch.vec = split(body, msg->hdr.len,
+					 &msg->u.watch.vec_size);
+		if (IS_ERR(msg->u.watch.vec)) {
+			kfree(msg);
+			err = PTR_ERR(msg->u.watch.vec);
+			goto out;
+		}
+
+		spin_lock(&watches_lock);
+		msg->u.watch.handle = find_watch(
+			msg->u.watch.vec[XS_WATCH_TOKEN]);
+		if (msg->u.watch.handle != NULL) {
+			spin_lock(&watch_events_lock);
+			list_add_tail(&msg->list, &watch_events);
+			wake_up(&watch_events_waitq);
+			spin_unlock(&watch_events_lock);
+		} else {
+			kfree(msg->u.watch.vec);
+			kfree(msg);
+		}
+		spin_unlock(&watches_lock);
+	} else {
+		msg->u.reply.body = body;
+		spin_lock(&xs_state.reply_lock);
+		list_add_tail(&msg->list, &xs_state.reply_list);
+		spin_unlock(&xs_state.reply_lock);
+		wake_up(&xs_state.reply_waitq);
+	}
+
+ out:
+	mutex_unlock(&xs_state.response_mutex);
+	return err;
+}
+
+static int xenbus_thread(void *unused)
+{
+	int err;
+
+	for (;;) {
+		err = process_msg();
+		if (err)
+			printk(KERN_WARNING "XENBUS error %d while reading "
+			       "message\n", err);
+		if (kthread_should_stop())
+			break;
+	}
+
+	return 0;
+}
+
+int xs_init(void)
+{
+	int err;
+	struct task_struct *task;
+
+	INIT_LIST_HEAD(&xs_state.reply_list);
+	spin_lock_init(&xs_state.reply_lock);
+	init_waitqueue_head(&xs_state.reply_waitq);
+
+	mutex_init(&xs_state.request_mutex);
+	mutex_init(&xs_state.response_mutex);
+	init_rwsem(&xs_state.transaction_mutex);
+	init_rwsem(&xs_state.watch_mutex);
+
+	/* Initialize the shared memory rings to talk to xenstored */
+	err = xb_init_comms();
+	if (err)
+		return err;
+
+	task = kthread_run(xenwatch_thread, NULL, "xenwatch");
+	if (IS_ERR(task))
+		return PTR_ERR(task);
+	xenwatch_pid = task->pid;
+
+	task = kthread_run(xenbus_thread, NULL, "xenbus");
+	if (IS_ERR(task))
+		return PTR_ERR(task);
+
+	return 0;
+}
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 45c3598..0a7068e 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -131,7 +131,9 @@
 		switch (token) {
 		case Opt_debug:
 			v9ses->debug = option;
+#ifdef CONFIG_NET_9P_DEBUG
 			p9_debug_level = option;
+#endif
 			break;
 		case Opt_port:
 			v9ses->port = option;
diff --git a/fs/Kconfig b/fs/Kconfig
index ee11f8d..6a64990 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -251,7 +251,7 @@
 
 config JBD2_DEBUG
 	bool "JBD2 (ext4dev/ext4) debugging support"
-	depends on JBD2
+	depends on JBD2 && DEBUG_FS
 	help
 	  If you are using the ext4dev/ext4 journaled file system (or
 	  potentially any other filesystem/device using JBD2), this option
@@ -260,10 +260,10 @@
 	  By default, the debugging output will be turned off.
 
 	  If you select Y here, then you will be able to turn on debugging
-	  with "echo N > /proc/sys/fs/jbd2-debug", where N is a number between
-	  1 and 5. The higher the number, the more debugging output is
-	  generated.  To turn debugging off again, do
-	  "echo 0 > /proc/sys/fs/jbd2-debug".
+	  with "echo N > /sys/kernel/debug/jbd2/jbd2-debug", where N is a
+	  number between 1 and 5. The higher the number, the more debugging
+	  output is generated.  To turn debugging off again, do
+	  "echo 0 > /sys/kernel/debug/jbd2/jbd2-debug".
 
 config FS_MBCACHE
 # Meta block cache for Extended Attributes (ext2/ext3/ext4)
@@ -1675,6 +1675,7 @@
 config NFSD_V4
 	bool "Provide NFSv4 server support (EXPERIMENTAL)"
 	depends on NFSD_V3 && EXPERIMENTAL
+	select RPCSEC_GSS_KRB5
 	help
 	  If you would like to include the NFSv4 server as well as the NFSv2
 	  and NFSv3 servers, say Y here.  This feature is experimental, and
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 8f07f8d1..4f77f3c 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -456,7 +456,8 @@
 
 	/* check local lock records first */
 	ret = 0;
-	if (posix_test_lock(file, fl) == 0) {
+	posix_test_lock(file, fl);
+	if (fl->fl_type == F_UNLCK) {
 		/* no local locks; consult the server */
 		ret = afs_vnode_fetch_status(vnode, NULL, key);
 		if (ret < 0)
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index a260198..b4a7588 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -139,6 +139,7 @@
 	put_filp(file);
 	return error;
 }
+EXPORT_SYMBOL_GPL(anon_inode_getfd);
 
 /*
  * A single inode exists for all anon_inode files. Contrary to pipes,
diff --git a/fs/attr.c b/fs/attr.c
index a0a0c7b..f8dfc22 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -42,7 +42,7 @@
 
 	/* Make sure a caller can chmod. */
 	if (ia_valid & ATTR_MODE) {
-		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			goto error;
 		/* Also check the setgid bit! */
 		if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
@@ -52,7 +52,7 @@
 
 	/* Check for setting the inode time. */
 	if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
-		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			goto error;
 	}
 fine:
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index a27e42b..ba24cb2 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -148,6 +148,7 @@
 	elf_addr_t *elf_info;
 	int ei_index = 0;
 	struct task_struct *tsk = current;
+	struct vm_area_struct *vma;
 
 	/*
 	 * If this architecture has a platform capability string, copy it
@@ -234,6 +235,15 @@
 	sp = (elf_addr_t __user *)bprm->p;
 #endif
 
+
+	/*
+	 * Grow the stack manually; some architectures have a limit on how
+	 * far ahead a user-space access may be in order to grow the stack.
+	 */
+	vma = find_extend_vma(current->mm, bprm->p);
+	if (!vma)
+		return -EFAULT;
+
 	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
 	if (__put_user(argc, sp++))
 		return -EFAULT;
@@ -254,8 +264,8 @@
 		size_t len;
 		if (__put_user((elf_addr_t)p, argv++))
 			return -EFAULT;
-		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
-		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
+		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
+		if (!len || len > MAX_ARG_STRLEN)
 			return 0;
 		p += len;
 	}
@@ -266,8 +276,8 @@
 		size_t len;
 		if (__put_user((elf_addr_t)p, envp++))
 			return -EFAULT;
-		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
-		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
+		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
+		if (!len || len > MAX_ARG_STRLEN)
 			return 0;
 		p += len;
 	}
@@ -826,10 +836,6 @@
 	}
 
 	/* OK, This is the point of no return */
-	current->mm->start_data = 0;
-	current->mm->end_data = 0;
-	current->mm->end_code = 0;
-	current->mm->mmap = NULL;
 	current->flags &= ~PF_FORKNOEXEC;
 	current->mm->def_flags = def_flags;
 
@@ -1051,9 +1057,13 @@
 
 	compute_creds(bprm);
 	current->flags &= ~PF_FORKNOEXEC;
-	create_elf_tables(bprm, &loc->elf_ex,
+	retval = create_elf_tables(bprm, &loc->elf_ex,
 			  (interpreter_type == INTERPRETER_AOUT),
 			  load_addr, interp_load_addr);
+	if (retval < 0) {
+		send_sig(SIGKILL, current, 0);
+		goto out;
+	}
 	/* N.B. passed_fileno might not be initialized? */
 	if (interpreter_type == INTERPRETER_AOUT)
 		current->mm->arg_start += strlen(passed_fileno) + 1;
@@ -1252,7 +1262,7 @@
  *
  * I think we should skip something. But I am not sure how. H.J.
  */
-static int maydump(struct vm_area_struct *vma)
+static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
 {
 	/* The vma can be set up to tell us the answer directly.  */
 	if (vma->vm_flags & VM_ALWAYSDUMP)
@@ -1262,15 +1272,19 @@
 	if (vma->vm_flags & (VM_IO | VM_RESERVED))
 		return 0;
 
-	/* Dump shared memory only if mapped from an anonymous file. */
-	if (vma->vm_flags & VM_SHARED)
-		return vma->vm_file->f_path.dentry->d_inode->i_nlink == 0;
+	/* By default, dump shared memory if mapped from an anonymous file. */
+	if (vma->vm_flags & VM_SHARED) {
+		if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0)
+			return test_bit(MMF_DUMP_ANON_SHARED, &mm_flags);
+		else
+			return test_bit(MMF_DUMP_MAPPED_SHARED, &mm_flags);
+	}
 
-	/* If it hasn't been written to, don't write it out */
+	/* By default, if it hasn't been written to, don't write it out. */
 	if (!vma->anon_vma)
-		return 0;
+		return test_bit(MMF_DUMP_MAPPED_PRIVATE, &mm_flags);
 
-	return 1;
+	return test_bit(MMF_DUMP_ANON_PRIVATE, &mm_flags);
 }
 
 /* An ELF note in memory */
@@ -1562,6 +1576,7 @@
 #endif
 	int thread_status_size = 0;
 	elf_addr_t *auxv;
+	unsigned long mm_flags;
 #ifdef ELF_CORE_WRITE_EXTRA_NOTES
 	int extra_notes_size;
 #endif
@@ -1705,6 +1720,13 @@
 
 	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 
+	/*
+	 * We must use the same mm->flags while dumping core to avoid
+	 * inconsistency between the program headers and bodies, otherwise an
+	 * unusable core file can be generated.
+	 */
+	mm_flags = current->mm->flags;
+
 	/* Write program headers for segments dump */
 	for (vma = first_vma(current, gate_vma); vma != NULL;
 			vma = next_vma(vma, gate_vma)) {
@@ -1717,7 +1739,7 @@
 		phdr.p_offset = offset;
 		phdr.p_vaddr = vma->vm_start;
 		phdr.p_paddr = 0;
-		phdr.p_filesz = maydump(vma) ? sz : 0;
+		phdr.p_filesz = maydump(vma, mm_flags) ? sz : 0;
 		phdr.p_memsz = sz;
 		offset += phdr.p_filesz;
 		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
@@ -1761,7 +1783,7 @@
 			vma = next_vma(vma, gate_vma)) {
 		unsigned long addr;
 
-		if (!maydump(vma))
+		if (!maydump(vma, mm_flags))
 			continue;
 
 		for (addr = vma->vm_start;
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 9d62fbad..2f5d8db 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -621,8 +621,8 @@
 	p = (char __user *) current->mm->arg_start;
 	for (loop = bprm->argc; loop > 0; loop--) {
 		__put_user((elf_caddr_t) p, argv++);
-		len = strnlen_user(p, PAGE_SIZE * MAX_ARG_PAGES);
-		if (!len || len > PAGE_SIZE * MAX_ARG_PAGES)
+		len = strnlen_user(p, MAX_ARG_STRLEN);
+		if (!len || len > MAX_ARG_STRLEN)
 			return -EINVAL;
 		p += len;
 	}
@@ -633,8 +633,8 @@
 	current->mm->env_start = (unsigned long) p;
 	for (loop = bprm->envc; loop > 0; loop--) {
 		__put_user((elf_caddr_t)(unsigned long) p, envp++);
-		len = strnlen_user(p, PAGE_SIZE * MAX_ARG_PAGES);
-		if (!len || len > PAGE_SIZE * MAX_ARG_PAGES)
+		len = strnlen_user(p, MAX_ARG_STRLEN);
+		if (!len || len > MAX_ARG_STRLEN)
 			return -EINVAL;
 		p += len;
 	}
@@ -1181,8 +1181,10 @@
  *
  * I think we should skip something. But I am not sure how. H.J.
  */
-static int maydump(struct vm_area_struct *vma)
+static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
 {
+	int dump_ok;
+
 	/* Do not dump I/O mapped devices or special mappings */
 	if (vma->vm_flags & (VM_IO | VM_RESERVED)) {
 		kdcore("%08lx: %08lx: no (IO)", vma->vm_start, vma->vm_flags);
@@ -1197,27 +1199,35 @@
 		return 0;
 	}
 
-	/* Dump shared memory only if mapped from an anonymous file. */
+	/* By default, dump shared memory if mapped from an anonymous file. */
 	if (vma->vm_flags & VM_SHARED) {
 		if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0) {
-			kdcore("%08lx: %08lx: no (share)", vma->vm_start, vma->vm_flags);
-			return 1;
+			dump_ok = test_bit(MMF_DUMP_ANON_SHARED, &mm_flags);
+			kdcore("%08lx: %08lx: %s (share)", vma->vm_start,
+			       vma->vm_flags, dump_ok ? "yes" : "no");
+			return dump_ok;
 		}
 
-		kdcore("%08lx: %08lx: no (share)", vma->vm_start, vma->vm_flags);
-		return 0;
+		dump_ok = test_bit(MMF_DUMP_MAPPED_SHARED, &mm_flags);
+		kdcore("%08lx: %08lx: %s (share)", vma->vm_start,
+		       vma->vm_flags, dump_ok ? "yes" : "no");
+		return dump_ok;
 	}
 
 #ifdef CONFIG_MMU
-	/* If it hasn't been written to, don't write it out */
+	/* By default, if it hasn't been written to, don't write it out */
 	if (!vma->anon_vma) {
-		kdcore("%08lx: %08lx: no (!anon)", vma->vm_start, vma->vm_flags);
-		return 0;
+		dump_ok = test_bit(MMF_DUMP_MAPPED_PRIVATE, &mm_flags);
+		kdcore("%08lx: %08lx: %s (!anon)", vma->vm_start,
+		       vma->vm_flags, dump_ok ? "yes" : "no");
+		return dump_ok;
 	}
 #endif
 
-	kdcore("%08lx: %08lx: yes", vma->vm_start, vma->vm_flags);
-	return 1;
+	dump_ok = test_bit(MMF_DUMP_ANON_PRIVATE, &mm_flags);
+	kdcore("%08lx: %08lx: %s", vma->vm_start, vma->vm_flags,
+	       dump_ok ? "yes" : "no");
+	return dump_ok;
 }
 
 /* An ELF note in memory */
@@ -1456,15 +1466,15 @@
  * dump the segments for an MMU process
  */
 #ifdef CONFIG_MMU
-static int elf_fdpic_dump_segments(struct file *file, struct mm_struct *mm,
-				   size_t *size, unsigned long *limit)
+static int elf_fdpic_dump_segments(struct file *file, size_t *size,
+			   unsigned long *limit, unsigned long mm_flags)
 {
 	struct vm_area_struct *vma;
 
 	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
 		unsigned long addr;
 
-		if (!maydump(vma))
+		if (!maydump(vma, mm_flags))
 			continue;
 
 		for (addr = vma->vm_start;
@@ -1511,15 +1521,15 @@
  * dump the segments for a NOMMU process
  */
 #ifndef CONFIG_MMU
-static int elf_fdpic_dump_segments(struct file *file, struct mm_struct *mm,
-				   size_t *size, unsigned long *limit)
+static int elf_fdpic_dump_segments(struct file *file, size_t *size,
+			   unsigned long *limit, unsigned long mm_flags)
 {
 	struct vm_list_struct *vml;
 
 	for (vml = current->mm->context.vmlist; vml; vml = vml->next) {
 	struct vm_area_struct *vma = vml->vma;
 
-		if (!maydump(vma))
+		if (!maydump(vma, mm_flags))
 			continue;
 
 		if ((*size += PAGE_SIZE) > *limit)
@@ -1570,6 +1580,7 @@
 	struct vm_list_struct *vml;
 #endif
 	elf_addr_t *auxv;
+	unsigned long mm_flags;
 
 	/*
 	 * We no longer stop all VM operations.
@@ -1707,6 +1718,13 @@
 	/* Page-align dumped data */
 	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 
+	/*
+	 * We must use the same mm->flags while dumping core to avoid
+	 * inconsistency between the program headers and bodies, otherwise an
+	 * unusable core file can be generated.
+	 */
+	mm_flags = current->mm->flags;
+
 	/* write program headers for segments dump */
 	for (
 #ifdef CONFIG_MMU
@@ -1728,7 +1746,7 @@
 		phdr.p_offset = offset;
 		phdr.p_vaddr = vma->vm_start;
 		phdr.p_paddr = 0;
-		phdr.p_filesz = maydump(vma) ? sz : 0;
+		phdr.p_filesz = maydump(vma, mm_flags) ? sz : 0;
 		phdr.p_memsz = sz;
 		offset += phdr.p_filesz;
 		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
@@ -1762,7 +1780,7 @@
 
 	DUMP_SEEK(dataoff);
 
-	if (elf_fdpic_dump_segments(file, current->mm, &size, &limit) < 0)
+	if (elf_fdpic_dump_segments(file, &size, &limit, mm_flags) < 0)
 		goto end_coredump;
 
 #ifdef ELF_CORE_WRITE_EXTRA_DATA
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 330fd3f..42e94b3 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -126,7 +126,9 @@
 		goto _ret;
 
 	if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) {
-		remove_arg_zero(bprm);
+		retval = remove_arg_zero(bprm);
+		if (retval)
+			goto _ret;
 	}
 
 	if (fmt->flags & MISC_FMT_OPEN_BINARY) {
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index 304c885..4d0e0f6 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -67,7 +67,9 @@
 	 * This is done in reverse order, because of how the
 	 * user environment and arguments are stored.
 	 */
-	remove_arg_zero(bprm);
+	retval = remove_arg_zero(bprm);
+	if (retval)
+		return retval;
 	retval = copy_strings_kernel(1, &bprm->interp, bprm);
 	if (retval < 0) return retval; 
 	bprm->argc++;
diff --git a/fs/buffer.c b/fs/buffer.c
index 424165b..0f90067 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -356,7 +356,7 @@
 	for_each_online_pgdat(pgdat) {
 		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
 		if (*zones)
-			try_to_free_pages(zones, GFP_NOFS);
+			try_to_free_pages(zones, 0, GFP_NOFS);
 	}
 }
 
@@ -676,6 +676,39 @@
 EXPORT_SYMBOL(mark_buffer_dirty_inode);
 
 /*
+ * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
+ * dirty.
+ *
+ * If warn is true, then emit a warning if the page is not uptodate and has
+ * not been truncated.
+ */
+static int __set_page_dirty(struct page *page,
+		struct address_space *mapping, int warn)
+{
+	if (unlikely(!mapping))
+		return !TestSetPageDirty(page);
+
+	if (TestSetPageDirty(page))
+		return 0;
+
+	write_lock_irq(&mapping->tree_lock);
+	if (page->mapping) {	/* Race with truncate? */
+		WARN_ON_ONCE(warn && !PageUptodate(page));
+
+		if (mapping_cap_account_dirty(mapping)) {
+			__inc_zone_page_state(page, NR_FILE_DIRTY);
+			task_io_account_write(PAGE_CACHE_SIZE);
+		}
+		radix_tree_tag_set(&mapping->page_tree,
+				page_index(page), PAGECACHE_TAG_DIRTY);
+	}
+	write_unlock_irq(&mapping->tree_lock);
+	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+
+	return 1;
+}
+
+/*
  * Add a page to the dirty page list.
  *
  * It is a sad fact of life that this function is called from several places
@@ -702,7 +735,7 @@
  */
 int __set_page_dirty_buffers(struct page *page)
 {
-	struct address_space * const mapping = page_mapping(page);
+	struct address_space *mapping = page_mapping(page);
 
 	if (unlikely(!mapping))
 		return !TestSetPageDirty(page);
@@ -719,21 +752,7 @@
 	}
 	spin_unlock(&mapping->private_lock);
 
-	if (TestSetPageDirty(page))
-		return 0;
-
-	write_lock_irq(&mapping->tree_lock);
-	if (page->mapping) {	/* Race with truncate? */
-		if (mapping_cap_account_dirty(mapping)) {
-			__inc_zone_page_state(page, NR_FILE_DIRTY);
-			task_io_account_write(PAGE_CACHE_SIZE);
-		}
-		radix_tree_tag_set(&mapping->page_tree,
-				page_index(page), PAGECACHE_TAG_DIRTY);
-	}
-	write_unlock_irq(&mapping->tree_lock);
-	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-	return 1;
+	return __set_page_dirty(page, mapping, 1);
 }
 EXPORT_SYMBOL(__set_page_dirty_buffers);
 
@@ -982,7 +1001,7 @@
 	struct buffer_head *bh;
 
 	page = find_or_create_page(inode->i_mapping, index,
-		mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
+		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
 	if (!page)
 		return NULL;
 
@@ -1132,8 +1151,9 @@
  */
 void fastcall mark_buffer_dirty(struct buffer_head *bh)
 {
+	WARN_ON_ONCE(!buffer_uptodate(bh));
 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
-		__set_page_dirty_nobuffers(bh->b_page);
+		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
 }
 
 /*
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 164a45c..bbbf07b 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -321,14 +321,13 @@
 	}
 }
 
-int unregister_chrdev(unsigned int major, const char *name)
+void unregister_chrdev(unsigned int major, const char *name)
 {
 	struct char_device_struct *cd;
 	cd = __unregister_chrdev_region(major, 0, 256);
 	if (cd && cd->cdev)
 		cdev_del(cd->cdev);
 	kfree(cd);
-	return 0;
 }
 
 static DEFINE_SPINLOCK(cdev_lock);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index a9b6bc5..6d84ca2 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,10 @@
+Version 1.50
+------------
+Fix NTLMv2 signing. NFS server mounted over cifs works (if cifs mount is
+done with "serverino" mount option).  Add support for POSIX Unlink
+(helps with certain sharing violation cases when a server such as
+Samba supports newer POSIX CIFS Protocol Extensions).
+
 Version 1.49
 ------------
 IPv6 support.  Enable ipv6 addresses to be passed on mount (put the ipv6
@@ -8,7 +15,11 @@
 default uid and gid for files when they are certain that the uids or
 gids on the server do not match those of the client.  Make "sec=none"
 mount override username (so that null user connection is attempted)
-to match what documentation said.
+to match what documentation said. Support for very large reads, over 127K,
+available to some newer servers (such as Samba 3.0.26 and later but
+note that it also requires setting CIFSMaxBufSize at module install
+time to a larger value which may hurt performance in some cases).
+Make sign option force signing (or fail if server does not support it).
 
 Version 1.48
 ------------
diff --git a/fs/cifs/README b/fs/cifs/README
index 4d01697..85f1eb1 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -301,10 +301,21 @@
 		during the local client kernel build will be used.
 		If server does not support Unicode, this parameter is
 		unused.
-  rsize		default read size (usually 16K)
-  wsize		default write size (usually 16K, 32K is often better over GigE)
-		maximum wsize currently allowed by CIFS is 57344 (14 4096 byte
-		pages)
+  rsize		default read size (usually 16K). The client currently
+		cannot use rsize larger than CIFSMaxBufSize. CIFSMaxBufSize
+		defaults to 16K and may be changed (from 8K to the maximum
+		kmalloc size allowed by your kernel) at module install time
+		for cifs.ko. Setting CIFSMaxBufSize to a very large value
+		will cause cifs to use more memory and may reduce performance
+		in some cases.  To use rsize greater than 127K (the original
+		cifs protocol maximum) also requires that the server support
+		a new Unix Capability flag (for very large read) which some
+		newer servers (e.g. Samba 3.0.26 or later) do. rsize can be
+		set from a minimum of 2048 to a maximum of 130048 (127K or
+		CIFSMaxBufSize, whichever is smaller)
+  wsize		default write size (default 57344)
+		maximum wsize currently allowed by CIFS is 57344 (fourteen
+		4096 byte pages)
   rw		mount the network share read-write (note that the
 		server may still consider the share read-only)
   ro		mount network share read-only
@@ -359,7 +370,7 @@
 		Note that this does not affect the normal ACL check on the
 		target machine done by the server software (of the server
 		ACL against the user name provided at mount time).
-  serverino	Use servers inode numbers instead of generating automatically
+  serverino	Use server's inode numbers instead of generating automatically
 		incrementing inode numbers on the client.  Although this will
 		make it easier to spot hardlinked files (as they will have
 		the same inode numbers) and inode numbers may be persistent,
@@ -367,12 +378,11 @@
 		are unique if multiple server side mounts are exported under a
 		single share (since inode numbers on the servers might not
 		be unique if multiple filesystems are mounted under the same
-		shared higher level directory).  Note that this requires that
-		the server support the CIFS Unix Extensions as other servers
-		do not return a unique IndexNumber on SMB FindFirst (most
-		servers return zero as the IndexNumber).  Parameter has no
-		effect to Windows servers and others which do not support the
-		CIFS Unix Extensions.
+		shared higher level directory).  Note that some older servers
+		(e.g. pre-Windows 2000) do not support returning UniqueIDs
+		or the CIFS Unix Extensions equivalent and for those
+		this mount option will have no effect.  Exporting cifs mounts
+		under nfsd requires this mount option on the cifs mount.
   noserverino   Client generates inode numbers (rather than using the actual one
 		from the server) by default.
   setuids       If the CIFS Unix extensions are negotiated with the server
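
For instance (names are placeholders), a cifs mount that is to be
re-exported through nfsd would be mounted with serverino as noted above:

	mount -t cifs //server/share /mnt/cifs -o user=testuser,serverino
	# illustrative /etc/exports entry for the re-export:
	# /mnt/cifs  *(ro,fsid=1)
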
@@ -582,10 +592,10 @@
 
 	echo 1 > /proc/fs/cifs/traceSMB
 
-Two other experimental features are under development and to test 
-require enabling CONFIG_CIFS_EXPERIMENTAL
+Two other experimental features are under development. Testing them
+requires enabling CONFIG_CIFS_EXPERIMENTAL
 
-	More efficient write operations
+	ipv6 enablement
 
 	DNOTIFY fcntl: needed for support of directory change 
 			    notification and perhaps later for file leases)
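
A sketch of enabling those experimental features for a test build; the
single-target make invocation assumes an in-tree module build:

	# set CONFIG_CIFS_EXPERIMENTAL=y under the CIFS filesystem options
	make menuconfig
	make fs/cifs/cifs.ko
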
diff --git a/fs/cifs/TODO b/fs/cifs/TODO
index 78b620e..d7bd515 100644
--- a/fs/cifs/TODO
+++ b/fs/cifs/TODO
@@ -18,9 +18,9 @@
 
 d) Kerberos/SPNEGO session setup support - (started)
 
-e) More testing of NTLMv2 authentication (mostly implemented - double check
-that NTLMv2 signing works, also need to cleanup now unneeded SessSetup code in
-fs/cifs/connect.c)
+e) Clean up the now unneeded SessSetup code in
+fs/cifs/connect.c and add back in NTLMSSP code if any servers
+need it
 
 f) MD5-HMAC signing SMB PDUs when SPNEGO style SessionSetup 
 used (Kerberos or NTLMSSP). Signing already implemented for NTLM
@@ -106,6 +106,12 @@
 succeed but still return access denied (appears to be Windows 
 server not cifs client problem) and has not been reproduced recently.
 NTFS partitions do not have this problem.
+4) Unix/POSIX capabilities are reset after reconnection, and affect
+a few fields in the tree connection but we do not know which
+superblocks to apply these changes to.  We should probably walk
+the list of superblocks to set these.  Also need to check the
+flags on the second mount to the same share, and see if we
+can do the same trick that NFS does to remount duplicate shares.
 
 Misc testing to do
 ==================
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 2e75883..f50a88d 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -1,7 +1,7 @@
-/* 
+/*
 * The ASN.1/BER parsing code is derived from ip_nat_snmp_basic.c which was in
  * turn derived from the gxsnmp package by Gregory McLean & Jochen Friedrich
- *      
+ *
  * Copyright (c) 2000 RP Internet (www.rpi.net.au).
  *
  * This program is free software; you can redistribute it and/or modify
@@ -80,7 +80,7 @@
 static unsigned long SPNEGO_OID[7] = { 1, 3, 6, 1, 5, 5, 2 };
 static unsigned long NTLMSSP_OID[10] = { 1, 3, 6, 1, 4, 1, 311, 2, 2, 10 };
 
-/* 
+/*
  * ASN.1 context.
  */
 struct asn1_ctx {
@@ -190,7 +190,7 @@
 		   unsigned char **eoc,
 		   unsigned int *cls, unsigned int *con, unsigned int *tag)
 {
-	unsigned int def = 0; 
+	unsigned int def = 0;
 	unsigned int len = 0;
 
 	if (!asn1_id_decode(ctx, cls, con, tag))
@@ -331,7 +331,7 @@
 		*integer |= ch;
 	}
 	return 1;
-} 
+}
 
 static unsigned char
 asn1_octets_decode(struct asn1_ctx *ctx,
@@ -376,7 +376,7 @@
 	return 1;
 }
 
-static int 
+static int
 asn1_oid_decode(struct asn1_ctx *ctx,
 		unsigned char *eoc, unsigned long **oid, unsigned int *len)
 {
@@ -459,7 +459,7 @@
 	unsigned int cls, con, tag, oidlen, rc;
 	int use_ntlmssp = FALSE;
 
-	*secType = NTLM; /* BB eventually make Kerberos or NLTMSSP the default */
+	*secType = NTLM; /* BB eventually make Kerberos or NTLMSSP the default*/
 
 	/* cifs_dump_mem(" Received SecBlob ", security_blob, length); */
 
@@ -498,7 +498,8 @@
 			return 0;
 		} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
 			   || (tag != ASN1_EOC)) {
-			cFYI(1,("cls = %d con = %d tag = %d end = %p (%d) exit 0",
+			cFYI(1,
+			     ("cls = %d con = %d tag = %d end = %p (%d) exit 0",
 			      cls, con, tag, end, *end));
 			return 0;
 		}
@@ -508,7 +509,8 @@
 			return 0;
 		} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
 			   || (tag != ASN1_SEQ)) {
-			cFYI(1,("cls = %d con = %d tag = %d end = %p (%d) exit 1",
+			cFYI(1,
+			     ("cls = %d con = %d tag = %d end = %p (%d) exit 1",
 			      cls, con, tag, end, *end));
 			return 0;
 		}
@@ -540,32 +542,34 @@
 			rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag);
 			if (!rc) {
 				cFYI(1,
-				     ("Error 1 decoding negTokenInit header exit 2"));
+				     ("Error decoding negTokenInit hdr exit2"));
 				return 0;
 			}
 			if ((tag == ASN1_OJI) && (con == ASN1_PRI)) {
 				rc = asn1_oid_decode(&ctx, end, &oid, &oidlen);
-				if(rc) {		
+				if (rc) {
 					cFYI(1,
-					  ("OID len = %d oid = 0x%lx 0x%lx 0x%lx 0x%lx",
-					   oidlen, *oid, *(oid + 1), *(oid + 2),
-					   *(oid + 3)));
-					rc = compare_oid(oid, oidlen, NTLMSSP_OID,
-						 NTLMSSP_OID_LEN);
+					  ("OID len = %d oid = 0x%lx 0x%lx "
+					   "0x%lx 0x%lx",
+					   oidlen, *oid, *(oid + 1),
+					   *(oid + 2), *(oid + 3)));
+					rc = compare_oid(oid, oidlen,
+						 NTLMSSP_OID, NTLMSSP_OID_LEN);
 					kfree(oid);
 					if (rc)
 						use_ntlmssp = TRUE;
 				}
 			} else {
-				cFYI(1,("This should be an oid what is going on? "));
+				cFYI(1, ("Should be an oid what is going on?"));
 			}
 		}
 
 		if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
 			cFYI(1,
-			     ("Error decoding last part of negTokenInit exit 3"));
+			     ("Error decoding last part negTokenInit exit3"));
 			return 0;
-		} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {	/* tag = 3 indicating mechListMIC */
+		} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
+			/* tag = 3 indicating mechListMIC */
 			cFYI(1,
 			     ("Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
 			      cls, con, tag, end, *end));
@@ -573,7 +577,7 @@
 		}
 		if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
 			cFYI(1,
-			     ("Error decoding last part of negTokenInit exit 5"));
+			     ("Error decoding last part negTokenInit exit5"));
 			return 0;
 		} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
 			   || (tag != ASN1_SEQ)) {
@@ -584,7 +588,7 @@
 
 		if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
 			cFYI(1,
-			     ("Error decoding last part of negTokenInit exit 7"));
+			     ("Error decoding last part negTokenInit exit 7"));
 			return 0;
 		} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
 			cFYI(1,
@@ -594,20 +598,21 @@
 		}
 		if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
 			cFYI(1,
-			     ("Error decoding last part of negTokenInit exit 9"));
+			     ("Error decoding last part negTokenInit exit9"));
 			return 0;
 		} else if ((cls != ASN1_UNI) || (con != ASN1_PRI)
 			   || (tag != ASN1_GENSTR)) {
 			cFYI(1,
-			     ("Exit 10 cls = %d con = %d tag = %d end = %p (%d)",
+			     ("Exit10 cls = %d con = %d tag = %d end = %p (%d)",
 			      cls, con, tag, end, *end));
 			return 0;
 		}
-		cFYI(1, ("Need to call asn1_octets_decode() function for this %s", ctx.pointer));	/* is this UTF-8 or ASCII? */
+		cFYI(1, ("Need to call asn1_octets_decode() function for %s",
+			 ctx.pointer));	/* is this UTF-8 or ASCII? */
 	}
 
-	/* if (use_kerberos) 
-	   *secType = Kerberos 
+	/* if (use_kerberos)
+	   *secType = Kerberos
 	   else */
 	if (use_ntlmssp) {
 		*secType = NTLMSSP;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 07838b2..1bf8cf5 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -58,7 +58,7 @@
 }
 
 #ifdef CONFIG_CIFS_DEBUG2
-void cifs_dump_detail(struct smb_hdr * smb)
+void cifs_dump_detail(struct smb_hdr *smb)
 {
 	cERROR(1, ("Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
 		  smb->Command, smb->Status.CifsError,
@@ -67,10 +67,10 @@
 }
 
 
-void cifs_dump_mids(struct TCP_Server_Info * server)
+void cifs_dump_mids(struct TCP_Server_Info *server)
 {
 	struct list_head *tmp;
-	struct mid_q_entry * mid_entry;
+	struct mid_q_entry *mid_entry;
 
 	if (server == NULL)
 		return;
@@ -114,12 +114,12 @@
 {
 	struct list_head *tmp;
 	struct list_head *tmp1;
-	struct mid_q_entry * mid_entry;
+	struct mid_q_entry *mid_entry;
 	struct cifsSesInfo *ses;
 	struct cifsTconInfo *tcon;
 	int i;
 	int length = 0;
-	char * original_buf = buf;
+	char *original_buf = buf;
 
 	*beginBuffer = buf + offset;
 
@@ -145,7 +145,6 @@
 		   (ses->serverNOS == NULL)) {
 			buf += sprintf(buf, "\nentry for %s not fully "
 					"displayed\n\t", ses->serverName);
-			
 		} else {
 			length =
 			    sprintf(buf,
@@ -901,90 +900,14 @@
 	}
 	/* flags look ok - update the global security flags for cifs module */
 	extended_security = flags;
+	if (extended_security & CIFSSEC_MUST_SIGN) {
+		/* requiring signing implies signing is allowed */
+		extended_security |= CIFSSEC_MAY_SIGN;
+		cFYI(1, ("packet signing now required"));
+	} else if ((extended_security & CIFSSEC_MAY_SIGN) == 0) {
+		cFYI(1, ("packet signing disabled"));
+	}
+	/* BB should we turn on MAY flags for other MUST options? */
 	return count;
 }
-
-/* static int
-ntlmv2_enabled_read(char *page, char **start, off_t off,
-		       int count, int *eof, void *data)
-{
-	int len;
-
-	len = sprintf(page, "%d\n", ntlmv2_support);
-
-	len -= off;
-	*start = page + off;
-
-	if (len > count)
-		len = count;
-	else
-		*eof = 1;
-
-	if (len < 0)
-		len = 0;
-
-	return len;
-}
-static int
-ntlmv2_enabled_write(struct file *file, const char __user *buffer,
-			unsigned long count, void *data)
-{
-	char c;
-	int rc;
-
-	rc = get_user(c, buffer);
-	if (rc)
-		return rc;
-	if (c == '0' || c == 'n' || c == 'N')
-		ntlmv2_support = 0;
-	else if (c == '1' || c == 'y' || c == 'Y')
-		ntlmv2_support = 1;
-	else if (c == '2')
-		ntlmv2_support = 2;
-
-	return count;
-}
-
-static int
-packet_signing_enabled_read(char *page, char **start, off_t off,
-		       int count, int *eof, void *data)
-{
-	int len;
-
-	len = sprintf(page, "%d\n", sign_CIFS_PDUs);
-
-	len -= off;
-	*start = page + off;
-
-	if (len > count)
-		len = count;
-	else
-		*eof = 1;
-
-	if (len < 0)
-		len = 0;
-
-	return len;
-}
-static int
-packet_signing_enabled_write(struct file *file, const char __user *buffer,
-			unsigned long count, void *data)
-{
-	char c;
-	int rc;
-
-	rc = get_user(c, buffer);
-	if (rc)
-		return rc;
-	if (c == '0' || c == 'n' || c == 'N')
-		sign_CIFS_PDUs = 0;
-	else if (c == '1' || c == 'y' || c == 'Y')
-		sign_CIFS_PDUs = 1;
-	else if (c == '2')
-		sign_CIFS_PDUs = 2;
-
-	return count;
-} */
-
-
 #endif
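
The new must-sign handling above can be exercised through the cifs proc
interface; this is a sketch that assumes the SecurityFlags entry backed by
this write handler, and the bitmask value is a placeholder (the real
CIFSSEC_MUST_SIGN definition lives in cifsglob.h):

	cat /proc/fs/cifs/SecurityFlags
	# write a value with the MUST_SIGN bit set; MAY_SIGN is then implied
	echo 0x01004 > /proc/fs/cifs/SecurityFlags
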
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 4cc2012e..34af556 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -43,6 +43,6 @@
 	mode_t	mnt_dir_mode;
 	int     mnt_cifs_flags;
 	int	prepathlen;
-	char *  prepath;
+	char   *prepath;
 };
 #endif				/* _CIFS_FS_SB_H */
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 701e9a9..b5903b8 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -66,7 +66,7 @@
 {
 	int charlen;
 	int i;
-	wchar_t * wchar_to = (wchar_t *)to; /* needed to quiet sparse */
+	wchar_t *wchar_to = (wchar_t *)to; /* needed to quiet sparse */
 
 	for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
 
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 39e5b97..614c11f 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -5,20 +5,20 @@
  *     Convert a unicode character to upper or lower case using
  *     compressed tables.
  *
- *   Copyright (c) International Business Machines  Corp., 2000,2005
+ *   Copyright (c) International Business Machines  Corp., 2000,2007
  *
  *   This program is free software;  you can redistribute it and/or modify
  *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; either version 2 of the License, or 
+ *   the Free Software Foundation; either version 2 of the License, or
  *   (at your option) any later version.
- * 
+ *
  *   This program is distributed in the hope that it will be useful,
  *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
  *   the GNU General Public License for more details.
  *
  *   You should have received a copy of the GNU General Public License
- *   along with this program;  if not, write to the Free Software 
+ *   along with this program;  if not, write to the Free Software
  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *
@@ -70,7 +70,7 @@
  *     Address of the first string
  */
 static inline wchar_t *
-UniStrcat(wchar_t * ucs1, const wchar_t * ucs2)
+UniStrcat(wchar_t *ucs1, const wchar_t *ucs2)
 {
 	wchar_t *anchor = ucs1;	/* save a pointer to start of ucs1 */
 
@@ -88,7 +88,7 @@
  *     or NULL if the character is not in the string
  */
 static inline wchar_t *
-UniStrchr(const wchar_t * ucs, wchar_t uc)
+UniStrchr(const wchar_t *ucs, wchar_t uc)
 {
 	while ((*ucs != uc) && *ucs)
 		ucs++;
@@ -107,7 +107,7 @@
  *     > 0:  First string is greater than second
  */
 static inline int
-UniStrcmp(const wchar_t * ucs1, const wchar_t * ucs2)
+UniStrcmp(const wchar_t *ucs1, const wchar_t *ucs2)
 {
 	while ((*ucs1 == *ucs2) && *ucs1) {
 		ucs1++;
@@ -120,7 +120,7 @@
  * UniStrcpy:  Copy a string
  */
 static inline wchar_t *
-UniStrcpy(wchar_t * ucs1, const wchar_t * ucs2)
+UniStrcpy(wchar_t *ucs1, const wchar_t *ucs2)
 {
 	wchar_t *anchor = ucs1;	/* save the start of result string */
 
@@ -132,7 +132,7 @@
  * UniStrlen:  Return the length of a string (in 16 bit Unicode chars not bytes)
  */
 static inline size_t
-UniStrlen(const wchar_t * ucs1)
+UniStrlen(const wchar_t *ucs1)
 {
 	int i = 0;
 
@@ -142,10 +142,11 @@
 }
 
 /*
- * UniStrnlen:  Return the length (in 16 bit Unicode chars not bytes) of a string (length limited)
+ * UniStrnlen:  Return the length (in 16 bit Unicode chars not bytes) of a
+ *		string (length limited)
  */
 static inline size_t
-UniStrnlen(const wchar_t * ucs1, int maxlen)
+UniStrnlen(const wchar_t *ucs1, int maxlen)
 {
 	int i = 0;
 
@@ -161,7 +162,7 @@
  * UniStrncat:  Concatenate length limited string
  */
 static inline wchar_t *
-UniStrncat(wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+UniStrncat(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
 {
 	wchar_t *anchor = ucs1;	/* save pointer to string 1 */
 
@@ -179,7 +180,7 @@
  * UniStrncmp:  Compare length limited string
  */
 static inline int
-UniStrncmp(const wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+UniStrncmp(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
 {
 	if (!n)
 		return 0;	/* Null strings are equal */
@@ -194,7 +195,7 @@
  * UniStrncmp_le:  Compare length limited string - native to little-endian
  */
 static inline int
-UniStrncmp_le(const wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+UniStrncmp_le(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
 {
 	if (!n)
 		return 0;	/* Null strings are equal */
@@ -209,7 +210,7 @@
  * UniStrncpy:  Copy length limited string with pad
  */
 static inline wchar_t *
-UniStrncpy(wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+UniStrncpy(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
 {
 	wchar_t *anchor = ucs1;
 
@@ -226,7 +227,7 @@
  * UniStrncpy_le:  Copy length limited string with pad to little-endian
  */
 static inline wchar_t *
-UniStrncpy_le(wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+UniStrncpy_le(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
 {
 	wchar_t *anchor = ucs1;
 
@@ -247,7 +248,7 @@
  *     NULL if no matching string is found
  */
 static inline wchar_t *
-UniStrstr(const wchar_t * ucs1, const wchar_t * ucs2)
+UniStrstr(const wchar_t *ucs1, const wchar_t *ucs2)
 {
 	const wchar_t *anchor1 = ucs1;
 	const wchar_t *anchor2 = ucs2;
@@ -297,7 +298,7 @@
  * UniStrupr:  Upper case a unicode string
  */
 static inline wchar_t *
-UniStrupr(register wchar_t * upin)
+UniStrupr(register wchar_t *upin)
 {
 	register wchar_t *up;
 
@@ -338,7 +339,7 @@
  * UniStrlwr:  Lower case a unicode string
  */
 static inline wchar_t *
-UniStrlwr(register wchar_t * upin)
+UniStrlwr(register wchar_t *upin)
 {
 	register wchar_t *up;
 
diff --git a/fs/cifs/cifs_uniupr.h b/fs/cifs/cifs_uniupr.h
index da2ad5b..18a9d97 100644
--- a/fs/cifs/cifs_uniupr.h
+++ b/fs/cifs/cifs_uniupr.h
@@ -3,16 +3,16 @@
  *
  *   This program is free software;  you can redistribute it and/or modify
  *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; either version 2 of the License, or 
+ *   the Free Software Foundation; either version 2 of the License, or
  *   (at your option) any later version.
- * 
+ *
  *   This program is distributed in the hope that it will be useful,
  *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
  *   the GNU General Public License for more details.
  *
  *   You should have received a copy of the GNU General Public License
- *   along with this program;  if not, write to the Free Software 
+ *   along with this program;  if not, write to the Free Software
  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  * uniupr.h - Unicode compressed case ranges
@@ -53,7 +53,7 @@
 	0, -1, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0,	/* 1a0-1af */
 	-1, 0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0,	/* 1b0-1bf */
 	0, 0, 0, 0, 0, -1, -2, 0, -1, -2, 0, -1, -2, 0, -1, 0,	/* 1c0-1cf */
-	-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, -79, 0, -1,	/* 1d0-1df */
+	-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, -79, 0, -1, /* 1d0-1df */
 	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e0-1ef */
 	0, 0, -1, -2, 0, -1, 0, 0, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1f0-1ff */
 };
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index fdeda51..3627229 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -21,7 +21,7 @@
 
 #include <linux/fs.h>
 #include "cifspdu.h"
-#include "cifsglob.h" 
+#include "cifsglob.h"
 #include "cifs_debug.h"
 #include "md5.h"
 #include "cifs_unicode.h"
@@ -29,54 +29,57 @@
 #include <linux/ctype.h>
 #include <linux/random.h>
 
-/* Calculate and return the CIFS signature based on the mac key and the smb pdu */
+/* Calculate and return the CIFS signature based on the mac key and SMB PDU */
 /* the 16 byte signature must be allocated by the caller  */
 /* Note we only use the 1st eight bytes */
-/* Note that the smb header signature field on input contains the  
+/* Note that the smb header signature field on input contains the
 	sequence number before this function is called */
 
 extern void mdfour(unsigned char *out, unsigned char *in, int n);
 extern void E_md4hash(const unsigned char *passwd, unsigned char *p16);
 extern void SMBencrypt(unsigned char *passwd, unsigned char *c8,
-                       unsigned char *p24);
-	
-static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu, 
-				    const char * key, char * signature)
+		       unsigned char *p24);
+
+static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
+				    const struct mac_key *key, char *signature)
 {
 	struct	MD5Context context;
 
-	if((cifs_pdu == NULL) || (signature == NULL))
+	if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL))
 		return -EINVAL;
 
 	MD5Init(&context);
-	MD5Update(&context,key,CIFS_SESS_KEY_SIZE+16);
-	MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length);
-	MD5Final(signature,&context);
+	MD5Update(&context, (char *)&key->data, key->len);
+	MD5Update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
+
+	MD5Final(signature, &context);
 	return 0;
 }
 
-int cifs_sign_smb(struct smb_hdr * cifs_pdu, struct TCP_Server_Info * server,
-	__u32 * pexpected_response_sequence_number)
+int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
+		  __u32 *pexpected_response_sequence_number)
 {
 	int rc = 0;
 	char smb_signature[20];
 
-	if((cifs_pdu == NULL) || (server == NULL))
+	if ((cifs_pdu == NULL) || (server == NULL))
 		return -EINVAL;
 
-	if((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0) 
+	if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
 		return rc;
 
 	spin_lock(&GlobalMid_Lock);
-	cifs_pdu->Signature.Sequence.SequenceNumber = cpu_to_le32(server->sequence_number);
+	cifs_pdu->Signature.Sequence.SequenceNumber =
+			cpu_to_le32(server->sequence_number);
 	cifs_pdu->Signature.Sequence.Reserved = 0;
-	
+
 	*pexpected_response_sequence_number = server->sequence_number++;
 	server->sequence_number++;
 	spin_unlock(&GlobalMid_Lock);
 
-	rc = cifs_calculate_signature(cifs_pdu, server->mac_signing_key,smb_signature);
-	if(rc)
+	rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key,
+				      smb_signature);
+	if (rc)
 		memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
 	else
 		memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
@@ -84,115 +87,119 @@
 	return rc;
 }
 
-static int cifs_calc_signature2(const struct kvec * iov, int n_vec,
-				const char * key, char * signature)
+static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
+				const struct mac_key *key, char *signature)
 {
 	struct  MD5Context context;
 	int i;
 
-	if((iov == NULL) || (signature == NULL))
+	if ((iov == NULL) || (signature == NULL) || (key == NULL))
 		return -EINVAL;
 
 	MD5Init(&context);
-	MD5Update(&context,key,CIFS_SESS_KEY_SIZE+16);
-	for(i=0;i<n_vec;i++) {
-		if(iov[i].iov_base == NULL) {
-			cERROR(1,("null iovec entry"));
+	MD5Update(&context, (char *)&key->data, key->len);
+	for (i = 0; i < n_vec; i++) {
+		if (iov[i].iov_base == NULL) {
+			cERROR(1, ("null iovec entry"));
 			return -EIO;
-		} else if(iov[i].iov_len == 0)
+		} else if (iov[i].iov_len == 0)
 			break; /* bail out if we are sent nothing to sign */
-		/* The first entry includes a length field (which does not get 
+		/* The first entry includes a length field (which does not get
 		   signed) that occupies the first 4 bytes before the header */
-		if(i==0) {
+		if (i == 0) {
 			if (iov[0].iov_len <= 8 ) /* cmd field at offset 9 */
 				break; /* nothing to sign or corrupt header */
-			MD5Update(&context,iov[0].iov_base+4, iov[0].iov_len-4);
+			MD5Update(&context, iov[0].iov_base+4,
+				  iov[0].iov_len-4);
 		} else
-			MD5Update(&context,iov[i].iov_base, iov[i].iov_len);
+			MD5Update(&context, iov[i].iov_base, iov[i].iov_len);
 	}
 
-	MD5Final(signature,&context);
+	MD5Final(signature, &context);
 
 	return 0;
 }
 
 
-int cifs_sign_smb2(struct kvec * iov, int n_vec, struct TCP_Server_Info *server,
+int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
 		   __u32 * pexpected_response_sequence_number)
 {
 	int rc = 0;
 	char smb_signature[20];
-	struct smb_hdr * cifs_pdu = iov[0].iov_base;
+	struct smb_hdr *cifs_pdu = iov[0].iov_base;
 
-	if((cifs_pdu == NULL) || (server == NULL))
+	if ((cifs_pdu == NULL) || (server == NULL))
 		return -EINVAL;
 
-	if((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
+	if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
 		return rc;
 
-        spin_lock(&GlobalMid_Lock);
-        cifs_pdu->Signature.Sequence.SequenceNumber = 
+	spin_lock(&GlobalMid_Lock);
+	cifs_pdu->Signature.Sequence.SequenceNumber =
 				cpu_to_le32(server->sequence_number);
-        cifs_pdu->Signature.Sequence.Reserved = 0;
+	cifs_pdu->Signature.Sequence.Reserved = 0;
 
-        *pexpected_response_sequence_number = server->sequence_number++;
-        server->sequence_number++;
-        spin_unlock(&GlobalMid_Lock);
+	*pexpected_response_sequence_number = server->sequence_number++;
+	server->sequence_number++;
+	spin_unlock(&GlobalMid_Lock);
 
-        rc = cifs_calc_signature2(iov, n_vec, server->mac_signing_key,
+	rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key,
 				      smb_signature);
-        if(rc)
-                memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
-        else
-                memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
+	if (rc)
+		memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
+	else
+		memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
 
-        return rc;
-
+	return rc;
 }
 
-int cifs_verify_signature(struct smb_hdr * cifs_pdu, const char * mac_key,
-	__u32 expected_sequence_number)
+int cifs_verify_signature(struct smb_hdr *cifs_pdu,
+			  const struct mac_key *mac_key,
+			  __u32 expected_sequence_number)
 {
 	unsigned int rc;
 	char server_response_sig[8];
 	char what_we_think_sig_should_be[20];
 
-	if((cifs_pdu == NULL) || (mac_key == NULL))
+	if ((cifs_pdu == NULL) || (mac_key == NULL))
 		return -EINVAL;
 
 	if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
 		return 0;
 
 	if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) {
-		struct smb_com_lock_req * pSMB = (struct smb_com_lock_req *)cifs_pdu;
-	    if(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)
+		struct smb_com_lock_req *pSMB =
+			(struct smb_com_lock_req *)cifs_pdu;
+	    if (pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)
 			return 0;
 	}
 
-	/* BB what if signatures are supposed to be on for session but server does not
-		send one? BB */
-	
+	/* BB what if signatures are supposed to be on for session but
+	   server does not send one? BB */
+
 	/* Do not need to verify session setups with signature "BSRSPYL "  */
-	if(memcmp(cifs_pdu->Signature.SecuritySignature,"BSRSPYL ",8)==0)
-		cFYI(1,("dummy signature received for smb command 0x%x",cifs_pdu->Command));
+	if (memcmp(cifs_pdu->Signature.SecuritySignature, "BSRSPYL ", 8) == 0)
+		cFYI(1, ("dummy signature received for smb command 0x%x",
+			cifs_pdu->Command));
 
 	/* save off the original signature so we can modify the smb and check
 		its signature against what the server sent */
-	memcpy(server_response_sig,cifs_pdu->Signature.SecuritySignature,8);
+	memcpy(server_response_sig, cifs_pdu->Signature.SecuritySignature, 8);
 
-	cifs_pdu->Signature.Sequence.SequenceNumber = cpu_to_le32(expected_sequence_number);
+	cifs_pdu->Signature.Sequence.SequenceNumber =
+					cpu_to_le32(expected_sequence_number);
 	cifs_pdu->Signature.Sequence.Reserved = 0;
 
 	rc = cifs_calculate_signature(cifs_pdu, mac_key,
 		what_we_think_sig_should_be);
 
-	if(rc)
+	if (rc)
 		return rc;
 
-	
-/*	cifs_dump_mem("what we think it should be: ",what_we_think_sig_should_be,16); */
+/*	cifs_dump_mem("what we think it should be: ",
+		      what_we_think_sig_should_be, 16); */
 
-	if(memcmp(server_response_sig, what_we_think_sig_should_be, 8))
+	if (memcmp(server_response_sig, what_we_think_sig_should_be, 8))
 		return -EACCES;
 	else
 		return 0;
@@ -200,89 +207,94 @@
 }
 
 /* We fill in key by putting in 40 byte array which was allocated by caller */
-int cifs_calculate_mac_key(char * key, const char * rn, const char * password)
+int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
+			   const char *password)
 {
 	char temp_key[16];
 	if ((key == NULL) || (rn == NULL))
 		return -EINVAL;
 
 	E_md4hash(password, temp_key);
-	mdfour(key,temp_key,16);
-	memcpy(key+16,rn, CIFS_SESS_KEY_SIZE);
+	mdfour(key->data.ntlm, temp_key, 16);
+	memcpy(key->data.ntlm+16, rn, CIFS_SESS_KEY_SIZE);
+	key->len = 40;
 	return 0;
 }
 
-int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, 
-				const struct nls_table * nls_info)
+int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *ses,
+			       const struct nls_table *nls_info)
 {
 	char temp_hash[16];
 	struct HMACMD5Context ctx;
-	char * ucase_buf;
-	__le16 * unicode_buf;
-	unsigned int i,user_name_len,dom_name_len;
+	char *ucase_buf;
+	__le16 *unicode_buf;
+	unsigned int i, user_name_len, dom_name_len;
 
-	if(ses == NULL)
+	if (ses == NULL)
 		return -EINVAL;
 
 	E_md4hash(ses->password, temp_hash);
 
 	hmac_md5_init_limK_to_64(temp_hash, 16, &ctx);
 	user_name_len = strlen(ses->userName);
-	if(user_name_len > MAX_USERNAME_SIZE)
+	if (user_name_len > MAX_USERNAME_SIZE)
 		return -EINVAL;
-	if(ses->domainName == NULL)
+	if (ses->domainName == NULL)
 		return -EINVAL; /* BB should we use CIFS_LINUX_DOM */
 	dom_name_len = strlen(ses->domainName);
-	if(dom_name_len > MAX_USERNAME_SIZE)
+	if (dom_name_len > MAX_USERNAME_SIZE)
 		return -EINVAL;
-  
+
 	ucase_buf = kmalloc((MAX_USERNAME_SIZE+1), GFP_KERNEL);
-	if(ucase_buf == NULL)
+	if (ucase_buf == NULL)
 		return -ENOMEM;
 	unicode_buf = kmalloc((MAX_USERNAME_SIZE+1)*4, GFP_KERNEL);
-	if(unicode_buf == NULL) {
+	if (unicode_buf == NULL) {
 		kfree(ucase_buf);
 		return -ENOMEM;
 	}
-   
-	for(i=0;i<user_name_len;i++)
+
+	for (i = 0; i < user_name_len; i++)
 		ucase_buf[i] = nls_info->charset2upper[(int)ses->userName[i]];
 	ucase_buf[i] = 0;
-	user_name_len = cifs_strtoUCS(unicode_buf, ucase_buf, MAX_USERNAME_SIZE*2, nls_info);
+	user_name_len = cifs_strtoUCS(unicode_buf, ucase_buf,
+				      MAX_USERNAME_SIZE*2, nls_info);
 	unicode_buf[user_name_len] = 0;
 	user_name_len++;
 
-	for(i=0;i<dom_name_len;i++)
+	for (i = 0; i < dom_name_len; i++)
 		ucase_buf[i] = nls_info->charset2upper[(int)ses->domainName[i]];
 	ucase_buf[i] = 0;
-	dom_name_len = cifs_strtoUCS(unicode_buf+user_name_len, ucase_buf, MAX_USERNAME_SIZE*2, nls_info);
+	dom_name_len = cifs_strtoUCS(unicode_buf+user_name_len, ucase_buf,
+				     MAX_USERNAME_SIZE*2, nls_info);
 
 	unicode_buf[user_name_len + dom_name_len] = 0;
 	hmac_md5_update((const unsigned char *) unicode_buf,
-		(user_name_len+dom_name_len)*2,&ctx);
+		(user_name_len+dom_name_len)*2, &ctx);
 
-	hmac_md5_final(ses->server->mac_signing_key,&ctx);
+	hmac_md5_final(ses->server->ntlmv2_hash, &ctx);
 	kfree(ucase_buf);
 	kfree(unicode_buf);
 	return 0;
 }
 
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
-void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key)
+void calc_lanman_hash(struct cifsSesInfo *ses, char *lnm_session_key)
 {
 	int i;
 	char password_with_pad[CIFS_ENCPWD_SIZE];
 
-	if(ses->server == NULL)
+	if (ses->server == NULL)
 		return;
 
 	memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
-	if(ses->password)
+	if (ses->password)
 		strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE);
 
-	if((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0)
-		if(extended_security & CIFSSEC_MAY_PLNTXT) {
-			memcpy(lnm_session_key, password_with_pad, CIFS_ENCPWD_SIZE); 
+	if ((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0)
+		if (extended_security & CIFSSEC_MAY_PLNTXT) {
+			memcpy(lnm_session_key, password_with_pad,
+				CIFS_ENCPWD_SIZE);
 			return;
 		}
 
@@ -297,7 +309,7 @@
 	utf8 and other multibyte codepages each need their own strupper
 	function since a byte at a time will not work. */
 
-	for(i = 0; i < CIFS_ENCPWD_SIZE; i++) {
+	for (i = 0; i < CIFS_ENCPWD_SIZE; i++) {
 		password_with_pad[i] = toupper(password_with_pad[i]);
 	}
 
@@ -307,19 +319,19 @@
 }
 #endif /* CIFS_WEAK_PW_HASH */
 
-static int calc_ntlmv2_hash(struct cifsSesInfo *ses, 
-			    const struct nls_table * nls_cp)
+static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
+			    const struct nls_table *nls_cp)
 {
 	int rc = 0;
 	int len;
 	char nt_hash[16];
-	struct HMACMD5Context * pctxt;
-	wchar_t * user;
-	wchar_t * domain;
+	struct HMACMD5Context *pctxt;
+	wchar_t *user;
+	wchar_t *domain;
 
 	pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL);
 
-	if(pctxt == NULL)
+	if (pctxt == NULL)
 		return -ENOMEM;
 
 	/* calculate md4 hash of password */
@@ -331,41 +343,45 @@
 	/* convert ses->userName to unicode and uppercase */
 	len = strlen(ses->userName);
 	user = kmalloc(2 + (len * 2), GFP_KERNEL);
-	if(user == NULL)
+	if (user == NULL)
 		goto calc_exit_2;
 	len = cifs_strtoUCS(user, ses->userName, len, nls_cp);
 	UniStrupr(user);
 	hmac_md5_update((char *)user, 2*len, pctxt);
 
 	/* convert ses->domainName to unicode and uppercase */
-	if(ses->domainName) {
+	if (ses->domainName) {
 		len = strlen(ses->domainName);
 
-        	domain = kmalloc(2 + (len * 2), GFP_KERNEL);
-		if(domain == NULL)
+		domain = kmalloc(2 + (len * 2), GFP_KERNEL);
+		if (domain == NULL)
 			goto calc_exit_1;
 		len = cifs_strtoUCS(domain, ses->domainName, len, nls_cp);
-		UniStrupr(domain);
+		/* the following line was removed since it didn't work well
+		   with a lower cased domain name passed in as an option.
+		   Maybe converting the domain name earlier makes sense */
+		/* UniStrupr(domain); */
 
 		hmac_md5_update((char *)domain, 2*len, pctxt);
-	
+
 		kfree(domain);
 	}
 calc_exit_1:
 	kfree(user);
 calc_exit_2:
-	/* BB FIXME what about bytes 24 through 40 of the signing key? 
+	/* BB FIXME what about bytes 24 through 40 of the signing key?
 	   compare with the NTLM example */
-	hmac_md5_final(ses->server->mac_signing_key, pctxt);
+	hmac_md5_final(ses->server->ntlmv2_hash, pctxt);
 
 	return rc;
 }
 
-void setup_ntlmv2_rsp(struct cifsSesInfo * ses, char * resp_buf, 
-		      const struct nls_table * nls_cp)
+void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
+		      const struct nls_table *nls_cp)
 {
 	int rc;
-	struct ntlmv2_resp * buf = (struct ntlmv2_resp *)resp_buf;
+	struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf;
+	struct HMACMD5Context context;
 
 	buf->blob_signature = cpu_to_le32(0x00000101);
 	buf->reserved = 0;
@@ -379,21 +395,31 @@
 
 	/* calculate buf->ntlmv2_hash */
 	rc = calc_ntlmv2_hash(ses, nls_cp);
-	if(rc)
-		cERROR(1,("could not get v2 hash rc %d",rc));
+	if (rc)
+		cERROR(1, ("could not get v2 hash rc %d", rc));
 	CalcNTLMv2_response(ses, resp_buf);
+
+	/* now calculate the MAC key for NTLMv2 */
+	hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
+	hmac_md5_update(resp_buf, 16, &context);
+	hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context);
+
+	memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf,
+	       sizeof(struct ntlmv2_resp));
+	ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp);
 }
 
-void CalcNTLMv2_response(const struct cifsSesInfo * ses, char * v2_session_response)
+void CalcNTLMv2_response(const struct cifsSesInfo *ses,
+			 char *v2_session_response)
 {
 	struct HMACMD5Context context;
 	/* rest of v2 struct already generated */
-	memcpy(v2_session_response + 8, ses->server->cryptKey,8);
-	hmac_md5_init_limK_to_64(ses->server->mac_signing_key, 16, &context);
+	memcpy(v2_session_response + 8, ses->server->cryptKey, 8);
+	hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
 
-	hmac_md5_update(v2_session_response+8, 
+	hmac_md5_update(v2_session_response+8,
 			sizeof(struct ntlmv2_resp) - 8, &context);
 
-	hmac_md5_final(v2_session_response,&context);
+	hmac_md5_final(v2_session_response, &context);
 /*	cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
 }
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8b0cbf4..1fd0dc8 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -64,23 +64,27 @@
 unsigned int extended_security = CIFSSEC_DEF;
 /* unsigned int ntlmv2_support = 0; */
 unsigned int sign_CIFS_PDUs = 1;
-extern struct task_struct * oplockThread; /* remove sparse warning */
-struct task_struct * oplockThread = NULL;
+extern struct task_struct *oplockThread; /* remove sparse warning */
+struct task_struct *oplockThread = NULL;
 /* extern struct task_struct * dnotifyThread; remove sparse warning */
-static struct task_struct * dnotifyThread = NULL;
+static struct task_struct *dnotifyThread = NULL;
 static const struct super_operations cifs_super_ops;
 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
 module_param(CIFSMaxBufSize, int, 0);
-MODULE_PARM_DESC(CIFSMaxBufSize,"Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
+MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
+				 "Default: 16384 Range: 8192 to 130048");
 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
 module_param(cifs_min_rcv, int, 0);
-MODULE_PARM_DESC(cifs_min_rcv,"Network buffers in pool. Default: 4 Range: 1 to 64");
+MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
+				"1 to 64");
 unsigned int cifs_min_small = 30;
 module_param(cifs_min_small, int, 0);
-MODULE_PARM_DESC(cifs_min_small,"Small network buffers in pool. Default: 30 Range: 2 to 256");
+MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
+				 "Range: 2 to 256");
 unsigned int cifs_max_pending = CIFS_MAX_REQ;
 module_param(cifs_max_pending, int, 0);
-MODULE_PARM_DESC(cifs_max_pending,"Simultaneous requests to server. Default: 50 Range: 2 to 256");
+MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
+				   "Default: 50 Range: 2 to 256");
 
 extern mempool_t *cifs_sm_req_poolp;
 extern mempool_t *cifs_req_poolp;
@@ -95,10 +99,10 @@
 	struct inode *inode;
 	struct cifs_sb_info *cifs_sb;
 	int rc = 0;
-	
+
 	/* BB should we make this contingent on mount parm? */
 	sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
-	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
+	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
 	cifs_sb = CIFS_SB(sb);
 	if (cifs_sb == NULL)
 		return -ENOMEM;
@@ -114,12 +118,9 @@
 
 	sb->s_magic = CIFS_MAGIC_NUMBER;
 	sb->s_op = &cifs_super_ops;
-#ifdef CONFIG_CIFS_EXPERIMENTAL
-	if (experimEnabled != 0)
-		sb->s_export_op = &cifs_export_ops;
-#endif /* EXPERIMENTAL */	
 /*	if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
-	    sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
+	    sb->s_blocksize =
+		cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
 #ifdef CONFIG_CIFS_QUOTA
 	sb->s_qcop = &cifs_quotactl_ops;
 #endif
@@ -139,6 +140,13 @@
 		goto out_no_root;
 	}
 
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+		cFYI(1, ("export ops supported"));
+		sb->s_export_op = &cifs_export_ops;
+	}
+#endif /* EXPERIMENTAL */
+
 	return 0;
 
 out_no_root:
@@ -149,7 +157,7 @@
 out_mount_failed:
 	if (cifs_sb) {
 		if (cifs_sb->local_nls)
-			unload_nls(cifs_sb->local_nls);	
+			unload_nls(cifs_sb->local_nls);
 		kfree(cifs_sb);
 	}
 	return rc;
@@ -164,10 +172,10 @@
 	cFYI(1, ("In cifs_put_super"));
 	cifs_sb = CIFS_SB(sb);
 	if (cifs_sb == NULL) {
-		cFYI(1,("Empty cifs superblock info passed to unmount"));
+		cFYI(1, ("Empty cifs superblock info passed to unmount"));
 		return;
 	}
-	rc = cifs_umount(sb, cifs_sb); 
+	rc = cifs_umount(sb, cifs_sb);
 	if (rc) {
 		cERROR(1, ("cifs_umount failed with return code %d", rc));
 	}
@@ -180,7 +188,7 @@
 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *sb = dentry->d_sb;
-	int xid; 
+	int xid;
 	int rc = -EOPNOTSUPP;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
@@ -193,7 +201,7 @@
 	buf->f_type = CIFS_MAGIC_NUMBER;
 
 	/* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
-	buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would 
+	buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
 				      presumably be total path, but note
 				      that some servers (including Samba 3)
 				      have a shorter maximum path */
@@ -217,8 +225,7 @@
 	   bypassed it because we detected that this was an older LANMAN sess */
 	if (rc)
 		rc = SMBOldQFSInfo(xid, pTcon, buf);
-	/*     
-	   int f_type;
+	/* int f_type;
 	   __fsid_t f_fsid;
 	   int f_namelen;  */
 	/* BB get from info in tcon struct at mount time call to QFSAttrInfo */
@@ -227,7 +234,7 @@
 				   longer available? */
 }
 
-static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
+static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
 {
 	struct cifs_sb_info *cifs_sb;
 
@@ -235,10 +242,10 @@
 
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
 		return 0;
-	} else /* file mode might have been restricted at mount time 
-		on the client (above and beyond ACL on servers) for  
+	} else /* file mode might have been restricted at mount time
+		on the client (above and beyond ACL on servers) for
 		servers which do not support setting and viewing mode bits,
-		so allowing client to check permissions is useful */ 
+		so allowing client to check permissions is useful */
 		return generic_permission(inode, mask, NULL);
 }
 
@@ -267,7 +274,7 @@
 	cifs_inode->clientCanCacheRead = FALSE;
 	cifs_inode->clientCanCacheAll = FALSE;
 	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
-	
+
 	/* Can not set i_flags here - they get immediately overwritten
 	   to zero by the VFS */
 /*	cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
@@ -309,26 +316,26 @@
 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
 			seq_printf(s, ",posixpaths");
 		if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
-		   !(cifs_sb->tcon->ses->capabilities & CAP_UNIX))
+		   !(cifs_sb->tcon->unix_ext))
 			seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
 		if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
-		   !(cifs_sb->tcon->ses->capabilities & CAP_UNIX))
+		   !(cifs_sb->tcon->unix_ext))
 			seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
-		seq_printf(s, ",rsize=%d",cifs_sb->rsize);
-		seq_printf(s, ",wsize=%d",cifs_sb->wsize);
+		seq_printf(s, ",rsize=%d", cifs_sb->rsize);
+		seq_printf(s, ",wsize=%d", cifs_sb->wsize);
 	}
 	return 0;
 }
 
 #ifdef CONFIG_CIFS_QUOTA
-int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
-		struct fs_disk_quota * pdquota)
+int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
+		struct fs_disk_quota *pdquota)
 {
 	int xid;
 	int rc = 0;
 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 	struct cifsTconInfo *pTcon;
-	
+
 	if (cifs_sb)
 		pTcon = cifs_sb->tcon;
 	else
@@ -337,7 +344,7 @@
 
 	xid = GetXid();
 	if (pTcon) {
-		cFYI(1,("set type: 0x%x id: %d",quota_type,qid));		
+		cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
 	} else {
 		return -EIO;
 	}
@@ -346,8 +353,8 @@
 	return rc;
 }
 
-int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
-                struct fs_disk_quota * pdquota)
+int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
+		    struct fs_disk_quota *pdquota)
 {
 	int xid;
 	int rc = 0;
@@ -361,7 +368,7 @@
 
 	xid = GetXid();
 	if (pTcon) {
-                cFYI(1,("set type: 0x%x id: %d",quota_type,qid));
+		cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
 	} else {
 		rc = -EIO;
 	}
@@ -370,9 +377,9 @@
 	return rc;
 }
 
-int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
+int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
 {
-	int xid; 
+	int xid;
 	int rc = 0;
 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 	struct cifsTconInfo *pTcon;
@@ -384,7 +391,7 @@
 
 	xid = GetXid();
 	if (pTcon) {
-                cFYI(1,("flags: 0x%x operation: 0x%x",flags,operation));
+		cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
 	} else {
 		rc = -EIO;
 	}
@@ -393,7 +400,7 @@
 	return rc;
 }
 
-int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats)
+int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
 {
 	int xid;
 	int rc = 0;
@@ -407,7 +414,7 @@
 	}
 	xid = GetXid();
 	if (pTcon) {
-		cFYI(1,("pqstats %p",qstats));		
+		cFYI(1, ("pqstats %p", qstats));
 	} else {
 		rc = -EIO;
 	}
@@ -424,10 +431,10 @@
 };
 #endif
 
-static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
+static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
 {
 	struct cifs_sb_info *cifs_sb;
-	struct cifsTconInfo * tcon;
+	struct cifsTconInfo *tcon;
 
 	if (!(flags & MNT_FORCE))
 		return;
@@ -445,9 +452,8 @@
 
 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
 	/* cancel_notify_requests(tcon); */
-	if (tcon->ses && tcon->ses->server)
-	{
-		cFYI(1,("wake up tasks now - umount begin not complete"));
+	if (tcon->ses && tcon->ses->server) {
+		cFYI(1, ("wake up tasks now - umount begin not complete"));
 		wake_up_all(&tcon->ses->server->request_q);
 		wake_up_all(&tcon->ses->server->response_q);
 		msleep(1); /* yield */
@@ -480,10 +486,11 @@
 	.statfs = cifs_statfs,
 	.alloc_inode = cifs_alloc_inode,
 	.destroy_inode = cifs_destroy_inode,
-/*	.drop_inode	    = generic_delete_inode, 
-	.delete_inode	= cifs_delete_inode,  *//* Do not need the above two functions     
-   unless later we add lazy close of inodes or unless the kernel forgets to call
-   us with the same number of releases (closes) as opens */
+/*	.drop_inode	    = generic_delete_inode,
+	.delete_inode	= cifs_delete_inode,  */  /* Do not need above two
+	functions unless later we add lazy close of inodes or unless the
+	kernel forgets to call us with the same number of releases (closes)
+	as opens */
 	.show_options = cifs_show_options,
 	.umount_begin   = cifs_umount_begin,
 	.remount_fs = cifs_remount,
@@ -586,11 +593,11 @@
 	.getxattr = cifs_getxattr,
 	.listxattr = cifs_listxattr,
 	.removexattr = cifs_removexattr,
-#endif 
+#endif
 };
 
 const struct inode_operations cifs_symlink_inode_ops = {
-	.readlink = generic_readlink, 
+	.readlink = generic_readlink,
 	.follow_link = cifs_follow_link,
 	.put_link = cifs_put_link,
 	.permission = cifs_permission,
@@ -602,7 +609,7 @@
 	.getxattr = cifs_getxattr,
 	.listxattr = cifs_listxattr,
 	.removexattr = cifs_removexattr,
-#endif 
+#endif
 };
 
 const struct file_operations cifs_file_ops = {
@@ -628,7 +635,7 @@
 };
 
 const struct file_operations cifs_file_direct_ops = {
-	/* no mmap, no aio, no readv - 
+	/* no mmap, no aio, no readv -
 	   BB reevaluate whether they can be done with directio, no cache */
 	.read = cifs_user_read,
 	.write = cifs_user_write,
@@ -668,7 +675,7 @@
 };
 
 const struct file_operations cifs_file_direct_nobrl_ops = {
-	/* no mmap, no aio, no readv - 
+	/* no mmap, no aio, no readv -
 	   BB reevaluate whether they can be done with directio, no cache */
 	.read = cifs_user_read,
 	.write = cifs_user_write,
@@ -693,11 +700,11 @@
 #ifdef CONFIG_CIFS_EXPERIMENTAL
 	.dir_notify = cifs_dir_notify,
 #endif /* CONFIG_CIFS_EXPERIMENTAL */
-        .ioctl  = cifs_ioctl,
+	.ioctl  = cifs_ioctl,
 };
 
 static void
-cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags)
+cifs_init_once(void *inode, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct cifsInodeInfo *cifsi = inode;
 
@@ -749,7 +756,7 @@
 		cifs_min_rcv = 1;
 	else if (cifs_min_rcv > 64) {
 		cifs_min_rcv = 64;
-		cERROR(1,("cifs_min_rcv set to maximum (64)"));
+		cERROR(1, ("cifs_min_rcv set to maximum (64)"));
 	}
 
 	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
@@ -762,25 +769,25 @@
 	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
 	almost all handle based requests (but not write response, nor is it
 	sufficient for path based requests).  A smaller size would have
-	been more efficient (compacting multiple slab items on one 4k page) 
+	been more efficient (compacting multiple slab items on one 4k page)
 	for the case in which debug was on, but this larger size allows
 	more SMBs to use small buffer alloc and is still much more
-	efficient to alloc 1 per page off the slab compared to 17K (5page) 
+	efficient to alloc 1 per page off the slab compared to 17K (5page)
 	alloc of large cifs buffers even when page debugging is on */
 	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
-			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN, 
+			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
 			NULL, NULL);
 	if (cifs_sm_req_cachep == NULL) {
 		mempool_destroy(cifs_req_poolp);
 		kmem_cache_destroy(cifs_req_cachep);
-		return -ENOMEM;              
+		return -ENOMEM;
 	}
 
 	if (cifs_min_small < 2)
 		cifs_min_small = 2;
 	else if (cifs_min_small > 256) {
 		cifs_min_small = 256;
-		cFYI(1,("cifs_min_small set to maximum (256)"));
+		cFYI(1, ("cifs_min_small set to maximum (256)"));
 	}
 
 	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
@@ -841,41 +848,43 @@
 	kmem_cache_destroy(cifs_oplock_cachep);
 }
 
-static int cifs_oplock_thread(void * dummyarg)
+static int cifs_oplock_thread(void *dummyarg)
 {
-	struct oplock_q_entry * oplock_item;
+	struct oplock_q_entry *oplock_item;
 	struct cifsTconInfo *pTcon;
-	struct inode * inode;
+	struct inode *inode;
 	__u16  netfid;
 	int rc;
 
+	set_freezable();
 	do {
-		if (try_to_freeze()) 
+		if (try_to_freeze())
 			continue;
-		
+
 		spin_lock(&GlobalMid_Lock);
 		if (list_empty(&GlobalOplock_Q)) {
 			spin_unlock(&GlobalMid_Lock);
 			set_current_state(TASK_INTERRUPTIBLE);
 			schedule_timeout(39*HZ);
 		} else {
-			oplock_item = list_entry(GlobalOplock_Q.next, 
+			oplock_item = list_entry(GlobalOplock_Q.next,
 				struct oplock_q_entry, qhead);
 			if (oplock_item) {
-				cFYI(1,("found oplock item to write out")); 
+				cFYI(1, ("found oplock item to write out"));
 				pTcon = oplock_item->tcon;
 				inode = oplock_item->pinode;
 				netfid = oplock_item->netfid;
 				spin_unlock(&GlobalMid_Lock);
 				DeleteOplockQEntry(oplock_item);
 				/* can not grab inode sem here since it would
-				deadlock when oplock received on delete 
+				deadlock when oplock received on delete
 				since vfs_unlink holds the i_mutex across
 				the call */
 				/* mutex_lock(&inode->i_mutex);*/
 				if (S_ISREG(inode->i_mode)) {
 					rc = filemap_fdatawrite(inode->i_mapping);
-					if (CIFS_I(inode)->clientCanCacheRead == 0) {
+					if (CIFS_I(inode)->clientCanCacheRead
+									 == 0) {
 						filemap_fdatawait(inode->i_mapping);
 						invalidate_remote_inode(inode);
 					}
@@ -884,20 +893,22 @@
 				/* mutex_unlock(&inode->i_mutex);*/
 				if (rc)
 					CIFS_I(inode)->write_behind_rc = rc;
-				cFYI(1,("Oplock flush inode %p rc %d",inode,rc));
+				cFYI(1, ("Oplock flush inode %p rc %d",
+					inode, rc));
 
-				/* releasing a stale oplock after recent reconnection 
-				of smb session using a now incorrect file 
-				handle is not a data integrity issue but do  
-				not bother sending an oplock release if session 
-				to server still is disconnected since oplock 
+				/* releasing stale oplock after recent reconnect
+				of smb session using a now incorrect file
+				handle is not a data integrity issue but do
+				not bother sending an oplock release if session
+				to server still is disconnected since oplock
 				already released by the server in that case */
 				if (pTcon->tidStatus != CifsNeedReconnect) {
 				    rc = CIFSSMBLock(0, pTcon, netfid,
-					    0 /* len */ , 0 /* offset */, 0, 
+					    0 /* len */ , 0 /* offset */, 0,
 					    0, LOCKING_ANDX_OPLOCK_RELEASE,
 					    0 /* wait flag */);
-					cFYI(1,("Oplock release rc = %d ",rc));
+					cFYI(1, 
+					      ("Oplock release rc = %d ", rc));
 				}
 			} else
 				spin_unlock(&GlobalMid_Lock);
@@ -909,7 +920,7 @@
 	return 0;
 }
 
-static int cifs_dnotify_thread(void * dummyarg)
+static int cifs_dnotify_thread(void *dummyarg)
 {
 	struct list_head *tmp;
 	struct cifsSesInfo *ses;
@@ -924,9 +935,9 @@
 		   to be woken up and wakeq so the
 		   thread can wake up and error out */
 		list_for_each(tmp, &GlobalSMBSessionList) {
-			ses = list_entry(tmp, struct cifsSesInfo, 
+			ses = list_entry(tmp, struct cifsSesInfo,
 				cifsSessionList);
-			if (ses && ses->server && 
+			if (ses && ses->server &&
 			     atomic_read(&ses->server->inFlight))
 				wake_up_all(&ses->server->response_q);
 		}
@@ -950,13 +961,13 @@
 #ifdef CONFIG_CIFS_EXPERIMENTAL
 	INIT_LIST_HEAD(&GlobalDnotifyReqList);
 	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
-#endif	
+#endif
 /*
  *  Initialize Global counters
  */
 	atomic_set(&sesInfoAllocCount, 0);
 	atomic_set(&tconInfoAllocCount, 0);
-	atomic_set(&tcpSesAllocCount,0);
+	atomic_set(&tcpSesAllocCount, 0);
 	atomic_set(&tcpSesReconnectCount, 0);
 	atomic_set(&tconInfoReconnectCount, 0);
 
@@ -977,10 +988,10 @@
 
 	if (cifs_max_pending < 2) {
 		cifs_max_pending = 2;
-		cFYI(1,("cifs_max_pending set to min of 2"));
+		cFYI(1, ("cifs_max_pending set to min of 2"));
 	} else if (cifs_max_pending > 256) {
 		cifs_max_pending = 256;
-		cFYI(1,("cifs_max_pending set to max of 256"));
+		cFYI(1, ("cifs_max_pending set to max of 256"));
 	}
 
 	rc = cifs_init_inodecache();
@@ -1002,14 +1013,14 @@
 	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
 	if (IS_ERR(oplockThread)) {
 		rc = PTR_ERR(oplockThread);
-		cERROR(1,("error %d create oplock thread", rc));
+		cERROR(1, ("error %d create oplock thread", rc));
 		goto out_unregister_filesystem;
 	}
 
 	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
 	if (IS_ERR(dnotifyThread)) {
 		rc = PTR_ERR(dnotifyThread);
-		cERROR(1,("error %d create dnotify thread", rc));
+		cERROR(1, ("error %d create dnotify thread", rc));
 		goto out_stop_oplock_thread;
 	}
 
@@ -1035,7 +1046,7 @@
 static void __exit
 exit_cifs(void)
 {
-	cFYI(0, ("In unregister ie exit_cifs"));
+	cFYI(0, ("exit_cifs"));
 #ifdef CONFIG_PROC_FS
 	cifs_proc_clean();
 #endif
@@ -1048,9 +1059,10 @@
 }
 
 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
-MODULE_LICENSE("GPL");		/* combination of LGPL + GPL source behaves as GPL */
+MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
 MODULE_DESCRIPTION
-    ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
+    ("VFS to access servers complying with the SNIA CIFS Specification "
+     "e.g. Samba and Windows");
 MODULE_VERSION(CIFS_VERSION);
 module_init(init_cifs)
 module_exit(exit_cifs)
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index c235d32..a20de77 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -16,7 +16,7 @@
  *
  *   You should have received a copy of the GNU Lesser General Public License
  *   along with this library; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #ifndef _CIFSFS_H
@@ -43,9 +43,9 @@
 
 /* Functions related to inodes */
 extern const struct inode_operations cifs_dir_inode_ops;
-extern int cifs_create(struct inode *, struct dentry *, int, 
+extern int cifs_create(struct inode *, struct dentry *, int,
 		       struct nameidata *);
-extern struct dentry * cifs_lookup(struct inode *, struct dentry *,
+extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
 				  struct nameidata *);
 extern int cifs_unlink(struct inode *, struct dentry *);
 extern int cifs_hardlink(struct dentry *, struct inode *, struct dentry *);
@@ -63,16 +63,16 @@
 
 /* Functions related to files and directories */
 extern const struct file_operations cifs_file_ops;
-extern const struct file_operations cifs_file_direct_ops; /* if directio mount */
+extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */
 extern const struct file_operations cifs_file_nobrl_ops;
-extern const struct file_operations cifs_file_direct_nobrl_ops; /* if directio mount */
+extern const struct file_operations cifs_file_direct_nobrl_ops; /* no brlocks */
 extern int cifs_open(struct inode *inode, struct file *file);
 extern int cifs_close(struct inode *inode, struct file *file);
 extern int cifs_closedir(struct inode *inode, struct file *file);
 extern ssize_t cifs_user_read(struct file *file, char __user *read_data,
-			 size_t read_size, loff_t * poffset);
+			 size_t read_size, loff_t *poffset);
 extern ssize_t cifs_user_write(struct file *file, const char __user *write_data,
-			 size_t write_size, loff_t * poffset);
+			 size_t write_size, loff_t *poffset);
 extern int cifs_lock(struct file *, int, struct file_lock *);
 extern int cifs_fsync(struct file *, struct dentry *, int);
 extern int cifs_flush(struct file *, fl_owner_t id);
@@ -88,8 +88,9 @@
 
 /* Functions related to symlinks */
 extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
-extern void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *);
-extern int cifs_readlink(struct dentry *direntry, char __user *buffer, 
+extern void cifs_put_link(struct dentry *direntry,
+			  struct nameidata *nd, void *);
+extern int cifs_readlink(struct dentry *direntry, char __user *buffer,
 			 int buflen);
 extern int cifs_symlink(struct inode *inode, struct dentry *direntry,
 			const char *symname);
@@ -98,7 +99,7 @@
 			size_t, int);
 extern ssize_t	cifs_getxattr(struct dentry *, const char *, void *, size_t);
 extern ssize_t	cifs_listxattr(struct dentry *, char *, size_t);
-extern int cifs_ioctl (struct inode * inode, struct file * filep,
+extern int cifs_ioctl (struct inode *inode, struct file *filep,
 		       unsigned int command, unsigned long arg);
-#define CIFS_VERSION   "1.49"
+#define CIFS_VERSION   "1.50"
 #endif				/* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 23655de..b98742f 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/cifsglob.h
  *
- *   Copyright (C) International Business Machines  Corp., 2002,2006
+ *   Copyright (C) International Business Machines  Corp., 2002,2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *              Jeremy Allison (jra@samba.org)
  *
@@ -14,7 +14,7 @@
  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
  *   the GNU Lesser General Public License for more details.
- * 
+ *
  */
 #include <linux/in.h>
 #include <linux/in6.h>
@@ -28,7 +28,7 @@
 
 #define MAX_TREE_SIZE 2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1
 #define MAX_SERVER_SIZE 15
-#define MAX_SHARE_SIZE  64	/* used to be 20 - this should still be enough */
+#define MAX_SHARE_SIZE  64	/* used to be 20, this should still be enough */
 #define MAX_USERNAME_SIZE 32	/* 32 is to allow for 15 char names + null
 				   termination then *2 for unicode versions */
 #define MAX_PASSWORD_SIZE 16
@@ -38,13 +38,13 @@
 /*
  * MAX_REQ is the maximum number of requests that WE will send
  * on one socket concurently. It also matches the most common
- * value of max multiplex returned by servers.  We may 
+ * value of max multiplex returned by servers.  We may
  * eventually want to use the negotiated value (in case
  * future servers can handle more) when we are more confident that
  * we will not have problems oveloading the socket with pending
  * write data.
  */
-#define CIFS_MAX_REQ 50 
+#define CIFS_MAX_REQ 50
 
 #define SERVER_NAME_LENGTH 15
 #define SERVER_NAME_LEN_WITH_NULL     (SERVER_NAME_LENGTH + 1)
@@ -104,6 +104,17 @@
 	/* Netbios frames protocol not supported at this time */
 };
 
+struct mac_key {
+	unsigned int len;
+	union {
+		char ntlm[CIFS_SESS_KEY_SIZE + 16];
+		struct {
+			char key[16];
+			struct ntlmv2_resp resp;
+		} ntlmv2;
+	} data;
+};
+
 /*
  *****************************************************************
  * Except the CIFS PDUs themselves all the
@@ -120,13 +131,13 @@
 		struct sockaddr_in sockAddr;
 		struct sockaddr_in6 sockAddr6;
 	} addr;
-	wait_queue_head_t response_q; 
+	wait_queue_head_t response_q;
 	wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
 	struct list_head pending_mid_q;
 	void *Server_NlsInfo;	/* BB - placeholder for future NLS info  */
 	unsigned short server_codepage;	/* codepage for the server    */
 	unsigned long ip_address;	/* IP addr for the server if known */
-	enum protocolEnum protocolType;	
+	enum protocolEnum protocolType;
 	char versionMajor;
 	char versionMinor;
 	unsigned svlocal:1;	/* local server or remote */
@@ -159,14 +170,15 @@
 	/* 16th byte of RFC1001 workstation name is always null */
 	char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL];
 	__u32 sequence_number; /* needed for CIFS PDU signature */
-	char mac_signing_key[CIFS_SESS_KEY_SIZE + 16];
+	struct mac_key mac_signing_key;
+	char ntlmv2_hash[16];
 	unsigned long lstrp; /* when we got last response from this server */
 };
 
 /*
  * The following is our shortcut to user information.  We surface the uid,
  * and name. We always get the password on the fly in case it
- * has changed. We also hang a list of sessions owned by this user off here. 
+ * has changed. We also hang a list of sessions owned by this user off here.
  */
 struct cifsUidInfo {
 	struct list_head userList;
@@ -197,11 +209,11 @@
 	int Suid;		/* remote smb uid  */
 	uid_t linux_uid;        /* local Linux uid */
 	int capabilities;
-	char serverName[SERVER_NAME_LEN_WITH_NULL * 2];	/* BB make bigger for 
+	char serverName[SERVER_NAME_LEN_WITH_NULL * 2];	/* BB make bigger for
 				TCP names - will ipv6 and sctp addresses fit? */
 	char userName[MAX_USERNAME_SIZE + 1];
-	char * domainName;
-	char * password;
+	char *domainName;
+	char *password;
 };
 /* no more than one of the following three session flags may be set */
 #define CIFS_SES_NT4 1
@@ -213,7 +225,7 @@
 #define CIFS_SES_LANMAN 8
 /*
  * there is one of these for each connection to a resource on a particular
- * session 
+ * session
  */
 struct cifsTconInfo {
 	struct list_head cifsConnectionList;
@@ -269,7 +281,9 @@
 	FILE_SYSTEM_UNIX_INFO fsUnixInfo;
 	unsigned retry:1;
 	unsigned nocase:1;
-	/* BB add field for back pointer to sb struct? */
+	unsigned unix_ext:1; /* if off disable Linux extensions to CIFS protocol
+				for this mount even if server would support */
+	/* BB add field for back pointer to sb struct(s)? */
 };
 
 /*
@@ -291,9 +305,9 @@
 	__u16 entries_in_buffer;
 	__u16 info_level;
 	__u32 resume_key;
-	char * ntwrk_buf_start;
-	char * srch_entries_start;
-	char * presume_name;
+	char *ntwrk_buf_start;
+	char *srch_entries_start;
+	char *presume_name;
 	unsigned int resume_name_len;
 	unsigned endOfSearch:1;
 	unsigned emptyDir:1;
@@ -309,15 +323,15 @@
 	__u16 netfid;		/* file id from remote */
 	/* BB add lock scope info here if needed */ ;
 	/* lock scope id (0 if none) */
-	struct file * pfile; /* needed for writepage */
-	struct inode * pInode; /* needed for oplock break */
+	struct file *pfile; /* needed for writepage */
+	struct inode *pInode; /* needed for oplock break */
 	struct mutex lock_mutex;
 	struct list_head llist; /* list of byte range locks we have. */
 	unsigned closePend:1;	/* file is marked to close */
 	unsigned invalidHandle:1;  /* file closed via session abend */
 	atomic_t wrtPending;   /* handle in use - defer close */
 	struct semaphore fh_sem; /* prevents reopen race after dead ses*/
-	char * search_resume_name; /* BB removeme BB */
+	char *search_resume_name; /* BB removeme BB */
 	struct cifs_search_info srch_inf;
 };
 
@@ -327,7 +341,7 @@
 
 struct cifsInodeInfo {
 	struct list_head lockList;
-	/* BB add in lists for dirty pages - i.e. write caching info for oplock */
+	/* BB add in lists for dirty pages i.e. write caching info for oplock */
 	struct list_head openFileList;
 	int write_behind_rc;
 	__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
@@ -381,9 +395,9 @@
 }
 #else
 
-#define  cifs_stats_inc(field) do {} while(0)
-#define  cifs_stats_bytes_written(tcon, bytes) do {} while(0)
-#define  cifs_stats_bytes_read(tcon, bytes) do {} while(0)
+#define  cifs_stats_inc(field) do {} while (0)
+#define  cifs_stats_bytes_written(tcon, bytes) do {} while (0)
+#define  cifs_stats_bytes_read(tcon, bytes) do {} while (0)
 
 #endif
 
@@ -410,8 +424,8 @@
 
 struct oplock_q_entry {
 	struct list_head qhead;
-	struct inode * pinode;
-	struct cifsTconInfo * tcon; 
+	struct inode *pinode;
+	struct cifsTconInfo *tcon;
 	__u16 netfid;
 };
 
@@ -426,7 +440,7 @@
        __u16 netfid;
        __u32 filter; /* CompletionFilter (for multishot) */
        int multishot;
-       struct file * pfile;
+       struct file *pfile;
 };
 
 #define   MID_FREE 0
@@ -464,7 +478,7 @@
 #define   CIFSSEC_MUST_LANMAN	0x10010
 #define   CIFSSEC_MUST_PLNTXT	0x20020
 #define   CIFSSEC_MASK          0x37037 /* current flags supported if weak */
-#else	  
+#else
 #define	  CIFSSEC_MASK          0x07007 /* flags supported if no weak config */
 #endif /* WEAK_PW_HASH */
 #define   CIFSSEC_MUST_SEAL	0x40040 /* not supported yet */
@@ -502,7 +516,7 @@
  *  ----------
  *  sesSem     operations on smb session
  *  tconSem    operations on tree connection
- *  fh_sem      file handle reconnection operations 
+ *  fh_sem      file handle reconnection operations
  *
  ****************************************************************************/
 
@@ -515,7 +529,7 @@
 /*
  * The list of servers that did not respond with NT LM 0.12.
  * This list helps improve performance and eliminate the messages indicating
- * that we had a communications error talking to the server in this list. 
+ * that we had a communications error talking to the server in this list.
  */
 /* Feature not supported */
 /* GLOBAL_EXTERN struct servers_not_supported *NotSuppList; */
@@ -568,12 +582,12 @@
 /* Misc globals */
 GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
 				to be established on existing mount if we
-				have the uid/password or Kerberos credential 
+				have the uid/password or Kerberos credential
 				or equivalent for current user */
 GLOBAL_EXTERN unsigned int oplockEnabled;
 GLOBAL_EXTERN unsigned int experimEnabled;
 GLOBAL_EXTERN unsigned int lookupCacheEnabled;
-GLOBAL_EXTERN unsigned int extended_security;	/* if on, session setup sent 
+GLOBAL_EXTERN unsigned int extended_security;	/* if on, session setup sent
 				with more secure ntlmssp2 challenge/resp */
 GLOBAL_EXTERN unsigned int sign_CIFS_PDUs;  /* enable smb packet signing */
 GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index d619ca7..6a2056e 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -144,7 +144,7 @@
 #define SMBOPEN_OAPPEND       0x0001
 
 /*
- * SMB flag definitions 
+ * SMB flag definitions
  */
 #define SMBFLG_EXTD_LOCK 0x01	/* server supports lock-read write-unlock smb */
 #define SMBFLG_RCV_POSTED 0x02	/* obsolete */
@@ -157,9 +157,9 @@
 #define SMBFLG_RESPONSE 0x80	/* this PDU is a response from server */
 
 /*
- * SMB flag2 definitions 
+ * SMB flag2 definitions
  */
-#define SMBFLG2_KNOWS_LONG_NAMES cpu_to_le16(1)	/* can send long (non-8.3) 
+#define SMBFLG2_KNOWS_LONG_NAMES cpu_to_le16(1)	/* can send long (non-8.3)
 						   path names in response */
 #define SMBFLG2_KNOWS_EAS cpu_to_le16(2)
 #define SMBFLG2_SECURITY_SIGNATURE cpu_to_le16(4)
@@ -260,7 +260,7 @@
 #define ATTR_SPARSE    0x0200
 #define ATTR_REPARSE   0x0400
 #define ATTR_COMPRESSED 0x0800
-#define ATTR_OFFLINE    0x1000	/* ie file not immediately available - 
+#define ATTR_OFFLINE    0x1000	/* ie file not immediately available -
 					on offline storage */
 #define ATTR_NOT_CONTENT_INDEXED 0x2000
 #define ATTR_ENCRYPTED  0x4000
@@ -300,7 +300,7 @@
 #define CREATE_DELETE_ON_CLOSE	0x00001000
 #define CREATE_OPEN_BY_ID       0x00002000
 #define OPEN_REPARSE_POINT	0x00200000
-#define CREATE_OPTIONS_MASK     0x007FFFFF 
+#define CREATE_OPTIONS_MASK     0x007FFFFF
 #define CREATE_OPTION_SPECIAL   0x20000000   /* system. NB not sent over wire */
 
 /* ImpersonationLevel flags */
@@ -366,17 +366,19 @@
 #define pByteArea(smb_var) ((unsigned char *)smb_var + sizeof(struct smb_hdr) + (2* smb_var->WordCount) + 2 )
 
 /*
- * Computer Name Length
+ * Computer Name Length (since Netbios name was length 16 with last byte 0x20)
+ * No longer as important, now that TCP names are more commonly used to
+ * resolve hosts.
  */
 #define CNLEN 15
 
 /*
- * Share Name Length					  @S8A
- * Note:  This length is limited by the SMB used to get   @S8A
- *        the Share info.   NetShareEnum only returns 13  @S8A
- *        chars, including the null termination.          @S8A 
+ * Share Name Length (SNLEN)
+ * Note:  This length was limited by the SMB used to get
+ *        the Share info.   NetShareEnum only returned 13
+ *        chars, including the null termination.
+ * This was removed because it is no longer limiting.
  */
-#define SNLEN 12		/*@S8A */
 
 /*
  * Comment Length
@@ -394,8 +396,8 @@
  *
  *  The Naming convention is the lower case version of the
  *  smb command code name for the struct and this is typedef to the
- *  uppercase version of the same name with the prefix SMB_ removed 
- *  for brevity.  Although typedefs are not commonly used for 
+ *  uppercase version of the same name with the prefix SMB_ removed
+ *  for brevity.  Although typedefs are not commonly used for
  *  structure definitions in the Linux kernel, their use in the
  *  CIFS standards document, which this code is based on, may
  *  make this one of the cases where typedefs for structures make
@@ -403,7 +405,7 @@
  *  Typedefs can always be removed later if they are too distracting
  *  and they are only used for the CIFSs PDUs themselves, not
  *  internal cifs vfs structures
- *  
+ *
  */
 
 typedef struct negotiate_req {
@@ -511,7 +513,7 @@
 		unsigned char SecurityBlob[1];	/* followed by */
 		/* STRING NativeOS */
 		/* STRING NativeLanMan */
-	} __attribute__((packed)) req;	/* NTLM request format (with 
+	} __attribute__((packed)) req;	/* NTLM request format (with
 					extended security */
 
 	struct {		/* request format */
@@ -549,7 +551,7 @@
 /*      unsigned char  * NativeOS;      */
 /*	unsigned char  * NativeLanMan;  */
 /*      unsigned char  * PrimaryDomain; */
-	} __attribute__((packed)) resp;	/* NTLM response 
+	} __attribute__((packed)) resp;	/* NTLM response
 					   (with or without extended sec) */
 
 	struct {		/* request format */
@@ -618,7 +620,7 @@
 #define CAP_NT_SMBS            0x00000010
 #define CAP_STATUS32           0x00000040
 #define CAP_LEVEL_II_OPLOCKS   0x00000080
-#define CAP_NT_FIND            0x00000200	/* reserved should be zero 
+#define CAP_NT_FIND            0x00000200	/* reserved should be zero
 				(because NT_SMBs implies the same thing?) */
 #define CAP_BULK_TRANSFER      0x20000000
 #define CAP_EXTENDED_SECURITY  0x80000000
@@ -676,7 +678,7 @@
 	__u16 ByteCount;
 } __attribute__((packed)) LOGOFF_ANDX_RSP;
 
-typedef union smb_com_tree_disconnect {	/* as an altetnative can use flag on 
+typedef union smb_com_tree_disconnect {	/* as an alternative can use flag on
 					tree_connect PDU to effect disconnect */
 					/* tdis is probably simplest SMB PDU */
 	struct {
@@ -712,6 +714,7 @@
 #define REQ_OPLOCK         0x00000002
 #define REQ_BATCHOPLOCK    0x00000004
 #define REQ_OPENDIRONLY    0x00000008
+#define REQ_EXTENDED_INFO  0x00000010
 
 typedef struct smb_com_open_req {	/* also handles create */
 	struct smb_hdr hdr;	/* wct = 24 */
@@ -799,27 +802,28 @@
 	__u32  FileId;
 	__u16  Reserved;
 	__u16  ByteCount;
-} __attribute__((packed)) OPENX_RSP; 
+} __attribute__((packed)) OPENX_RSP;
 
 /* For encoding of POSIX Open Request - see trans2 function 0x209 data struct */
 
 /* Legacy write request for older servers */
 typedef struct smb_com_writex_req {
-        struct smb_hdr hdr;     /* wct = 12 */
-        __u8 AndXCommand;
-        __u8 AndXReserved;
-        __le16 AndXOffset;
-        __u16 Fid;
-        __le32 OffsetLow;
-        __u32 Reserved; /* Timeout */
-        __le16 WriteMode; /* 1 = write through */
-        __le16 Remaining;
-        __le16 Reserved2;
-        __le16 DataLengthLow;
-        __le16 DataOffset;
-        __le16 ByteCount;
-        __u8 Pad;               /* BB check for whether padded to DWORD boundary and optimum performance here */
-        char Data[0];
+	struct smb_hdr hdr;     /* wct = 12 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u16 Fid;
+	__le32 OffsetLow;
+	__u32 Reserved; /* Timeout */
+	__le16 WriteMode; /* 1 = write through */
+	__le16 Remaining;
+	__le16 Reserved2;
+	__le16 DataLengthLow;
+	__le16 DataOffset;
+	__le16 ByteCount;
+	__u8 Pad;		/* BB check for whether padded to DWORD
+				   boundary and optimum performance here */
+	char Data[0];
 } __attribute__((packed)) WRITEX_REQ;
 
 typedef struct smb_com_write_req {
@@ -837,7 +841,8 @@
 	__le16 DataOffset;
 	__le32 OffsetHigh;
 	__le16 ByteCount;
-	__u8 Pad;		/* BB check for whether padded to DWORD boundary and optimum performance here */
+	__u8 Pad;		/* BB check for whether padded to DWORD
+				   boundary and optimum performance here */
 	char Data[0];
 } __attribute__((packed)) WRITE_REQ;
 
@@ -855,17 +860,17 @@
 
 /* legacy read request for older servers */
 typedef struct smb_com_readx_req {
-        struct smb_hdr hdr;     /* wct = 10 */
-        __u8 AndXCommand;
-        __u8 AndXReserved;
-        __le16 AndXOffset;
-        __u16 Fid;
-        __le32 OffsetLow;
-        __le16 MaxCount;
-        __le16 MinCount;                /* obsolete */
-        __le32 Reserved;
-        __le16 Remaining;
-        __le16 ByteCount;
+	struct smb_hdr hdr;	/* wct = 10 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u16 Fid;
+	__le32 OffsetLow;
+	__le16 MaxCount;
+	__le16 MinCount;	/* obsolete */
+	__le32 Reserved;
+	__le16 Remaining;
+	__le16 ByteCount;
 } __attribute__((packed)) READX_REQ;
 
 typedef struct smb_com_read_req {
@@ -896,7 +901,8 @@
 	__le16 DataLengthHigh;
 	__u64 Reserved2;
 	__u16 ByteCount;
-	__u8 Pad;		/* BB check for whether padded to DWORD boundary and optimum performance here */
+	__u8 Pad;		/* BB check for whether padded to DWORD
+				   boundary and optimum performance here */
 	char Data[1];
 } __attribute__((packed)) READ_RSP;
 
@@ -967,7 +973,7 @@
 #define COPY_TARGET_MODE_ASCII 0x0004 /* if not set, binary */
 #define COPY_SOURCE_MODE_ASCII 0x0008 /* if not set, binary */
 #define COPY_VERIFY_WRITES     0x0010
-#define COPY_TREE              0x0020 
+#define COPY_TREE              0x0020
 
 typedef struct smb_com_copy_req {
 	struct smb_hdr hdr;	/* wct = 3 */
@@ -975,7 +981,7 @@
 	__le16 OpenFunction;
 	__le16 Flags;
 	__le16 ByteCount;
-	__u8 BufferFormat;	/* 4 = ASCII or Unicode */ 
+	__u8 BufferFormat;	/* 4 = ASCII or Unicode */
 	unsigned char OldFileName[1];
 	/* followed by __u8 BufferFormat2 */
 	/* followed by NewFileName string */
@@ -1083,28 +1089,28 @@
 
 /*******************************************************/
 /* NT Transact structure defintions follow             */
-/* Currently only ioctl, acl (get security descriptor) */  
+/* Currently only ioctl, acl (get security descriptor) */
 /* and notify are implemented                          */
 /*******************************************************/
 typedef struct smb_com_ntransact_req {
-        struct smb_hdr hdr; /* wct >= 19 */
-        __u8 MaxSetupCount;
-        __u16 Reserved;
-        __le32 TotalParameterCount;
-        __le32 TotalDataCount;
-        __le32 MaxParameterCount;
-        __le32 MaxDataCount;
-        __le32 ParameterCount;
-        __le32 ParameterOffset;
-        __le32 DataCount;
-        __le32 DataOffset;
-        __u8 SetupCount; /* four setup words follow subcommand */
-        /* SNIA spec incorrectly included spurious pad here */
-        __le16 SubCommand; /* 2 = IOCTL/FSCTL */
-	/* SetupCount words follow then */ 
-        __le16 ByteCount;
-        __u8 Pad[3];
-        __u8 Parms[0];
+	struct smb_hdr hdr; /* wct >= 19 */
+	__u8 MaxSetupCount;
+	__u16 Reserved;
+	__le32 TotalParameterCount;
+	__le32 TotalDataCount;
+	__le32 MaxParameterCount;
+	__le32 MaxDataCount;
+	__le32 ParameterCount;
+	__le32 ParameterOffset;
+	__le32 DataCount;
+	__le32 DataOffset;
+	__u8 SetupCount; /* four setup words follow subcommand */
+	/* SNIA spec incorrectly included spurious pad here */
+	__le16 SubCommand; /* 2 = IOCTL/FSCTL */
+	/* SetupCount words follow then */
+	__le16 ByteCount;
+	__u8 Pad[3];
+	__u8 Parms[0];
 } __attribute__((packed)) NTRANSACT_REQ;
 
 typedef struct smb_com_ntransact_rsp {
@@ -1120,7 +1126,7 @@
 	__le32 DataDisplacement;
 	__u8 SetupCount;   /* 0 */
 	__u16 ByteCount;
-        /* __u8 Pad[3]; */
+	/* __u8 Pad[3]; */
 	/* parms and data follow */
 } __attribute__((packed)) NTRANSACT_RSP;
 
@@ -1215,7 +1221,7 @@
 /*	__u8 Data[1];*/
 } __attribute__((packed)) TRANSACT_CHANGE_NOTIFY_REQ;
 
-/* BB eventually change to use generic ntransact rsp struct 
+/* BB eventually change to use generic ntransact rsp struct
       and validation routine */
 typedef struct smb_com_transaction_change_notify_rsp {
 	struct smb_hdr hdr;	/* wct = 18 */
@@ -1262,7 +1268,7 @@
 	__le32 Action;
 	__le32 FileNameLength;
 	__u8  FileName[0];
-} __attribute__((packed)); 
+} __attribute__((packed));
 
 struct reparse_data {
 	__u32	ReparseTag;
@@ -1331,7 +1337,7 @@
 	__u8 Reserved1;
 	/* SetupWords[SetupCount];
 	__u16 ByteCount;
-	__u16 Reserved2;*/	
+	__u16 Reserved2;*/
 	/* data area follows */
 } __attribute__((packed));
 
@@ -1370,9 +1376,9 @@
 #define SMB_QUERY_FILE_INTERNAL_INFO    0x3ee
 #define SMB_QUERY_FILE_ACCESS_INFO      0x3f0
 #define SMB_QUERY_FILE_NAME_INFO2       0x3f1 /* 0x30 bytes */
-#define SMB_QUERY_FILE_POSITION_INFO    0x3f6 
+#define SMB_QUERY_FILE_POSITION_INFO    0x3f6
 #define SMB_QUERY_FILE_MODE_INFO        0x3f8
-#define SMB_QUERY_FILE_ALGN_INFO        0x3f9 
+#define SMB_QUERY_FILE_ALGN_INFO        0x3f9
 
 
 #define SMB_SET_FILE_BASIC_INFO	        0x101
@@ -1506,35 +1512,35 @@
 	__u16 Pad1;
 	__u16 Fid;
 	__le16 InformationLevel;
-	__u16 Reserved4;	
+	__u16 Reserved4;
 } __attribute__((packed));
 
 struct smb_com_transaction2_sfi_rsp {
 	struct smb_hdr hdr;	/* wct = 10 + SetupCount */
 	struct trans2_resp t2;
 	__u16 ByteCount;
-	__u16 Reserved2;	/* parameter word reserved - 
+	__u16 Reserved2;	/* parameter word reserved -
 					present for infolevels > 100 */
 } __attribute__((packed));
 
 struct smb_t2_qfi_req {
-        struct	smb_hdr hdr;
-        struct	trans2_req t2;
+	struct	smb_hdr hdr;
+	struct	trans2_req t2;
 	__u8	Pad;
 	__u16	Fid;
 	__le16	InformationLevel;
 } __attribute__((packed));
 
 struct smb_t2_qfi_rsp {
-        struct smb_hdr hdr;     /* wct = 10 + SetupCount */
-        struct trans2_resp t2;
-        __u16 ByteCount;
-        __u16 Reserved2;        /* parameter word reserved - 
-					present for infolevels > 100 */
+	struct smb_hdr hdr;     /* wct = 10 + SetupCount */
+	struct trans2_resp t2;
+	__u16 ByteCount;
+	__u16 Reserved2;        /* parameter word reserved -
+				   present for infolevels > 100 */
 } __attribute__((packed));
 
 /*
- * Flags on T2 FINDFIRST and FINDNEXT 
+ * Flags on T2 FINDFIRST and FINDNEXT
  */
 #define CIFS_SEARCH_CLOSE_ALWAYS  0x0001
 #define CIFS_SEARCH_CLOSE_AT_END  0x0002
@@ -1743,7 +1749,9 @@
 	__u8 Reserved3;
 	__le16 SubCommand;	/* one setup word */
 	__le16 ByteCount;
-	__u8 Pad[3];		/* Win2K has sent 0x0F01 (max resp length perhaps?) followed by one byte pad - doesn't seem to matter though */
+	__u8 Pad[3];		/* Win2K has sent 0x0F01 (max response length
+				   perhaps?) followed by one byte pad - doesn't
+				   seem to matter though */
 	__le16 MaxReferralLevel;
 	char RequestFileName[1];
 } __attribute__((packed)) TRANSACTION2_GET_DFS_REFER_REQ;
@@ -1752,7 +1760,10 @@
 	__le16 VersionNumber;
 	__le16 ReferralSize;
 	__le16 ServerType;	/* 0x0001 = CIFS server */
-	__le16 ReferralFlags;	/* or proximity - not clear which since always set to zero - SNIA spec says 0x01 means strip off PathConsumed chars before submitting RequestFileName to remote node */
+	__le16 ReferralFlags;	/* or proximity - not clear which since it is
+				   always set to zero - SNIA spec says 0x01
+				   means strip off PathConsumed chars before
+				   submitting RequestFileName to remote node */
 	__le16 TimeToLive;
 	__le16 Proximity;
 	__le16 DfsPathOffset;
@@ -1778,11 +1789,13 @@
 #define DFSREF_STORAGE_SERVER   0x0002
 
 /* IOCTL information */
-/* List of ioctl function codes that look to be of interest to remote clients like this. */
-/* Need to do some experimentation to make sure they all work remotely.                  */
-/* Some of the following such as the encryption/compression ones would be                */
-/* invoked from tools via a specialized hook into the VFS rather than via the            */
-/* standard vfs entry points */
+/*
+ * List of ioctl function codes that look to be of interest to remote clients
+ * like this one.  Need to do some experimentation to make sure they all work
+ * remotely.  Some of the following, such as the encryption/compression ones,
+ * would be invoked from tools via a specialized hook into the VFS rather
+ * than via the standard vfs entry points.
+ */
 #define FSCTL_REQUEST_OPLOCK_LEVEL_1 0x00090000
 #define FSCTL_REQUEST_OPLOCK_LEVEL_2 0x00090004
 #define FSCTL_REQUEST_BATCH_OPLOCK   0x00090008
@@ -1811,7 +1824,7 @@
 /*
  ************************************************************************
  * All structs for everything above the SMB PDUs themselves
- * (such as the T2 level specific data) go here                  
+ * (such as the T2 level specific data) go here
  ************************************************************************
  */
 
@@ -1857,7 +1870,7 @@
 	__le64 FreeAllocationUnits;
 	__le32 SectorsPerAllocationUnit;
 	__le32 BytesPerSector;
-} __attribute__((packed)) FILE_SYSTEM_INFO;		/* size info, level 0x103 */
+} __attribute__((packed)) FILE_SYSTEM_INFO;	/* size info, level 0x103 */
 
 typedef struct {
 	__le32 fsid;
@@ -1871,7 +1884,7 @@
 	__le16 MajorVersionNumber;
 	__le16 MinorVersionNumber;
 	__le64 Capability;
-} __attribute__((packed)) FILE_SYSTEM_UNIX_INFO;	/* Unix extensions info, level 0x200 */
+} __attribute__((packed)) FILE_SYSTEM_UNIX_INFO; /* Unix extension level 0x200*/
 
 /* Version numbers for CIFS UNIX major and minor. */
 #define CIFS_UNIX_MAJOR_VERSION 1
@@ -1885,16 +1898,20 @@
 #define CIFS_UNIX_POSIX_PATHNAMES_CAP   0x00000010 /* Allow POSIX path chars  */
 #define CIFS_UNIX_POSIX_PATH_OPS_CAP    0x00000020 /* Allow new POSIX path based
 						      calls including posix open
-						      and posix unlink */ 
+						      and posix unlink */
+#define CIFS_UNIX_LARGE_READ_CAP        0x00000040 /* support reads >128K (up
+						      to 0xFFFF00) */
+#define CIFS_UNIX_LARGE_WRITE_CAP       0x00000080
+
 #ifdef CONFIG_CIFS_POSIX
 /* Can not set pathnames cap yet until we send new posix create SMB since
    otherwise server can treat such handles opened with older ntcreatex
    (by a new client which knows how to send posix path ops)
    as non-posix handles (can affect write behavior with byte range locks.
    We can add back in POSIX_PATH_OPS cap when Posix Create/Mkdir finished */
-/* #define CIFS_UNIX_CAP_MASK              0x0000003b */
-#define CIFS_UNIX_CAP_MASK              0x0000001b 
-#else 
+/* #define CIFS_UNIX_CAP_MASK              0x000000fb */
+#define CIFS_UNIX_CAP_MASK              0x000000db
+#else
 #define CIFS_UNIX_CAP_MASK              0x00000013
 #endif /* CONFIG_CIFS_POSIX */
 
@@ -1904,10 +1921,10 @@
 typedef struct {
 	/* For undefined recommended transfer size return -1 in that field */
 	__le32 OptimalTransferSize;  /* bsize on some os, iosize on other os */
-	__le32 BlockSize; 
+	__le32 BlockSize;
     /* The next three fields are in terms of the block size.
 	(above). If block size is unknown, 4096 would be a
-	reasonable block size for a server to report. 
+	reasonable block size for a server to report.
 	Note that returning the blocks/blocksavail removes need
 	to make a second call (to QFSInfo level 0x103 to get this info.
 	UserBlockAvail is typically less than or equal to BlocksAvail,
@@ -2062,9 +2079,9 @@
 
 struct file_stream_info {
 	__le32 number_of_streams;  /* BB check sizes and verify location */
-	/* followed by info on streams themselves 
+	/* followed by info on streams themselves
 		u64 size;
-		u64 allocation_size 
+		u64 allocation_size
 		stream info */
 };      /* level 0x109 */
 
@@ -2083,7 +2100,7 @@
 	__u8  cifs_e_tag;
 	__u8  cifs_e_perm;
 	__le64 cifs_uid; /* or gid */
-} __attribute__((packed)); 
+} __attribute__((packed));
 
 struct cifs_posix_acl { /* access conrol list  (ACL) */
 	__le16	version;
@@ -2138,6 +2155,12 @@
 	/* struct following varies based on requested level */
 } __attribute__((packed)) OPEN_PSX_RSP; /* level 0x209 SetPathInfo data */
 
+#define SMB_POSIX_UNLINK_FILE_TARGET		0
+#define SMB_POSIX_UNLINK_DIRECTORY_TARGET	1
+
+struct unlink_psx_rq { /* level 0x20a SetPathInfo */
+	__le16 type;
+} __attribute__((packed));
 
 struct file_internal_info {
 	__u64  UniqueId; /* inode number */
@@ -2154,7 +2177,7 @@
 
 
 /********************************************************/
-/*  FindFirst/FindNext transact2 data buffer formats    */ 
+/*  FindFirst/FindNext transact2 data buffer formats    */
 /********************************************************/
 
 typedef struct {
@@ -2232,7 +2255,7 @@
 	__le64 EndOfFile;
 	__le64 AllocationSize;
 	__le32 ExtFileAttributes;
-	__le32 FileNameLength; 
+	__le32 FileNameLength;
 	__le32 EaSize; /* length of the xattrs */
 	__u8   ShortNameLength;
 	__u8   Reserved;
@@ -2259,7 +2282,7 @@
 struct win_dev {
 	unsigned char type[8]; /* IntxCHR or IntxBLK */
 	__le64 major;
-	__le64 minor;	
+	__le64 minor;
 } __attribute__((packed));
 
 struct gea {
@@ -2291,36 +2314,36 @@
 struct data_blob {
 	__u8 *data;
 	size_t length;
-	void (*free) (struct data_blob * data_blob);
+	void (*free) (struct data_blob *data_blob);
 } __attribute__((packed));
 
 
 #ifdef CONFIG_CIFS_POSIX
-/* 
+/*
 	For better POSIX semantics from Linux client, (even better
 	than the existing CIFS Unix Extensions) we need updated PDUs for:
-	
+
 	1) PosixCreateX - to set and return the mode, inode#, device info and
 	perhaps add a CreateDevice - to create Pipes and other special .inodes
 	Also note POSIX open flags
-	2) Close - to return the last write time to do cache across close 
+	2) Close - to return the last write time to do cache across close
 		more safely
-	3) FindFirst return unique inode number - what about resume key, two 
+	3) FindFirst return unique inode number - what about resume key, two
 	forms short (matches readdir) and full (enough info to cache inodes)
 	4) Mkdir - set mode
-	
-	And under consideration: 
+
+	And under consideration:
 	5) FindClose2 (return nanosecond timestamp ??)
-	6) Use nanosecond timestamps throughout all time fields if 
+	6) Use nanosecond timestamps throughout all time fields if
 	   corresponding attribute flag is set
 	7) sendfile - handle based copy
 	8) Direct i/o
 	9) Misc fcntls?
-	
+
 	what about fixing 64 bit alignment
-	
+
 	There are also various legacy SMB/CIFS requests used as is
-	
+
 	From existing Lanman and NTLM dialects:
 	--------------------------------------
 	NEGOTIATE
@@ -2341,48 +2364,48 @@
 		(BB verify that never need to set allocation size)
 		SMB_SET_FILE_BASIC_INFO2 (setting times - BB can it be done via
 			 Unix ext?)
-	
+
 	COPY (note support for copy across directories) - FUTURE, OPTIONAL
 	setting/getting OS/2 EAs - FUTURE (BB can this handle
 	setting Linux xattrs perfectly)         - OPTIONAL
 	dnotify                                 - FUTURE, OPTIONAL
 	quota                                   - FUTURE, OPTIONAL
-			
-	Note that various requests implemented for NT interop such as 
+
+	Note that various requests implemented for NT interop such as
 		NT_TRANSACT (IOCTL) QueryReparseInfo
 	are unneeded to servers compliant with the CIFS POSIX extensions
-	
+
 	From CIFS Unix Extensions:
 	-------------------------
 	T2 SET_PATH_INFO (SMB_SET_FILE_UNIX_LINK) for symlinks
 	T2 SET_PATH_INFO (SMB_SET_FILE_BASIC_INFO2)
 	T2 QUERY_PATH_INFO (SMB_QUERY_FILE_UNIX_LINK)
-	T2 QUERY_PATH_INFO (SMB_QUERY_FILE_UNIX_BASIC) - BB check for missing inode fields
-					Actually need QUERY_FILE_UNIX_INFO since has inode num
-					BB what about a) blksize/blkbits/blocks
+	T2 QUERY_PATH_INFO (SMB_QUERY_FILE_UNIX_BASIC)	BB check for missing
+							inode fields
+				Actually need QUERY_FILE_UNIX_INFO
+				since it has inode num
+				BB what about a) blksize/blkbits/blocks
 							  b) i_version
 							  c) i_rdev
 							  d) notify mask?
 							  e) generation
 							  f) size_seqcount
 	T2 FIND_FIRST/FIND_NEXT FIND_FILE_UNIX
-	TRANS2_GET_DFS_REFERRAL			  - OPTIONAL but recommended
+	TRANS2_GET_DFS_REFERRAL		      - OPTIONAL but recommended
 	T2_QFS_INFO QueryDevice/AttributeInfo - OPTIONAL
-	
-	
  */
 
 /* xsymlink is a symlink format (used by MacOS) that can be used
-   to save symlink info in a regular file when 
+   to save symlink info in a regular file when
    mounted to operating systems that do not
    support the cifs Unix extensions or EAs (for xattr
    based symlinks).  For such a file to be recognized
-   as containing symlink data: 
+   as containing symlink data:
 
-   1) file size must be 1067, 
+   1) file size must be 1067,
    2) signature must begin file data,
    3) length field must be set to ASCII representation
-	of a number which is less than or equal to 1024, 
+	of a number which is less than or equal to 1024,
    4) md5 must match that of the path data */
 
 struct xsymlink {
@@ -2393,10 +2416,10 @@
 	char length[4];
 	char cr1;         /* \n */
 /* md5 of valid subset of path ie path[0] through path[length-1] */
-	__u8 md5[32];    
+	__u8 md5[32];
 	char cr2;        /* \n */
 /* if room left, then end with \n then 0x20s by convention but not required */
-	char path[1024];  
+	char path[1024];
 } __attribute__((packed));
 
 typedef struct file_xattr_info {
@@ -2405,7 +2428,8 @@
 	__u32 xattr_value_len;
 	char  xattr_name[0];
 	/* followed by xattr_value[xattr_value_len], no pad */
-} __attribute__((packed)) FILE_XATTR_INFO; /* extended attribute, info level 0x205 */
+} __attribute__((packed)) FILE_XATTR_INFO; /* extended attribute info
+					      level 0x205 */
 
 
 /* flags for chattr command */
@@ -2431,8 +2455,9 @@
 typedef struct file_chattr_info {
 	__le64	mask; /* list of all possible attribute bits */
 	__le64	mode; /* list of actual attribute bits on this inode */
-} __attribute__((packed)) FILE_CHATTR_INFO;  /* ext attributes (chattr, chflags) level 0x206 */
+} __attribute__((packed)) FILE_CHATTR_INFO;  /* ext attributes
+						(chattr, chflags) level 0x206 */
 
-#endif 
+#endif
 
 #endif				/* _CIFSPDU_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 5d163e2..04a69da 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -16,7 +16,7 @@
  *
  *   You should have received a copy of the GNU Lesser General Public License
  *   along with this library; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 #ifndef _CIFSPROTO_H
 #define _CIFSPROTO_H
@@ -49,9 +49,9 @@
 			struct smb_hdr * /* out */ ,
 			int * /* bytes returned */ , const int long_op);
 extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
-			struct kvec *, int /* nvec to send */, 
+			struct kvec *, int /* nvec to send */,
 			int * /* type of buf returned */ , const int long_op);
-extern int SendReceiveBlockingLock(const unsigned int /* xid */ , 
+extern int SendReceiveBlockingLock(const unsigned int /* xid */ ,
 					struct cifsTconInfo *,
 				struct smb_hdr * /* input */ ,
 				struct smb_hdr * /* out */ ,
@@ -64,19 +64,19 @@
 extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
 extern int decode_negTokenInit(unsigned char *security_blob, int length,
 			enum securityEnum *secType);
-extern int cifs_inet_pton(int, char * source, void *dst);
+extern int cifs_inet_pton(int, char *source, void *dst);
 extern int map_smb_to_linux_error(struct smb_hdr *smb);
 extern void header_assemble(struct smb_hdr *, char /* command */ ,
 			    const struct cifsTconInfo *, int /* length of
 			    fixed section (word count) in two byte units */);
 extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
 				struct cifsSesInfo *ses,
-				void ** request_buf);
+				void **request_buf);
 extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
-			     const int stage, 
+			     const int stage,
 			     const struct nls_table *nls_cp);
 extern __u16 GetNextMid(struct TCP_Server_Info *server);
-extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16, 
+extern struct oplock_q_entry *AllocOplockQEntry(struct inode *, u16,
 						 struct cifsTconInfo *);
 extern void DeleteOplockQEntry(struct oplock_q_entry *);
 extern struct timespec cifs_NTtimeToUnix(u64 /* utc nanoseconds since 1601 */ );
@@ -85,12 +85,12 @@
 extern struct timespec cnvrtDosUnixTm(__u16 date, __u16 time);
 
 extern int cifs_get_inode_info(struct inode **pinode,
-			const unsigned char *search_path, 
+			const unsigned char *search_path,
 			FILE_ALL_INFO * pfile_info,
 			struct super_block *sb, int xid);
 extern int cifs_get_inode_info_unix(struct inode **pinode,
 			const unsigned char *search_path,
-			struct super_block *sb,int xid);
+			struct super_block *sb, int xid);
 
 extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
 			const char *);
@@ -98,8 +98,8 @@
 void cifs_proc_init(void);
 void cifs_proc_clean(void);
 
-extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo, 
-			struct nls_table * nls_info);
+extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
+			struct nls_table *nls_info);
 extern int CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses);
 
 extern int CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
@@ -108,11 +108,11 @@
 
 extern int CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
 		const char *searchName, const struct nls_table *nls_codepage,
-		__u16 *searchHandle, struct cifs_search_info * psrch_inf, 
+		__u16 *searchHandle, struct cifs_search_info *psrch_inf,
 		int map, const char dirsep);
 
 extern int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
-		__u16 searchHandle, struct cifs_search_info * psrch_inf);
+		__u16 searchHandle, struct cifs_search_info *psrch_inf);
 
 extern int CIFSFindClose(const int, struct cifsTconInfo *tcon,
 			const __u16 search_handle);
@@ -123,9 +123,9 @@
 			int legacy /* whether to use old info level */,
 			const struct nls_table *nls_codepage, int remap);
 extern int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
-                        const unsigned char *searchName,
-                        FILE_ALL_INFO * findData,
-                        const struct nls_table *nls_codepage, int remap);
+			const unsigned char *searchName,
+			FILE_ALL_INFO *findData,
+			const struct nls_table *nls_codepage, int remap);
 
 extern int CIFSSMBUnixQPathInfo(const int xid,
 			struct cifsTconInfo *tcon,
@@ -143,13 +143,13 @@
 			const char *old_path,
 			const struct nls_table *nls_codepage, int remap);
 extern int get_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
-			const char *old_path, 
+			const char *old_path,
 			const struct nls_table *nls_codepage,
-			unsigned int *pnum_referrals, 
-			unsigned char ** preferrals,
+			unsigned int *pnum_referrals,
+			unsigned char **preferrals,
 			int remap);
 extern void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
-				 struct super_block * sb, struct smb_vol * vol);
+				 struct super_block *sb, struct smb_vol *vol);
 extern int CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon,
 			struct kstatfs *FSData);
 extern int SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon,
@@ -181,11 +181,11 @@
 			const struct nls_table *nls_codepage,
 			int remap_special_chars);
 extern int CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon,
-			 __u64 size, __u16 fileHandle,__u32 opener_pid, 
+			 __u64 size, __u16 fileHandle, __u32 opener_pid,
 			int AllocSizeFlag);
 extern int CIFSSMBUnixSetPerms(const int xid, struct cifsTconInfo *pTcon,
 			char *full_path, __u64 mode, __u64 uid,
-			__u64 gid, dev_t dev, 
+			__u64 gid, dev_t dev,
 			const struct nls_table *nls_codepage,
 			int remap_special_chars);
 
@@ -196,7 +196,10 @@
 extern int CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon,
 			const char *name, const struct nls_table *nls_codepage,
 			int remap_special_chars);
-
+extern int CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon,
+			const char *name, __u16 type,
+			const struct nls_table *nls_codepage,
+			int remap_special_chars);
 extern int CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon,
 			const char *name,
 			const struct nls_table *nls_codepage,
@@ -205,8 +208,8 @@
 			const char *fromName, const char *toName,
 			const struct nls_table *nls_codepage,
 			int remap_special_chars);
-extern int CIFSSMBRenameOpenFile(const int xid,struct cifsTconInfo *pTcon,
-			int netfid, char * target_name, 
+extern int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
+			int netfid, char *target_name,
 			const struct nls_table *nls_codepage,
 			int remap_special_chars);
 extern int CIFSCreateHardLink(const int xid,
@@ -217,7 +220,7 @@
 extern int CIFSUnixCreateHardLink(const int xid,
 			struct cifsTconInfo *tcon,
 			const char *fromName, const char *toName,
-			const struct nls_table *nls_codepage, 
+			const struct nls_table *nls_codepage,
 			int remap_special_chars);
 extern int CIFSUnixCreateSymLink(const int xid,
 			struct cifsTconInfo *tcon,
@@ -228,7 +231,7 @@
 			const unsigned char *searchName,
 			char *syminfo, const int buflen,
 			const struct nls_table *nls_codepage);
-extern int CIFSSMBQueryReparseLinkInfo(const int xid, 
+extern int CIFSSMBQueryReparseLinkInfo(const int xid,
 			struct cifsTconInfo *tcon,
 			const unsigned char *searchName,
 			char *symlinkinfo, const int buflen, __u16 fid,
@@ -244,35 +247,35 @@
 			const int access_flags, const int omode,
 			__u16 * netfid, int *pOplock, FILE_ALL_INFO *,
 			const struct nls_table *nls_codepage, int remap);
-extern int CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, 
+extern int CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon,
 			u32 posix_flags, __u64 mode, __u16 * netfid,
 			FILE_UNIX_BASIC_INFO *pRetData,
 			__u32 *pOplock, const char *name,
-			const struct nls_table *nls_codepage, int remap);			
+			const struct nls_table *nls_codepage, int remap);
 extern int CIFSSMBClose(const int xid, struct cifsTconInfo *tcon,
 			const int smb_file_id);
 
 extern int CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
-                        const int netfid, unsigned int count,
-                        const __u64 lseek, unsigned int *nbytes, char **buf,
-			int * return_buf_type);
+			const int netfid, unsigned int count,
+			const __u64 lseek, unsigned int *nbytes, char **buf,
+			int *return_buf_type);
 extern int CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
 			const int netfid, const unsigned int count,
 			const __u64 lseek, unsigned int *nbytes,
-			const char *buf, const char __user *ubuf, 
+			const char *buf, const char __user *ubuf,
 			const int long_op);
 extern int CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
 			const int netfid, const unsigned int count,
-			const __u64 offset, unsigned int *nbytes, 
+			const __u64 offset, unsigned int *nbytes,
 			struct kvec *iov, const int nvec, const int long_op);
 extern int CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
 			const unsigned char *searchName, __u64 * inode_number,
-			const struct nls_table *nls_codepage, 
+			const struct nls_table *nls_codepage,
 			int remap_special_chars);
 extern int cifs_convertUCSpath(char *target, const __le16 *source, int maxlen,
-			const struct nls_table * codepage);
-extern int cifsConvertToUCS(__le16 * target, const char *source, int maxlen,
-			const struct nls_table * cp, int mapChars);
+			const struct nls_table *codepage);
+extern int cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
+			const struct nls_table *cp, int mapChars);
 
 extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
 			const __u16 netfid, const __u64 len,
@@ -281,7 +284,7 @@
 			const int waitFlag);
 extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
 			const __u16 smb_file_id, const int get_flag,
-			const __u64 len, struct file_lock *, 
+			const __u64 len, struct file_lock *,
 			const __u16 lock_type, const int waitFlag);
 extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
 extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
@@ -291,54 +294,56 @@
 extern struct cifsTconInfo *tconInfoAlloc(void);
 extern void tconInfoFree(struct cifsTconInfo *);
 
-extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *,__u32 *);
+extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
 extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
 			  __u32 *);
-extern int cifs_verify_signature(struct smb_hdr *, const char * mac_key,
-	__u32 expected_sequence_number);
-extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass);
-extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *, 
+extern int cifs_verify_signature(struct smb_hdr *,
+				 const struct mac_key *mac_key,
+				__u32 expected_sequence_number);
+extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
+				 const char *pass);
+extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *,
 			const struct nls_table *);
 extern void CalcNTLMv2_response(const struct cifsSesInfo *, char * );
-extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *, 
+extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
 			     const struct nls_table *);
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
-extern void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key);
+extern void calc_lanman_hash(struct cifsSesInfo *ses, char *lnm_session_key);
 #endif /* CIFS_WEAK_PW_HASH */
 extern int CIFSSMBCopy(int xid,
 			struct cifsTconInfo *source_tcon,
 			const char *fromName,
 			const __u16 target_tid,
 			const char *toName, const int flags,
-			const struct nls_table *nls_codepage, 
+			const struct nls_table *nls_codepage,
 			int remap_special_chars);
-extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon, 
-			const int notify_subdirs,const __u16 netfid,
-			__u32 filter, struct file * file, int multishot, 
+extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
+			const int notify_subdirs, const __u16 netfid,
+			__u32 filter, struct file *file, int multishot,
 			const struct nls_table *nls_codepage);
 extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
-			const unsigned char *searchName, char * EAData,
+			const unsigned char *searchName, char *EAData,
 			size_t bufsize, const struct nls_table *nls_codepage,
 			int remap_special_chars);
-extern ssize_t CIFSSMBQueryEA(const int xid,struct cifsTconInfo * tcon,
-		const unsigned char * searchName,const unsigned char * ea_name,
-		unsigned char * ea_value, size_t buf_size, 
+extern ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon,
+		const unsigned char *searchName, const unsigned char *ea_name,
+		unsigned char *ea_value, size_t buf_size,
 		const struct nls_table *nls_codepage, int remap_special_chars);
-extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, 
-		const char *fileName, const char * ea_name, 
-		const void * ea_value, const __u16 ea_value_len, 
+extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
+		const char *fileName, const char *ea_name,
+		const void *ea_value, const __u16 ea_value_len,
 		const struct nls_table *nls_codepage, int remap_special_chars);
 extern int CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon,
 			__u16 fid, char *acl_inf, const int buflen,
 			const int acl_type /* ACCESS vs. DEFAULT */);
 extern int CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
 		const unsigned char *searchName,
-		char *acl_inf, const int buflen,const int acl_type,
+		char *acl_inf, const int buflen, const int acl_type,
 		const struct nls_table *nls_codepage, int remap_special_chars);
 extern int CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
 		const unsigned char *fileName,
 		const char *local_acl, const int buflen, const int acl_type,
 		const struct nls_table *nls_codepage, int remap_special_chars);
 extern int CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
-                const int netfid, __u64 * pExtAttrBits, __u64 *pMask);
+			const int netfid, __u64 * pExtAttrBits, __u64 *pMask);
 #endif			/* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 57419a1..8eb102f 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -48,7 +48,7 @@
 	{LANMAN_PROT, "\2LM1.2X002"},
 	{LANMAN2_PROT, "\2LANMAN2.1"},
 #endif /* weak password hashing for legacy clients */
-	{CIFS_PROT, "\2NT LM 0.12"}, 
+	{CIFS_PROT, "\2NT LM 0.12"},
 	{POSIX_PROT, "\2POSIX 2"},
 	{BAD_PROT, "\2"}
 };
@@ -61,7 +61,7 @@
 	{LANMAN_PROT, "\2LM1.2X002"},
 	{LANMAN2_PROT, "\2LANMAN2.1"},
 #endif /* weak password hashing for legacy clients */
-	{CIFS_PROT, "\2NT LM 0.12"}, 
+	{CIFS_PROT, "\2NT LM 0.12"},
 	{BAD_PROT, "\2"}
 };
 #endif
@@ -84,17 +84,17 @@
 
 /* Mark as invalid, all open files on tree connections since they
    were closed when session to server was lost */
-static void mark_open_files_invalid(struct cifsTconInfo * pTcon)
+static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
 {
 	struct cifsFileInfo *open_file = NULL;
-	struct list_head * tmp;
-	struct list_head * tmp1;
+	struct list_head *tmp;
+	struct list_head *tmp1;
 
 /* list all files open on tree connection and mark them invalid */
 	write_lock(&GlobalSMBSeslock);
 	list_for_each_safe(tmp, tmp1, &pTcon->openFileList) {
-		open_file = list_entry(tmp,struct cifsFileInfo, tlist);
-		if(open_file) {
+		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
+		if (open_file) {
 			open_file->invalidHandle = TRUE;
 		}
 	}
@@ -113,75 +113,78 @@
 	/* SMBs NegProt, SessSetup, uLogoff do not have tcon yet so
 	   check for tcp and smb session status done differently
 	   for those three - in the calling routine */
-	if(tcon) {
-		if(tcon->tidStatus == CifsExiting) {
+	if (tcon) {
+		if (tcon->tidStatus == CifsExiting) {
 			/* only tree disconnect, open, and write,
 			(and ulogoff which does not have tcon)
 			are allowed as we start force umount */
-			if((smb_command != SMB_COM_WRITE_ANDX) && 
-			   (smb_command != SMB_COM_OPEN_ANDX) && 
+			if ((smb_command != SMB_COM_WRITE_ANDX) &&
+			   (smb_command != SMB_COM_OPEN_ANDX) &&
 			   (smb_command != SMB_COM_TREE_DISCONNECT)) {
-				cFYI(1,("can not send cmd %d while umounting",
+				cFYI(1, ("can not send cmd %d while umounting",
 					smb_command));
 				return -ENODEV;
 			}
 		}
-		if((tcon->ses) && (tcon->ses->status != CifsExiting) &&
-				  (tcon->ses->server)){
+		if ((tcon->ses) && (tcon->ses->status != CifsExiting) &&
+				  (tcon->ses->server)) {
 			struct nls_table *nls_codepage;
-				/* Give Demultiplex thread up to 10 seconds to 
+				/* Give Demultiplex thread up to 10 seconds to
 				   reconnect, should be greater than cifs socket
 				   timeout which is 7 seconds */
-			while(tcon->ses->server->tcpStatus == CifsNeedReconnect) {
+			while (tcon->ses->server->tcpStatus ==
+							 CifsNeedReconnect) {
 				wait_event_interruptible_timeout(tcon->ses->server->response_q,
-					(tcon->ses->server->tcpStatus == CifsGood), 10 * HZ);
-				if(tcon->ses->server->tcpStatus == CifsNeedReconnect) {
+					(tcon->ses->server->tcpStatus ==
+							CifsGood), 10 * HZ);
+				if (tcon->ses->server->tcpStatus ==
+							CifsNeedReconnect) {
 					/* on "soft" mounts we wait once */
-					if((tcon->retry == FALSE) || 
+					if ((tcon->retry == FALSE) ||
 					   (tcon->ses->status == CifsExiting)) {
-						cFYI(1,("gave up waiting on reconnect in smb_init"));
+						cFYI(1, ("gave up waiting on "
+						      "reconnect in smb_init"));
 						return -EHOSTDOWN;
 					} /* else "hard" mount - keep retrying
 					     until process is killed or server
 					     comes back on-line */
 				} else /* TCP session is reestablished now */
 					break;
-				 
 			}
-			
+
 			nls_codepage = load_nls_default();
 		/* need to prevent multiple threads trying to
 		simultaneously reconnect the same SMB session */
 			down(&tcon->ses->sesSem);
-			if(tcon->ses->status == CifsNeedReconnect)
-				rc = cifs_setup_session(0, tcon->ses, 
+			if (tcon->ses->status == CifsNeedReconnect)
+				rc = cifs_setup_session(0, tcon->ses,
 							nls_codepage);
-			if(!rc && (tcon->tidStatus == CifsNeedReconnect)) {
+			if (!rc && (tcon->tidStatus == CifsNeedReconnect)) {
 				mark_open_files_invalid(tcon);
-				rc = CIFSTCon(0, tcon->ses, tcon->treeName, 
+				rc = CIFSTCon(0, tcon->ses, tcon->treeName,
 					      tcon, nls_codepage);
 				up(&tcon->ses->sesSem);
 				/* tell server which Unix caps we support */
 				if (tcon->ses->capabilities & CAP_UNIX)
 					reset_cifs_unix_caps(0 /* no xid */,
-						tcon, 
+						tcon,
 						NULL /* we do not know sb */,
-						NULL /* no vol info */);	
+						NULL /* no vol info */);
 				/* BB FIXME add code to check if wsize needs
 				   update due to negotiated smb buffer size
 				   shrinking */
-				if(rc == 0)
+				if (rc == 0)
 					atomic_inc(&tconInfoReconnectCount);
 
 				cFYI(1, ("reconnect tcon rc = %d", rc));
-				/* Removed call to reopen open files here - 
-				   it is safer (and faster) to reopen files
+				/* Removed call to reopen open files here.
+				   It is safer (and faster) to reopen files
 				   one at a time as needed in read and write */
 
-				/* Check if handle based operation so we 
+				/* Check if handle based operation so we
 				   know whether we can continue or not without
 				   returning to caller to reset file handle */
-				switch(smb_command) {
+				switch (smb_command) {
 					case SMB_COM_READ_ANDX:
 					case SMB_COM_WRITE_ANDX:
 					case SMB_COM_CLOSE:
@@ -200,7 +203,7 @@
 			return -EIO;
 		}
 	}
-	if(rc)
+	if (rc)
 		return rc;
 
 	*request_buf = cifs_small_buf_get();
@@ -209,23 +212,24 @@
 		return -ENOMEM;
 	}
 
-	header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,wct);
+	header_assemble((struct smb_hdr *) *request_buf, smb_command,
+			tcon, wct);
 
-        if(tcon != NULL)
-                cifs_stats_inc(&tcon->num_smbs_sent);
+	if (tcon != NULL)
+		cifs_stats_inc(&tcon->num_smbs_sent);
 
 	return rc;
 }
 
 int
-small_smb_init_no_tc(const int smb_command, const int wct, 
+small_smb_init_no_tc(const int smb_command, const int wct,
 		     struct cifsSesInfo *ses, void **request_buf)
 {
 	int rc;
-	struct smb_hdr * buffer;
+	struct smb_hdr *buffer;
 
 	rc = small_smb_init(smb_command, wct, NULL, request_buf);
-	if(rc)
+	if (rc)
 		return rc;
 
 	buffer = (struct smb_hdr *)*request_buf;
@@ -237,7 +241,7 @@
 
 	/* uid, tid can stay at zero as set in header assemble */
 
-	/* BB add support for turning on the signing when 
+	/* BB add support for turning on the signing when
 	this function is used after 1st of session setup requests */
 
 	return rc;
@@ -254,52 +258,53 @@
 	/* SMBs NegProt, SessSetup, uLogoff do not have tcon yet so
 	   check for tcp and smb session status done differently
 	   for those three - in the calling routine */
-	if(tcon) {
-		if(tcon->tidStatus == CifsExiting) {
+	if (tcon) {
+		if (tcon->tidStatus == CifsExiting) {
 			/* only tree disconnect, open, and write,
 			  (and ulogoff which does not have tcon)
 			  are allowed as we start force umount */
-			if((smb_command != SMB_COM_WRITE_ANDX) &&
+			if ((smb_command != SMB_COM_WRITE_ANDX) &&
 			   (smb_command != SMB_COM_OPEN_ANDX) &&
 			   (smb_command != SMB_COM_TREE_DISCONNECT)) {
-				cFYI(1,("can not send cmd %d while umounting",
+				cFYI(1, ("can not send cmd %d while umounting",
 					smb_command));
 				return -ENODEV;
 			}
 		}
 
-		if((tcon->ses) && (tcon->ses->status != CifsExiting) && 
-				  (tcon->ses->server)){
+		if ((tcon->ses) && (tcon->ses->status != CifsExiting) &&
+				  (tcon->ses->server)) {
 			struct nls_table *nls_codepage;
 				/* Give Demultiplex thread up to 10 seconds to
 				   reconnect, should be greater than cifs socket
 				   timeout which is 7 seconds */
-			while(tcon->ses->server->tcpStatus == CifsNeedReconnect) {
+			while (tcon->ses->server->tcpStatus ==
+							CifsNeedReconnect) {
 				wait_event_interruptible_timeout(tcon->ses->server->response_q,
-					(tcon->ses->server->tcpStatus == CifsGood), 10 * HZ);
-				if(tcon->ses->server->tcpStatus == 
+					(tcon->ses->server->tcpStatus ==
+							CifsGood), 10 * HZ);
+				if (tcon->ses->server->tcpStatus ==
 						CifsNeedReconnect) {
 					/* on "soft" mounts we wait once */
-					if((tcon->retry == FALSE) || 
+					if ((tcon->retry == FALSE) ||
 					   (tcon->ses->status == CifsExiting)) {
-						cFYI(1,("gave up waiting on reconnect in smb_init"));
+						cFYI(1, ("gave up waiting on "
+						      "reconnect in smb_init"));
 						return -EHOSTDOWN;
 					} /* else "hard" mount - keep retrying
 					     until process is killed or server
 					     comes on-line */
 				} else /* TCP session is reestablished now */
 					break;
-				 
 			}
-			
 			nls_codepage = load_nls_default();
 		/* need to prevent multiple threads trying to
 		simultaneously reconnect the same SMB session */
 			down(&tcon->ses->sesSem);
-			if(tcon->ses->status == CifsNeedReconnect)
-				rc = cifs_setup_session(0, tcon->ses, 
+			if (tcon->ses->status == CifsNeedReconnect)
+				rc = cifs_setup_session(0, tcon->ses,
 							nls_codepage);
-			if(!rc && (tcon->tidStatus == CifsNeedReconnect)) {
+			if (!rc && (tcon->tidStatus == CifsNeedReconnect)) {
 				mark_open_files_invalid(tcon);
 				rc = CIFSTCon(0, tcon->ses, tcon->treeName,
 					      tcon, nls_codepage);
@@ -307,24 +312,24 @@
 				/* tell server which Unix caps we support */
 				if (tcon->ses->capabilities & CAP_UNIX)
 					reset_cifs_unix_caps(0 /* no xid */,
-						tcon, 
+						tcon,
 						NULL /* do not know sb */,
 						NULL /* no vol info */);
 				/* BB FIXME add code to check if wsize needs
 				update due to negotiated smb buffer size
 				shrinking */
-				if(rc == 0)
+				if (rc == 0)
 					atomic_inc(&tconInfoReconnectCount);
 
 				cFYI(1, ("reconnect tcon rc = %d", rc));
-				/* Removed call to reopen open files here - 
-				   it is safer (and faster) to reopen files
+				/* Removed call to reopen open files here.
+				   It is safer (and faster) to reopen files
 				   one at a time as needed in read and write */
 
-				/* Check if handle based operation so we 
+				/* Check if handle based operation so we
 				   know whether we can continue or not without
 				   returning to caller to reset file handle */
-				switch(smb_command) {
+				switch (smb_command) {
 					case SMB_COM_READ_ANDX:
 					case SMB_COM_WRITE_ANDX:
 					case SMB_COM_CLOSE:
@@ -343,7 +348,7 @@
 			return -EIO;
 		}
 	}
-	if(rc)
+	if (rc)
 		return rc;
 
 	*request_buf = cifs_buf_get();
@@ -355,48 +360,48 @@
     /* potential retries of smb operations it turns out we can determine */
     /* from the mid flags when the request buffer can be resent without  */
     /* having to use a second distinct buffer for the response */
-	if(response_buf)
-		*response_buf = *request_buf; 
+	if (response_buf)
+		*response_buf = *request_buf;
 
 	header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,
 			wct /*wct */ );
 
-        if(tcon != NULL)
-                cifs_stats_inc(&tcon->num_smbs_sent);
+	if (tcon != NULL)
+		cifs_stats_inc(&tcon->num_smbs_sent);
 
 	return rc;
 }
 
-static int validate_t2(struct smb_t2_rsp * pSMB) 
+static int validate_t2(struct smb_t2_rsp *pSMB)
 {
 	int rc = -EINVAL;
 	int total_size;
-	char * pBCC;
+	char *pBCC;
 
 	/* check for plausible wct, bcc and t2 data and parm sizes */
 	/* check for parm and data offset going beyond end of smb */
-	if(pSMB->hdr.WordCount >= 10) {
-		if((le16_to_cpu(pSMB->t2_rsp.ParameterOffset) <= 1024) &&
+	if (pSMB->hdr.WordCount >= 10) {
+		if ((le16_to_cpu(pSMB->t2_rsp.ParameterOffset) <= 1024) &&
 		   (le16_to_cpu(pSMB->t2_rsp.DataOffset) <= 1024)) {
 			/* check that bcc is at least as big as parms + data */
 			/* check that bcc is less than negotiated smb buffer */
 			total_size = le16_to_cpu(pSMB->t2_rsp.ParameterCount);
-			if(total_size < 512) {
-				total_size+=le16_to_cpu(pSMB->t2_rsp.DataCount);
+			if (total_size < 512) {
+				total_size +=
+					le16_to_cpu(pSMB->t2_rsp.DataCount);
 				/* BCC le converted in SendReceive */
-				pBCC = (pSMB->hdr.WordCount * 2) + 
+				pBCC = (pSMB->hdr.WordCount * 2) +
 					sizeof(struct smb_hdr) +
 					(char *)pSMB;
-				if((total_size <= (*(u16 *)pBCC)) && 
-				   (total_size < 
+				if ((total_size <= (*(u16 *)pBCC)) &&
+				   (total_size <
 					CIFSMaxBufSize+MAX_CIFS_HDR_SIZE)) {
 					return 0;
 				}
-				
 			}
 		}
 	}
-	cifs_dump_mem("Invalid transact2 SMB: ",(char *)pSMB,
+	cifs_dump_mem("Invalid transact2 SMB: ", (char *)pSMB,
 		sizeof(struct smb_t2_rsp) + 16);
 	return rc;
 }
@@ -408,12 +413,12 @@
 	int rc = 0;
 	int bytes_returned;
 	int i;
-	struct TCP_Server_Info * server;
+	struct TCP_Server_Info *server;
 	u16 count;
 	unsigned int secFlags;
 	u16 dialect;
 
-	if(ses->server)
+	if (ses->server)
 		server = ses->server;
 	else {
 		rc = -EIO;
@@ -425,20 +430,20 @@
 		return rc;
 
 	/* if any of auth flags (ie not sign or seal) are overriden use them */
-	if(ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
-		secFlags = ses->overrideSecFlg;
+	if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
+		secFlags = ses->overrideSecFlg;  /* BB FIXME fix sign flags? */
 	else /* if override flags set only sign/seal OR them with global auth */
 		secFlags = extended_security | ses->overrideSecFlg;
 
-	cFYI(1,("secFlags 0x%x",secFlags));
+	cFYI(1, ("secFlags 0x%x", secFlags));
 
 	pSMB->hdr.Mid = GetNextMid(server);
 	pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
 	if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
 		pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
-	
+
 	count = 0;
-	for(i=0;i<CIFS_NUM_PROT;i++) {
+	for (i = 0; i < CIFS_NUM_PROT; i++) {
 		strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
 		count += strlen(protocols[i].name) + 1;
 		/* null at end of source and target buffers anyway */
@@ -448,26 +453,26 @@
 
 	rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
 			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
-	if (rc != 0) 
+	if (rc != 0)
 		goto neg_err_exit;
 
 	dialect = le16_to_cpu(pSMBr->DialectIndex);
-	cFYI(1,("Dialect: %d", dialect));
+	cFYI(1, ("Dialect: %d", dialect));
 	/* Check wct = 1 error case */
-	if((pSMBr->hdr.WordCount < 13) || (dialect == BAD_PROT)) {
+	if ((pSMBr->hdr.WordCount < 13) || (dialect == BAD_PROT)) {
 		/* core returns wct = 1, but we do not ask for core - otherwise
-		small wct just comes when dialect index is -1 indicating we 
+		small wct just comes when dialect index is -1 indicating we
 		could not negotiate a common dialect */
 		rc = -EOPNOTSUPP;
 		goto neg_err_exit;
-#ifdef CONFIG_CIFS_WEAK_PW_HASH 
-	} else if((pSMBr->hdr.WordCount == 13)
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+	} else if ((pSMBr->hdr.WordCount == 13)
 			&& ((dialect == LANMAN_PROT)
 				|| (dialect == LANMAN2_PROT))) {
 		__s16 tmp;
-		struct lanman_neg_rsp * rsp = (struct lanman_neg_rsp *)pSMBr;
+		struct lanman_neg_rsp *rsp = (struct lanman_neg_rsp *)pSMBr;
 
-		if((secFlags & CIFSSEC_MAY_LANMAN) || 
+		if ((secFlags & CIFSSEC_MAY_LANMAN) ||
 			(secFlags & CIFSSEC_MAY_PLNTXT))
 			server->secType = LANMAN;
 		else {
@@ -475,7 +480,7 @@
 				   " in /proc/fs/cifs/SecurityFlags"));
 			rc = -EOPNOTSUPP;
 			goto neg_err_exit;
-		}	
+		}
 		server->secMode = (__u8)le16_to_cpu(rsp->SecurityMode);
 		server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
 		server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
@@ -483,7 +488,7 @@
 		GETU32(server->sessid) = le32_to_cpu(rsp->SessionKey);
 		/* even though we do not use raw we might as well set this
 		accurately, in case we ever find a need for it */
-		if((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
+		if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
 			server->maxRw = 0xFF00;
 			server->capabilities = CAP_MPX_MODE | CAP_RAW_MODE;
 		} else {
@@ -504,29 +509,29 @@
 			utc = CURRENT_TIME;
 			ts = cnvrtDosUnixTm(le16_to_cpu(rsp->SrvTime.Date),
 						le16_to_cpu(rsp->SrvTime.Time));
-			cFYI(1,("SrvTime: %d sec since 1970 (utc: %d) diff: %d",
-				(int)ts.tv_sec, (int)utc.tv_sec, 
+			cFYI(1, ("SrvTime %d sec since 1970 (utc: %d) diff: %d",
+				(int)ts.tv_sec, (int)utc.tv_sec,
 				(int)(utc.tv_sec - ts.tv_sec)));
 			val = (int)(utc.tv_sec - ts.tv_sec);
 			seconds = val < 0 ? -val : val;
 			result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ;
 			remain = seconds % MIN_TZ_ADJ;
-			if(remain >= (MIN_TZ_ADJ / 2))
+			if (remain >= (MIN_TZ_ADJ / 2))
 				result += MIN_TZ_ADJ;
-			if(val < 0)
+			if (val < 0)
 				result = - result;
 			server->timeAdj = result;
 		} else {
 			server->timeAdj = (int)tmp;
 			server->timeAdj *= 60; /* also in seconds */
 		}
-		cFYI(1,("server->timeAdj: %d seconds", server->timeAdj));
+		cFYI(1, ("server->timeAdj: %d seconds", server->timeAdj));
 
 
 		/* BB get server time for time conversions and add
-		code to use it and timezone since this is not UTC */	
+		code to use it and timezone since this is not UTC */
 
-		if (rsp->EncryptionKeyLength == 
+		if (rsp->EncryptionKeyLength ==
 				cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
 			memcpy(server->cryptKey, rsp->EncryptionKey,
 				CIFS_CRYPTO_KEY_SIZE);
@@ -535,39 +540,39 @@
 			goto neg_err_exit;
 		}
 
-		cFYI(1,("LANMAN negotiated"));
+		cFYI(1, ("LANMAN negotiated"));
 		/* we will not end up setting signing flags - as no signing
 		was in LANMAN and server did not return the flags on */
 		goto signing_check;
 #else /* weak security disabled */
-	} else if(pSMBr->hdr.WordCount == 13) {
-		cERROR(1,("mount failed, cifs module not built "
+	} else if (pSMBr->hdr.WordCount == 13) {
+		cERROR(1, ("mount failed, cifs module not built "
 			  "with CIFS_WEAK_PW_HASH support"));
 			rc = -EOPNOTSUPP;
 #endif /* WEAK_PW_HASH */
 		goto neg_err_exit;
-	} else if(pSMBr->hdr.WordCount != 17) {
+	} else if (pSMBr->hdr.WordCount != 17) {
 		/* unknown wct */
 		rc = -EOPNOTSUPP;
 		goto neg_err_exit;
 	}
 	/* else wct == 17 NTLM */
 	server->secMode = pSMBr->SecurityMode;
-	if((server->secMode & SECMODE_USER) == 0)
-		cFYI(1,("share mode security"));
+	if ((server->secMode & SECMODE_USER) == 0)
+		cFYI(1, ("share mode security"));
 
-	if((server->secMode & SECMODE_PW_ENCRYPT) == 0)
+	if ((server->secMode & SECMODE_PW_ENCRYPT) == 0)
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
 		if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0)
 #endif /* CIFS_WEAK_PW_HASH */
-			cERROR(1,("Server requests plain text password"
+			cERROR(1, ("Server requests plain text password"
 				  " but client support disabled"));
 
-	if((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2)
+	if ((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2)
 		server->secType = NTLMv2;
-	else if(secFlags & CIFSSEC_MAY_NTLM)
+	else if (secFlags & CIFSSEC_MAY_NTLM)
 		server->secType = NTLM;
-	else if(secFlags & CIFSSEC_MAY_NTLMV2)
+	else if (secFlags & CIFSSEC_MAY_NTLMV2)
 		server->secType = NTLMv2;
 	/* else krb5 ... any others ... */
 
@@ -596,7 +601,7 @@
 
 	/* BB might be helpful to save off the domain of server here */
 
-	if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) && 
+	if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) &&
 		(server->capabilities & CAP_EXTENDED_SECURITY)) {
 		count = pSMBr->ByteCount;
 		if (count < 16)
@@ -620,7 +625,7 @@
 						 SecurityBlob,
 						 count - 16,
 						 &server->secType);
-			if(rc == 1) {
+			if (rc == 1) {
 			/* BB Need to fill struct for sessetup here */
 				rc = -EOPNOTSUPP;
 			} else {
@@ -633,26 +638,37 @@
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
 signing_check:
 #endif
-	if(sign_CIFS_PDUs == FALSE) {        
-		if(server->secMode & SECMODE_SIGN_REQUIRED)
-			cERROR(1,("Server requires "
-				 "/proc/fs/cifs/PacketSigningEnabled to be on"));
-		server->secMode &= 
+	if ((secFlags & CIFSSEC_MAY_SIGN) == 0) {
+		/* MUST_SIGN already includes the MAY_SIGN FLAG
+		   so if this is zero it means that signing is disabled */
+		cFYI(1, ("Signing disabled"));
+		if (server->secMode & SECMODE_SIGN_REQUIRED)
+			cERROR(1, ("Server requires "
+				   "/proc/fs/cifs/PacketSigningEnabled "
+				   "to be on"));
+		server->secMode &=
 			~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
-	} else if(sign_CIFS_PDUs == 1) {
-		if((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
-			server->secMode &= 
-				~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
-	} else if(sign_CIFS_PDUs == 2) {
-		if((server->secMode & 
+	} else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) {
+		/* signing required */
+		cFYI(1, ("Must sign - secFlags 0x%x", secFlags));
+		if ((server->secMode &
 			(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
-			cERROR(1,("signing required but server lacks support"));
-		}
+			cERROR(1,
+				("signing required but server lacks support"));
+			rc = -EOPNOTSUPP;
+		} else
+			server->secMode |= SECMODE_SIGN_REQUIRED;
+	} else {
+		/* signing optional ie CIFSSEC_MAY_SIGN */
+		if ((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
+			server->secMode &=
+				~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
 	}
-neg_err_exit:	
+
+neg_err_exit:
 	cifs_buf_release(pSMB);
 
-	cFYI(1,("negprot rc %d",rc));
+	cFYI(1, ("negprot rc %d", rc));
 	return rc;
 }
 
@@ -669,7 +685,7 @@
 	 *  If last user of the connection and
 	 *  connection alive - disconnect it
 	 *  If this is the last connection on the server session disconnect it
-	 *  (and inside session disconnect we should check if tcp socket needs 
+	 *  (and inside session disconnect we should check if tcp socket needs
 	 *  to be freed and kernel thread woken up).
 	 */
 	if (tcon)
@@ -683,18 +699,18 @@
 		return -EBUSY;
 	}
 
-	/* No need to return error on this operation if tid invalidated and 
+	/* No need to return error on this operation if tid invalidated and
 	closed on server already e.g. due to tcp session crashing */
-	if(tcon->tidStatus == CifsNeedReconnect) {
+	if (tcon->tidStatus == CifsNeedReconnect) {
 		up(&tcon->tconSem);
-		return 0;  
+		return 0;
 	}
 
-	if((tcon->ses == NULL) || (tcon->ses->server == NULL)) {    
+	if ((tcon->ses == NULL) || (tcon->ses->server == NULL)) {
 		up(&tcon->tconSem);
 		return -EIO;
 	}
-	rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon, 
+	rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon,
 			    (void **)&smb_buffer);
 	if (rc) {
 		up(&tcon->tconSem);
@@ -711,7 +727,7 @@
 		cifs_small_buf_release(smb_buffer);
 	up(&tcon->tconSem);
 
-	/* No need to return error on this operation if tid invalidated and 
+	/* No need to return error on this operation if tid invalidated and
 	closed on server already e.g. due to tcp session crashing */
 	if (rc == -EAGAIN)
 		rc = 0;
@@ -745,11 +761,11 @@
 	}
 
 	smb_buffer_response = (struct smb_hdr *)pSMB; /* BB removeme BB */
-	
-	if(ses->server) {
+
+	if (ses->server) {
 		pSMB->hdr.Mid = GetNextMid(ses->server);
 
-		if(ses->server->secMode & 
+		if (ses->server->secMode &
 		   (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
 			pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 	}
@@ -772,7 +788,7 @@
 	cifs_small_buf_release(pSMB);
 
 	/* if session dead then we do not need to do ulogoff,
-		since server closed smb session, no sense reporting 
+		since server closed smb session, no sense reporting
 		error */
 	if (rc == -EAGAIN)
 		rc = 0;
@@ -780,6 +796,82 @@
 }
 
 int
+CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+		 __u16 type, const struct nls_table *nls_codepage, int remap)
+{
+	TRANSACTION2_SPI_REQ *pSMB = NULL;
+	TRANSACTION2_SPI_RSP *pSMBr = NULL;
+	struct unlink_psx_rq *pRqD;
+	int name_len;
+	int rc = 0;
+	int bytes_returned = 0;
+	__u16 params, param_offset, offset, byte_count;
+
+	cFYI(1, ("In POSIX delete"));
+PsxDelete:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		      (void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
+				     PATH_MAX, nls_codepage, remap);
+		name_len++;	/* trailing null */
+		name_len *= 2;
+	} else { /* BB add path length overrun check */
+		name_len = strnlen(fileName, PATH_MAX);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->FileName, fileName, name_len);
+	}
+
+	params = 6 + name_len;
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = 0; /* BB double check this with jra */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+				InformationLevel) - 4;
+	offset = param_offset + params;
+
+	/* Setup pointer to Request Data (inode type) */
+	pRqD = (struct unlink_psx_rq *)(((char *)&pSMB->hdr.Protocol) + offset);
+	pRqD->type = cpu_to_le16(type);
+	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+	pSMB->DataOffset = cpu_to_le16(offset);
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+	byte_count = 3 /* pad */  + params + sizeof(struct unlink_psx_rq);
+
+	pSMB->DataCount = cpu_to_le16(sizeof(struct unlink_psx_rq));
+	pSMB->TotalDataCount = cpu_to_le16(sizeof(struct unlink_psx_rq));
+	pSMB->ParameterCount = cpu_to_le16(params);
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_UNLINK);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->ByteCount = cpu_to_le16(byte_count);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("Posix delete returned %d", rc));
+	}
+	cifs_buf_release(pSMB);
+
+	cifs_stats_inc(&tcon->num_deletes);
+
+	if (rc == -EAGAIN)
+		goto PsxDelete;
+
+	return rc;
+}
+
+int
 CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName,
 	       const struct nls_table *nls_codepage, int remap)
 {
@@ -797,7 +889,7 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->fileName, fileName, 
+		    cifsConvertToUCS((__le16 *) pSMB->fileName, fileName,
 				     PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
@@ -816,7 +908,7 @@
 	cifs_stats_inc(&tcon->num_deletes);
 	if (rc) {
 		cFYI(1, ("Error in RMFile = %d", rc));
-	} 
+	}
 
 	cifs_buf_release(pSMB);
 	if (rc == -EAGAIN)
@@ -826,7 +918,7 @@
 }
 
 int
-CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon, const char *dirName, 
+CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon, const char *dirName,
 	     const struct nls_table *nls_codepage, int remap)
 {
 	DELETE_DIRECTORY_REQ *pSMB = NULL;
@@ -887,7 +979,7 @@
 		return rc;
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
-		name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, name, 
+		name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, name,
 					    PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
@@ -916,7 +1008,7 @@
 int
 CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags,
 		__u64 mode, __u16 * netfid, FILE_UNIX_BASIC_INFO *pRetData,
-		__u32 *pOplock, const char *name, 
+		__u32 *pOplock, const char *name,
 		const struct nls_table *nls_codepage, int remap)
 {
 	TRANSACTION2_SPI_REQ *pSMB = NULL;
@@ -924,7 +1016,6 @@
 	int name_len;
 	int rc = 0;
 	int bytes_returned = 0;
-	char *data_offset;
 	__u16 params, param_offset, offset, byte_count, count;
 	OPEN_PSX_REQ * pdata;
 	OPEN_PSX_RSP * psx_rsp;
@@ -958,13 +1049,12 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	param_offset = offsetof(struct smb_com_transaction2_spi_req,
-                                     InformationLevel) - 4;
+				InformationLevel) - 4;
 	offset = param_offset + params;
-	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
 	pdata = (OPEN_PSX_REQ *)(((char *)&pSMB->hdr.Protocol) + offset);
 	pdata->Level = SMB_QUERY_FILE_UNIX_BASIC;
 	pdata->Permissions = cpu_to_le64(mode);
-	pdata->PosixOpenFlags = cpu_to_le32(posix_flags); 
+	pdata->PosixOpenFlags = cpu_to_le32(posix_flags);
 	pdata->OpenFlags =  cpu_to_le32(*pOplock);
 	pSMB->ParameterOffset = cpu_to_le16(param_offset);
 	pSMB->DataOffset = cpu_to_le16(offset);
@@ -979,7 +1069,7 @@
 	pSMB->TotalParameterCount = pSMB->ParameterCount;
 	pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_OPEN);
 	pSMB->Reserved4 = 0;
-	pSMB->hdr.smb_buf_length += byte_count; 
+	pSMB->hdr.smb_buf_length += byte_count;
 	pSMB->ByteCount = cpu_to_le16(byte_count);
 	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
 			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
@@ -988,7 +1078,7 @@
 		goto psx_create_err;
 	}
 
-	cFYI(1,("copying inode info"));
+	cFYI(1, ("copying inode info"));
 	rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 
 	if (rc || (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP))) {
@@ -997,34 +1087,33 @@
 	}
 
 	/* copy return information to pRetData */
-	psx_rsp = (OPEN_PSX_RSP *)((char *) &pSMBr->hdr.Protocol 
+	psx_rsp = (OPEN_PSX_RSP *)((char *) &pSMBr->hdr.Protocol
 			+ le16_to_cpu(pSMBr->t2.DataOffset));
-		
+
 	*pOplock = le16_to_cpu(psx_rsp->OplockFlags);
-	if(netfid)
+	if (netfid)
 		*netfid = psx_rsp->Fid;   /* cifs fid stays in le */
 	/* Let caller know file was created so we can set the mode. */
 	/* Do we care about the CreateAction in any other cases? */
-	if(cpu_to_le32(FILE_CREATE) == psx_rsp->CreateAction)
+	if (cpu_to_le32(FILE_CREATE) == psx_rsp->CreateAction)
 		*pOplock |= CIFS_CREATE_ACTION;
 	/* check to make sure response data is there */
-	if(psx_rsp->ReturnedLevel != SMB_QUERY_FILE_UNIX_BASIC) {
+	if (psx_rsp->ReturnedLevel != SMB_QUERY_FILE_UNIX_BASIC) {
 		pRetData->Type = -1; /* unknown */
 #ifdef CONFIG_CIFS_DEBUG2
-		cFYI(1,("unknown type"));
+		cFYI(1, ("unknown type"));
 #endif
 	} else {
-		if(pSMBr->ByteCount < sizeof(OPEN_PSX_RSP) 
+		if (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP)
 					+ sizeof(FILE_UNIX_BASIC_INFO)) {
-			cERROR(1,("Open response data too small"));
+			cERROR(1, ("Open response data too small"));
 			pRetData->Type = -1;
 			goto psx_create_err;
 		}
-		memcpy((char *) pRetData, 
+		memcpy((char *) pRetData,
 			(char *)psx_rsp + sizeof(OPEN_PSX_RSP),
 			sizeof (FILE_UNIX_BASIC_INFO));
 	}
-			
 
 psx_create_err:
 	cifs_buf_release(pSMB);
@@ -1034,7 +1123,7 @@
 	if (rc == -EAGAIN)
 		goto PsxCreat;
 
-	return rc;	
+	return rc;
 }
 
 static __u16 convert_disposition(int disposition)
@@ -1061,7 +1150,7 @@
 			ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC;
 			break;
 		default:
-			cFYI(1,("unknown disposition %d",disposition));
+			cFYI(1, ("unknown disposition %d", disposition));
 			ofun =  SMBOPEN_OAPPEND; /* regular open */
 	}
 	return ofun;
@@ -1071,7 +1160,7 @@
 SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon,
 	    const char *fileName, const int openDisposition,
 	    const int access_flags, const int create_options, __u16 * netfid,
-            int *pOplock, FILE_ALL_INFO * pfile_info,
+	    int *pOplock, FILE_ALL_INFO * pfile_info,
 	    const struct nls_table *nls_codepage, int remap)
 {
 	int rc = -EACCES;
@@ -1113,16 +1202,16 @@
 	   1 = write
 	   2 = rw
 	   3 = execute
-        */
+	 */
 	pSMB->Mode = cpu_to_le16(2);
 	pSMB->Mode |= cpu_to_le16(0x40); /* deny none */
 	/* set file as system file if special file such
 	   as fifo and server expecting SFU style and
 	   no Unix extensions */
 
-        if(create_options & CREATE_OPTION_SPECIAL)
-                pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM);
-        else
+	if (create_options & CREATE_OPTION_SPECIAL)
+		pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM);
+	else
                 pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/); /* BB FIXME */
 
 	/* if ((omode & S_IWUGO) == 0)
@@ -1132,7 +1221,8 @@
 	    being created */
 
 	/* BB FIXME BB */
-/*	pSMB->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK); */
+/*	pSMB->CreateOptions = cpu_to_le32(create_options &
+						 CREATE_OPTIONS_MASK); */
 	/* BB FIXME END BB */
 
 	pSMB->Sattr = cpu_to_le16(ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY);
@@ -1143,7 +1233,7 @@
 	pSMB->ByteCount = cpu_to_le16(count);
 	/* long_op set to 1 to allow for oplock break timeouts */
 	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-		         (struct smb_hdr *) pSMBr, &bytes_returned, 1);
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 1);
 	cifs_stats_inc(&tcon->num_opens);
 	if (rc) {
 		cFYI(1, ("Error in Open = %d", rc));
@@ -1156,17 +1246,17 @@
 		/* Let caller know file was created so we can set the mode. */
 		/* Do we care about the CreateAction in any other cases? */
 	/* BB FIXME BB */
-/*		if(cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
+/*		if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
 			*pOplock |= CIFS_CREATE_ACTION; */
 	/* BB FIXME END */
 
-		if(pfile_info) {
+		if (pfile_info) {
 			pfile_info->CreationTime = 0; /* BB convert CreateTime*/
 			pfile_info->LastAccessTime = 0; /* BB fixme */
 			pfile_info->LastWriteTime = 0; /* BB fixme */
 			pfile_info->ChangeTime = 0;  /* BB fixme */
 			pfile_info->Attributes =
-				cpu_to_le32(le16_to_cpu(pSMBr->FileAttributes)); 
+				cpu_to_le32(le16_to_cpu(pSMBr->FileAttributes));
 			/* the file_info buf is endian converted by caller */
 			pfile_info->AllocationSize =
 				cpu_to_le64(le32_to_cpu(pSMBr->EndOfFile));
@@ -1185,7 +1275,7 @@
 CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon,
 	    const char *fileName, const int openDisposition,
 	    const int access_flags, const int create_options, __u16 * netfid,
-	    int *pOplock, FILE_ALL_INFO * pfile_info, 
+	    int *pOplock, FILE_ALL_INFO * pfile_info,
 	    const struct nls_table *nls_codepage, int remap)
 {
 	int rc = -EACCES;
@@ -1228,7 +1318,7 @@
 	/* set file as system file if special file such
 	   as fifo and server expecting SFU style and
 	   no Unix extensions */
-	if(create_options & CREATE_OPTION_SPECIAL)
+	if (create_options & CREATE_OPTION_SPECIAL)
 		pSMB->FileAttributes = cpu_to_le32(ATTR_SYSTEM);
 	else
 		pSMB->FileAttributes = cpu_to_le32(ATTR_NORMAL);
@@ -1266,10 +1356,10 @@
 		*netfid = pSMBr->Fid;	/* cifs fid stays in le */
 		/* Let caller know file was created so we can set the mode. */
 		/* Do we care about the CreateAction in any other cases? */
-		if(cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
-			*pOplock |= CIFS_CREATE_ACTION; 
-		if(pfile_info) {
-		    memcpy((char *)pfile_info,(char *)&pSMBr->CreationTime,
+		if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
+			*pOplock |= CIFS_CREATE_ACTION;
+		if (pfile_info) {
+		    memcpy((char *)pfile_info, (char *)&pSMBr->CreationTime,
 			36 /* CreationTime to Attributes */);
 		    /* the file_info buf is endian converted by caller */
 		    pfile_info->AllocationSize = pSMBr->AllocationSize;
@@ -1285,10 +1375,9 @@
 }
 
 int
-CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
-            const int netfid, const unsigned int count,
-            const __u64 lseek, unsigned int *nbytes, char **buf,
-	    int * pbuf_type)
+CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
+	    const unsigned int count, const __u64 lseek, unsigned int *nbytes,
+	    char **buf, int *pbuf_type)
 {
 	int rc = -EACCES;
 	READ_REQ *pSMB = NULL;
@@ -1298,8 +1387,8 @@
 	int resp_buf_type = 0;
 	struct kvec iov[1];
 
-	cFYI(1,("Reading %d bytes on fid %d",count,netfid));
-	if(tcon->ses->capabilities & CAP_LARGE_FILES)
+	cFYI(1, ("Reading %d bytes on fid %d", count, netfid));
+	if (tcon->ses->capabilities & CAP_LARGE_FILES)
 		wct = 12;
 	else
 		wct = 10; /* old style read */
@@ -1316,28 +1405,28 @@
 	pSMB->AndXCommand = 0xFF;       /* none */
 	pSMB->Fid = netfid;
 	pSMB->OffsetLow = cpu_to_le32(lseek & 0xFFFFFFFF);
-	if(wct == 12)
+	if (wct == 12)
 		pSMB->OffsetHigh = cpu_to_le32(lseek >> 32);
-	else if((lseek >> 32) > 0) /* can not handle this big offset for old */
+	else if ((lseek >> 32) > 0) /* can not handle this big offset for old */
 		return -EIO;
 
 	pSMB->Remaining = 0;
 	pSMB->MaxCount = cpu_to_le16(count & 0xFFFF);
 	pSMB->MaxCountHigh = cpu_to_le32(count >> 16);
-	if(wct == 12)
+	if (wct == 12)
 		pSMB->ByteCount = 0;  /* no need to do le conversion since 0 */
 	else {
 		/* old style read */
-		struct smb_com_readx_req * pSMBW =
+		struct smb_com_readx_req *pSMBW =
 			(struct smb_com_readx_req *)pSMB;
 		pSMBW->ByteCount = 0;
 	}
 
 	iov[0].iov_base = (char *)pSMB;
 	iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
-	rc = SendReceive2(xid, tcon->ses, iov, 
+	rc = SendReceive2(xid, tcon->ses, iov,
 			  1 /* num iovecs */,
-			  &resp_buf_type, 0); 
+			  &resp_buf_type, 0);
 	cifs_stats_inc(&tcon->num_reads);
 	pSMBr = (READ_RSP *)iov[0].iov_base;
 	if (rc) {
@@ -1351,33 +1440,34 @@
 		/*check that DataLength would not go beyond end of SMB */
 		if ((data_length > CIFSMaxBufSize)
 				|| (data_length > count)) {
-			cFYI(1,("bad length %d for count %d",data_length,count));
+			cFYI(1, ("bad length %d for count %d",
+				 data_length, count));
 			rc = -EIO;
 			*nbytes = 0;
 		} else {
 			pReadData = (char *) (&pSMBr->hdr.Protocol) +
 			    le16_to_cpu(pSMBr->DataOffset);
-/*                      if(rc = copy_to_user(buf, pReadData, data_length)) {
-                                cERROR(1,("Faulting on read rc = %d",rc));
-                                rc = -EFAULT;
+/*                      if (rc = copy_to_user(buf, pReadData, data_length)) {
+				cERROR(1,("Faulting on read rc = %d",rc));
+				rc = -EFAULT;
                         }*/ /* can not use copy_to_user when using page cache*/
-			if(*buf)
-				memcpy(*buf,pReadData,data_length);
+			if (*buf)
+				memcpy(*buf, pReadData, data_length);
 		}
 	}
 
 /*	cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
-	if(*buf) {
-		if(resp_buf_type == CIFS_SMALL_BUFFER)
+	if (*buf) {
+		if (resp_buf_type == CIFS_SMALL_BUFFER)
 			cifs_small_buf_release(iov[0].iov_base);
-		else if(resp_buf_type == CIFS_LARGE_BUFFER)
+		else if (resp_buf_type == CIFS_LARGE_BUFFER)
 			cifs_buf_release(iov[0].iov_base);
-	} else if(resp_buf_type != CIFS_NO_BUFFER) {
-		/* return buffer to caller to free */ 
-		*buf = iov[0].iov_base;		
-		if(resp_buf_type == CIFS_SMALL_BUFFER)
+	} else if (resp_buf_type != CIFS_NO_BUFFER) {
+		/* return buffer to caller to free */
+		*buf = iov[0].iov_base;
+		if (resp_buf_type == CIFS_SMALL_BUFFER)
 			*pbuf_type = CIFS_SMALL_BUFFER;
-		else if(resp_buf_type == CIFS_LARGE_BUFFER)
+		else if (resp_buf_type == CIFS_LARGE_BUFFER)
 			*pbuf_type = CIFS_LARGE_BUFFER;
 	} /* else no valid buffer on return - leave as null */
 
@@ -1391,7 +1481,7 @@
 CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
 	     const int netfid, const unsigned int count,
 	     const __u64 offset, unsigned int *nbytes, const char *buf,
-	     const char __user * ubuf, const int long_op)
+	     const char __user *ubuf, const int long_op)
 {
 	int rc = -EACCES;
 	WRITE_REQ *pSMB = NULL;
@@ -1401,10 +1491,10 @@
 	__u16 byte_count;
 
 	/* cFYI(1,("write at %lld %d bytes",offset,count));*/
-	if(tcon->ses == NULL)
+	if (tcon->ses == NULL)
 		return -ECONNABORTED;
 
-	if(tcon->ses->capabilities & CAP_LARGE_FILES)
+	if (tcon->ses->capabilities & CAP_LARGE_FILES)
 		wct = 14;
 	else
 		wct = 12;
@@ -1420,20 +1510,20 @@
 	pSMB->AndXCommand = 0xFF;	/* none */
 	pSMB->Fid = netfid;
 	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
-	if(wct == 14) 
+	if (wct == 14)
 		pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
-	else if((offset >> 32) > 0) /* can not handle this big offset for old */
+	else if ((offset >> 32) > 0) /* can not handle big offset for old srv */
 		return -EIO;
-	
+
 	pSMB->Reserved = 0xFFFFFFFF;
 	pSMB->WriteMode = 0;
 	pSMB->Remaining = 0;
 
-	/* Can increase buffer size if buffer is big enough in some cases - ie we 
+	/* Can increase buffer size if buffer is big enough in some cases ie we
 	can send more if LARGE_WRITE_X capability returned by the server and if
 	our buffer is big enough or if we convert to iovecs on socket writes
 	and eliminate the copy to the CIFS buffer */
-	if(tcon->ses->capabilities & CAP_LARGE_WRITE_X) {
+	if (tcon->ses->capabilities & CAP_LARGE_WRITE_X) {
 		bytes_sent = min_t(const unsigned int, CIFSMaxBufSize, count);
 	} else {
 		bytes_sent = (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)
@@ -1443,11 +1533,11 @@
 	if (bytes_sent > count)
 		bytes_sent = count;
 	pSMB->DataOffset =
-		cpu_to_le16(offsetof(struct smb_com_write_req,Data) - 4);
-	if(buf)
-	    memcpy(pSMB->Data,buf,bytes_sent);
-	else if(ubuf) {
-		if(copy_from_user(pSMB->Data,ubuf,bytes_sent)) {
+		cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
+	if (buf)
+	    memcpy(pSMB->Data, buf, bytes_sent);
+	else if (ubuf) {
+		if (copy_from_user(pSMB->Data, ubuf, bytes_sent)) {
 			cifs_buf_release(pSMB);
 			return -EFAULT;
 		}
@@ -1456,7 +1546,7 @@
 		cifs_buf_release(pSMB);
 		return -EINVAL;
 	} /* else setting file size with write of zero bytes */
-	if(wct == 14)
+	if (wct == 14)
 		byte_count = bytes_sent + 1; /* pad */
 	else /* wct == 12 */ {
 		byte_count = bytes_sent + 5; /* bigger pad, smaller smb hdr */
@@ -1465,10 +1555,11 @@
 	pSMB->DataLengthHigh = cpu_to_le16(bytes_sent >> 16);
 	pSMB->hdr.smb_buf_length += byte_count;
 
-	if(wct == 14)
+	if (wct == 14)
 		pSMB->ByteCount = cpu_to_le16(byte_count);
-	else { /* old style write has byte count 4 bytes earlier so 4 bytes pad  */
-		struct smb_com_writex_req * pSMBW = 
+	else { /* old style write has byte count 4 bytes earlier
+		  so 4 bytes pad  */
+		struct smb_com_writex_req *pSMBW =
 			(struct smb_com_writex_req *)pSMB;
 		pSMBW->ByteCount = cpu_to_le16(byte_count);
 	}
@@ -1487,7 +1578,7 @@
 
 	cifs_buf_release(pSMB);
 
-	/* Note: On -EAGAIN error only caller can retry on handle based calls 
+	/* Note: On -EAGAIN error only caller can retry on handle based calls
 		since file handle passed in no longer valid */
 
 	return rc;
@@ -1505,9 +1596,9 @@
 	int smb_hdr_len;
 	int resp_buf_type = 0;
 
-	cFYI(1,("write2 at %lld %d bytes", (long long)offset, count));
+	cFYI(1, ("write2 at %lld %d bytes", (long long)offset, count));
 
-	if(tcon->ses->capabilities & CAP_LARGE_FILES)
+	if (tcon->ses->capabilities & CAP_LARGE_FILES)
 		wct = 14;
 	else
 		wct = 12;
@@ -1521,37 +1612,37 @@
 	pSMB->AndXCommand = 0xFF;	/* none */
 	pSMB->Fid = netfid;
 	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
-	if(wct == 14)
+	if (wct == 14)
 		pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
-	else if((offset >> 32) > 0) /* can not handle this big offset for old */
+	else if ((offset >> 32) > 0) /* can not handle big offset for old srv */
 		return -EIO;
 	pSMB->Reserved = 0xFFFFFFFF;
 	pSMB->WriteMode = 0;
 	pSMB->Remaining = 0;
 
 	pSMB->DataOffset =
-	    cpu_to_le16(offsetof(struct smb_com_write_req,Data) - 4);
+	    cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
 
 	pSMB->DataLengthLow = cpu_to_le16(count & 0xFFFF);
 	pSMB->DataLengthHigh = cpu_to_le16(count >> 16);
 	smb_hdr_len = pSMB->hdr.smb_buf_length + 1; /* hdr + 1 byte pad */
-	if(wct == 14)
+	if (wct == 14)
 		pSMB->hdr.smb_buf_length += count+1;
 	else /* wct == 12 */
-		pSMB->hdr.smb_buf_length += count+5; /* smb data starts later */ 
-	if(wct == 14)
+		pSMB->hdr.smb_buf_length += count+5; /* smb data starts later */
+	if (wct == 14)
 		pSMB->ByteCount = cpu_to_le16(count + 1);
 	else /* wct == 12 */ /* bigger pad, smaller smb hdr, keep offset ok */ {
-		struct smb_com_writex_req * pSMBW =
+		struct smb_com_writex_req *pSMBW =
 				(struct smb_com_writex_req *)pSMB;
 		pSMBW->ByteCount = cpu_to_le16(count + 5);
 	}
 	iov[0].iov_base = pSMB;
-	if(wct == 14)
+	if (wct == 14)
 		iov[0].iov_len = smb_hdr_len + 4;
 	else /* wct == 12 pad bigger by four bytes */
 		iov[0].iov_len = smb_hdr_len + 8;
-	
+
 
 	rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type,
 			  long_op);
@@ -1559,7 +1650,7 @@
 	if (rc) {
 		cFYI(1, ("Send error Write2 = %d", rc));
 		*nbytes = 0;
-	} else if(resp_buf_type == 0) {
+	} else if (resp_buf_type == 0) {
 		/* presumably this can not happen, but best to be safe */
 		rc = -EIO;
 		*nbytes = 0;
@@ -1568,15 +1659,15 @@
 		*nbytes = le16_to_cpu(pSMBr->CountHigh);
 		*nbytes = (*nbytes) << 16;
 		*nbytes += le16_to_cpu(pSMBr->Count);
-	} 
+	}
 
 /*	cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
-	if(resp_buf_type == CIFS_SMALL_BUFFER)
+	if (resp_buf_type == CIFS_SMALL_BUFFER)
 		cifs_small_buf_release(iov[0].iov_base);
-	else if(resp_buf_type == CIFS_LARGE_BUFFER)
+	else if (resp_buf_type == CIFS_LARGE_BUFFER)
 		cifs_buf_release(iov[0].iov_base);
 
-	/* Note: On -EAGAIN error only caller can retry on handle based calls 
+	/* Note: On -EAGAIN error only caller can retry on handle based calls
 		since file handle passed in no longer valid */
 
 	return rc;
@@ -1596,7 +1687,7 @@
 	int timeout = 0;
 	__u16 count;
 
-	cFYI(1, ("In CIFSSMBLock - timeout %d numLock %d",waitFlag,numLock));
+	cFYI(1, ("In CIFSSMBLock - timeout %d numLock %d", waitFlag, numLock));
 	rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);
 
 	if (rc)
@@ -1604,7 +1695,7 @@
 
 	pSMBr = (LOCK_RSP *)pSMB; /* BB removeme BB */
 
-	if(lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
+	if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
 		timeout = -1; /* no response expected */
 		pSMB->Timeout = 0;
 	} else if (waitFlag == TRUE) {
@@ -1620,7 +1711,7 @@
 	pSMB->AndXCommand = 0xFF;	/* none */
 	pSMB->Fid = smb_file_id; /* netfid stays le */
 
-	if((numLock != 0) || (numUnlock != 0)) {
+	if ((numLock != 0) || (numUnlock != 0)) {
 		pSMB->Locks[0].Pid = cpu_to_le16(current->tgid);
 		/* BB where to store pid high? */
 		pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len);
@@ -1648,7 +1739,7 @@
 	}
 	cifs_small_buf_release(pSMB);
 
-	/* Note: On -EAGAIN error only caller can retry on handle based calls 
+	/* Note: On -EAGAIN error only caller can retry on handle based calls
 	since file handle passed in no longer valid */
 	return rc;
 }
@@ -1656,12 +1747,11 @@
 int
 CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
 		const __u16 smb_file_id, const int get_flag, const __u64 len,
-		struct file_lock *pLockData, const __u16 lock_type, 
+		struct file_lock *pLockData, const __u16 lock_type,
 		const int waitFlag)
 {
 	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
 	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
-	char *data_offset;
 	struct cifs_posix_lock *parm_data;
 	int rc = 0;
 	int timeout = 0;
@@ -1670,7 +1760,7 @@
 
 	cFYI(1, ("Posix Lock"));
 
-	if(pLockData == NULL)
+	if (pLockData == NULL)
 		return EINVAL;
 
 	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
@@ -1680,7 +1770,7 @@
 
 	pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;
 
-	params = 6; 
+	params = 6;
 	pSMB->MaxSetupCount = 0;
 	pSMB->Reserved = 0;
 	pSMB->Flags = 0;
@@ -1688,14 +1778,12 @@
 	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
 	offset = param_offset + params;
 
-	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
-
 	count = sizeof(struct cifs_posix_lock);
 	pSMB->MaxParameterCount = cpu_to_le16(2);
 	pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */
 	pSMB->SetupCount = 1;
 	pSMB->Reserved3 = 0;
-	if(get_flag)
+	if (get_flag)
 		pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
 	else
 		pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
@@ -1705,11 +1793,11 @@
 	pSMB->TotalDataCount = pSMB->DataCount;
 	pSMB->TotalParameterCount = pSMB->ParameterCount;
 	pSMB->ParameterOffset = cpu_to_le16(param_offset);
-	parm_data = (struct cifs_posix_lock *) 
+	parm_data = (struct cifs_posix_lock *)
 			(((char *) &pSMB->hdr.Protocol) + offset);
 
 	parm_data->lock_type = cpu_to_le16(lock_type);
-	if(waitFlag) {
+	if (waitFlag) {
 		timeout = 3;  /* blocking operation, no timeout */
 		parm_data->lock_flags = cpu_to_le16(1);
 		pSMB->Timeout = cpu_to_le32(-1);
@@ -1746,22 +1834,22 @@
 			rc = -EIO;      /* bad smb */
 			goto plk_err_exit;
 		}
-		if(pLockData == NULL) {
+		if (pLockData == NULL) {
 			rc = -EINVAL;
 			goto plk_err_exit;
 		}
 		data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
 		data_count  = le16_to_cpu(pSMBr->t2.DataCount);
-		if(data_count < sizeof(struct cifs_posix_lock)) {
+		if (data_count < sizeof(struct cifs_posix_lock)) {
 			rc = -EIO;
 			goto plk_err_exit;
 		}
 		parm_data = (struct cifs_posix_lock *)
 			((char *)&pSMBr->hdr.Protocol + data_offset);
-		if(parm_data->lock_type == cpu_to_le16(CIFS_UNLCK))
+		if (parm_data->lock_type == cpu_to_le16(CIFS_UNLCK))
 			pLockData->fl_type = F_UNLCK;
 	}
- 
+
 plk_err_exit:
 	if (pSMB)
 		cifs_small_buf_release(pSMB);
@@ -1784,7 +1872,7 @@
 
 /* do not retry on dead session on close */
 	rc = small_smb_init(SMB_COM_CLOSE, 3, tcon, (void **) &pSMB);
-	if(rc == -EAGAIN)
+	if (rc == -EAGAIN)
 		return 0;
 	if (rc)
 		return rc;
@@ -1798,7 +1886,7 @@
 			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
 	cifs_stats_inc(&tcon->num_closes);
 	if (rc) {
-		if(rc!=-EINTR) {
+		if (rc != -EINTR) {
 			/* EINTR is expected when user ctl-c to kill app */
 			cERROR(1, ("Send error in Close = %d", rc));
 		}
@@ -1807,7 +1895,7 @@
 	cifs_small_buf_release(pSMB);
 
 	/* Since session is dead, file will be closed on server already */
-	if(rc == -EAGAIN)
+	if (rc == -EAGAIN)
 		rc = 0;
 
 	return rc;
@@ -1839,7 +1927,7 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName, 
+		    cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName,
 				     PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
@@ -1851,7 +1939,7 @@
 				     toName, PATH_MAX, nls_codepage, remap);
 		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
 		name_len2 *= 2;	/* convert to bytes */
-	} else {		/* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(fromName, PATH_MAX);
 		name_len++;	/* trailing null */
 		strncpy(pSMB->OldFileName, fromName, name_len);
@@ -1872,7 +1960,7 @@
 	cifs_stats_inc(&tcon->num_renames);
 	if (rc) {
 		cFYI(1, ("Send error in rename = %d", rc));
-	} 
+	}
 
 	cifs_buf_release(pSMB);
 
@@ -1882,13 +1970,13 @@
 	return rc;
 }
 
-int CIFSSMBRenameOpenFile(const int xid,struct cifsTconInfo *pTcon, 
-		int netfid, char * target_name, 
-		const struct nls_table * nls_codepage, int remap)
+int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
+		int netfid, char *target_name,
+		const struct nls_table *nls_codepage, int remap)
 {
 	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
 	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
-	struct set_file_rename * rename_info;
+	struct set_file_rename *rename_info;
 	char *data_offset;
 	char dummy_string[30];
 	int rc = 0;
@@ -1927,13 +2015,14 @@
 	rename_info->overwrite = cpu_to_le32(1);
 	rename_info->root_fid  = 0;
 	/* unicode only call */
-	if(target_name == NULL) {
-		sprintf(dummy_string,"cifs%x",pSMB->hdr.Mid);
-	        len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name,
+	if (target_name == NULL) {
+		sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid);
+		len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name,
 					dummy_string, 24, nls_codepage, remap);
 	} else {
 		len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name,
-					target_name, PATH_MAX, nls_codepage, remap);
+					target_name, PATH_MAX, nls_codepage,
+					remap);
 	}
 	rename_info->target_name_len = cpu_to_le32(2 * len_of_str);
 	count = 12 /* sizeof(struct set_file_rename) */ + (2 * len_of_str) + 2;
@@ -1947,10 +2036,10 @@
 	pSMB->hdr.smb_buf_length += byte_count;
 	pSMB->ByteCount = cpu_to_le16(byte_count);
 	rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB,
-                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
 	cifs_stats_inc(&pTcon->num_t2renames);
 	if (rc) {
-		cFYI(1,("Send error in Rename (by file handle) = %d", rc));
+		cFYI(1, ("Send error in Rename (by file handle) = %d", rc));
 	}
 
 	cifs_buf_release(pSMB);
@@ -1962,9 +2051,9 @@
 }
 
 int
-CIFSSMBCopy(const int xid, struct cifsTconInfo *tcon, const char * fromName, 
-            const __u16 target_tid, const char *toName, const int flags,
-            const struct nls_table *nls_codepage, int remap)
+CIFSSMBCopy(const int xid, struct cifsTconInfo *tcon, const char *fromName,
+	    const __u16 target_tid, const char *toName, const int flags,
+	    const struct nls_table *nls_codepage, int remap)
 {
 	int rc = 0;
 	COPY_REQ *pSMB = NULL;
@@ -1986,7 +2075,7 @@
 	pSMB->Flags = cpu_to_le16(flags & COPY_TREE);
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
-		name_len = cifsConvertToUCS((__le16 *) pSMB->OldFileName, 
+		name_len = cifsConvertToUCS((__le16 *) pSMB->OldFileName,
 					    fromName, PATH_MAX, nls_codepage,
 					    remap);
 		name_len++;     /* trailing null */
@@ -1994,11 +2083,12 @@
 		pSMB->OldFileName[name_len] = 0x04;     /* pad */
 		/* protocol requires ASCII signature byte on Unicode string */
 		pSMB->OldFileName[name_len + 1] = 0x00;
-		name_len2 = cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2], 
+		name_len2 =
+		    cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
 				toName, PATH_MAX, nls_codepage, remap);
 		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
 		name_len2 *= 2; /* convert to bytes */
-	} else {                /* BB improve the check for buffer overruns BB */
+	} else { 	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(fromName, PATH_MAX);
 		name_len++;     /* trailing null */
 		strncpy(pSMB->OldFileName, fromName, name_len);
@@ -2058,7 +2148,7 @@
 		name_len++;	/* trailing null */
 		name_len *= 2;
 
-	} else {		/* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(fromName, PATH_MAX);
 		name_len++;	/* trailing null */
 		strncpy(pSMB->FileName, fromName, name_len);
@@ -2070,7 +2160,7 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	param_offset = offsetof(struct smb_com_transaction2_spi_req,
-                                     InformationLevel) - 4;
+				InformationLevel) - 4;
 	offset = param_offset + params;
 
 	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
@@ -2081,7 +2171,7 @@
 				  , nls_codepage);
 		name_len_target++;	/* trailing null */
 		name_len_target *= 2;
-	} else {		/* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len_target = strnlen(toName, PATH_MAX);
 		name_len_target++;	/* trailing null */
 		strncpy(data_offset, toName, name_len_target);
@@ -2108,9 +2198,7 @@
 			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
 	cifs_stats_inc(&tcon->num_symlinks);
 	if (rc) {
-		cFYI(1,
-		     ("Send error in SetPathInfo (create symlink) = %d",
-		      rc));
+		cFYI(1, ("Send error in SetPathInfo create symlink = %d", rc));
 	}
 
 	if (pSMB)
@@ -2149,7 +2237,7 @@
 		name_len++;	/* trailing null */
 		name_len *= 2;
 
-	} else {		/* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(toName, PATH_MAX);
 		name_len++;	/* trailing null */
 		strncpy(pSMB->FileName, toName, name_len);
@@ -2161,7 +2249,7 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	param_offset = offsetof(struct smb_com_transaction2_spi_req,
-                                     InformationLevel) - 4;
+				InformationLevel) - 4;
 	offset = param_offset + params;
 
 	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
@@ -2171,7 +2259,7 @@
 				     nls_codepage, remap);
 		name_len_target++;	/* trailing null */
 		name_len_target *= 2;
-	} else {		/* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len_target = strnlen(fromName, PATH_MAX);
 		name_len_target++;	/* trailing null */
 		strncpy(data_offset, fromName, name_len_target);
@@ -2243,13 +2331,13 @@
 		name_len++;	/* trailing null */
 		name_len *= 2;
 		pSMB->OldFileName[name_len] = 0;	/* pad */
-		pSMB->OldFileName[name_len + 1] = 0x04; 
+		pSMB->OldFileName[name_len + 1] = 0x04;
 		name_len2 =
-		    cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2], 
+		    cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
 				     toName, PATH_MAX, nls_codepage, remap);
 		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
 		name_len2 *= 2;	/* convert to bytes */
-	} else {		/* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(fromName, PATH_MAX);
 		name_len++;	/* trailing null */
 		strncpy(pSMB->OldFileName, fromName, name_len);
@@ -2302,12 +2390,11 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifs_strtoUCS((__le16 *) pSMB->FileName, searchName, PATH_MAX
-				  /* find define for this maxpathcomponent */
-				  , nls_codepage);
+		    cifs_strtoUCS((__le16 *) pSMB->FileName, searchName,
+				  PATH_MAX, nls_codepage);
 		name_len++;	/* trailing null */
 		name_len *= 2;
-	} else {		/* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(searchName, PATH_MAX);
 		name_len++;	/* trailing null */
 		strncpy(pSMB->FileName, searchName, name_len);
@@ -2324,7 +2411,7 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	pSMB->ParameterOffset = cpu_to_le16(offsetof(
-        struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
 	pSMB->DataCount = 0;
 	pSMB->DataOffset = 0;
 	pSMB->SetupCount = 1;
@@ -2355,16 +2442,16 @@
 
 			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
 				name_len = UniStrnlen((wchar_t *) ((char *)
-					&pSMBr->hdr.Protocol +data_offset),
-					min_t(const int, buflen,count) / 2);
+					&pSMBr->hdr.Protocol + data_offset),
+					min_t(const int, buflen, count) / 2);
 			/* BB FIXME investigate remapping reserved chars here */
 				cifs_strfromUCS_le(symlinkinfo,
-					(__le16 *) ((char *)&pSMBr->hdr.Protocol +
-						data_offset),
+					(__le16 *) ((char *)&pSMBr->hdr.Protocol
+							+ data_offset),
 					name_len, nls_codepage);
 			} else {
 				strncpy(symlinkinfo,
-					(char *) &pSMBr->hdr.Protocol + 
+					(char *) &pSMBr->hdr.Protocol +
 						data_offset,
 					min_t(const int, buflen, count));
 			}
@@ -2385,14 +2472,14 @@
 	Setup words themselves and ByteCount
 	MaxSetupCount (size of returned setup area) and
 	MaxParameterCount (returned parms size) must be set by caller */
-static int 
+static int
 smb_init_ntransact(const __u16 sub_command, const int setup_count,
 		   const int parm_len, struct cifsTconInfo *tcon,
-		   void ** ret_buf)
+		   void **ret_buf)
 {
 	int rc;
 	__u32 temp_offset;
-	struct smb_com_ntransact_req * pSMB;
+	struct smb_com_ntransact_req *pSMB;
 
 	rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon,
 				(void **)&pSMB);
@@ -2416,47 +2503,47 @@
 }
 
 static int
-validate_ntransact(char * buf, char ** ppparm, char ** ppdata,
-		   int * pdatalen, int * pparmlen)
+validate_ntransact(char *buf, char **ppparm, char **ppdata,
+		   int *pdatalen, int *pparmlen)
 {
-	char * end_of_smb;
+	char *end_of_smb;
 	__u32 data_count, data_offset, parm_count, parm_offset;
-	struct smb_com_ntransact_rsp * pSMBr;
+	struct smb_com_ntransact_rsp *pSMBr;
 
-	if(buf == NULL)
+	if (buf == NULL)
 		return -EINVAL;
 
 	pSMBr = (struct smb_com_ntransact_rsp *)buf;
 
 	/* ByteCount was converted from little endian in SendReceive */
-	end_of_smb = 2 /* sizeof byte count */ + pSMBr->ByteCount + 
+	end_of_smb = 2 /* sizeof byte count */ + pSMBr->ByteCount +
 			(char *)&pSMBr->ByteCount;
 
-		
 	data_offset = le32_to_cpu(pSMBr->DataOffset);
 	data_count = le32_to_cpu(pSMBr->DataCount);
-        parm_offset = le32_to_cpu(pSMBr->ParameterOffset);
+	parm_offset = le32_to_cpu(pSMBr->ParameterOffset);
 	parm_count = le32_to_cpu(pSMBr->ParameterCount);
 
 	*ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset;
 	*ppdata = (char *)&pSMBr->hdr.Protocol + data_offset;
 
 	/* should we also check that parm and data areas do not overlap? */
-	if(*ppparm > end_of_smb) {
-		cFYI(1,("parms start after end of smb"));
+	if (*ppparm > end_of_smb) {
+		cFYI(1, ("parms start after end of smb"));
 		return -EINVAL;
-	} else if(parm_count + *ppparm > end_of_smb) {
-		cFYI(1,("parm end after end of smb"));
+	} else if (parm_count + *ppparm > end_of_smb) {
+		cFYI(1, ("parm end after end of smb"));
 		return -EINVAL;
-	} else if(*ppdata > end_of_smb) {
-		cFYI(1,("data starts after end of smb"));
+	} else if (*ppdata > end_of_smb) {
+		cFYI(1, ("data starts after end of smb"));
 		return -EINVAL;
-	} else if(data_count + *ppdata > end_of_smb) {
+	} else if (data_count + *ppdata > end_of_smb) {
 		cFYI(1,("data %p + count %d (%p) ends after end of smb %p start %p",
-			*ppdata, data_count, (data_count + *ppdata), end_of_smb, pSMBr));  /* BB FIXME */
+			*ppdata, data_count, (data_count + *ppdata),
+			end_of_smb, pSMBr));
 		return -EINVAL;
-	} else if(parm_count + data_count > pSMBr->ByteCount) {
-		cFYI(1,("parm count and data count larger than SMB"));
+	} else if (parm_count + data_count > pSMBr->ByteCount) {
+		cFYI(1, ("parm count and data count larger than SMB"));
 		return -EINVAL;
 	}
 	return 0;
@@ -2465,14 +2552,14 @@
 int
 CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
 			const unsigned char *searchName,
-			char *symlinkinfo, const int buflen,__u16 fid,
+			char *symlinkinfo, const int buflen, __u16 fid,
 			const struct nls_table *nls_codepage)
 {
 	int rc = 0;
 	int bytes_returned;
 	int name_len;
-	struct smb_com_transaction_ioctl_req * pSMB;
-	struct smb_com_transaction_ioctl_rsp * pSMBr;
+	struct smb_com_transaction_ioctl_req *pSMB;
+	struct smb_com_transaction_ioctl_rsp *pSMBr;
 
 	cFYI(1, ("In Windows reparse style QueryLink for path %s", searchName));
 	rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
@@ -2511,47 +2598,53 @@
 		/* BB also check enough total bytes returned */
 			rc = -EIO;	/* bad smb */
 		else {
-			if(data_count && (data_count < 2048)) {
-				char * end_of_smb = 2 /* sizeof byte count */ +
+			if (data_count && (data_count < 2048)) {
+				char *end_of_smb = 2 /* sizeof byte count */ +
 						pSMBr->ByteCount +
 						(char *)&pSMBr->ByteCount;
 
-				struct reparse_data * reparse_buf = (struct reparse_data *)
-					((char *)&pSMBr->hdr.Protocol + data_offset);
-				if((char*)reparse_buf >= end_of_smb) {
+				struct reparse_data *reparse_buf =
+						(struct reparse_data *)
+						((char *)&pSMBr->hdr.Protocol
+								 + data_offset);
+				if ((char *)reparse_buf >= end_of_smb) {
 					rc = -EIO;
 					goto qreparse_out;
 				}
-				if((reparse_buf->LinkNamesBuf + 
+				if ((reparse_buf->LinkNamesBuf +
 					reparse_buf->TargetNameOffset +
 					reparse_buf->TargetNameLen) >
 						end_of_smb) {
-					cFYI(1,("reparse buf extended beyond SMB"));
+					cFYI(1,("reparse buf goes beyond SMB"));
 					rc = -EIO;
 					goto qreparse_out;
 				}
-				
+
 				if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
 					name_len = UniStrnlen((wchar_t *)
-							(reparse_buf->LinkNamesBuf + 
-							reparse_buf->TargetNameOffset),
-							min(buflen/2, reparse_buf->TargetNameLen / 2)); 
+						(reparse_buf->LinkNamesBuf +
+						reparse_buf->TargetNameOffset),
+						min(buflen/2,
+						reparse_buf->TargetNameLen / 2));
 					cifs_strfromUCS_le(symlinkinfo,
-						(__le16 *) (reparse_buf->LinkNamesBuf + 
+						(__le16 *) (reparse_buf->LinkNamesBuf +
 						reparse_buf->TargetNameOffset),
 						name_len, nls_codepage);
 				} else { /* ASCII names */
-					strncpy(symlinkinfo,reparse_buf->LinkNamesBuf + 
-						reparse_buf->TargetNameOffset, 
-						min_t(const int, buflen, reparse_buf->TargetNameLen));
+					strncpy(symlinkinfo,
+						reparse_buf->LinkNamesBuf +
+						reparse_buf->TargetNameOffset,
+						min_t(const int, buflen,
+						   reparse_buf->TargetNameLen));
 				}
 			} else {
 				rc = -EIO;
-				cFYI(1,("Invalid return data count on get reparse info ioctl"));
+				cFYI(1, ("Invalid return data count on "
+					 "get reparse info ioctl"));
 			}
 			symlinkinfo[buflen] = 0; /* just in case so the caller
 					does not go off the end of the buffer */
-			cFYI(1,("readlink result - %s",symlinkinfo));
+			cFYI(1, ("readlink result - %s", symlinkinfo));
 		}
 	}
 qreparse_out:
@@ -2566,7 +2659,8 @@
 #ifdef CONFIG_CIFS_POSIX
 
 /*Convert an Access Control Entry from wire format to local POSIX xattr format*/
-static void cifs_convert_ace(posix_acl_xattr_entry * ace, struct cifs_posix_ace * cifs_ace)
+static void cifs_convert_ace(posix_acl_xattr_entry *ace,
+			     struct cifs_posix_ace *cifs_ace)
 {
 	/* u8 cifs fields do not need le conversion */
 	ace->e_perm = cpu_to_le16(cifs_ace->cifs_e_perm);
@@ -2578,30 +2672,31 @@
 }
 
 /* Convert ACL from CIFS POSIX wire format to local Linux POSIX ACL xattr */
-static int cifs_copy_posix_acl(char * trgt,char * src, const int buflen,
-				const int acl_type,const int size_of_data_area)
+static int cifs_copy_posix_acl(char *trgt, char *src, const int buflen,
+			       const int acl_type, const int size_of_data_area)
 {
 	int size =  0;
 	int i;
 	__u16 count;
-	struct cifs_posix_ace * pACE;
-	struct cifs_posix_acl * cifs_acl = (struct cifs_posix_acl *)src;
-	posix_acl_xattr_header * local_acl = (posix_acl_xattr_header *)trgt;
+	struct cifs_posix_ace *pACE;
+	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)src;
+	posix_acl_xattr_header *local_acl = (posix_acl_xattr_header *)trgt;
 
 	if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION)
 		return -EOPNOTSUPP;
 
-	if(acl_type & ACL_TYPE_ACCESS) {
+	if (acl_type & ACL_TYPE_ACCESS) {
 		count = le16_to_cpu(cifs_acl->access_entry_count);
 		pACE = &cifs_acl->ace_array[0];
 		size = sizeof(struct cifs_posix_acl);
 		size += sizeof(struct cifs_posix_ace) * count;
 		/* check if we would go beyond end of SMB */
-		if(size_of_data_area < size) {
-			cFYI(1,("bad CIFS POSIX ACL size %d vs. %d",size_of_data_area,size));
+		if (size_of_data_area < size) {
+			cFYI(1, ("bad CIFS POSIX ACL size %d vs. %d",
+				size_of_data_area, size));
 			return -EINVAL;
 		}
-	} else if(acl_type & ACL_TYPE_DEFAULT) {
+	} else if (acl_type & ACL_TYPE_DEFAULT) {
 		count = le16_to_cpu(cifs_acl->access_entry_count);
 		size = sizeof(struct cifs_posix_acl);
 		size += sizeof(struct cifs_posix_ace) * count;
@@ -2610,7 +2705,7 @@
 		count = le16_to_cpu(cifs_acl->default_entry_count);
 		size += sizeof(struct cifs_posix_ace) * count;
 		/* check if we would go beyond end of SMB */
-		if(size_of_data_area < size)
+		if (size_of_data_area < size)
 			return -EINVAL;
 	} else {
 		/* illegal type */
@@ -2618,76 +2713,77 @@
 	}
 
 	size = posix_acl_xattr_size(count);
-	if((buflen == 0) || (local_acl == NULL)) {
-		/* used to query ACL EA size */				
-	} else if(size > buflen) {
+	if ((buflen == 0) || (local_acl == NULL)) {
+		/* used to query ACL EA size */
+	} else if (size > buflen) {
 		return -ERANGE;
 	} else /* buffer big enough */ {
 		local_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
-		for(i = 0;i < count ;i++) {
-			cifs_convert_ace(&local_acl->a_entries[i],pACE);
-			pACE ++;
+		for (i = 0; i < count ; i++) {
+			cifs_convert_ace(&local_acl->a_entries[i], pACE);
+			pACE++;
 		}
 	}
 	return size;
 }
 
-static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace * cifs_ace,
-			const posix_acl_xattr_entry * local_ace)
+static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace *cifs_ace,
+				     const posix_acl_xattr_entry *local_ace)
 {
 	__u16 rc = 0; /* 0 = ACL converted ok */
 
 	cifs_ace->cifs_e_perm = le16_to_cpu(local_ace->e_perm);
 	cifs_ace->cifs_e_tag =  le16_to_cpu(local_ace->e_tag);
 	/* BB is there a better way to handle the large uid? */
-	if(local_ace->e_id == cpu_to_le32(-1)) {
+	if (local_ace->e_id == cpu_to_le32(-1)) {
 	/* Probably no need to le convert -1 on any arch but can not hurt */
 		cifs_ace->cifs_uid = cpu_to_le64(-1);
-	} else 
+	} else
 		cifs_ace->cifs_uid = cpu_to_le64(le32_to_cpu(local_ace->e_id));
-        /*cFYI(1,("perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id));*/
+	/*cFYI(1,("perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id));*/
 	return rc;
 }
 
 /* Convert ACL from local Linux POSIX xattr to CIFS POSIX ACL wire format */
-static __u16 ACL_to_cifs_posix(char * parm_data,const char * pACL,const int buflen,
-		const int acl_type)
+static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
+			       const int buflen, const int acl_type)
 {
 	__u16 rc = 0;
-        struct cifs_posix_acl * cifs_acl = (struct cifs_posix_acl *)parm_data;
-        posix_acl_xattr_header * local_acl = (posix_acl_xattr_header *)pACL;
+	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data;
+	posix_acl_xattr_header *local_acl = (posix_acl_xattr_header *)pACL;
 	int count;
 	int i;
 
-	if((buflen == 0) || (pACL == NULL) || (cifs_acl == NULL))
+	if ((buflen == 0) || (pACL == NULL) || (cifs_acl == NULL))
 		return 0;
 
 	count = posix_acl_xattr_count((size_t)buflen);
-	cFYI(1,("setting acl with %d entries from buf of length %d and version of %d",
+	cFYI(1, ("setting acl with %d entries from buf of length %d and "
+		"version of %d",
 		count, buflen, le32_to_cpu(local_acl->a_version)));
-	if(le32_to_cpu(local_acl->a_version) != 2) {
-		cFYI(1,("unknown POSIX ACL version %d",
+	if (le32_to_cpu(local_acl->a_version) != 2) {
+		cFYI(1, ("unknown POSIX ACL version %d",
 		     le32_to_cpu(local_acl->a_version)));
 		return 0;
 	}
 	cifs_acl->version = cpu_to_le16(1);
-	if(acl_type == ACL_TYPE_ACCESS) 
+	if (acl_type == ACL_TYPE_ACCESS)
 		cifs_acl->access_entry_count = cpu_to_le16(count);
-	else if(acl_type == ACL_TYPE_DEFAULT)
+	else if (acl_type == ACL_TYPE_DEFAULT)
 		cifs_acl->default_entry_count = cpu_to_le16(count);
 	else {
-		cFYI(1,("unknown ACL type %d",acl_type));
+		cFYI(1, ("unknown ACL type %d", acl_type));
 		return 0;
 	}
-	for(i=0;i<count;i++) {
+	for (i = 0; i < count; i++) {
 		rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i],
 					&local_acl->a_entries[i]);
-		if(rc != 0) {
+		if (rc != 0) {
 			/* ACE not converted */
 			break;
 		}
 	}
-	if(rc == 0) {
+	if (rc == 0) {
 		rc = (__u16)(count * sizeof(struct cifs_posix_ace));
 		rc += sizeof(struct cifs_posix_acl);
 		/* BB add check to make sure ACL does not overflow SMB */
@@ -2697,9 +2793,9 @@
 
 int
 CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
-                        const unsigned char *searchName,
-                        char *acl_inf, const int buflen, const int acl_type,
-                        const struct nls_table *nls_codepage, int remap)
+		   const unsigned char *searchName,
+		   char *acl_inf, const int buflen, const int acl_type,
+		   const struct nls_table *nls_codepage, int remap)
 {
 /* SMB_QUERY_POSIX_ACL */
 	TRANSACTION2_QPI_REQ *pSMB = NULL;
@@ -2708,7 +2804,7 @@
 	int bytes_returned;
 	int name_len;
 	__u16 params, byte_count;
-                                                                                                                                             
+
 	cFYI(1, ("In GetPosixACL (Unix) for path %s", searchName));
 
 queryAclRetry:
@@ -2716,16 +2812,16 @@
 		(void **) &pSMBr);
 	if (rc)
 		return rc;
-                                                                                                                                             
+
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-			cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, 
+			cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
 					 PATH_MAX, nls_codepage, remap);
 		name_len++;     /* trailing null */
 		name_len *= 2;
 		pSMB->FileName[name_len] = 0;
 		pSMB->FileName[name_len+1] = 0;
-	} else {                /* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(searchName, PATH_MAX);
 		name_len++;     /* trailing null */
 		strncpy(pSMB->FileName, searchName, name_len);
@@ -2734,7 +2830,7 @@
 	params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
 	pSMB->TotalDataCount = 0;
 	pSMB->MaxParameterCount = cpu_to_le16(2);
-        /* BB find exact max data count below from sess structure BB */
+	/* BB find exact max data count below from sess structure BB */
 	pSMB->MaxDataCount = cpu_to_le16(4000);
 	pSMB->MaxSetupCount = 0;
 	pSMB->Reserved = 0;
@@ -2742,7 +2838,8 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	pSMB->ParameterOffset = cpu_to_le16(
-		offsetof(struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+		offsetof(struct smb_com_transaction2_qpi_req,
+			 InformationLevel) - 4);
 	pSMB->DataCount = 0;
 	pSMB->DataOffset = 0;
 	pSMB->SetupCount = 1;
@@ -2763,7 +2860,7 @@
 		cFYI(1, ("Send error in Query POSIX ACL = %d", rc));
 	} else {
 		/* decode response */
- 
+
 		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 		if (rc || (pSMBr->ByteCount < 2))
 		/* BB also check enough total bytes returned */
@@ -2773,7 +2870,7 @@
 			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
 			rc = cifs_copy_posix_acl(acl_inf,
 				(char *)&pSMBr->hdr.Protocol+data_offset,
-				buflen,acl_type,count);
+				buflen, acl_type, count);
 		}
 	}
 	cifs_buf_release(pSMB);
@@ -2784,10 +2881,10 @@
 
 int
 CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
-                        const unsigned char *fileName,
-                        const char *local_acl, const int buflen, 
-			const int acl_type,
-                        const struct nls_table *nls_codepage, int remap)
+		   const unsigned char *fileName,
+		   const char *local_acl, const int buflen,
+		   const int acl_type,
+		   const struct nls_table *nls_codepage, int remap)
 {
 	struct smb_com_transaction2_spi_req *pSMB = NULL;
 	struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
@@ -2800,16 +2897,16 @@
 	cFYI(1, ("In SetPosixACL (Unix) for path %s", fileName));
 setAclRetry:
 	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
-                      (void **) &pSMBr);
+		      (void **) &pSMBr);
 	if (rc)
 		return rc;
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-			cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, 
+			cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
 				      PATH_MAX, nls_codepage, remap);
 		name_len++;     /* trailing null */
 		name_len *= 2;
-	} else {                /* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(fileName, PATH_MAX);
 		name_len++;     /* trailing null */
 		strncpy(pSMB->FileName, fileName, name_len);
@@ -2823,15 +2920,15 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	param_offset = offsetof(struct smb_com_transaction2_spi_req,
-                                     InformationLevel) - 4;
+				InformationLevel) - 4;
 	offset = param_offset + params;
 	parm_data = ((char *) &pSMB->hdr.Protocol) + offset;
 	pSMB->ParameterOffset = cpu_to_le16(param_offset);
 
 	/* convert to on the wire format for POSIX ACL */
-	data_count = ACL_to_cifs_posix(parm_data,local_acl,buflen,acl_type);
+	data_count = ACL_to_cifs_posix(parm_data, local_acl, buflen, acl_type);
 
-	if(data_count == 0) {
+	if (data_count == 0) {
 		rc = -EOPNOTSUPP;
 		goto setACLerrorExit;
 	}
@@ -2849,7 +2946,7 @@
 	pSMB->hdr.smb_buf_length += byte_count;
 	pSMB->ByteCount = cpu_to_le16(byte_count);
 	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
 	if (rc) {
 		cFYI(1, ("Set POSIX ACL returned %d", rc));
 	}
@@ -2864,86 +2961,85 @@
 /* BB fix tabs in this function FIXME BB */
 int
 CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
-                const int netfid, __u64 * pExtAttrBits, __u64 *pMask)
+	       const int netfid, __u64 * pExtAttrBits, __u64 *pMask)
 {
-        int rc = 0;
-        struct smb_t2_qfi_req *pSMB = NULL;
-        struct smb_t2_qfi_rsp *pSMBr = NULL;
-        int bytes_returned;
-        __u16 params, byte_count;
+	int rc = 0;
+	struct smb_t2_qfi_req *pSMB = NULL;
+	struct smb_t2_qfi_rsp *pSMBr = NULL;
+	int bytes_returned;
+	__u16 params, byte_count;
 
-        cFYI(1,("In GetExtAttr"));
-        if(tcon == NULL)
-                return -ENODEV;
+	cFYI(1, ("In GetExtAttr"));
+	if (tcon == NULL)
+		return -ENODEV;
 
 GetExtAttrRetry:
-        rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
-                      (void **) &pSMBr);
-        if (rc)
-                return rc;
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+			(void **) &pSMBr);
+	if (rc)
+		return rc;
 
-        params = 2 /* level */ +2 /* fid */;
-        pSMB->t2.TotalDataCount = 0;
-        pSMB->t2.MaxParameterCount = cpu_to_le16(4);
-        /* BB find exact max data count below from sess structure BB */
-        pSMB->t2.MaxDataCount = cpu_to_le16(4000);
-        pSMB->t2.MaxSetupCount = 0;
-        pSMB->t2.Reserved = 0;
-        pSMB->t2.Flags = 0;
-        pSMB->t2.Timeout = 0;
-        pSMB->t2.Reserved2 = 0;
-        pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
-			Fid) - 4);
-        pSMB->t2.DataCount = 0;
-        pSMB->t2.DataOffset = 0;
-        pSMB->t2.SetupCount = 1;
-        pSMB->t2.Reserved3 = 0;
-        pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
-        byte_count = params + 1 /* pad */ ;
-        pSMB->t2.TotalParameterCount = cpu_to_le16(params);
-        pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
-        pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_ATTR_FLAGS);
-        pSMB->Pad = 0;
+	params = 2 /* level */ +2 /* fid */;
+	pSMB->t2.TotalDataCount = 0;
+	pSMB->t2.MaxParameterCount = cpu_to_le16(4);
+	/* BB find exact max data count below from sess structure BB */
+	pSMB->t2.MaxDataCount = cpu_to_le16(4000);
+	pSMB->t2.MaxSetupCount = 0;
+	pSMB->t2.Reserved = 0;
+	pSMB->t2.Flags = 0;
+	pSMB->t2.Timeout = 0;
+	pSMB->t2.Reserved2 = 0;
+	pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
+					       Fid) - 4);
+	pSMB->t2.DataCount = 0;
+	pSMB->t2.DataOffset = 0;
+	pSMB->t2.SetupCount = 1;
+	pSMB->t2.Reserved3 = 0;
+	pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
+	byte_count = params + 1 /* pad */ ;
+	pSMB->t2.TotalParameterCount = cpu_to_le16(params);
+	pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
+	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_ATTR_FLAGS);
+	pSMB->Pad = 0;
 	pSMB->Fid = netfid;
-        pSMB->hdr.smb_buf_length += byte_count;
-        pSMB->t2.ByteCount = cpu_to_le16(byte_count);
+	pSMB->hdr.smb_buf_length += byte_count;
+	pSMB->t2.ByteCount = cpu_to_le16(byte_count);
 
-        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-                (struct smb_hdr *) pSMBr, &bytes_returned, 0);
-        if (rc) {
-                cFYI(1, ("error %d in GetExtAttr", rc));
-        } else {
-                /* decode response */
-                rc = validate_t2((struct smb_t2_rsp *)pSMBr);
-                if (rc || (pSMBr->ByteCount < 2))
-                /* BB also check enough total bytes returned */
-                        /* If rc should we check for EOPNOSUPP and
-                        disable the srvino flag? or in caller? */
-                        rc = -EIO;      /* bad smb */
-                else {
-                        __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
-                        __u16 count = le16_to_cpu(pSMBr->t2.DataCount);
-                        struct file_chattr_info * pfinfo;
-                        /* BB Do we need a cast or hash here ? */
-                        if(count != 16) {
-                                cFYI(1, ("Illegal size ret in GetExtAttr"));
-                                rc = -EIO;
-                                goto GetExtAttrOut;
-                        }
-                        pfinfo = (struct file_chattr_info *)
-                                (data_offset + (char *) &pSMBr->hdr.Protocol);
-                        *pExtAttrBits = le64_to_cpu(pfinfo->mode);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("error %d in GetExtAttr", rc));
+	} else {
+		/* decode response */
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+		if (rc || (pSMBr->ByteCount < 2))
+		/* BB also check enough total bytes returned */
+			/* If rc should we check for EOPNOSUPP and
+			   disable the srvino flag? or in caller? */
+			rc = -EIO;      /* bad smb */
+		else {
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
+			struct file_chattr_info *pfinfo;
+			/* BB Do we need a cast or hash here ? */
+			if (count != 16) {
+				cFYI(1, ("Illegal size ret in GetExtAttr"));
+				rc = -EIO;
+				goto GetExtAttrOut;
+			}
+			pfinfo = (struct file_chattr_info *)
+				 (data_offset + (char *) &pSMBr->hdr.Protocol);
+			*pExtAttrBits = le64_to_cpu(pfinfo->mode);
 			*pMask = le64_to_cpu(pfinfo->mask);
-                }
-        }
+		}
+	}
 GetExtAttrOut:
-        cifs_buf_release(pSMB);
-        if (rc == -EAGAIN)
-                goto GetExtAttrRetry;
-        return rc;
+	cifs_buf_release(pSMB);
+	if (rc == -EAGAIN)
+		goto GetExtAttrRetry;
+	return rc;
 }
 
-
 #endif /* CONFIG_POSIX */
 
 
@@ -2955,7 +3051,7 @@
 		{1, 2 , {0, 0, 0, 0, 0, 5}, {32, 545, 0, 0}};
 
 /* Convert CIFS ACL to POSIX form */
-static int parse_sec_desc(struct cifs_sid * psec_desc, int acl_len)
+static int parse_sec_desc(struct cifs_sid *psec_desc, int acl_len)
 {
 	return 0;
 }
@@ -2963,7 +3059,7 @@
 /* Get Security Descriptor (by handle) from remote server for a file or dir */
 int
 CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
-         /*  BB fix up return info */ char *acl_inf, const int buflen, 
+		/* BB fix up return info */ char *acl_inf, const int buflen,
 		  const int acl_type /* ACCESS/DEFAULT not sure implication */)
 {
 	int rc = 0;
@@ -2973,7 +3069,7 @@
 
 	cFYI(1, ("GetCifsACL"));
 
-	rc = smb_init_ntransact(NT_TRANSACT_QUERY_SECURITY_DESC, 0, 
+	rc = smb_init_ntransact(NT_TRANSACT_QUERY_SECURITY_DESC, 0,
 			8 /* parm len */, tcon, (void **) &pSMB);
 	if (rc)
 		return rc;
@@ -2994,23 +3090,23 @@
 	if (rc) {
 		cFYI(1, ("Send error in QuerySecDesc = %d", rc));
 	} else {                /* decode response */
-		struct cifs_sid * psec_desc;
+		struct cifs_sid *psec_desc;
 		__le32 * parm;
 		int parm_len;
 		int data_len;
 		int acl_len;
-		struct smb_com_ntransact_rsp * pSMBr;
+		struct smb_com_ntransact_rsp *pSMBr;
 
 /* validate_nttransact */
-		rc = validate_ntransact(iov[0].iov_base, (char **)&parm, 
+		rc = validate_ntransact(iov[0].iov_base, (char **)&parm,
 					(char **)&psec_desc,
 					&parm_len, &data_len);
-		
-		if(rc)
+		if (rc)
 			goto qsec_out;
 		pSMBr = (struct smb_com_ntransact_rsp *)iov[0].iov_base;
 
-		cERROR(1,("smb %p parm %p data %p",pSMBr,parm,psec_desc));  /* BB removeme BB */
+		cERROR(1, ("smb %p parm %p data %p",
+			  pSMBr, parm, psec_desc));  /* BB removeme BB */
 
 		if (le32_to_cpu(pSMBr->ParameterCount) != 4) {
 			rc = -EIO;      /* bad smb */
@@ -3020,14 +3116,14 @@
 /* BB check that data area is minimum length and as big as acl_len */
 
 		acl_len = le32_to_cpu(*(__le32 *)parm);
-		/* BB check if(acl_len > bufsize) */
+		/* BB check if (acl_len > bufsize) */
 
 		parse_sec_desc(psec_desc, acl_len);
 	}
 qsec_out:
-	if(buf_type == CIFS_SMALL_BUFFER)
+	if (buf_type == CIFS_SMALL_BUFFER)
 		cifs_small_buf_release(iov[0].iov_base);
-	else if(buf_type == CIFS_LARGE_BUFFER)
+	else if (buf_type == CIFS_LARGE_BUFFER)
 		cifs_buf_release(iov[0].iov_base);
 /*	cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
 	return rc;
@@ -3036,9 +3132,9 @@
 /* Legacy Query Path Information call for lookup to old servers such
    as Win9x/WinME */
 int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
-                 const unsigned char *searchName,
-                 FILE_ALL_INFO * pFinfo,
-                 const struct nls_table *nls_codepage, int remap)
+			const unsigned char *searchName,
+			FILE_ALL_INFO *pFinfo,
+			const struct nls_table *nls_codepage, int remap)
 {
 	QUERY_INFORMATION_REQ * pSMB;
 	QUERY_INFORMATION_RSP * pSMBr;
@@ -3046,31 +3142,31 @@
 	int bytes_returned;
 	int name_len;
 
-	cFYI(1, ("In SMBQPath path %s", searchName)); 
+	cFYI(1, ("In SMBQPath path %s", searchName));
 QInfRetry:
 	rc = smb_init(SMB_COM_QUERY_INFORMATION, 0, tcon, (void **) &pSMB,
-                      (void **) &pSMBr);
+		      (void **) &pSMBr);
 	if (rc)
 		return rc;
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-                    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-                                     PATH_MAX, nls_codepage, remap);
+			cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
+					PATH_MAX, nls_codepage, remap);
 		name_len++;     /* trailing null */
 		name_len *= 2;
-	} else {               
+	} else {
 		name_len = strnlen(searchName, PATH_MAX);
 		name_len++;     /* trailing null */
 		strncpy(pSMB->FileName, searchName, name_len);
 	}
 	pSMB->BufferFormat = 0x04;
-	name_len++; /* account for buffer type byte */	
+	name_len++; /* account for buffer type byte */
 	pSMB->hdr.smb_buf_length += (__u16) name_len;
 	pSMB->ByteCount = cpu_to_le16(name_len);
 
 	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
 	if (rc) {
 		cFYI(1, ("Send error in QueryInfo = %d", rc));
 	} else if (pFinfo) {            /* decode response */
@@ -3127,17 +3223,17 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, 
+		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
 				     PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
-	} else {		/* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(searchName, PATH_MAX);
 		name_len++;	/* trailing null */
 		strncpy(pSMB->FileName, searchName, name_len);
 	}
 
-	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */ ;
+	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
 	pSMB->TotalDataCount = 0;
 	pSMB->MaxParameterCount = cpu_to_le16(2);
 	pSMB->MaxDataCount = cpu_to_le16(4000);	/* BB find exact max SMB PDU from sess structure BB */
@@ -3147,7 +3243,7 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	pSMB->ParameterOffset = cpu_to_le16(offsetof(
-        struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
 	pSMB->DataCount = 0;
 	pSMB->DataOffset = 0;
 	pSMB->SetupCount = 1;
@@ -3156,7 +3252,7 @@
 	byte_count = params + 1 /* pad */ ;
 	pSMB->TotalParameterCount = cpu_to_le16(params);
 	pSMB->ParameterCount = pSMB->TotalParameterCount;
-	if(legacy)
+	if (legacy)
 		pSMB->InformationLevel = cpu_to_le16(SMB_INFO_STANDARD);
 	else
 		pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
@@ -3173,16 +3269,18 @@
 
 		if (rc) /* BB add auto retry on EOPNOTSUPP? */
 			rc = -EIO;
-		else if (!legacy && (pSMBr->ByteCount < 40)) 
+		else if (!legacy && (pSMBr->ByteCount < 40))
 			rc = -EIO;	/* bad smb */
-		else if(legacy && (pSMBr->ByteCount < 24))
-			rc = -EIO;  /* 24 or 26 expected but we do not read last field */
-		else if (pFindData){
+		else if (legacy && (pSMBr->ByteCount < 24))
+			rc = -EIO;  /* 24 or 26 expected but we do not read
+					last field */
+		else if (pFindData) {
 			int size;
 			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
-			if(legacy) /* we do not read the last field, EAsize, fortunately
-					   since it varies by subdialect and on Set vs. Get, is  
-					   two bytes or 4 bytes depending but we don't care here */
+			if (legacy) /* we do not read the last field, EAsize,
+				       fortunately since it varies by subdialect
+				       and on Set vs. Get, is two bytes or 4
+				       bytes depending but we don't care here */
 				size = sizeof(FILE_INFO_STANDARD);
 			else
 				size = sizeof(FILE_ALL_INFO);
@@ -3226,24 +3324,24 @@
 				  PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
-	} else {		/* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(searchName, PATH_MAX);
 		name_len++;	/* trailing null */
 		strncpy(pSMB->FileName, searchName, name_len);
 	}
 
-	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */ ;
+	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
 	pSMB->TotalDataCount = 0;
 	pSMB->MaxParameterCount = cpu_to_le16(2);
 	/* BB find exact max SMB PDU from sess structure BB */
-	pSMB->MaxDataCount = cpu_to_le16(4000); 
+	pSMB->MaxDataCount = cpu_to_le16(4000);
 	pSMB->MaxSetupCount = 0;
 	pSMB->Reserved = 0;
 	pSMB->Flags = 0;
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	pSMB->ParameterOffset = cpu_to_le16(offsetof(
-        struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
 	pSMB->DataCount = 0;
 	pSMB->DataOffset = 0;
 	pSMB->SetupCount = 1;
@@ -3303,12 +3401,11 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, PATH_MAX
-				  /* find define for this maxpathcomponent */
-				  , nls_codepage);
+		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
+				     PATH_MAX, nls_codepage);
 		name_len++;	/* trailing null */
 		name_len *= 2;
-	} else {		/* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(searchName, PATH_MAX);
 		name_len++;	/* trailing null */
 		strncpy(pSMB->FileName, searchName, name_len);
@@ -3324,7 +3421,7 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	pSMB->ParameterOffset = cpu_to_le16(
-         offsetof(struct smb_com_transaction2_ffirst_req,InformationLevel) - 4);
+	 offsetof(struct smb_com_transaction2_ffirst_req, InformationLevel)-4);
 	pSMB->DataCount = 0;
 	pSMB->DataOffset = 0;
 	pSMB->SetupCount = 1;	/* one byte, no need to le convert */
@@ -3364,10 +3461,10 @@
 /* xid, tcon, searchName and codepage are input parms, rest are returned */
 int
 CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
-	      const char *searchName, 
+	      const char *searchName,
 	      const struct nls_table *nls_codepage,
-	      __u16 *	pnetfid,
-	      struct cifs_search_info * psrch_inf, int remap, const char dirsep)
+	      __u16 *pnetfid,
+	      struct cifs_search_info *psrch_inf, int remap, const char dirsep)
 {
 /* level 257 SMB_ */
 	TRANSACTION2_FFIRST_REQ *pSMB = NULL;
@@ -3378,7 +3475,7 @@
 	int name_len;
 	__u16 params, byte_count;
 
-	cFYI(1, ("In FindFirst for %s",searchName));
+	cFYI(1, ("In FindFirst for %s", searchName));
 
 findFirstRetry:
 	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
@@ -3388,7 +3485,7 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName,searchName,
+		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
 				 PATH_MAX, nls_codepage, remap);
 		/* We can not add the asterik earlier in case
 		it got remapped to 0xF03A as if it were part of the
@@ -3405,7 +3502,7 @@
 	} else {	/* BB add check for overrun of SMB buf BB */
 		name_len = strnlen(searchName, PATH_MAX);
 /* BB fix here and in unicode clause above ie
-		if(name_len > buffersize-header)
+		if (name_len > buffersize-header)
 			free buffer exit; BB */
 		strncpy(pSMB->FileName, searchName, name_len);
 		pSMB->FileName[name_len] = dirsep;
@@ -3438,8 +3535,8 @@
 	pSMB->SearchAttributes =
 	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
 			ATTR_DIRECTORY);
-	pSMB->SearchCount= cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
-	pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | 
+	pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
+	pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END |
 		CIFS_SEARCH_RETURN_RESUME);
 	pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
 
@@ -3466,7 +3563,7 @@
 	} else { /* decode response */
 		/* BB remember to free buffer if error BB */
 		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
-		if(rc == 0) {
+		if (rc == 0) {
 			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
 				psrch_inf->unicode = TRUE;
 			else
@@ -3474,18 +3571,19 @@
 
 			psrch_inf->ntwrk_buf_start = (char *)pSMBr;
 			psrch_inf->smallBuf = 0;
-			psrch_inf->srch_entries_start = 
-				(char *) &pSMBr->hdr.Protocol + 
+			psrch_inf->srch_entries_start =
+				(char *) &pSMBr->hdr.Protocol +
 					le16_to_cpu(pSMBr->t2.DataOffset);
 			parms = (T2_FFIRST_RSP_PARMS *)((char *) &pSMBr->hdr.Protocol +
 			       le16_to_cpu(pSMBr->t2.ParameterOffset));
 
-			if(parms->EndofSearch)
+			if (parms->EndofSearch)
 				psrch_inf->endOfSearch = TRUE;
 			else
 				psrch_inf->endOfSearch = FALSE;
 
-			psrch_inf->entries_in_buffer  = le16_to_cpu(parms->SearchCount);
+			psrch_inf->entries_in_buffer =
+					le16_to_cpu(parms->SearchCount);
 			psrch_inf->index_of_last_entry = 2 /* skip . and .. */ +
 				psrch_inf->entries_in_buffer;
 			*pnetfid = parms->SearchHandle;
@@ -3498,7 +3596,7 @@
 }
 
 int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
-            __u16 searchHandle, struct cifs_search_info * psrch_inf)
+		 __u16 searchHandle, struct cifs_search_info *psrch_inf)
 {
 	TRANSACTION2_FNEXT_REQ *pSMB = NULL;
 	TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
@@ -3510,7 +3608,7 @@
 
 	cFYI(1, ("In FindNext"));
 
-	if(psrch_inf->endOfSearch == TRUE)
+	if (psrch_inf->endOfSearch == TRUE)
 		return -ENOENT;
 
 	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
@@ -3518,12 +3616,13 @@
 	if (rc)
 		return rc;
 
-	params = 14;    /* includes 2 bytes of null string, converted to LE below */
+	params = 14; /* includes 2 bytes of null string, converted to LE below*/
 	byte_count = 0;
 	pSMB->TotalDataCount = 0;       /* no EAs */
 	pSMB->MaxParameterCount = cpu_to_le16(8);
 	pSMB->MaxDataCount =
-            cpu_to_le16((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
+		cpu_to_le16((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) &
+				0xFFFFFF00);
 	pSMB->MaxSetupCount = 0;
 	pSMB->Reserved = 0;
 	pSMB->Flags = 0;
@@ -3539,15 +3638,6 @@
 	pSMB->SearchHandle = searchHandle;      /* always kept as le */
 	pSMB->SearchCount =
 		cpu_to_le16(CIFSMaxBufSize / sizeof (FILE_UNIX_INFO));
-	/* test for Unix extensions */
-/*	if (tcon->ses->capabilities & CAP_UNIX) {
-		pSMB->InformationLevel = cpu_to_le16(SMB_FIND_FILE_UNIX);
-		psrch_inf->info_level = SMB_FIND_FILE_UNIX;
-	} else {
-		pSMB->InformationLevel =
-		   cpu_to_le16(SMB_FIND_FILE_DIRECTORY_INFO);
-		psrch_inf->info_level = SMB_FIND_FILE_DIRECTORY_INFO;
-	} */
 	pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
 	pSMB->ResumeKey = psrch_inf->resume_key;
 	pSMB->SearchFlags =
@@ -3555,7 +3645,7 @@
 
 	name_len = psrch_inf->resume_name_len;
 	params += name_len;
-	if(name_len < PATH_MAX) {
+	if (name_len < PATH_MAX) {
 		memcpy(pSMB->ResumeFileName, psrch_inf->presume_name, name_len);
 		byte_count += name_len;
 		/* 14 byte parm len above enough for 2 byte null terminator */
@@ -3570,20 +3660,20 @@
 	pSMB->ParameterCount = pSMB->TotalParameterCount;
 	pSMB->hdr.smb_buf_length += byte_count;
 	pSMB->ByteCount = cpu_to_le16(byte_count);
-                                                                                              
+
 	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
 			(struct smb_hdr *) pSMBr, &bytes_returned, 0);
 	cifs_stats_inc(&tcon->num_fnext);
 	if (rc) {
 		if (rc == -EBADF) {
 			psrch_inf->endOfSearch = TRUE;
-			rc = 0; /* search probably was closed at end of search above */
+			rc = 0; /* search probably was closed at end of search*/
 		} else
 			cFYI(1, ("FindNext returned = %d", rc));
 	} else {                /* decode response */
 		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
-		
-		if(rc == 0) {
+
+		if (rc == 0) {
 			/* BB fixme add lock for file (srch_info) struct here */
 			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
 				psrch_inf->unicode = TRUE;
@@ -3594,7 +3684,7 @@
 			parms = (T2_FNEXT_RSP_PARMS *)response_data;
 			response_data = (char *)&pSMBr->hdr.Protocol +
 				le16_to_cpu(pSMBr->t2.DataOffset);
-			if(psrch_inf->smallBuf)
+			if (psrch_inf->smallBuf)
 				cifs_small_buf_release(
 					psrch_inf->ntwrk_buf_start);
 			else
@@ -3602,15 +3692,16 @@
 			psrch_inf->srch_entries_start = response_data;
 			psrch_inf->ntwrk_buf_start = (char *)pSMB;
 			psrch_inf->smallBuf = 0;
-			if(parms->EndofSearch)
+			if (parms->EndofSearch)
 				psrch_inf->endOfSearch = TRUE;
 			else
 				psrch_inf->endOfSearch = FALSE;
-                                                                                              
-			psrch_inf->entries_in_buffer  = le16_to_cpu(parms->SearchCount);
+			psrch_inf->entries_in_buffer =
+						le16_to_cpu(parms->SearchCount);
 			psrch_inf->index_of_last_entry +=
 				psrch_inf->entries_in_buffer;
-/*  cFYI(1,("fnxt2 entries in buf %d index_of_last %d",psrch_inf->entries_in_buffer,psrch_inf->index_of_last_entry)); */
+/*  cFYI(1,("fnxt2 entries in buf %d index_of_last %d",
+	    psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry)); */
 
 			/* BB fixme add unlock here */
 		}
@@ -3625,12 +3716,12 @@
 FNext2_err_exit:
 	if (rc != 0)
 		cifs_buf_release(pSMB);
-                                                                                              
 	return rc;
 }
 
 int
-CIFSFindClose(const int xid, struct cifsTconInfo *tcon, const __u16 searchHandle)
+CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
+	      const __u16 searchHandle)
 {
 	int rc = 0;
 	FINDCLOSE_REQ *pSMB = NULL;
@@ -3642,7 +3733,7 @@
 
 	/* no sense returning error if session restarted
 		as file handle has been closed */
-	if(rc == -EAGAIN)
+	if (rc == -EAGAIN)
 		return 0;
 	if (rc)
 		return rc;
@@ -3667,9 +3758,9 @@
 
 int
 CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
-                const unsigned char *searchName,
-                __u64 * inode_number,
-                const struct nls_table *nls_codepage, int remap)
+		      const unsigned char *searchName,
+		      __u64 * inode_number,
+		      const struct nls_table *nls_codepage, int remap)
 {
 	int rc = 0;
 	TRANSACTION2_QPI_REQ *pSMB = NULL;
@@ -3677,24 +3768,23 @@
 	int name_len, bytes_returned;
 	__u16 params, byte_count;
 
-	cFYI(1,("In GetSrvInodeNum for %s",searchName));
-	if(tcon == NULL)
-		return -ENODEV; 
+	cFYI(1, ("In GetSrvInodeNum for %s", searchName));
+	if (tcon == NULL)
+		return -ENODEV;
 
 GetInodeNumberRetry:
 	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
-                      (void **) &pSMBr);
+		      (void **) &pSMBr);
 	if (rc)
 		return rc;
 
-
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
 			cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-				PATH_MAX,nls_codepage, remap);
+					 PATH_MAX, nls_codepage, remap);
 		name_len++;     /* trailing null */
 		name_len *= 2;
-	} else {                /* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(searchName, PATH_MAX);
 		name_len++;     /* trailing null */
 		strncpy(pSMB->FileName, searchName, name_len);
@@ -3711,7 +3801,7 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	pSMB->ParameterOffset = cpu_to_le16(offsetof(
-		struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+		struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
 	pSMB->DataCount = 0;
 	pSMB->DataOffset = 0;
 	pSMB->SetupCount = 1;
@@ -3737,12 +3827,12 @@
 			/* If rc should we check for EOPNOSUPP and
 			disable the srvino flag? or in caller? */
 			rc = -EIO;      /* bad smb */
-                else {
+		else {
 			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
 			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
-			struct file_internal_info * pfinfo;
+			struct file_internal_info *pfinfo;
 			/* BB Do we need a cast or hash here ? */
-			if(count < 8) {
+			if (count < 8) {
 				cFYI(1, ("Illegal size ret in QryIntrnlInf"));
 				rc = -EIO;
 				goto GetInodeNumOut;
@@ -3769,12 +3859,12 @@
 /* TRANS2_GET_DFS_REFERRAL */
 	TRANSACTION2_GET_DFS_REFER_REQ *pSMB = NULL;
 	TRANSACTION2_GET_DFS_REFER_RSP *pSMBr = NULL;
-	struct dfs_referral_level_3 * referrals = NULL;
+	struct dfs_referral_level_3 *referrals = NULL;
 	int rc = 0;
 	int bytes_returned;
 	int name_len;
 	unsigned int i;
-	char * temp;
+	char *temp;
 	__u16 params, byte_count;
 	*number_of_UNC_in_array = 0;
 	*targetUNCs = NULL;
@@ -3787,8 +3877,8 @@
 		      (void **) &pSMBr);
 	if (rc)
 		return rc;
-	
-	/* server pointer checked in called function, 
+
+	/* server pointer checked in called function,
 	but should never be null here anyway */
 	pSMB->hdr.Mid = GetNextMid(ses->server);
 	pSMB->hdr.Tid = ses->ipc_tid;
@@ -3807,19 +3897,19 @@
 				     searchName, PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
-	} else {		/* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(searchName, PATH_MAX);
 		name_len++;	/* trailing null */
 		strncpy(pSMB->RequestFileName, searchName, name_len);
 	}
 
-	if(ses->server) {
-		if(ses->server->secMode &
+	if (ses->server) {
+		if (ses->server->secMode &
 		   (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
 			pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 	}
 
-        pSMB->hdr.Uid = ses->Suid;
+	pSMB->hdr.Uid = ses->Suid;
 
 	params = 2 /* level */  + name_len /*includes null */ ;
 	pSMB->TotalDataCount = 0;
@@ -3833,7 +3923,7 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	pSMB->ParameterOffset = cpu_to_le16(offsetof(
-        struct smb_com_transaction2_get_dfs_refer_req, MaxReferralLevel) - 4);
+	  struct smb_com_transaction2_get_dfs_refer_req, MaxReferralLevel) - 4);
 	pSMB->SetupCount = 1;
 	pSMB->Reserved3 = 0;
 	pSMB->SubCommand = cpu_to_le16(TRANS2_GET_DFS_REFERRAL);
@@ -3852,74 +3942,87 @@
 /* BB Add logic to parse referrals here */
 		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 
-		if (rc || (pSMBr->ByteCount < 17))      /* BB also check enough total bytes returned */
+		/* BB Also check if enough total bytes returned? */
+		if (rc || (pSMBr->ByteCount < 17))
 			rc = -EIO;      /* bad smb */
 		else {
-			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); 
+			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
 			__u16 data_count = le16_to_cpu(pSMBr->t2.DataCount);
 
 			cFYI(1,
-			     ("Decoding GetDFSRefer response.  BCC: %d  Offset %d",
+			    ("Decoding GetDFSRefer response BCC: %d  Offset %d",
 			      pSMBr->ByteCount, data_offset));
-			referrals = 
-			    (struct dfs_referral_level_3 *) 
+			referrals =
+			    (struct dfs_referral_level_3 *)
 					(8 /* sizeof start of data block */ +
 					data_offset +
-					(char *) &pSMBr->hdr.Protocol); 
-			cFYI(1,("num_referrals: %d dfs flags: 0x%x ... \nfor referral one refer size: 0x%x srv type: 0x%x refer flags: 0x%x ttl: 0x%x",
-				le16_to_cpu(pSMBr->NumberOfReferrals),le16_to_cpu(pSMBr->DFSFlags), le16_to_cpu(referrals->ReferralSize),le16_to_cpu(referrals->ServerType),le16_to_cpu(referrals->ReferralFlags),le16_to_cpu(referrals->TimeToLive)));
+					(char *) &pSMBr->hdr.Protocol);
+			cFYI(1, ("num_referrals: %d dfs flags: 0x%x ... \n"
+				"for referral one refer size: 0x%x srv "
+				"type: 0x%x refer flags: 0x%x ttl: 0x%x",
+				le16_to_cpu(pSMBr->NumberOfReferrals),
+				le16_to_cpu(pSMBr->DFSFlags),
+				le16_to_cpu(referrals->ReferralSize),
+				le16_to_cpu(referrals->ServerType),
+				le16_to_cpu(referrals->ReferralFlags),
+				le16_to_cpu(referrals->TimeToLive)));
 			/* BB This field is actually two bytes in from start of
 			   data block so we could do safety check that DataBlock
 			   begins at address of pSMBr->NumberOfReferrals */
-			*number_of_UNC_in_array = le16_to_cpu(pSMBr->NumberOfReferrals);
+			*number_of_UNC_in_array =
+					le16_to_cpu(pSMBr->NumberOfReferrals);
 
 			/* BB Fix below so can return more than one referral */
-			if(*number_of_UNC_in_array > 1)
+			if (*number_of_UNC_in_array > 1)
 				*number_of_UNC_in_array = 1;
 
 			/* get the length of the strings describing refs */
 			name_len = 0;
-			for(i=0;i<*number_of_UNC_in_array;i++) {
+			for (i = 0; i < *number_of_UNC_in_array; i++) {
 				/* make sure that DfsPathOffset not past end */
-				__u16 offset = le16_to_cpu(referrals->DfsPathOffset);
+				__u16 offset =
+					le16_to_cpu(referrals->DfsPathOffset);
 				if (offset > data_count) {
-					/* if invalid referral, stop here and do 
+					/* if invalid referral, stop here and do
 					not try to copy any more */
 					*number_of_UNC_in_array = i;
 					break;
-				} 
+				}
 				temp = ((char *)referrals) + offset;
 
 				if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
-					name_len += UniStrnlen((wchar_t *)temp,data_count);
+					name_len += UniStrnlen((wchar_t *)temp,
+								data_count);
 				} else {
-					name_len += strnlen(temp,data_count);
+					name_len += strnlen(temp, data_count);
 				}
 				referrals++;
-				/* BB add check that referral pointer does not fall off end PDU */
-				
+				/* BB add check that referral pointer does
+				   not fall off end PDU */
 			}
 			/* BB add check for name_len bigger than bcc */
-			*targetUNCs = 
-				kmalloc(name_len+1+ (*number_of_UNC_in_array),GFP_KERNEL);
-			if(*targetUNCs == NULL) {
+			*targetUNCs =
+				kmalloc(name_len+1+(*number_of_UNC_in_array),
+					GFP_KERNEL);
+			if (*targetUNCs == NULL) {
 				rc = -ENOMEM;
 				goto GetDFSRefExit;
 			}
 			/* copy the ref strings */
-			referrals =  
-			    (struct dfs_referral_level_3 *) 
-					(8 /* sizeof data hdr */ +
-					data_offset + 
+			referrals = (struct dfs_referral_level_3 *)
+					(8 /* sizeof data hdr */ + data_offset +
 					(char *) &pSMBr->hdr.Protocol);
 
-			for(i=0;i<*number_of_UNC_in_array;i++) {
-				temp = ((char *)referrals) + le16_to_cpu(referrals->DfsPathOffset);
+			for (i = 0; i < *number_of_UNC_in_array; i++) {
+				temp = ((char *)referrals) +
+					  le16_to_cpu(referrals->DfsPathOffset);
 				if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
 					cifs_strfromUCS_le(*targetUNCs,
-						(__le16 *) temp, name_len, nls_codepage);
+							  (__le16 *) temp,
+							  name_len,
+							  nls_codepage);
 				} else {
-					strncpy(*targetUNCs,temp,name_len);
+					strncpy(*targetUNCs, temp, name_len);
 				}
 				/*  BB update target_uncs pointers */
 				referrals++;
@@ -3996,18 +4099,17 @@
 			rc = -EIO;      /* bad smb */
 		else {
 			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
-			cFYI(1,("qfsinf resp BCC: %d  Offset %d",
+			cFYI(1, ("qfsinf resp BCC: %d  Offset %d",
 				 pSMBr->ByteCount, data_offset));
 
-			response_data =
-				(FILE_SYSTEM_ALLOC_INFO *) 
+			response_data = (FILE_SYSTEM_ALLOC_INFO *)
 				(((char *) &pSMBr->hdr.Protocol) + data_offset);
 			FSData->f_bsize =
 				le16_to_cpu(response_data->BytesPerSector) *
 				le32_to_cpu(response_data->
 					SectorsPerAllocationUnit);
 			FSData->f_blocks =
-				le32_to_cpu(response_data->TotalAllocationUnits);
+			       le32_to_cpu(response_data->TotalAllocationUnits);
 			FSData->f_bfree = FSData->f_bavail =
 				le32_to_cpu(response_data->FreeAllocationUnits);
 			cFYI(1,
@@ -4056,7 +4158,7 @@
 	pSMB->TotalParameterCount = cpu_to_le16(params);
 	pSMB->ParameterCount = pSMB->TotalParameterCount;
 	pSMB->ParameterOffset = cpu_to_le16(offsetof(
-        struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+		struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
 	pSMB->DataCount = 0;
 	pSMB->DataOffset = 0;
 	pSMB->SetupCount = 1;
@@ -4071,7 +4173,7 @@
 	if (rc) {
 		cFYI(1, ("Send error in QFSInfo = %d", rc));
 	} else {		/* decode response */
-                rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 
 		if (rc || (pSMBr->ByteCount < 24))
 			rc = -EIO;	/* bad smb */
@@ -4136,7 +4238,7 @@
 	pSMB->TotalParameterCount = cpu_to_le16(params);
 	pSMB->ParameterCount = pSMB->TotalParameterCount;
 	pSMB->ParameterOffset = cpu_to_le16(offsetof(
-        struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+		struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
 	pSMB->DataCount = 0;
 	pSMB->DataOffset = 0;
 	pSMB->SetupCount = 1;
@@ -4153,7 +4255,8 @@
 	} else {		/* decode response */
 		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 
-		if (rc || (pSMBr->ByteCount < 13)) {	/* BB also check enough bytes returned */
+		if (rc || (pSMBr->ByteCount < 13)) {
+			/* BB also check if enough bytes returned */
 			rc = -EIO;	/* bad smb */
 		} else {
 			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
@@ -4204,7 +4307,7 @@
 	pSMB->TotalParameterCount = cpu_to_le16(params);
 	pSMB->ParameterCount = pSMB->TotalParameterCount;
 	pSMB->ParameterOffset = cpu_to_le16(offsetof(
-        struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+		struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
 
 	pSMB->DataCount = 0;
 	pSMB->DataOffset = 0;
@@ -4274,8 +4377,8 @@
 	byte_count = params + 1 /* pad */ ;
 	pSMB->ParameterCount = cpu_to_le16(params);
 	pSMB->TotalParameterCount = pSMB->ParameterCount;
-	pSMB->ParameterOffset = cpu_to_le16(offsetof(struct 
-        smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+	pSMB->ParameterOffset = cpu_to_le16(offsetof(struct
+			smb_com_transaction2_qfsi_req, InformationLevel) - 4);
 	pSMB->SetupCount = 1;
 	pSMB->Reserved3 = 0;
 	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
@@ -4335,7 +4438,8 @@
 	pSMB->Flags = 0;
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
-	param_offset = offsetof(struct smb_com_transaction2_setfsi_req, FileNum) - 4;
+	param_offset = offsetof(struct smb_com_transaction2_setfsi_req, FileNum)
+				- 4;
 	offset = param_offset + params;
 
 	pSMB->MaxParameterCount = cpu_to_le16(4);
@@ -4417,8 +4521,8 @@
 	byte_count = params + 1 /* pad */ ;
 	pSMB->ParameterCount = cpu_to_le16(params);
 	pSMB->TotalParameterCount = pSMB->ParameterCount;
-	pSMB->ParameterOffset = cpu_to_le16(offsetof(struct 
-        smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+	pSMB->ParameterOffset = cpu_to_le16(offsetof(struct
+			smb_com_transaction2_qfsi_req, InformationLevel) - 4);
 	pSMB->SetupCount = 1;
 	pSMB->Reserved3 = 0;
 	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
@@ -4447,18 +4551,18 @@
 					le64_to_cpu(response_data->TotalBlocks);
 			FSData->f_bfree =
 			    le64_to_cpu(response_data->BlocksAvail);
-			if(response_data->UserBlocksAvail == cpu_to_le64(-1)) {
+			if (response_data->UserBlocksAvail == cpu_to_le64(-1)) {
 				FSData->f_bavail = FSData->f_bfree;
 			} else {
 				FSData->f_bavail =
-					le64_to_cpu(response_data->UserBlocksAvail);
+				    le64_to_cpu(response_data->UserBlocksAvail);
 			}
-			if(response_data->TotalFileNodes != cpu_to_le64(-1))
+			if (response_data->TotalFileNodes != cpu_to_le64(-1))
 				FSData->f_files =
-					le64_to_cpu(response_data->TotalFileNodes);
-			if(response_data->FreeFileNodes != cpu_to_le64(-1))
+				     le64_to_cpu(response_data->TotalFileNodes);
+			if (response_data->FreeFileNodes != cpu_to_le64(-1))
 				FSData->f_ffree =
-					le64_to_cpu(response_data->FreeFileNodes);
+				      le64_to_cpu(response_data->FreeFileNodes);
 		}
 	}
 	cifs_buf_release(pSMB);
@@ -4470,15 +4574,15 @@
 }
 
 
-/* We can not use write of zero bytes trick to 
-   set file size due to need for large file support.  Also note that 
-   this SetPathInfo is preferred to SetFileInfo based method in next 
+/* We can not use write of zero bytes trick to
+   set file size due to need for large file support.  Also note that
+   this SetPathInfo is preferred to SetFileInfo based method in next
    routine which is only needed to work around a sharing violation bug
    in Samba which this routine can run into */
 
 int
 CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName,
-	      __u64 size, int SetAllocation, 
+	      __u64 size, int SetAllocation,
 	      const struct nls_table *nls_codepage, int remap)
 {
 	struct smb_com_transaction2_spi_req *pSMB = NULL;
@@ -4517,22 +4621,22 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	param_offset = offsetof(struct smb_com_transaction2_spi_req,
-                                     InformationLevel) - 4;
+				InformationLevel) - 4;
 	offset = param_offset + params;
-	if(SetAllocation) {
-        	if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
-	            pSMB->InformationLevel =
-                	cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2);
-        	else
-	            pSMB->InformationLevel =
-        	        cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO);
-	} else /* Set File Size */  {    
+	if (SetAllocation) {
+		if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+			pSMB->InformationLevel =
+				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2);
+		else
+			pSMB->InformationLevel =
+				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO);
+	} else /* Set File Size */  {
 	    if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
 		    pSMB->InformationLevel =
-		        cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
+				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
 	    else
 		    pSMB->InformationLevel =
-		        cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
+				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
 	}
 
 	parm_data =
@@ -4567,8 +4671,8 @@
 }
 
 int
-CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, 
-                   __u16 fid, __u32 pid_of_opener, int SetAllocation)
+CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
+		   __u16 fid, __u32 pid_of_opener, int SetAllocation)
 {
 	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
 	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
@@ -4589,7 +4693,7 @@
 
 	pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
 	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
-    
+
 	params = 6;
 	pSMB->MaxSetupCount = 0;
 	pSMB->Reserved = 0;
@@ -4599,7 +4703,7 @@
 	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
 	offset = param_offset + params;
 
-	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;	
+	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
 
 	count = sizeof(struct file_end_of_file_info);
 	pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -4614,25 +4718,25 @@
 	pSMB->TotalParameterCount = pSMB->ParameterCount;
 	pSMB->ParameterOffset = cpu_to_le16(param_offset);
 	parm_data =
-		(struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol) +
-			offset);
+		(struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol)
+				+ offset);
 	pSMB->DataOffset = cpu_to_le16(offset);
 	parm_data->FileSize = cpu_to_le64(size);
 	pSMB->Fid = fid;
-	if(SetAllocation) {
+	if (SetAllocation) {
 		if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
 			pSMB->InformationLevel =
 				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2);
 		else
 			pSMB->InformationLevel =
 				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO);
-	} else /* Set File Size */  {    
+	} else /* Set File Size */  {
 	    if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
 		    pSMB->InformationLevel =
-		        cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
+				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
 	    else
 		    pSMB->InformationLevel =
-		        cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
+				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
 	}
 	pSMB->Reserved4 = 0;
 	pSMB->hdr.smb_buf_length += byte_count;
@@ -4648,21 +4752,21 @@
 	if (pSMB)
 		cifs_small_buf_release(pSMB);
 
-	/* Note: On -EAGAIN error only caller can retry on handle based calls 
+	/* Note: On -EAGAIN error only caller can retry on handle based calls
 		since file handle passed in no longer valid */
 
 	return rc;
 }
 
-/* Some legacy servers such as NT4 require that the file times be set on 
+/* Some legacy servers such as NT4 require that the file times be set on
    an open handle, rather than by pathname - this is awkward due to
    potential access conflicts on the open, but it is unavoidable for these
    old servers since the only other choice is to go from 100 nanosecond DCE
    time and resort to the original setpathinfo level which takes the ancient
    DOS time format with 2 second granularity */
 int
-CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, const FILE_BASIC_INFO * data, 
-                   __u16 fid)
+CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon,
+		    const FILE_BASIC_INFO *data, __u16 fid)
 {
 	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
 	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
@@ -4684,7 +4788,7 @@
 	use an existing handle (rather than opening one on the fly) */
 	/* pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
 	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));*/
-    
+
 	params = 6;
 	pSMB->MaxSetupCount = 0;
 	pSMB->Reserved = 0;
@@ -4694,7 +4798,7 @@
 	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
 	offset = param_offset + params;
 
-	data_offset = (char *) (&pSMB->hdr.Protocol) + offset; 
+	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
 
 	count = sizeof (FILE_BASIC_INFO);
 	pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -4717,16 +4821,16 @@
 	pSMB->Reserved4 = 0;
 	pSMB->hdr.smb_buf_length += byte_count;
 	pSMB->ByteCount = cpu_to_le16(byte_count);
-	memcpy(data_offset,data,sizeof(FILE_BASIC_INFO));
+	memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
 	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
 			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
 	if (rc) {
-		cFYI(1,("Send error in Set Time (SetFileInfo) = %d",rc));
+		cFYI(1, ("Send error in Set Time (SetFileInfo) = %d", rc));
 	}
 
 	cifs_small_buf_release(pSMB);
 
-	/* Note: On -EAGAIN error only caller can retry on handle based calls 
+	/* Note: On -EAGAIN error only caller can retry on handle based calls
 		since file handle passed in no longer valid */
 
 	return rc;
@@ -4735,7 +4839,7 @@
 
 int
 CIFSSMBSetTimes(const int xid, struct cifsTconInfo *tcon, const char *fileName,
-		const FILE_BASIC_INFO * data, 
+		const FILE_BASIC_INFO *data,
 		const struct nls_table *nls_codepage, int remap)
 {
 	TRANSACTION2_SPI_REQ *pSMB = NULL;
@@ -4760,7 +4864,7 @@
 				     PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
-	} else {		/* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(fileName, PATH_MAX);
 		name_len++;	/* trailing null */
 		strncpy(pSMB->FileName, fileName, name_len);
@@ -4776,7 +4880,7 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	param_offset = offsetof(struct smb_com_transaction2_spi_req,
-                                     InformationLevel) - 4;
+				InformationLevel) - 4;
 	offset = param_offset + params;
 	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
 	pSMB->ParameterOffset = cpu_to_le16(param_offset);
@@ -4837,11 +4941,11 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-			ConvertToUCS((__le16 *) pSMB->fileName, fileName, 
+			ConvertToUCS((__le16 *) pSMB->fileName, fileName,
 				PATH_MAX, nls_codepage);
 		name_len++;     /* trailing null */
 		name_len *= 2;
-	} else {                /* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(fileName, PATH_MAX);
 		name_len++;     /* trailing null */
 		strncpy(pSMB->fileName, fileName, name_len);
@@ -4867,8 +4971,8 @@
 
 int
 CIFSSMBUnixSetPerms(const int xid, struct cifsTconInfo *tcon,
-		    char *fileName, __u64 mode, __u64 uid, __u64 gid, 
-		    dev_t device, const struct nls_table *nls_codepage, 
+		    char *fileName, __u64 mode, __u64 uid, __u64 gid,
+		    dev_t device, const struct nls_table *nls_codepage,
 		    int remap)
 {
 	TRANSACTION2_SPI_REQ *pSMB = NULL;
@@ -4888,7 +4992,7 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, 
+		    cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
 				     PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
@@ -4908,7 +5012,7 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	param_offset = offsetof(struct smb_com_transaction2_spi_req,
-                                     InformationLevel) - 4;
+				InformationLevel) - 4;
 	offset = param_offset + params;
 	data_offset =
 	    (FILE_UNIX_BASIC_INFO *) ((char *) &pSMB->hdr.Protocol +
@@ -4931,7 +5035,7 @@
 	older clients, but we should be precise - we use SetFileSize to
 	set file size and do not want to truncate file size to zero
 	accidently as happened on one Samba server beta by putting
-	zero instead of -1 here */ 
+	zero instead of -1 here */
 	data_offset->EndOfFile = NO_CHANGE_64;
 	data_offset->NumOfBytes = NO_CHANGE_64;
 	data_offset->LastStatusChange = NO_CHANGE_64;
@@ -4943,20 +5047,20 @@
 	data_offset->DevMajor = cpu_to_le64(MAJOR(device));
 	data_offset->DevMinor = cpu_to_le64(MINOR(device));
 	data_offset->Permissions = cpu_to_le64(mode);
-    
-	if(S_ISREG(mode))
+
+	if (S_ISREG(mode))
 		data_offset->Type = cpu_to_le32(UNIX_FILE);
-	else if(S_ISDIR(mode))
+	else if (S_ISDIR(mode))
 		data_offset->Type = cpu_to_le32(UNIX_DIR);
-	else if(S_ISLNK(mode))
+	else if (S_ISLNK(mode))
 		data_offset->Type = cpu_to_le32(UNIX_SYMLINK);
-	else if(S_ISCHR(mode))
+	else if (S_ISCHR(mode))
 		data_offset->Type = cpu_to_le32(UNIX_CHARDEV);
-	else if(S_ISBLK(mode))
+	else if (S_ISBLK(mode))
 		data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV);
-	else if(S_ISFIFO(mode))
+	else if (S_ISFIFO(mode))
 		data_offset->Type = cpu_to_le32(UNIX_FIFO);
-	else if(S_ISSOCK(mode))
+	else if (S_ISSOCK(mode))
 		data_offset->Type = cpu_to_le32(UNIX_SOCKET);
 
 
@@ -4974,20 +5078,20 @@
 	return rc;
 }
 
-int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon, 
+int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
 		  const int notify_subdirs, const __u16 netfid,
-		  __u32 filter, struct file * pfile, int multishot, 
+		  __u32 filter, struct file *pfile, int multishot,
 		  const struct nls_table *nls_codepage)
 {
 	int rc = 0;
-	struct smb_com_transaction_change_notify_req * pSMB = NULL;
-	struct smb_com_ntransaction_change_notify_rsp * pSMBr = NULL;
+	struct smb_com_transaction_change_notify_req *pSMB = NULL;
+	struct smb_com_ntransaction_change_notify_rsp *pSMBr = NULL;
 	struct dir_notify_req *dnotify_req;
 	int bytes_returned;
 
-	cFYI(1, ("In CIFSSMBNotify for file handle %d",(int)netfid));
+	cFYI(1, ("In CIFSSMBNotify for file handle %d", (int)netfid));
 	rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
-                      (void **) &pSMBr);
+		      (void **) &pSMBr);
 	if (rc)
 		return rc;
 
@@ -5008,7 +5112,7 @@
 	pSMB->SetupCount = 4; /* single byte does not need le conversion */
 	pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_NOTIFY_CHANGE);
 	pSMB->ParameterCount = pSMB->TotalParameterCount;
-	if(notify_subdirs)
+	if (notify_subdirs)
 		pSMB->WatchTree = 1; /* one byte - no le conversion needed */
 	pSMB->Reserved2 = 0;
 	pSMB->CompletionFilter = cpu_to_le32(filter);
@@ -5021,11 +5125,11 @@
 		cFYI(1, ("Error in Notify = %d", rc));
 	} else {
 		/* Add file to outstanding requests */
-		/* BB change to kmem cache alloc */	
+		/* BB change to kmem cache alloc */
 		dnotify_req = kmalloc(
 						sizeof(struct dir_notify_req),
 						 GFP_KERNEL);
-		if(dnotify_req) {
+		if (dnotify_req) {
 			dnotify_req->Pid = pSMB->hdr.Pid;
 			dnotify_req->PidHigh = pSMB->hdr.PidHigh;
 			dnotify_req->Mid = pSMB->hdr.Mid;
@@ -5036,20 +5140,20 @@
 			dnotify_req->filter = filter;
 			dnotify_req->multishot = multishot;
 			spin_lock(&GlobalMid_Lock);
-			list_add_tail(&dnotify_req->lhead, 
+			list_add_tail(&dnotify_req->lhead,
 					&GlobalDnotifyReqList);
 			spin_unlock(&GlobalMid_Lock);
-		} else 
+		} else
 			rc = -ENOMEM;
 	}
 	cifs_buf_release(pSMB);
-	return rc;	
+	return rc;
 }
 #ifdef CONFIG_CIFS_XATTR
 ssize_t
 CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
 		 const unsigned char *searchName,
-		 char * EAData, size_t buf_size,
+		 char *EAData, size_t buf_size,
 		 const struct nls_table *nls_codepage, int remap)
 {
 		/* BB assumes one setup word */
@@ -5058,8 +5162,8 @@
 	int rc = 0;
 	int bytes_returned;
 	int name_len;
-	struct fea * temp_fea;
-	char * temp_ptr;
+	struct fea *temp_fea;
+	char *temp_ptr;
 	__u16 params, byte_count;
 
 	cFYI(1, ("In Query All EAs path %s", searchName));
@@ -5071,7 +5175,7 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, 
+		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
 				     PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
@@ -5081,7 +5185,7 @@
 		strncpy(pSMB->FileName, searchName, name_len);
 	}
 
-	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */ ;
+	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
 	pSMB->TotalDataCount = 0;
 	pSMB->MaxParameterCount = cpu_to_le16(2);
 	pSMB->MaxDataCount = cpu_to_le16(4000);	/* BB find exact max SMB PDU from sess structure BB */
@@ -5091,7 +5195,7 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	pSMB->ParameterOffset = cpu_to_le16(offsetof(
-        struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
 	pSMB->DataCount = 0;
 	pSMB->DataOffset = 0;
 	pSMB->SetupCount = 1;
@@ -5115,7 +5219,7 @@
 		/* BB also check enough total bytes returned */
 		/* BB we need to improve the validity checking
 		of these trans2 responses */
-		if (rc || (pSMBr->ByteCount < 4)) 
+		if (rc || (pSMBr->ByteCount < 4))
 			rc = -EIO;	/* bad smb */
 	   /* else if (pFindData){
 			memcpy((char *) pFindData,
@@ -5128,39 +5232,40 @@
 			/* check that each element of each entry does not
 			   go beyond end of list */
 			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
-			struct fealist * ea_response_data;
+			struct fealist *ea_response_data;
 			rc = 0;
 			/* validate_trans2_offsets() */
-			/* BB to check if(start of smb + data_offset > &bcc+ bcc)*/
+			/* BB check if start of smb + data_offset > &bcc+ bcc */
 			ea_response_data = (struct fealist *)
 				(((char *) &pSMBr->hdr.Protocol) +
 				data_offset);
 			name_len = le32_to_cpu(ea_response_data->list_len);
-			cFYI(1,("ea length %d", name_len));
-			if(name_len <= 8) {
+			cFYI(1, ("ea length %d", name_len));
+			if (name_len <= 8) {
 			/* returned EA size zeroed at top of function */
-				cFYI(1,("empty EA list returned from server"));
+				cFYI(1, ("empty EA list returned from server"));
 			} else {
 				/* account for ea list len */
 				name_len -= 4;
 				temp_fea = ea_response_data->list;
 				temp_ptr = (char *)temp_fea;
-				while(name_len > 0) {
+				while (name_len > 0) {
 					__u16 value_len;
 					name_len -= 4;
 					temp_ptr += 4;
 					rc += temp_fea->name_len;
 				/* account for prefix user. and trailing null */
-					rc = rc + 5 + 1; 
-					if(rc<(int)buf_size) {
-						memcpy(EAData,"user.",5);
-						EAData+=5;
-						memcpy(EAData,temp_ptr,temp_fea->name_len);
-						EAData+=temp_fea->name_len;
+					rc = rc + 5 + 1;
+					if (rc < (int)buf_size) {
+						memcpy(EAData, "user.", 5);
+						EAData += 5;
+						memcpy(EAData, temp_ptr,
+						       temp_fea->name_len);
+						EAData += temp_fea->name_len;
 						/* null terminate name */
 						*EAData = 0;
 						EAData = EAData + 1;
-					} else if(buf_size == 0) {
+					} else if (buf_size == 0) {
 						/* skip copy - calc size only */
 					} else {
 						/* stop before overrun buffer */
@@ -5172,11 +5277,15 @@
 					/* account for trailing null */
 					name_len--;
 					temp_ptr++;
-					value_len = le16_to_cpu(temp_fea->value_len);
+					value_len =
+					      le16_to_cpu(temp_fea->value_len);
 					name_len -= value_len;
 					temp_ptr += value_len;
-					/* BB check that temp_ptr is still within smb BB*/
-				/* no trailing null to account for in value len */
+					/* BB check that temp_ptr is still
+					      within the SMB BB*/
+
+					/* no trailing null to account for
+					   in value len */
 					/* go on to next EA */
 					temp_fea = (struct fea *)temp_ptr;
 				}
@@ -5191,9 +5300,9 @@
 	return (ssize_t)rc;
 }
 
-ssize_t CIFSSMBQueryEA(const int xid,struct cifsTconInfo * tcon,
-		const unsigned char * searchName,const unsigned char * ea_name,
-		unsigned char * ea_value, size_t buf_size, 
+ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon,
+		const unsigned char *searchName, const unsigned char *ea_name,
+		unsigned char *ea_value, size_t buf_size,
 		const struct nls_table *nls_codepage, int remap)
 {
 	TRANSACTION2_QPI_REQ *pSMB = NULL;
@@ -5201,8 +5310,8 @@
 	int rc = 0;
 	int bytes_returned;
 	int name_len;
-	struct fea * temp_fea;
-	char * temp_ptr;
+	struct fea *temp_fea;
+	char *temp_ptr;
 	__u16 params, byte_count;
 
 	cFYI(1, ("In Query EA path %s", searchName));
@@ -5214,7 +5323,7 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, 
+		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
 				     PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
@@ -5224,7 +5333,7 @@
 		strncpy(pSMB->FileName, searchName, name_len);
 	}
 
-	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */ ;
+	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
 	pSMB->TotalDataCount = 0;
 	pSMB->MaxParameterCount = cpu_to_le16(2);
 	pSMB->MaxDataCount = cpu_to_le16(4000);	/* BB find exact max SMB PDU from sess structure BB */
@@ -5234,7 +5343,7 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	pSMB->ParameterOffset = cpu_to_le16(offsetof(
-        struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+		struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
 	pSMB->DataCount = 0;
 	pSMB->DataOffset = 0;
 	pSMB->SetupCount = 1;
@@ -5258,7 +5367,7 @@
 		/* BB also check enough total bytes returned */
 		/* BB we need to improve the validity checking
 		of these trans2 responses */
-		if (rc || (pSMBr->ByteCount < 4)) 
+		if (rc || (pSMBr->ByteCount < 4))
 			rc = -EIO;	/* bad smb */
 	   /* else if (pFindData){
 			memcpy((char *) pFindData,
@@ -5271,18 +5380,18 @@
 			/* check that each element of each entry does not
 			   go beyond end of list */
 			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
-			struct fealist * ea_response_data;
+			struct fealist *ea_response_data;
 			rc = -ENODATA;
 			/* validate_trans2_offsets() */
-			/* BB to check if(start of smb + data_offset > &bcc+ bcc)*/
+			/* BB check if start of smb + data_offset > &bcc+ bcc*/
 			ea_response_data = (struct fealist *)
 				(((char *) &pSMBr->hdr.Protocol) +
 				data_offset);
 			name_len = le32_to_cpu(ea_response_data->list_len);
-			cFYI(1,("ea length %d", name_len));
-			if(name_len <= 8) {
+			cFYI(1, ("ea length %d", name_len));
+			if (name_len <= 8) {
 			/* returned EA size zeroed at top of function */
-				cFYI(1,("empty EA list returned from server"));
+				cFYI(1, ("empty EA list returned from server"));
 			} else {
 				/* account for ea list len */
 				name_len -= 4;
@@ -5290,28 +5399,30 @@
 				temp_ptr = (char *)temp_fea;
 				/* loop through checking if we have a matching
 				name and then return the associated value */
-				while(name_len > 0) {
+				while (name_len > 0) {
 					__u16 value_len;
 					name_len -= 4;
 					temp_ptr += 4;
-					value_len = le16_to_cpu(temp_fea->value_len);
-				/* BB validate that value_len falls within SMB, 
-				even though maximum for name_len is 255 */ 
-					if(memcmp(temp_fea->name,ea_name,
+					value_len =
+					      le16_to_cpu(temp_fea->value_len);
+				/* BB validate that value_len falls within SMB,
+				even though maximum for name_len is 255 */
+					if (memcmp(temp_fea->name, ea_name,
 						  temp_fea->name_len) == 0) {
 						/* found a match */
 						rc = value_len;
 				/* account for prefix user. and trailing null */
-						if(rc<=(int)buf_size) {
+						if (rc <= (int)buf_size) {
 							memcpy(ea_value,
 								temp_fea->name+temp_fea->name_len+1,
 								rc);
-							/* ea values, unlike ea names,
-							are not null terminated */
-						} else if(buf_size == 0) {
+							/* ea values, unlike ea
+							   names, are not null
+							   terminated */
+						} else if (buf_size == 0) {
 						/* skip copy - calc size only */
 						} else {
-							/* stop before overrun buffer */
+						/* stop before overrun buffer */
 							rc = -ERANGE;
 						}
 						break;
@@ -5323,11 +5434,11 @@
 					temp_ptr++;
 					name_len -= value_len;
 					temp_ptr += value_len;
-				/* no trailing null to account for in value len */
-					/* go on to next EA */
+					/* No trailing null to account for in
+					   value_len.  Go on to next EA */
 					temp_fea = (struct fea *)temp_ptr;
 				}
-			} 
+			}
 		}
 	}
 	if (pSMB)
@@ -5340,9 +5451,9 @@
 
 int
 CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName,
-		const char * ea_name, const void * ea_value, 
-		const __u16 ea_value_len, const struct nls_table *nls_codepage,
-		int remap)
+	     const char *ea_name, const void *ea_value,
+	     const __u16 ea_value_len, const struct nls_table *nls_codepage,
+	     int remap)
 {
 	struct smb_com_transaction2_spi_req *pSMB = NULL;
 	struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
@@ -5361,11 +5472,11 @@
 
 	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 		name_len =
-		    cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, 
+		    cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
 				     PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
-	} else {		/* BB improve the check for buffer overruns BB */
+	} else {	/* BB improve the check for buffer overruns BB */
 		name_len = strnlen(fileName, PATH_MAX);
 		name_len++;	/* trailing null */
 		strncpy(pSMB->FileName, fileName, name_len);
@@ -5376,10 +5487,10 @@
 	/* done calculating parms using name_len of file name,
 	now use name_len to calculate length of ea name
 	we are going to create in the inode xattrs */
-	if(ea_name == NULL)
+	if (ea_name == NULL)
 		name_len = 0;
 	else
-		name_len = strnlen(ea_name,255);
+		name_len = strnlen(ea_name, 255);
 
 	count = sizeof(*parm_data) + ea_value_len + name_len + 1;
 	pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -5390,7 +5501,7 @@
 	pSMB->Timeout = 0;
 	pSMB->Reserved2 = 0;
 	param_offset = offsetof(struct smb_com_transaction2_spi_req,
-                                     InformationLevel) - 4;
+				InformationLevel) - 4;
 	offset = param_offset + params;
 	pSMB->InformationLevel =
 		cpu_to_le16(SMB_SET_FILE_EA);
@@ -5410,17 +5521,19 @@
 	/* we checked above that name len is less than 255 */
 	parm_data->list[0].name_len = (__u8)name_len;
 	/* EA names are always ASCII */
-	if(ea_name)
-		strncpy(parm_data->list[0].name,ea_name,name_len);
+	if (ea_name)
+		strncpy(parm_data->list[0].name, ea_name, name_len);
 	parm_data->list[0].name[name_len] = 0;
 	parm_data->list[0].value_len = cpu_to_le16(ea_value_len);
 	/* caller ensures that ea_value_len is less than 64K but
 	we need to ensure that it fits within the smb */
 
-	/*BB add length check that it would fit in negotiated SMB buffer size BB */
-	/* if(ea_value_len > buffer_size - 512 (enough for header)) */
-	if(ea_value_len)
-		memcpy(parm_data->list[0].name+name_len+1,ea_value,ea_value_len);
+	/*BB add length check to see if it would fit in
+	     negotiated SMB buffer size BB */
+	/* if (ea_value_len > buffer_size - 512 (enough for header)) */
+	if (ea_value_len)
+		memcpy(parm_data->list[0].name+name_len+1,
+		       ea_value, ea_value_len);
 
 	pSMB->TotalDataCount = pSMB->DataCount;
 	pSMB->ParameterCount = cpu_to_le16(params);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f4e9266..4af3588 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/connect.c
  *
- *   Copyright (C) International Business Machines  Corp., 2002,2006
+ *   Copyright (C) International Business Machines  Corp., 2002,2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -16,7 +16,7 @@
  *
  *   You should have received a copy of the GNU Lesser General Public License
  *   along with this library; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 #include <linux/fs.h>
 #include <linux/net.h>
@@ -85,6 +85,7 @@
 	unsigned direct_io:1;
 	unsigned remap:1;   /* set to remap seven reserved chars in filenames */
 	unsigned posix_paths:1;   /* unset to not ask for posix pathnames. */
+	unsigned no_linux_ext:1;
 	unsigned sfu_emul:1;
 	unsigned nullauth:1; /* attempt to authenticate with null user */
 	unsigned nocase;     /* request case insensitive filenames */
@@ -93,20 +94,20 @@
 	unsigned int wsize;
 	unsigned int sockopt;
 	unsigned short int port;
-	char * prepath;
+	char *prepath;
 };
 
-static int ipv4_connect(struct sockaddr_in *psin_server, 
+static int ipv4_connect(struct sockaddr_in *psin_server,
 			struct socket **csocket,
-			char * netb_name,
-			char * server_netb_name);
-static int ipv6_connect(struct sockaddr_in6 *psin_server, 
+			char *netb_name,
+			char *server_netb_name);
+static int ipv6_connect(struct sockaddr_in6 *psin_server,
 			struct socket **csocket);
 
 
-	/* 
+	/*
 	 * cifs tcp session reconnection
-	 * 
+	 *
 	 * mark tcp session as reconnecting so temporarily locked
 	 * mark all smb sessions as reconnecting for tcp session
 	 * reconnect tcp session
@@ -120,11 +121,11 @@
 	struct list_head *tmp;
 	struct cifsSesInfo *ses;
 	struct cifsTconInfo *tcon;
-	struct mid_q_entry * mid_entry;
-	
+	struct mid_q_entry *mid_entry;
+
 	spin_lock(&GlobalMid_Lock);
-	if( kthread_should_stop() ) {
-		/* the demux thread will exit normally 
+	if ( kthread_should_stop() ) {
+		/* the demux thread will exit normally
 		next time through the loop */
 		spin_unlock(&GlobalMid_Lock);
 		return rc;
@@ -150,18 +151,19 @@
 	}
 	list_for_each(tmp, &GlobalTreeConnectionList) {
 		tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
-		if((tcon) && (tcon->ses) && (tcon->ses->server == server)) {
+		if ((tcon) && (tcon->ses) && (tcon->ses->server == server)) {
 			tcon->tidStatus = CifsNeedReconnect;
 		}
 	}
 	read_unlock(&GlobalSMBSeslock);
 	/* do not want to be sending data on a socket we are freeing */
-	down(&server->tcpSem); 
-	if(server->ssocket) {
-		cFYI(1,("State: 0x%x Flags: 0x%lx", server->ssocket->state,
+	down(&server->tcpSem);
+	if (server->ssocket) {
+		cFYI(1, ("State: 0x%x Flags: 0x%lx", server->ssocket->state,
 			server->ssocket->flags));
-		server->ssocket->ops->shutdown(server->ssocket,SEND_SHUTDOWN);
-		cFYI(1,("Post shutdown state: 0x%x Flags: 0x%lx", server->ssocket->state,
+		server->ssocket->ops->shutdown(server->ssocket, SEND_SHUTDOWN);
+		cFYI(1, ("Post shutdown state: 0x%x Flags: 0x%lx",
+			server->ssocket->state,
 			server->ssocket->flags));
 		sock_release(server->ssocket);
 		server->ssocket = NULL;
@@ -172,8 +174,8 @@
 		mid_entry = list_entry(tmp, struct
 					mid_q_entry,
 					qhead);
-		if(mid_entry) {
-			if(mid_entry->midState == MID_REQUEST_SUBMITTED) {
+		if (mid_entry) {
+			if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
 				/* Mark other intransit requests as needing
 				   retry so we do not immediately mark the
 				   session bad again (ie after we reconnect
@@ -183,29 +185,29 @@
 		}
 	}
 	spin_unlock(&GlobalMid_Lock);
-	up(&server->tcpSem); 
+	up(&server->tcpSem);
 
-	while ( (!kthread_should_stop()) && (server->tcpStatus != CifsGood))
-	{
+	while ( (!kthread_should_stop()) && (server->tcpStatus != CifsGood)) {
 		try_to_freeze();
-		if(server->protocolType == IPV6) {
-			rc = ipv6_connect(&server->addr.sockAddr6,&server->ssocket);
+		if (server->protocolType == IPV6) {
+			rc = ipv6_connect(&server->addr.sockAddr6,
+					  &server->ssocket);
 		} else {
-			rc = ipv4_connect(&server->addr.sockAddr, 
+			rc = ipv4_connect(&server->addr.sockAddr,
 					&server->ssocket,
 					server->workstation_RFC1001_name,
 					server->server_RFC1001_name);
 		}
-		if(rc) {
-			cFYI(1,("reconnect error %d",rc));
+		if (rc) {
+			cFYI(1, ("reconnect error %d", rc));
 			msleep(3000);
 		} else {
 			atomic_inc(&tcpSesReconnectCount);
 			spin_lock(&GlobalMid_Lock);
-			if( !kthread_should_stop() )
+			if ( !kthread_should_stop() )
 				server->tcpStatus = CifsGood;
 			server->sequence_number = 0;
-			spin_unlock(&GlobalMid_Lock);			
+			spin_unlock(&GlobalMid_Lock);
 	/*		atomic_set(&server->inFlight,0);*/
 			wake_up(&server->response_q);
 		}
@@ -213,27 +215,27 @@
 	return rc;
 }
 
-/* 
+/*
 	return codes:
 		0 	not a transact2, or all data present
 		>0 	transact2 with that much data missing
 		-EINVAL = invalid transact2
 
  */
-static int check2ndT2(struct smb_hdr * pSMB, unsigned int maxBufSize)
+static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
 {
-	struct smb_t2_rsp * pSMBt;
-        int total_data_size;
+	struct smb_t2_rsp *pSMBt;
+	int total_data_size;
 	int data_in_this_rsp;
 	int remaining;
 
-	if(pSMB->Command != SMB_COM_TRANSACTION2)
+	if (pSMB->Command != SMB_COM_TRANSACTION2)
 		return 0;
 
-        /* check for plausible wct, bcc and t2 data and parm sizes */
-        /* check for parm and data offset going beyond end of smb */
-	if(pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
-		cFYI(1,("invalid transact2 word count"));
+	/* check for plausible wct, bcc and t2 data and parm sizes */
+	/* check for parm and data offset going beyond end of smb */
+	if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
+		cFYI(1, ("invalid transact2 word count"));
 		return -EINVAL;
 	}
 
@@ -244,25 +246,25 @@
 
 	remaining = total_data_size - data_in_this_rsp;
 
-	if(remaining == 0)
+	if (remaining == 0)
 		return 0;
-	else if(remaining < 0) {
-		cFYI(1,("total data %d smaller than data in frame %d",
+	else if (remaining < 0) {
+		cFYI(1, ("total data %d smaller than data in frame %d",
 			total_data_size, data_in_this_rsp));
 		return -EINVAL;
 	} else {
-		cFYI(1,("missing %d bytes from transact2, check next response",
+		cFYI(1, ("missing %d bytes from transact2, check next response",
 			remaining));
-		if(total_data_size > maxBufSize) {
-			cERROR(1,("TotalDataSize %d is over maximum buffer %d",
-				total_data_size,maxBufSize));
-			return -EINVAL; 
+		if (total_data_size > maxBufSize) {
+			cERROR(1, ("TotalDataSize %d is over maximum buffer %d",
+				total_data_size, maxBufSize));
+			return -EINVAL;
 		}
 		return remaining;
 	}
 }
 
-static int coalesce_t2(struct smb_hdr * psecond, struct smb_hdr *pTargetSMB)
+static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
 {
 	struct smb_t2_rsp *pSMB2 = (struct smb_t2_rsp *)psecond;
 	struct smb_t2_rsp *pSMBt  = (struct smb_t2_rsp *)pTargetSMB;
@@ -270,43 +272,43 @@
 	int total_in_buf;
 	int remaining;
 	int total_in_buf2;
-	char * data_area_of_target;
-	char * data_area_of_buf2;
+	char *data_area_of_target;
+	char *data_area_of_buf2;
 	__u16 byte_count;
 
 	total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount);
 
-	if(total_data_size != le16_to_cpu(pSMB2->t2_rsp.TotalDataCount)) {
-		cFYI(1,("total data sizes of primary and secondary t2 differ"));
+	if (total_data_size != le16_to_cpu(pSMB2->t2_rsp.TotalDataCount)) {
+		cFYI(1, ("total data size of primary and secondary t2 differ"));
 	}
 
 	total_in_buf = le16_to_cpu(pSMBt->t2_rsp.DataCount);
 
 	remaining = total_data_size - total_in_buf;
-	
-	if(remaining < 0)
+
+	if (remaining < 0)
 		return -EINVAL;
 
-	if(remaining == 0) /* nothing to do, ignore */
+	if (remaining == 0) /* nothing to do, ignore */
 		return 0;
-	
+
 	total_in_buf2 = le16_to_cpu(pSMB2->t2_rsp.DataCount);
-	if(remaining < total_in_buf2) {
-		cFYI(1,("transact2 2nd response contains too much data"));
+	if (remaining < total_in_buf2) {
+		cFYI(1, ("transact2 2nd response contains too much data"));
 	}
 
 	/* find end of first SMB data area */
-	data_area_of_target = (char *)&pSMBt->hdr.Protocol + 
+	data_area_of_target = (char *)&pSMBt->hdr.Protocol +
 				le16_to_cpu(pSMBt->t2_rsp.DataOffset);
 	/* validate target area */
 
 	data_area_of_buf2 = (char *) &pSMB2->hdr.Protocol +
-                                        le16_to_cpu(pSMB2->t2_rsp.DataOffset);
+					le16_to_cpu(pSMB2->t2_rsp.DataOffset);
 
 	data_area_of_target += total_in_buf;
 
 	/* copy second buffer into end of first buffer */
-	memcpy(data_area_of_target,data_area_of_buf2,total_in_buf2);
+	memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
 	total_in_buf += total_in_buf2;
 	pSMBt->t2_rsp.DataCount = cpu_to_le16(total_in_buf);
 	byte_count = le16_to_cpu(BCC_LE(pTargetSMB));
@@ -317,11 +319,11 @@
 	byte_count += total_in_buf2;
 
 	/* BB also add check that we are not beyond maximum buffer size */
-		
+
 	pTargetSMB->smb_buf_length = byte_count;
 
-	if(remaining == total_in_buf2) {
-		cFYI(1,("found the last secondary response"));
+	if (remaining == total_in_buf2) {
+		cFYI(1, ("found the last secondary response"));
 		return 0; /* we are done */
 	} else /* more responses to go */
 		return 1;
@@ -348,21 +350,21 @@
 	int isMultiRsp;
 	int reconnect;
 
-	allow_signal(SIGKILL);
 	current->flags |= PF_MEMALLOC;
 	server->tsk = current;	/* save process info to wake at shutdown */
 	cFYI(1, ("Demultiplex PID: %d", current->pid));
-	write_lock(&GlobalSMBSeslock); 
+	write_lock(&GlobalSMBSeslock);
 	atomic_inc(&tcpSesAllocCount);
 	length = tcpSesAllocCount.counter;
 	write_unlock(&GlobalSMBSeslock);
 	complete(&cifsd_complete);
-	if(length  > 1) {
+	if (length  > 1) {
 		mempool_resize(cifs_req_poolp,
 			length + cifs_min_rcv,
 			GFP_KERNEL);
 	}
 
+	set_freezable();
 	while (!kthread_should_stop()) {
 		if (try_to_freeze())
 			continue;
@@ -425,10 +427,10 @@
 				break;
 			}
 			if (!try_to_freeze() && (length == -EINTR)) {
-				cFYI(1,("cifsd thread killed"));
+				cFYI(1, ("cifsd thread killed"));
 				break;
 			}
-			cFYI(1,("Reconnect after unexpected peek error %d",
+			cFYI(1, ("Reconnect after unexpected peek error %d",
 				length));
 			cifs_reconnect(server);
 			csocket = server->ssocket;
@@ -452,26 +454,26 @@
 		with the most common, zero, as regular data */
 		temp = *((char *) smb_buffer);
 
-		/* Note that FC 1001 length is big endian on the wire, 
+		/* Note that FC 1001 length is big endian on the wire,
 		but we convert it here so it is always manipulated
 		as host byte order */
 		pdu_length = ntohl(smb_buffer->smb_buf_length);
 		smb_buffer->smb_buf_length = pdu_length;
 
-		cFYI(1,("rfc1002 length 0x%x)", pdu_length+4));
+		cFYI(1, ("rfc1002 length 0x%x", pdu_length+4));
 
 		if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) {
-			continue; 
+			continue;
 		} else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) {
-			cFYI(1,("Good RFC 1002 session rsp"));
+			cFYI(1, ("Good RFC 1002 session rsp"));
 			continue;
 		} else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) {
-			/* we get this from Windows 98 instead of 
+			/* we get this from Windows 98 instead of
 			   an error on SMB negprot response */
-			cFYI(1,("Negative RFC1002 Session Response Error 0x%x)",
+			cFYI(1, ("Negative RFC1002 Session Response Error 0x%x)",
 				pdu_length));
-			if(server->tcpStatus == CifsNew) {
-				/* if nack on negprot (rather than 
+			if (server->tcpStatus == CifsNew) {
+				/* if nack on negprot (rather than
 				ret of smb negprot error) reconnecting
 				not going to help, ret error to mount */
 				break;
@@ -481,10 +483,10 @@
 				msleep(1000);
 				/* always try 445 first on reconnect
 				since we get NACK on some if we ever
-				connected to port 139 (the NACK is 
+				connected to port 139 (the NACK is
 				since we do not begin with RFC1001
 				session initialize frame) */
-				server->addr.sockAddr.sin_port = 
+				server->addr.sockAddr.sin_port =
 					htons(CIFS_PORT);
 				cifs_reconnect(server);
 				csocket = server->ssocket;
@@ -492,7 +494,7 @@
 				continue;
 			}
 		} else if (temp != (char) 0) {
-			cERROR(1,("Unknown RFC 1002 frame"));
+			cERROR(1, ("Unknown RFC 1002 frame"));
 			cifs_dump_mem(" Received Data: ", (char *)smb_buffer,
 				      length);
 			cifs_reconnect(server);
@@ -501,7 +503,7 @@
 		}
 
 		/* else we have an SMB response */
-		if((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
+		if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
 			    (pdu_length < sizeof (struct smb_hdr) - 1 - 4)) {
 			cERROR(1, ("Invalid size SMB length %d pdu_length %d",
 					length, pdu_length+4));
@@ -509,12 +511,12 @@
 			csocket = server->ssocket;
 			wake_up(&server->response_q);
 			continue;
-		} 
+		}
 
 		/* else length ok */
 		reconnect = 0;
 
-		if(pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
+		if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
 			isLargeBuf = TRUE;
 			memcpy(bigbuf, smallbuf, 4);
 			smb_buffer = bigbuf;
@@ -522,11 +524,11 @@
 		length = 0;
 		iov.iov_base = 4 + (char *)smb_buffer;
 		iov.iov_len = pdu_length;
-		for (total_read = 0; total_read < pdu_length; 
+		for (total_read = 0; total_read < pdu_length;
 		     total_read += length) {
 			length = kernel_recvmsg(csocket, &smb_msg, &iov, 1,
 						pdu_length - total_read, 0);
-			if( kthread_should_stop() ||
+			if ( kthread_should_stop() ||
 			    (length == -EINTR)) {
 				/* then will exit */
 				reconnect = 2;
@@ -534,19 +536,19 @@
 			} else if (server->tcpStatus == CifsNeedReconnect) {
 				cifs_reconnect(server);
 				csocket = server->ssocket;
-			        /* Reconnect wakes up rspns q */
+				/* Reconnect wakes up rspns q */
 				/* Now we will reread sock */
 				reconnect = 1;
 				break;
-			} else if ((length == -ERESTARTSYS) || 
+			} else if ((length == -ERESTARTSYS) ||
 				   (length == -EAGAIN)) {
 				msleep(1); /* minimum sleep to prevent looping,
-                                              allowing socket to clear and app 
+					      allowing socket to clear and app
 					      threads to set tcpStatus
 					      CifsNeedReconnect if server hung*/
 				continue;
 			} else if (length <= 0) {
-				cERROR(1,("Received no data, expecting %d",
+				cERROR(1, ("Received no data, expecting %d",
 					      pdu_length - total_read));
 				cifs_reconnect(server);
 				csocket = server->ssocket;
@@ -554,13 +556,13 @@
 				break;
 			}
 		}
-		if(reconnect == 2)
+		if (reconnect == 2)
 			break;
-		else if(reconnect == 1)
+		else if (reconnect == 1)
 			continue;
 
 		length += 4; /* account for rfc1002 hdr */
-	
+
 
 		dump_smb(smb_buffer, length);
 		if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) {
@@ -574,28 +576,28 @@
 		list_for_each(tmp, &server->pending_mid_q) {
 			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
 
-			if ((mid_entry->mid == smb_buffer->Mid) && 
+			if ((mid_entry->mid == smb_buffer->Mid) &&
 			    (mid_entry->midState == MID_REQUEST_SUBMITTED) &&
 			    (mid_entry->command == smb_buffer->Command)) {
-				if(check2ndT2(smb_buffer,server->maxBuf) > 0) {
+				if (check2ndT2(smb_buffer,server->maxBuf) > 0) {
 					/* We have a multipart transact2 resp */
 					isMultiRsp = TRUE;
-					if(mid_entry->resp_buf) {
+					if (mid_entry->resp_buf) {
 						/* merge response - fix up 1st*/
-						if(coalesce_t2(smb_buffer, 
+						if (coalesce_t2(smb_buffer,
 							mid_entry->resp_buf)) {
 							mid_entry->multiRsp = 1;
 							break;
 						} else {
 							/* all parts received */
 							mid_entry->multiEnd = 1;
-							goto multi_t2_fnd; 
+							goto multi_t2_fnd;
 						}
 					} else {
-						if(!isLargeBuf) {
+						if (!isLargeBuf) {
 							cERROR(1,("1st trans2 resp needs bigbuf"));
 					/* BB maybe we can fix this up,  switch
-				   	   to already allocated large buffer? */
+					   to already allocated large buffer? */
 						} else {
 							/* Have first buffer */
 							mid_entry->resp_buf =
@@ -605,9 +607,9 @@
 						}
 					}
 					break;
-				} 
+				}
 				mid_entry->resp_buf = smb_buffer;
-				if(isLargeBuf)
+				if (isLargeBuf)
 					mid_entry->largeBuf = 1;
 				else
 					mid_entry->largeBuf = 0;
@@ -627,24 +629,25 @@
 		spin_unlock(&GlobalMid_Lock);
 		if (task_to_wake) {
 			/* Was previous buf put in mpx struct for multi-rsp? */
-			if(!isMultiRsp) {
+			if (!isMultiRsp) {
 				/* smb buffer will be freed by user thread */
-				if(isLargeBuf) {
+				if (isLargeBuf) {
 					bigbuf = NULL;
 				} else
 					smallbuf = NULL;
 			}
 			wake_up_process(task_to_wake);
 		} else if ((is_valid_oplock_break(smb_buffer, server) == FALSE)
-		    && (isMultiRsp == FALSE)) {                          
-			cERROR(1, ("No task to wake, unknown frame rcvd! NumMids %d", midCount.counter));
-			cifs_dump_mem("Received Data is: ",(char *)smb_buffer,
+		    && (isMultiRsp == FALSE)) {
+			cERROR(1, ("No task to wake, unknown frame received! "
+				   "NumMids %d", midCount.counter));
+			cifs_dump_mem("Received Data is: ", (char *)smb_buffer,
 				      sizeof(struct smb_hdr));
 #ifdef CONFIG_CIFS_DEBUG2
 			cifs_dump_detail(smb_buffer);
 			cifs_dump_mids(server);
 #endif /* CIFS_DEBUG2 */
-			
+
 		}
 	} /* end while !EXITING */
 
@@ -654,12 +657,12 @@
 	/* check if we have blocked requests that need to free */
 	/* Note that cifs_max_pending is normally 50, but
 	can be set at module install time to as little as two */
-	if(atomic_read(&server->inFlight) >= cifs_max_pending)
+	if (atomic_read(&server->inFlight) >= cifs_max_pending)
 		atomic_set(&server->inFlight, cifs_max_pending - 1);
 	/* We do not want to set the max_pending too low or we
 	could end up with the counter going negative */
 	spin_unlock(&GlobalMid_Lock);
-	/* Although there should not be any requests blocked on 
+	/* Although there should not be any requests blocked on
 	this queue it can not hurt to be paranoid and try to wake up requests
 	that may haven been blocked when more than 50 at time were on the wire
 	to the same server - they now will see the session is in exit state
@@ -667,8 +670,8 @@
 	wake_up_all(&server->request_q);
 	/* give those requests time to exit */
 	msleep(125);
-	
-	if(server->ssocket) {
+
+	if (server->ssocket) {
 		sock_release(csocket);
 		server->ssocket = NULL;
 	}
@@ -708,10 +711,10 @@
 		list_for_each(tmp, &server->pending_mid_q) {
 		mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
 			if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
-				cFYI(1,
-				  ("Clearing Mid 0x%x - waking up ",mid_entry->mid));
+				cFYI(1, ("Clearing Mid 0x%x - waking up ",
+					 mid_entry->mid));
 				task_to_wake = mid_entry->tsk;
-				if(task_to_wake) {
+				if (task_to_wake) {
 					wake_up_process(task_to_wake);
 				}
 			}
@@ -723,7 +726,7 @@
 	}
 
 	if (!list_empty(&server->pending_mid_q)) {
-		/* mpx threads have not exited yet give them 
+		/* mpx threads have not exited yet give them
 		at least the smb send timeout time for long ops */
 		/* due to delays on oplock break requests, we need
 		to wait at least 45 seconds before giving up
@@ -741,7 +744,7 @@
 
 	/* last chance to mark ses pointers invalid
 	if there are any pointing to this (e.g
-	if a crazy root user tried to kill cifsd 
+	if a crazy root user tried to kill cifsd
 	kernel thread explicitly this might happen) */
 	list_for_each(tmp, &GlobalSMBSessionList) {
 		ses = list_entry(tmp, struct cifsSesInfo,
@@ -753,17 +756,18 @@
 	write_unlock(&GlobalSMBSeslock);
 
 	kfree(server);
-	if(length  > 0) {
+	if (length  > 0) {
 		mempool_resize(cifs_req_poolp,
 			length + cifs_min_rcv,
 			GFP_KERNEL);
 	}
-	
+
 	return 0;
 }
 
 static int
-cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
+cifs_parse_mount_options(char *options, const char *devname,
+			 struct smb_vol *vol)
 {
 	char *value;
 	char *data;
@@ -771,15 +775,15 @@
 	char separator[2];
 
 	separator[0] = ',';
-	separator[1] = 0; 
+	separator[1] = 0;
 
 	if (Local_System_Name[0] != 0)
-		memcpy(vol->source_rfc1001_name, Local_System_Name,15);
+		memcpy(vol->source_rfc1001_name, Local_System_Name, 15);
 	else {
 		char *nodename = utsname()->nodename;
-		int n = strnlen(nodename,15);
-		memset(vol->source_rfc1001_name,0x20,15);
-		for(i=0 ; i < n ; i++) {
+		int n = strnlen(nodename, 15);
+		memset(vol->source_rfc1001_name, 0x20, 15);
+		for (i = 0; i < n; i++) {
 			/* does not have to be perfect mapping since field is
 			informational, only used for servers that do not support
 			port 445 and it can be overridden at mount time */
@@ -804,31 +808,32 @@
 	if (!options)
 		return 1;
 
-	if(strncmp(options,"sep=",4) == 0) {
-		if(options[4] != 0) {
+	if (strncmp(options, "sep=", 4) == 0) {
+		if (options[4] != 0) {
 			separator[0] = options[4];
 			options += 5;
 		} else {
-			cFYI(1,("Null separator not allowed"));
+			cFYI(1, ("Null separator not allowed"));
 		}
 	}
-		
+
 	while ((data = strsep(&options, separator)) != NULL) {
 		if (!*data)
 			continue;
 		if ((value = strchr(data, '=')) != NULL)
 			*value++ = '\0';
 
-		if (strnicmp(data, "user_xattr",10) == 0) {/*parse before user*/
+		/* Have to parse this before we parse for "user" */
+		if (strnicmp(data, "user_xattr", 10) == 0) {
 			vol->no_xattr = 0;
-		} else if (strnicmp(data, "nouser_xattr",12) == 0) {
+		} else if (strnicmp(data, "nouser_xattr", 12) == 0) {
 			vol->no_xattr = 1;
 		} else if (strnicmp(data, "user", 4) == 0) {
 			if (!value) {
 				printk(KERN_WARNING
 				       "CIFS: invalid or missing username\n");
 				return 1;	/* needs_arg; */
-			} else if(!*value) {
+			} else if (!*value) {
 				/* null user, ie anonymous, authentication */
 				vol->nullauth = 1;
 			}
@@ -842,12 +847,12 @@
 			if (!value) {
 				vol->password = NULL;
 				continue;
-			} else if(value[0] == 0) {
+			} else if (value[0] == 0) {
 				/* check if string begins with double comma
 				   since that would mean the password really
 				   does start with a comma, and would not
 				   indicate an empty string */
-				if(value[1] != separator[0]) {
+				if (value[1] != separator[0]) {
 					vol->password = NULL;
 					continue;
 				}
@@ -856,7 +861,7 @@
 			/* removed password length check, NTLM passwords
 				can be arbitrarily long */
 
-			/* if comma in password, the string will be 
+			/* if comma in password, the string will be
 			prematurely null terminated.  Commas in password are
 			specified across the cifs mount interface by a double
 			comma ie ,, and a comma used as in other cases ie ','
@@ -866,18 +871,18 @@
 			/* NB: password legally can have multiple commas and
 			the only illegal character in a password is null */
 
-			if ((value[temp_len] == 0) && 
+			if ((value[temp_len] == 0) &&
 			    (value[temp_len+1] == separator[0])) {
 				/* reinsert comma */
 				value[temp_len] = separator[0];
-				temp_len+=2;  /* move after the second comma */
-				while(value[temp_len] != 0)  {
+				temp_len += 2;  /* move after second comma */
+				while (value[temp_len] != 0)  {
 					if (value[temp_len] == separator[0]) {
-						if (value[temp_len+1] == 
+						if (value[temp_len+1] ==
 						     separator[0]) {
 						/* skip second comma */
 							temp_len++;
-						} else { 
+						} else {
 						/* single comma indicating start
 							 of next parm */
 							break;
@@ -885,24 +890,25 @@
 					}
 					temp_len++;
 				}
-				if(value[temp_len] == 0) {
+				if (value[temp_len] == 0) {
 					options = NULL;
 				} else {
 					value[temp_len] = 0;
 					/* point option to start of next parm */
 					options = value + temp_len + 1;
 				}
-				/* go from value to value + temp_len condensing 
+				/* go from value to value + temp_len condensing
 				double commas to singles. Note that this ends up
 				allocating a few bytes too many, which is ok */
 				vol->password = kzalloc(temp_len, GFP_KERNEL);
-				if(vol->password == NULL) {
-					printk("CIFS: no memory for pass\n");
+				if (vol->password == NULL) {
+					printk(KERN_WARNING "CIFS: no memory "
+							    "for password\n");
 					return 1;
 				}
-				for(i=0,j=0;i<temp_len;i++,j++) {
+				for (i = 0, j = 0; i < temp_len; i++, j++) {
 					vol->password[j] = value[i];
-					if(value[i] == separator[0]
+					if (value[i] == separator[0]
 						&& value[i+1] == separator[0]) {
 						/* skip second comma */
 						i++;
@@ -911,8 +917,9 @@
 				vol->password[j] = 0;
 			} else {
 				vol->password = kzalloc(temp_len+1, GFP_KERNEL);
-				if(vol->password == NULL) {
-					printk("CIFS: no memory for pass\n");
+				if (vol->password == NULL) {
+					printk(KERN_WARNING "CIFS: no memory "
+							    "for password\n");
 					return 1;
 				}
 				strcpy(vol->password, value);
@@ -923,20 +930,21 @@
 			} else if (strnlen(value, 35) < 35) {
 				vol->UNCip = value;
 			} else {
-				printk(KERN_WARNING "CIFS: ip address too long\n");
+				printk(KERN_WARNING "CIFS: ip address "
+						    "too long\n");
 				return 1;
 			}
-                } else if (strnicmp(data, "sec", 3) == 0) { 
-                        if (!value || !*value) {
-				cERROR(1,("no security value specified"));
-                                continue;
-                        } else if (strnicmp(value, "krb5i", 5) == 0) {
-				vol->secFlg |= CIFSSEC_MAY_KRB5 | 
+		} else if (strnicmp(data, "sec", 3) == 0) {
+			if (!value || !*value) {
+				cERROR(1, ("no security value specified"));
+				continue;
+			} else if (strnicmp(value, "krb5i", 5) == 0) {
+				vol->secFlg |= CIFSSEC_MAY_KRB5 |
 					CIFSSEC_MUST_SIGN;
 			} else if (strnicmp(value, "krb5p", 5) == 0) {
-				/* vol->secFlg |= CIFSSEC_MUST_SEAL | 
-					CIFSSEC_MAY_KRB5; */ 
-				cERROR(1,("Krb5 cifs privacy not supported"));
+				/* vol->secFlg |= CIFSSEC_MUST_SEAL |
+					CIFSSEC_MAY_KRB5; */
+				cERROR(1, ("Krb5 cifs privacy not supported"));
 				return 1;
 			} else if (strnicmp(value, "krb5", 4) == 0) {
 				vol->secFlg |= CIFSSEC_MAY_KRB5;
@@ -956,33 +964,34 @@
 				vol->secFlg |= CIFSSEC_MAY_NTLMV2;
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
 			} else if (strnicmp(value, "lanman", 6) == 0) {
-                                vol->secFlg |= CIFSSEC_MAY_LANMAN;
+				vol->secFlg |= CIFSSEC_MAY_LANMAN;
 #endif
 			} else if (strnicmp(value, "none", 4) == 0) {
 				vol->nullauth = 1;
-                        } else {
-                                cERROR(1,("bad security option: %s", value));
-                                return 1;
-                        }
+			} else {
+				cERROR(1, ("bad security option: %s", value));
+				return 1;
+			}
 		} else if ((strnicmp(data, "unc", 3) == 0)
 			   || (strnicmp(data, "target", 6) == 0)
 			   || (strnicmp(data, "path", 4) == 0)) {
 			if (!value || !*value) {
-				printk(KERN_WARNING
-				       "CIFS: invalid path to network resource\n");
+				printk(KERN_WARNING "CIFS: invalid path to "
+						    "network resource\n");
 				return 1;	/* needs_arg; */
 			}
 			if ((temp_len = strnlen(value, 300)) < 300) {
-				vol->UNC = kmalloc(temp_len+1,GFP_KERNEL);
+				vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
 				if (vol->UNC == NULL)
 					return 1;
-				strcpy(vol->UNC,value);
+				strcpy(vol->UNC, value);
 				if (strncmp(vol->UNC, "//", 2) == 0) {
 					vol->UNC[0] = '\\';
 					vol->UNC[1] = '\\';
-				} else if (strncmp(vol->UNC, "\\\\", 2) != 0) {	                   
+				} else if (strncmp(vol->UNC, "\\\\", 2) != 0) {
 					printk(KERN_WARNING
-					       "CIFS: UNC Path does not begin with // or \\\\ \n");
+					       "CIFS: UNC Path does not begin "
+					       "with // or \\\\ \n");
 					return 1;
 				}
 			} else {
@@ -1001,43 +1010,47 @@
 				vol->domainname = value;
 				cFYI(1, ("Domain name set"));
 			} else {
-				printk(KERN_WARNING "CIFS: domain name too long\n");
+				printk(KERN_WARNING "CIFS: domain name too "
+						    "long\n");
 				return 1;
 			}
-                } else if (strnicmp(data, "prefixpath", 10) == 0) {
-                        if (!value || !*value) {
-                                printk(KERN_WARNING
-                                       "CIFS: invalid path prefix\n");
-                                return 1;       /* needs_arg; */
-                        }
-                        if ((temp_len = strnlen(value, 1024)) < 1024) {
+		} else if (strnicmp(data, "prefixpath", 10) == 0) {
+			if (!value || !*value) {
+				printk(KERN_WARNING
+					"CIFS: invalid path prefix\n");
+				return 1;       /* needs_argument */
+			}
+			if ((temp_len = strnlen(value, 1024)) < 1024) {
 				if (value[0] != '/')
 					temp_len++;  /* missing leading slash */
-                                vol->prepath = kmalloc(temp_len+1,GFP_KERNEL);
-                                if (vol->prepath == NULL)
-                                        return 1;
+				vol->prepath = kmalloc(temp_len+1, GFP_KERNEL);
+				if (vol->prepath == NULL)
+					return 1;
 				if (value[0] != '/') {
 					vol->prepath[0] = '/';
-	                                strcpy(vol->prepath+1,value);
+					strcpy(vol->prepath+1, value);
 				} else
-					strcpy(vol->prepath,value);
-				cFYI(1,("prefix path %s",vol->prepath));
-                        } else {
-                                printk(KERN_WARNING "CIFS: prefix too long\n");
-                                return 1;
-                        }
+					strcpy(vol->prepath, value);
+				cFYI(1, ("prefix path %s", vol->prepath));
+			} else {
+				printk(KERN_WARNING "CIFS: prefix too long\n");
+				return 1;
+			}
 		} else if (strnicmp(data, "iocharset", 9) == 0) {
 			if (!value || !*value) {
-				printk(KERN_WARNING "CIFS: invalid iocharset specified\n");
+				printk(KERN_WARNING "CIFS: invalid iocharset "
+						    "specified\n");
 				return 1;	/* needs_arg; */
 			}
 			if (strnlen(value, 65) < 65) {
-				if (strnicmp(value,"default",7))
+				if (strnicmp(value, "default", 7))
 					vol->iocharset = value;
-				/* if iocharset not set load_nls_default used by caller */
-				cFYI(1, ("iocharset set to %s",value));
+				/* if iocharset not set then load_nls_default
+				   is used by caller */
+				cFYI(1, ("iocharset set to %s", value));
 			} else {
-				printk(KERN_WARNING "CIFS: iocharset name too long.\n");
+				printk(KERN_WARNING "CIFS: iocharset name "
+						    "too long.\n");
 				return 1;
 			}
 		} else if (strnicmp(data, "uid", 3) == 0) {
@@ -1089,54 +1102,59 @@
 			}
 		} else if (strnicmp(data, "netbiosname", 4) == 0) {
 			if (!value || !*value || (*value == ' ')) {
-				cFYI(1,("invalid (empty) netbiosname specified"));
+				cFYI(1, ("invalid (empty) netbiosname"));
 			} else {
-				memset(vol->source_rfc1001_name,0x20,15);
-				for(i=0;i<15;i++) {
-				/* BB are there cases in which a comma can be 
+				memset(vol->source_rfc1001_name, 0x20, 15);
+				for (i = 0; i < 15; i++) {
+				/* BB are there cases in which a comma can be
 				valid in this workstation netbios name (and need
 				special handling)? */
 
 				/* We do not uppercase netbiosname for user */
-					if (value[i]==0)
+					if (value[i] == 0)
 						break;
-					else 
-						vol->source_rfc1001_name[i] = value[i];
+					else
+						vol->source_rfc1001_name[i] =
+								value[i];
 				}
 				/* The string has 16th byte zero still from
 				set at top of the function  */
-				if ((i==15) && (value[i] != 0))
-					printk(KERN_WARNING "CIFS: netbiosname longer than 15 truncated.\n");
+				if ((i == 15) && (value[i] != 0))
+					printk(KERN_WARNING "CIFS: netbiosname"
+						" longer than 15 truncated.\n");
 			}
 		} else if (strnicmp(data, "servern", 7) == 0) {
 			/* servernetbiosname specified override *SMBSERVER */
 			if (!value || !*value || (*value == ' ')) {
-				cFYI(1,("empty server netbiosname specified"));
+				cFYI(1, ("empty server netbiosname specified"));
 			} else {
 				/* last byte, type, is 0x20 for servr type */
-				memset(vol->target_rfc1001_name,0x20,16);
+				memset(vol->target_rfc1001_name, 0x20, 16);
 
-				for(i=0;i<15;i++) {
+				for (i = 0; i < 15; i++) {
 				/* BB are there cases in which a comma can be
-				   valid in this workstation netbios name (and need
-				   special handling)? */
+				   valid in this workstation netbios name
+				   (and need special handling)? */
 
-				/* user or mount helper must uppercase netbiosname */
-					if (value[i]==0)
+				/* user or mount helper must uppercase
+				   the netbiosname */
+					if (value[i] == 0)
 						break;
 					else
-						vol->target_rfc1001_name[i] = value[i];
+						vol->target_rfc1001_name[i] =
+								value[i];
 				}
 				/* The string has 16th byte zero still from
 				   set at top of the function  */
-				if ((i==15) && (value[i] != 0))
-					printk(KERN_WARNING "CIFS: server netbiosname longer than 15 truncated.\n");
+				if ((i == 15) && (value[i] != 0))
+					printk(KERN_WARNING "CIFS: server net"
+					"biosname longer than 15 truncated.\n");
 			}
 		} else if (strnicmp(data, "credentials", 4) == 0) {
 			/* ignore */
 		} else if (strnicmp(data, "version", 3) == 0) {
 			/* ignore */
-		} else if (strnicmp(data, "guest",5) == 0) {
+		} else if (strnicmp(data, "guest", 5) == 0) {
 			/* ignore */
 		} else if (strnicmp(data, "rw", 2) == 0) {
 			vol->rw = TRUE;
@@ -1148,11 +1166,11 @@
 				   (strnicmp(data, "noauto", 6) == 0) ||
 				   (strnicmp(data, "dev", 3) == 0)) {
 			/*  The mount tool or mount.cifs helper (if present)
-				uses these opts to set flags, and the flags are read
-				by the kernel vfs layer before we get here (ie
-				before read super) so there is no point trying to
-				parse these options again and set anything and it
-				is ok to just ignore them */
+			    uses these opts to set flags, and the flags are read
+			    by the kernel vfs layer before we get here (ie
+			    before read super) so there is no point trying to
+			    parse these options again and set anything and it
+			    is ok to just ignore them */
 			continue;
 		} else if (strnicmp(data, "ro", 2) == 0) {
 			vol->rw = FALSE;
@@ -1168,26 +1186,31 @@
 			vol->remap = 1;
 		} else if (strnicmp(data, "nomapchars", 10) == 0) {
 			vol->remap = 0;
-                } else if (strnicmp(data, "sfu", 3) == 0) {
-                        vol->sfu_emul = 1;
-                } else if (strnicmp(data, "nosfu", 5) == 0) {
-                        vol->sfu_emul = 0;
+		} else if (strnicmp(data, "sfu", 3) == 0) {
+			vol->sfu_emul = 1;
+		} else if (strnicmp(data, "nosfu", 5) == 0) {
+			vol->sfu_emul = 0;
 		} else if (strnicmp(data, "posixpaths", 10) == 0) {
 			vol->posix_paths = 1;
 		} else if (strnicmp(data, "noposixpaths", 12) == 0) {
 			vol->posix_paths = 0;
-                } else if ((strnicmp(data, "nocase", 6) == 0) ||
+		} else if (strnicmp(data, "nounix", 6) == 0) {
+			vol->no_linux_ext = 1;
+		} else if (strnicmp(data, "nolinux", 7) == 0) {
+			vol->no_linux_ext = 1;
+		} else if ((strnicmp(data, "nocase", 6) == 0) ||
 			   (strnicmp(data, "ignorecase", 10)  == 0)) {
-                        vol->nocase = 1;
+			vol->nocase = 1;
 		} else if (strnicmp(data, "brl", 3) == 0) {
 			vol->nobrl =  0;
-		} else if ((strnicmp(data, "nobrl", 5) == 0) || 
+		} else if ((strnicmp(data, "nobrl", 5) == 0) ||
 			   (strnicmp(data, "nolock", 6) == 0)) {
 			vol->nobrl =  1;
 			/* turn off mandatory locking in mode
 			if remote locking is turned off since the
 			local vfs will do advisory */
-			if(vol->file_mode == (S_IALLUGO & ~(S_ISUID | S_IXGRP)))
+			if (vol->file_mode ==
+				(S_IALLUGO & ~(S_ISUID | S_IXGRP)))
 				vol->file_mode = S_IALLUGO;
 		} else if (strnicmp(data, "setuids", 7) == 0) {
 			vol->setuids = 1;
@@ -1201,55 +1224,61 @@
 			vol->intr = 0;
 		} else if (strnicmp(data, "intr", 4) == 0) {
 			vol->intr = 1;
-		} else if (strnicmp(data, "serverino",7) == 0) {
+		} else if (strnicmp(data, "serverino", 7) == 0) {
 			vol->server_ino = 1;
-		} else if (strnicmp(data, "noserverino",9) == 0) {
+		} else if (strnicmp(data, "noserverino", 9) == 0) {
 			vol->server_ino = 0;
-		} else if (strnicmp(data, "cifsacl",7) == 0) {
+		} else if (strnicmp(data, "cifsacl", 7) == 0) {
 			vol->cifs_acl = 1;
 		} else if (strnicmp(data, "nocifsacl", 9) == 0) {
 			vol->cifs_acl = 0;
-		} else if (strnicmp(data, "acl",3) == 0) {
+		} else if (strnicmp(data, "acl", 3) == 0) {
 			vol->no_psx_acl = 0;
-		} else if (strnicmp(data, "noacl",5) == 0) {
+		} else if (strnicmp(data, "noacl", 5) == 0) {
 			vol->no_psx_acl = 1;
-		} else if (strnicmp(data, "sign",4) == 0) {
+		} else if (strnicmp(data, "sign", 4) == 0) {
 			vol->secFlg |= CIFSSEC_MUST_SIGN;
 /*		} else if (strnicmp(data, "seal",4) == 0) {
 			vol->secFlg |= CIFSSEC_MUST_SEAL; */
-		} else if (strnicmp(data, "direct",6) == 0) {
+		} else if (strnicmp(data, "direct", 6) == 0) {
 			vol->direct_io = 1;
-		} else if (strnicmp(data, "forcedirectio",13) == 0) {
+		} else if (strnicmp(data, "forcedirectio", 13) == 0) {
 			vol->direct_io = 1;
-		} else if (strnicmp(data, "in6_addr",8) == 0) {
+		} else if (strnicmp(data, "in6_addr", 8) == 0) {
 			if (!value || !*value) {
 				vol->in6_addr = NULL;
 			} else if (strnlen(value, 49) == 48) {
 				vol->in6_addr = value;
 			} else {
-				printk(KERN_WARNING "CIFS: ip v6 address not 48 characters long\n");
+				printk(KERN_WARNING "CIFS: ip v6 address not "
+						    "48 characters long\n");
 				return 1;
 			}
 		} else if (strnicmp(data, "noac", 4) == 0) {
-			printk(KERN_WARNING "CIFS: Mount option noac not supported. Instead set /proc/fs/cifs/LookupCacheEnabled to 0\n");
+			printk(KERN_WARNING "CIFS: Mount option noac not "
+				"supported. Instead set "
+				"/proc/fs/cifs/LookupCacheEnabled to 0\n");
 		} else
-			printk(KERN_WARNING "CIFS: Unknown mount option %s\n",data);
+			printk(KERN_WARNING "CIFS: Unknown mount option %s\n",
+						data);
 	}
 	if (vol->UNC == NULL) {
 		if (devname == NULL) {
-			printk(KERN_WARNING "CIFS: Missing UNC name for mount target\n");
+			printk(KERN_WARNING "CIFS: Missing UNC name for mount "
+						"target\n");
 			return 1;
 		}
 		if ((temp_len = strnlen(devname, 300)) < 300) {
-			vol->UNC = kmalloc(temp_len+1,GFP_KERNEL);
+			vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
 			if (vol->UNC == NULL)
 				return 1;
-			strcpy(vol->UNC,devname);
+			strcpy(vol->UNC, devname);
 			if (strncmp(vol->UNC, "//", 2) == 0) {
 				vol->UNC[0] = '\\';
 				vol->UNC[1] = '\\';
 			} else if (strncmp(vol->UNC, "\\\\", 2) != 0) {
-				printk(KERN_WARNING "CIFS: UNC Path does not begin with // or \\\\ \n");
+				printk(KERN_WARNING "CIFS: UNC Path does not "
+						    "begin with // or \\\\ \n");
 				return 1;
 			}
 		} else {
@@ -1257,14 +1286,14 @@
 			return 1;
 		}
 	}
-	if(vol->UNCip == NULL)
+	if (vol->UNCip == NULL)
 		vol->UNCip = &vol->UNC[2];
 
 	return 0;
 }
 
 static struct cifsSesInfo *
-cifs_find_tcp_session(struct in_addr * target_ip_addr, 
+cifs_find_tcp_session(struct in_addr *target_ip_addr,
 		struct in6_addr *target_ip6_addr,
 		 char *userName, struct TCP_Server_Info **psrvTcp)
 {
@@ -1276,19 +1305,25 @@
 	list_for_each(tmp, &GlobalSMBSessionList) {
 		ses = list_entry(tmp, struct cifsSesInfo, cifsSessionList);
 		if (ses->server) {
-			if((target_ip_addr && 
+			if ((target_ip_addr &&
 				(ses->server->addr.sockAddr.sin_addr.s_addr
 				  == target_ip_addr->s_addr)) || (target_ip6_addr
 				&& memcmp(&ses->server->addr.sockAddr6.sin6_addr,
-					target_ip6_addr,sizeof(*target_ip6_addr)))){
-				/* BB lock server and tcp session and increment use count here?? */
-				*psrvTcp = ses->server;	/* found a match on the TCP session */
+					target_ip6_addr, sizeof(*target_ip6_addr)))) {
+				/* BB lock server and tcp session and increment
+				      use count here?? */
+
+				/* found a match on the TCP session */
+				*psrvTcp = ses->server;
+
 				/* BB check if reconnection needed */
 				if (strncmp
 				    (ses->userName, userName,
 				     MAX_USERNAME_SIZE) == 0){
 					read_unlock(&GlobalSMBSeslock);
-					return ses;	/* found exact match on both tcp and SMB sessions */
+					/* Found exact match on both TCP and
+					   SMB sessions */
+					return ses;
 				}
 			}
 		}
@@ -1319,7 +1354,8 @@
 	/* BB lock tcon, server and tcp session and increment use count here? */
 					/* found a match on the TCP session */
 					/* BB check if reconnection needed */
-					cFYI(1,("IP match, old UNC: %s new: %s",
+					cFYI(1,
+					      ("IP match, old UNC: %s new: %s",
 					      tcon->treeName, uncName));
 					if (strncmp
 					    (tcon->treeName, uncName,
@@ -1354,11 +1390,11 @@
 	unsigned int num_referrals;
 	int rc = 0;
 
-	rc = get_dfs_path(xid, pSesInfo,old_path, nls_codepage, 
+	rc = get_dfs_path(xid, pSesInfo, old_path, nls_codepage,
 			&num_referrals, &referrals, remap);
 
 	/* BB Add in code to: if valid refrl, if not ip address contact
-		the helper that resolves tcp names, mount to it, try to 
+		the helper that resolves tcp names, mount to it, try to
 		tcon to it unmount it if fail */
 
 	kfree(referrals);
@@ -1367,10 +1403,9 @@
 }
 
 int
-get_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
-			const char *old_path, const struct nls_table *nls_codepage, 
-			unsigned int *pnum_referrals, 
-			unsigned char ** preferrals, int remap)
+get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
+	     const struct nls_table *nls_codepage, unsigned int *pnum_referrals,
+	     unsigned char **preferrals, int remap)
 {
 	char *temp_unc;
 	int rc = 0;
@@ -1379,7 +1414,8 @@
 
 	if (pSesInfo->ipc_tid == 0) {
 		temp_unc = kmalloc(2 /* for slashes */ +
-			strnlen(pSesInfo->serverName,SERVER_NAME_LEN_WITH_NULL * 2)
+			strnlen(pSesInfo->serverName,
+				SERVER_NAME_LEN_WITH_NULL * 2)
 				 + 1 + 4 /* slash IPC$ */  + 2,
 				GFP_KERNEL);
 		if (temp_unc == NULL)
@@ -1390,7 +1426,7 @@
 		strcpy(temp_unc + 2 + strlen(pSesInfo->serverName), "\\IPC$");
 		rc = CIFSTCon(xid, pSesInfo, temp_unc, NULL, nls_codepage);
 		cFYI(1,
-		     ("CIFS Tcon rc = %d ipc_tid = %d", rc,pSesInfo->ipc_tid));
+		     ("CIFS Tcon rc = %d ipc_tid = %d", rc, pSesInfo->ipc_tid));
 		kfree(temp_unc);
 	}
 	if (rc == 0)
@@ -1401,62 +1437,63 @@
 }
 
 /* See RFC1001 section 14 on representation of Netbios names */
-static void rfc1002mangle(char * target,char * source, unsigned int length)
+static void rfc1002mangle(char *target, char *source, unsigned int length)
 {
-	unsigned int i,j;
+	unsigned int i, j;
 
-	for(i=0,j=0;i<(length);i++) {
+	for (i = 0, j = 0; i < (length); i++) {
 		/* mask a nibble at a time and encode */
 		target[j] = 'A' + (0x0F & (source[i] >> 4));
 		target[j+1] = 'A' + (0x0F & source[i]);
-		j+=2;
+		j += 2;
 	}
 
 }
 
 
 static int
-ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket, 
-	     char * netbios_name, char * target_name)
+ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket,
+	     char *netbios_name, char *target_name)
 {
 	int rc = 0;
 	int connected = 0;
 	__be16 orig_port = 0;
 
-	if(*csocket == NULL) {
-		rc = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, csocket);
+	if (*csocket == NULL) {
+		rc = sock_create_kern(PF_INET, SOCK_STREAM,
+				      IPPROTO_TCP, csocket);
 		if (rc < 0) {
-			cERROR(1, ("Error %d creating socket",rc));
+			cERROR(1, ("Error %d creating socket", rc));
 			*csocket = NULL;
 			return rc;
 		} else {
 		/* BB other socket options to set KEEPALIVE, NODELAY? */
-			cFYI(1,("Socket created"));
-			(*csocket)->sk->sk_allocation = GFP_NOFS; 
+			cFYI(1, ("Socket created"));
+			(*csocket)->sk->sk_allocation = GFP_NOFS;
 		}
 	}
 
 	psin_server->sin_family = AF_INET;
-	if(psin_server->sin_port) { /* user overrode default port */
+	if (psin_server->sin_port) { /* user overrode default port */
 		rc = (*csocket)->ops->connect(*csocket,
 				(struct sockaddr *) psin_server,
-				sizeof (struct sockaddr_in),0);
+				sizeof (struct sockaddr_in), 0);
 		if (rc >= 0)
 			connected = 1;
-	} 
+	}
 
-	if(!connected) {
-		/* save original port so we can retry user specified port  
+	if (!connected) {
+		/* save original port so we can retry user specified port
 			later if fall back ports fail this time  */
 		orig_port = psin_server->sin_port;
 
 		/* do not retry on the same port we just failed on */
-		if(psin_server->sin_port != htons(CIFS_PORT)) {
+		if (psin_server->sin_port != htons(CIFS_PORT)) {
 			psin_server->sin_port = htons(CIFS_PORT);
 
 			rc = (*csocket)->ops->connect(*csocket,
 					(struct sockaddr *) psin_server,
-					sizeof (struct sockaddr_in),0);
+					sizeof (struct sockaddr_in), 0);
 			if (rc >= 0)
 				connected = 1;
 		}
@@ -1464,60 +1501,63 @@
 	if (!connected) {
 		psin_server->sin_port = htons(RFC1001_PORT);
 		rc = (*csocket)->ops->connect(*csocket, (struct sockaddr *)
-					      psin_server, sizeof (struct sockaddr_in),0);
-		if (rc >= 0) 
+					      psin_server,
+					      sizeof (struct sockaddr_in), 0);
+		if (rc >= 0)
 			connected = 1;
 	}
 
 	/* give up here - unless we want to retry on different
 		protocol families some day */
 	if (!connected) {
-		if(orig_port)
+		if (orig_port)
 			psin_server->sin_port = orig_port;
-		cFYI(1,("Error %d connecting to server via ipv4",rc));
+		cFYI(1, ("Error %d connecting to server via ipv4", rc));
 		sock_release(*csocket);
 		*csocket = NULL;
 		return rc;
 	}
-	/* Eventually check for other socket options to change from 
-		the default. sock_setsockopt not used because it expects 
+	/* Eventually check for other socket options to change from
+		the default. sock_setsockopt not used because it expects
 		user space buffer */
-	 cFYI(1,("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",(*csocket)->sk->sk_sndbuf,
+	 cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
+		 (*csocket)->sk->sk_sndbuf,
 		 (*csocket)->sk->sk_rcvbuf, (*csocket)->sk->sk_rcvtimeo));
 	(*csocket)->sk->sk_rcvtimeo = 7 * HZ;
 	/* make the bufsizes depend on wsize/rsize and max requests */
-	if((*csocket)->sk->sk_sndbuf < (200 * 1024))
+	if ((*csocket)->sk->sk_sndbuf < (200 * 1024))
 		(*csocket)->sk->sk_sndbuf = 200 * 1024;
-	if((*csocket)->sk->sk_rcvbuf < (140 * 1024))
+	if ((*csocket)->sk->sk_rcvbuf < (140 * 1024))
 		(*csocket)->sk->sk_rcvbuf = 140 * 1024;
 
 	/* send RFC1001 sessinit */
-	if(psin_server->sin_port == htons(RFC1001_PORT)) {
+	if (psin_server->sin_port == htons(RFC1001_PORT)) {
 		/* some servers require RFC1001 sessinit before sending
-		negprot - BB check reconnection in case where second 
+		negprot - BB check reconnection in case where second
 		sessinit is sent but no second negprot */
-		struct rfc1002_session_packet * ses_init_buf;
-		struct smb_hdr * smb_buf;
-		ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet), GFP_KERNEL);
-		if(ses_init_buf) {
+		struct rfc1002_session_packet *ses_init_buf;
+		struct smb_hdr *smb_buf;
+		ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet),
+				       GFP_KERNEL);
+		if (ses_init_buf) {
 			ses_init_buf->trailer.session_req.called_len = 32;
-			if(target_name && (target_name[0] != 0)) {
+			if (target_name && (target_name[0] != 0)) {
 				rfc1002mangle(ses_init_buf->trailer.session_req.called_name,
 					target_name, 16);
 			} else {
 				rfc1002mangle(ses_init_buf->trailer.session_req.called_name,
-					DEFAULT_CIFS_CALLED_NAME,16);
+					DEFAULT_CIFS_CALLED_NAME, 16);
 			}
 
 			ses_init_buf->trailer.session_req.calling_len = 32;
 			/* calling name ends in null (byte 16) from old smb
 			convention. */
-			if(netbios_name && (netbios_name[0] !=0)) {
+			if (netbios_name && (netbios_name[0] != 0)) {
 				rfc1002mangle(ses_init_buf->trailer.session_req.calling_name,
-					netbios_name,16);
+					netbios_name, 16);
 			} else {
 				rfc1002mangle(ses_init_buf->trailer.session_req.calling_name,
-					"LINUX_CIFS_CLNT",16);
+					"LINUX_CIFS_CLNT", 16);
 			}
 			ses_init_buf->trailer.session_req.scope1 = 0;
 			ses_init_buf->trailer.session_req.scope2 = 0;
@@ -1527,20 +1567,20 @@
 			rc = smb_send(*csocket, smb_buf, 0x44,
 				(struct sockaddr *)psin_server);
 			kfree(ses_init_buf);
-			msleep(1); /* RFC1001 layer in at least one server 
+			msleep(1); /* RFC1001 layer in at least one server
 				      requires very short break before negprot
 				      presumably because not expecting negprot
 				      to follow so fast.  This is a simple
-				      solution that works without 
+				      solution that works without
 				      complicating the code and causes no
 				      significant slowing down on mount
 				      for everyone else */
 		}
-		/* else the negprot may still work without this 
+		/* else the negprot may still work without this
 		even though malloc failed */
-		
+
 	}
-		
+
 	return rc;
 }
 
@@ -1551,41 +1591,42 @@
 	int connected = 0;
 	__be16 orig_port = 0;
 
-	if(*csocket == NULL) {
-		rc = sock_create_kern(PF_INET6, SOCK_STREAM, IPPROTO_TCP, csocket);
+	if (*csocket == NULL) {
+		rc = sock_create_kern(PF_INET6, SOCK_STREAM,
+				      IPPROTO_TCP, csocket);
 		if (rc < 0) {
-			cERROR(1, ("Error %d creating ipv6 socket",rc));
+			cERROR(1, ("Error %d creating ipv6 socket", rc));
 			*csocket = NULL;
 			return rc;
 		} else {
 		/* BB other socket options to set KEEPALIVE, NODELAY? */
-			 cFYI(1,("ipv6 Socket created"));
+			 cFYI(1, ("ipv6 Socket created"));
 			(*csocket)->sk->sk_allocation = GFP_NOFS;
 		}
 	}
 
 	psin_server->sin6_family = AF_INET6;
 
-	if(psin_server->sin6_port) { /* user overrode default port */
+	if (psin_server->sin6_port) { /* user overrode default port */
 		rc = (*csocket)->ops->connect(*csocket,
 				(struct sockaddr *) psin_server,
-				sizeof (struct sockaddr_in6),0);
+				sizeof (struct sockaddr_in6), 0);
 		if (rc >= 0)
 			connected = 1;
-	} 
+	}
 
-	if(!connected) {
-		/* save original port so we can retry user specified port  
+	if (!connected) {
+		/* save original port so we can retry user specified port
 			later if fall back ports fail this time  */
 
 		orig_port = psin_server->sin6_port;
 		/* do not retry on the same port we just failed on */
-		if(psin_server->sin6_port != htons(CIFS_PORT)) {
+		if (psin_server->sin6_port != htons(CIFS_PORT)) {
 			psin_server->sin6_port = htons(CIFS_PORT);
 
 			rc = (*csocket)->ops->connect(*csocket,
 					(struct sockaddr *) psin_server,
-					sizeof (struct sockaddr_in6),0);
+					sizeof (struct sockaddr_in6), 0);
 			if (rc >= 0)
 				connected = 1;
 		}
@@ -1593,31 +1634,31 @@
 	if (!connected) {
 		psin_server->sin6_port = htons(RFC1001_PORT);
 		rc = (*csocket)->ops->connect(*csocket, (struct sockaddr *)
-					 psin_server, sizeof (struct sockaddr_in6),0);
-		if (rc >= 0) 
+				 psin_server, sizeof (struct sockaddr_in6), 0);
+		if (rc >= 0)
 			connected = 1;
 	}
 
 	/* give up here - unless we want to retry on different
 		protocol families some day */
 	if (!connected) {
-		if(orig_port)
+		if (orig_port)
 			psin_server->sin6_port = orig_port;
-		cFYI(1,("Error %d connecting to server via ipv6",rc));
+		cFYI(1, ("Error %d connecting to server via ipv6", rc));
 		sock_release(*csocket);
 		*csocket = NULL;
 		return rc;
 	}
-	/* Eventually check for other socket options to change from 
-		the default. sock_setsockopt not used because it expects 
+	/* Eventually check for other socket options to change from
+		the default. sock_setsockopt not used because it expects
 		user space buffer */
 	(*csocket)->sk->sk_rcvtimeo = 7 * HZ;
-		
+
 	return rc;
 }
 
-void reset_cifs_unix_caps(int xid, struct cifsTconInfo * tcon, 
-			  struct super_block * sb, struct smb_vol * vol_info)
+void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
+			  struct super_block *sb, struct smb_vol *vol_info)
 {
 	/* if we are reconnecting then should we check to see if
 	 * any requested capabilities changed locally e.g. via
@@ -1629,65 +1670,87 @@
 	 * What if we wanted to mount the server share twice once with
 	 * and once without posixacls or posix paths? */
 	__u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
-	   
-	 
-	if(!CIFSSMBQFSUnixInfo(xid, tcon)) {
+
+	if (vol_info && vol_info->no_linux_ext) {
+		tcon->fsUnixInfo.Capability = 0;
+		tcon->unix_ext = 0; /* Unix Extensions disabled */
+		cFYI(1, ("Linux protocol extensions disabled"));
+		return;
+	} else if (vol_info)
+		tcon->unix_ext = 1; /* Unix Extensions supported */
+
+	if (tcon->unix_ext == 0) {
+		cFYI(1, ("Unix extensions disabled so not set on reconnect"));
+		return;
+	}
+
+	if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
 		__u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
-		
+
 		/* check for reconnect case in which we do not
 		   want to change the mount behavior if we can avoid it */
-		if(vol_info == NULL) {
-			/* turn off POSIX ACL and PATHNAMES if not set 
+		if (vol_info == NULL) {
+			/* turn off POSIX ACL and PATHNAMES if not set
 			   originally at mount time */
 			if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
 				cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
 			if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0)
 				cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
-				
-
-			 
-			
 		}
-		
+
 		cap &= CIFS_UNIX_CAP_MASK;
-		if(vol_info && vol_info->no_psx_acl)
+		if (vol_info && vol_info->no_psx_acl)
 			cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
-		else if(CIFS_UNIX_POSIX_ACL_CAP & cap) {
-			cFYI(1,("negotiated posix acl support"));
-			if(sb)
+		else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
+			cFYI(1, ("negotiated posix acl support"));
+			if (sb)
 				sb->s_flags |= MS_POSIXACL;
 		}
 
-		if(vol_info && vol_info->posix_paths == 0)
+		if (vol_info && vol_info->posix_paths == 0)
 			cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
-		else if(cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
-			cFYI(1,("negotiate posix pathnames"));
-			if(sb)
-				CIFS_SB(sb)->mnt_cifs_flags |= 
+		else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
+			cFYI(1, ("negotiate posix pathnames"));
+			if (sb)
+				CIFS_SB(sb)->mnt_cifs_flags |=
 					CIFS_MOUNT_POSIX_PATHS;
 		}
-	
+
 		/* We might be setting the path sep back to a different
 		form if we are reconnecting and the server switched its
-		posix path capability for this share */	
-		if(sb && (CIFS_SB(sb)->prepathlen > 0))
+		posix path capability for this share */
+		if (sb && (CIFS_SB(sb)->prepathlen > 0))
 			CIFS_SB(sb)->prepath[0] = CIFS_DIR_SEP(CIFS_SB(sb));
-	
-		cFYI(1,("Negotiate caps 0x%x",(int)cap));
+
+		if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) {
+			if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
+				CIFS_SB(sb)->rsize = 127 * 1024;
 #ifdef CONFIG_CIFS_DEBUG2
-		if(cap & CIFS_UNIX_FCNTL_CAP)
-			cFYI(1,("FCNTL cap"));
-		if(cap & CIFS_UNIX_EXTATTR_CAP)
-			cFYI(1,("EXTATTR cap"));
-		if(cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
-			cFYI(1,("POSIX path cap"));
-		if(cap & CIFS_UNIX_XATTR_CAP)
-			cFYI(1,("XATTR cap"));
-		if(cap & CIFS_UNIX_POSIX_ACL_CAP)
-			cFYI(1,("POSIX ACL cap"));
+				cFYI(1, ("larger reads not supported by srv"));
+#endif
+			}
+		}
+
+
+		cFYI(1, ("Negotiate caps 0x%x", (int)cap));
+#ifdef CONFIG_CIFS_DEBUG2
+		if (cap & CIFS_UNIX_FCNTL_CAP)
+			cFYI(1, ("FCNTL cap"));
+		if (cap & CIFS_UNIX_EXTATTR_CAP)
+			cFYI(1, ("EXTATTR cap"));
+		if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
+			cFYI(1, ("POSIX path cap"));
+		if (cap & CIFS_UNIX_XATTR_CAP)
+			cFYI(1, ("XATTR cap"));
+		if (cap & CIFS_UNIX_POSIX_ACL_CAP)
+			cFYI(1, ("POSIX ACL cap"));
+		if (cap & CIFS_UNIX_LARGE_READ_CAP)
+			cFYI(1, ("very large read cap"));
+		if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
+			cFYI(1, ("very large write cap"));
 #endif /* CIFS_DEBUG2 */
 		if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
-			cFYI(1,("setting capabilities failed"));
+			cFYI(1, ("setting capabilities failed"));
 		}
 	}
 }
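
A detail worth calling out in reset_cifs_unix_caps() above: on a reconnect (vol_info == NULL) the freshly advertised capabilities are masked against what was negotiated at original mount time, so a reconnect cannot silently turn on POSIX ACLs or POSIX pathnames mid-mount. A minimal sketch of that masking, using placeholder bit values rather than the real CIFS_UNIX_*_CAP definitions in the cifs headers:

    #include <stdio.h>
    #include <stdint.h>

    /* placeholder values for illustration only */
    #define POSIX_ACL_CAP        0x02
    #define POSIX_PATHNAMES_CAP  0x10

    int main(void)
    {
        uint64_t saved_cap = POSIX_PATHNAMES_CAP;             /* ACLs were off at mount time */
        uint64_t cap = POSIX_ACL_CAP | POSIX_PATHNAMES_CAP;   /* server now offers both */

        if ((saved_cap & POSIX_ACL_CAP) == 0)
            cap &= ~POSIX_ACL_CAP;             /* keep ACLs off across the reconnect */
        if ((saved_cap & POSIX_PATHNAMES_CAP) == 0)
            cap &= ~POSIX_PATHNAMES_CAP;

        printf("caps after reconnect: 0x%llx\n", (unsigned long long)cap);  /* prints 0x10 */
        return 0;
    }
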
@@ -1711,8 +1774,8 @@
 	xid = GetXid();
 
 /* cFYI(1, ("Entering cifs_mount. Xid: %d with: %s", xid, mount_data)); */
-	
-	memset(&volume_info,0,sizeof(struct smb_vol));
+
+	memset(&volume_info, 0, sizeof(struct smb_vol));
 	if (cifs_parse_mount_options(mount_data, devname, &volume_info)) {
 		kfree(volume_info.UNC);
 		kfree(volume_info.password);
@@ -1722,15 +1785,15 @@
 	}
 
 	if (volume_info.nullauth) {
-		cFYI(1,("null user"));
+		cFYI(1, ("null user"));
 		volume_info.username = NULL;
 	} else if (volume_info.username) {
 		/* BB fixme parse for domain name here */
-		cFYI(1, ("Username: %s ", volume_info.username));
+		cFYI(1, ("Username: %s", volume_info.username));
 	} else {
 		cifserror("No username specified");
-        /* In userspace mount helper we can get user name from alternate
-           locations such as env variables and files on disk */
+	/* In userspace mount helper we can get user name from alternate
+	   locations such as env variables and files on disk */
 		kfree(volume_info.UNC);
 		kfree(volume_info.password);
 		kfree(volume_info.prepath);
@@ -1739,18 +1802,20 @@
 	}
 
 	if (volume_info.UNCip && volume_info.UNC) {
-		rc = cifs_inet_pton(AF_INET, volume_info.UNCip,&sin_server.sin_addr.s_addr);
+		rc = cifs_inet_pton(AF_INET, volume_info.UNCip,
+				    &sin_server.sin_addr.s_addr);
 
-		if(rc <= 0) {
+		if (rc <= 0) {
 			/* not ipv4 address, try ipv6 */
-			rc = cifs_inet_pton(AF_INET6,volume_info.UNCip,&sin_server6.sin6_addr.in6_u); 
-			if(rc > 0)
+			rc = cifs_inet_pton(AF_INET6, volume_info.UNCip,
+					    &sin_server6.sin6_addr.in6_u);
+			if (rc > 0)
 				address_type = AF_INET6;
 		} else {
 			address_type = AF_INET;
 		}
-       
-		if(rc <= 0) {
+
+		if (rc <= 0) {
 			/* we failed translating address */
 			kfree(volume_info.UNC);
 			kfree(volume_info.password);
@@ -1762,9 +1827,10 @@
 		cFYI(1, ("UNC: %s ip: %s", volume_info.UNC, volume_info.UNCip));
 		/* success */
 		rc = 0;
-	} else if (volume_info.UNCip){
-		/* BB using ip addr as server name connect to the DFS root below */
-		cERROR(1,("Connecting to DFS root not implemented yet"));
+	} else if (volume_info.UNCip) {
+		/* BB using ip addr as server name to connect to the
+		   DFS root below */
+		cERROR(1, ("Connecting to DFS root not implemented yet"));
 		kfree(volume_info.UNC);
 		kfree(volume_info.password);
 		kfree(volume_info.prepath);
@@ -1772,7 +1838,8 @@
 		return -EINVAL;
 	} else /* which server's DFS root would we connect to */ {
 		cERROR(1,
-		       ("CIFS mount error: No UNC path (e.g. -o unc=//192.168.1.100/public) specified"));
+		       ("CIFS mount error: No UNC path (e.g. -o "
+			"unc=//192.168.1.100/public) specified"));
 		kfree(volume_info.UNC);
 		kfree(volume_info.password);
 		kfree(volume_info.prepath);
@@ -1781,13 +1848,14 @@
 	}
 
 	/* this is needed for ASCII cp to Unicode converts */
-	if(volume_info.iocharset == NULL) {
+	if (volume_info.iocharset == NULL) {
 		cifs_sb->local_nls = load_nls_default();
 	/* load_nls_default can not return null */
 	} else {
 		cifs_sb->local_nls = load_nls(volume_info.iocharset);
-		if(cifs_sb->local_nls == NULL) {
-			cERROR(1,("CIFS mount error: iocharset %s not found",volume_info.iocharset));
+		if (cifs_sb->local_nls == NULL) {
+			cERROR(1, ("CIFS mount error: iocharset %s not found",
+				 volume_info.iocharset));
 			kfree(volume_info.UNC);
 			kfree(volume_info.password);
 			kfree(volume_info.prepath);
@@ -1796,12 +1864,12 @@
 		}
 	}
 
-	if(address_type == AF_INET)
+	if (address_type == AF_INET)
 		existingCifsSes = cifs_find_tcp_session(&sin_server.sin_addr,
 			NULL /* no ipv6 addr */,
 			volume_info.username, &srvTcp);
-	else if(address_type == AF_INET6) {
-		cFYI(1,("looking for ipv6 address"));
+	else if (address_type == AF_INET6) {
+		cFYI(1, ("looking for ipv6 address"));
 		existingCifsSes = cifs_find_tcp_session(NULL /* no ipv4 addr */,
 			&sin_server6.sin6_addr,
 			volume_info.username, &srvTcp);
@@ -1813,26 +1881,25 @@
 		return -EINVAL;
 	}
 
-
 	if (srvTcp) {
-		cFYI(1, ("Existing tcp session with server found"));                
+		cFYI(1, ("Existing tcp session with server found"));
 	} else {	/* create socket */
 		if (volume_info.port)
 			sin_server.sin_port = htons(volume_info.port);
 		else
 			sin_server.sin_port = 0;
 		if (address_type == AF_INET6) {
-			cFYI(1,("attempting ipv6 connect"));
+			cFYI(1, ("attempting ipv6 connect"));
 			/* BB should we allow ipv6 on port 139? */
 			/* other OS never observed in Wild doing 139 with v6 */
-			rc = ipv6_connect(&sin_server6,&csocket);
-		} else 
-			rc = ipv4_connect(&sin_server,&csocket,
+			rc = ipv6_connect(&sin_server6, &csocket);
+		} else
+			rc = ipv4_connect(&sin_server, &csocket,
 				  volume_info.source_rfc1001_name,
 				  volume_info.target_rfc1001_name);
 		if (rc < 0) {
-			cERROR(1,
-			       ("Error connecting to IPv4 socket. Aborting operation"));			       
+			cERROR(1, ("Error connecting to IPv4 socket. "
+				   "Aborting operation"));
 			if (csocket != NULL)
 				sock_release(csocket);
 			kfree(volume_info.UNC);
@@ -1853,8 +1920,9 @@
 			return rc;
 		} else {
 			memset(srvTcp, 0, sizeof (struct TCP_Server_Info));
-			memcpy(&srvTcp->addr.sockAddr, &sin_server, sizeof (struct sockaddr_in));
-			atomic_set(&srvTcp->inFlight,0);
+			memcpy(&srvTcp->addr.sockAddr, &sin_server,
+				sizeof (struct sockaddr_in));
+			atomic_set(&srvTcp->inFlight, 0);
 			/* BB Add code for ipv6 case too */
 			srvTcp->ssocket = csocket;
 			srvTcp->protocolType = IPV4;
@@ -1869,7 +1937,7 @@
 			srvTcp->tsk = kthread_run((void *)(void *)cifs_demultiplex_thread, srvTcp, "cifsd");
 			if ( IS_ERR(srvTcp->tsk) ) {
 				rc = PTR_ERR(srvTcp->tsk);
-				cERROR(1,("error %d create cifsd thread", rc));
+				cERROR(1, ("error %d create cifsd thread", rc));
 				srvTcp->tsk = NULL;
 				sock_release(csocket);
 				kfree(volume_info.UNC);
@@ -1880,8 +1948,10 @@
 			}
 			wait_for_completion(&cifsd_complete);
 			rc = 0;
-			memcpy(srvTcp->workstation_RFC1001_name, volume_info.source_rfc1001_name,16);
-			memcpy(srvTcp->server_RFC1001_name, volume_info.target_rfc1001_name,16);
+			memcpy(srvTcp->workstation_RFC1001_name,
+				volume_info.source_rfc1001_name, 16);
+			memcpy(srvTcp->server_RFC1001_name,
+				volume_info.target_rfc1001_name, 16);
 			srvTcp->sequence_number = 0;
 		}
 	}
@@ -1902,16 +1972,17 @@
 				NIPQUAD(sin_server.sin_addr.s_addr));
 		}
 
-		if (!rc){
-			/* volume_info.password freed at unmount */   
+		if (!rc) {
+			/* volume_info.password freed at unmount */
 			if (volume_info.password)
 				pSesInfo->password = volume_info.password;
 			if (volume_info.username)
 				strncpy(pSesInfo->userName,
-					volume_info.username,MAX_USERNAME_SIZE);
+					volume_info.username,
+					MAX_USERNAME_SIZE);
 			if (volume_info.domainname) {
 				int len = strlen(volume_info.domainname);
-				pSesInfo->domainName = 
+				pSesInfo->domainName =
 					kmalloc(len + 1, GFP_KERNEL);
 				if (pSesInfo->domainName)
 					strcpy(pSesInfo->domainName,
@@ -1921,46 +1992,48 @@
 			pSesInfo->overrideSecFlg = volume_info.secFlg;
 			down(&pSesInfo->sesSem);
 			/* BB FIXME need to pass vol->secFlgs BB */
-			rc = cifs_setup_session(xid,pSesInfo, cifs_sb->local_nls);
+			rc = cifs_setup_session(xid, pSesInfo,
+						cifs_sb->local_nls);
 			up(&pSesInfo->sesSem);
 			if (!rc)
 				atomic_inc(&srvTcp->socketUseCount);
 		} else
 			kfree(volume_info.password);
 	}
-    
+
 	/* search for existing tcon to this server share */
 	if (!rc) {
 		if (volume_info.rsize > CIFSMaxBufSize) {
-			cERROR(1,("rsize %d too large, using MaxBufSize",
+			cERROR(1, ("rsize %d too large, using MaxBufSize",
 				volume_info.rsize));
 			cifs_sb->rsize = CIFSMaxBufSize;
-		} else if((volume_info.rsize) && (volume_info.rsize <= CIFSMaxBufSize))
+		} else if ((volume_info.rsize) &&
+				(volume_info.rsize <= CIFSMaxBufSize))
 			cifs_sb->rsize = volume_info.rsize;
 		else /* default */
 			cifs_sb->rsize = CIFSMaxBufSize;
 
 		if (volume_info.wsize > PAGEVEC_SIZE * PAGE_CACHE_SIZE) {
-			cERROR(1,("wsize %d too large using 4096 instead",
+			cERROR(1, ("wsize %d too large, using 4096 instead",
 				  volume_info.wsize));
 			cifs_sb->wsize = 4096;
 		} else if (volume_info.wsize)
 			cifs_sb->wsize = volume_info.wsize;
 		else
-			cifs_sb->wsize = 
+			cifs_sb->wsize =
 				min_t(const int, PAGEVEC_SIZE * PAGE_CACHE_SIZE,
 					127*1024);
 			/* old default of CIFSMaxBufSize was too small now
-			   that SMB Write2 can send multiple pages in kvec.   
+			   that SMB Write2 can send multiple pages in kvec.
 			   RFC1001 does not describe what happens when frame
 			   bigger than 128K is sent so use that as max in
 			   conjunction with 52K kvec constraint on arch with 4K
 			   page size  */
 
 		if (cifs_sb->rsize < 2048) {
-			cifs_sb->rsize = 2048; 
+			cifs_sb->rsize = 2048;
 			/* Windows ME may prefer this */
-			cFYI(1,("readsize set to minimum 2048"));
+			cFYI(1, ("readsize set to minimum: 2048"));
 		}
 		/* calculate prepath */
 		cifs_sb->prepath = volume_info.prepath;
@@ -1968,14 +2041,14 @@
 			cifs_sb->prepathlen = strlen(cifs_sb->prepath);
 			cifs_sb->prepath[0] = CIFS_DIR_SEP(cifs_sb);
 			volume_info.prepath = NULL;
-		} else 
+		} else
 			cifs_sb->prepathlen = 0;
 		cifs_sb->mnt_uid = volume_info.linux_uid;
 		cifs_sb->mnt_gid = volume_info.linux_gid;
 		cifs_sb->mnt_file_mode = volume_info.file_mode;
 		cifs_sb->mnt_dir_mode = volume_info.dir_mode;
-		cFYI(1,("file mode: 0x%x  dir mode: 0x%x",
-			cifs_sb->mnt_file_mode,cifs_sb->mnt_dir_mode));
+		cFYI(1, ("file mode: 0x%x  dir mode: 0x%x",
+			cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode));
 
 		if (volume_info.noperm)
 			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
@@ -1998,7 +2071,7 @@
 		if (volume_info.override_gid)
 			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID;
 		if (volume_info.direct_io) {
-			cFYI(1,("mounting share using direct i/o"));
+			cFYI(1, ("mounting share using direct i/o"));
 			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
 		}
 
@@ -2009,7 +2082,7 @@
 			cFYI(1, ("Found match on UNC path"));
 			/* we can have only one retry value for a connection
 			   to a share so for resources mounted more than once
-			   to the same server share the last value passed in 
+			   to the same server share the last value passed in
 			   for the retry flag is used */
 			tcon->retry = volume_info.retry;
 			tcon->nocase = volume_info.nocase;
@@ -2018,17 +2091,17 @@
 			if (tcon == NULL)
 				rc = -ENOMEM;
 			else {
-				/* check for null share name ie connecting to 
+				/* check for null share name ie connecting to
 				 * dfs root */
 
-				/* BB check if this works for exactly length 
+				/* BB check if this works for exactly length
 				 * three strings */
 				if ((strchr(volume_info.UNC + 3, '\\') == NULL)
 				    && (strchr(volume_info.UNC + 3, '/') ==
 					NULL)) {
 					rc = connect_to_dfs_path(xid, pSesInfo,
 						"", cifs_sb->local_nls,
-						cifs_sb->mnt_cifs_flags & 
+						cifs_sb->mnt_cifs_flags &
 						  CIFS_MOUNT_MAP_SPECIAL_CHR);
 					kfree(volume_info.UNC);
 					FreeXid(xid);
@@ -2037,7 +2110,7 @@
 					/* BB Do we need to wrap sesSem around
 					 * this TCon call and Unix SetFS as
 					 * we do on SessSetup and reconnect? */
-					rc = CIFSTCon(xid, pSesInfo, 
+					rc = CIFSTCon(xid, pSesInfo,
 						volume_info.UNC,
 						tcon, cifs_sb->local_nls);
 					cFYI(1, ("CIFS Tcon rc = %d", rc));
@@ -2074,9 +2147,9 @@
 				   always wake up processes blocked in
 				   tcp in recv_mesg then we could remove the
 				   send_sig call */
-				send_sig(SIGKILL,srvTcp->tsk,1);
+				force_sig(SIGKILL, srvTcp->tsk);
 				tsk = srvTcp->tsk;
-				if(tsk)
+				if (tsk)
 					kthread_stop(tsk);
 			}
 		}
@@ -2085,15 +2158,17 @@
 			tconInfoFree(tcon);
 		if (existingCifsSes == NULL) {
 			if (pSesInfo) {
-				if ((pSesInfo->server) && 
+				if ((pSesInfo->server) &&
 				    (pSesInfo->status == CifsGood)) {
 					int temp_rc;
 					temp_rc = CIFSSMBLogoff(xid, pSesInfo);
 					/* if the socketUseCount is now zero */
 					if ((temp_rc == -ESHUTDOWN) &&
-					   (pSesInfo->server) && (pSesInfo->server->tsk)) {
+					    (pSesInfo->server) &&
+					    (pSesInfo->server->tsk)) {
 						struct task_struct *tsk;
-						send_sig(SIGKILL,pSesInfo->server->tsk,1);
+						force_sig(SIGKILL,
+							pSesInfo->server->tsk);
 						tsk = pSesInfo->server->tsk;
 						if (tsk)
 							kthread_stop(tsk);
@@ -2112,19 +2187,29 @@
 		/* do not care if following two calls succeed - informational */
 		CIFSSMBQFSDeviceInfo(xid, tcon);
 		CIFSSMBQFSAttributeInfo(xid, tcon);
-		
+
 		/* tell server which Unix caps we support */
 		if (tcon->ses->capabilities & CAP_UNIX)
+			/* the caps reset checks mount options to see if unix
+			   extensions are disabled for just this mount */
 			reset_cifs_unix_caps(xid, tcon, sb, &volume_info);
-		
+		else
+			tcon->unix_ext = 0; /* server does not support them */
+
+		if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
+			cifs_sb->rsize = 1024 * 127;
+#ifdef CONFIG_CIFS_DEBUG2
+			cFYI(1, ("no very large read support, rsize now 127K"));
+#endif
+		}
 		if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X))
 			cifs_sb->wsize = min(cifs_sb->wsize,
 					     (tcon->ses->server->maxBuf -
 					      MAX_CIFS_HDR_SIZE));
 		if (!(tcon->ses->capabilities & CAP_LARGE_READ_X))
-                        cifs_sb->rsize = min(cifs_sb->rsize,
-                                             (tcon->ses->server->maxBuf -
-                                              MAX_CIFS_HDR_SIZE));
+			cifs_sb->rsize = min(cifs_sb->rsize,
+					     (tcon->ses->server->maxBuf -
+					      MAX_CIFS_HDR_SIZE));
 	}
 
 	/* volume_info.password is freed above when existing session found
@@ -2177,7 +2262,8 @@
 	pSMB->req_no_secext.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
 	pSMB->req_no_secext.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
 
-	if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+	if (ses->server->secMode &
+			(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
 		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 
 	capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
@@ -2196,7 +2282,7 @@
 	}
 	pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
 
-	pSMB->req_no_secext.CaseInsensitivePasswordLength = 
+	pSMB->req_no_secext.CaseInsensitivePasswordLength =
 		cpu_to_le16(CIFS_SESS_KEY_SIZE);
 
 	pSMB->req_no_secext.CaseSensitivePasswordLength =
@@ -2214,9 +2300,9 @@
 		}
 		if (user == NULL)
 			bytes_returned = 0; /* skip null user */
-	        else
+		else
 			bytes_returned =
-			        cifs_strtoUCS((__le16 *) bcc_ptr, user, 100,
+				cifs_strtoUCS((__le16 *) bcc_ptr, user, 100,
 					nls_codepage);
 		/* convert number of 16 bit words to bytes */
 		bcc_ptr += 2 * bytes_returned;
@@ -2246,7 +2332,7 @@
 		bcc_ptr += 2 * bytes_returned;
 		bcc_ptr += 2;
 	} else {
-		if (user != NULL) {                
+		if (user != NULL) {
 		    strncpy(bcc_ptr, user, 200);
 		    bcc_ptr += strnlen(user, 200);
 		}
@@ -2281,11 +2367,12 @@
 		__u16 action = le16_to_cpu(pSMBr->resp.Action);
 		__u16 blob_len = le16_to_cpu(pSMBr->resp.SecurityBlobLength);
 		if (action & GUEST_LOGIN)
-			cFYI(1, (" Guest login"));	/* do we want to mark SesInfo struct ? */
-		ses->Suid = smb_buffer_response->Uid;	/* UID left in wire format (le) */
+			cFYI(1, (" Guest login")); /* BB mark SesInfo struct? */
+		ses->Suid = smb_buffer_response->Uid; /* UID left in wire format
+							 (little endian) */
 		cFYI(1, ("UID = %d ", ses->Suid));
-         /* response can have either 3 or 4 word count - Samba sends 3 */
-		bcc_ptr = pByteArea(smb_buffer_response);	
+	/* response can have either 3 or 4 word count - Samba sends 3 */
+		bcc_ptr = pByteArea(smb_buffer_response);
 		if ((pSMBr->resp.hdr.WordCount == 3)
 		    || ((pSMBr->resp.hdr.WordCount == 4)
 			&& (blob_len < pSMBr->resp.ByteCount))) {
@@ -2295,8 +2382,10 @@
 			if (smb_buffer->Flags2 & SMBFLG2_UNICODE) {
 				if ((long) (bcc_ptr) % 2) {
 					remaining_words =
-					    (BCC(smb_buffer_response) - 1) /2;
-					bcc_ptr++;	/* Unicode strings must be word aligned */
+					    (BCC(smb_buffer_response) - 1) / 2;
+					/* Unicode strings must be word
+					   aligned */
+					bcc_ptr++;
 				} else {
 					remaining_words =
 						BCC(smb_buffer_response) / 2;
@@ -2307,13 +2396,15 @@
 /* We look for obvious messed up bcc or strings in response so we do not go off
    the end since (at least) WIN2K and Windows XP have a major bug in not null
    terminating last Unicode string in response  */
-				if(ses->serverOS)
+				if (ses->serverOS)
 					kfree(ses->serverOS);
-				ses->serverOS = kzalloc(2 * (len + 1), GFP_KERNEL);
-				if(ses->serverOS == NULL)
+				ses->serverOS = kzalloc(2 * (len + 1),
+							GFP_KERNEL);
+				if (ses->serverOS == NULL)
 					goto sesssetup_nomem;
 				cifs_strfromUCS_le(ses->serverOS,
-					   (__le16 *)bcc_ptr, len,nls_codepage);
+						   (__le16 *)bcc_ptr,
+						   len, nls_codepage);
 				bcc_ptr += 2 * (len + 1);
 				remaining_words -= len + 1;
 				ses->serverOS[2 * len] = 0;
@@ -2322,42 +2413,49 @@
 					len = UniStrnlen((wchar_t *)bcc_ptr,
 							 remaining_words-1);
 					kfree(ses->serverNOS);
-					ses->serverNOS = kzalloc(2 * (len + 1),GFP_KERNEL);
-					if(ses->serverNOS == NULL)
+					ses->serverNOS = kzalloc(2 * (len + 1),
+								 GFP_KERNEL);
+					if (ses->serverNOS == NULL)
 						goto sesssetup_nomem;
 					cifs_strfromUCS_le(ses->serverNOS,
-							   (__le16 *)bcc_ptr,len,nls_codepage);
+							   (__le16 *)bcc_ptr,
+							   len, nls_codepage);
 					bcc_ptr += 2 * (len + 1);
 					ses->serverNOS[2 * len] = 0;
 					ses->serverNOS[1 + (2 * len)] = 0;
-					if(strncmp(ses->serverNOS,
-						"NT LAN Manager 4",16) == 0) {
-						cFYI(1,("NT4 server"));
+					if (strncmp(ses->serverNOS,
+						"NT LAN Manager 4", 16) == 0) {
+						cFYI(1, ("NT4 server"));
 						ses->flags |= CIFS_SES_NT4;
 					}
 					remaining_words -= len + 1;
 					if (remaining_words > 0) {
 						len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
-          /* last string is not always null terminated (for e.g. for Windows XP & 2000) */
-						if(ses->serverDomain)
+				/* last string is not always null terminated
+				   (e.g. for Windows XP & 2000) */
+						if (ses->serverDomain)
 							kfree(ses->serverDomain);
 						ses->serverDomain =
-						    kzalloc(2*(len+1),GFP_KERNEL);
-						if(ses->serverDomain == NULL)
+						    kzalloc(2*(len+1),
+							    GFP_KERNEL);
+						if (ses->serverDomain == NULL)
 							goto sesssetup_nomem;
 						cifs_strfromUCS_le(ses->serverDomain,
-						     (__le16 *)bcc_ptr,len,nls_codepage);
+							(__le16 *)bcc_ptr,
+							len, nls_codepage);
 						bcc_ptr += 2 * (len + 1);
 						ses->serverDomain[2*len] = 0;
 						ses->serverDomain[1+(2*len)] = 0;
-					} /* else no more room so create dummy domain string */
-					else {
-						if(ses->serverDomain)
+					} else { /* else no more room so create
+						  dummy domain string */
+						if (ses->serverDomain)
 							kfree(ses->serverDomain);
-						ses->serverDomain = 
+						ses->serverDomain =
 							kzalloc(2, GFP_KERNEL);
 					}
-				} else {	/* no room so create dummy domain and NOS string */
+				} else { /* no room so create dummy domain
+					    and NOS string */
+
 					/* if these kcallocs fail not much we
 					   can do, but better to not fail the
 					   sesssetup itself */
@@ -2374,19 +2472,22 @@
 				    pByteArea(smb_buffer_response)
 					    <= BCC(smb_buffer_response)) {
 					kfree(ses->serverOS);
-					ses->serverOS = kzalloc(len + 1,GFP_KERNEL);
-					if(ses->serverOS == NULL)
+					ses->serverOS = kzalloc(len + 1,
+								GFP_KERNEL);
+					if (ses->serverOS == NULL)
 						goto sesssetup_nomem;
-					strncpy(ses->serverOS,bcc_ptr, len);
+					strncpy(ses->serverOS, bcc_ptr, len);
 
 					bcc_ptr += len;
-					bcc_ptr[0] = 0;	/* null terminate the string */
+					/* null terminate the string */
+					bcc_ptr[0] = 0;
 					bcc_ptr++;
 
 					len = strnlen(bcc_ptr, 1024);
 					kfree(ses->serverNOS);
-					ses->serverNOS = kzalloc(len + 1,GFP_KERNEL);
-					if(ses->serverNOS == NULL)
+					ses->serverNOS = kzalloc(len + 1,
+								 GFP_KERNEL);
+					if (ses->serverNOS == NULL)
 						goto sesssetup_nomem;
 					strncpy(ses->serverNOS, bcc_ptr, len);
 					bcc_ptr += len;
@@ -2394,23 +2495,27 @@
 					bcc_ptr++;
 
 					len = strnlen(bcc_ptr, 1024);
-					if(ses->serverDomain)
+					if (ses->serverDomain)
 						kfree(ses->serverDomain);
-					ses->serverDomain = kzalloc(len + 1,GFP_KERNEL);
-					if(ses->serverDomain == NULL)
+					ses->serverDomain = kzalloc(len + 1,
+								    GFP_KERNEL);
+					if (ses->serverDomain == NULL)
 						goto sesssetup_nomem;
-					strncpy(ses->serverDomain, bcc_ptr, len);
+					strncpy(ses->serverDomain, bcc_ptr,
+						len);
 					bcc_ptr += len;
 					bcc_ptr[0] = 0;
 					bcc_ptr++;
 				} else
 					cFYI(1,
-					     ("Variable field of length %d extends beyond end of smb ",
+					     ("Variable field of length %d "
+						"extends beyond end of smb ",
 					      len));
 			}
 		} else {
 			cERROR(1,
-			       (" Security Blob Length extends beyond end of SMB"));
+			       (" Security Blob Length extends beyond "
+				"end of SMB"));
 		}
 	} else {
 		cERROR(1,
@@ -2429,7 +2534,7 @@
 
 static int
 CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
-			      struct cifsSesInfo *ses, int * pNTLMv2_flag,
+			      struct cifsSesInfo *ses, int *pNTLMv2_flag,
 			      const struct nls_table *nls_codepage)
 {
 	struct smb_hdr *smb_buffer;
@@ -2449,7 +2554,7 @@
 	__u16 count;
 
 	cFYI(1, ("In NTLMSSP sesssetup (negotiate)"));
-	if(ses == NULL)
+	if (ses == NULL)
 		return -EINVAL;
 	domain = ses->domainName;
 	*pNTLMv2_flag = FALSE;
@@ -2473,7 +2578,7 @@
 	pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
 	pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
 
-	if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+	if (ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
 		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 
 	capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
@@ -2501,9 +2606,9 @@
 	    NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_NTLM |
 	    NTLMSSP_NEGOTIATE_56 |
 	    /* NTLMSSP_NEGOTIATE_ALWAYS_SIGN | */ NTLMSSP_NEGOTIATE_128;
-	if(sign_CIFS_PDUs)
+	if (sign_CIFS_PDUs)
 		negotiate_flags |= NTLMSSP_NEGOTIATE_SIGN;
-/*	if(ntlmv2_support)
+/*	if (ntlmv2_support)
 		negotiate_flags |= NTLMSSP_NEGOTIATE_NTLMV2;*/
 	/* setup pointers to domain name and workstation name */
 	bcc_ptr += SecurityBlobLength;
@@ -2573,11 +2678,11 @@
 		__u16 blob_len = le16_to_cpu(pSMBr->resp.SecurityBlobLength);
 
 		if (action & GUEST_LOGIN)
-			cFYI(1, (" Guest login"));	
-        /* Do we want to set anything in SesInfo struct when guest login? */
+			cFYI(1, (" Guest login"));
+	/* Do we want to set anything in SesInfo struct when guest login? */
 
-		bcc_ptr = pByteArea(smb_buffer_response);	
-        /* response can have either 3 or 4 word count - Samba sends 3 */
+		bcc_ptr = pByteArea(smb_buffer_response);
+	/* response can have either 3 or 4 word count - Samba sends 3 */
 
 		SecurityBlob2 = (PCHALLENGE_MESSAGE) bcc_ptr;
 		if (SecurityBlob2->MessageType != NtLmChallenge) {
@@ -2585,7 +2690,7 @@
 			     ("Unexpected NTLMSSP message type received %d",
 			      SecurityBlob2->MessageType));
 		} else if (ses) {
-			ses->Suid = smb_buffer_response->Uid; /* UID left in le format */ 
+			ses->Suid = smb_buffer_response->Uid; /* UID left in le format */
 			cFYI(1, ("UID = %d", ses->Suid));
 			if ((pSMBr->resp.hdr.WordCount == 3)
 			    || ((pSMBr->resp.hdr.WordCount == 4)
@@ -2603,18 +2708,18 @@
 				memcpy(ses->server->cryptKey,
 				       SecurityBlob2->Challenge,
 				       CIFS_CRYPTO_KEY_SIZE);
-				if(SecurityBlob2->NegotiateFlags & 
+				if (SecurityBlob2->NegotiateFlags &
 					cpu_to_le32(NTLMSSP_NEGOTIATE_NTLMV2))
 					*pNTLMv2_flag = TRUE;
 
-				if((SecurityBlob2->NegotiateFlags & 
-					cpu_to_le32(NTLMSSP_NEGOTIATE_ALWAYS_SIGN)) 
+				if ((SecurityBlob2->NegotiateFlags &
+					cpu_to_le32(NTLMSSP_NEGOTIATE_ALWAYS_SIGN))
 					|| (sign_CIFS_PDUs > 1))
-						ses->server->secMode |= 
-							SECMODE_SIGN_REQUIRED;	
-				if ((SecurityBlob2->NegotiateFlags & 
+						ses->server->secMode |=
+							SECMODE_SIGN_REQUIRED;
+				if ((SecurityBlob2->NegotiateFlags &
 					cpu_to_le32(NTLMSSP_NEGOTIATE_SIGN)) && (sign_CIFS_PDUs))
-						ses->server->secMode |= 
+						ses->server->secMode |=
 							SECMODE_SIGN_ENABLED;
 
 				if (smb_buffer->Flags2 & SMBFLG2_UNICODE) {
@@ -2622,7 +2727,8 @@
 						remaining_words =
 						    (BCC(smb_buffer_response)
 						     - 1) / 2;
-						bcc_ptr++;	/* Unicode strings must be word aligned */
+					 /* Must word align unicode strings */
+						bcc_ptr++;
 					} else {
 						remaining_words =
 						    BCC
@@ -2634,7 +2740,7 @@
 /* We look for obvious messed up bcc or strings in response so we do not go off
    the end since (at least) WIN2K and Windows XP have a major bug in not null
    terminating last Unicode string in response  */
-					if(ses->serverOS)
+					if (ses->serverOS)
 						kfree(ses->serverOS);
 					ses->serverOS =
 					    kzalloc(2 * (len + 1), GFP_KERNEL);
@@ -2667,8 +2773,9 @@
 							       (2 * len)] = 0;
 						remaining_words -= len + 1;
 						if (remaining_words > 0) {
-							len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);	
-           /* last string is not always null terminated (for e.g. for Windows XP & 2000) */
+							len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
+				/* last string not always null terminated
+				   (e.g. for Windows XP & 2000) */
 							kfree(ses->serverDomain);
 							ses->serverDomain =
 							    kzalloc(2 *
@@ -2706,7 +2813,7 @@
 					if (((long) bcc_ptr + len) - (long)
 					    pByteArea(smb_buffer_response)
 					    <= BCC(smb_buffer_response)) {
-						if(ses->serverOS)
+						if (ses->serverOS)
 							kfree(ses->serverOS);
 						ses->serverOS =
 						    kzalloc(len + 1,
@@ -2733,18 +2840,20 @@
 						ses->serverDomain =
 						    kzalloc(len + 1,
 							    GFP_KERNEL);
-						strncpy(ses->serverDomain, bcc_ptr, len);	
+						strncpy(ses->serverDomain,
+							bcc_ptr, len);
 						bcc_ptr += len;
 						bcc_ptr[0] = 0;
 						bcc_ptr++;
 					} else
 						cFYI(1,
-						     ("Variable field of length %d extends beyond end of smb",
+						     ("field of length %d "
+						    "extends beyond end of smb",
 						      len));
 				}
 			} else {
-				cERROR(1,
-				       (" Security Blob Length extends beyond end of SMB"));
+				cERROR(1, ("Security Blob Length extends beyond"
+					   " end of SMB"));
 			}
 		} else {
 			cERROR(1, ("No session structure passed in."));
@@ -2783,7 +2892,7 @@
 	__u16 count;
 
 	cFYI(1, ("In NTLMSSPSessSetup (Authenticate)"));
-	if(ses == NULL)
+	if (ses == NULL)
 		return -EINVAL;
 	user = ses->userName;
 	domain = ses->domainName;
@@ -2808,7 +2917,7 @@
 
 	pSMB->req.hdr.Uid = ses->Suid;
 
-	if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+	if (ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
 		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 
 	capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
@@ -2832,13 +2941,13 @@
 	strncpy(SecurityBlob->Signature, NTLMSSP_SIGNATURE, 8);
 	SecurityBlob->MessageType = NtLmAuthenticate;
 	bcc_ptr += SecurityBlobLength;
-	negotiate_flags = 
+	negotiate_flags =
 	    NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_REQUEST_TARGET |
 	    NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_TARGET_INFO |
 	    0x80000000 | NTLMSSP_NEGOTIATE_128;
-	if(sign_CIFS_PDUs)
+	if (sign_CIFS_PDUs)
 		negotiate_flags |= /* NTLMSSP_NEGOTIATE_ALWAYS_SIGN |*/ NTLMSSP_NEGOTIATE_SIGN;
-	if(ntlmv2_flag)
+	if (ntlmv2_flag)
 		negotiate_flags |= NTLMSSP_NEGOTIATE_NTLMV2;
 
 /* setup pointers to domain name and workstation name */
@@ -2902,13 +3011,17 @@
 			    cpu_to_le16(len);
 		}
 
-		/* SecurityBlob->WorkstationName.Length = cifs_strtoUCS((__le16 *) bcc_ptr, "AMACHINE",64, nls_codepage);
+		/* SecurityBlob->WorkstationName.Length =
+		 cifs_strtoUCS((__le16 *) bcc_ptr, "AMACHINE",64, nls_codepage);
 		   SecurityBlob->WorkstationName.Length *= 2;
-		   SecurityBlob->WorkstationName.MaximumLength = cpu_to_le16(SecurityBlob->WorkstationName.Length);
-		   SecurityBlob->WorkstationName.Buffer = cpu_to_le32(SecurityBlobLength);
+		   SecurityBlob->WorkstationName.MaximumLength =
+			cpu_to_le16(SecurityBlob->WorkstationName.Length);
+		   SecurityBlob->WorkstationName.Buffer =
+				 cpu_to_le32(SecurityBlobLength);
 		   bcc_ptr += SecurityBlob->WorkstationName.Length;
 		   SecurityBlobLength += SecurityBlob->WorkstationName.Length;
-		   SecurityBlob->WorkstationName.Length = cpu_to_le16(SecurityBlob->WorkstationName.Length);  */
+		   SecurityBlob->WorkstationName.Length =
+			cpu_to_le16(SecurityBlob->WorkstationName.Length);  */
 
 		if ((long) bcc_ptr % 2) {
 			*bcc_ptr = 0;
@@ -2994,17 +3107,20 @@
 		__u16 blob_len =
 		    le16_to_cpu(pSMBr->resp.SecurityBlobLength);
 		if (action & GUEST_LOGIN)
-			cFYI(1, (" Guest login"));	/* BB do we want to set anything in SesInfo struct ? */
-/*        if(SecurityBlob2->MessageType != NtLm??){                               
-                 cFYI("Unexpected message type on auth response is %d ")); 
-        } */
+			cFYI(1, (" Guest login")); /* BB Should we set anything
+							 in SesInfo struct ? */
+/*		if (SecurityBlob2->MessageType != NtLm??) {
+			cFYI("Unexpected message type on auth response is %d"));
+		} */
+
 		if (ses) {
 			cFYI(1,
-			     ("Does UID on challenge %d match auth response UID %d ",
+			     ("Check challenge UID %d vs auth response UID %d",
 			      ses->Suid, smb_buffer_response->Uid));
-			ses->Suid = smb_buffer_response->Uid; /* UID left in wire format */
-			bcc_ptr = pByteArea(smb_buffer_response);	
-            /* response can have either 3 or 4 word count - Samba sends 3 */
+			/* UID left in wire format */
+			ses->Suid = smb_buffer_response->Uid;
+			bcc_ptr = pByteArea(smb_buffer_response);
+		/* response can have either 3 or 4 word count - Samba sends 3 */
 			if ((pSMBr->resp.hdr.WordCount == 3)
 			    || ((pSMBr->resp.hdr.WordCount == 4)
 				&& (blob_len <
@@ -3034,7 +3150,7 @@
 /* We look for obvious messed up bcc or strings in response so we do not go off
   the end since (at least) WIN2K and Windows XP have a major bug in not null
   terminating last Unicode string in response  */
-					if(ses->serverOS)
+					if (ses->serverOS)
 						kfree(ses->serverOS);
 					ses->serverOS =
 					    kzalloc(2 * (len + 1), GFP_KERNEL);
@@ -3066,9 +3182,9 @@
 						ses->serverNOS[1+(2*len)] = 0;
 						remaining_words -= len + 1;
 						if (remaining_words > 0) {
-							len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);	
+							len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
      /* last string not always null terminated (e.g. for Windows XP & 2000) */
-							if(ses->serverDomain)
+							if (ses->serverDomain)
 								kfree(ses->serverDomain);
 							ses->serverDomain =
 							    kzalloc(2 *
@@ -3096,12 +3212,12 @@
 							    = 0;
 						} /* else no more room so create dummy domain string */
 						else {
-							if(ses->serverDomain)
+							if (ses->serverDomain)
 								kfree(ses->serverDomain);
 							ses->serverDomain = kzalloc(2,GFP_KERNEL);
 						}
 					} else {  /* no room so create dummy domain and NOS string */
-						if(ses->serverDomain)
+						if (ses->serverDomain)
 							kfree(ses->serverDomain);
 						ses->serverDomain = kzalloc(2, GFP_KERNEL);
 						kfree(ses->serverNOS);
@@ -3109,10 +3225,10 @@
 					}
 				} else {	/* ASCII */
 					len = strnlen(bcc_ptr, 1024);
-					if (((long) bcc_ptr + len) - 
-                        (long) pByteArea(smb_buffer_response) 
-                            <= BCC(smb_buffer_response)) {
-						if(ses->serverOS)
+					if (((long) bcc_ptr + len) -
+					   (long) pByteArea(smb_buffer_response)
+						<= BCC(smb_buffer_response)) {
+						if (ses->serverOS)
 							kfree(ses->serverOS);
 						ses->serverOS = kzalloc(len + 1,GFP_KERNEL);
 						strncpy(ses->serverOS,bcc_ptr, len);
@@ -3123,28 +3239,35 @@
 
 						len = strnlen(bcc_ptr, 1024);
 						kfree(ses->serverNOS);
-						ses->serverNOS = kzalloc(len+1,GFP_KERNEL);
-						strncpy(ses->serverNOS, bcc_ptr, len);	
+						ses->serverNOS = kzalloc(len+1,
+								    GFP_KERNEL);
+						strncpy(ses->serverNOS,
+							bcc_ptr, len);
 						bcc_ptr += len;
 						bcc_ptr[0] = 0;
 						bcc_ptr++;
 
 						len = strnlen(bcc_ptr, 1024);
-						if(ses->serverDomain)
+						if (ses->serverDomain)
 							kfree(ses->serverDomain);
-						ses->serverDomain = kzalloc(len+1,GFP_KERNEL);
-						strncpy(ses->serverDomain, bcc_ptr, len);
+						ses->serverDomain =
+								kzalloc(len+1,
+								    GFP_KERNEL);
+						strncpy(ses->serverDomain,
+							bcc_ptr, len);
 						bcc_ptr += len;
 						bcc_ptr[0] = 0;
 						bcc_ptr++;
 					} else
 						cFYI(1,
-						     ("Variable field of length %d extends beyond end of smb ",
+						     ("field of length %d "
+						   "extends beyond end of smb ",
 						      len));
 				}
 			} else {
 				cERROR(1,
-				       (" Security Blob Length extends beyond end of SMB"));
+				       (" Security Blob extends beyond end "
+					"of SMB"));
 			}
 		} else {
 			cERROR(1, ("No session structure passed in."));
@@ -3196,7 +3319,7 @@
 	pSMB->AndXCommand = 0xFF;
 	pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
 	bcc_ptr = &pSMB->Password[0];
-	if((ses->server->secMode) & SECMODE_USER) {
+	if ((ses->server->secMode) & SECMODE_USER) {
 		pSMB->PasswordLength = cpu_to_le16(1);	/* minimum */
 		*bcc_ptr = 0; /* password is null byte */
 		bcc_ptr++;              /* skip password */
@@ -3210,7 +3333,7 @@
 		   by Samba (not sure whether other servers allow
 		   NTLMv2 password here) */
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
-		if((extended_security & CIFSSEC_MAY_LANMAN) && 
+		if ((extended_security & CIFSSEC_MAY_LANMAN) &&
 			(ses->server->secType == LANMAN))
 			calc_lanman_hash(ses, bcc_ptr);
 		else
@@ -3220,14 +3343,14 @@
 			     bcc_ptr);
 
 		bcc_ptr += CIFS_SESS_KEY_SIZE;
-		if(ses->capabilities & CAP_UNICODE) {
+		if (ses->capabilities & CAP_UNICODE) {
 			/* must align unicode strings */
 			*bcc_ptr = 0; /* null byte password */
 			bcc_ptr++;
 		}
 	}
 
-	if(ses->server->secMode & 
+	if (ses->server->secMode &
 			(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
 		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 
@@ -3240,8 +3363,8 @@
 	if (ses->capabilities & CAP_UNICODE) {
 		smb_buffer->Flags2 |= SMBFLG2_UNICODE;
 		length =
-		    cifs_strtoUCS((__le16 *) bcc_ptr, tree, 
-			6 /* max utf8 char length in bytes */ * 
+		    cifs_strtoUCS((__le16 *) bcc_ptr, tree,
+			6 /* max utf8 char length in bytes */ *
 			(/* server len*/ + 256 /* share len */), nls_codepage);
 		bcc_ptr += 2 * length;	/* convert num 16 bit words to bytes */
 		bcc_ptr += 2;	/* skip trailing null */
@@ -3265,8 +3388,8 @@
 		tcon->tid = smb_buffer_response->Tid;
 		bcc_ptr = pByteArea(smb_buffer_response);
 		length = strnlen(bcc_ptr, BCC(smb_buffer_response) - 2);
-        /* skip service field (NB: this field is always ASCII) */
-		bcc_ptr += length + 1;	
+		/* skip service field (NB: this field is always ASCII) */
+		bcc_ptr += length + 1;
 		strncpy(tcon->treeName, tree, MAX_TREE_SIZE);
 		if (smb_buffer->Flags2 & SMBFLG2_UNICODE) {
 			length = UniStrnlen((wchar_t *) bcc_ptr, 512);
@@ -3284,7 +3407,7 @@
 				bcc_ptr[1] = 0;
 				bcc_ptr += 2;
 			}
-			/* else do not bother copying these informational fields */
+			/* else do not bother copying these information fields*/
 		} else {
 			length = strnlen(bcc_ptr, 1024);
 			if ((bcc_ptr + length) -
@@ -3296,9 +3419,9 @@
 				strncpy(tcon->nativeFileSystem, bcc_ptr,
 					length);
 			}
-			/* else do not bother copying these informational fields */
+			/* else do not bother copying these information fields*/
 		}
-		if((smb_buffer_response->WordCount == 3) ||
+		if ((smb_buffer_response->WordCount == 3) ||
 			 (smb_buffer_response->WordCount == 7))
 			/* field is in same location */
 			tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
@@ -3306,7 +3429,7 @@
 			tcon->Flags = 0;
 		cFYI(1, ("Tcon flags: 0x%x ", tcon->Flags));
 	} else if ((rc == 0) && tcon == NULL) {
-        /* all we need to save for IPC$ connection */
+		/* all we need to save for IPC$ connection */
 		ses->ipc_tid = smb_buffer_response->Tid;
 	}
 
@@ -3322,7 +3445,7 @@
 	int xid;
 	struct cifsSesInfo *ses = NULL;
 	struct task_struct *cifsd_task;
-	char * tmp;
+	char *tmp;
 
 	xid = GetXid();
 
@@ -3343,9 +3466,9 @@
 				FreeXid(xid);
 				return 0;
 			} else if (rc == -ESHUTDOWN) {
-				cFYI(1,("Waking up socket by sending it signal"));
+				cFYI(1, ("Waking up socket by sending signal"));
 				if (cifsd_task) {
-					send_sig(SIGKILL,cifsd_task,1);
+					force_sig(SIGKILL, cifsd_task);
 					kthread_stop(cifsd_task);
 				}
 				rc = 0;
@@ -3354,7 +3477,7 @@
 		} else
 			cFYI(1, ("No session or bad tcon"));
 	}
-	
+
 	cifs_sb->tcon = NULL;
 	tmp = cifs_sb->prepath;
 	cifs_sb->prepathlen = 0;
@@ -3366,11 +3489,11 @@
 		sesInfoFree(ses);
 
 	FreeXid(xid);
-	return rc;		/* BB check if we should always return zero here */
-} 
+	return rc;	/* BB check if we should always return zero here */
+}
 
 int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
-					   struct nls_table * nls_info)
+					   struct nls_table *nls_info)
 {
 	int rc = 0;
 	char ntlm_session_key[CIFS_SESS_KEY_SIZE];
@@ -3378,16 +3501,16 @@
 	int first_time = 0;
 
 	/* what if server changes its buffer size after dropping the session? */
-	if(pSesInfo->server->maxBuf == 0) /* no need to send on reconnect */ {
+	if (pSesInfo->server->maxBuf == 0) /* no need to send on reconnect */ {
 		rc = CIFSSMBNegotiate(xid, pSesInfo);
-		if(rc == -EAGAIN) /* retry only once on 1st time connection */ {
+		if (rc == -EAGAIN) /* retry only once on 1st time connection */ {
 			rc = CIFSSMBNegotiate(xid, pSesInfo);
-			if(rc == -EAGAIN) 
+			if (rc == -EAGAIN)
 				rc = -EHOSTDOWN;
 		}
-		if(rc == 0) {
+		if (rc == 0) {
 			spin_lock(&GlobalMid_Lock);
-			if(pSesInfo->server->tcpStatus != CifsExiting)
+			if (pSesInfo->server->tcpStatus != CifsExiting)
 				pSesInfo->server->tcpStatus = CifsGood;
 			else
 				rc = -EHOSTDOWN;
@@ -3399,18 +3522,19 @@
 	if (!rc) {
 		pSesInfo->flags = 0;
 		pSesInfo->capabilities = pSesInfo->server->capabilities;
-		if(linuxExtEnabled == 0)
+		if (linuxExtEnabled == 0)
 			pSesInfo->capabilities &= (~CAP_UNIX);
 	/*	pSesInfo->sequence_number = 0;*/
-		cFYI(1,("Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
+		cFYI(1,
+		      ("Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
 			pSesInfo->server->secMode,
 			pSesInfo->server->capabilities,
 			pSesInfo->server->timeAdj));
-		if(experimEnabled < 2)
+		if (experimEnabled < 2)
 			rc = CIFS_SessSetup(xid, pSesInfo,
 					    first_time, nls_info);
 		else if (extended_security
-				&& (pSesInfo->capabilities 
+				&& (pSesInfo->capabilities
 					& CAP_EXTENDED_SECURITY)
 				&& (pSesInfo->server->secType == NTLMSSP)) {
 			rc = -EOPNOTSUPP;
@@ -3423,21 +3547,22 @@
 						&ntlmv2_flag,
 						nls_info);
 			if (!rc) {
-				if(ntlmv2_flag) {
-					char * v2_response;
-					cFYI(1,("more secure NTLM ver2 hash"));
-					if(CalcNTLMv2_partial_mac_key(pSesInfo, 
+				if (ntlmv2_flag) {
+					char *v2_response;
+					cFYI(1, ("more secure NTLM ver2 hash"));
+					if (CalcNTLMv2_partial_mac_key(pSesInfo,
 						nls_info)) {
 						rc = -ENOMEM;
 						goto ss_err_exit;
 					} else
 						v2_response = kmalloc(16 + 64 /* blob */, GFP_KERNEL);
-					if(v2_response) {
-						CalcNTLMv2_response(pSesInfo,v2_response);
-				/*		if(first_time)
-							cifs_calculate_ntlmv2_mac_key(
-							  pSesInfo->server->mac_signing_key, 
-							  response, ntlm_session_key, */
+					if (v2_response) {
+						CalcNTLMv2_response(pSesInfo,
+								   v2_response);
+				/*		if (first_time)
+						  cifs_calculate_ntlmv2_mac_key(
+						   pSesInfo->server->mac_signing_key,
+						   response, ntlm_session_key,*/
 						kfree(v2_response);
 					/* BB Put dummy sig in SessSetup PDU? */
 					} else {
@@ -3450,9 +3575,9 @@
 						pSesInfo->server->cryptKey,
 						ntlm_session_key);
 
-					if(first_time)
+					if (first_time)
 						cifs_calculate_mac_key(
-							pSesInfo->server->mac_signing_key,
+							&pSesInfo->server->mac_signing_key,
 							ntlm_session_key,
 							pSesInfo->password);
 				}
@@ -3470,18 +3595,18 @@
 				pSesInfo->server->cryptKey,
 				ntlm_session_key);
 
-			if(first_time) 		
+			if (first_time)
 				cifs_calculate_mac_key(
-					pSesInfo->server->mac_signing_key,
+					&pSesInfo->server->mac_signing_key,
 					ntlm_session_key, pSesInfo->password);
 
 			rc = CIFSSessSetup(xid, pSesInfo,
 				ntlm_session_key, nls_info);
 		}
 		if (rc) {
-			cERROR(1,("Send error in SessSetup = %d",rc));
+			cERROR(1, ("Send error in SessSetup = %d", rc));
 		} else {
-			cFYI(1,("CIFS Session Established successfully"));
+			cFYI(1, ("CIFS Session Established successfully"));
 			pSesInfo->status = CifsGood;
 		}
 	}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 8e86aac..4830acc 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -135,10 +135,10 @@
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
 	char *full_path = NULL;
-	FILE_ALL_INFO * buf = NULL;
+	FILE_ALL_INFO *buf = NULL;
 	struct inode *newinode = NULL;
-	struct cifsFileInfo * pCifsFile = NULL;
-	struct cifsInodeInfo * pCifsInode;
+	struct cifsFileInfo *pCifsFile = NULL;
+	struct cifsInodeInfo *pCifsInode;
 	int disposition = FILE_OVERWRITE_IF;
 	int write_only = FALSE;
 
@@ -207,8 +207,7 @@
 	} else {
 		/* If Open reported that we actually created a file
 		then we now have to set the mode if possible */
-		if ((cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
-			(oplock & CIFS_CREATE_ACTION)) {
+		if ((pTcon->unix_ext) && (oplock & CIFS_CREATE_ACTION)) {
 			mode &= ~current->fs->umask;
 			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
 				CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
@@ -235,8 +234,8 @@
 			/* Could set r/o dos attribute if mode & 0222 == 0 */
 		}
 
-	/* BB server might mask mode so we have to query for Unix case*/
-		if (pTcon->ses->capabilities & CAP_UNIX)
+		/* server might mask mode so we have to query for it */
+		if (pTcon->unix_ext)
 			rc = cifs_get_inode_info_unix(&newinode, full_path,
 						 inode->i_sb, xid);
 		else {
@@ -264,7 +263,8 @@
 				direntry->d_op = &cifs_dentry_ops;
 			d_instantiate(direntry, newinode);
 		}
-		if ((nd->flags & LOOKUP_OPEN) == FALSE) {
+		if ((nd == NULL /* nfsd case - nfs srv does not set nd */) ||
+			((nd->flags & LOOKUP_OPEN) == FALSE)) {
 			/* mknod case - do not leave file open */
 			CIFSSMBClose(xid, pTcon, fileHandle);
 		} else if (newinode) {
@@ -323,7 +323,7 @@
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
 	char *full_path = NULL;
-	struct inode * newinode = NULL;
+	struct inode *newinode = NULL;
 
 	if (!old_valid_dev(device_number))
 		return -EINVAL;
@@ -336,7 +336,7 @@
 	full_path = build_path_from_dentry(direntry);
 	if (full_path == NULL)
 		rc = -ENOMEM;
-	else if (pTcon->ses->capabilities & CAP_UNIX) {
+	else if (pTcon->unix_ext) {
 		mode &= ~current->fs->umask;
 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
 			rc = CIFSSMBUnixSetPerms(xid, pTcon, full_path,
@@ -490,7 +490,7 @@
 	cFYI(1,
 	     (" Full path: %s inode = 0x%p", full_path, direntry->d_inode));
 
-	if (pTcon->ses->capabilities & CAP_UNIX)
+	if (pTcon->unix_ext)
 		rc = cifs_get_inode_info_unix(&newInode, full_path,
 					      parent_dir_inode->i_sb, xid);
 	else
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index 1d71639..893fd0a 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -5,7 +5,7 @@
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   Common Internet FileSystem (CIFS) client
- * 
+ *
  *   Operations related to support for exporting files via NFSD
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -22,31 +22,45 @@
  *   along with this library; if not, write to the Free Software
  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
- 
- /* 
+
+ /*
   * See Documentation/filesystems/Exporting
   * and examples in fs/exportfs
+  *
+  * Since cifs is a network file system, an "fsid" must be included for
+  * any nfs exports file entries which refer to cifs paths.  In addition
+  * the cifs mount must be mounted with the "serverino" option (i.e. use stable
+  * server inode numbers instead of locally generated temporary ones).
+  * Although cifs inodes do not use generation numbers (have generation number
+  * of zero) - the inode number alone should be good enough for simple cases
+  * in which users want to export cifs shares with NFS. The decode and encode
+  * could be improved by using a new routine which expects 64 bit inode numbers
+  * instead of the default 32 bit routines in fs/exportfs
+  *
   */
 
 #include <linux/fs.h>
- 
+#include <linux/exportfs.h>
+#include "cifsglob.h"
+#include "cifs_debug.h"
+
 #ifdef CONFIG_CIFS_EXPERIMENTAL
- 
 static struct dentry *cifs_get_parent(struct dentry *dentry)
 {
- 	/* BB need to add code here eventually to enable export via NFSD */
- 	return ERR_PTR(-EACCES);
+	/* BB need to add code here eventually to enable export via NFSD */
+	cFYI(1, ("get parent for %p", dentry));
+	return ERR_PTR(-EACCES);
 }
- 
+
 struct export_operations cifs_export_ops = {
- 	.get_parent = cifs_get_parent,
-/*	Following five export operations are unneeded so far and can default */ 	
-/* 	.get_dentry =
- 	.get_name =
- 	.find_exported_dentry =
- 	.decode_fh = 
- 	.encode_fs =  */
- };
- 
+	.get_parent = cifs_get_parent,
+/*	Following five export operations are unneeded so far and can default:
+	.get_dentry =
+	.get_name =
+	.find_exported_dentry =
+	.decode_fh =
+	.encode_fs =  */
+};
+
 #endif /* EXPERIMENTAL */
- 
+
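/*
 * Illustration of the "64 bit inode numbers" remark in the export.c comment
 * above (a hypothetical helper, not part of this patch): with "serverino"
 * the server-assigned inode number is 64 bits wide, so an encode routine
 * would need two 32-bit file-handle words per inode instead of the single
 * word the generic helpers in fs/exportfs assume.
 */
static void sketch_pack_ino64(__u64 ino, __u32 fh[2])
{
	fh[0] = (__u32)ino;		/* low 32 bits */
	fh[1] = (__u32)(ino >> 32);	/* high 32 bits */
}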
diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c
index 8e375bb..995474c 100644
--- a/fs/cifs/fcntl.c
+++ b/fs/cifs/fcntl.c
@@ -66,7 +66,7 @@
 	return cifs_ntfy_flags;
 }
 
-int cifs_dir_notify(struct file * file, unsigned long arg)
+int cifs_dir_notify(struct file *file, unsigned long arg)
 {
 	int xid;
 	int rc = -EINVAL;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 94d5b49..e13592a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2,8 +2,8 @@
  *   fs/cifs/file.c
  *
  *   vfs operations that deal with files
- * 
- *   Copyright (C) International Business Machines  Corp., 2002,2003
+ *
+ *   Copyright (C) International Business Machines  Corp., 2002,2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *              Jeremy Allison (jra@samba.org)
  *
@@ -45,7 +45,7 @@
 {
 	memset(private_data, 0, sizeof(struct cifsFileInfo));
 	private_data->netfid = netfid;
-	private_data->pid = current->tgid;	
+	private_data->pid = current->tgid;
 	init_MUTEX(&private_data->fh_sem);
 	mutex_init(&private_data->lock_mutex);
 	INIT_LIST_HEAD(&private_data->llist);
@@ -57,7 +57,7 @@
 	does not tell us which handle the write is for so there can
 	be a close (overlapping with write) of the filehandle that
 	cifs_writepages chose to use */
-	atomic_set(&private_data->wrtPending,0); 
+	atomic_set(&private_data->wrtPending, 0);
 
 	return private_data;
 }
@@ -105,7 +105,7 @@
 	   in the list so we do not have to walk the
 	   list to search for one in prepare_write */
 	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
-		list_add_tail(&pCifsFile->flist, 
+		list_add_tail(&pCifsFile->flist,
 			      &pCifsInode->openFileList);
 	} else {
 		list_add(&pCifsFile->flist,
@@ -138,7 +138,7 @@
 	}
 
 client_can_cache:
-	if (pTcon->ses->capabilities & CAP_UNIX)
+	if (pTcon->unix_ext)
 		rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
 			full_path, inode->i_sb, xid);
 	else
@@ -189,7 +189,7 @@
 
 				/* needed for writepage */
 				pCifsFile->pfile = file;
-				
+
 				file->private_data = pCifsFile;
 				break;
 			}
@@ -212,15 +212,15 @@
 		return -ENOMEM;
 	}
 
-	cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
+	cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
 		 inode, file->f_flags, full_path));
 	desiredAccess = cifs_convert_flags(file->f_flags);
 
 /*********************************************************************
  *  open flag mapping table:
- *  
+ *
  *	POSIX Flag            CIFS Disposition
- *	----------            ---------------- 
+ *	----------            ----------------
  *	O_CREAT               FILE_OPEN_IF
  *	O_CREAT | O_EXCL      FILE_CREATE
  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
@@ -228,12 +228,12 @@
  *	none of the above     FILE_OPEN
  *
  *	Note that there is not a direct match between disposition
- *	FILE_SUPERSEDE (ie create whether or not file exists although 
+ *	FILE_SUPERSEDE (ie create whether or not file exists although
  *	O_CREAT | O_TRUNC is similar but truncates the existing
  *	file rather than creating a new file as FILE_SUPERSEDE does
  *	(which uses the attributes / metadata passed in on open call)
  *?
- *?  O_SYNC is a reasonable match to CIFS writethrough flag  
+ *?  O_SYNC is a reasonable match to CIFS writethrough flag
  *?  and the read write flags match reasonably.  O_LARGEFILE
  *?  is irrelevant because largefile support is always used
  *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
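/*
 * The table above, restated as a sketch (only the rows visible in this
 * hunk; the real open path in this file may handle more combinations):
 */
static int sketch_posix_flags_to_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	if (flags & O_CREAT)
		return FILE_OPEN_IF;
	return FILE_OPEN;	/* none of the above */
}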
@@ -253,8 +253,8 @@
 	   and calling get_inode_info with returned buf (at least helps
 	   non-Unix server case) */
 
-	/* BB we can not do this if this is the second open of a file 
-	   and the first handle has writebehind data, we might be 
+	/* BB we can not do this if this is the second open of a file
+	   and the first handle has writebehind data, we might be
 	   able to simply do a filemap_fdatawrite/filemap_fdatawait first */
 	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
 	if (!buf) {
@@ -263,7 +263,7 @@
 	}
 
 	if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
-		rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, 
+		rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
 			 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
 			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
 				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -300,15 +300,15 @@
 		write_unlock(&GlobalSMBSeslock);
 	}
 
-	if (oplock & CIFS_CREATE_ACTION) {           
+	if (oplock & CIFS_CREATE_ACTION) {
 		/* time to set mode which we can not set earlier due to
 		   problems creating new read-only files */
-		if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
+		if (pTcon->unix_ext) {
 			CIFSSMBUnixSetPerms(xid, pTcon, full_path,
 					    inode->i_mode,
 					    (__u64)-1, (__u64)-1, 0 /* dev */,
 					    cifs_sb->local_nls,
-					    cifs_sb->mnt_cifs_flags & 
+					    cifs_sb->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
 		} else {
 			/* BB implement via Windows security descriptors eg
@@ -345,7 +345,7 @@
 	struct cifsTconInfo *pTcon;
 	struct cifsFileInfo *pCifsFile;
 	struct cifsInodeInfo *pCifsInode;
-	struct inode * inode;
+	struct inode *inode;
 	char *full_path = NULL;
 	int desiredAccess;
 	int disposition = FILE_OPEN;
@@ -372,13 +372,13 @@
 	}
 
 	inode = file->f_path.dentry->d_inode;
-	if(inode == NULL) {
+	if (inode == NULL) {
 		cERROR(1, ("inode not valid"));
 		dump_stack();
 		rc = -EBADF;
 		goto reopen_error_exit;
 	}
-		
+
 	cifs_sb = CIFS_SB(inode->i_sb);
 	pTcon = cifs_sb->tcon;
 
@@ -396,7 +396,7 @@
 	}
 
 	cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
-		 inode, file->f_flags,full_path));
+		 inode, file->f_flags, full_path));
 	desiredAccess = cifs_convert_flags(file->f_flags);
 
 	if (oplockEnabled)
@@ -405,14 +405,14 @@
 		oplock = FALSE;
 
 	/* Can not refresh inode by passing in file_info buf to be returned
-	   by SMBOpen and then calling get_inode_info with returned buf 
-	   since file might have write behind data that needs to be flushed 
+	   by SMBOpen and then calling get_inode_info with returned buf
+	   since file might have write behind data that needs to be flushed
 	   and server version of file size can be stale. If we knew for sure
 	   that inode was not dirty locally we could do this */
 
 	rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
 			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
-			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & 
+			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
 				CIFS_MOUNT_MAP_SPECIAL_CHR);
 	if (rc) {
 		up(&pCifsFile->fh_sem);
@@ -430,7 +430,7 @@
 			   go to server to get inode info */
 				pCifsInode->clientCanCacheAll = FALSE;
 				pCifsInode->clientCanCacheRead = FALSE;
-				if (pTcon->ses->capabilities & CAP_UNIX)
+				if (pTcon->unix_ext)
 					rc = cifs_get_inode_info_unix(&inode,
 						full_path, inode->i_sb, xid);
 				else
@@ -486,23 +486,24 @@
 			   already closed */
 			if (pTcon->tidStatus != CifsNeedReconnect) {
 				int timeout = 2;
-				while((atomic_read(&pSMBFile->wrtPending) != 0)
+				while ((atomic_read(&pSMBFile->wrtPending) != 0)
 					 && (timeout < 1000) ) {
 					/* Give write a better chance to get to
 					server ahead of the close.  We do not
 					want to add a wait_q here as it would
 					increase the memory utilization as
 					the struct would be in each open file,
-					but this should give enough time to 
+					but this should give enough time to
 					clear the socket */
 #ifdef CONFIG_CIFS_DEBUG2
-					cFYI(1,("close delay, write pending"));
+					cFYI(1, ("close delay, write pending"));
 #endif /* DEBUG2 */
 					msleep(timeout);
 					timeout *= 4;
 				}
-				if(atomic_read(&pSMBFile->wrtPending))
-					cERROR(1,("close with pending writes"));
+				if (atomic_read(&pSMBFile->wrtPending))
+					cERROR(1,
+						("close with pending writes"));
 				rc = CIFSSMBClose(xid, pTcon,
 						  pSMBFile->netfid);
 			}
@@ -534,7 +535,7 @@
 		CIFS_I(inode)->clientCanCacheRead = FALSE;
 		CIFS_I(inode)->clientCanCacheAll  = FALSE;
 	}
-	if ((rc ==0) && CIFS_I(inode)->write_behind_rc)
+	if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
 		rc = CIFS_I(inode)->write_behind_rc;
 	FreeXid(xid);
 	return rc;
@@ -554,7 +555,8 @@
 
 	if (pCFileStruct) {
 		struct cifsTconInfo *pTcon;
-		struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+		struct cifs_sb_info *cifs_sb =
+			CIFS_SB(file->f_path.dentry->d_sb);
 
 		pTcon = cifs_sb->tcon;
 
@@ -572,7 +574,7 @@
 		if (ptmp) {
 			cFYI(1, ("closedir free smb buf in srch struct"));
 			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
-			if(pCFileStruct->srch_inf.smallBuf)
+			if (pCFileStruct->srch_inf.smallBuf)
 				cifs_small_buf_release(ptmp);
 			else
 				cifs_buf_release(ptmp);
@@ -594,7 +596,8 @@
 static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
 				__u64 offset, __u8 lockType)
 {
-	struct cifsLockInfo *li = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
+	struct cifsLockInfo *li =
+		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
 	if (li == NULL)
 		return -ENOMEM;
 	li->offset = offset;
@@ -625,8 +628,8 @@
 
 	cFYI(1, ("Lock parm: 0x%x flockflags: "
 		 "0x%x flocktype: 0x%x start: %lld end: %lld",
-	        cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
-	        pfLock->fl_end));
+		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
+		pfLock->fl_end));
 
 	if (pfLock->fl_flags & FL_POSIX)
 		cFYI(1, ("Posix"));
@@ -641,7 +644,7 @@
 			 "not implemented yet"));
 	if (pfLock->fl_flags & FL_LEASE)
 		cFYI(1, ("Lease on file - not implemented yet"));
-	if (pfLock->fl_flags & 
+	if (pfLock->fl_flags &
 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
 		cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
 
@@ -683,9 +686,9 @@
 	account for negative length which we can not accept over the
 	wire */
 	if (IS_GETLK(cmd)) {
-		if(posix_locking) {
+		if (posix_locking) {
 			int posix_lock_type;
-			if(lockType & LOCKING_ANDX_SHARED_LOCK)
+			if (lockType & LOCKING_ANDX_SHARED_LOCK)
 				posix_lock_type = CIFS_RDLCK;
 			else
 				posix_lock_type = CIFS_WRLCK;
@@ -700,7 +703,7 @@
 		rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
 				 0, 1, lockType, 0 /* wait flag */ );
 		if (rc == 0) {
-			rc = CIFSSMBLock(xid, pTcon, netfid, length, 
+			rc = CIFSSMBLock(xid, pTcon, netfid, length,
 					 pfLock->fl_start, 1 /* numUnlock */ ,
 					 0 /* numLock */ , lockType,
 					 0 /* wait flag */ );
@@ -729,22 +732,24 @@
 
 	if (posix_locking) {
 		int posix_lock_type;
-		if(lockType & LOCKING_ANDX_SHARED_LOCK)
+		if (lockType & LOCKING_ANDX_SHARED_LOCK)
 			posix_lock_type = CIFS_RDLCK;
 		else
 			posix_lock_type = CIFS_WRLCK;
-		
-		if(numUnlock == 1)
+
+		if (numUnlock == 1)
 			posix_lock_type = CIFS_UNLCK;
 
 		rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
 				      length, pfLock,
 				      posix_lock_type, wait_flag);
 	} else {
-		struct cifsFileInfo *fid = (struct cifsFileInfo *)file->private_data;
+		struct cifsFileInfo *fid =
+			(struct cifsFileInfo *)file->private_data;
 
 		if (numLock) {
-			rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
+			rc = CIFSSMBLock(xid, pTcon, netfid, length,
+					pfLock->fl_start,
 					0, numLock, lockType, wait_flag);
 
 			if (rc == 0) {
@@ -763,7 +768,8 @@
 			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
 				if (pfLock->fl_start <= li->offset &&
 						length >= li->length) {
-					stored_rc = CIFSSMBLock(xid, pTcon, netfid,
+					stored_rc = CIFSSMBLock(xid, pTcon,
+							netfid,
 							li->length, li->offset,
 							1, 0, li->type, FALSE);
 					if (stored_rc)
@@ -805,7 +811,7 @@
 	if (file->private_data == NULL)
 		return -EBADF;
 	open_file = (struct cifsFileInfo *) file->private_data;
-	
+
 	xid = GetXid();
 
 	if (*poffset > file->f_path.dentry->d_inode->i_size)
@@ -824,7 +830,7 @@
 			   and blocked, and the file has been freed on us while
 			   we blocked so return what we managed to write */
 				return total_written;
-			} 
+			}
 			if (open_file->closePend) {
 				FreeXid(xid);
 				if (total_written)
@@ -867,8 +873,8 @@
 	/* since the write may have blocked check these pointers again */
 	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
 		struct inode *inode = file->f_path.dentry->d_inode;
-/* Do not update local mtime - server will set its actual value on write		
- *		inode->i_ctime = inode->i_mtime = 
+/* Do not update local mtime - server will set its actual value on write
+ *		inode->i_ctime = inode->i_mtime =
  * 			current_fs_time(inode->i_sb);*/
 		if (total_written > 0) {
 			spin_lock(&inode->i_lock);
@@ -877,7 +883,7 @@
 					*poffset);
 			spin_unlock(&inode->i_lock);
 		}
-		mark_inode_dirty_sync(file->f_path.dentry->d_inode);	
+		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
 	}
 	FreeXid(xid);
 	return total_written;
@@ -898,13 +904,13 @@
 
 	pTcon = cifs_sb->tcon;
 
-	cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
+	cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
 	   *poffset, file->f_path.dentry->d_name.name));
 
 	if (file->private_data == NULL)
 		return -EBADF;
 	open_file = (struct cifsFileInfo *)file->private_data;
-	
+
 	xid = GetXid();
 
 	if (*poffset > file->f_path.dentry->d_inode->i_size)
@@ -921,10 +927,10 @@
 				FreeXid(xid);
 			/* if we have gotten here we have written some data
 			   and blocked, and the file has been freed on us
-			   while we blocked so return what we managed to 
+			   while we blocked so return what we managed to
 			   write */
 				return total_written;
-			} 
+			}
 			if (open_file->closePend) {
 				FreeXid(xid);
 				if (total_written)
@@ -935,14 +941,14 @@
 			if (open_file->invalidHandle) {
 				/* we could deadlock if we called
 				   filemap_fdatawait from here so tell
-				   reopen_file not to flush data to 
+				   reopen_file not to flush data to
 				   server now */
 				rc = cifs_reopen_file(file, FALSE);
 				if (rc != 0)
 					break;
 			}
-			if(experimEnabled || (pTcon->ses->server &&
-				((pTcon->ses->server->secMode & 
+			if (experimEnabled || (pTcon->ses->server &&
+				((pTcon->ses->server->secMode &
 				(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
 				== 0))) {
 				struct kvec iov[2];
@@ -976,7 +982,7 @@
 			}
 		} else
 			*poffset += bytes_written;
-		long_op = FALSE; /* subsequent writes fast - 
+		long_op = FALSE; /* subsequent writes fast -
 				    15 seconds is plenty */
 	}
 
@@ -1009,8 +1015,8 @@
 	the VFS or MM) should not happen but we had reports of on oops (due to
 	it being zero) during stress testcases so we need to check for it */
 
-	if(cifs_inode == NULL) {
-		cERROR(1,("Null inode passed to cifs_writeable_file"));
+	if (cifs_inode == NULL) {
+		cERROR(1, ("Null inode passed to cifs_writeable_file"));
 		dump_stack();
 		return NULL;
 	}
@@ -1024,13 +1030,14 @@
 		     (open_file->pfile->f_flags & O_WRONLY))) {
 			atomic_inc(&open_file->wrtPending);
 			read_unlock(&GlobalSMBSeslock);
-			if((open_file->invalidHandle) && 
+			if ((open_file->invalidHandle) &&
 			   (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
 				rc = cifs_reopen_file(open_file->pfile, FALSE);
 				/* if it fails, try another handle - might be */
 				/* dangerous to hold up writepages with retry */
-				if(rc) {
-					cFYI(1,("failed on reopen file in wp"));
+				if (rc) {
+					cFYI(1,
+					      ("failed on reopen file in wp"));
 					read_lock(&GlobalSMBSeslock);
 					/* can not use this handle, no write
 					pending on this one after all */
@@ -1082,7 +1089,7 @@
 
 	/* check to make sure that we are not extending the file */
 	if (mapping->host->i_size - offset < (loff_t)to)
-		to = (unsigned)(mapping->host->i_size - offset); 
+		to = (unsigned)(mapping->host->i_size - offset);
 
 	open_file = find_writable_file(CIFS_I(mapping->host));
 	if (open_file) {
@@ -1116,8 +1123,8 @@
 	int done = 0;
 	pgoff_t end;
 	pgoff_t index;
- 	int range_whole = 0;
-	struct kvec * iov;
+	int range_whole = 0;
+	struct kvec *iov;
 	int len;
 	int n_iov = 0;
 	pgoff_t next;
@@ -1131,7 +1138,7 @@
 	int xid;
 
 	cifs_sb = CIFS_SB(mapping->host->i_sb);
-	
+
 	/*
 	 * If wsize is smaller that the page cache size, default to writing
 	 * one page at a time via cifs_writepage
@@ -1139,14 +1146,14 @@
 	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
 		return generic_writepages(mapping, wbc);
 
-	if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
-		if(cifs_sb->tcon->ses->server->secMode &
-                          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
-			if(!experimEnabled) 
+	if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
+		if (cifs_sb->tcon->ses->server->secMode &
+				(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+			if (!experimEnabled)
 				return generic_writepages(mapping, wbc);
 
 	iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
-	if(iov == NULL)
+	if (iov == NULL)
 		return generic_writepages(mapping, wbc);
 
 
@@ -1279,7 +1286,7 @@
 						   1);
 				atomic_dec(&open_file->wrtPending);
 				if (rc || bytes_written < bytes_to_write) {
-					cERROR(1,("Write2 ret %d, written = %d",
+					cERROR(1, ("Write2 ret %d, wrote %d",
 						  rc, bytes_written));
 					/* BB what if continued retry is
 					   requested via mount flags? */
@@ -1295,8 +1302,8 @@
 				success rc but too little data written? */
 				/* BB investigate retry logic on temporary
 				server crash cases and how recovery works
-				when page marked as error */ 
-				if(rc)
+				when page marked as error */
+				if (rc)
 					SetPageError(page);
 				kunmap(page);
 				unlock_page(page);
@@ -1326,7 +1333,7 @@
 	return rc;
 }
 
-static int cifs_writepage(struct page* page, struct writeback_control *wbc)
+static int cifs_writepage(struct page *page, struct writeback_control *wbc)
 {
 	int rc = -EFAULT;
 	int xid;
@@ -1334,7 +1341,7 @@
 	xid = GetXid();
 /* BB add check for wbc flags */
 	page_cache_get(page);
-        if (!PageUptodate(page)) {
+	if (!PageUptodate(page)) {
 		cFYI(1, ("ppw - page not up to date"));
 	}
 
@@ -1348,7 +1355,7 @@
 	 * Just unlocking the page will cause the radix tree tag-bits
 	 * to fail to update with the state of the page correctly.
 	 */
-	set_page_writeback(page);		
+	set_page_writeback(page);
 	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
 	SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
 	unlock_page(page);
@@ -1368,7 +1375,7 @@
 	char *page_data;
 
 	xid = GetXid();
-	cFYI(1, ("commit write for page %p up to position %lld for %d", 
+	cFYI(1, ("commit write for page %p up to position %lld for %d",
 		 page, position, to));
 	spin_lock(&inode->i_lock);
 	if (position > inode->i_size) {
@@ -1396,7 +1403,7 @@
 			rc = 0;
 		/* else if (rc < 0) should we set writebehind rc? */
 		kunmap(page);
-	} else {	
+	} else {
 		set_page_dirty(page);
 	}
 
@@ -1412,9 +1419,9 @@
 
 	xid = GetXid();
 
-	cFYI(1, ("Sync file - name: %s datasync: 0x%x", 
+	cFYI(1, ("Sync file - name: %s datasync: 0x%x",
 		dentry->d_name.name, datasync));
-	
+
 	rc = filemap_fdatawrite(inode->i_mapping);
 	if (rc == 0)
 		CIFS_I(inode)->write_behind_rc = 0;
@@ -1438,7 +1445,7 @@
 	if (!inode)
 		return; */
 
-/*	fill in rpages then 
+/*	fill in rpages then
 	result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
 
 /*	cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
@@ -1456,7 +1463,7 @@
  */
 int cifs_flush(struct file *file, fl_owner_t id)
 {
-	struct inode * inode = file->f_path.dentry->d_inode;
+	struct inode *inode = file->f_path.dentry->d_inode;
 	int rc = 0;
 
 	/* Rather than do the steps manually:
@@ -1471,8 +1478,8 @@
 	rc = filemap_fdatawrite(inode->i_mapping);
 	if (!rc) /* reset wb rc if we were able to write out dirty pages */
 		CIFS_I(inode)->write_behind_rc = 0;
-		
-	cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
+
+	cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
 
 	return rc;
 }
@@ -1508,13 +1515,13 @@
 	for (total_read = 0, current_offset = read_data;
 	     read_size > total_read;
 	     total_read += bytes_read, current_offset += bytes_read) {
-		current_read_size = min_t(const int, read_size - total_read, 
+		current_read_size = min_t(const int, read_size - total_read,
 					  cifs_sb->rsize);
 		rc = -EAGAIN;
 		smb_read_data = NULL;
 		while (rc == -EAGAIN) {
 			int buf_type = CIFS_NO_BUFFER;
-			if ((open_file->invalidHandle) && 
+			if ((open_file->invalidHandle) &&
 			    (!open_file->closePend)) {
 				rc = cifs_reopen_file(file, TRUE);
 				if (rc != 0)
@@ -1535,9 +1542,9 @@
 					rc = -EFAULT;
 				}
 
-				if(buf_type == CIFS_SMALL_BUFFER)
+				if (buf_type == CIFS_SMALL_BUFFER)
 					cifs_small_buf_release(smb_read_data);
-				else if(buf_type == CIFS_LARGE_BUFFER)
+				else if (buf_type == CIFS_LARGE_BUFFER)
 					cifs_buf_release(smb_read_data);
 				smb_read_data = NULL;
 			}
@@ -1586,21 +1593,21 @@
 	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
 		cFYI(1, ("attempting read on write only file instance"));
 
-	for (total_read = 0, current_offset = read_data; 
+	for (total_read = 0, current_offset = read_data;
 	     read_size > total_read;
 	     total_read += bytes_read, current_offset += bytes_read) {
 		current_read_size = min_t(const int, read_size - total_read,
 					  cifs_sb->rsize);
 		/* For windows me and 9x we do not want to request more
 		than it negotiated since it will refuse the read then */
-		if((pTcon->ses) && 
+		if ((pTcon->ses) &&
 			!(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
 			current_read_size = min_t(const int, current_read_size,
 					pTcon->ses->server->maxBuf - 128);
 		}
 		rc = -EAGAIN;
 		while (rc == -EAGAIN) {
-			if ((open_file->invalidHandle) && 
+			if ((open_file->invalidHandle) &&
 			    (!open_file->closePend)) {
 				rc = cifs_reopen_file(file, TRUE);
 				if (rc != 0)
@@ -1646,7 +1653,7 @@
 }
 
 
-static void cifs_copy_cache_pages(struct address_space *mapping, 
+static void cifs_copy_cache_pages(struct address_space *mapping,
 	struct list_head *pages, int bytes_read, char *data,
 	struct pagevec *plru_pvec)
 {
@@ -1669,12 +1676,12 @@
 			continue;
 		}
 
-		target = kmap_atomic(page,KM_USER0);
+		target = kmap_atomic(page, KM_USER0);
 
 		if (PAGE_CACHE_SIZE > bytes_read) {
 			memcpy(target, data, bytes_read);
 			/* zero the tail end of this partial page */
-			memset(target + bytes_read, 0, 
+			memset(target + bytes_read, 0,
 			       PAGE_CACHE_SIZE - bytes_read);
 			bytes_read = 0;
 		} else {
@@ -1703,7 +1710,7 @@
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
 	int bytes_read = 0;
-	unsigned int read_size,i;
+	unsigned int read_size, i;
 	char *smb_read_data = NULL;
 	struct smb_com_read_rsp *pSMBr;
 	struct pagevec lru_pvec;
@@ -1720,7 +1727,9 @@
 	pTcon = cifs_sb->tcon;
 
 	pagevec_init(&lru_pvec, 0);
-
+#ifdef CONFIG_CIFS_DEBUG2
+	cFYI(1, ("rpages: num pages %d", num_pages));
+#endif
 	for (i = 0; i < num_pages; ) {
 		unsigned contig_pages;
 		struct page *tmp_page;
@@ -1734,14 +1743,14 @@
 
 		/* count adjacent pages that we will read into */
 		contig_pages = 0;
-		expected_index = 
+		expected_index =
 			list_entry(page_list->prev, struct page, lru)->index;
-		list_for_each_entry_reverse(tmp_page,page_list,lru) {
+		list_for_each_entry_reverse(tmp_page, page_list, lru) {
 			if (tmp_page->index == expected_index) {
 				contig_pages++;
 				expected_index++;
 			} else
-				break; 
+				break;
 		}
 		if (contig_pages + i >  num_pages)
 			contig_pages = num_pages - i;
@@ -1753,10 +1762,13 @@
 		/* Read size needs to be in multiples of one page */
 		read_size = min_t(const unsigned int, read_size,
 				  cifs_sb->rsize & PAGE_CACHE_MASK);
-
+#ifdef CONFIG_CIFS_DEBUG2
+		cFYI(1, ("rpages: read size 0x%x  contiguous pages %d",
+				read_size, contig_pages));
+#endif
 		rc = -EAGAIN;
 		while (rc == -EAGAIN) {
-			if ((open_file->invalidHandle) && 
+			if ((open_file->invalidHandle) &&
 			    (!open_file->closePend)) {
 				rc = cifs_reopen_file(file, TRUE);
 				if (rc != 0)
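/*
 * Worked example for the readahead sizing above (the numbers are
 * illustrative, not defaults): with 4 KB pages and an rsize of, say, 16 KB,
 * read_size is capped at 16 KB, so a long run of the contiguous pages
 * counted above is fetched in several 4-page CIFSSMBRead calls rather than
 * one oversized request.
 */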
@@ -1769,11 +1781,11 @@
 					 &bytes_read, &smb_read_data,
 					 &buf_type);
 			/* BB more RC checks ? */
-			if (rc== -EAGAIN) {
+			if (rc == -EAGAIN) {
 				if (smb_read_data) {
-					if(buf_type == CIFS_SMALL_BUFFER)
+					if (buf_type == CIFS_SMALL_BUFFER)
 						cifs_small_buf_release(smb_read_data);
-					else if(buf_type == CIFS_LARGE_BUFFER)
+					else if (buf_type == CIFS_LARGE_BUFFER)
 						cifs_buf_release(smb_read_data);
 					smb_read_data = NULL;
 				}
@@ -1794,10 +1806,10 @@
 			if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
 				i++; /* account for partial page */
 
-				/* server copy of file can have smaller size 
+				/* server copy of file can have smaller size
 				   than client */
-				/* BB do we need to verify this common case ? 
-				   this case is ok - if we are at server EOF 
+				/* BB do we need to verify this common case ?
+				   this case is ok - if we are at server EOF
 				   we will hit it on next read */
 
 				/* break; */
@@ -1806,14 +1818,14 @@
 			cFYI(1, ("No bytes read (%d) at offset %lld . "
 				 "Cleaning remaining pages from readahead list",
 				 bytes_read, offset));
-			/* BB turn off caching and do new lookup on 
+			/* BB turn off caching and do new lookup on
 			   file size at server? */
 			break;
 		}
 		if (smb_read_data) {
-			if(buf_type == CIFS_SMALL_BUFFER)
+			if (buf_type == CIFS_SMALL_BUFFER)
 				cifs_small_buf_release(smb_read_data);
-			else if(buf_type == CIFS_LARGE_BUFFER)
+			else if (buf_type == CIFS_LARGE_BUFFER)
 				cifs_buf_release(smb_read_data);
 			smb_read_data = NULL;
 		}
@@ -1824,12 +1836,12 @@
 
 /* need to free smb_read_data buf before exit */
 	if (smb_read_data) {
-		if(buf_type == CIFS_SMALL_BUFFER)
+		if (buf_type == CIFS_SMALL_BUFFER)
 			cifs_small_buf_release(smb_read_data);
-		else if(buf_type == CIFS_LARGE_BUFFER)
+		else if (buf_type == CIFS_LARGE_BUFFER)
 			cifs_buf_release(smb_read_data);
 		smb_read_data = NULL;
-	} 
+	}
 
 	FreeXid(xid);
 	return rc;
@@ -1844,26 +1856,26 @@
 	page_cache_get(page);
 	read_data = kmap(page);
 	/* for reads over a certain size could initiate async read ahead */
-                                                                                                                           
+
 	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
-                                                                                                                           
+
 	if (rc < 0)
 		goto io_error;
 	else
-		cFYI(1, ("Bytes read %d",rc));
-                                                                                                                           
+		cFYI(1, ("Bytes read %d", rc));
+
 	file->f_path.dentry->d_inode->i_atime =
 		current_fs_time(file->f_path.dentry->d_inode->i_sb);
-                                                                                                                           
+
 	if (PAGE_CACHE_SIZE > rc)
 		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
 
 	flush_dcache_page(page);
 	SetPageUptodate(page);
 	rc = 0;
-                                                                                                                           
+
 io_error:
-        kunmap(page);
+	kunmap(page);
 	page_cache_release(page);
 	return rc;
 }
@@ -1881,7 +1893,7 @@
 		return -EBADF;
 	}
 
-	cFYI(1, ("readpage %p at offset %d 0x%x\n", 
+	cFYI(1, ("readpage %p at offset %d 0x%x\n",
 		 page, (int)offset, (int)offset));
 
 	rc = cifs_readpage_worker(file, page, &offset);
@@ -1895,7 +1907,7 @@
 /* We do not want to update the file size from server for inodes
    open for write - to avoid races with writepage extending
    the file - in the future we could consider allowing
-   refreshing the inode only on increases in the file size 
+   refreshing the inode only on increases in the file size
    but this is tricky to do without racing with writebehind
    page caching in the current Linux kernel design */
 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
@@ -1904,8 +1916,8 @@
 
 	if (cifsInode)
 		open_file =  find_writable_file(cifsInode);
- 
-	if(open_file) {
+
+	if (open_file) {
 		struct cifs_sb_info *cifs_sb;
 
 		/* there is not actually a write pending so let
@@ -1915,12 +1927,12 @@
 
 		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
 		if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
-			/* since no page cache to corrupt on directio 
+			/* since no page cache to corrupt on directio
 			we can change size safely */
 			return 1;
 		}
 
-		if(i_size_read(&cifsInode->vfs_inode) < end_of_file)
+		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
 			return 1;
 
 		return 0;
@@ -1935,7 +1947,7 @@
 	loff_t i_size;
 	loff_t offset;
 
-	cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
+	cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
 	if (PageUptodate(page))
 		return 0;
 
@@ -1955,14 +1967,7 @@
 		 * We don't need to read data beyond the end of the file.
 		 * zero it, and set the page uptodate
 		 */
-		void *kaddr = kmap_atomic(page, KM_USER0);
-
-		if (from)
-			memset(kaddr, 0, from);
-		if (to < PAGE_CACHE_SIZE)
-			memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
-		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		simple_prepare_write(file, page, from, to);
 		SetPageUptodate(page);
 	} else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
 		/* might as well read a page, it is fast enough */
@@ -1974,8 +1979,8 @@
 		   this will be written out by commit_write so is fine */
 	}
 
-	/* we do not need to pass errors back 
-	   e.g. if we do not have read access to the file 
+	/* we do not need to pass errors back
+	   e.g. if we do not have read access to the file
 	   because cifs_commit_write will do the right thing.  -- shaggy */
 
 	return 0;
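/*
 * The simple_prepare_write() call substituted above is assumed to cover
 * what the removed open-coded block did - zero the parts of the page
 * outside [from, to) so it can be marked up to date without a read from
 * the server; roughly:
 */
static void sketch_zero_outside_range(struct page *page, unsigned from,
				      unsigned to)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	if (from)
		memset(kaddr, 0, from);
	if (to < PAGE_CACHE_SIZE)
		memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
}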
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index f0ff12b..dd41677 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -57,14 +57,14 @@
 			if (tmp_path == NULL) {
 				return -ENOMEM;
 			}
-        		/* have to skip first of the double backslash of
+			/* have to skip first of the double backslash of
 			   UNC name */
 			strncpy(tmp_path, pTcon->treeName, MAX_TREE_SIZE);
 			strncat(tmp_path, search_path, MAX_PATHCONF);
 			rc = connect_to_dfs_path(xid, pTcon->ses,
 						 /* treename + */ tmp_path,
-						 cifs_sb->local_nls, 
-						 cifs_sb->mnt_cifs_flags & 
+						 cifs_sb->local_nls,
+						 cifs_sb->mnt_cifs_flags &
 						    CIFS_MOUNT_MAP_SPECIAL_CHR);
 			kfree(tmp_path);
 
@@ -81,7 +81,7 @@
 		/* get new inode */
 		if (*pinode == NULL) {
 			*pinode = new_inode(sb);
-			if (*pinode == NULL) 
+			if (*pinode == NULL)
 				return -ENOMEM;
 			/* Is an i_ino of zero legal? */
 			/* Are there sanity checks we can use to ensure that
@@ -92,7 +92,7 @@
 			} /* note ino incremented to unique num in new_inode */
 			if (sb->s_flags & MS_NOATIME)
 				(*pinode)->i_flags |= S_NOATIME | S_NOCMTIME;
-				
+
 			insert_inode_hash(*pinode);
 		}
 
@@ -103,7 +103,7 @@
 		cifsInfo->time = jiffies;
 		cFYI(1, ("New time %ld", cifsInfo->time));
 		/* this is ok to set on every inode revalidate */
-		atomic_set(&cifsInfo->inUse,1);
+		atomic_set(&cifsInfo->inUse, 1);
 
 		inode->i_atime =
 		    cifs_NTtimeToUnix(le64_to_cpu(findData.LastAccessTime));
@@ -114,8 +114,8 @@
 		    cifs_NTtimeToUnix(le64_to_cpu(findData.LastStatusChange));
 		inode->i_mode = le64_to_cpu(findData.Permissions);
 		/* since we set the inode type below we need to mask off
-                   to avoid strange results if bits set above */
-                        inode->i_mode &= ~S_IFMT;
+		   to avoid strange results if bits set above */
+		inode->i_mode &= ~S_IFMT;
 		if (type == UNIX_FILE) {
 			inode->i_mode |= S_IFREG;
 		} else if (type == UNIX_SYMLINK) {
@@ -137,9 +137,9 @@
 		} else {
 			/* safest to call it a file if we do not know */
 			inode->i_mode |= S_IFREG;
-			cFYI(1,("unknown type %d",type));
+			cFYI(1, ("unknown type %d", type));
 		}
-		
+
 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
 			inode->i_uid = cifs_sb->mnt_uid;
 		else
@@ -149,7 +149,7 @@
 			inode->i_gid = cifs_sb->mnt_gid;
 		else
 			inode->i_gid = le64_to_cpu(findData.Gid);
-			
+
 		inode->i_nlink = le64_to_cpu(findData.Nlinks);
 
 		spin_lock(&inode->i_lock);
@@ -183,17 +183,17 @@
 			inode->i_op = &cifs_file_inode_ops;
 			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
 				if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
-					inode->i_fop = 
+					inode->i_fop =
 						&cifs_file_direct_nobrl_ops;
 				else
 					inode->i_fop = &cifs_file_direct_ops;
 			} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
 				inode->i_fop = &cifs_file_nobrl_ops;
-			else /* not direct, send byte range locks */ 
+			else /* not direct, send byte range locks */
 				inode->i_fop = &cifs_file_ops;
 
 			/* check if server can support readpages */
-			if (pTcon->ses->server->maxBuf < 
+			if (pTcon->ses->server->maxBuf <
 			    PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
 				inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 			else
@@ -215,7 +215,7 @@
 	return rc;
 }
 
-static int decode_sfu_inode(struct inode * inode, __u64 size,
+static int decode_sfu_inode(struct inode *inode, __u64 size,
 			    const unsigned char *path,
 			    struct cifs_sb_info *cifs_sb, int xid)
 {
@@ -225,7 +225,7 @@
 	struct cifsTconInfo *pTcon = cifs_sb->tcon;
 	char buf[24];
 	unsigned int bytes_read;
-	char * pbuf;
+	char *pbuf;
 
 	pbuf = buf;
 
@@ -235,22 +235,22 @@
 	} else if (size < 8) {
 		return -EINVAL;	 /* EOPNOTSUPP? */
 	}
-		
+
 	rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ,
 			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
 			 cifs_sb->local_nls,
 			 cifs_sb->mnt_cifs_flags &
 				CIFS_MOUNT_MAP_SPECIAL_CHR);
-	if (rc==0) {
+	if (rc == 0) {
 		int buf_type = CIFS_NO_BUFFER;
 			/* Read header */
 		rc = CIFSSMBRead(xid, pTcon,
-			         netfid,
+				 netfid,
 				 24 /* length */, 0 /* offset */,
 				 &bytes_read, &pbuf, &buf_type);
 		if ((rc == 0) && (bytes_read >= 8)) {
 			if (memcmp("IntxBLK", pbuf, 8) == 0) {
-				cFYI(1,("Block device"));
+				cFYI(1, ("Block device"));
 				inode->i_mode |= S_IFBLK;
 				if (bytes_read == 24) {
 					/* we have enough to decode dev num */
@@ -261,7 +261,7 @@
 					inode->i_rdev = MKDEV(mjr, mnr);
 				}
 			} else if (memcmp("IntxCHR", pbuf, 8) == 0) {
-				cFYI(1,("Char device"));
+				cFYI(1, ("Char device"));
 				inode->i_mode |= S_IFCHR;
 				if (bytes_read == 24) {
 					/* we have enough to decode dev num */
@@ -270,27 +270,26 @@
 					mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
 					mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
 					inode->i_rdev = MKDEV(mjr, mnr);
-                                }
+				}
 			} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
-				cFYI(1,("Symlink"));
+				cFYI(1, ("Symlink"));
 				inode->i_mode |= S_IFLNK;
 			} else {
 				inode->i_mode |= S_IFREG; /* file? */
-				rc = -EOPNOTSUPP; 
+				rc = -EOPNOTSUPP;
 			}
 		} else {
 			inode->i_mode |= S_IFREG; /* then it is a file */
-			rc = -EOPNOTSUPP; /* or some unknown SFU type */	
-		}		
+			rc = -EOPNOTSUPP; /* or some unknown SFU type */
+		}
 		CIFSSMBClose(xid, pTcon, netfid);
 	}
 	return rc;
-	
 }
 
 #define SFBITS_MASK (S_ISVTX | S_ISGID | S_ISUID)  /* SETFILEBITS valid bits */
 
-static int get_sfu_uid_mode(struct inode * inode,
+static int get_sfu_uid_mode(struct inode *inode,
 			const unsigned char *path,
 			struct cifs_sb_info *cifs_sb, int xid)
 {
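/*
 * Layout of the 24-byte SFU special-file header implied by the parsing in
 * decode_sfu_inode() above (a sketch for illustration, not taken from a
 * specification):
 */
struct sketch_sfu_special_hdr {
	char   magic[8];	/* "IntxBLK", "IntxCHR" or "IntxLNK" */
	__le64 major;		/* device major, block/char devices only */
	__le64 minor;		/* device minor, block/char devices only */
};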
@@ -301,15 +300,15 @@
 
 	rc = CIFSSMBQueryEA(xid, cifs_sb->tcon, path, "SETFILEBITS",
 			ea_value, 4 /* size of buf */, cifs_sb->local_nls,
-                        cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+		cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
 	if (rc < 0)
 		return (int)rc;
 	else if (rc > 3) {
 		mode = le32_to_cpu(*((__le32 *)ea_value));
-		inode->i_mode &= ~SFBITS_MASK; 
-		cFYI(1,("special bits 0%o org mode 0%o", mode, inode->i_mode));
+		inode->i_mode &= ~SFBITS_MASK;
+		cFYI(1, ("special bits 0%o org mode 0%o", mode, inode->i_mode));
 		inode->i_mode = (mode &  SFBITS_MASK) | inode->i_mode;
-		cFYI(1,("special mode bits 0%o", mode));
+		cFYI(1, ("special mode bits 0%o", mode));
 		return 0;
 	} else {
 		return 0;
@@ -317,8 +316,6 @@
 #else
 	return -EOPNOTSUPP;
 #endif
-
-		
 }
 
 int cifs_get_inode_info(struct inode **pinode,
@@ -334,11 +331,11 @@
 	int adjustTZ = FALSE;
 
 	pTcon = cifs_sb->tcon;
-	cFYI(1,("Getting info on %s", search_path));
+	cFYI(1, ("Getting info on %s", search_path));
 
 	if ((pfindData == NULL) && (*pinode != NULL)) {
 		if (CIFS_I(*pinode)->clientCanCacheRead) {
-			cFYI(1,("No need to revalidate cached inode sizes"));
+			cFYI(1, ("No need to revalidate cached inode sizes"));
 			return rc;
 		}
 	}
@@ -359,12 +356,11 @@
 		failed at least once - set flag in tcon or mount */
 		if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
 			rc = SMBQueryInformation(xid, pTcon, search_path,
-					pfindData, cifs_sb->local_nls, 
+					pfindData, cifs_sb->local_nls,
 					cifs_sb->mnt_cifs_flags &
 					  CIFS_MOUNT_MAP_SPECIAL_CHR);
 			adjustTZ = TRUE;
 		}
-		
 	}
 	/* dump_mem("\nQPathInfo return data",&findData, sizeof(findData)); */
 	if (rc) {
@@ -384,8 +380,8 @@
 			strncat(tmp_path, search_path, MAX_PATHCONF);
 			rc = connect_to_dfs_path(xid, pTcon->ses,
 						 /* treename + */ tmp_path,
-						 cifs_sb->local_nls, 
-						 cifs_sb->mnt_cifs_flags & 
+						 cifs_sb->local_nls,
+						 cifs_sb->mnt_cifs_flags &
 						   CIFS_MOUNT_MAP_SPECIAL_CHR);
 			kfree(tmp_path);
 			/* BB fix up inode etc. */
@@ -419,17 +415,17 @@
 			   there Windows server or network appliances for which
 			   IndexNumber field is not guaranteed unique? */
 
-			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM){
+			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
 				int rc1 = 0;
 				__u64 inode_num;
 
-				rc1 = CIFSGetSrvInodeNumber(xid, pTcon, 
-					search_path, &inode_num, 
+				rc1 = CIFSGetSrvInodeNumber(xid, pTcon,
+					search_path, &inode_num,
 					cifs_sb->local_nls,
 					cifs_sb->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
 				if (rc1) {
-					cFYI(1,("GetSrvInodeNum rc %d", rc1));
+					cFYI(1, ("GetSrvInodeNum rc %d", rc1));
 					/* BB EOPNOSUPP disable SERVER_INUM? */
 				} else /* do we need cast or hash to ino? */
 					(*pinode)->i_ino = inode_num;
@@ -463,7 +459,7 @@
 		cFYI(0, ("Attributes came in as 0x%x", attr));
 		if (adjustTZ && (pTcon->ses) && (pTcon->ses->server)) {
 			inode->i_ctime.tv_sec += pTcon->ses->server->timeAdj;
-	                inode->i_mtime.tv_sec += pTcon->ses->server->timeAdj;
+			inode->i_mtime.tv_sec += pTcon->ses->server->timeAdj;
 		}
 
 		/* set default mode. will override for dirs below */
@@ -471,8 +467,9 @@
 			/* new inode, can safely set these fields */
 			inode->i_mode = cifs_sb->mnt_file_mode;
 		else /* since we set the inode type below we need to mask off
-		     to avoid strange results if type changes and both get orred in */ 
-			inode->i_mode &= ~S_IFMT; 
+		     to avoid strange results if type changes and both
+		     get orred in */
+			inode->i_mode &= ~S_IFMT;
 /*		if (attr & ATTR_REPARSE)  */
 		/* We no longer handle these as symlinks because we could not
 		   follow them due to the absolute path with drive letter */
@@ -490,13 +487,13 @@
 /* BB Finish for SFU style symlinks and devices */
 		} else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) &&
 			   (cifsInfo->cifsAttrs & ATTR_SYSTEM)) {
-			if (decode_sfu_inode(inode, 
+			if (decode_sfu_inode(inode,
 					 le64_to_cpu(pfindData->EndOfFile),
 					 search_path,
 					 cifs_sb, xid)) {
-				cFYI(1,("Unrecognized sfu inode type"));
+				cFYI(1, ("Unrecognized sfu inode type"));
 			}
-			cFYI(1,("sfu mode 0%o",inode->i_mode));
+			cFYI(1, ("sfu mode 0%o", inode->i_mode));
 		} else {
 			inode->i_mode |= S_IFREG;
 			/* treat the dos attribute of read-only as read-only
@@ -512,12 +509,12 @@
 		/* BB add code here -
 		   validate if device or weird share or device type? */
 		}
-		
+
 		spin_lock(&inode->i_lock);
 		if (is_size_safe_to_change(cifsInfo, le64_to_cpu(pfindData->EndOfFile))) {
 			/* can not safely shrink the file size here if the
 			   client is writing to it due to potential races */
-			i_size_write(inode,le64_to_cpu(pfindData->EndOfFile));
+			i_size_write(inode, le64_to_cpu(pfindData->EndOfFile));
 
 			/* 512 bytes (2**9) is the fake blocksize that must be
 			   used for this calculation */
@@ -528,7 +525,7 @@
 
 		inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks);
 
-		/* BB fill in uid and gid here? with help from winbind? 
+		/* BB fill in uid and gid here? with help from winbind?
 		   or retrieve from NTFS stream extended attribute */
 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
 			/* fill in uid, gid, mode from server ACL */
@@ -540,7 +537,7 @@
 			inode->i_gid = cifs_sb->mnt_gid;
 			/* set so we do not keep refreshing these fields with
 			   bad data after user has changed them in memory */
-			atomic_set(&cifsInfo->inUse,1);
+			atomic_set(&cifsInfo->inUse, 1);
 		}
 
 		if (S_ISREG(inode->i_mode)) {
@@ -557,7 +554,7 @@
 			else /* not direct, send byte range locks */
 				inode->i_fop = &cifs_file_ops;
 
-			if (pTcon->ses->server->maxBuf < 
+			if (pTcon->ses->server->maxBuf <
 			     PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
 				inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 			else
@@ -586,10 +583,11 @@
 
 	cifs_sb = CIFS_SB(inode->i_sb);
 	xid = GetXid();
-	if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
-		cifs_get_inode_info_unix(&inode, "", inode->i_sb,xid);
+
+	if (cifs_sb->tcon->unix_ext)
+		cifs_get_inode_info_unix(&inode, "", inode->i_sb, xid);
 	else
-		cifs_get_inode_info(&inode, "", NULL, inode->i_sb,xid);
+		cifs_get_inode_info(&inode, "", NULL, inode->i_sb, xid);
 	/* can not call macro FreeXid here since in a void func */
 	_FreeXid(xid);
 }
@@ -623,9 +621,21 @@
 		FreeXid(xid);
 		return -ENOMEM;
 	}
+
+	if ((pTcon->ses->capabilities & CAP_UNIX) &&
+		(CIFS_UNIX_POSIX_PATH_OPS_CAP &
+			le64_to_cpu(pTcon->fsUnixInfo.Capability))) {
+		rc = CIFSPOSIXDelFile(xid, pTcon, full_path,
+			SMB_POSIX_UNLINK_FILE_TARGET, cifs_sb->local_nls,
+			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+		cFYI(1, ("posix del rc %d", rc));
+		if ((rc == 0) || (rc == -ENOENT))
+			goto psx_del_no_retry;
+	}
+
 	rc = CIFSSMBDelFile(xid, pTcon, full_path, cifs_sb->local_nls,
 			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
-
+psx_del_no_retry:
 	if (!rc) {
 		if (direntry->d_inode)
 			drop_nlink(direntry->d_inode);
@@ -638,12 +648,12 @@
 		rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, DELETE,
 				 CREATE_NOT_DIR | CREATE_DELETE_ON_CLOSE,
 				 &netfid, &oplock, NULL, cifs_sb->local_nls,
-				 cifs_sb->mnt_cifs_flags & 
+				 cifs_sb->mnt_cifs_flags &
 					CIFS_MOUNT_MAP_SPECIAL_CHR);
-		if (rc==0) {
+		if (rc == 0) {
 			CIFSSMBRenameOpenFile(xid, pTcon, netfid, NULL,
-					      cifs_sb->local_nls, 
-					      cifs_sb->mnt_cifs_flags & 
+					      cifs_sb->local_nls,
+					      cifs_sb->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
 			CIFSSMBClose(xid, pTcon, netfid);
 			if (direntry->d_inode)
@@ -659,7 +669,7 @@
 				rc = CIFSSMBSetTimes(xid, pTcon, full_path,
 						     pinfo_buf,
 						     cifs_sb->local_nls,
-						     cifs_sb->mnt_cifs_flags & 
+						     cifs_sb->mnt_cifs_flags &
 							CIFS_MOUNT_MAP_SPECIAL_CHR);
 			else
 				rc = -EOPNOTSUPP;
@@ -670,7 +680,7 @@
 			/*	rc = CIFSSMBSetAttrLegacy(xid, pTcon,
 							  full_path,
 							  (__u16)ATTR_NORMAL,
-							  cifs_sb->local_nls); 
+							  cifs_sb->local_nls);
 			   For some strange reason it seems that NT4 eats the
 			   old setattr call without actually setting the
 			   attributes so on to the third attempted workaround
@@ -683,9 +693,9 @@
 						 FILE_WRITE_ATTRIBUTES, 0,
 						 &netfid, &oplock, NULL,
 						 cifs_sb->local_nls,
-						 cifs_sb->mnt_cifs_flags & 
+						 cifs_sb->mnt_cifs_flags &
 						    CIFS_MOUNT_MAP_SPECIAL_CHR);
-				if (rc==0) {
+				if (rc == 0) {
 					rc = CIFSSMBSetFileTimes(xid, pTcon,
 								 pinfo_buf,
 								 netfid);
@@ -694,10 +704,10 @@
 			}
 			kfree(pinfo_buf);
 		}
-		if (rc==0) {
-			rc = CIFSSMBDelFile(xid, pTcon, full_path, 
-					    cifs_sb->local_nls, 
-					    cifs_sb->mnt_cifs_flags & 
+		if (rc == 0) {
+			rc = CIFSSMBDelFile(xid, pTcon, full_path,
+					    cifs_sb->local_nls,
+					    cifs_sb->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
 			if (!rc) {
 				if (direntry->d_inode)
@@ -711,10 +721,10 @@
 						 CREATE_NOT_DIR |
 						 CREATE_DELETE_ON_CLOSE,
 						 &netfid, &oplock, NULL,
-						 cifs_sb->local_nls, 
-						 cifs_sb->mnt_cifs_flags & 
+						 cifs_sb->local_nls,
+						 cifs_sb->mnt_cifs_flags &
 						    CIFS_MOUNT_MAP_SPECIAL_CHR);
-				if (rc==0) {
+				if (rc == 0) {
 					CIFSSMBRenameOpenFile(xid, pTcon,
 						netfid, NULL,
 						cifs_sb->local_nls,
@@ -773,8 +783,8 @@
 
 	tmp_inode->i_mode = le64_to_cpu(pData->Permissions);
 	/* since we set the inode type below we need to mask off type
-           to avoid strange results if bits above were corrupt */
-        tmp_inode->i_mode &= ~S_IFMT;
+	   to avoid strange results if bits above were corrupt */
+	tmp_inode->i_mode &= ~S_IFMT;
 	if (type == UNIX_FILE) {
 		*pobject_type = DT_REG;
 		tmp_inode->i_mode |= S_IFREG;
@@ -804,11 +814,11 @@
 		/* safest to just call it a file */
 		*pobject_type = DT_REG;
 		tmp_inode->i_mode |= S_IFREG;
-		cFYI(1,("unknown inode type %d",type)); 
+		cFYI(1, ("unknown inode type %d", type));
 	}
 
 #ifdef CONFIG_CIFS_DEBUG2
-	cFYI(1,("object type: %d", type));
+	cFYI(1, ("object type: %d", type));
 #endif
 	tmp_inode->i_uid = le64_to_cpu(pData->Uid);
 	tmp_inode->i_gid = le64_to_cpu(pData->Gid);
@@ -816,7 +826,7 @@
 
 	spin_lock(&tmp_inode->i_lock);
 	if (is_size_safe_to_change(cifsInfo, end_of_file)) {
-		/* can not safely change the file size here if the 
+		/* can not safely change the file size here if the
 		client is writing to it due to potential races */
 		i_size_write(tmp_inode, end_of_file);
 
@@ -830,27 +840,28 @@
 		cFYI(1, ("File inode"));
 		tmp_inode->i_op = &cifs_file_inode_ops;
 
-		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
-			if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
+			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
 				tmp_inode->i_fop = &cifs_file_direct_nobrl_ops;
 			else
 				tmp_inode->i_fop = &cifs_file_direct_ops;
-		
-		} else if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+
+		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
 			tmp_inode->i_fop = &cifs_file_nobrl_ops;
 		else
 			tmp_inode->i_fop = &cifs_file_ops;
 
-		if((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
-		   (cifs_sb->tcon->ses->server->maxBuf < 
+		if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
+		   (cifs_sb->tcon->ses->server->maxBuf <
 			PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
 			tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 		else
 			tmp_inode->i_data.a_ops = &cifs_addr_ops;
 
-		if(isNewInode)
-			return; /* No sense invalidating pages for new inode since we
-					   have not started caching readahead file data yet */
+		if (isNewInode)
+			return; /* No sense invalidating pages for new inode
+				   since we have not started caching
+				   readahead file data yet */
 
 		if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
 			(local_size == tmp_inode->i_size)) {
@@ -869,10 +880,10 @@
 		tmp_inode->i_op = &cifs_symlink_inode_ops;
 /* tmp_inode->i_fop = *//* do not need to set to anything */
 	} else {
-		cFYI(1, ("Special inode")); 
+		cFYI(1, ("Special inode"));
 		init_special_inode(tmp_inode, tmp_inode->i_mode,
 				   tmp_inode->i_rdev);
-	}	
+	}
 }
 
 int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
@@ -896,22 +907,22 @@
 		FreeXid(xid);
 		return -ENOMEM;
 	}
-	
-	if((pTcon->ses->capabilities & CAP_UNIX) && 
-		(CIFS_UNIX_POSIX_PATH_OPS_CAP &	
+
+	if ((pTcon->ses->capabilities & CAP_UNIX) &&
+		(CIFS_UNIX_POSIX_PATH_OPS_CAP &
 			le64_to_cpu(pTcon->fsUnixInfo.Capability))) {
 		u32 oplock = 0;
-		FILE_UNIX_BASIC_INFO * pInfo = 
+		FILE_UNIX_BASIC_INFO *pInfo =
 			kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
-		if(pInfo == NULL) {
+		if (pInfo == NULL) {
 			rc = -ENOMEM;
 			goto mkdir_out;
 		}
-			
+
 		rc = CIFSPOSIXCreate(xid, pTcon, SMB_O_DIRECTORY | SMB_O_CREAT,
 				mode, NULL /* netfid */, pInfo, &oplock,
-				full_path, cifs_sb->local_nls, 
-				cifs_sb->mnt_cifs_flags & 
+				full_path, cifs_sb->local_nls,
+				cifs_sb->mnt_cifs_flags &
 					CIFS_MOUNT_MAP_SPECIAL_CHR);
 		if (rc) {
 			cFYI(1, ("posix mkdir returned 0x%x", rc));
@@ -919,8 +930,9 @@
 		} else {
 			int obj_type;
 			if (pInfo->Type == -1) /* no return info - go query */
-				goto mkdir_get_info; 
-/*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need to set uid/gid */
+				goto mkdir_get_info;
+/*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) to see if need
+	to set uid/gid */
 			inc_nlink(inode);
 			if (pTcon->nocase)
 				direntry->d_op = &cifs_ci_dentry_ops;
@@ -937,7 +949,7 @@
 				newinode->i_ino =
 					(unsigned long)pInfo->UniqueId;
 			} /* note ino incremented to unique num in new_inode */
-			if(inode->i_sb->s_flags & MS_NOATIME)
+			if (inode->i_sb->s_flags & MS_NOATIME)
 				newinode->i_flags |= S_NOATIME | S_NOCMTIME;
 			newinode->i_nlink = 2;
 
@@ -949,18 +961,18 @@
 			posix_fill_in_inode(direntry->d_inode,
 					pInfo, &obj_type, 1 /* NewInode */);
 #ifdef CONFIG_CIFS_DEBUG2
-			cFYI(1,("instantiated dentry %p %s to inode %p",
+			cFYI(1, ("instantiated dentry %p %s to inode %p",
 				direntry, direntry->d_name.name, newinode));
 
-			if(newinode->i_nlink != 2)
-				cFYI(1,("unexpected number of links %d",
+			if (newinode->i_nlink != 2)
+				cFYI(1, ("unexpected number of links %d",
 					newinode->i_nlink));
 #endif
 		}
 		kfree(pInfo);
 		goto mkdir_out;
-	}	
-	
+	}
+
 	/* BB add setting the equivalent of mode via CreateX w/ACLs */
 	rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls,
 			  cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -968,14 +980,14 @@
 		cFYI(1, ("cifs_mkdir returned 0x%x", rc));
 		d_drop(direntry);
 	} else {
-mkdir_get_info:		
+mkdir_get_info:
 		inc_nlink(inode);
-		if (pTcon->ses->capabilities & CAP_UNIX)
+		if (pTcon->unix_ext)
 			rc = cifs_get_inode_info_unix(&newinode, full_path,
-						      inode->i_sb,xid);
+						      inode->i_sb, xid);
 		else
 			rc = cifs_get_inode_info(&newinode, full_path, NULL,
-						 inode->i_sb,xid);
+						 inode->i_sb, xid);
 
 		if (pTcon->nocase)
 			direntry->d_op = &cifs_ci_dentry_ops;
@@ -983,10 +995,10 @@
 			direntry->d_op = &cifs_dentry_ops;
 		d_instantiate(direntry, newinode);
 		 /* setting nlink not necessary except in cases where we
-		  * failed to get it from the server or was set bogus */ 
+		  * failed to get it from the server or was set bogus */
 		if ((direntry->d_inode) && (direntry->d_inode->i_nlink < 2))
-				direntry->d_inode->i_nlink = 2; 
-		if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
+				direntry->d_inode->i_nlink = 2;
+		if (pTcon->unix_ext) {
 			mode &= ~current->fs->umask;
 			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
 				CIFSSMBUnixSetPerms(xid, pTcon, full_path,
@@ -1002,27 +1014,27 @@
 						    mode, (__u64)-1,
 						    (__u64)-1, 0 /* dev_t */,
 						    cifs_sb->local_nls,
-						    cifs_sb->mnt_cifs_flags & 
+						    cifs_sb->mnt_cifs_flags &
 						    CIFS_MOUNT_MAP_SPECIAL_CHR);
 			}
 		} else {
 			/* BB to be implemented via Windows secrty descriptors
 			   eg CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
 						 -1, -1, local_nls); */
-			if(direntry->d_inode) {
+			if (direntry->d_inode) {
 				direntry->d_inode->i_mode = mode;
 				direntry->d_inode->i_mode |= S_IFDIR;
-				if(cifs_sb->mnt_cifs_flags & 
+				if (cifs_sb->mnt_cifs_flags &
 				     CIFS_MOUNT_SET_UID) {
-					direntry->d_inode->i_uid = 
+					direntry->d_inode->i_uid =
 						current->fsuid;
-					direntry->d_inode->i_gid = 
+					direntry->d_inode->i_gid =
 						current->fsgid;
 				}
 			}
 		}
 	}
-mkdir_out:	
+mkdir_out:
 	kfree(full_path);
 	FreeXid(xid);
 	return rc;
@@ -1056,7 +1068,7 @@
 	if (!rc) {
 		drop_nlink(inode);
 		spin_lock(&direntry->d_inode->i_lock);
-		i_size_write(direntry->d_inode,0);
+		i_size_write(direntry->d_inode, 0);
 		clear_nlink(direntry->d_inode);
 		spin_unlock(&direntry->d_inode->i_lock);
 	}
@@ -1119,9 +1131,9 @@
 			kmalloc(2 * sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
 		if (info_buf_source != NULL) {
 			info_buf_target = info_buf_source + 1;
-			if (pTcon->ses->capabilities & CAP_UNIX)
+			if (pTcon->unix_ext)
 				rc = CIFSSMBUnixQPathInfo(xid, pTcon, fromName,
-					info_buf_source, 
+					info_buf_source,
 					cifs_sb_source->local_nls,
 					cifs_sb_source->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -1171,12 +1183,12 @@
 		   might not be the right access to request */
 		rc = CIFSSMBOpen(xid, pTcon, fromName, FILE_OPEN, GENERIC_READ,
 				 CREATE_NOT_DIR, &netfid, &oplock, NULL,
-				 cifs_sb_source->local_nls, 
-				 cifs_sb_source->mnt_cifs_flags & 
+				 cifs_sb_source->local_nls,
+				 cifs_sb_source->mnt_cifs_flags &
 					CIFS_MOUNT_MAP_SPECIAL_CHR);
-		if (rc==0) {
+		if (rc == 0) {
 			rc = CIFSSMBRenameOpenFile(xid, pTcon, netfid, toName,
-					      cifs_sb_source->local_nls, 
+					      cifs_sb_source->local_nls,
 					      cifs_sb_source->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
 			CIFSSMBClose(xid, pTcon, netfid);
@@ -1247,9 +1259,9 @@
 	local_mtime = direntry->d_inode->i_mtime;
 	local_size = direntry->d_inode->i_size;
 
-	if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
+	if (cifs_sb->tcon->unix_ext) {
 		rc = cifs_get_inode_info_unix(&direntry->d_inode, full_path,
-					      direntry->d_sb,xid);
+					      direntry->d_sb, xid);
 		if (rc) {
 			cFYI(1, ("error on getting revalidate info %d", rc));
 /*			if (rc != -ENOENT)
@@ -1258,7 +1270,7 @@
 		}
 	} else {
 		rc = cifs_get_inode_info(&direntry->d_inode, full_path, NULL,
-					 direntry->d_sb,xid);
+					 direntry->d_sb, xid);
 		if (rc) {
 			cFYI(1, ("error on getting revalidate info %d", rc));
 /*			if (rc != -ENOENT)
@@ -1271,7 +1283,7 @@
 	/* if not oplocked, we invalidate inode pages if mtime or file size
 	   had changed on server */
 
-	if (timespec_equal(&local_mtime,&direntry->d_inode->i_mtime) && 
+	if (timespec_equal(&local_mtime, &direntry->d_inode->i_mtime) &&
 	    (local_size == direntry->d_inode->i_size)) {
 		cFYI(1, ("cifs_revalidate - inode unchanged"));
 	} else {
@@ -1298,7 +1310,7 @@
 	if (invalidate_inode) {
 	/* shrink_dcache not necessary now that cifs dentry ops
 	are exported for negative dentries */
-/*		if(S_ISDIR(direntry->d_inode->i_mode)) 
+/*		if (S_ISDIR(direntry->d_inode->i_mode))
 			shrink_dcache_parent(direntry); */
 		if (S_ISREG(direntry->d_inode->i_mode)) {
 			if (direntry->d_inode->i_mapping)
@@ -1313,7 +1325,7 @@
 		}
 	}
 /*	mutex_unlock(&direntry->d_inode->i_mutex); */
-	
+
 	kfree(full_path);
 	FreeXid(xid);
 	return rc;
@@ -1335,23 +1347,19 @@
 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
 	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
 	struct page *page;
-	char *kaddr;
 	int rc = 0;
 
 	page = grab_cache_page(mapping, index);
 	if (!page)
 		return -ENOMEM;
 
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
 	unlock_page(page);
 	page_cache_release(page);
 	return rc;
 }
 
-static int cifs_vmtruncate(struct inode * inode, loff_t offset)
+static int cifs_vmtruncate(struct inode *inode, loff_t offset)
 {
 	struct address_space *mapping = inode->i_mapping;
 	unsigned long limit;
@@ -1424,13 +1432,13 @@
 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) == 0) {
 		/* check if we have permission to change attrs */
 		rc = inode_change_ok(direntry->d_inode, attrs);
-		if(rc < 0) {
+		if (rc < 0) {
 			FreeXid(xid);
 			return rc;
 		} else
 			rc = 0;
 	}
-		
+
 	full_path = build_path_from_dentry(direntry);
 	if (full_path == NULL) {
 		FreeXid(xid);
@@ -1459,16 +1467,16 @@
 			rc = CIFSSMBSetFileSize(xid, pTcon, attrs->ia_size,
 						nfid, npid, FALSE);
 			atomic_dec(&open_file->wrtPending);
-			cFYI(1,("SetFSize for attrs rc = %d", rc));
-			if((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
+			cFYI(1, ("SetFSize for attrs rc = %d", rc));
+			if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
 				int bytes_written;
 				rc = CIFSSMBWrite(xid, pTcon,
 						  nfid, 0, attrs->ia_size,
 						  &bytes_written, NULL, NULL,
 						  1 /* 45 seconds */);
-				cFYI(1,("Wrt seteof rc %d", rc));
+				cFYI(1, ("Wrt seteof rc %d", rc));
 			}
-		} else 
+		} else
 			rc = -EINVAL;
 
 		if (rc != 0) {
@@ -1478,11 +1486,11 @@
 			   it by handle */
 			rc = CIFSSMBSetEOF(xid, pTcon, full_path,
 					   attrs->ia_size, FALSE,
-					   cifs_sb->local_nls, 
+					   cifs_sb->local_nls,
 					   cifs_sb->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
 			cFYI(1, ("SetEOF by path (setattrs) rc = %d", rc));
-			if((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
+			if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
 				__u16 netfid;
 				int oplock = FALSE;
 
@@ -1493,14 +1501,14 @@
 					NULL, cifs_sb->local_nls,
 					cifs_sb->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
-				if (rc==0) {
+				if (rc == 0) {
 					int bytes_written;
 					rc = CIFSSMBWrite(xid, pTcon,
 							netfid, 0,
 							attrs->ia_size,
 							&bytes_written, NULL,
 							NULL, 1 /* 45 sec */);
-					cFYI(1,("wrt seteof rc %d",rc));
+					cFYI(1, ("wrt seteof rc %d", rc));
 					CIFSSMBClose(xid, pTcon, netfid);
 				}
 
@@ -1517,7 +1525,7 @@
 			rc = cifs_vmtruncate(direntry->d_inode, attrs->ia_size);
 			cifs_truncate_page(direntry->d_inode->i_mapping,
 					   direntry->d_inode->i_size);
-		} else 
+		} else
 			goto cifs_setattr_exit;
 	}
 	if (attrs->ia_valid & ATTR_UID) {
@@ -1535,11 +1543,11 @@
 		mode = attrs->ia_mode;
 	}
 
-	if ((cifs_sb->tcon->ses->capabilities & CAP_UNIX)
+	if ((pTcon->unix_ext)
 	    && (attrs->ia_valid & (ATTR_MODE | ATTR_GID | ATTR_UID)))
 		rc = CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode, uid, gid,
 					 0 /* dev_t */, cifs_sb->local_nls,
-					 cifs_sb->mnt_cifs_flags & 
+					 cifs_sb->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
 	else if (attrs->ia_valid & ATTR_MODE) {
 		rc = 0;
@@ -1559,7 +1567,7 @@
 			time_buf.Attributes = cpu_to_le32(cifsInode->cifsAttrs &
 					    (~ATTR_READONLY));
 			/* Windows ignores set to zero */
-			if(time_buf.Attributes == 0)
+			if (time_buf.Attributes == 0)
 				time_buf.Attributes |= cpu_to_le32(ATTR_NORMAL);
 		}
 		/* BB to be implemented -
@@ -1585,7 +1593,7 @@
 	   stamps are changed explicitly (i.e. by utime()
 	   since we would then have a mix of client and
 	   server times */
-	   
+
 	if (set_time && (attrs->ia_valid & ATTR_CTIME)) {
 		set_time = TRUE;
 		/* Although Samba throws this field away
@@ -1624,7 +1632,7 @@
 					 NULL, cifs_sb->local_nls,
 					 cifs_sb->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
-			if (rc==0) {
+			if (rc == 0) {
 				rc = CIFSSMBSetFileTimes(xid, pTcon, &time_buf,
 							 netfid);
 				CIFSSMBClose(xid, pTcon, netfid);
@@ -1634,7 +1642,7 @@
 			   granularity */
 
 			/* rc = CIFSSMBSetTimesLegacy(xid, pTcon, full_path,
-        	        		&time_buf, cifs_sb->local_nls); */
+					&time_buf, cifs_sb->local_nls); */
 			}
 		}
 		/* Even if error on time set, no sense failing the call if
@@ -1642,7 +1650,7 @@
 		and this check ensures that we are not being called from
 		sys_utimes in which case we ought to fail the call back to
 		the user when the server rejects the call */
-		if((rc) && (attrs->ia_valid &
+		if ((rc) && (attrs->ia_valid &
 			 (ATTR_MODE | ATTR_GID | ATTR_UID | ATTR_SIZE)))
 			rc = 0;
 	}
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index a414f17..d24fe68 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -3,7 +3,7 @@
  *
  *   vfs operations that deal with io control
  *
- *   Copyright (C) International Business Machines  Corp., 2005
+ *   Copyright (C) International Business Machines  Corp., 2005,2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -30,7 +30,7 @@
 
 #define CIFS_IOC_CHECKUMOUNT _IO(0xCF, 2)
 
-int cifs_ioctl (struct inode * inode, struct file * filep,
+int cifs_ioctl (struct inode *inode, struct file *filep,
 		unsigned int command, unsigned long arg)
 {
 	int rc = -ENOTTY; /* strange error - but the precedent */
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 6baea85..6a85ef7 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -50,32 +50,33 @@
 
 	fromName = build_path_from_dentry(old_file);
 	toName = build_path_from_dentry(direntry);
-	if((fromName == NULL) || (toName == NULL)) {
+	if ((fromName == NULL) || (toName == NULL)) {
 		rc = -ENOMEM;
 		goto cifs_hl_exit;
 	}
 
-	if (cifs_sb_target->tcon->ses->capabilities & CAP_UNIX)
+/*	if (cifs_sb_target->tcon->ses->capabilities & CAP_UNIX)*/
+	if (pTcon->unix_ext)
 		rc = CIFSUnixCreateHardLink(xid, pTcon, fromName, toName,
-					    cifs_sb_target->local_nls, 
+					    cifs_sb_target->local_nls,
 					    cifs_sb_target->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
 	else {
 		rc = CIFSCreateHardLink(xid, pTcon, fromName, toName,
-					cifs_sb_target->local_nls, 
+					cifs_sb_target->local_nls,
 					cifs_sb_target->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
-		if((rc == -EIO) || (rc == -EINVAL))
-			rc = -EOPNOTSUPP;  
+		if ((rc == -EIO) || (rc == -EINVAL))
+			rc = -EOPNOTSUPP;
 	}
 
 	d_drop(direntry);	/* force new lookup from server of target */
 
 	/* if source file is cached (oplocked) revalidate will not go to server
 	   until the file is closed or oplock broken so update nlinks locally */
-	if(old_file->d_inode) {
+	if (old_file->d_inode) {
 		cifsInode = CIFS_I(old_file->d_inode);
-		if(rc == 0) {
+		if (rc == 0) {
 			old_file->d_inode->i_nlink++;
 /* BB should we make this contingent on superblock flag NOATIME? */
 /*			old_file->d_inode->i_ctime = CURRENT_TIME;*/
@@ -84,14 +85,14 @@
 			to set the parent dir cifs inode time to zero
 			to force revalidate (faster) for it too? */
 		}
-		/* if not oplocked will force revalidate to get info 
+		/* if not oplocked will force revalidate to get info
 		   on source file from srv */
 		cifsInode->time = 0;
 
-                /* Will update parent dir timestamps from srv within a second.
+		/* Will update parent dir timestamps from srv within a second.
 		   Would it really be worth it to set the parent dir (cifs
 		   inode) time field to zero to force revalidate on parent
-		   directory faster ie 
+		   directory faster ie
 			CIFS_I(inode)->time = 0;  */
 	}
 
@@ -109,7 +110,7 @@
 	int rc = -EACCES;
 	int xid;
 	char *full_path = NULL;
-	char * target_path = ERR_PTR(-ENOMEM);
+	char *target_path = ERR_PTR(-ENOMEM);
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
 
@@ -129,13 +130,19 @@
 		goto out;
 	}
 
-/* BB add read reparse point symlink code and Unix extensions symlink code here BB */
+	/* We could change this to:
+		if (pTcon->unix_ext)
+	   but there does not seem any point in refusing to
+	   get symlink info if we can, even if unix extensions
+	   turned off for this mount */
+
 	if (pTcon->ses->capabilities & CAP_UNIX)
 		rc = CIFSSMBUnixQuerySymLink(xid, pTcon, full_path,
 					     target_path,
 					     PATH_MAX-1,
 					     cifs_sb->local_nls);
 	else {
+		/* BB add read reparse point symlink code here */
 		/* rc = CIFSSMBQueryReparseLinkInfo */
 		/* BB Add code to Query ReparsePoint info */
 		/* BB Add MAC style xsymlink check here if enabled */
@@ -176,7 +183,7 @@
 
 	full_path = build_path_from_dentry(direntry);
 
-	if(full_path == NULL) {
+	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
 	}
@@ -185,19 +192,20 @@
 	cFYI(1, ("symname is %s", symname));
 
 	/* BB what if DFS and this volume is on different share? BB */
-	if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
+	if (pTcon->unix_ext)
 		rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
 					   cifs_sb->local_nls);
 	/* else
-	   rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName,cifs_sb_target->local_nls); */
+	   rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName,
+					cifs_sb_target->local_nls); */
 
 	if (rc == 0) {
-		if (pTcon->ses->capabilities & CAP_UNIX)
+		if (pTcon->unix_ext)
 			rc = cifs_get_inode_info_unix(&newinode, full_path,
-						      inode->i_sb,xid);
+						      inode->i_sb, xid);
 		else
 			rc = cifs_get_inode_info(&newinode, full_path, NULL,
-						 inode->i_sb,xid);
+						 inode->i_sb, xid);
 
 		if (rc != 0) {
 			cFYI(1, ("Create symlink ok, getinodeinfo fail rc = %d",
@@ -226,9 +234,9 @@
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
 	char *full_path = NULL;
-	char *tmp_path =  NULL;
-	char * tmpbuffer;
-	unsigned char * referrals = NULL;
+	char *tmp_path = NULL;
+	char *tmpbuffer;
+	unsigned char *referrals = NULL;
 	int num_referrals = 0;
 	int len;
 	__u16 fid;
@@ -237,13 +245,13 @@
 	cifs_sb = CIFS_SB(inode->i_sb);
 	pTcon = cifs_sb->tcon;
 
-/* BB would it be safe against deadlock to grab this sem 
+/* BB would it be safe against deadlock to grab this sem
       even though rename itself grabs the sem and calls lookup? */
 /*       mutex_lock(&inode->i_sb->s_vfs_rename_mutex);*/
 	full_path = build_path_from_dentry(direntry);
 /*       mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);*/
 
-	if(full_path == NULL) {
+	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
 	}
@@ -251,70 +259,80 @@
 	cFYI(1,
 	     ("Full path: %s inode = 0x%p pBuffer = 0x%p buflen = %d",
 	      full_path, inode, pBuffer, buflen));
-	if(buflen > PATH_MAX)
+	if (buflen > PATH_MAX)
 		len = PATH_MAX;
 	else
 		len = buflen;
-	tmpbuffer = kmalloc(len,GFP_KERNEL);   
-	if(tmpbuffer == NULL) {
+	tmpbuffer = kmalloc(len, GFP_KERNEL);
+	if (tmpbuffer == NULL) {
 		kfree(full_path);
 		FreeXid(xid);
 		return -ENOMEM;
 	}
 
-/* BB add read reparse point symlink code and Unix extensions symlink code here BB */
+/* BB add read reparse point symlink code and
+	Unix extensions symlink code here BB */
+/* We could disable this based on pTcon->unix_ext flag instead ... but why? */
 	if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
 		rc = CIFSSMBUnixQuerySymLink(xid, pTcon, full_path,
 				tmpbuffer,
 				len - 1,
 				cifs_sb->local_nls);
 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
-		cERROR(1,("SFU style symlinks not implemented yet"));
+		cERROR(1, ("SFU style symlinks not implemented yet"));
 		/* add open and read as in fs/cifs/inode.c */
-	
 	} else {
 		rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, GENERIC_READ,
-				OPEN_REPARSE_POINT,&fid, &oplock, NULL, 
-				cifs_sb->local_nls, 
-				cifs_sb->mnt_cifs_flags & 
+				OPEN_REPARSE_POINT, &fid, &oplock, NULL,
+				cifs_sb->local_nls,
+				cifs_sb->mnt_cifs_flags &
 					CIFS_MOUNT_MAP_SPECIAL_CHR);
-		if(!rc) {
+		if (!rc) {
 			rc = CIFSSMBQueryReparseLinkInfo(xid, pTcon, full_path,
 				tmpbuffer,
-				len - 1, 
+				len - 1,
 				fid,
 				cifs_sb->local_nls);
-			if(CIFSSMBClose(xid, pTcon, fid)) {
-				cFYI(1,("Error closing junction point (open for ioctl)"));
+			if (CIFSSMBClose(xid, pTcon, fid)) {
+				cFYI(1, ("Error closing junction point "
+					 "(open for ioctl)"));
 			}
-			if(rc == -EIO) {
+			if (rc == -EIO) {
 				/* Query if DFS Junction */
 				tmp_path =
 					kmalloc(MAX_TREE_SIZE + MAX_PATHCONF + 1,
 						GFP_KERNEL);
 				if (tmp_path) {
-					strncpy(tmp_path, pTcon->treeName, MAX_TREE_SIZE);
-					strncat(tmp_path, full_path, MAX_PATHCONF);
-					rc = get_dfs_path(xid, pTcon->ses, tmp_path,
+					strncpy(tmp_path, pTcon->treeName,
+						MAX_TREE_SIZE);
+					strncat(tmp_path, full_path,
+						MAX_PATHCONF);
+					rc = get_dfs_path(xid, pTcon->ses,
+						tmp_path,
 						cifs_sb->local_nls,
 						&num_referrals, &referrals,
 						cifs_sb->mnt_cifs_flags &
 						    CIFS_MOUNT_MAP_SPECIAL_CHR);
-					cFYI(1,("Get DFS for %s rc = %d ",tmp_path, rc));
-					if((num_referrals == 0) && (rc == 0))
+					cFYI(1, ("Get DFS for %s rc = %d ",
+						tmp_path, rc));
+					if ((num_referrals == 0) && (rc == 0))
 						rc = -EACCES;
 					else {
-						cFYI(1,("num referral: %d",num_referrals));
-						if(referrals) {
-							cFYI(1,("referral string: %s",referrals));
-							strncpy(tmpbuffer, referrals, len-1);                            
+						cFYI(1, ("num referral: %d",
+							num_referrals));
+						if (referrals) {
+							cFYI(1,("referral string: %s", referrals));
+							strncpy(tmpbuffer,
+								referrals,
+								len-1);
 						}
 					}
 					kfree(referrals);
 					kfree(tmp_path);
 }
-				/* BB add code like else decode referrals then memcpy to
-				  tmpbuffer and free referrals string array BB */
+				/* BB add code like else decode referrals
+				then memcpy to tmpbuffer and free referrals
+				string array BB */
 			}
 		}
 	}
diff --git a/fs/cifs/md4.c b/fs/cifs/md4.c
index 46d62c9..a2415c1 100644
--- a/fs/cifs/md4.c
+++ b/fs/cifs/md4.c
@@ -1,20 +1,20 @@
-/* 
+/*
    Unix SMB/Netbios implementation.
    Version 1.9.
    a implementation of MD4 designed for use in the SMB authentication protocol
    Copyright (C) Andrew Tridgell 1997-1998.
    Modified by Steve French (sfrench@us.ibm.com) 2002-2003
-   
+
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
-   
+
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
-   
+
    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
@@ -170,7 +170,7 @@
 
 	while (n > 64) {
 		copy64(M, in);
-		mdfour64(M,&A,&B, &C, &D);
+		mdfour64(M, &A, &B, &C, &D);
 		in += 64;
 		n -= 64;
 	}
diff --git a/fs/cifs/md5.c b/fs/cifs/md5.c
index ccebf9b..e5c3e12 100644
--- a/fs/cifs/md5.c
+++ b/fs/cifs/md5.c
@@ -15,9 +15,9 @@
  * will fill a supplied 16-byte array with the digest.
  */
 
-/* This code slightly modified to fit into Samba by 
-   abartlet@samba.org Jun 2001 
-   and to fit the cifs vfs by 
+/* This code slightly modified to fit into Samba by
+   abartlet@samba.org Jun 2001
+   and to fit the cifs vfs by
    Steve French sfrench@us.ibm.com */
 
 #include <linux/string.h>
@@ -106,7 +106,7 @@
 }
 
 /*
- * Final wrapup - pad to 64-byte boundary with the bit pattern 
+ * Final wrapup - pad to 64-byte boundary with the bit pattern
  * 1 0* (64-bit count of bits processed, MSB-first)
  */
 void
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 19cc294..0bcec08 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/misc.c
  *
- *   Copyright (C) International Business Machines  Corp., 2002,2005
+ *   Copyright (C) International Business Machines  Corp., 2002,2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -16,7 +16,7 @@
  *
  *   You should have received a copy of the GNU Lesser General Public License
  *   along with this library; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #include <linux/slab.h>
@@ -32,12 +32,12 @@
 
 extern mempool_t *cifs_sm_req_poolp;
 extern mempool_t *cifs_req_poolp;
-extern struct task_struct * oplockThread;
+extern struct task_struct *oplockThread;
 
-/* The xid serves as a useful identifier for each incoming vfs request, 
-   in a similar way to the mid which is useful to track each sent smb, 
-   and CurrentXid can also provide a running counter (although it 
-   will eventually wrap past zero) of the total vfs operations handled 
+/* The xid serves as a useful identifier for each incoming vfs request,
+   in a similar way to the mid which is useful to track each sent smb,
+   and CurrentXid can also provide a running counter (although it
+   will eventually wrap past zero) of the total vfs operations handled
    since the cifs fs was mounted */
 
 unsigned int
@@ -47,10 +47,12 @@
 
 	spin_lock(&GlobalMid_Lock);
 	GlobalTotalActiveXid++;
+
+	/* keep high water mark for number of simultaneous ops in filesystem */
 	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
-		GlobalMaxActiveXid = GlobalTotalActiveXid;	/* keep high water mark for number of simultaneous vfs ops in our filesystem */
-	if(GlobalTotalActiveXid > 65000)
-		cFYI(1,("warning: more than 65000 requests active"));
+		GlobalMaxActiveXid = GlobalTotalActiveXid;
+	if (GlobalTotalActiveXid > 65000)
+		cFYI(1, ("warning: more than 65000 requests active"));
 	xid = GlobalCurrentXid++;
 	spin_unlock(&GlobalMid_Lock);
 	return xid;
@@ -60,7 +62,7 @@
 _FreeXid(unsigned int xid)
 {
 	spin_lock(&GlobalMid_Lock);
-	/* if(GlobalTotalActiveXid == 0)
+	/* if (GlobalTotalActiveXid == 0)
 		BUG(); */
 	GlobalTotalActiveXid--;
 	spin_unlock(&GlobalMid_Lock);
@@ -144,12 +146,12 @@
 {
 	struct smb_hdr *ret_buf = NULL;
 
-/* We could use negotiated size instead of max_msgsize - 
-   but it may be more efficient to always alloc same size 
-   albeit slightly larger than necessary and maxbuffersize 
+/* We could use negotiated size instead of max_msgsize -
+   but it may be more efficient to always alloc same size
+   albeit slightly larger than necessary and maxbuffersize
    defaults to this and can not be bigger */
-	ret_buf =
-	    (struct smb_hdr *) mempool_alloc(cifs_req_poolp, GFP_KERNEL | GFP_NOFS);
+	ret_buf = (struct smb_hdr *) mempool_alloc(cifs_req_poolp,
+						   GFP_KERNEL | GFP_NOFS);
 
 	/* clear the first few header bytes */
 	/* for most paths, more is cleared in header_assemble */
@@ -172,7 +174,7 @@
 		/* cFYI(1, ("Null buffer passed to cifs_buf_release"));*/
 		return;
 	}
-	mempool_free(buf_to_free,cifs_req_poolp);
+	mempool_free(buf_to_free, cifs_req_poolp);
 
 	atomic_dec(&bufAllocCount);
 	return;
@@ -183,12 +185,12 @@
 {
 	struct smb_hdr *ret_buf = NULL;
 
-/* We could use negotiated size instead of max_msgsize - 
-   but it may be more efficient to always alloc same size 
-   albeit slightly larger than necessary and maxbuffersize 
+/* We could use negotiated size instead of max_msgsize -
+   but it may be more efficient to always alloc same size
+   albeit slightly larger than necessary and maxbuffersize
    defaults to this and can not be bigger */
-	ret_buf =
-	    (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, GFP_KERNEL | GFP_NOFS);
+	ret_buf = (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp,
+						   GFP_KERNEL | GFP_NOFS);
 	if (ret_buf) {
 	/* No need to clear memory here, cleared in header assemble */
 	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
@@ -209,30 +211,30 @@
 		cFYI(1, ("Null buffer passed to cifs_small_buf_release"));
 		return;
 	}
-	mempool_free(buf_to_free,cifs_sm_req_poolp);
+	mempool_free(buf_to_free, cifs_sm_req_poolp);
 
 	atomic_dec(&smBufAllocCount);
 	return;
 }
 
-/* 
+/*
 	Find a free multiplex id (SMB mid). Otherwise there could be
 	mid collisions which might cause problems, demultiplexing the
 	wrong response to this request. Multiplex ids could collide if
 	one of a series requests takes much longer than the others, or
 	if a very large number of long lived requests (byte range
 	locks or FindNotify requests) are pending.  No more than
-	64K-1 requests can be outstanding at one time.  If no 
+	64K-1 requests can be outstanding at one time.  If no
 	mids are available, return zero.  A future optimization
 	could make the combination of mids and uid the key we use
-	to demultiplex on (rather than mid alone).  
+	to demultiplex on (rather than mid alone).
 	In addition to the above check, the cifs demultiplex
 	code already used the command code as a secondary
 	check of the frame and if signing is negotiated the
 	response would be discarded if the mid were the same
 	but the signature was wrong.  Since the mid is not put in the
 	pending queue until later (when it is about to be dispatched)
-	we do have to limit the number of outstanding requests 
+	we do have to limit the number of outstanding requests
 	to somewhat less than 64K-1 although it is hard to imagine
 	so many threads being in the vfs at one time.
 */
@@ -240,27 +242,27 @@
 {
 	__u16 mid = 0;
 	__u16 last_mid;
-	int   collision;  
+	int   collision;
 
-	if(server == NULL)
+	if (server == NULL)
 		return mid;
 
 	spin_lock(&GlobalMid_Lock);
 	last_mid = server->CurrentMid; /* we do not want to loop forever */
 	server->CurrentMid++;
 	/* This nested loop looks more expensive than it is.
-	In practice the list of pending requests is short, 
+	In practice the list of pending requests is short,
 	fewer than 50, and the mids are likely to be unique
 	on the first pass through the loop unless some request
 	takes longer than the 64 thousand requests before it
 	(and it would also have to have been a request that
 	 did not time out) */
-	while(server->CurrentMid != last_mid) {
+	while (server->CurrentMid != last_mid) {
 		struct list_head *tmp;
 		struct mid_q_entry *mid_entry;
 
 		collision = 0;
-		if(server->CurrentMid == 0)
+		if (server->CurrentMid == 0)
 			server->CurrentMid++;
 
 		list_for_each(tmp, &server->pending_mid_q) {
@@ -273,7 +275,7 @@
 				break;
 			}
 		}
-		if(collision == 0) {
+		if (collision == 0) {
 			mid = server->CurrentMid;
 			break;
 		}
@@ -290,11 +292,11 @@
 		const struct cifsTconInfo *treeCon, int word_count
 		/* length of fixed section (word count) in two byte units  */)
 {
-	struct list_head* temp_item;
-	struct cifsSesInfo * ses;
+	struct list_head *temp_item;
+	struct cifsSesInfo *ses;
 	char *temp = (char *) buffer;
 
-	memset(temp,0,256); /* bigger than MAX_CIFS_HDR_SIZE */
+	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
 
 	buffer->smb_buf_length =
 	    (2 * word_count) + sizeof (struct smb_hdr) -
@@ -325,7 +327,7 @@
 			/* Uid is not converted */
 			buffer->Uid = treeCon->ses->Suid;
 			buffer->Mid = GetNextMid(treeCon->ses->server);
-			if(multiuser_mount != 0) {
+			if (multiuser_mount != 0) {
 		/* For the multiuser case, there are few obvious technically  */
 		/* possible mechanisms to match the local linux user (uid)    */
 		/* to a valid remote smb user (smb_uid):		      */
@@ -348,21 +350,22 @@
 		/* 	   flag were disabled.  */
 
 		/*  BB Add support for establishing new tCon and SMB Session  */
-		/*      with userid/password pairs found on the smb session   */ 
+		/*      with userid/password pairs found on the smb session   */
 		/*	for other target tcp/ip addresses 		BB    */
-				if(current->fsuid != treeCon->ses->linux_uid) {
-					cFYI(1,("Multiuser mode and UID did not match tcon uid"));
+				if (current->fsuid != treeCon->ses->linux_uid) {
+					cFYI(1, ("Multiuser mode and UID "
+						 "did not match tcon uid"));
 					read_lock(&GlobalSMBSeslock);
 					list_for_each(temp_item, &GlobalSMBSessionList) {
 						ses = list_entry(temp_item, struct cifsSesInfo, cifsSessionList);
-						if(ses->linux_uid == current->fsuid) {
-							if(ses->server == treeCon->ses->server) {
-								cFYI(1,("found matching uid substitute right smb_uid"));  
+						if (ses->linux_uid == current->fsuid) {
+							if (ses->server == treeCon->ses->server) {
+								cFYI(1, ("found matching uid substitute right smb_uid"));
 								buffer->Uid = ses->Suid;
 								break;
 							} else {
-								/* BB eventually call cifs_setup_session here */
-								cFYI(1,("local UID found but smb sess with this server does not exist"));  
+				/* BB eventually call cifs_setup_session here */
+								cFYI(1, ("local UID found but no smb sess with this server exists"));
 							}
 						}
 					}
@@ -374,8 +377,8 @@
 			buffer->Flags2 |= SMBFLG2_DFS;
 		if (treeCon->nocase)
 			buffer->Flags  |= SMBFLG_CASELESS;
-		if((treeCon->ses) && (treeCon->ses->server))
-			if(treeCon->ses->server->secMode & 
+		if ((treeCon->ses) && (treeCon->ses->server))
+			if (treeCon->ses->server->secMode &
 			  (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
 				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 	}
@@ -388,18 +391,18 @@
 static int
 checkSMBhdr(struct smb_hdr *smb, __u16 mid)
 {
-	/* Make sure that this really is an SMB, that it is a response, 
+	/* Make sure that this really is an SMB, that it is a response,
 	   and that the message ids match */
-	if ((*(__le32 *) smb->Protocol == cpu_to_le32(0x424d53ff)) && 
-		(mid == smb->Mid)) {    
-		if(smb->Flags & SMBFLG_RESPONSE)
-			return 0;                    
-		else {        
+	if ((*(__le32 *) smb->Protocol == cpu_to_le32(0x424d53ff)) &&
+		(mid == smb->Mid)) {
+		if (smb->Flags & SMBFLG_RESPONSE)
+			return 0;
+		else {
 		/* only one valid case where server sends us request */
-			if(smb->Command == SMB_COM_LOCKING_ANDX)
+			if (smb->Command == SMB_COM_LOCKING_ANDX)
 				return 0;
 			else
-				cERROR(1, ("Rcvd Request not response"));         
+				cERROR(1, ("Received Request not response"));
 		}
 	} else { /* bad signature or mid */
 		if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff))
@@ -426,9 +429,9 @@
 			smb->WordCount = 0;
 			/* some error cases do not return wct and bcc */
 			return 0;
-		} else if ((length == sizeof(struct smb_hdr) + 1) && 
+		} else if ((length == sizeof(struct smb_hdr) + 1) &&
 				(smb->WordCount == 0)) {
-			char * tmp = (char *)smb;
+			char *tmp = (char *)smb;
 			/* Need to work around a bug in two servers here */
 			/* First, check if the part of bcc they sent was zero */
 			if (tmp[sizeof(struct smb_hdr)] == 0) {
@@ -442,7 +445,7 @@
 				tmp[sizeof(struct smb_hdr)+1] = 0;
 				return 0;
 			}
-			cERROR(1,("rcvd invalid byte count (bcc)"));
+			cERROR(1, ("rcvd invalid byte count (bcc)"));
 		} else {
 			cERROR(1, ("Length less than smb header size"));
 		}
@@ -458,32 +461,33 @@
 		return 1;
 	clc_len = smbCalcSize_LE(smb);
 
-	if(4 + len != length) {
-		cERROR(1, ("Length read does not match RFC1001 length %d",len));
+	if (4 + len != length) {
+		cERROR(1, ("Length read does not match RFC1001 length %d",
+			   len));
 		return 1;
 	}
 
 	if (4 + len != clc_len) {
 		/* check if bcc wrapped around for large read responses */
-		if((len > 64 * 1024) && (len > clc_len)) {
+		if ((len > 64 * 1024) && (len > clc_len)) {
 			/* check if lengths match mod 64K */
-			if(((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
-				return 0; /* bcc wrapped */			
+			if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
+				return 0; /* bcc wrapped */
 		}
 		cFYI(1, ("Calculated size %d vs length %d mismatch for mid %d",
 				clc_len, 4 + len, smb->Mid));
 		/* Windows XP can return a few bytes too much, presumably
-		an illegal pad, at the end of byte range lock responses 
+		an illegal pad, at the end of byte range lock responses
 		so we allow for that three byte pad, as long as actual
 		received length is as long or longer than calculated length */
-		/* We have now had to extend this more, since there is a 
+		/* We have now had to extend this more, since there is a
 		case in which it needs to be bigger still to handle a
 		malformed response to transact2 findfirst from WinXP when
 		access denied is returned and thus bcc and wct are zero
 		but server says length is 0x21 bytes too long as if the server
 		forget to reset the smb rfc1001 length when it reset the
 		wct and bcc to minimum size and drop the t2 parms and data */
-		if((4+len > clc_len) && (len <= clc_len + 512))
+		if ((4+len > clc_len) && (len <= clc_len + 512))
 			return 0;
 		else {
 			cERROR(1, ("RFC1001 size %d bigger than SMB for Mid=%d",
@@ -495,61 +499,64 @@
 }
 int
 is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
-{    
-	struct smb_com_lock_req * pSMB = (struct smb_com_lock_req *)buf;
+{
+	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
 	struct list_head *tmp;
 	struct list_head *tmp1;
 	struct cifsTconInfo *tcon;
 	struct cifsFileInfo *netfile;
 
-	cFYI(1,("Checking for oplock break or dnotify response"));
-	if((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
+	cFYI(1, ("Checking for oplock break or dnotify response"));
+	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
 	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
-		struct smb_com_transaction_change_notify_rsp * pSMBr =
+		struct smb_com_transaction_change_notify_rsp *pSMBr =
 			(struct smb_com_transaction_change_notify_rsp *)buf;
-		struct file_notify_information * pnotify;
+		struct file_notify_information *pnotify;
 		__u32 data_offset = 0;
-		if(pSMBr->ByteCount > sizeof(struct file_notify_information)) {
+		if (pSMBr->ByteCount > sizeof(struct file_notify_information)) {
 			data_offset = le32_to_cpu(pSMBr->DataOffset);
 
 			pnotify = (struct file_notify_information *)
 				((char *)&pSMBr->hdr.Protocol + data_offset);
-			cFYI(1,("dnotify on %s Action: 0x%x",pnotify->FileName,
+			cFYI(1, ("dnotify on %s Action: 0x%x",
+				 pnotify->FileName,
 				pnotify->Action));  /* BB removeme BB */
-	             /*   cifs_dump_mem("Rcvd notify Data: ",buf,
+			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
 				sizeof(struct smb_hdr)+60); */
 			return TRUE;
 		}
-		if(pSMBr->hdr.Status.CifsError) {
-			cFYI(1,("notify err 0x%d",pSMBr->hdr.Status.CifsError));
+		if (pSMBr->hdr.Status.CifsError) {
+			cFYI(1, ("notify err 0x%d",
+				pSMBr->hdr.Status.CifsError));
 			return TRUE;
 		}
 		return FALSE;
-	}  
-	if(pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
+	}
+	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
 		return FALSE;
-	if(pSMB->hdr.Flags & SMBFLG_RESPONSE) {
+	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
 		/* no sense logging error on invalid handle on oplock
 		   break - harmless race between close request and oplock
 		   break response is expected from time to time writing out
 		   large dirty files cached on the client */
-		if ((NT_STATUS_INVALID_HANDLE) == 
-		   le32_to_cpu(pSMB->hdr.Status.CifsError)) { 
-			cFYI(1,("invalid handle on oplock break"));
+		if ((NT_STATUS_INVALID_HANDLE) ==
+		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
+			cFYI(1, ("invalid handle on oplock break"));
 			return TRUE;
-		} else if (ERRbadfid == 
+		} else if (ERRbadfid ==
 		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
-			return TRUE;	  
+			return TRUE;
 		} else {
 			return FALSE; /* on valid oplock brk we get "request" */
 		}
 	}
-	if(pSMB->hdr.WordCount != 8)
+	if (pSMB->hdr.WordCount != 8)
 		return FALSE;
 
-	cFYI(1,(" oplock type 0x%d level 0x%d",pSMB->LockType,pSMB->OplockLevel));
-	if(!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
-		return FALSE;    
+	cFYI(1, ("oplock type 0x%d level 0x%d",
+		 pSMB->LockType, pSMB->OplockLevel));
+	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
+		return FALSE;
 
 	/* look up tcon based on tid & uid */
 	read_lock(&GlobalSMBSeslock);
@@ -557,36 +564,38 @@
 		tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
 		if ((tcon->tid == buf->Tid) && (srv == tcon->ses->server)) {
 			cifs_stats_inc(&tcon->num_oplock_brks);
-			list_for_each(tmp1,&tcon->openFileList){
-				netfile = list_entry(tmp1,struct cifsFileInfo,
+			list_for_each(tmp1, &tcon->openFileList) {
+				netfile = list_entry(tmp1, struct cifsFileInfo,
 						     tlist);
-				if(pSMB->Fid == netfile->netfid) {
+				if (pSMB->Fid == netfile->netfid) {
 					struct cifsInodeInfo *pCifsInode;
 					read_unlock(&GlobalSMBSeslock);
-					cFYI(1,("file id match, oplock break"));
-					pCifsInode = 
+					cFYI(1,
+					    ("file id match, oplock break"));
+					pCifsInode =
 						CIFS_I(netfile->pInode);
 					pCifsInode->clientCanCacheAll = FALSE;
-					if(pSMB->OplockLevel == 0)
+					if (pSMB->OplockLevel == 0)
 						pCifsInode->clientCanCacheRead
 							= FALSE;
 					pCifsInode->oplockPending = TRUE;
 					AllocOplockQEntry(netfile->pInode,
 							  netfile->netfid,
 							  tcon);
-					cFYI(1,("about to wake up oplock thd"));
-					if(oplockThread)
+					cFYI(1,
+					    ("about to wake up oplock thread"));
+					if (oplockThread)
 					    wake_up_process(oplockThread);
 					return TRUE;
 				}
 			}
 			read_unlock(&GlobalSMBSeslock);
-			cFYI(1,("No matching file for oplock break"));
+			cFYI(1, ("No matching file for oplock break"));
 			return TRUE;
 		}
 	}
 	read_unlock(&GlobalSMBSeslock);
-	cFYI(1,("Can not process oplock break for non-existent connection"));
+	cFYI(1, ("Can not process oplock break for non-existent connection"));
 	return TRUE;
 }
 
@@ -643,13 +652,13 @@
    only legal in POSIX-like OS (if they are present in the string). Path
    names are little endian 16 bit Unicode on the wire */
 int
-cifs_convertUCSpath(char *target, const __le16 * source, int maxlen,
-		    const struct nls_table * cp)
+cifs_convertUCSpath(char *target, const __le16 *source, int maxlen,
+		    const struct nls_table *cp)
 {
-	int i,j,len;
+	int i, j, len;
 	__u16 src_char;
 
-	for(i = 0, j = 0; i < maxlen; i++) {
+	for (i = 0, j = 0; i < maxlen; i++) {
 		src_char = le16_to_cpu(source[i]);
 		switch (src_char) {
 			case 0:
@@ -678,10 +687,10 @@
 			case UNI_LESSTHAN:
 				target[j] = '<';
 				break;
-			default: 
-				len = cp->uni2char(src_char, &target[j], 
+			default:
+				len = cp->uni2char(src_char, &target[j],
 						NLS_MAX_CHARSET_SIZE);
-				if(len > 0) {
+				if (len > 0) {
 					j += len;
 					continue;
 				} else {
@@ -690,7 +699,7 @@
 		}
 		j++;
 		/* make sure we do not overrun callers allocated temp buffer */
-		if(j >= (2 * NAME_MAX))
+		if (j >= (2 * NAME_MAX))
 			break;
 	}
 cUCS_out:
@@ -703,18 +712,18 @@
    only legal in POSIX-like OS (if they are present in the string). Path
    names are little endian 16 bit Unicode on the wire */
 int
-cifsConvertToUCS(__le16 * target, const char *source, int maxlen, 
-		 const struct nls_table * cp, int mapChars)
+cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
+		 const struct nls_table *cp, int mapChars)
 {
-	int i,j,charlen;
+	int i, j, charlen;
 	int len_remaining = maxlen;
 	char src_char;
 	__u16 temp;
 
-	if(!mapChars) 
+	if (!mapChars)
 		return cifs_strtoUCS(target, source, PATH_MAX, cp);
 
-	for(i = 0, j = 0; i < maxlen; j++) {
+	for (i = 0, j = 0; i < maxlen; j++) {
 		src_char = source[i];
 		switch (src_char) {
 			case 0:
@@ -737,7 +746,7 @@
 				break;
 			case '|':
 				target[j] = cpu_to_le16(UNI_PIPE);
-				break;			
+				break;
 			/* BB We can not handle remapping slash until
 			   all the calls to build_path_from_dentry
 			   are modified, as they use slash as separator BB */
@@ -749,7 +758,7 @@
 					len_remaining, &temp);
 				/* if no match, use question mark, which
 				at least in some cases servers as wild card */
-				if(charlen < 1) {
+				if (charlen < 1) {
 					target[j] = cpu_to_le16(0x003f);
 					charlen = 1;
 				} else
@@ -758,7 +767,7 @@
 				/* character may take more than one byte in the
 				   the source string, but will take exactly two
 				   bytes in the target string */
-				i+= charlen;
+				i += charlen;
 				continue;
 		}
 		i++; /* move to next char in source string */
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 53e304d..2bfed3f 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -3,23 +3,22 @@
  *
  *   Copyright (c) International Business Machines  Corp., 2002
  *   Author(s): Steve French (sfrench@us.ibm.com)
- * 
+ *
  *   Error mapping routines from Samba libsmb/errormap.c
  *   Copyright (C) Andrew Tridgell 2001
  *
- *
  *   This program is free software;  you can redistribute it and/or modify
  *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; either version 2 of the License, or 
+ *   the Free Software Foundation; either version 2 of the License, or
  *   (at your option) any later version.
- * 
+ *
  *   This program is distributed in the hope that it will be useful,
  *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
  *   the GNU General Public License for more details.
  *
  *   You should have received a copy of the GNU General Public License
- *   along with this program;  if not, write to the Free Software 
+ *   along with this program;  if not, write to the Free Software
  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
@@ -30,9 +29,7 @@
 #include <linux/fs.h>
 #include <asm/div64.h>
 #include <asm/byteorder.h>
-#ifdef CONFIG_CIFS_EXPERIMENTAL
 #include <linux/inet.h>
-#endif
 #include "cifsfs.h"
 #include "cifspdu.h"
 #include "cifsglob.h"
@@ -67,22 +64,22 @@
 	{ERRbadshare, -ETXTBSY},
 	{ERRlock, -EACCES},
 	{ERRunsup, -EINVAL},
-	{ERRnosuchshare,-ENXIO},
+	{ERRnosuchshare, -ENXIO},
 	{ERRfilexists, -EEXIST},
 	{ERRinvparm, -EINVAL},
 	{ERRdiskfull, -ENOSPC},
 	{ERRinvname, -ENOENT},
-	{ERRinvlevel,-EOPNOTSUPP},
+	{ERRinvlevel, -EOPNOTSUPP},
 	{ERRdirnotempty, -ENOTEMPTY},
 	{ERRnotlocked, -ENOLCK},
 	{ERRcancelviolation, -ENOLCK},
 	{ERRalreadyexists, -EEXIST},
 	{ERRmoredata, -EOVERFLOW},
-	{ERReasnotsupported,-EOPNOTSUPP},
+	{ERReasnotsupported, -EOPNOTSUPP},
 	{ErrQuota, -EDQUOT},
 	{ErrNotALink, -ENOLINK},
-	{ERRnetlogonNotStarted,-ENOPROTOOPT},
-	{ErrTooManyLinks,-EMLINK},
+	{ERRnetlogonNotStarted, -ENOPROTOOPT},
+	{ErrTooManyLinks, -EMLINK},
 	{0, 0}
 };
 
@@ -133,85 +130,24 @@
 /* returns 0 if invalid address */
 
 int
-cifs_inet_pton(int address_family, char *cp,void *dst)
+cifs_inet_pton(int address_family, char *cp, void *dst)
 {
-#ifdef CONFIG_CIFS_EXPERIMENTAL
 	int ret = 0;
 
 	/* calculate length by finding first slash or NULL */
-	/* BB Should we convert '/' slash to '\' here since it seems already done
-	   before this */
-	if( address_family == AF_INET ){
-		ret = in4_pton(cp, -1 /* len */, dst , '\\', NULL);	
-	} else if( address_family == AF_INET6 ){
+	/* BB Should we convert '/' slash to '\' here since it seems already
+	 * done before this */
+	if ( address_family == AF_INET ) {
+		ret = in4_pton(cp, -1 /* len */, dst , '\\', NULL);
+	} else if ( address_family == AF_INET6 ) {
 		ret = in6_pton(cp, -1 /* len */, dst , '\\', NULL);
 	}
 #ifdef CONFIG_CIFS_DEBUG2
-	cFYI(1,("address conversion returned %d for %s", ret, cp));
+	cFYI(1, ("address conversion returned %d for %s", ret, cp));
 #endif
 	if (ret > 0)
 		ret = 1;
 	return ret;
-#else
-	int value;
-	int digit;
-	int i;
-	char temp;
-	char bytes[4];
-	char *end = bytes;
-	static const int addr_class_max[4] =
-	    { 0xffffffff, 0xffffff, 0xffff, 0xff };
-
-	if(address_family != AF_INET)
-		return -EAFNOSUPPORT;
-
-	for (i = 0; i < 4; i++) {
-		bytes[i] = 0;
-	}
-
-	temp = *cp;
-
-	while (TRUE) {
-		if (!isdigit(temp))
-			return 0;
-
-		value = 0;
-		digit = 0;
-		for (;;) {
-			if (isascii(temp) && isdigit(temp)) {
-				value = (value * 10) + temp - '0';
-				temp = *++cp;
-				digit = 1;
-			} else
-				break;
-		}
-
-		if (temp == '.') {
-			if ((end > bytes + 2) || (value > 255))
-				return 0;
-			*end++ = value;
-			temp = *++cp;
-		} else if (temp == ':') {
-			cFYI(1,("IPv6 addresses not supported for CIFS mounts yet"));
-			return -1;
-		} else
-			break;
-	}
-
-	/* check for last characters */
-	if (temp != '\0' && (!isascii(temp) || !isspace(temp)))
-		if (temp != '\\') {
-			if (temp != '/')
-				return 0;
-			else
-				(*cp = '\\');	/* switch the slash the expected way */
-		}
-	if (value > addr_class_max[end - bytes])
-		return 0;
-
-	*((__be32 *)dst) = *((__be32 *) bytes) | htonl(value);
-	return 1; /* success */
-#endif /* EXPERIMENTAL */	
 }
 
 /*****************************************************************************
@@ -246,7 +182,7 @@
 	ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_MEDIA}, {
 	ERRDOS, 27, NT_STATUS_NONEXISTENT_SECTOR},
 /*	{ This NT error code was 'sqashed'
-	 from NT_STATUS_MORE_PROCESSING_REQUIRED to NT_STATUS_OK 
+	 from NT_STATUS_MORE_PROCESSING_REQUIRED to NT_STATUS_OK
 	 during the session setup } */
 	{
 	ERRDOS, ERRnomem, NT_STATUS_NO_MEMORY}, {
@@ -261,7 +197,7 @@
 	ERRDOS, 193, NT_STATUS_INVALID_FILE_FOR_SECTION}, {
 	ERRDOS, ERRnoaccess, NT_STATUS_ALREADY_COMMITTED},
 /*	{ This NT error code was 'sqashed'
-	 from NT_STATUS_ACCESS_DENIED to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE 
+	 from NT_STATUS_ACCESS_DENIED to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE
 	 during the session setup }   */
 	{
 	ERRDOS, ERRnoaccess, NT_STATUS_ACCESS_DENIED}, {
@@ -331,7 +267,7 @@
 	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACCOUNT_NAME}, {
 	ERRHRD, ERRgeneral, NT_STATUS_USER_EXISTS},
 /*	{ This NT error code was 'sqashed'
-	 from NT_STATUS_NO_SUCH_USER to NT_STATUS_LOGON_FAILURE 
+	 from NT_STATUS_NO_SUCH_USER to NT_STATUS_LOGON_FAILURE
 	 during the session setup } */
 	{
 	ERRDOS, ERRnoaccess, NT_STATUS_NO_SUCH_USER}, {
@@ -341,7 +277,7 @@
 	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_GROUP}, {
 	ERRHRD, ERRgeneral, NT_STATUS_LAST_ADMIN},
 /*	{ This NT error code was 'sqashed'
-	 from NT_STATUS_WRONG_PASSWORD to NT_STATUS_LOGON_FAILURE 
+	 from NT_STATUS_WRONG_PASSWORD to NT_STATUS_LOGON_FAILURE
 	 during the session setup } */
 	{
 	ERRSRV, ERRbadpw, NT_STATUS_WRONG_PASSWORD}, {
@@ -393,8 +329,8 @@
 	ERRHRD, ERRgeneral, NT_STATUS_FILE_INVALID}, {
 	ERRHRD, ERRgeneral, NT_STATUS_ALLOTTED_SPACE_EXCEEDED},
 /*	{ This NT error code was 'sqashed'
-	 from NT_STATUS_INSUFFICIENT_RESOURCES to NT_STATUS_INSUFF_SERVER_RESOURCES 
-	 during the session setup } */
+	 from NT_STATUS_INSUFFICIENT_RESOURCES to
+	 NT_STATUS_INSUFF_SERVER_RESOURCES during the session setup } */
 	{
 	ERRDOS, ERRnomem, NT_STATUS_INSUFFICIENT_RESOURCES}, {
 	ERRDOS, ERRbadpath, NT_STATUS_DFS_EXIT_PATH_FOUND}, {
@@ -638,8 +574,8 @@
 	ERRDOS, 19, NT_STATUS_TOO_LATE}, {
 	ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_LSA_SECRET},
 /*	{ This NT error code was 'sqashed'
-	 from NT_STATUS_NO_TRUST_SAM_ACCOUNT to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE 
-	 during the session setup } */
+	 from NT_STATUS_NO_TRUST_SAM_ACCOUNT to
+	 NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE during the session setup } */
 	{
 	ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_SAM_ACCOUNT}, {
 	ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_DOMAIN_FAILURE}, {
@@ -658,7 +594,7 @@
 	ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT}, {
 	ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT},
 /*	{ This NT error code was 'sqashed'
-	 from NT_STATUS_DOMAIN_TRUST_INCONSISTENT to NT_STATUS_LOGON_FAILURE 
+	 from NT_STATUS_DOMAIN_TRUST_INCONSISTENT to NT_STATUS_LOGON_FAILURE
 	 during the session setup }  */
 	{
 	ERRDOS, ERRnoaccess, NT_STATUS_DOMAIN_TRUST_INCONSISTENT}, {
@@ -789,7 +725,7 @@
 		if (((nt_errs[idx].nt_errcode) & 0xFFFFFF) ==
 		    (status_code & 0xFFFFFF)) {
 			printk(KERN_NOTICE "Status code returned 0x%08x %s\n",
-				   status_code,nt_errs[idx].nt_errstr);
+				   status_code, nt_errs[idx].nt_errstr);
 		}
 		idx++;
 	}
@@ -821,7 +757,7 @@
 map_smb_to_linux_error(struct smb_hdr *smb)
 {
 	unsigned int i;
-	int rc = -EIO;		/* if transport error smb error may not be set */
+	int rc = -EIO;	/* if transport error smb error may not be set */
 	__u8 smberrclass;
 	__u16 smberrcode;
 
@@ -832,9 +768,10 @@
 		return 0;
 
 	if (smb->Flags2 & SMBFLG2_ERR_STATUS) {
-		/* translate the newer STATUS codes to old style errors and then to POSIX errors */
+		/* translate the newer STATUS codes to old style SMB errors
+		 * and then to POSIX errors */
 		__u32 err = le32_to_cpu(smb->Status.CifsError);
-		if(cifsFYI & CIFS_RC)
+		if (cifsFYI & CIFS_RC)
 			cifs_print_status(err);
 		ntstatus_to_dos(err, &smberrclass, &smberrcode);
 	} else {
@@ -845,38 +782,42 @@
 	/* old style errors */
 
 	/* DOS class smb error codes - map DOS */
-	if (smberrclass == ERRDOS) {	/* one byte field no need to byte reverse */
+	if (smberrclass == ERRDOS) {  /* 1 byte field no need to byte reverse */
 		for (i = 0;
 		     i <
 		     sizeof (mapping_table_ERRDOS) /
 		     sizeof (struct smb_to_posix_error); i++) {
 			if (mapping_table_ERRDOS[i].smb_err == 0)
 				break;
-			else if (mapping_table_ERRDOS[i].smb_err == smberrcode) {
+			else if (mapping_table_ERRDOS[i].smb_err ==
+								smberrcode) {
 				rc = mapping_table_ERRDOS[i].posix_code;
 				break;
 			}
-			/* else try the next error mapping one to see if it will match */
+			/* else try next error mapping one to see if match */
 		}
-	} else if (smberrclass == ERRSRV) {	/* server class of error codes */
+	} else if (smberrclass == ERRSRV) {   /* server class of error codes */
 		for (i = 0;
 		     i <
 		     sizeof (mapping_table_ERRSRV) /
 		     sizeof (struct smb_to_posix_error); i++) {
 			if (mapping_table_ERRSRV[i].smb_err == 0)
 				break;
-			else if (mapping_table_ERRSRV[i].smb_err == smberrcode) {
+			else if (mapping_table_ERRSRV[i].smb_err ==
+								smberrcode) {
 				rc = mapping_table_ERRSRV[i].posix_code;
 				break;
 			}
-			/* else try the next error mapping one to see if it will match */
+			/* else try next error mapping to see if match */
 		}
 	}
 	/* else ERRHRD class errors or junk  - return EIO */
 
-	cFYI(1, (" !!Mapping smb error code %d to POSIX err %d !!", smberrcode,rc));
+	cFYI(1, (" !!Mapping smb error code %d to POSIX err %d !!",
+		 smberrcode, rc));
 
-	/* generic corrective action e.g. reconnect SMB session on ERRbaduid could be added */
+	/* generic corrective action e.g. reconnect SMB session on
+	 * ERRbaduid could be added */
 
 	return rc;
 }
@@ -910,7 +851,7 @@
 struct timespec
 cifs_NTtimeToUnix(u64 ntutc)
 {
-	struct timespec ts; 
+	struct timespec ts;
 	/* BB what about the timezone? BB */
 
 	/* Subtract the NTFS time offset, then convert to 1s intervals. */
@@ -918,7 +859,7 @@
 
 	t = ntutc - NTFS_TIME_OFFSET;
 	ts.tv_nsec = do_div(t, 10000000) * 100;
-	ts.tv_sec = t; 
+	ts.tv_sec = t;
 	return ts;
 }
 
@@ -946,20 +887,20 @@
 	SMB_TIME * st = (SMB_TIME *)&time;
 	SMB_DATE * sd = (SMB_DATE *)&date;
 
-	cFYI(1,("date %d time %d",date, time));
+	cFYI(1, ("date %d time %d", date, time));
 
 	sec = 2 * st->TwoSeconds;
 	min = st->Minutes;
-	if((sec > 59) || (min > 59))
-		cERROR(1,("illegal time min %d sec %d", min, sec));
+	if ((sec > 59) || (min > 59))
+		cERROR(1, ("illegal time min %d sec %d", min, sec));
 	sec += (min * 60);
 	sec += 60 * 60 * st->Hours;
-	if(st->Hours > 24)
-		cERROR(1,("illegal hours %d",st->Hours));
+	if (st->Hours > 24)
+		cERROR(1, ("illegal hours %d", st->Hours));
 	days = sd->Day;
 	month = sd->Month;
-	if((days > 31) || (month > 12))
-		cERROR(1,("illegal date, month %d day: %d", month, days));
+	if ((days > 31) || (month > 12))
+		cERROR(1, ("illegal date, month %d day: %d", month, days));
 	month -= 1;
 	days += total_days_of_prev_months[month];
 	days += 3652; /* account for difference in days between 1980 and 1970 */
@@ -970,15 +911,15 @@
 	for years/100 except for years/400, but since the maximum number for DOS
 	 year is 2**7, the last year is 1980+127, which means we need only
 	 consider 2 special case years, ie the years 2000 and 2100, and only
-	 adjust for the lack of leap year for the year 2100, as 2000 was a 
+	 adjust for the lack of leap year for the year 2100, as 2000 was a
 	 leap year (divisable by 400) */
-	if(year >= 120)  /* the year 2100 */
+	if (year >= 120)  /* the year 2100 */
 		days = days - 1;  /* do not count leap year for the year 2100 */
 
 	/* adjust for leap year where we are still before leap day */
-	if(year != 120)
+	if (year != 120)
 		days -= ((year & 0x03) == 0) && (month < 2 ? 1 : 0);
-	sec += 24 * 60 * 60 * days; 
+	sec += 24 * 60 * 60 * days;
 
 	ts.tv_sec = sec;
 
@@ -986,4 +927,4 @@
 
 	ts.tv_nsec = 0;
 	return ts;
-} 
+}
diff --git a/fs/cifs/nterr.c b/fs/cifs/nterr.c
index 4da50cd..819fd99 100644
--- a/fs/cifs/nterr.c
+++ b/fs/cifs/nterr.c
@@ -1,19 +1,19 @@
-/* 
+/*
  *  Unix SMB/Netbios implementation.
  *  Version 1.9.
  *  RPC Pipe client / server routines
  *  Copyright (C) Luke Kenneth Casson Leighton 1997-2001.
- *  
+ *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
  *  the Free Software Foundation; either version 2 of the License, or
  *  (at your option) any later version.
- *  
+ *
  *  This program is distributed in the hope that it will be useful,
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *  
+ *
  *  You should have received a copy of the GNU General Public License
  *  along with this program; if not, write to the Free Software
  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
diff --git a/fs/cifs/nterr.h b/fs/cifs/nterr.h
index d2fb06c..588abbb 100644
--- a/fs/cifs/nterr.h
+++ b/fs/cifs/nterr.h
@@ -1,4 +1,4 @@
-/* 
+/*
    Unix SMB/Netbios implementation.
    Version 1.9.
    NT error code constants
@@ -6,17 +6,17 @@
    Copyright (C) John H Terpstra              1996-2000
    Copyright (C) Luke Kenneth Casson Leighton 1996-2000
    Copyright (C) Paul Ashton                  1998-2000
-   
+
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
-   
+
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
-   
+
    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index d39b712..7170a9b 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/ntlmssp.h
  *
- *   Copyright (c) International Business Machines  Corp., 2002,2006
+ *   Copyright (c) International Business Machines  Corp., 2002,2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -16,7 +16,7 @@
  *
  *   You should have received a copy of the GNU Lesser General Public License
  *   along with this library; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #define NTLMSSP_SIGNATURE "NTLMSSP"
@@ -27,18 +27,18 @@
 #define UnknownMessage    cpu_to_le32(8)
 
 /* Negotiate Flags */
-#define NTLMSSP_NEGOTIATE_UNICODE       0x01	// Text strings are in unicode
-#define NTLMSSP_NEGOTIATE_OEM           0x02	// Text strings are in OEM
-#define NTLMSSP_REQUEST_TARGET          0x04	// Server return its auth realm
-#define NTLMSSP_NEGOTIATE_SIGN        0x0010	// Request signature capability
-#define NTLMSSP_NEGOTIATE_SEAL        0x0020	// Request confidentiality
+#define NTLMSSP_NEGOTIATE_UNICODE       0x01 /* Text strings are in unicode */
+#define NTLMSSP_NEGOTIATE_OEM           0x02 /* Text strings are in OEM */
+#define NTLMSSP_REQUEST_TARGET          0x04 /* Server return its auth realm */
+#define NTLMSSP_NEGOTIATE_SIGN        0x0010 /* Request signature capability */
+#define NTLMSSP_NEGOTIATE_SEAL        0x0020 /*  Request confidentiality */
 #define NTLMSSP_NEGOTIATE_DGRAM       0x0040
-#define NTLMSSP_NEGOTIATE_LM_KEY      0x0080 // Use LM session key for sign/seal
-#define NTLMSSP_NEGOTIATE_NTLM        0x0200	// NTLM authentication
+#define NTLMSSP_NEGOTIATE_LM_KEY      0x0080 /* Sign/seal use LM session key */
+#define NTLMSSP_NEGOTIATE_NTLM        0x0200 /* NTLM authentication */
 #define NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED 0x1000
 #define NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED 0x2000
-#define NTLMSSP_NEGOTIATE_LOCAL_CALL  0x4000	// client/server on same machine
-#define NTLMSSP_NEGOTIATE_ALWAYS_SIGN 0x8000	// Sign for all security levels
+#define NTLMSSP_NEGOTIATE_LOCAL_CALL  0x4000 /* client/server on same machine */
+#define NTLMSSP_NEGOTIATE_ALWAYS_SIGN 0x8000 /* Sign for all security levels */
 #define NTLMSSP_TARGET_TYPE_DOMAIN   0x10000
 #define NTLMSSP_TARGET_TYPE_SERVER   0x20000
 #define NTLMSSP_TARGET_TYPE_SHARE    0x40000
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index c08bda9..916df94 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -2,7 +2,7 @@
  *   fs/cifs/readdir.c
  *
  *   Directory search handling
- * 
+ *
  *   Copyright (C) International Business Machines  Corp., 2004, 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
@@ -34,24 +34,23 @@
 #ifdef CONFIG_CIFS_DEBUG2
 static void dump_cifs_file_struct(struct file *file, char *label)
 {
-	struct cifsFileInfo * cf;
+	struct cifsFileInfo *cf;
 
 	if (file) {
 		cf = file->private_data;
 		if (cf == NULL) {
-			cFYI(1,("empty cifs private file data"));
+			cFYI(1, ("empty cifs private file data"));
 			return;
 		}
 		if (cf->invalidHandle) {
-			cFYI(1,("invalid handle"));
+			cFYI(1, ("invalid handle"));
 		}
 		if (cf->srch_inf.endOfSearch) {
-			cFYI(1,("end of search"));
+			cFYI(1, ("end of search"));
 		}
 		if (cf->srch_inf.emptyDir) {
-			cFYI(1,("empty dir"));
+			cFYI(1, ("empty dir"));
 		}
-		
 	}
 }
 #endif /* DEBUG2 */
@@ -73,7 +72,8 @@
 	qstring->hash = full_name_hash(qstring->name, qstring->len);
 	tmp_dentry = d_lookup(file->f_path.dentry, qstring);
 	if (tmp_dentry) {
-		cFYI(0, ("existing dentry with inode 0x%p", tmp_dentry->d_inode));
+		cFYI(0, ("existing dentry with inode 0x%p",
+			 tmp_dentry->d_inode));
 		*ptmp_inode = tmp_dentry->d_inode;
 /* BB overwrite old name? i.e. tmp_dentry->d_name and tmp_dentry->d_name.len??*/
 		if (*ptmp_inode == NULL) {
@@ -87,7 +87,7 @@
 	} else {
 		tmp_dentry = d_alloc(file->f_path.dentry, qstring);
 		if (tmp_dentry == NULL) {
-			cERROR(1,("Failed allocating dentry"));
+			cERROR(1, ("Failed allocating dentry"));
 			*ptmp_inode = NULL;
 			return rc;
 		}
@@ -100,7 +100,7 @@
 		if (*ptmp_inode == NULL)
 			return rc;
 		if (file->f_path.dentry->d_sb->s_flags & MS_NOATIME)
-			(*ptmp_inode)->i_flags |= S_NOATIME | S_NOCMTIME;			
+			(*ptmp_inode)->i_flags |= S_NOATIME | S_NOCMTIME;
 		rc = 2;
 	}
 
@@ -109,7 +109,7 @@
 	return rc;
 }
 
-static void AdjustForTZ(struct cifsTconInfo * tcon, struct inode * inode)
+static void AdjustForTZ(struct cifsTconInfo *tcon, struct inode *inode)
 {
 	if ((tcon) && (tcon->ses) && (tcon->ses->server)) {
 		inode->i_ctime.tv_sec += tcon->ses->server->timeAdj;
@@ -121,7 +121,7 @@
 
 
 static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
-		char * buf, int *pobject_type, int isNewInode)
+			  char *buf, int *pobject_type, int isNewInode)
 {
 	loff_t local_size;
 	struct timespec local_mtime;
@@ -150,7 +150,7 @@
 		      cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
 	} else { /* legacy, OS2 and DOS style */
 /*		struct timespec ts;*/
-		FIND_FILE_STANDARD_INFO * pfindData = 
+		FIND_FILE_STANDARD_INFO * pfindData =
 			(FIND_FILE_STANDARD_INFO *)buf;
 
 		tmp_inode->i_mtime = cnvrtDosUnixTm(
@@ -175,7 +175,7 @@
 
 	/* treat dos attribute of read-only as read-only mode bit e.g. 555? */
 	/* 2767 perms - indicate mandatory locking */
-		/* BB fill in uid and gid here? with help from winbind? 
+		/* BB fill in uid and gid here? with help from winbind?
 		   or retrieve from NTFS stream extended attribute */
 	if (atomic_read(&cifsInfo->inUse) == 0) {
 		tmp_inode->i_uid = cifs_sb->mnt_uid;
@@ -196,7 +196,7 @@
 			tmp_inode->i_mode = cifs_sb->mnt_dir_mode;
 		}
 		tmp_inode->i_mode |= S_IFDIR;
-	} else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) && 
+	} else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) &&
 		   (attr & ATTR_SYSTEM)) {
 		if (end_of_file == 0)  {
 			*pobject_type = DT_FIFO;
@@ -206,13 +206,13 @@
 			inode as needing revalidate and get the real type
 			(blk vs chr vs. symlink) later ie in lookup */
 			*pobject_type = DT_REG;
-			tmp_inode->i_mode |= S_IFREG; 
-			cifsInfo->time = 0;	
+			tmp_inode->i_mode |= S_IFREG;
+			cifsInfo->time = 0;
 		}
 /* we no longer mark these because we could not follow them */
 /*        } else if (attr & ATTR_REPARSE) {
-                *pobject_type = DT_LNK;
-                tmp_inode->i_mode |= S_IFLNK; */
+		*pobject_type = DT_LNK;
+		tmp_inode->i_mode |= S_IFLNK; */
 	} else {
 		*pobject_type = DT_REG;
 		tmp_inode->i_mode |= S_IFREG;
@@ -220,7 +220,7 @@
 			tmp_inode->i_mode &= ~(S_IWUGO);
 		else if ((tmp_inode->i_mode & S_IWUGO) == 0)
 			/* the ATTR_READONLY flag may have been changed on   */
-		   	/* server -- set any w bits allowed by mnt_file_mode */
+			/* server -- set any w bits allowed by mnt_file_mode */
 			tmp_inode->i_mode |= (S_IWUGO & cifs_sb->mnt_file_mode);
 	} /* could add code here - to validate if device or weird share type? */
 
@@ -231,7 +231,7 @@
 
 	spin_lock(&tmp_inode->i_lock);
 	if (is_size_safe_to_change(cifsInfo, end_of_file)) {
-		/* can not safely change the file size here if the 
+		/* can not safely change the file size here if the
 		client is writing to it due to potential races */
 		i_size_write(tmp_inode, end_of_file);
 
@@ -254,7 +254,6 @@
 				tmp_inode->i_fop = &cifs_file_direct_nobrl_ops;
 			else
 				tmp_inode->i_fop = &cifs_file_direct_ops;
-		
 		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
 			tmp_inode->i_fop = &cifs_file_nobrl_ops;
 		else
@@ -322,8 +321,8 @@
 
 	tmp_inode->i_mode = le64_to_cpu(pfindData->Permissions);
 	/* since we set the inode type below we need to mask off type
-           to avoid strange results if bits above were corrupt */
-        tmp_inode->i_mode &= ~S_IFMT;
+	   to avoid strange results if bits above were corrupt */
+	tmp_inode->i_mode &= ~S_IFMT;
 	if (type == UNIX_FILE) {
 		*pobject_type = DT_REG;
 		tmp_inode->i_mode |= S_IFREG;
@@ -353,7 +352,7 @@
 		/* safest to just call it a file */
 		*pobject_type = DT_REG;
 		tmp_inode->i_mode |= S_IFREG;
-		cFYI(1,("unknown inode type %d",type)); 
+		cFYI(1, ("unknown inode type %d", type));
 	}
 
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
@@ -368,7 +367,7 @@
 
 	spin_lock(&tmp_inode->i_lock);
 	if (is_size_safe_to_change(cifsInfo, end_of_file)) {
-		/* can not safely change the file size here if the 
+		/* can not safely change the file size here if the
 		client is writing to it due to potential races */
 		i_size_write(tmp_inode, end_of_file);
 
@@ -393,15 +392,16 @@
 			tmp_inode->i_fop = &cifs_file_ops;
 
 		if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
-		   (cifs_sb->tcon->ses->server->maxBuf < 
+		   (cifs_sb->tcon->ses->server->maxBuf <
 			PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
 			tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 		else
 			tmp_inode->i_data.a_ops = &cifs_addr_ops;
 
 		if (isNewInode)
-			return; /* No sense invalidating pages for new inode since we
-					   have not started caching readahead file data yet */
+			return; /* No sense invalidating pages for new inode
+				   since we have not started caching readahead
+				   file data for it yet */
 
 		if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
 			(local_size == tmp_inode->i_size)) {
@@ -420,7 +420,7 @@
 		tmp_inode->i_op = &cifs_symlink_inode_ops;
 /* tmp_inode->i_fop = *//* do not need to set to anything */
 	} else {
-		cFYI(1, ("Special inode")); 
+		cFYI(1, ("Special inode"));
 		init_special_inode(tmp_inode, tmp_inode->i_mode,
 				   tmp_inode->i_rdev);
 	}
@@ -429,14 +429,14 @@
 static int initiate_cifs_search(const int xid, struct file *file)
 {
 	int rc = 0;
-	char * full_path;
-	struct cifsFileInfo * cifsFile;
+	char *full_path;
+	struct cifsFileInfo *cifsFile;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
 
 	if (file->private_data == NULL) {
-		file->private_data = 
-			kzalloc(sizeof(struct cifsFileInfo),GFP_KERNEL);
+		file->private_data =
+			kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
 	}
 
 	if (file->private_data == NULL)
@@ -463,9 +463,11 @@
 
 ffirst_retry:
 	/* test for Unix extensions */
-	if (pTcon->ses->capabilities & CAP_UNIX) {
+	/* but now check for them on the share/mount not on the SMB session */
+/*	if (pTcon->ses->capabilities & CAP_UNIX) { */
+	if (pTcon->unix_ext) {
 		cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX;
-	} else if ((pTcon->ses->capabilities & 
+	} else if ((pTcon->ses->capabilities &
 			(CAP_NT_SMBS | CAP_NT_FIND)) == 0) {
 		cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD;
 	} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
@@ -474,13 +476,13 @@
 		cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
 	}
 
-	rc = CIFSFindFirst(xid, pTcon,full_path,cifs_sb->local_nls,
+	rc = CIFSFindFirst(xid, pTcon, full_path, cifs_sb->local_nls,
 		&cifsFile->netfid, &cifsFile->srch_inf,
-		cifs_sb->mnt_cifs_flags & 
+		cifs_sb->mnt_cifs_flags &
 			CIFS_MOUNT_MAP_SPECIAL_CHR, CIFS_DIR_SEP(cifs_sb));
 	if (rc == 0)
 		cifsFile->invalidHandle = FALSE;
-	if ((rc == -EOPNOTSUPP) && 
+	if ((rc == -EOPNOTSUPP) &&
 		(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
 		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
 		goto ffirst_retry;
@@ -495,17 +497,17 @@
 	int len;
 	__le16 * ustr = (__le16 *)str;
 
-	for(len=0;len <= PATH_MAX;len++) {
+	for (len = 0; len <= PATH_MAX; len++) {
 		if (ustr[len] == 0)
 			return len << 1;
 	}
-	cFYI(1,("Unicode string longer than PATH_MAX found"));
+	cFYI(1, ("Unicode string longer than PATH_MAX found"));
 	return len << 1;
 }
 
 static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
 {
-	char * new_entry;
+	char *new_entry;
 	FILE_DIRECTORY_INFO * pDirInfo = (FILE_DIRECTORY_INFO *)old_entry;
 
 	if (level == SMB_FIND_FILE_INFO_STANDARD) {
@@ -516,21 +518,21 @@
 				pfData->FileNameLength;
 	} else
 		new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
-	cFYI(1,("new entry %p old entry %p",new_entry,old_entry));
+	cFYI(1, ("new entry %p old entry %p", new_entry, old_entry));
 	/* validate that new_entry is not past end of SMB */
 	if (new_entry >= end_of_smb) {
 		cERROR(1,
 		      ("search entry %p began after end of SMB %p old entry %p",
-			new_entry, end_of_smb, old_entry)); 
+			new_entry, end_of_smb, old_entry));
 		return NULL;
 	} else if (((level == SMB_FIND_FILE_INFO_STANDARD) &&
-		   (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb)) ||
-		  ((level != SMB_FIND_FILE_INFO_STANDARD) &&
+		    (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb))
+		  || ((level != SMB_FIND_FILE_INFO_STANDARD) &&
 		   (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb)))  {
-		cERROR(1,("search entry %p extends after end of SMB %p",
+		cERROR(1, ("search entry %p extends after end of SMB %p",
 			new_entry, end_of_smb));
 		return NULL;
-	} else 
+	} else
 		return new_entry;
 
 }
@@ -541,8 +543,8 @@
 static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
 {
 	int rc = 0;
-	char * filename = NULL;
-	int len = 0; 
+	char *filename = NULL;
+	int len = 0;
 
 	if (cfile->srch_inf.info_level == SMB_FIND_FILE_UNIX) {
 		FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
@@ -554,25 +556,25 @@
 			len = strnlen(filename, 5);
 		}
 	} else if (cfile->srch_inf.info_level == SMB_FIND_FILE_DIRECTORY_INFO) {
-		FILE_DIRECTORY_INFO * pFindData = 
+		FILE_DIRECTORY_INFO * pFindData =
 			(FILE_DIRECTORY_INFO *)current_entry;
 		filename = &pFindData->FileName[0];
 		len = le32_to_cpu(pFindData->FileNameLength);
-	} else if (cfile->srch_inf.info_level == 
+	} else if (cfile->srch_inf.info_level ==
 			SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
-		FILE_FULL_DIRECTORY_INFO * pFindData = 
+		FILE_FULL_DIRECTORY_INFO * pFindData =
 			(FILE_FULL_DIRECTORY_INFO *)current_entry;
 		filename = &pFindData->FileName[0];
 		len = le32_to_cpu(pFindData->FileNameLength);
 	} else if (cfile->srch_inf.info_level ==
 			SMB_FIND_FILE_ID_FULL_DIR_INFO) {
-		SEARCH_ID_FULL_DIR_INFO * pFindData = 
+		SEARCH_ID_FULL_DIR_INFO * pFindData =
 			(SEARCH_ID_FULL_DIR_INFO *)current_entry;
 		filename = &pFindData->FileName[0];
 		len = le32_to_cpu(pFindData->FileNameLength);
-	} else if (cfile->srch_inf.info_level == 
+	} else if (cfile->srch_inf.info_level ==
 			SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
-		FILE_BOTH_DIRECTORY_INFO * pFindData = 
+		FILE_BOTH_DIRECTORY_INFO * pFindData =
 			(FILE_BOTH_DIRECTORY_INFO *)current_entry;
 		filename = &pFindData->FileName[0];
 		len = le32_to_cpu(pFindData->FileNameLength);
@@ -582,7 +584,8 @@
 		filename = &pFindData->FileName[0];
 		len = pFindData->FileNameLength;
 	} else {
-		cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level));
+		cFYI(1, ("Unknown findfirst level %d",
+			 cfile->srch_inf.info_level));
 	}
 
 	if (filename) {
@@ -595,15 +598,15 @@
 			} else if (len == 4) {
 				/* check for .. */
 				if ((ufilename[0] == UNICODE_DOT)
-				   &&(ufilename[1] == UNICODE_DOT))
+				   && (ufilename[1] == UNICODE_DOT))
 					rc = 2;
 			}
 		} else /* ASCII */ {
 			if (len == 1) {
-				if (filename[0] == '.') 
+				if (filename[0] == '.')
 					rc = 1;
 			} else if (len == 2) {
-				if((filename[0] == '.') && (filename[1] == '.'))
+				if ((filename[0] == '.') && (filename[1] == '.'))
 					rc = 2;
 			}
 		}
@@ -614,7 +617,7 @@
 
 /* Check if directory that we are searching has changed so we can decide
    whether we can use the cached search results from the previous search */
-static int is_dir_changed(struct file * file)
+static int is_dir_changed(struct file *file)
 {
 	struct inode *inode = file->f_path.dentry->d_inode;
 	struct cifsInodeInfo *cifsInfo = CIFS_I(inode);
@@ -633,22 +636,22 @@
 /* We start counting in the buffer with entry 2 and increment for every
    entry (do not increment for . or .. entry) */
 static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
-	struct file *file, char **ppCurrentEntry, int *num_to_ret) 
+	struct file *file, char **ppCurrentEntry, int *num_to_ret)
 {
 	int rc = 0;
 	int pos_in_buf = 0;
 	loff_t first_entry_in_buffer;
 	loff_t index_to_find = file->f_pos;
-	struct cifsFileInfo * cifsFile = file->private_data;
+	struct cifsFileInfo *cifsFile = file->private_data;
 	/* check if index in the buffer */
-	
-	if ((cifsFile == NULL) || (ppCurrentEntry == NULL) || 
+
+	if ((cifsFile == NULL) || (ppCurrentEntry == NULL) ||
 	   (num_to_ret == NULL))
 		return -ENOENT;
-	
+
 	*ppCurrentEntry = NULL;
-	first_entry_in_buffer = 
-		cifsFile->srch_inf.index_of_last_entry - 
+	first_entry_in_buffer =
+		cifsFile->srch_inf.index_of_last_entry -
 			cifsFile->srch_inf.entries_in_buffer;
 
 	/* if first entry in buf is zero then is first buffer
@@ -660,17 +663,17 @@
 #ifdef CONFIG_CIFS_DEBUG2
 	dump_cifs_file_struct(file, "In fce ");
 #endif
-	if (((index_to_find < cifsFile->srch_inf.index_of_last_entry) && 
-	     is_dir_changed(file)) || 
+	if (((index_to_find < cifsFile->srch_inf.index_of_last_entry) &&
+	     is_dir_changed(file)) ||
 	   (index_to_find < first_entry_in_buffer)) {
 		/* close and restart search */
-		cFYI(1,("search backing up - close and restart search"));
+		cFYI(1, ("search backing up - close and restart search"));
 		cifsFile->invalidHandle = TRUE;
 		CIFSFindClose(xid, pTcon, cifsFile->netfid);
 		kfree(cifsFile->search_resume_name);
 		cifsFile->search_resume_name = NULL;
 		if (cifsFile->srch_inf.ntwrk_buf_start) {
-			cFYI(1,("freeing SMB ff cache buf on search rewind"));
+			cFYI(1, ("freeing SMB ff cache buf on search rewind"));
 			if (cifsFile->srch_inf.smallBuf)
 				cifs_small_buf_release(cifsFile->srch_inf.
 						ntwrk_buf_start);
@@ -678,17 +681,18 @@
 				cifs_buf_release(cifsFile->srch_inf.
 						ntwrk_buf_start);
 		}
-		rc = initiate_cifs_search(xid,file);
+		rc = initiate_cifs_search(xid, file);
 		if (rc) {
-			cFYI(1,("error %d reinitiating a search on rewind",rc));
+			cFYI(1, ("error %d reinitiating a search on rewind",
+				 rc));
 			return rc;
 		}
 	}
 
-	while((index_to_find >= cifsFile->srch_inf.index_of_last_entry) && 
-	      (rc == 0) && (cifsFile->srch_inf.endOfSearch == FALSE)){
-	 	cFYI(1,("calling findnext2"));
-		rc = CIFSFindNext(xid,pTcon,cifsFile->netfid, 
+	while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
+	      (rc == 0) && (cifsFile->srch_inf.endOfSearch == FALSE)) {
+		cFYI(1, ("calling findnext2"));
+		rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
 				  &cifsFile->srch_inf);
 		if (rc)
 			return -ENOENT;
@@ -697,8 +701,8 @@
 		/* we found the buffer that contains the entry */
 		/* scan and find it */
 		int i;
-		char * current_entry;
-		char * end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + 
+		char *current_entry;
+		char *end_of_smb = cifsFile->srch_inf.ntwrk_buf_start +
 			smbCalcSize((struct smb_hdr *)
 				cifsFile->srch_inf.ntwrk_buf_start);
 
@@ -706,28 +710,28 @@
 		first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry
 					- cifsFile->srch_inf.entries_in_buffer;
 		pos_in_buf = index_to_find - first_entry_in_buffer;
-		cFYI(1,("found entry - pos_in_buf %d",pos_in_buf));
+		cFYI(1, ("found entry - pos_in_buf %d", pos_in_buf));
 
-		for(i=0;(i<(pos_in_buf)) && (current_entry != NULL);i++) {
+		for (i=0; (i < (pos_in_buf)) && (current_entry != NULL); i++) {
 			/* go entry by entry figuring out which is first */
-			current_entry = nxt_dir_entry(current_entry,end_of_smb,
+			current_entry = nxt_dir_entry(current_entry, end_of_smb,
 						cifsFile->srch_inf.info_level);
 		}
-		if((current_entry == NULL) && (i < pos_in_buf)) {
+		if ((current_entry == NULL) && (i < pos_in_buf)) {
 			/* BB fixme - check if we should flag this error */
-			cERROR(1,("reached end of buf searching for pos in buf"
+			cERROR(1, ("reached end of buf searching for pos in buf"
 			  " %d index to find %lld rc %d",
-			  pos_in_buf,index_to_find,rc));
+			  pos_in_buf, index_to_find, rc));
 		}
 		rc = 0;
 		*ppCurrentEntry = current_entry;
 	} else {
-		cFYI(1,("index not in buffer - could not findnext into it"));
+		cFYI(1, ("index not in buffer - could not findnext into it"));
 		return 0;
 	}
 
-	if(pos_in_buf >= cifsFile->srch_inf.entries_in_buffer) {
-		cFYI(1,("can not return entries pos_in_buf beyond last entry"));
+	if (pos_in_buf >= cifsFile->srch_inf.entries_in_buffer) {
+		cFYI(1, ("can not return entries pos_in_buf beyond last"));
 		*num_to_ret = 0;
 	} else
 		*num_to_ret = cifsFile->srch_inf.entries_in_buffer - pos_in_buf;
@@ -738,81 +742,81 @@
 /* inode num, inode type and filename returned */
 static int cifs_get_name_from_search_buf(struct qstr *pqst,
 	char *current_entry, __u16 level, unsigned int unicode,
-	struct cifs_sb_info * cifs_sb, int max_len, ino_t *pinum)
+	struct cifs_sb_info *cifs_sb, int max_len, ino_t *pinum)
 {
 	int rc = 0;
 	unsigned int len = 0;
-	char * filename;
-	struct nls_table * nlt = cifs_sb->local_nls;
+	char *filename;
+	struct nls_table *nlt = cifs_sb->local_nls;
 
 	*pinum = 0;
 
-	if(level == SMB_FIND_FILE_UNIX) {
-		FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
+	if (level == SMB_FIND_FILE_UNIX) {
+		FILE_UNIX_INFO *pFindData = (FILE_UNIX_INFO *)current_entry;
 
 		filename = &pFindData->FileName[0];
-		if(unicode) {
+		if (unicode) {
 			len = cifs_unicode_bytelen(filename);
 		} else {
 			/* BB should we make this strnlen of PATH_MAX? */
 			len = strnlen(filename, PATH_MAX);
 		}
 
-		/* BB fixme - hash low and high 32 bits if not 64 bit arch BB fixme */
-		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+		/* BB fixme - hash low and high 32 bits if not 64 bit arch BB */
+		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
 			*pinum = pFindData->UniqueId;
-	} else if(level == SMB_FIND_FILE_DIRECTORY_INFO) {
-		FILE_DIRECTORY_INFO * pFindData = 
+	} else if (level == SMB_FIND_FILE_DIRECTORY_INFO) {
+		FILE_DIRECTORY_INFO *pFindData =
 			(FILE_DIRECTORY_INFO *)current_entry;
 		filename = &pFindData->FileName[0];
 		len = le32_to_cpu(pFindData->FileNameLength);
-	} else if(level == SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
-		FILE_FULL_DIRECTORY_INFO * pFindData = 
+	} else if (level == SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
+		FILE_FULL_DIRECTORY_INFO *pFindData =
 			(FILE_FULL_DIRECTORY_INFO *)current_entry;
 		filename = &pFindData->FileName[0];
 		len = le32_to_cpu(pFindData->FileNameLength);
-	} else if(level == SMB_FIND_FILE_ID_FULL_DIR_INFO) {
-		SEARCH_ID_FULL_DIR_INFO * pFindData = 
+	} else if (level == SMB_FIND_FILE_ID_FULL_DIR_INFO) {
+		SEARCH_ID_FULL_DIR_INFO *pFindData =
 			(SEARCH_ID_FULL_DIR_INFO *)current_entry;
 		filename = &pFindData->FileName[0];
 		len = le32_to_cpu(pFindData->FileNameLength);
 		*pinum = pFindData->UniqueId;
-	} else if(level == SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
-		FILE_BOTH_DIRECTORY_INFO * pFindData = 
+	} else if (level == SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
+		FILE_BOTH_DIRECTORY_INFO *pFindData =
 			(FILE_BOTH_DIRECTORY_INFO *)current_entry;
 		filename = &pFindData->FileName[0];
 		len = le32_to_cpu(pFindData->FileNameLength);
-	} else if(level == SMB_FIND_FILE_INFO_STANDARD) {
+	} else if (level == SMB_FIND_FILE_INFO_STANDARD) {
 		FIND_FILE_STANDARD_INFO * pFindData =
 			(FIND_FILE_STANDARD_INFO *)current_entry;
 		filename = &pFindData->FileName[0];
 		/* one byte length, no name conversion */
 		len = (unsigned int)pFindData->FileNameLength;
 	} else {
-		cFYI(1,("Unknown findfirst level %d",level));
+		cFYI(1, ("Unknown findfirst level %d", level));
 		return -EINVAL;
 	}
 
-	if(len > max_len) {
-		cERROR(1,("bad search response length %d past smb end", len));
+	if (len > max_len) {
+		cERROR(1, ("bad search response length %d past smb end", len));
 		return -EINVAL;
 	}
 
-	if(unicode) {
+	if (unicode) {
 		/* BB fixme - test with long names */
 		/* Note converted filename can be longer than in unicode */
-		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
+		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
 			pqst->len = cifs_convertUCSpath((char *)pqst->name,
 					(__le16 *)filename, len/2, nlt);
 		else
 			pqst->len = cifs_strfromUCS_le((char *)pqst->name,
-					(__le16 *)filename,len/2,nlt);
+					(__le16 *)filename, len/2, nlt);
 	} else {
 		pqst->name = filename;
 		pqst->len = len;
 	}
-	pqst->hash = full_name_hash(pqst->name,pqst->len);
-/*	cFYI(1,("filldir on %s",pqst->name));  */
+	pqst->hash = full_name_hash(pqst->name, pqst->len);
+/*	cFYI(1, ("filldir on %s",pqst->name));  */
 	return rc;
 }
 
@@ -821,49 +825,50 @@
 {
 	int rc = 0;
 	struct qstr qstring;
-	struct cifsFileInfo * pCifsF;
+	struct cifsFileInfo *pCifsF;
 	unsigned obj_type;
 	ino_t  inum;
-	struct cifs_sb_info * cifs_sb;
+	struct cifs_sb_info *cifs_sb;
 	struct inode *tmp_inode;
 	struct dentry *tmp_dentry;
 
 	/* get filename and len into qstring */
 	/* get dentry */
 	/* decide whether to create and populate ionde */
-	if((direntry == NULL) || (file == NULL))
+	if ((direntry == NULL) || (file == NULL))
 		return -EINVAL;
 
 	pCifsF = file->private_data;
-	
-	if((scratch_buf == NULL) || (pfindEntry == NULL) || (pCifsF == NULL))
+
+	if ((scratch_buf == NULL) || (pfindEntry == NULL) || (pCifsF == NULL))
 		return -ENOENT;
 
-	rc = cifs_entry_is_dot(pfindEntry,pCifsF);
+	rc = cifs_entry_is_dot(pfindEntry, pCifsF);
 	/* skip . and .. since we added them first */
-	if(rc != 0) 
+	if (rc != 0)
 		return 0;
 
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 
 	qstring.name = scratch_buf;
-	rc = cifs_get_name_from_search_buf(&qstring,pfindEntry,
+	rc = cifs_get_name_from_search_buf(&qstring, pfindEntry,
 			pCifsF->srch_inf.info_level,
-			pCifsF->srch_inf.unicode,cifs_sb,
+			pCifsF->srch_inf.unicode, cifs_sb,
 			max_len,
 			&inum /* returned */);
 
-	if(rc)
+	if (rc)
 		return rc;
 
-	rc = construct_dentry(&qstring,file,&tmp_inode, &tmp_dentry);
-	if((tmp_inode == NULL) || (tmp_dentry == NULL))
+	rc = construct_dentry(&qstring, file, &tmp_inode, &tmp_dentry);
+	if ((tmp_inode == NULL) || (tmp_dentry == NULL))
 		return -ENOMEM;
 
-	if(rc) {
+	if (rc) {
 		/* inode created, we need to hash it with right inode number */
-		if(inum != 0) {
-			/* BB fixme - hash the 2 32 quantities bits together if necessary BB */
+		if (inum != 0) {
+			/* BB fixme - hash the 2 32 quantities bits together if
+			 *  necessary BB */
 			tmp_inode->i_ino = inum;
 		}
 		insert_inode_hash(tmp_inode);
@@ -872,27 +877,27 @@
 	/* we pass in rc below, indicating whether it is a new inode,
 	   so we can figure out whether to invalidate the inode cached
 	   data if the file has changed */
-	if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX)
+	if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX)
 		unix_fill_in_inode(tmp_inode,
 				   (FILE_UNIX_INFO *)pfindEntry,
 				   &obj_type, rc);
-	else if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD)
+	else if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD)
 		fill_in_inode(tmp_inode, 0 /* old level 1 buffer type */,
 				pfindEntry, &obj_type, rc);
 	else
 		fill_in_inode(tmp_inode, 1 /* NT */, pfindEntry, &obj_type, rc);
 
-	if(rc) /* new inode - needs to be tied to dentry */ {
+	if (rc) /* new inode - needs to be tied to dentry */ {
 		d_instantiate(tmp_dentry, tmp_inode);
-		if(rc == 2)
+		if (rc == 2)
 			d_rehash(tmp_dentry);
 	}
-	
-	
-	rc = filldir(direntry,qstring.name,qstring.len,file->f_pos,
-		     tmp_inode->i_ino,obj_type);
-	if(rc) {
-		cFYI(1,("filldir rc = %d",rc));
+
+
+	rc = filldir(direntry, qstring.name, qstring.len, file->f_pos,
+		     tmp_inode->i_ino, obj_type);
+	if (rc) {
+		cFYI(1, ("filldir rc = %d", rc));
 		/* we can not return filldir errors to the caller
 		since they are "normal" when the stat blocksize
 		is too small - we return remapped error instead */
@@ -909,57 +914,57 @@
 	int rc = 0;
 	unsigned int len = 0;
 	__u16 level;
-	char * filename;
+	char *filename;
 
-	if((cifsFile == NULL) || (current_entry == NULL))
+	if ((cifsFile == NULL) || (current_entry == NULL))
 		return -EINVAL;
 
 	level = cifsFile->srch_inf.info_level;
 
-	if(level == SMB_FIND_FILE_UNIX) {
+	if (level == SMB_FIND_FILE_UNIX) {
 		FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
 
 		filename = &pFindData->FileName[0];
-		if(cifsFile->srch_inf.unicode) {
+		if (cifsFile->srch_inf.unicode) {
 			len = cifs_unicode_bytelen(filename);
 		} else {
 			/* BB should we make this strnlen of PATH_MAX? */
 			len = strnlen(filename, PATH_MAX);
 		}
 		cifsFile->srch_inf.resume_key = pFindData->ResumeKey;
-	} else if(level == SMB_FIND_FILE_DIRECTORY_INFO) {
-		FILE_DIRECTORY_INFO * pFindData = 
+	} else if (level == SMB_FIND_FILE_DIRECTORY_INFO) {
+		FILE_DIRECTORY_INFO *pFindData =
 			(FILE_DIRECTORY_INFO *)current_entry;
 		filename = &pFindData->FileName[0];
 		len = le32_to_cpu(pFindData->FileNameLength);
 		cifsFile->srch_inf.resume_key = pFindData->FileIndex;
-	} else if(level == SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
-		FILE_FULL_DIRECTORY_INFO * pFindData = 
+	} else if (level == SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
+		FILE_FULL_DIRECTORY_INFO *pFindData =
 			(FILE_FULL_DIRECTORY_INFO *)current_entry;
 		filename = &pFindData->FileName[0];
 		len = le32_to_cpu(pFindData->FileNameLength);
 		cifsFile->srch_inf.resume_key = pFindData->FileIndex;
-	} else if(level == SMB_FIND_FILE_ID_FULL_DIR_INFO) {
-		SEARCH_ID_FULL_DIR_INFO * pFindData = 
+	} else if (level == SMB_FIND_FILE_ID_FULL_DIR_INFO) {
+		SEARCH_ID_FULL_DIR_INFO *pFindData =
 			(SEARCH_ID_FULL_DIR_INFO *)current_entry;
 		filename = &pFindData->FileName[0];
 		len = le32_to_cpu(pFindData->FileNameLength);
 		cifsFile->srch_inf.resume_key = pFindData->FileIndex;
-	} else if(level == SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
-		FILE_BOTH_DIRECTORY_INFO * pFindData = 
+	} else if (level == SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
+		FILE_BOTH_DIRECTORY_INFO *pFindData =
 			(FILE_BOTH_DIRECTORY_INFO *)current_entry;
 		filename = &pFindData->FileName[0];
 		len = le32_to_cpu(pFindData->FileNameLength);
 		cifsFile->srch_inf.resume_key = pFindData->FileIndex;
-	} else if(level == SMB_FIND_FILE_INFO_STANDARD) {
-		FIND_FILE_STANDARD_INFO * pFindData =
+	} else if (level == SMB_FIND_FILE_INFO_STANDARD) {
+		FIND_FILE_STANDARD_INFO *pFindData =
 			(FIND_FILE_STANDARD_INFO *)current_entry;
 		filename = &pFindData->FileName[0];
 		/* one byte length, no name conversion */
 		len = (unsigned int)pFindData->FileNameLength;
 		cifsFile->srch_inf.resume_key = pFindData->ResumeKey;
 	} else {
-		cFYI(1,("Unknown findfirst level %d",level));
+		cFYI(1, ("Unknown findfirst level %d", level));
 		return -EINVAL;
 	}
 	cifsFile->srch_inf.resume_name_len = len;
@@ -970,21 +975,21 @@
 int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
 {
 	int rc = 0;
-	int xid,i;
+	int xid, i;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
 	struct cifsFileInfo *cifsFile = NULL;
-	char * current_entry;
+	char *current_entry;
 	int num_to_fill = 0;
-	char * tmp_buf = NULL;
-	char * end_of_smb;
+	char *tmp_buf = NULL;
+	char *end_of_smb;
 	int max_len;
 
 	xid = GetXid();
 
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 	pTcon = cifs_sb->tcon;
-	if(pTcon == NULL)
+	if (pTcon == NULL)
 		return -EINVAL;
 
 	switch ((int) file->f_pos) {
@@ -1005,27 +1010,27 @@
 		}
 		file->f_pos++;
 	default:
-		/* 1) If search is active, 
-			is in current search buffer? 
+		/* 1) If search is active,
+			is in current search buffer?
 			if it before then restart search
 			if after then keep searching till find it */
 
-		if(file->private_data == NULL) {
-			rc = initiate_cifs_search(xid,file);
-			cFYI(1,("initiate cifs search rc %d",rc));
-			if(rc) {
+		if (file->private_data == NULL) {
+			rc = initiate_cifs_search(xid, file);
+			cFYI(1, ("initiate cifs search rc %d", rc));
+			if (rc) {
 				FreeXid(xid);
 				return rc;
 			}
 		}
-		if(file->private_data == NULL) {
+		if (file->private_data == NULL) {
 			rc = -EINVAL;
 			FreeXid(xid);
 			return rc;
 		}
 		cifsFile = file->private_data;
 		if (cifsFile->srch_inf.endOfSearch) {
-			if(cifsFile->srch_inf.emptyDir) {
+			if (cifsFile->srch_inf.emptyDir) {
 				cFYI(1, ("End of search, empty dir"));
 				rc = 0;
 				break;
@@ -1033,23 +1038,23 @@
 		} /* else {
 			cifsFile->invalidHandle = TRUE;
 			CIFSFindClose(xid, pTcon, cifsFile->netfid);
-		} 
+		}
 		kfree(cifsFile->search_resume_name);
 		cifsFile->search_resume_name = NULL; */
 
-		rc = find_cifs_entry(xid,pTcon, file,
-				&current_entry,&num_to_fill);
-		if(rc) {
-			cFYI(1,("fce error %d",rc)); 
+		rc = find_cifs_entry(xid, pTcon, file,
+				&current_entry, &num_to_fill);
+		if (rc) {
+			cFYI(1, ("fce error %d", rc));
 			goto rddir2_exit;
 		} else if (current_entry != NULL) {
-			cFYI(1,("entry %lld found",file->f_pos));
+			cFYI(1, ("entry %lld found", file->f_pos));
 		} else {
-			cFYI(1,("could not find entry"));
+			cFYI(1, ("could not find entry"));
 			goto rddir2_exit;
 		}
-		cFYI(1,("loop through %d times filling dir for net buf %p",
-			num_to_fill,cifsFile->srch_inf.ntwrk_buf_start));
+		cFYI(1, ("loop through %d times filling dir for net buf %p",
+			num_to_fill, cifsFile->srch_inf.ntwrk_buf_start));
 		max_len = smbCalcSize((struct smb_hdr *)
 				cifsFile->srch_inf.ntwrk_buf_start);
 		end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
@@ -1059,8 +1064,8 @@
 		such multibyte target UTF-8 characters. cifs_unicode.c,
 		which actually does the conversion, has the same limit */
 		tmp_buf = kmalloc((2 * NAME_MAX) + 4, GFP_KERNEL);
-		for(i=0;(i<num_to_fill) && (rc == 0);i++) {
-			if(current_entry == NULL) {
+		for (i = 0; (i < num_to_fill) && (rc == 0); i++) {
+			if (current_entry == NULL) {
 				/* evaluate whether this case is an error */
 				cERROR(1,("past end of SMB num to fill %d i %d",
 					  num_to_fill, i));
@@ -1070,20 +1075,20 @@
 			we want to check for that here? */
 			rc = cifs_filldir(current_entry, file,
 					filldir, direntry, tmp_buf, max_len);
-			if(rc == -EOVERFLOW) {
+			if (rc == -EOVERFLOW) {
 				rc = 0;
 				break;
 			}
 
 			file->f_pos++;
-			if(file->f_pos == 
+			if (file->f_pos ==
 				cifsFile->srch_inf.index_of_last_entry) {
-				cFYI(1,("last entry in buf at pos %lld %s",
-					file->f_pos,tmp_buf));
-				cifs_save_resume_key(current_entry,cifsFile);
+				cFYI(1, ("last entry in buf at pos %lld %s",
+					file->f_pos, tmp_buf));
+				cifs_save_resume_key(current_entry, cifsFile);
 				break;
-			} else 
-				current_entry = 
+			} else
+				current_entry =
 					nxt_dir_entry(current_entry, end_of_smb,
 						cifsFile->srch_inf.info_level);
 		}
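
The fs/cifs/readdir.c hunks above are almost entirely whitespace and cFYI()/cERROR() argument-spacing cleanup; the one behavioural change is in initiate_cifs_search(), where the search info level is now keyed off the per-share pTcon->unix_ext flag rather than the per-session CAP_UNIX capability bit. The standalone C sketch below models only that decision ladder; struct tcon_like, the enum names, and the capability values are illustrative stand-ins rather than the kernel definitions.

    #include <stdio.h>

    #define CAP_NT_SMBS 0x0010  /* illustrative values, not taken from the kernel headers */
    #define CAP_NT_FIND 0x0200

    enum srch_level {
        LVL_UNIX,           /* SMB_FIND_FILE_UNIX */
        LVL_INFO_STANDARD,  /* SMB_FIND_FILE_INFO_STANDARD */
        LVL_ID_FULL_DIR,    /* SMB_FIND_FILE_ID_FULL_DIR_INFO */
        LVL_DIRECTORY       /* SMB_FIND_FILE_DIRECTORY_INFO */
    };

    struct tcon_like {
        int unix_ext;           /* Unix extensions negotiated on this share */
        unsigned int ses_caps;  /* capabilities of the underlying SMB session */
    };

    /* Mirrors the ladder in initiate_cifs_search() after this change: the
       per-share unix_ext flag is consulted first, not CAP_UNIX on the session. */
    static enum srch_level pick_level(const struct tcon_like *tcon, int want_server_inum)
    {
        if (tcon->unix_ext)
            return LVL_UNIX;
        if ((tcon->ses_caps & (CAP_NT_SMBS | CAP_NT_FIND)) == 0)
            return LVL_INFO_STANDARD;
        if (want_server_inum)
            return LVL_ID_FULL_DIR;
        return LVL_DIRECTORY;
    }

    int main(void)
    {
        struct tcon_like t = { .unix_ext = 1, .ses_caps = 0 };

        printf("chosen level: %d\n", pick_level(&t, 0));
        return 0;
    }
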
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7584646..2ea027d 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -3,7 +3,7 @@
  *
  *   SMB/CIFS session setup handling routines
  *
- *   Copyright (c) International Business Machines  Corp., 2006
+ *   Copyright (c) International Business Machines  Corp., 2006, 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -31,7 +31,7 @@
 #include <linux/utsname.h>
 
 extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
-                         unsigned char *p24);
+			 unsigned char *p24);
 
 static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
 {
@@ -45,13 +45,14 @@
 
 	/* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
 
-	/* BB verify whether signing required on neg or just on auth frame 
+	/* BB verify whether signing required on neg or just on auth frame
 	   (and NTLM case) */
 
 	capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
 			CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
 
-	if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+	if (ses->server->secMode &
+	    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
 		pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 
 	if (ses->capabilities & CAP_UNICODE) {
@@ -74,10 +75,10 @@
 	return capabilities;
 }
 
-static void unicode_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses,
-			    const struct nls_table * nls_cp)
+static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
+				   const struct nls_table *nls_cp)
 {
-	char * bcc_ptr = *pbcc_area;
+	char *bcc_ptr = *pbcc_area;
 	int bytes_ret = 0;
 
 	/* BB FIXME add check that strings total less
@@ -89,7 +90,7 @@
 		bcc_ptr++;
 	} */
 	/* copy user */
-	if(ses->userName == NULL) {
+	if (ses->userName == NULL) {
 		/* null user mount */
 		*bcc_ptr = 0;
 		*(bcc_ptr+1) = 0;
@@ -100,14 +101,14 @@
 	bcc_ptr += 2 * bytes_ret;
 	bcc_ptr += 2; /* account for null termination */
 	/* copy domain */
-	if(ses->domainName == NULL) {
+	if (ses->domainName == NULL) {
 		/* Sending null domain better than using a bogus domain name (as
 		we did briefly in 2.6.18) since server will use its default */
 		*bcc_ptr = 0;
 		*(bcc_ptr+1) = 0;
 		bytes_ret = 0;
 	} else
-		bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName, 
+		bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName,
 					  256, nls_cp);
 	bcc_ptr += 2 * bytes_ret;
 	bcc_ptr += 2;  /* account for null terminator */
@@ -122,37 +123,37 @@
 	bcc_ptr += 2; /* trailing null */
 
 	bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
-                                  32, nls_cp);
+				  32, nls_cp);
 	bcc_ptr += 2 * bytes_ret;
 	bcc_ptr += 2; /* trailing null */
 
 	*pbcc_area = bcc_ptr;
 }
 
-static void ascii_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses,
-			  const struct nls_table * nls_cp)
+static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
+				 const struct nls_table *nls_cp)
 {
-	char * bcc_ptr = *pbcc_area;
+	char *bcc_ptr = *pbcc_area;
 
 	/* copy user */
 	/* BB what about null user mounts - check that we do this BB */
-        /* copy user */
-        if(ses->userName == NULL) {
-                /* BB what about null user mounts - check that we do this BB */
-        } else { /* 300 should be long enough for any conceivable user name */
-                strncpy(bcc_ptr, ses->userName, 300);
-        }
+	/* copy user */
+	if (ses->userName == NULL) {
+		/* BB what about null user mounts - check that we do this BB */
+	} else { /* 300 should be long enough for any conceivable user name */
+		strncpy(bcc_ptr, ses->userName, 300);
+	}
 	/* BB improve check for overflow */
-        bcc_ptr += strnlen(ses->userName, 300);
+	bcc_ptr += strnlen(ses->userName, 300);
 	*bcc_ptr = 0;
-        bcc_ptr++; /* account for null termination */
+	bcc_ptr++; /* account for null termination */
 
-        /* copy domain */
-	
-        if(ses->domainName != NULL) {
-                strncpy(bcc_ptr, ses->domainName, 256); 
+	/* copy domain */
+
+	if (ses->domainName != NULL) {
+		strncpy(bcc_ptr, ses->domainName, 256);
 		bcc_ptr += strnlen(ses->domainName, 256);
-	} /* else we will send a null domain name 
+	} /* else we will send a null domain name
 	     so the server will default to its own domain */
 	*bcc_ptr = 0;
 	bcc_ptr++;
@@ -167,19 +168,20 @@
 	strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
 	bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
 
-        *pbcc_area = bcc_ptr;
+	*pbcc_area = bcc_ptr;
 }
 
-static int decode_unicode_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo *ses,
-                            const struct nls_table * nls_cp)
+static int decode_unicode_ssetup(char **pbcc_area, int bleft,
+				 struct cifsSesInfo *ses,
+				 const struct nls_table *nls_cp)
 {
 	int rc = 0;
 	int words_left, len;
-	char * data = *pbcc_area;
+	char *data = *pbcc_area;
 
 
 
-	cFYI(1,("bleft %d",bleft));
+	cFYI(1, ("bleft %d", bleft));
 
 
 	/* SMB header is unaligned, so cifs servers word align start of
@@ -189,7 +191,7 @@
 		    their final Unicode string - in which case we
 		    now will not attempt to decode the byte of junk
 		    which follows it */
-		    
+
 	words_left = bleft / 2;
 
 	/* save off server operating system */
@@ -198,14 +200,14 @@
 /* We look for obvious messed up bcc or strings in response so we do not go off
    the end since (at least) WIN2K and Windows XP have a major bug in not null
    terminating last Unicode string in response  */
-	if(len >= words_left)
+	if (len >= words_left)
 		return rc;
 
-	if(ses->serverOS)
+	if (ses->serverOS)
 		kfree(ses->serverOS);
 	/* UTF-8 string will not grow more than four times as big as UCS-16 */
 	ses->serverOS = kzalloc(4 * len, GFP_KERNEL);
-	if(ses->serverOS != NULL) {
+	if (ses->serverOS != NULL) {
 		cifs_strfromUCS_le(ses->serverOS, (__le16 *)data, len,
 				   nls_cp);
 	}
@@ -215,67 +217,68 @@
 	/* save off server network operating system */
 	len = UniStrnlen((wchar_t *) data, words_left);
 
-	if(len >= words_left)
+	if (len >= words_left)
 		return rc;
 
-	if(ses->serverNOS)
+	if (ses->serverNOS)
 		kfree(ses->serverNOS);
 	ses->serverNOS = kzalloc(4 * len, GFP_KERNEL); /* BB this is wrong length FIXME BB */
-	if(ses->serverNOS != NULL) {
+	if (ses->serverNOS != NULL) {
 		cifs_strfromUCS_le(ses->serverNOS, (__le16 *)data, len,
 				   nls_cp);
-		if(strncmp(ses->serverNOS, "NT LAN Manager 4",16) == 0) {
-			cFYI(1,("NT4 server"));
+		if (strncmp(ses->serverNOS, "NT LAN Manager 4", 16) == 0) {
+			cFYI(1, ("NT4 server"));
 			ses->flags |= CIFS_SES_NT4;
 		}
 	}
 	data += 2 * (len + 1);
 	words_left -= len + 1;
 
-        /* save off server domain */
-        len = UniStrnlen((wchar_t *) data, words_left);
+	/* save off server domain */
+	len = UniStrnlen((wchar_t *) data, words_left);
 
-        if(len > words_left)
-                return rc;
+	if (len > words_left)
+		return rc;
 
-        if(ses->serverDomain)
-                kfree(ses->serverDomain);
-        ses->serverDomain = kzalloc(2 * (len + 1), GFP_KERNEL); /* BB FIXME wrong length */
-        if(ses->serverDomain != NULL) {
-                cifs_strfromUCS_le(ses->serverDomain, (__le16 *)data, len,
-                                   nls_cp);
-                ses->serverDomain[2*len] = 0;
-                ses->serverDomain[(2*len) + 1] = 0;
-        }
-        data += 2 * (len + 1);
-        words_left -= len + 1;
-	
-	cFYI(1,("words left: %d",words_left));
+	if (ses->serverDomain)
+		kfree(ses->serverDomain);
+	ses->serverDomain = kzalloc(2 * (len + 1), GFP_KERNEL); /* BB FIXME wrong length */
+	if (ses->serverDomain != NULL) {
+		cifs_strfromUCS_le(ses->serverDomain, (__le16 *)data, len,
+				   nls_cp);
+		ses->serverDomain[2*len] = 0;
+		ses->serverDomain[(2*len) + 1] = 0;
+	}
+	data += 2 * (len + 1);
+	words_left -= len + 1;
+
+	cFYI(1, ("words left: %d", words_left));
 
 	return rc;
 }
 
-static int decode_ascii_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo *ses,
-                            const struct nls_table * nls_cp)
+static int decode_ascii_ssetup(char **pbcc_area, int bleft,
+			       struct cifsSesInfo *ses,
+			       const struct nls_table *nls_cp)
 {
 	int rc = 0;
 	int len;
-	char * bcc_ptr = *pbcc_area;
+	char *bcc_ptr = *pbcc_area;
 
-	cFYI(1,("decode sessetup ascii. bleft %d", bleft));
-	
+	cFYI(1, ("decode sessetup ascii. bleft %d", bleft));
+
 	len = strnlen(bcc_ptr, bleft);
-	if(len >= bleft)
+	if (len >= bleft)
 		return rc;
-	
-	if(ses->serverOS)
+
+	if (ses->serverOS)
 		kfree(ses->serverOS);
 
 	ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
-	if(ses->serverOS)
+	if (ses->serverOS)
 		strncpy(ses->serverOS, bcc_ptr, len);
-	if(strncmp(ses->serverOS, "OS/2",4) == 0) {
-			cFYI(1,("OS/2 server"));
+	if (strncmp(ses->serverOS, "OS/2", 4) == 0) {
+			cFYI(1, ("OS/2 server"));
 			ses->flags |= CIFS_SES_OS2;
 	}
 
@@ -283,34 +286,34 @@
 	bleft -= len + 1;
 
 	len = strnlen(bcc_ptr, bleft);
-	if(len >= bleft)
+	if (len >= bleft)
 		return rc;
 
-	if(ses->serverNOS)
+	if (ses->serverNOS)
 		kfree(ses->serverNOS);
 
 	ses->serverNOS = kzalloc(len + 1, GFP_KERNEL);
-	if(ses->serverNOS)
+	if (ses->serverNOS)
 		strncpy(ses->serverNOS, bcc_ptr, len);
 
 	bcc_ptr += len + 1;
 	bleft -= len + 1;
 
-        len = strnlen(bcc_ptr, bleft);
-        if(len > bleft)
-                return rc;
+	len = strnlen(bcc_ptr, bleft);
+	if (len > bleft)
+		return rc;
 
 	/* No domain field in LANMAN case. Domain is
 	   returned by old servers in the SMB negprot response */
 	/* BB For newer servers which do not support Unicode,
 	   but thus do return domain here we could add parsing
 	   for it later, but it is not very important */
-	cFYI(1,("ascii: bytes left %d",bleft));
+	cFYI(1, ("ascii: bytes left %d", bleft));
 
 	return rc;
 }
 
-int 
+int
 CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
 		const struct nls_table *nls_cp)
 {
@@ -328,13 +331,13 @@
 	__u16 action;
 	int bytes_remaining;
 
-	if(ses == NULL)
+	if (ses == NULL)
 		return -EINVAL;
 
 	type = ses->server->secType;
 
-	cFYI(1,("sess setup type %d",type));
-	if(type == LANMAN) {
+	cFYI(1, ("sess setup type %d", type));
+	if (type == LANMAN) {
 #ifndef CONFIG_CIFS_WEAK_PW_HASH
 		/* LANMAN and plaintext are less secure and off by default.
 		So we make this explicitly be turned on in kconfig (in the
@@ -344,15 +347,15 @@
 		return -EOPNOTSUPP;
 #endif
 		wct = 10; /* lanman 2 style sessionsetup */
-	} else if((type == NTLM) || (type == NTLMv2)) { 
+	} else if ((type == NTLM) || (type == NTLMv2)) {
 		/* For NTLMv2 failures eventually may need to retry NTLM */
 		wct = 13; /* old style NTLM sessionsetup */
-	} else /* same size for negotiate or auth, NTLMSSP or extended security */
+	} else /* same size: negotiate or auth, NTLMSSP or extended security */
 		wct = 12;
 
 	rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
 			    (void **)&smb_buf);
-	if(rc)
+	if (rc)
 		return rc;
 
 	pSMB = (SESSION_SETUP_ANDX *)smb_buf;
@@ -364,8 +367,8 @@
 	second part which will include the strings
 	and rest of bcc area, in order to avoid having
 	to do a large buffer 17K allocation */
-        iov[0].iov_base = (char *)pSMB;
-        iov[0].iov_len = smb_buf->smb_buf_length + 4;
+	iov[0].iov_base = (char *)pSMB;
+	iov[0].iov_len = smb_buf->smb_buf_length + 4;
 
 	/* 2000 big enough to fit max user, domain, NOS name etc. */
 	str_area = kmalloc(2000, GFP_KERNEL);
@@ -373,18 +376,18 @@
 
 	ses->flags &= ~CIFS_SES_LANMAN;
 
-	if(type == LANMAN) {
+	if (type == LANMAN) {
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
 		char lnm_session_key[CIFS_SESS_KEY_SIZE];
 
 		/* no capabilities flags in old lanman negotiation */
 
-		pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE); 
+		pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
 		/* BB calculate hash with password */
 		/* and copy into bcc */
 
 		calc_lanman_hash(ses, lnm_session_key);
-		ses->flags |= CIFS_SES_LANMAN; 
+		ses->flags |= CIFS_SES_LANMAN;
 /* #ifdef CONFIG_CIFS_DEBUG2
 		cifs_dump_mem("cryptkey: ",ses->server->cryptKey,
 			CIFS_SESS_KEY_SIZE);
@@ -397,10 +400,10 @@
 		changed to do higher than lanman dialect and
 		we reconnected would we ever calc signing_key? */
 
-		cFYI(1,("Negotiating LANMAN setting up strings"));
+		cFYI(1, ("Negotiating LANMAN setting up strings"));
 		/* Unicode not allowed for LANMAN dialects */
 		ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
-#endif    
+#endif
 	} else if (type == NTLM) {
 		char ntlm_session_key[CIFS_SESS_KEY_SIZE];
 
@@ -409,38 +412,38 @@
 			cpu_to_le16(CIFS_SESS_KEY_SIZE);
 		pSMB->req_no_secext.CaseSensitivePasswordLength =
 			cpu_to_le16(CIFS_SESS_KEY_SIZE);
-	
+
 		/* calculate session key */
 		SMBNTencrypt(ses->password, ses->server->cryptKey,
 			     ntlm_session_key);
 
-		if(first_time) /* should this be moved into common code 
+		if (first_time) /* should this be moved into common code
 				  with similar ntlmv2 path? */
-			cifs_calculate_mac_key(ses->server->mac_signing_key,
+			cifs_calculate_mac_key(&ses->server->mac_signing_key,
 				ntlm_session_key, ses->password);
 		/* copy session key */
 
-		memcpy(bcc_ptr, (char *)ntlm_session_key,CIFS_SESS_KEY_SIZE);
+		memcpy(bcc_ptr, (char *)ntlm_session_key, CIFS_SESS_KEY_SIZE);
 		bcc_ptr += CIFS_SESS_KEY_SIZE;
-		memcpy(bcc_ptr, (char *)ntlm_session_key,CIFS_SESS_KEY_SIZE);
+		memcpy(bcc_ptr, (char *)ntlm_session_key, CIFS_SESS_KEY_SIZE);
 		bcc_ptr += CIFS_SESS_KEY_SIZE;
-		if(ses->capabilities & CAP_UNICODE) {
+		if (ses->capabilities & CAP_UNICODE) {
 			/* unicode strings must be word aligned */
 			if (iov[0].iov_len % 2) {
 				*bcc_ptr = 0;
-				bcc_ptr++;		
-			}	
+				bcc_ptr++;
+			}
 			unicode_ssetup_strings(&bcc_ptr, ses, nls_cp);
 		} else
 			ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
 	} else if (type == NTLMv2) {
-		char * v2_sess_key = 
+		char *v2_sess_key =
 			kmalloc(sizeof(struct ntlmv2_resp), GFP_KERNEL);
 
 		/* BB FIXME change all users of v2_sess_key to
 		   struct ntlmv2_resp */
 
-		if(v2_sess_key == NULL) {
+		if (v2_sess_key == NULL) {
 			cifs_small_buf_release(smb_buf);
 			return -ENOMEM;
 		}
@@ -456,8 +459,8 @@
 
 		/* calculate session key */
 		setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
-		if(first_time) /* should this be moved into common code
-			          with similar ntlmv2 path? */
+		if (first_time) /* should this be moved into common code
+				   with similar ntlmv2 path? */
 		/*   cifs_calculate_ntlmv2_mac_key(ses->server->mac_signing_key,
 				response BB FIXME, v2_sess_key); */
 
@@ -465,11 +468,12 @@
 
 	/*	memcpy(bcc_ptr, (char *)ntlm_session_key,LM2_SESS_KEY_SIZE);
 		bcc_ptr += LM2_SESS_KEY_SIZE; */
-		memcpy(bcc_ptr, (char *)v2_sess_key, sizeof(struct ntlmv2_resp));
+		memcpy(bcc_ptr, (char *)v2_sess_key,
+		       sizeof(struct ntlmv2_resp));
 		bcc_ptr += sizeof(struct ntlmv2_resp);
 		kfree(v2_sess_key);
-		if(ses->capabilities & CAP_UNICODE) {
-			if(iov[0].iov_len % 2) {
+		if (ses->capabilities & CAP_UNICODE) {
+			if (iov[0].iov_len % 2) {
 				*bcc_ptr = 0;
 			}	bcc_ptr++;
 			unicode_ssetup_strings(&bcc_ptr, ses, nls_cp);
@@ -488,20 +492,20 @@
 	BCC_LE(smb_buf) = cpu_to_le16(count);
 
 	iov[1].iov_base = str_area;
-	iov[1].iov_len = count; 
+	iov[1].iov_len = count;
 	rc = SendReceive2(xid, ses, iov, 2 /* num_iovecs */, &resp_buf_type, 0);
 	/* SMB request buf freed in SendReceive2 */
 
-	cFYI(1,("ssetup rc from sendrecv2 is %d",rc));
-	if(rc)
+	cFYI(1, ("ssetup rc from sendrecv2 is %d", rc));
+	if (rc)
 		goto ssetup_exit;
 
 	pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base;
 	smb_buf = (struct smb_hdr *)iov[0].iov_base;
 
-	if((smb_buf->WordCount != 3) && (smb_buf->WordCount != 4)) {
+	if ((smb_buf->WordCount != 3) && (smb_buf->WordCount != 4)) {
 		rc = -EIO;
-		cERROR(1,("bad word count %d", smb_buf->WordCount));
+		cERROR(1, ("bad word count %d", smb_buf->WordCount));
 		goto ssetup_exit;
 	}
 	action = le16_to_cpu(pSMB->resp.Action);
@@ -514,31 +518,32 @@
 	bytes_remaining = BCC(smb_buf);
 	bcc_ptr = pByteArea(smb_buf);
 
-	if(smb_buf->WordCount == 4) {
+	if (smb_buf->WordCount == 4) {
 		__u16 blob_len;
 		blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
 		bcc_ptr += blob_len;
-		if(blob_len > bytes_remaining) {
-			cERROR(1,("bad security blob length %d", blob_len));
+		if (blob_len > bytes_remaining) {
+			cERROR(1, ("bad security blob length %d", blob_len));
 			rc = -EINVAL;
 			goto ssetup_exit;
 		}
 		bytes_remaining -= blob_len;
-	}	
+	}
 
 	/* BB check if Unicode and decode strings */
-	if(smb_buf->Flags2 & SMBFLG2_UNICODE)
+	if (smb_buf->Flags2 & SMBFLG2_UNICODE)
 		rc = decode_unicode_ssetup(&bcc_ptr, bytes_remaining,
 						   ses, nls_cp);
 	else
-		rc = decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,nls_cp);
-	
+		rc = decode_ascii_ssetup(&bcc_ptr, bytes_remaining,
+					 ses, nls_cp);
+
 ssetup_exit:
 	kfree(str_area);
-	if(resp_buf_type == CIFS_SMALL_BUFFER) {
-		cFYI(1,("ssetup freeing small buf %p", iov[0].iov_base));
+	if (resp_buf_type == CIFS_SMALL_BUFFER) {
+		cFYI(1, ("ssetup freeing small buf %p", iov[0].iov_base));
 		cifs_small_buf_release(iov[0].iov_base);
-	} else if(resp_buf_type == CIFS_LARGE_BUFFER)
+	} else if (resp_buf_type == CIFS_LARGE_BUFFER)
 		cifs_buf_release(iov[0].iov_base);
 
 	return rc;
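
The fs/cifs/sess.c changes are likewise mostly indentation and message-formatting fixes (the space-indented blocks in ascii_ssetup_strings() and decode_unicode_ssetup() become tab-indented), with one functional-looking difference: cifs_calculate_mac_key() is now passed &ses->server->mac_signing_key instead of the field itself, which presumably reflects a type change for the signing key elsewhere in this series. The two-piece request layout that CIFS_SessSetup() relies on (fixed header in iov[0], variable string area in iov[1], avoiding one large 17K allocation) can be illustrated with the small userspace sketch below; it uses POSIX writev() purely as a stand-in for the kernel's kvec-based SendReceive2().

    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
        char header[64] = { 0 };                                     /* fixed-size request header */
        static const char strings[] = "user\0DOMAIN\0Linux\0CIFS";   /* variable byte-count area */

        struct iovec iov[2] = {
            { .iov_base = header,          .iov_len = sizeof(header)  },
            { .iov_base = (void *)strings, .iov_len = sizeof(strings) },
        };

        /* one call covers both pieces; stdout stands in for the server socket */
        ssize_t n = writev(STDOUT_FILENO, iov, 2);

        fprintf(stderr, "sent %zd bytes in 2 iovecs\n", n);
        return 0;
    }
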
diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c
index 1b1daf6..cfa6d21 100644
--- a/fs/cifs/smbdes.c
+++ b/fs/cifs/smbdes.c
@@ -1,32 +1,32 @@
-/* 
+/*
    Unix SMB/Netbios implementation.
    Version 1.9.
 
-   a partial implementation of DES designed for use in the 
+   a partial implementation of DES designed for use in the
    SMB authentication protocol
 
    Copyright (C) Andrew Tridgell 1998
    Modified by Steve French (sfrench@us.ibm.com) 2002,2004
-   
+
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
-   
+
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
-   
+
    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
-/* NOTES: 
+/* NOTES:
 
    This code makes no attempt to be fast! In fact, it is a very
-   slow implementation 
+   slow implementation
 
    This code is NOT a complete DES implementation. It implements only
    the minimum necessary for SMB authentication, as used by all SMB
@@ -153,7 +153,7 @@
 };
 
 static void
-permute(char *out, char *in, uchar * p, int n)
+permute(char *out, char *in, uchar *p, int n)
 {
 	int i;
 	for (i = 0; i < n; i++)
@@ -202,18 +202,18 @@
 	char *rl;
 
 	/* Have to reduce stack usage */
-	pk1 = kmalloc(56+56+64+64,GFP_KERNEL);
-	if(pk1 == NULL)
+	pk1 = kmalloc(56+56+64+64, GFP_KERNEL);
+	if (pk1 == NULL)
 		return;
 
 	ki = kmalloc(16*48, GFP_KERNEL);
-	if(ki == NULL) {
+	if (ki == NULL) {
 		kfree(pk1);
 		return;
 	}
 
 	cd = pk1 + 56;
-	pd1= cd  + 56;
+	pd1 = cd  + 56;
 	rl = pd1 + 64;
 
 	permute(pk1, key, perm1, 56);
@@ -247,7 +247,7 @@
 		char *r2;  /* r2[32]  */
 
 		er = kmalloc(48+48+32+32+32, GFP_KERNEL);
-		if(er == NULL) {
+		if (er == NULL) {
 			kfree(pk1);
 			kfree(ki);
 			return;
@@ -327,8 +327,8 @@
 	char *keyb; /* keyb[64] */
 	unsigned char key2[8];
 
-	outb = kmalloc(64 * 3,GFP_KERNEL);
-	if(outb == NULL)
+	outb = kmalloc(64 * 3, GFP_KERNEL);
+	if (outb == NULL)
 		return;
 
 	inb  = outb + 64;
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 4b25ba9..90542a3 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -1,4 +1,4 @@
-/* 
+/*
    Unix SMB/Netbios implementation.
    Version 1.9.
    SMB parameters and setup
@@ -7,17 +7,17 @@
    Modified by Jeremy Allison 1995.
    Copyright (C) Andrew Bartlett <abartlet@samba.org> 2002-2003
    Modified by Steve French (sfrench@us.ibm.com) 2002-2003
-   
+
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
-   
+
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
-   
+
    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
@@ -57,7 +57,7 @@
 
 /*
    This implements the X/Open SMB password encryption
-   It takes a password, a 8 byte "crypt key" and puts 24 bytes of 
+   It takes a password, a 8 byte "crypt key" and puts 24 bytes of
    encrypted password into p24 */
 /* Note that password must be uppercased and null terminated */
 void
@@ -73,9 +73,9 @@
 	E_P16(p14, p21);
 
 	SMBOWFencrypt(p21, c8, p24);
-	
-	memset(p14,0,15);
-	memset(p21,0,21);
+
+	memset(p14, 0, 15);
+	memset(p21, 0, 21);
 }
 
 /* Routines for Windows NT MD4 Hash functions. */
@@ -90,14 +90,14 @@
 
 /*
  * Convert a string into an NT UNICODE string.
- * Note that regardless of processor type 
+ * Note that regardless of processor type
  * this must be in intel (little-endian)
  * format.
  */
 
 static int
 _my_mbstowcs(__u16 * dst, const unsigned char *src, int len)
-{				/* not a very good conversion routine - change/fix */
+{	/* BB not a very good conversion routine - change/fix */
 	int i;
 	__u16 val;
 
@@ -112,7 +112,7 @@
 	return i;
 }
 
-/* 
+/*
  * Creates the MD4 Hash of the users password in NT UNICODE.
  */
 
@@ -123,7 +123,7 @@
 	__u16 wpwd[129];
 
 	/* Password cannot be longer than 128 characters */
-	if(passwd) {
+	if (passwd) {
 		len = strlen((char *) passwd);
 		if (len > 128) {
 			len = 128;
@@ -138,7 +138,7 @@
 	len = _my_wcslen(wpwd) * sizeof (__u16);
 
 	mdfour(p16, (unsigned char *) wpwd, len);
-	memset(wpwd,0,129 * 2);
+	memset(wpwd, 0, 129 * 2);
 }
 
 #if 0 /* currently unused */
@@ -178,17 +178,17 @@
 		const char *domain_n, unsigned char kr_buf[16],
 		const struct nls_table *nls_codepage)
 {
-	wchar_t * user_u;
-	wchar_t * dom_u;
+	wchar_t *user_u;
+	wchar_t *dom_u;
 	int user_l, domain_l;
 	struct HMACMD5Context ctx;
 
 	/* might as well do one alloc to hold both (user_u and dom_u) */
-	user_u = kmalloc(2048 * sizeof(wchar_t),GFP_KERNEL); 
-	if(user_u == NULL)
+	user_u = kmalloc(2048 * sizeof(wchar_t), GFP_KERNEL);
+	if (user_u == NULL)
 		return;
 	dom_u = user_u + 1024;
-    
+
 	/* push_ucs2(NULL, user_u, user_n, (user_l+1)*2, STR_UNICODE|STR_NOALIGN|STR_TERMINATE|STR_UPPER);
 	   push_ucs2(NULL, dom_u, domain_n, (domain_l+1)*2, STR_UNICODE|STR_NOALIGN|STR_TERMINATE|STR_UPPER); */
 
@@ -206,7 +206,7 @@
 
 	kfree(user_u);
 }
-#endif 
+#endif
 
 /* Does the des encryption from the NT or LM MD4 hash. */
 static void
@@ -256,15 +256,15 @@
 #if 0
 static void
 SMBOWFencrypt_ntv2(const unsigned char kr[16],
-                   const struct data_blob * srv_chal,
-                   const struct data_blob * cli_chal, unsigned char resp_buf[16])
+		   const struct data_blob *srv_chal,
+		   const struct data_blob *cli_chal, unsigned char resp_buf[16])
 {
-        struct HMACMD5Context ctx;
+	struct HMACMD5Context ctx;
 
-        hmac_md5_init_limK_to_64(kr, 16, &ctx);
-        hmac_md5_update(srv_chal->data, srv_chal->length, &ctx);
-        hmac_md5_update(cli_chal->data, cli_chal->length, &ctx);
-        hmac_md5_final(resp_buf, &ctx);
+	hmac_md5_init_limK_to_64(kr, 16, &ctx);
+	hmac_md5_update(srv_chal->data, srv_chal->length, &ctx);
+	hmac_md5_update(cli_chal->data, cli_chal->length, &ctx);
+	hmac_md5_final(resp_buf, &ctx);
 }
 
 static void
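
The smbdes.c and smbencrypt.c hunks are pure formatting cleanup of the Samba-derived authentication helpers. As the retained comments note, E_md4hash() widens the password to 16-bit characters with _my_mbstowcs() before taking the MD4 hash, then scrubs the temporary buffer. The sketch below is a simplified standalone model of the widening step only; widen16() is an illustrative name, it performs no real charset conversion, and it omits the explicit little-endian stores the kernel routine uses.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Naive 8-bit to 16-bit widening in the spirit of _my_mbstowcs(); values
       are stored in host byte order here, unlike the kernel routine. */
    static int widen16(uint16_t *dst, const unsigned char *src, int max)
    {
        int i;

        for (i = 0; i < max && src[i]; i++)
            dst[i] = (uint16_t)src[i];
        dst[i] = 0;
        return i;   /* characters converted, excluding the terminator */
    }

    int main(void)
    {
        uint16_t wpwd[129]; /* password capped at 128 characters, as in E_md4hash() */
        int n = widen16(wpwd, (const unsigned char *)"Passw0rd", 128);

        printf("widened %d characters\n", n);
        memset(wpwd, 0, sizeof(wpwd));  /* scrub the temporary, as E_md4hash() does */
        return 0;
    }
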
diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h
index 212c3c2..2ef0be2 100644
--- a/fs/cifs/smberr.h
+++ b/fs/cifs/smberr.h
@@ -4,8 +4,8 @@
  *   Copyright (c) International Business Machines  Corp., 2002,2004
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
- *   See Error Codes section of the SNIA CIFS Specification 
- *   for more information 
+ *   See Error Codes section of the SNIA CIFS Specification
+ *   for more information
  *
  *   This library is free software; you can redistribute it and/or modify
  *   it under the terms of the GNU Lesser General Public License as published
@@ -19,7 +19,7 @@
  *
  *   You should have received a copy of the GNU Lesser General Public License
  *   along with this library; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #define SUCCESS	0x00	/* The request was successful. */
@@ -110,7 +110,7 @@
 
 /* Below errors are used internally (do not come over the wire) for passthrough
    from STATUS codes to POSIX only  */
-#define ErrTooManyLinks         0xFFFE   
+#define ErrTooManyLinks         0xFFFE
 
 /* Following error codes may be generated with the ERRSRV error class.*/
 
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 5f46845..746bc94 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -1,10 +1,10 @@
 /*
  *   fs/cifs/transport.c
  *
- *   Copyright (C) International Business Machines  Corp., 2002,2005
+ *   Copyright (C) International Business Machines  Corp., 2002,2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *   Jeremy Allison (jra@samba.org) 2006.
- *    
+ *
  *   This library is free software; you can redistribute it and/or modify
  *   it under the terms of the GNU Lesser General Public License as published
  *   by the Free Software Foundation; either version 2.1 of the License, or
@@ -17,7 +17,7 @@
  *
  *   You should have received a copy of the GNU Lesser General Public License
  *   along with this library; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #include <linux/fs.h>
@@ -32,7 +32,7 @@
 #include "cifsglob.h"
 #include "cifsproto.h"
 #include "cifs_debug.h"
-  
+
 extern mempool_t *cifs_mid_poolp;
 extern struct kmem_cache *cifs_oplock_cachep;
 
@@ -49,7 +49,7 @@
 		cERROR(1, ("Null TCP session in AllocMidQEntry"));
 		return NULL;
 	}
-	
+
 	temp = (struct mid_q_entry *) mempool_alloc(cifs_mid_poolp,
 						    GFP_KERNEL | GFP_NOFS);
 	if (temp == NULL)
@@ -86,7 +86,7 @@
 	list_del(&midEntry->qhead);
 	atomic_dec(&midCount);
 	spin_unlock(&GlobalMid_Lock);
-	if(midEntry->largeBuf)
+	if (midEntry->largeBuf)
 		cifs_buf_release(midEntry->resp_buf);
 	else
 		cifs_small_buf_release(midEntry->resp_buf);
@@ -94,8 +94,8 @@
 	now = jiffies;
 	/* commands taking longer than one second are indications that
 	   something is wrong, unless it is quite a slow link or server */
-	if((now - midEntry->when_alloc) > HZ) {
-		if((cifsFYI & CIFS_TIMER) && 
+	if ((now - midEntry->when_alloc) > HZ) {
+		if ((cifsFYI & CIFS_TIMER) &&
 		   (midEntry->command != SMB_COM_LOCKING_ANDX)) {
 			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %d",
 			       midEntry->command, midEntry->mid);
@@ -110,10 +110,10 @@
 }
 
 struct oplock_q_entry *
-AllocOplockQEntry(struct inode * pinode, __u16 fid, struct cifsTconInfo * tcon)
+AllocOplockQEntry(struct inode *pinode, __u16 fid, struct cifsTconInfo *tcon)
 {
 	struct oplock_q_entry *temp;
-	if ((pinode== NULL) || (tcon == NULL)) {
+	if ((pinode == NULL) || (tcon == NULL)) {
 		cERROR(1, ("Null parms passed to AllocOplockQEntry"));
 		return NULL;
 	}
@@ -133,9 +133,9 @@
 
 }
 
-void DeleteOplockQEntry(struct oplock_q_entry * oplockEntry)
+void DeleteOplockQEntry(struct oplock_q_entry *oplockEntry)
 {
-	spin_lock(&GlobalMid_Lock); 
+	spin_lock(&GlobalMid_Lock);
     /* should we check if list empty first? */
 	list_del(&oplockEntry->qhead);
 	spin_unlock(&GlobalMid_Lock);
@@ -152,7 +152,7 @@
 	struct kvec iov;
 	unsigned len = smb_buf_length + 4;
 
-	if(ssocket == NULL)
+	if (ssocket == NULL)
 		return -ENOTSOCK; /* BB eventually add reconnect code here */
 	iov.iov_base = smb_buffer;
 	iov.iov_len = len;
@@ -164,8 +164,8 @@
 	smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; /* BB add more flags?*/
 
 	/* smb header is converted in header_assemble. bcc and rest of SMB word
-	   area, and byte area if necessary, is converted to littleendian in 
-	   cifssmb.c and RFC1001 len is converted to bigendian in smb_send 
+	   area, and byte area if necessary, is converted to littleendian in
+	   cifssmb.c and RFC1001 len is converted to bigendian in smb_send
 	   Flags2 is converted in SendReceive */
 
 	smb_buffer->smb_buf_length = cpu_to_be32(smb_buffer->smb_buf_length);
@@ -177,9 +177,9 @@
 		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
 			i++;
 		/* smaller timeout here than send2 since smaller size */
-		/* Although it may not be required, this also is smaller 
-		   oplock break time */  
-			if(i > 12) {
+		/* Although it may not be required, this also is smaller
+		   oplock break time */
+			if (i > 12) {
 				cERROR(1,
 				   ("sends on sock %p stuck for 7 seconds",
 				    ssocket));
@@ -189,7 +189,7 @@
 			msleep(1 << i);
 			continue;
 		}
-		if (rc < 0) 
+		if (rc < 0)
 			break;
 		else
 			i = 0; /* reset i after each successful send */
@@ -199,7 +199,7 @@
 	}
 
 	if (rc < 0) {
-		cERROR(1,("Error %d sending data on socket to server", rc));
+		cERROR(1, ("Error %d sending data on socket to server", rc));
 	} else {
 		rc = 0;
 	}
@@ -223,8 +223,8 @@
 	unsigned int total_len;
 	int first_vec = 0;
 	unsigned int smb_buf_length = smb_buffer->smb_buf_length;
-	
-	if(ssocket == NULL)
+
+	if (ssocket == NULL)
 		return -ENOTSOCK; /* BB eventually add reconnect code here */
 
 	smb_msg.msg_name = sin;
@@ -234,8 +234,8 @@
 	smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; /* BB add more flags?*/
 
 	/* smb header is converted in header_assemble. bcc and rest of SMB word
-	   area, and byte area if necessary, is converted to littleendian in 
-	   cifssmb.c and RFC1001 len is converted to bigendian in smb_send 
+	   area, and byte area if necessary, is converted to littleendian in
+	   cifssmb.c and RFC1001 len is converted to bigendian in smb_send
 	   Flags2 is converted in SendReceive */
 
 
@@ -252,7 +252,7 @@
 				    n_vec - first_vec, total_len);
 		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
 			i++;
-			if(i >= 14) {
+			if (i >= 14) {
 				cERROR(1,
 				   ("sends on sock %p stuck for 15 seconds",
 				    ssocket));
@@ -262,17 +262,17 @@
 			msleep(1 << i);
 			continue;
 		}
-		if (rc < 0) 
+		if (rc < 0)
 			break;
 
 		if (rc >= total_len) {
 			WARN_ON(rc > total_len);
 			break;
 		}
-		if(rc == 0) {
+		if (rc == 0) {
 			/* should never happen, letting socket clear before
 			   retrying is our only obvious option here */
-			cERROR(1,("tcp sent no data"));
+			cERROR(1, ("tcp sent no data"));
 			msleep(500);
 			continue;
 		}
@@ -295,7 +295,7 @@
 	}
 
 	if (rc < 0) {
-		cERROR(1,("Error %d sending data on socket to server", rc));
+		cERROR(1, ("Error %d sending data on socket to server", rc));
 	} else
 		rc = 0;
 
@@ -308,13 +308,13 @@
 
 static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
 {
-	if(long_op == -1) {
+	if (long_op == -1) {
 		/* oplock breaks must not be held up */
 		atomic_inc(&ses->server->inFlight);
 	} else {
-		spin_lock(&GlobalMid_Lock); 
-		while(1) {        
-			if(atomic_read(&ses->server->inFlight) >= 
+		spin_lock(&GlobalMid_Lock);
+		while (1) {
+			if (atomic_read(&ses->server->inFlight) >=
 					cifs_max_pending){
 				spin_unlock(&GlobalMid_Lock);
 #ifdef CONFIG_CIFS_STATS2
@@ -328,14 +328,14 @@
 #endif
 				spin_lock(&GlobalMid_Lock);
 			} else {
-				if(ses->server->tcpStatus == CifsExiting) {
+				if (ses->server->tcpStatus == CifsExiting) {
 					spin_unlock(&GlobalMid_Lock);
 					return -ENOENT;
 				}
 
-				/* can not count locking commands against total since
-				   they are allowed to block on server */
-					
+				/* can not count locking commands against total
+				   as they are allowed to block on server */
+
 				/* update # of requests on the wire to server */
 				if (long_op < 3)
 					atomic_inc(&ses->server->inFlight);
@@ -353,11 +353,11 @@
 	if (ses->server->tcpStatus == CifsExiting) {
 		return -ENOENT;
 	} else if (ses->server->tcpStatus == CifsNeedReconnect) {
-		cFYI(1,("tcp session dead - return to caller to retry"));
+		cFYI(1, ("tcp session dead - return to caller to retry"));
 		return -EAGAIN;
 	} else if (ses->status != CifsGood) {
 		/* check if SMB session is bad because we are setting it up */
-		if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && 
+		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
 			(in_buf->Command != SMB_COM_NEGOTIATE)) {
 			return -EAGAIN;
 		} /* else ok - we are setting up session */
@@ -369,7 +369,7 @@
 	return 0;
 }
 
-static int wait_for_response(struct cifsSesInfo *ses, 
+static int wait_for_response(struct cifsSesInfo *ses,
 			struct mid_q_entry *midQ,
 			unsigned long timeout,
 			unsigned long time_to_wait)
@@ -379,8 +379,8 @@
 	for (;;) {
 		curr_timeout = timeout + jiffies;
 		wait_event(ses->server->response_q,
-			(!(midQ->midState == MID_REQUEST_SUBMITTED)) || 
-			time_after(jiffies, curr_timeout) || 
+			(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
+			time_after(jiffies, curr_timeout) ||
 			((ses->server->tcpStatus != CifsGood) &&
 			 (ses->server->tcpStatus != CifsNew)));
 
@@ -398,16 +398,16 @@
 			spin_unlock(&GlobalMid_Lock);
 
 			/* Calculate time_to_wait past last receive time.
-			 Although we prefer not to time out if the 
+			 Although we prefer not to time out if the
 			 server is still responding - we will time
-			 out if the server takes more than 15 (or 45 
+			 out if the server takes more than 15 (or 45
 			 or 180) seconds to respond to this request
-			 and has not responded to any request from 
+			 and has not responded to any request from
 			 other threads on the client within 10 seconds */
 			lrt += time_to_wait;
 			if (time_after(jiffies, lrt)) {
 				/* No replies for time_to_wait. */
-				cERROR(1,("server not responding"));
+				cERROR(1, ("server not responding"));
 				return -1;
 			}
 		} else {
@@ -417,8 +417,8 @@
 }
 
 int
-SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, 
-	     struct kvec *iov, int n_vec, int * pRespBufType /* ret */, 
+SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
+	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
 	     const int long_op)
 {
 	int rc = 0;
@@ -426,21 +426,21 @@
 	unsigned long timeout;
 	struct mid_q_entry *midQ;
 	struct smb_hdr *in_buf = iov[0].iov_base;
-	
+
 	*pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */
 
 	if ((ses == NULL) || (ses->server == NULL)) {
 		cifs_small_buf_release(in_buf);
-		cERROR(1,("Null session"));
+		cERROR(1, ("Null session"));
 		return -EIO;
 	}
 
-	if(ses->server->tcpStatus == CifsExiting) {
+	if (ses->server->tcpStatus == CifsExiting) {
 		cifs_small_buf_release(in_buf);
 		return -ENOENT;
 	}
 
-	/* Ensure that we do not send more than 50 overlapping requests 
+	/* Ensure that we do not send more than 50 overlapping requests
 	   to the same server. We may make this configurable later or
 	   use ses->maxReq */
 
@@ -450,23 +450,23 @@
 		return rc;
 	}
 
-	/* make sure that we sign in the same order that we send on this socket 
+	/* make sure that we sign in the same order that we send on this socket
 	   and avoid races inside tcp sendmsg code that could cause corruption
 	   of smb data */
 
-	down(&ses->server->tcpSem); 
+	down(&ses->server->tcpSem);
 
 	rc = allocate_mid(ses, in_buf, &midQ);
 	if (rc) {
 		up(&ses->server->tcpSem);
 		cifs_small_buf_release(in_buf);
 		/* Update # of requests on wire to server */
-		atomic_dec(&ses->server->inFlight); 
+		atomic_dec(&ses->server->inFlight);
 		wake_up(&ses->server->request_q);
 		return rc;
 	}
 
- 	rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
+	rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
 
 	midQ->midState = MID_REQUEST_SUBMITTED;
 #ifdef CONFIG_CIFS_STATS2
@@ -482,7 +482,7 @@
 	up(&ses->server->tcpSem);
 	cifs_small_buf_release(in_buf);
 
-	if(rc < 0)
+	if (rc < 0)
 		goto out;
 
 	if (long_op == -1)
@@ -490,18 +490,18 @@
 	else if (long_op == 2) /* writes past end of file can take loong time */
 		timeout = 180 * HZ;
 	else if (long_op == 1)
-		timeout = 45 * HZ; /* should be greater than 
+		timeout = 45 * HZ; /* should be greater than
 			servers oplock break timeout (about 43 seconds) */
 	else
 		timeout = 15 * HZ;
 
-	/* wait for 15 seconds or until woken up due to response arriving or 
+	/* wait for 15 seconds or until woken up due to response arriving or
 	   due to last connection to this server being unmounted */
 	if (signal_pending(current)) {
 		/* if signal pending do not hold up user for full smb timeout
 		but we still give response a chance to complete */
 		timeout = 2 * HZ;
-	}   
+	}
 
 	/* No user interrupts in wait - wreaks havoc with performance */
 	wait_for_response(ses, midQ, timeout, 10 * HZ);
@@ -511,10 +511,10 @@
 		spin_unlock(&GlobalMid_Lock);
 		receive_len = midQ->resp_buf->smb_buf_length;
 	} else {
-		cERROR(1,("No response to cmd %d mid %d",
+		cERROR(1, ("No response to cmd %d mid %d",
 			midQ->command, midQ->mid));
-		if(midQ->midState == MID_REQUEST_SUBMITTED) {
-			if(ses->server->tcpStatus == CifsExiting)
+		if (midQ->midState == MID_REQUEST_SUBMITTED) {
+			if (ses->server->tcpStatus == CifsExiting)
 				rc = -EHOSTDOWN;
 			else {
 				ses->server->tcpStatus = CifsNeedReconnect;
@@ -523,9 +523,9 @@
 		}
 
 		if (rc != -EHOSTDOWN) {
-			if(midQ->midState == MID_RETRY_NEEDED) {
+			if (midQ->midState == MID_RETRY_NEEDED) {
 				rc = -EAGAIN;
-				cFYI(1,("marking request for retry"));
+				cFYI(1, ("marking request for retry"));
 			} else {
 				rc = -EIO;
 			}
@@ -533,21 +533,21 @@
 		spin_unlock(&GlobalMid_Lock);
 		DeleteMidQEntry(midQ);
 		/* Update # of requests on wire to server */
-		atomic_dec(&ses->server->inFlight); 
+		atomic_dec(&ses->server->inFlight);
 		wake_up(&ses->server->request_q);
 		return rc;
 	}
-  
+
 	if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
 		cERROR(1, ("Frame too large received.  Length: %d  Xid: %d",
 			receive_len, xid));
 		rc = -EIO;
 	} else {		/* rcvd frame is ok */
-		if (midQ->resp_buf && 
+		if (midQ->resp_buf &&
 			(midQ->midState == MID_RESPONSE_RECEIVED)) {
 
 			iov[0].iov_base = (char *)midQ->resp_buf;
-			if(midQ->largeBuf)
+			if (midQ->largeBuf)
 				*pRespBufType = CIFS_LARGE_BUFFER;
 			else
 				*pRespBufType = CIFS_SMALL_BUFFER;
@@ -555,14 +555,14 @@
 
 			dump_smb(midQ->resp_buf, 80);
 			/* convert the length into a more usable form */
-			if((receive_len > 24) &&
+			if ((receive_len > 24) &&
 			   (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
 					SECMODE_SIGN_ENABLED))) {
 				rc = cifs_verify_signature(midQ->resp_buf,
-						ses->server->mac_signing_key,
+						&ses->server->mac_signing_key,
 						midQ->sequence_number+1);
-				if(rc) {
-					cERROR(1,("Unexpected SMB signature"));
+				if (rc) {
+					cERROR(1, ("Unexpected SMB signature"));
 					/* BB FIXME add code to kill session */
 				}
 			}
@@ -576,19 +576,19 @@
 			    sizeof (struct smb_hdr) -
 			    4 /* do not count RFC1001 header */  +
 			    (2 * midQ->resp_buf->WordCount) + 2 /* bcc */ )
-				BCC(midQ->resp_buf) = 
+				BCC(midQ->resp_buf) =
 					le16_to_cpu(BCC_LE(midQ->resp_buf));
 			midQ->resp_buf = NULL;  /* mark it so will not be freed
 						by DeleteMidQEntry */
 		} else {
 			rc = -EIO;
-			cFYI(1,("Bad MID state?"));
+			cFYI(1, ("Bad MID state?"));
 		}
 	}
 
 out:
 	DeleteMidQEntry(midQ);
-	atomic_dec(&ses->server->inFlight); 
+	atomic_dec(&ses->server->inFlight);
 	wake_up(&ses->server->request_q);
 
 	return rc;
@@ -605,18 +605,18 @@
 	struct mid_q_entry *midQ;
 
 	if (ses == NULL) {
-		cERROR(1,("Null smb session"));
+		cERROR(1, ("Null smb session"));
 		return -EIO;
 	}
-	if(ses->server == NULL) {
-		cERROR(1,("Null tcp session"));
+	if (ses->server == NULL) {
+		cERROR(1, ("Null tcp session"));
 		return -EIO;
 	}
 
-	if(ses->server->tcpStatus == CifsExiting)
+	if (ses->server->tcpStatus == CifsExiting)
 		return -ENOENT;
 
-	/* Ensure that we do not send more than 50 overlapping requests 
+	/* Ensure that we do not send more than 50 overlapping requests
 	   to the same server. We may make this configurable later or
 	   use ses->maxReq */
 
@@ -624,17 +624,17 @@
 	if (rc)
 		return rc;
 
-	/* make sure that we sign in the same order that we send on this socket 
+	/* make sure that we sign in the same order that we send on this socket
 	   and avoid races inside tcp sendmsg code that could cause corruption
 	   of smb data */
 
-	down(&ses->server->tcpSem); 
+	down(&ses->server->tcpSem);
 
 	rc = allocate_mid(ses, in_buf, &midQ);
 	if (rc) {
 		up(&ses->server->tcpSem);
 		/* Update # of requests on wire to server */
-		atomic_dec(&ses->server->inFlight); 
+		atomic_dec(&ses->server->inFlight);
 		wake_up(&ses->server->request_q);
 		return rc;
 	}
@@ -645,7 +645,7 @@
 		DeleteMidQEntry(midQ);
 		up(&ses->server->tcpSem);
 		/* Update # of requests on wire to server */
-		atomic_dec(&ses->server->inFlight); 
+		atomic_dec(&ses->server->inFlight);
 		wake_up(&ses->server->request_q);
 		return -EIO;
 	}
@@ -664,7 +664,7 @@
 #endif
 	up(&ses->server->tcpSem);
 
-	if(rc < 0)
+	if (rc < 0)
 		goto out;
 
 	if (long_op == -1)
@@ -672,17 +672,17 @@
 	else if (long_op == 2) /* writes past end of file can take loong time */
 		timeout = 180 * HZ;
 	else if (long_op == 1)
-		timeout = 45 * HZ; /* should be greater than 
+		timeout = 45 * HZ; /* should be greater than
 			servers oplock break timeout (about 43 seconds) */
 	else
 		timeout = 15 * HZ;
-	/* wait for 15 seconds or until woken up due to response arriving or 
+	/* wait for 15 seconds or until woken up due to response arriving or
 	   due to last connection to this server being unmounted */
 	if (signal_pending(current)) {
 		/* if signal pending do not hold up user for full smb timeout
 		but we still give response a chance to complete */
 		timeout = 2 * HZ;
-	}   
+	}
 
 	/* No user interrupts in wait - wreaks havoc with performance */
 	wait_for_response(ses, midQ, timeout, 10 * HZ);
@@ -692,10 +692,10 @@
 		spin_unlock(&GlobalMid_Lock);
 		receive_len = midQ->resp_buf->smb_buf_length;
 	} else {
-		cERROR(1,("No response for cmd %d mid %d",
+		cERROR(1, ("No response for cmd %d mid %d",
 			  midQ->command, midQ->mid));
-		if(midQ->midState == MID_REQUEST_SUBMITTED) {
-			if(ses->server->tcpStatus == CifsExiting)
+		if (midQ->midState == MID_REQUEST_SUBMITTED) {
+			if (ses->server->tcpStatus == CifsExiting)
 				rc = -EHOSTDOWN;
 			else {
 				ses->server->tcpStatus = CifsNeedReconnect;
@@ -704,9 +704,9 @@
 		}
 
 		if (rc != -EHOSTDOWN) {
-			if(midQ->midState == MID_RETRY_NEEDED) {
+			if (midQ->midState == MID_RETRY_NEEDED) {
 				rc = -EAGAIN;
-				cFYI(1,("marking request for retry"));
+				cFYI(1, ("marking request for retry"));
 			} else {
 				rc = -EIO;
 			}
@@ -714,11 +714,11 @@
 		spin_unlock(&GlobalMid_Lock);
 		DeleteMidQEntry(midQ);
 		/* Update # of requests on wire to server */
-		atomic_dec(&ses->server->inFlight); 
+		atomic_dec(&ses->server->inFlight);
 		wake_up(&ses->server->request_q);
 		return rc;
 	}
-  
+
 	if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
 		cERROR(1, ("Frame too large received.  Length: %d  Xid: %d",
 			receive_len, xid));
@@ -734,14 +734,14 @@
 
 			dump_smb(out_buf, 92);
 			/* convert the length into a more usable form */
-			if((receive_len > 24) &&
+			if ((receive_len > 24) &&
 			   (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
 					SECMODE_SIGN_ENABLED))) {
 				rc = cifs_verify_signature(out_buf,
-						ses->server->mac_signing_key,
+						&ses->server->mac_signing_key,
 						midQ->sequence_number+1);
-				if(rc) {
-					cERROR(1,("Unexpected SMB signature"));
+				if (rc) {
+					cERROR(1, ("Unexpected SMB signature"));
 					/* BB FIXME add code to kill session */
 				}
 			}
@@ -759,13 +759,13 @@
 				BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
 		} else {
 			rc = -EIO;
-			cERROR(1,("Bad MID state?"));
+			cERROR(1, ("Bad MID state?"));
 		}
 	}
 
 out:
 	DeleteMidQEntry(midQ);
-	atomic_dec(&ses->server->inFlight); 
+	atomic_dec(&ses->server->inFlight);
 	wake_up(&ses->server->request_q);
 
 	return rc;
@@ -783,7 +783,7 @@
 
 	header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0);
 	in_buf->Mid = mid;
-	down(&ses->server->tcpSem); 
+	down(&ses->server->tcpSem);
 	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
 	if (rc) {
 		up(&ses->server->tcpSem);
@@ -832,20 +832,20 @@
 	struct cifsSesInfo *ses;
 
 	if (tcon == NULL || tcon->ses == NULL) {
-		cERROR(1,("Null smb session"));
+		cERROR(1, ("Null smb session"));
 		return -EIO;
 	}
 	ses = tcon->ses;
 
-	if(ses->server == NULL) {
-		cERROR(1,("Null tcp session"));
+	if (ses->server == NULL) {
+		cERROR(1, ("Null tcp session"));
 		return -EIO;
 	}
 
-	if(ses->server->tcpStatus == CifsExiting)
+	if (ses->server->tcpStatus == CifsExiting)
 		return -ENOENT;
 
-	/* Ensure that we do not send more than 50 overlapping requests 
+	/* Ensure that we do not send more than 50 overlapping requests
 	   to the same server. We may make this configurable later or
 	   use ses->maxReq */
 
@@ -853,11 +853,11 @@
 	if (rc)
 		return rc;
 
-	/* make sure that we sign in the same order that we send on this socket 
+	/* make sure that we sign in the same order that we send on this socket
 	   and avoid races inside tcp sendmsg code that could cause corruption
 	   of smb data */
 
-	down(&ses->server->tcpSem); 
+	down(&ses->server->tcpSem);
 
 	rc = allocate_mid(ses, in_buf, &midQ);
 	if (rc) {
@@ -887,14 +887,14 @@
 #endif
 	up(&ses->server->tcpSem);
 
-	if(rc < 0) {
+	if (rc < 0) {
 		DeleteMidQEntry(midQ);
 		return rc;
 	}
 
 	/* Wait for a reply - allow signals to interrupt. */
 	rc = wait_event_interruptible(ses->server->response_q,
-		(!(midQ->midState == MID_REQUEST_SUBMITTED)) || 
+		(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
 		((ses->server->tcpStatus != CifsGood) &&
 		 (ses->server->tcpStatus != CifsNew)));
 
@@ -928,7 +928,7 @@
 		}
 
 		/* Wait 5 seconds for the response. */
-		if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ)==0) {
+		if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ) == 0) {
 			/* We got the response - restart system call. */
 			rstart = 1;
 		}
@@ -939,10 +939,10 @@
 		spin_unlock(&GlobalMid_Lock);
 		receive_len = midQ->resp_buf->smb_buf_length;
 	} else {
-		cERROR(1,("No response for cmd %d mid %d",
+		cERROR(1, ("No response for cmd %d mid %d",
 			  midQ->command, midQ->mid));
-		if(midQ->midState == MID_REQUEST_SUBMITTED) {
-			if(ses->server->tcpStatus == CifsExiting)
+		if (midQ->midState == MID_REQUEST_SUBMITTED) {
+			if (ses->server->tcpStatus == CifsExiting)
 				rc = -EHOSTDOWN;
 			else {
 				ses->server->tcpStatus = CifsNeedReconnect;
@@ -951,9 +951,9 @@
 		}
 
 		if (rc != -EHOSTDOWN) {
-			if(midQ->midState == MID_RETRY_NEEDED) {
+			if (midQ->midState == MID_RETRY_NEEDED) {
 				rc = -EAGAIN;
-				cFYI(1,("marking request for retry"));
+				cFYI(1, ("marking request for retry"));
 			} else {
 				rc = -EIO;
 			}
@@ -962,7 +962,7 @@
 		DeleteMidQEntry(midQ);
 		return rc;
 	}
-  
+
 	if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
 		cERROR(1, ("Frame too large received.  Length: %d  Xid: %d",
 			receive_len, xid));
@@ -978,14 +978,14 @@
 
 			dump_smb(out_buf, 92);
 			/* convert the length into a more usable form */
-			if((receive_len > 24) &&
+			if ((receive_len > 24) &&
 			   (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
 					SECMODE_SIGN_ENABLED))) {
 				rc = cifs_verify_signature(out_buf,
-						ses->server->mac_signing_key,
+						&ses->server->mac_signing_key,
 						midQ->sequence_number+1);
-				if(rc) {
-					cERROR(1,("Unexpected SMB signature"));
+				if (rc) {
+					cERROR(1, ("Unexpected SMB signature"));
 					/* BB FIXME add code to kill session */
 				}
 			}
@@ -1003,7 +1003,7 @@
 				BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
 		} else {
 			rc = -EIO;
-			cERROR(1,("Bad MID state?"));
+			cERROR(1, ("Bad MID state?"));
 		}
 	}
 	DeleteMidQEntry(midQ);
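
The retry loops in smb_send() and smb_send2() above back off exponentially when the socket refuses data: on -ENOSPC or -EAGAIN they sleep 1 << i milliseconds and give up after roughly a dozen attempts. The following standalone sketch only models that pattern; it is not kernel code, and send_once()/backoff_ms() are hypothetical stand-ins for kernel_sendmsg() and msleep().

#define _POSIX_C_SOURCE 199309L
#include <errno.h>
#include <stdio.h>
#include <time.h>

static int attempts;

static int send_once(void)			/* placeholder for kernel_sendmsg() */
{
	return ++attempts < 3 ? -EAGAIN : 0;	/* pretend the socket is busy twice */
}

static void backoff_ms(unsigned int ms)		/* placeholder for msleep() */
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
	nanosleep(&ts, NULL);
}

static int send_with_backoff(void)
{
	int rc, i = 0;

	while ((rc = send_once()) == -EAGAIN || rc == -ENOSPC) {
		if (++i > 12)		/* ~8 seconds of accumulated waiting */
			return -EAGAIN;	/* caller reports the socket as stuck */
		backoff_ms(1U << i);	/* 2ms, 4ms, 8ms, ... */
	}
	return rc < 0 ? rc : 0;
}

int main(void)
{
	printf("rc = %d\n", send_with_backoff());
	return 0;
}
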
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 18fcec1..f61e433 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/xattr.c
  *
- *   Copyright (c) International Business Machines  Corp., 2003
+ *   Copyright (c) International Business Machines  Corp., 2003, 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -37,50 +37,52 @@
 #define XATTR_TRUSTED_PREFIX_LEN  8
 #define XATTR_SECURITY_PREFIX_LEN 9
 /* BB need to add server (Samba e.g) support for security and trusted prefix */
-  
 
 
-int cifs_removexattr(struct dentry * direntry, const char * ea_name)
+
+int cifs_removexattr(struct dentry *direntry, const char *ea_name)
 {
 	int rc = -EOPNOTSUPP;
 #ifdef CONFIG_CIFS_XATTR
 	int xid;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
-	struct super_block * sb;
-	char * full_path;
-                                                                                     
-	if(direntry == NULL)
+	struct super_block *sb;
+	char *full_path;
+
+	if (direntry == NULL)
 		return -EIO;
-	if(direntry->d_inode == NULL)
+	if (direntry->d_inode == NULL)
 		return -EIO;
 	sb = direntry->d_inode->i_sb;
-	if(sb == NULL)
+	if (sb == NULL)
 		return -EIO;
 	xid = GetXid();
-                                                                                     
+
 	cifs_sb = CIFS_SB(sb);
 	pTcon = cifs_sb->tcon;
-                                                                                     
+
 	full_path = build_path_from_dentry(direntry);
-	if(full_path == NULL) {
+	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
 	}
-	if(ea_name == NULL) {
-		cFYI(1,("Null xattr names not supported"));
-	} else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5)
-		&& (strncmp(ea_name,CIFS_XATTR_OS2_PREFIX,4))) {
-		cFYI(1,("illegal xattr namespace %s (only user namespace supported)",ea_name));
+	if (ea_name == NULL) {
+		cFYI(1, ("Null xattr names not supported"));
+	} else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5)
+		&& (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4))) {
+		cFYI(1,
+		    ("illegal xattr request %s (only user namespace supported)",
+			ea_name));
 		/* BB what if no namespace prefix? */
 		/* Should we just pass them to server, except for
 		system and perhaps security prefixes? */
 	} else {
-		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
 			goto remove_ea_exit;
 
-		ea_name+=5; /* skip past user. prefix */
-		rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,NULL,
+		ea_name += 5; /* skip past user. prefix */
+		rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, NULL,
 			(__u16)0, cifs_sb->local_nls,
 			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
 	}
@@ -91,23 +93,23 @@
 	return rc;
 }
 
-int cifs_setxattr(struct dentry * direntry, const char * ea_name,
-        const void * ea_value, size_t value_size, int flags)
+int cifs_setxattr(struct dentry *direntry, const char *ea_name,
+		  const void *ea_value, size_t value_size, int flags)
 {
 	int rc = -EOPNOTSUPP;
 #ifdef CONFIG_CIFS_XATTR
 	int xid;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
-	struct super_block * sb;
-	char * full_path;
+	struct super_block *sb;
+	char *full_path;
 
-	if(direntry == NULL)
+	if (direntry == NULL)
 		return -EIO;
-	if(direntry->d_inode == NULL)
+	if (direntry->d_inode == NULL)
 		return -EIO;
 	sb = direntry->d_inode->i_sb;
-	if(sb == NULL)
+	if (sb == NULL)
 		return -EIO;
 	xid = GetXid();
 
@@ -115,7 +117,7 @@
 	pTcon = cifs_sb->tcon;
 
 	full_path = build_path_from_dentry(direntry);
-	if(full_path == NULL) {
+	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
 	}
@@ -123,67 +125,69 @@
 	/* return alt name if available as pseudo attr */
 
 	/* if proc/fs/cifs/streamstoxattr is set then
-		search server for EAs or streams to 
+		search server for EAs or streams to
 		returns as xattrs */
-	if(value_size > MAX_EA_VALUE_SIZE) {
-		cFYI(1,("size of EA value too large"));
+	if (value_size > MAX_EA_VALUE_SIZE) {
+		cFYI(1, ("size of EA value too large"));
 		kfree(full_path);
 		FreeXid(xid);
 		return -EOPNOTSUPP;
 	}
 
-	if(ea_name == NULL) {
-		cFYI(1,("Null xattr names not supported"));
-	} else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5) == 0) {
-		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+	if (ea_name == NULL) {
+		cFYI(1, ("Null xattr names not supported"));
+	} else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
+		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
 			goto set_ea_exit;
-		if(strncmp(ea_name,CIFS_XATTR_DOS_ATTRIB,14) == 0) {
-			cFYI(1,("attempt to set cifs inode metadata"));
+		if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) {
+			cFYI(1, ("attempt to set cifs inode metadata"));
 		}
 		ea_name += 5; /* skip past user. prefix */
-		rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,ea_value,
+		rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
 			(__u16)value_size, cifs_sb->local_nls,
 			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
-	} else if(strncmp(ea_name, CIFS_XATTR_OS2_PREFIX,4) == 0) {
-		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+	} else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
+		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
 			goto set_ea_exit;
 
 		ea_name += 4; /* skip past os2. prefix */
-		rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,ea_value,
+		rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
 			(__u16)value_size, cifs_sb->local_nls,
 			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
 	} else {
-		int temp; 
-		temp = strncmp(ea_name,POSIX_ACL_XATTR_ACCESS,
+		int temp;
+		temp = strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
 			strlen(POSIX_ACL_XATTR_ACCESS));
 		if (temp == 0) {
 #ifdef CONFIG_CIFS_POSIX
-			if(sb->s_flags & MS_POSIXACL)
-				rc = CIFSSMBSetPosixACL(xid, pTcon,full_path,
-					ea_value, (const int)value_size, 
-					ACL_TYPE_ACCESS,cifs_sb->local_nls,
-					cifs_sb->mnt_cifs_flags & 
+			if (sb->s_flags & MS_POSIXACL)
+				rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
+					ea_value, (const int)value_size,
+					ACL_TYPE_ACCESS, cifs_sb->local_nls,
+					cifs_sb->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
-			cFYI(1,("set POSIX ACL rc %d",rc));
+			cFYI(1, ("set POSIX ACL rc %d", rc));
 #else
-			cFYI(1,("set POSIX ACL not supported"));
+			cFYI(1, ("set POSIX ACL not supported"));
 #endif
-		} else if(strncmp(ea_name,POSIX_ACL_XATTR_DEFAULT,strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
+		} else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
+				   strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
 #ifdef CONFIG_CIFS_POSIX
-			if(sb->s_flags & MS_POSIXACL)
-				rc = CIFSSMBSetPosixACL(xid, pTcon,full_path,
-					ea_value, (const int)value_size, 
+			if (sb->s_flags & MS_POSIXACL)
+				rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
+					ea_value, (const int)value_size,
 					ACL_TYPE_DEFAULT, cifs_sb->local_nls,
-					cifs_sb->mnt_cifs_flags & 
+					cifs_sb->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
-			cFYI(1,("set POSIX default ACL rc %d",rc));
+			cFYI(1, ("set POSIX default ACL rc %d", rc));
 #else
-			cFYI(1,("set default POSIX ACL not supported"));
+			cFYI(1, ("set default POSIX ACL not supported"));
 #endif
 		} else {
-			cFYI(1,("illegal xattr request %s (only user namespace supported)",ea_name));
+			cFYI(1, ("illegal xattr request %s (only user namespace"
+				 " supported)", ea_name));
 		  /* BB what if no namespace prefix? */
-		  /* Should we just pass them to server, except for 
+		  /* Should we just pass them to server, except for
 		  system and perhaps security prefixes? */
 		}
 	}
@@ -195,23 +199,23 @@
 	return rc;
 }
 
-ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
-         void * ea_value, size_t buf_size)
+ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
+	void *ea_value, size_t buf_size)
 {
 	ssize_t rc = -EOPNOTSUPP;
 #ifdef CONFIG_CIFS_XATTR
 	int xid;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
-	struct super_block * sb;
-	char * full_path;
+	struct super_block *sb;
+	char *full_path;
 
-	if(direntry == NULL)
+	if (direntry == NULL)
 		return -EIO;
-	if(direntry->d_inode == NULL)
+	if (direntry->d_inode == NULL)
 		return -EIO;
 	sb = direntry->d_inode->i_sb;
-	if(sb == NULL)
+	if (sb == NULL)
 		return -EIO;
 
 	xid = GetXid();
@@ -220,42 +224,42 @@
 	pTcon = cifs_sb->tcon;
 
 	full_path = build_path_from_dentry(direntry);
-	if(full_path == NULL) {
+	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
 	}
 	/* return dos attributes as pseudo xattr */
 	/* return alt name if available as pseudo attr */
-	if(ea_name == NULL) {
-		cFYI(1,("Null xattr names not supported"));
-	} else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5) == 0) {
-		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+	if (ea_name == NULL) {
+		cFYI(1, ("Null xattr names not supported"));
+	} else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
+		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
 			goto get_ea_exit;
 
-		if(strncmp(ea_name,CIFS_XATTR_DOS_ATTRIB,14) == 0) {
-			cFYI(1,("attempt to query cifs inode metadata"));
+		if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) {
+			cFYI(1, ("attempt to query cifs inode metadata"));
 			/* revalidate/getattr then populate from inode */
 		} /* BB add else when above is implemented */
 		ea_name += 5; /* skip past user. prefix */
-		rc = CIFSSMBQueryEA(xid,pTcon,full_path,ea_name,ea_value,
+		rc = CIFSSMBQueryEA(xid, pTcon, full_path, ea_name, ea_value,
 			buf_size, cifs_sb->local_nls,
 			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
-	} else if(strncmp(ea_name, CIFS_XATTR_OS2_PREFIX,4) == 0) {
-		if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+	} else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
+		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
 			goto get_ea_exit;
 
 		ea_name += 4; /* skip past os2. prefix */
-		rc = CIFSSMBQueryEA(xid,pTcon,full_path,ea_name,ea_value,
+		rc = CIFSSMBQueryEA(xid, pTcon, full_path, ea_name, ea_value,
 			buf_size, cifs_sb->local_nls,
 			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
-	} else if(strncmp(ea_name,POSIX_ACL_XATTR_ACCESS,
+	} else if (strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
 			  strlen(POSIX_ACL_XATTR_ACCESS)) == 0) {
 #ifdef CONFIG_CIFS_POSIX
-		if(sb->s_flags & MS_POSIXACL)
+		if (sb->s_flags & MS_POSIXACL)
 			rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
-				ea_value, buf_size, ACL_TYPE_ACCESS, 
+				ea_value, buf_size, ACL_TYPE_ACCESS,
 				cifs_sb->local_nls,
-				cifs_sb->mnt_cifs_flags & 
+				cifs_sb->mnt_cifs_flags &
 					CIFS_MOUNT_MAP_SPECIAL_CHR);
 /*		else if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
 			__u16 fid;
@@ -272,39 +276,40 @@
 				CIFSSMBClose(xid, pTcon, fid);
 			}
 		} */  /* BB enable after fixing up return data */
-                  		
-#else 
-		cFYI(1,("query POSIX ACL not supported yet"));
+#else
+		cFYI(1, ("query POSIX ACL not supported yet"));
 #endif /* CONFIG_CIFS_POSIX */
-	} else if(strncmp(ea_name,POSIX_ACL_XATTR_DEFAULT,
+	} else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
 			  strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
 #ifdef CONFIG_CIFS_POSIX
-		if(sb->s_flags & MS_POSIXACL)
+		if (sb->s_flags & MS_POSIXACL)
 			rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
-				ea_value, buf_size, ACL_TYPE_DEFAULT, 
+				ea_value, buf_size, ACL_TYPE_DEFAULT,
 				cifs_sb->local_nls,
-				cifs_sb->mnt_cifs_flags & 
+				cifs_sb->mnt_cifs_flags &
 					CIFS_MOUNT_MAP_SPECIAL_CHR);
-#else 
-		cFYI(1,("query POSIX default ACL not supported yet"));
+#else
+		cFYI(1, ("query POSIX default ACL not supported yet"));
 #endif
-	} else if(strncmp(ea_name,
-		  CIFS_XATTR_TRUSTED_PREFIX,XATTR_TRUSTED_PREFIX_LEN) == 0) {
-		cFYI(1,("Trusted xattr namespace not supported yet"));
-	} else if(strncmp(ea_name,
-		  CIFS_XATTR_SECURITY_PREFIX,XATTR_SECURITY_PREFIX_LEN) == 0) {
-		cFYI(1,("Security xattr namespace not supported yet"));
+	} else if (strncmp(ea_name,
+		  CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
+		cFYI(1, ("Trusted xattr namespace not supported yet"));
+	} else if (strncmp(ea_name,
+		  CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
+		cFYI(1, ("Security xattr namespace not supported yet"));
 	} else {
-		cFYI(1,("illegal xattr name request %s (only user namespace supported)",ea_name));
+		cFYI(1,
+		    ("illegal xattr request %s (only user namespace supported)",
+			ea_name));
 	}
 
-	/* We could add an additional check for streams ie 
+	/* We could add an additional check for streams ie
 	    if proc/fs/cifs/streamstoxattr is set then
-		search server for EAs or streams to 
+		search server for EAs or streams to
 		returns as xattrs */
 
-	if(rc == -EINVAL)
-		rc = -EOPNOTSUPP; 
+	if (rc == -EINVAL)
+		rc = -EOPNOTSUPP;
 
 get_ea_exit:
 	kfree(full_path);
@@ -313,34 +318,34 @@
 	return rc;
 }
 
-ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size)
+ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
 {
 	ssize_t rc = -EOPNOTSUPP;
 #ifdef CONFIG_CIFS_XATTR
 	int xid;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
-	struct super_block * sb;
-	char * full_path;
+	struct super_block *sb;
+	char *full_path;
 
-	if(direntry == NULL)
+	if (direntry == NULL)
 		return -EIO;
-	if(direntry->d_inode == NULL)
+	if (direntry->d_inode == NULL)
 		return -EIO;
 	sb = direntry->d_inode->i_sb;
-	if(sb == NULL)
+	if (sb == NULL)
 		return -EIO;
 
 	cifs_sb = CIFS_SB(sb);
 	pTcon = cifs_sb->tcon;
 
-	if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
 		return -EOPNOTSUPP;
 
 	xid = GetXid();
 
 	full_path = build_path_from_dentry(direntry);
-	if(full_path == NULL) {
+	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
 	}
@@ -348,11 +353,11 @@
 	/* return alt name if available as pseudo attr */
 
 	/* if proc/fs/cifs/streamstoxattr is set then
-		search server for EAs or streams to 
+		search server for EAs or streams to
 		returns as xattrs */
-	rc = CIFSSMBQAllEAs(xid,pTcon,full_path,data,buf_size,
+	rc = CIFSSMBQAllEAs(xid, pTcon, full_path, data, buf_size,
 				cifs_sb->local_nls,
-				cifs_sb->mnt_cifs_flags & 
+				cifs_sb->mnt_cifs_flags &
 					CIFS_MOUNT_MAP_SPECIAL_CHR);
 
 	kfree(full_path);
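
The xattr entry points above all follow the same dispatch: only names in the "user." and "os2." namespaces are forwarded to the server, with the prefix stripped first, and anything else is rejected. A small userspace sketch of that dispatch, for illustration only (forward_ea() is a hypothetical stand-in for CIFSSMBSetEA()/CIFSSMBQueryEA()):

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int forward_ea(const char *name)		/* placeholder for the SMB call */
{
	printf("sending EA \"%s\" to the server\n", name);
	return 0;
}

static int dispatch_ea(const char *ea_name)
{
	if (ea_name == NULL)
		return -EOPNOTSUPP;			/* null names not supported */
	if (strncmp(ea_name, "user.", 5) == 0)
		return forward_ea(ea_name + 5);		/* skip past user. prefix */
	if (strncmp(ea_name, "os2.", 4) == 0)
		return forward_ea(ea_name + 4);		/* skip past os2. prefix */
	return -EOPNOTSUPP;				/* only user namespace supported */
}

int main(void)
{
	dispatch_ea("user.comment");
	printf("trusted.secret -> %d\n", dispatch_ea("trusted.secret"));
	return 0;
}
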
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
index fcb88fa..8a23703 100644
--- a/fs/coda/cache.c
+++ b/fs/coda/cache.c
@@ -43,17 +43,12 @@
 void coda_cache_clear_inode(struct inode *inode)
 {
 	struct coda_inode_info *cii = ITOC(inode);
-        cii->c_cached_perm = 0;
+	cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
 }
 
 /* remove all acl caches */
 void coda_cache_clear_all(struct super_block *sb)
 {
-        struct coda_sb_info *sbi;
-
-        sbi = coda_sbp(sb);
-	BUG_ON(!sbi);
-
 	atomic_inc(&permission_epoch);
 }
 
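
The rewritten cache code above invalidates with a generation counter rather than by touching every entry: a per-inode flush simply backdates that inode's epoch, and a global flush bumps the shared permission_epoch so all existing entries become stale at once. A minimal model of the idea, illustration only and not Coda code:

#include <stdio.h>

static unsigned int permission_epoch = 1;	/* shared generation counter */

struct cached_perm {
	unsigned int epoch;	/* generation this entry was filled in */
	int mask;		/* cached access bits */
};

static int cache_valid(const struct cached_perm *cp)
{
	return cp->epoch == permission_epoch;
}

static void cache_clear_inode(struct cached_perm *cp)
{
	cp->epoch = permission_epoch - 1;	/* backdate just this entry */
}

static void cache_clear_all(void)
{
	permission_epoch++;			/* every existing entry is now stale */
}

int main(void)
{
	struct cached_perm cp = { permission_epoch, 4 };

	printf("valid=%d\n", cache_valid(&cp));	/* 1 */
	cache_clear_inode(&cp);
	printf("valid=%d\n", cache_valid(&cp));	/* 0 */
	cache_clear_all();	/* would also invalidate every other cached entry */
	return 0;
}
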
diff --git a/fs/coda/cnode.c b/fs/coda/cnode.c
index 28c8727..a7a7809 100644
--- a/fs/coda/cnode.c
+++ b/fs/coda/cnode.c
@@ -55,11 +55,6 @@
 	return 0;
 }
 
-static int coda_fail_inode(struct inode *inode, void *data)
-{
-	return -1;
-}
-
 struct inode * coda_iget(struct super_block * sb, struct CodaFid * fid,
 			 struct coda_vattr * attr)
 {
@@ -141,7 +136,7 @@
 		return NULL;
 	}
 
-	inode = iget5_locked(sb, hash, coda_test_inode, coda_fail_inode, fid);
+	inode = ilookup5(sb, hash, coda_test_inode, fid);
 	if ( !inode )
 		return NULL;
 
diff --git a/fs/coda/coda_int.h b/fs/coda/coda_int.h
index 9e6338f..8ccd5ed 100644
--- a/fs/coda/coda_int.h
+++ b/fs/coda/coda_int.h
@@ -1,12 +1,19 @@
 #ifndef _CODA_INT_
 #define _CODA_INT_
 
+struct dentry;
+
 extern struct file_system_type coda_fs_type;
+extern unsigned long coda_timeout;
+extern int coda_hard;
+extern int coda_fake_statfs;
 
 void coda_destroy_inodecache(void);
 int coda_init_inodecache(void);
 int coda_fsync(struct file *coda_file, struct dentry *coda_dentry,
 	       int datasync);
+void coda_sysctl_init(void);
+void coda_sysctl_clean(void);
 
 #endif  /*  _CODA_INT_  */
 
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 898a86d..04a3dd8 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -25,7 +25,6 @@
 #include <linux/coda_psdev.h>
 #include <linux/coda_fs_i.h>
 #include <linux/coda_cache.h>
-#include <linux/coda_proc.h>
 
 #include "coda_int.h"
 
@@ -43,15 +42,15 @@
                        struct inode *new_inode, struct dentry *new_dentry);
 
 /* dir file-ops */
-static int coda_readdir(struct file *file, void *dirent, filldir_t filldir);
+static int coda_readdir(struct file *file, void *buf, filldir_t filldir);
 
 /* dentry ops */
 static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd);
 static int coda_dentry_delete(struct dentry *);
 
 /* support routines */
-static int coda_venus_readdir(struct file *filp, filldir_t filldir,
-			      void *dirent, struct dentry *dir);
+static int coda_venus_readdir(struct file *coda_file, void *buf,
+			      filldir_t filldir);
 
 /* same as fs/bad_inode.c */
 static int coda_return_EIO(void)
@@ -97,58 +96,45 @@
 /* access routines: lookup, readlink, permission */
 static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, struct nameidata *nd)
 {
-	struct inode *res_inode = NULL;
+	struct inode *inode = NULL;
 	struct CodaFid resfid = { { 0, } };
-	int dropme = 0; /* to indicate entry should not be cached */
 	int type = 0;
 	int error = 0;
 	const char *name = entry->d_name.name;
 	size_t length = entry->d_name.len;
-	
-	if ( length > CODA_MAXNAMLEN ) {
-	        printk("name too long: lookup, %s (%*s)\n", 
+
+	if (length > CODA_MAXNAMLEN) {
+		printk(KERN_ERR "name too long: lookup, %s (%*s)\n",
 		       coda_i2s(dir), (int)length, name);
 		return ERR_PTR(-ENAMETOOLONG);
 	}
 
-	lock_kernel();
-        /* control object, create inode on the fly */
-        if (coda_isroot(dir) && coda_iscontrol(name, length)) {
-	        error = coda_cnode_makectl(&res_inode, dir->i_sb);
-		dropme = 1;
-                goto exit;
-        }
-
-	error = venus_lookup(dir->i_sb, coda_i2f(dir), 
-			     (const char *)name, length, &type, &resfid);
-
-	res_inode = NULL;
-	if (!error) {
-		if (type & CODA_NOCACHE) {
-			type &= (~CODA_NOCACHE);
-			dropme = 1;
-		}
-
-	    	error = coda_cnode_make(&res_inode, &resfid, dir->i_sb);
-		if (error) {
-			unlock_kernel();
-			return ERR_PTR(error);
-		}
-	} else if (error != -ENOENT) {
-		unlock_kernel();
-		return ERR_PTR(error);
+	/* control object, create inode on the fly */
+	if (coda_isroot(dir) && coda_iscontrol(name, length)) {
+		error = coda_cnode_makectl(&inode, dir->i_sb);
+		type = CODA_NOCACHE;
+		goto exit;
 	}
 
+	lock_kernel();
+
+	error = venus_lookup(dir->i_sb, coda_i2f(dir), name, length,
+			     &type, &resfid);
+	if (!error)
+		error = coda_cnode_make(&inode, &resfid, dir->i_sb);
+
+	unlock_kernel();
+
+	if (error && error != -ENOENT)
+		return ERR_PTR(error);
+
 exit:
-	entry->d_time = 0;
 	entry->d_op = &coda_dentry_operations;
-	d_add(entry, res_inode);
-	if ( dropme ) {
-		d_drop(entry);
-		coda_flag_inode(res_inode, C_VATTR);
-	}
-	unlock_kernel();
-        return NULL;
+
+	if (inode && (type & CODA_NOCACHE))
+		coda_flag_inode(inode, C_VATTR | C_PURGE);
+
+	return d_splice_alias(inode, entry);
 }
 
 
@@ -161,8 +147,6 @@
 
 	lock_kernel();
 
-	coda_vfs_stat.permission++;
-
 	if (coda_cache_check(inode, mask))
 		goto out; 
 
@@ -173,12 +157,11 @@
 
  out:
 	unlock_kernel();
-
-        return error; 
+	return error;
 }
 
 
-static inline void coda_dir_changed(struct inode *dir, int link)
+static inline void coda_dir_update_mtime(struct inode *dir)
 {
 #ifdef REQUERY_VENUS_FOR_MTIME
 	/* invalidate the directory cnode's attributes so we refetch the
@@ -186,12 +169,27 @@
 	coda_flag_inode(dir, C_VATTR);
 #else
 	/* optimistically we can also act as if our nose bleeds. The
-         * granularity of the mtime is coarse anyways so we might actually be
-         * right most of the time. Note: we only do this for directories. */
+	 * granularity of the mtime is coarse anyways so we might actually be
+	 * right most of the time. Note: we only do this for directories. */
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
 #endif
-	if (link)
-		dir->i_nlink += link;
+}
+
+/* we have to wrap inc_nlink/drop_nlink because sometimes userspace uses a
+ * trick to fool GNU find's optimizations. If we can't be sure of the link
+ * (because of volume mount points) we set i_nlink to 1 which forces find
+ * to consider every child as a possible directory. We should also never
+ * see an increment or decrement for deleted directories where i_nlink == 0 */
+static inline void coda_dir_inc_nlink(struct inode *dir)
+{
+	if (dir->i_nlink >= 2)
+		inc_nlink(dir);
+}
+
+static inline void coda_dir_drop_nlink(struct inode *dir)
+{
+	if (dir->i_nlink > 2)
+		drop_nlink(dir);
 }
 
 /* creation routines: create, mknod, mkdir, link, symlink */
@@ -205,7 +203,6 @@
 	struct coda_vattr attrs;
 
 	lock_kernel();
-	coda_vfs_stat.create++;
 
 	if (coda_isroot(dir) && coda_iscontrol(name, length)) {
 		unlock_kernel();
@@ -229,10 +226,10 @@
 	}
 
 	/* invalidate the directory cnode's attributes */
-	coda_dir_changed(dir, 0);
+	coda_dir_update_mtime(dir);
 	unlock_kernel();
 	d_instantiate(de, inode);
-        return 0;
+	return 0;
 }
 
 static int coda_mkdir(struct inode *dir, struct dentry *de, int mode)
@@ -245,7 +242,6 @@
 	struct CodaFid newfid;
 
 	lock_kernel();
-	coda_vfs_stat.mkdir++;
 
 	if (coda_isroot(dir) && coda_iscontrol(name, len)) {
 		unlock_kernel();
@@ -268,12 +264,13 @@
 		d_drop(de);
 		return PTR_ERR(inode);
 	}
-	
+
 	/* invalidate the directory cnode's attributes */
-	coda_dir_changed(dir, 1);
+	coda_dir_inc_nlink(dir);
+	coda_dir_update_mtime(dir);
 	unlock_kernel();
 	d_instantiate(de, inode);
-        return 0;
+	return 0;
 }
 
 /* try to make de an entry in dir_inodde linked to source_de */ 
@@ -286,7 +283,6 @@
 	int error;
 
 	lock_kernel();
-	coda_vfs_stat.link++;
 
 	if (coda_isroot(dir_inode) && coda_iscontrol(name, len)) {
 		unlock_kernel();
@@ -296,16 +292,16 @@
 	error = venus_link(dir_inode->i_sb, coda_i2f(inode),
 			   coda_i2f(dir_inode), (const char *)name, len);
 
-	if (error) { 
+	if (error) {
 		d_drop(de);
 		goto out;
 	}
 
-	coda_dir_changed(dir_inode, 0);
+	coda_dir_update_mtime(dir_inode);
 	atomic_inc(&inode->i_count);
 	d_instantiate(de, inode);
 	inc_nlink(inode);
-        
+
 out:
 	unlock_kernel();
 	return(error);
@@ -318,10 +314,9 @@
         const char *name = de->d_name.name;
 	int len = de->d_name.len;
 	int symlen;
-        int error=0;
-        
+	int error = 0;
+
 	lock_kernel();
-	coda_vfs_stat.symlink++;
 
 	if (coda_isroot(dir_inode) && coda_iscontrol(name, len)) {
 		unlock_kernel();
@@ -336,18 +331,18 @@
 
 	/*
 	 * This entry is now negative. Since we do not create
-	 * an inode for the entry we have to drop it. 
+	 * an inode for the entry we have to drop it.
 	 */
 	d_drop(de);
-	error = venus_symlink(dir_inode->i_sb, coda_i2f(dir_inode), name, len, 
+	error = venus_symlink(dir_inode->i_sb, coda_i2f(dir_inode), name, len,
 			      symname, symlen);
 
 	/* mtime is no good anymore */
 	if ( !error )
-		coda_dir_changed(dir_inode, 0);
+		coda_dir_update_mtime(dir_inode);
 
 	unlock_kernel();
-        return error;
+	return error;
 }
 
 /* destruction routines: unlink, rmdir */
@@ -358,79 +353,70 @@
 	int len = de->d_name.len;
 
 	lock_kernel();
-	coda_vfs_stat.unlink++;
 
-        error = venus_remove(dir->i_sb, coda_i2f(dir), name, len);
-        if ( error ) {
+	error = venus_remove(dir->i_sb, coda_i2f(dir), name, len);
+	if ( error ) {
 		unlock_kernel();
-                return error;
-        }
+		return error;
+	}
 
-	coda_dir_changed(dir, 0);
+	coda_dir_update_mtime(dir);
 	drop_nlink(de->d_inode);
 	unlock_kernel();
-
-        return 0;
+	return 0;
 }
 
 int coda_rmdir(struct inode *dir, struct dentry *de)
 {
 	const char *name = de->d_name.name;
 	int len = de->d_name.len;
-        int error;
+	int error;
 
 	lock_kernel();
-	coda_vfs_stat.rmdir++;
 
-	if (!d_unhashed(de)) {
-		unlock_kernel();
-		return -EBUSY;
-	}
 	error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len);
+	if (!error) {
+		/* VFS may delete the child */
+		if (de->d_inode)
+		    de->d_inode->i_nlink = 0;
 
-        if ( error ) {
-		unlock_kernel();
-                return error;
-        }
-
-	coda_dir_changed(dir, -1);
-	drop_nlink(de->d_inode);
-	d_delete(de);
+		/* fix the link count of the parent */
+		coda_dir_drop_nlink(dir);
+		coda_dir_update_mtime(dir);
+	}
 	unlock_kernel();
-
-        return 0;
+	return error;
 }
 
 /* rename */
-static int coda_rename(struct inode *old_dir, struct dentry *old_dentry, 
+static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
 		       struct inode *new_dir, struct dentry *new_dentry)
 {
-        const char *old_name = old_dentry->d_name.name;
-        const char *new_name = new_dentry->d_name.name;
+	const char *old_name = old_dentry->d_name.name;
+	const char *new_name = new_dentry->d_name.name;
 	int old_length = old_dentry->d_name.len;
 	int new_length = new_dentry->d_name.len;
-        int link_adjust = 0;
-        int error;
+	int error;
 
 	lock_kernel();
-	coda_vfs_stat.rename++;
 
-        error = venus_rename(old_dir->i_sb, coda_i2f(old_dir), 
-			     coda_i2f(new_dir), old_length, new_length, 
+	error = venus_rename(old_dir->i_sb, coda_i2f(old_dir),
+			     coda_i2f(new_dir), old_length, new_length,
 			     (const char *) old_name, (const char *)new_name);
 
-        if ( !error ) {
+	if ( !error ) {
 		if ( new_dentry->d_inode ) {
-			if ( S_ISDIR(new_dentry->d_inode->i_mode) )
-                        	link_adjust = 1;
-
-                        coda_dir_changed(old_dir, -link_adjust);
-                        coda_dir_changed(new_dir,  link_adjust);
+			if ( S_ISDIR(new_dentry->d_inode->i_mode) ) {
+				coda_dir_drop_nlink(old_dir);
+				coda_dir_inc_nlink(new_dir);
+			}
+			coda_dir_update_mtime(old_dir);
+			coda_dir_update_mtime(new_dir);
 			coda_flag_inode(new_dentry->d_inode, C_VATTR);
 		} else {
 			coda_flag_inode(old_dir, C_VATTR);
 			coda_flag_inode(new_dir, C_VATTR);
-                }
+		}
 	}
 	unlock_kernel();
 
@@ -439,44 +425,41 @@
 
 
 /* file operations for directories */
-int coda_readdir(struct file *coda_file, void *dirent, filldir_t filldir)
+int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir)
 {
-	struct dentry *coda_dentry = coda_file->f_path.dentry;
 	struct coda_file_info *cfi;
 	struct file *host_file;
-	struct inode *host_inode;
 	int ret;
 
 	cfi = CODA_FTOC(coda_file);
 	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
 	host_file = cfi->cfi_container;
 
-	coda_vfs_stat.readdir++;
+	if (!host_file->f_op)
+		return -ENOTDIR;
 
-	host_inode = host_file->f_path.dentry->d_inode;
-	mutex_lock(&host_inode->i_mutex);
-	host_file->f_pos = coda_file->f_pos;
+	if (host_file->f_op->readdir)
+	{
+		/* potemkin case: we were handed a directory inode.
+		 * We can't use vfs_readdir because we have to keep the file
+		 * position in sync between the coda_file and the host_file.
+		 * and as such we need grab the inode mutex. */
+		struct inode *host_inode = host_file->f_path.dentry->d_inode;
 
-	if (!host_file->f_op->readdir) {
-		/* Venus: we must read Venus dirents from the file */
-		ret = coda_venus_readdir(host_file, filldir, dirent, coda_dentry);
-	} else {
-		/* potemkin case: we were handed a directory inode. */
-		/* Yuk, we can't call vfs_readdir because we are already
-		 * holding the inode semaphore. */
-		ret = -ENOTDIR;
-		if (!host_file->f_op || !host_file->f_op->readdir)
-			goto out;
+		mutex_lock(&host_inode->i_mutex);
+		host_file->f_pos = coda_file->f_pos;
 
 		ret = -ENOENT;
 		if (!IS_DEADDIR(host_inode)) {
-			ret = host_file->f_op->readdir(host_file, dirent, filldir);
+			ret = host_file->f_op->readdir(host_file, buf, filldir);
 			file_accessed(host_file);
 		}
+
+		coda_file->f_pos = host_file->f_pos;
+		mutex_unlock(&host_inode->i_mutex);
 	}
-out:
-	coda_file->f_pos = host_file->f_pos;
-	mutex_unlock(&host_inode->i_mutex);
+	else /* Venus: we must read Venus dirents from a file */
+		ret = coda_venus_readdir(coda_file, buf, filldir);
 
 	return ret;
 }
@@ -501,57 +484,68 @@
 }
 
 /* support routines */
-static int coda_venus_readdir(struct file *filp, filldir_t filldir,
-			      void *dirent, struct dentry *dir)
+static int coda_venus_readdir(struct file *coda_file, void *buf,
+			      filldir_t filldir)
 {
 	int result = 0; /* # of entries returned */
+	struct coda_file_info *cfi;
+	struct coda_inode_info *cii;
+	struct file *host_file;
+	struct dentry *de;
 	struct venus_dirent *vdir;
 	unsigned long vdir_size =
 	    (unsigned long)(&((struct venus_dirent *)0)->d_name);
 	unsigned int type;
 	struct qstr name;
 	ino_t ino;
-	int ret, i;
+	int ret;
+
+	cfi = CODA_FTOC(coda_file);
+	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
+	host_file = cfi->cfi_container;
+
+	de = coda_file->f_path.dentry;
+	cii = ITOC(de->d_inode);
 
 	vdir = kmalloc(sizeof(*vdir), GFP_KERNEL);
 	if (!vdir) return -ENOMEM;
 
-	i = filp->f_pos;
-	switch(i) {
+	switch (coda_file->f_pos) {
 	case 0:
-		ret = filldir(dirent, ".", 1, 0, dir->d_inode->i_ino, DT_DIR);
+		ret = filldir(buf, ".", 1, 0, de->d_inode->i_ino, DT_DIR);
 		if (ret < 0) break;
 		result++;
-		filp->f_pos++;
+		coda_file->f_pos++;
 		/* fallthrough */
 	case 1:
-		ret = filldir(dirent, "..", 2, 1, dir->d_parent->d_inode->i_ino, DT_DIR);
+		ret = filldir(buf, "..", 2, 1, de->d_parent->d_inode->i_ino, DT_DIR);
 		if (ret < 0) break;
 		result++;
-		filp->f_pos++;
+		coda_file->f_pos++;
 		/* fallthrough */
 	default:
 	while (1) {
 		/* read entries from the directory file */
-		ret = kernel_read(filp, filp->f_pos - 2, (char *)vdir,
+		ret = kernel_read(host_file, coda_file->f_pos - 2, (char *)vdir,
 				  sizeof(*vdir));
 		if (ret < 0) {
-			printk("coda_venus_readdir: read dir failed %d\n", ret);
+			printk(KERN_ERR "coda readdir: read dir %s failed %d\n",
+			       coda_f2s(&cii->c_fid), ret);
 			break;
 		}
 		if (ret == 0) break; /* end of directory file reached */
 
 		/* catch truncated reads */
 		if (ret < vdir_size || ret < vdir_size + vdir->d_namlen) {
-			printk("coda_venus_readdir: short read: %ld\n",
-			       filp->f_path.dentry->d_inode->i_ino);
+			printk(KERN_ERR "coda readdir: short read on %s\n",
+			       coda_f2s(&cii->c_fid));
 			ret = -EBADF;
 			break;
 		}
 		/* validate whether the directory file actually makes sense */
 		if (vdir->d_reclen < vdir_size + vdir->d_namlen) {
-			printk("coda_venus_readdir: Invalid dir: %ld\n",
-			       filp->f_path.dentry->d_inode->i_ino);
+			printk(KERN_ERR "coda readdir: invalid dir %s\n",
+			       coda_f2s(&cii->c_fid));
 			ret = -EBADF;
 			break;
 		}
@@ -570,21 +564,21 @@
 			 * userspace doesn't have to worry about breaking
 			 * getcwd by having mismatched inode numbers for
 			 * internal volume mountpoints. */
-			ino = find_inode_number(dir, &name);
+			ino = find_inode_number(de, &name);
 			if (!ino) ino = vdir->d_fileno;
 
 			type = CDT2DT(vdir->d_type);
-			ret = filldir(dirent, name.name, name.len, filp->f_pos,
-				      ino, type); 
+			ret = filldir(buf, name.name, name.len,
+				      coda_file->f_pos, ino, type);
 			/* failure means no space for filling in this round */
 			if (ret < 0) break;
 			result++;
 		}
 		/* we'll always have progress because d_reclen is unsigned and
 		 * we've already established it is non-zero. */
-		filp->f_pos += vdir->d_reclen;
+		coda_file->f_pos += vdir->d_reclen;
 	}
-	} 
+	}
 	kfree(vdir);
 	return result ? result : ret;
 }
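
The coda_dir_inc_nlink()/coda_dir_drop_nlink() helpers introduced above only adjust link counts they can trust: a directory pinned to i_nlink == 1 (unknown, because of volume mount points) or already at 0 (deleted) is left alone. A tiny standalone model of those guards, for illustration only:

#include <stdio.h>

static void dir_inc_nlink(unsigned int *nlink)
{
	if (*nlink >= 2)		/* 0 = deleted, 1 = "don't know" */
		(*nlink)++;
}

static void dir_drop_nlink(unsigned int *nlink)
{
	if (*nlink > 2)			/* never drop below the "." and ".." links */
		(*nlink)--;
}

int main(void)
{
	unsigned int pinned = 1, normal = 3;

	dir_inc_nlink(&pinned);		/* stays 1: the real count is unknown */
	dir_drop_nlink(&normal);	/* 3 -> 2 */
	printf("pinned=%u normal=%u\n", pinned, normal);
	return 0;
}
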
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 99dbe86..7594962 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -22,7 +22,6 @@
 #include <linux/coda_linux.h>
 #include <linux/coda_fs_i.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_proc.h>
 
 #include "coda_int.h"
 
@@ -134,8 +133,6 @@
 	unsigned short coda_flags = coda_flags_to_cflags(flags);
 	struct coda_file_info *cfi;
 
-	coda_vfs_stat.open++;
-
 	cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL);
 	if (!cfi)
 		return -ENOMEM;
@@ -143,8 +140,11 @@
 	lock_kernel();
 
 	error = venus_open(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags,
-			   &host_file); 
-	if (error || !host_file) {
+			   &host_file);
+	if (!host_file)
+		error = -EIO;
+
+	if (error) {
 		kfree(cfi);
 		unlock_kernel();
 		return error;
@@ -173,8 +173,6 @@
 
 	lock_kernel();
 
-	coda_vfs_stat.flush++;
-
 	/* last close semantics */
 	fcnt = file_count(coda_file);
 	if (fcnt > 1)
@@ -216,8 +214,7 @@
 	int err = 0;
 
 	lock_kernel();
-	coda_vfs_stat.release++;
- 
+
 	if (!use_coda_close) {
 		err = venus_release(coda_inode->i_sb, coda_i2f(coda_inode),
 				    coda_flags);
@@ -268,8 +265,6 @@
 	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
 	host_file = cfi->cfi_container;
 
-	coda_vfs_stat.fsync++;
-
 	if (host_file->f_op && host_file->f_op->fsync) {
 		host_dentry = host_file->f_path.dentry;
 		host_inode = host_dentry->d_inode;
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index dbff1bd..6771a42 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -83,7 +83,7 @@
 
 static int coda_remount(struct super_block *sb, int *flags, char *data)
 {
-	*flags |= MS_NODIRATIME;
+	*flags |= MS_NOATIME;
 	return 0;
 }
 
@@ -141,11 +141,10 @@
 
 static int coda_fill_super(struct super_block *sb, void *data, int silent)
 {
-        struct inode *root = NULL; 
-	struct coda_sb_info *sbi = NULL;
+	struct inode *root = NULL;
 	struct venus_comm *vc = NULL;
 	struct CodaFid fid;
-        int error;
+	int error;
 	int idx;
 
 	idx = get_device_index((struct coda_mount_data *) data);
@@ -167,21 +166,14 @@
 		return -EBUSY;
 	}
 
-	sbi = kmalloc(sizeof(struct coda_sb_info), GFP_KERNEL);
-	if(!sbi) {
-		return -ENOMEM;
-	}
-
 	vc->vc_sb = sb;
 
-	sbi->sbi_vcomm = vc;
-
-        sb->s_fs_info = sbi;
-	sb->s_flags |= MS_NODIRATIME; /* probably even noatime */
-        sb->s_blocksize = 1024;	/* XXXXX  what do we put here?? */
-        sb->s_blocksize_bits = 10;
-        sb->s_magic = CODA_SUPER_MAGIC;
-        sb->s_op = &coda_super_operations;
+	sb->s_fs_info = vc;
+	sb->s_flags |= MS_NOATIME;
+	sb->s_blocksize = 4096;	/* XXXXX  what do we put here?? */
+	sb->s_blocksize_bits = 12;
+	sb->s_magic = CODA_SUPER_MAGIC;
+	sb->s_op = &coda_super_operations;
 
 	/* get root fid from Venus: this needs the root inode */
 	error = venus_rootfid(sb, &fid);
@@ -207,26 +199,20 @@
         return 0;
 
  error:
-	if (sbi) {
-		kfree(sbi);
-		if(vc)
-			vc->vc_sb = NULL;		
-	}
 	if (root)
-                iput(root);
+		iput(root);
+	if (vc)
+		vc->vc_sb = NULL;
 
-        return -EINVAL;
+	return -EINVAL;
 }
 
 static void coda_put_super(struct super_block *sb)
 {
-        struct coda_sb_info *sbi;
-
-	sbi = coda_sbp(sb);
-	sbi->sbi_vcomm->vc_sb = NULL;
+	coda_vcp(sb)->vc_sb = NULL;
+	sb->s_fs_info = NULL;
 
 	printk("Coda: Bye bye.\n");
-	kfree(sbi);
 }
 
 static void coda_clear_inode(struct inode *inode)
@@ -296,7 +282,7 @@
 
 	/* and fill in the rest */
 	buf->f_type = CODA_SUPER_MAGIC;
-	buf->f_bsize = 1024;
+	buf->f_bsize = 4096;
 	buf->f_namelen = CODA_MAXNAMLEN;
 
 	return 0; 
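
The superblock changes above move Coda from 1024-byte to 4096-byte blocks; s_blocksize and s_blocksize_bits have to stay consistent (blocksize == 1 << blocksize_bits), which is why coda_fill_super() now sets 4096/12 and coda_statfs() reports f_bsize = 4096. A one-line sanity check of that relationship, illustration only:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long blocksize = 4096;
	unsigned int blocksize_bits = 12;

	assert(blocksize == 1UL << blocksize_bits);	/* 4096 == 2^12 */
	printf("%lu-byte blocks, %u bits\n", blocksize, blocksize_bits);
	return 0;
}
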
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 803aacf..dcc6aea 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -45,12 +45,9 @@
 #include <linux/coda_linux.h>
 #include <linux/coda_fs_i.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_proc.h>
 
 #include "coda_int.h"
 
-#define upc_free(r) kfree(r)
-
 /* statistics */
 int           coda_hard;         /* allows signals during upcalls */
 unsigned long coda_timeout = 30; /* .. secs, then signals will dequeue */
@@ -195,7 +192,8 @@
 	if (req->uc_opcode == CODA_OPEN_BY_FD) {
 		struct coda_open_by_fd_out *outp =
 			(struct coda_open_by_fd_out *)req->uc_data;
-		outp->fh = fget(outp->fd);
+		if (!outp->oh.result)
+			outp->fh = fget(outp->fd);
 	}
 
         wake_up(&req->uc_sleep);
@@ -263,7 +261,7 @@
 	}
 
 	CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
-	upc_free(req);
+	kfree(req);
 out:
 	unlock_kernel();
 	return (count ? count : retval);
@@ -271,71 +269,70 @@
 
 static int coda_psdev_open(struct inode * inode, struct file * file)
 {
-        struct venus_comm *vcp;
-	int idx;
+	struct venus_comm *vcp;
+	int idx, err;
+
+	idx = iminor(inode);
+	if (idx < 0 || idx >= MAX_CODADEVS)
+		return -ENODEV;
 
 	lock_kernel();
-	idx = iminor(inode);
-	if(idx >= MAX_CODADEVS) {
-		unlock_kernel();
-		return -ENODEV;
-	}
 
+	err = -EBUSY;
 	vcp = &coda_comms[idx];
-	if(vcp->vc_inuse) {
-		unlock_kernel();
-		return -EBUSY;
-	}
-	
-	if (!vcp->vc_inuse++) {
+	if (!vcp->vc_inuse) {
+		vcp->vc_inuse++;
+
 		INIT_LIST_HEAD(&vcp->vc_pending);
 		INIT_LIST_HEAD(&vcp->vc_processing);
 		init_waitqueue_head(&vcp->vc_waitq);
 		vcp->vc_sb = NULL;
 		vcp->vc_seq = 0;
+
+		file->private_data = vcp;
+		err = 0;
 	}
-	
-	file->private_data = vcp;
 
 	unlock_kernel();
-        return 0;
+	return err;
 }
 
 
 static int coda_psdev_release(struct inode * inode, struct file * file)
 {
-        struct venus_comm *vcp = (struct venus_comm *) file->private_data;
-        struct upc_req *req, *tmp;
+	struct venus_comm *vcp = (struct venus_comm *) file->private_data;
+	struct upc_req *req, *tmp;
 
-	lock_kernel();
-	if ( !vcp->vc_inuse ) {
-		unlock_kernel();
+	if (!vcp || !vcp->vc_inuse ) {
 		printk("psdev_release: Not open.\n");
 		return -1;
 	}
 
-	if (--vcp->vc_inuse) {
-		unlock_kernel();
-		return 0;
-	}
-        
-        /* Wakeup clients so they can return. */
+	lock_kernel();
+
+	/* Wakeup clients so they can return. */
 	list_for_each_entry_safe(req, tmp, &vcp->vc_pending, uc_chain) {
+		list_del(&req->uc_chain);
+
 		/* Async requests need to be freed here */
 		if (req->uc_flags & REQ_ASYNC) {
 			CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
-			upc_free(req);
+			kfree(req);
 			continue;
 		}
 		req->uc_flags |= REQ_ABORT;
 		wake_up(&req->uc_sleep);
-        }
-        
-	list_for_each_entry(req, &vcp->vc_processing, uc_chain) {
-		req->uc_flags |= REQ_ABORT;
-	        wake_up(&req->uc_sleep);
-        }
+	}
 
+	list_for_each_entry_safe(req, tmp, &vcp->vc_processing, uc_chain) {
+		list_del(&req->uc_chain);
+
+		req->uc_flags |= REQ_ABORT;
+		wake_up(&req->uc_sleep);
+	}
+
+	file->private_data = NULL;
+	vcp->vc_inuse--;
 	unlock_kernel();
 	return 0;
 }
@@ -376,21 +373,20 @@
 	return err;
 }
 
-
-MODULE_AUTHOR("Peter J. Braam <braam@cs.cmu.edu>");
+MODULE_AUTHOR("Jan Harkes, Peter J. Braam");
+MODULE_DESCRIPTION("Coda Distributed File System VFS interface");
+MODULE_ALIAS_CHARDEV_MAJOR(CODA_PSDEV_MAJOR);
 MODULE_LICENSE("GPL");
+#ifdef CONFIG_CODA_FS_OLD_API
+MODULE_VERSION("5.3.21");
+#else
+MODULE_VERSION("6.6");
+#endif
 
 static int __init init_coda(void)
 {
 	int status;
 	int i;
-	printk(KERN_INFO "Coda Kernel/Venus communications, "
-#ifdef CONFIG_CODA_FS_OLD_API
-	       "v5.3.20"
-#else
-	       "v6.0.0"
-#endif
-	       ", coda@cs.cmu.edu\n");
 
 	status = coda_init_inodecache();
 	if (status)
diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c
index 76e00a6..4513b7258 100644
--- a/fs/coda/symlink.c
+++ b/fs/coda/symlink.c
@@ -20,7 +20,6 @@
 #include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
 #include <linux/coda_fs_i.h>
-#include <linux/coda_proc.h>
 
 static int coda_symlink_filler(struct file *file, struct page *page)
 {
@@ -32,7 +31,6 @@
 
 	lock_kernel();
 	cii = ITOC(inode);
-	coda_vfs_stat.follow_link++;
 
 	error = venus_readlink(inode->i_sb, &cii->c_fid, p, &len);
 	unlock_kernel();
diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c
index c57a1fa..81b7771 100644
--- a/fs/coda/sysctl.c
+++ b/fs/coda/sysctl.c
@@ -5,181 +5,14 @@
  * 
  * Carnegie Mellon encourages users to contribute improvements to
  * the Coda project. Contact Peter Braam (coda@cs.cmu.edu).
- * 
- * CODA operation statistics
- * (c) March, 1998 Zhanyong Wan <zhanyong.wan@yale.edu>
- *
  */
 
-#include <linux/time.h>
-#include <linux/mm.h>
 #include <linux/sysctl.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/ctype.h>
-#include <linux/bitops.h>
-#include <asm/uaccess.h>
-#include <linux/utsname.h>
-#include <linux/module.h>
 
-#include <linux/coda.h>
-#include <linux/coda_linux.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda_psdev.h>
-#include <linux/coda_cache.h>
-#include <linux/coda_proc.h>
+#include "coda_int.h"
 
 static struct ctl_table_header *fs_table_header;
 
-#define CODA_TIMEOUT    3       /* timeout on upcalls to become intrble */
-#define CODA_HARD       5       /* mount type "hard" or "soft" */
-#define CODA_VFS 	 6       /* vfs statistics */
-#define CODA_CACHE_INV 	 9       /* cache invalidation statistics */
-#define CODA_FAKE_STATFS 10	 /* don't query venus for actual cache usage */
-
-struct coda_vfs_stats		coda_vfs_stat;
-static struct coda_cache_inv_stats	coda_cache_inv_stat;
-
-static void reset_coda_vfs_stats( void )
-{
-	memset( &coda_vfs_stat, 0, sizeof( coda_vfs_stat ) );
-}
-
-static void reset_coda_cache_inv_stats( void )
-{
-	memset( &coda_cache_inv_stat, 0, sizeof( coda_cache_inv_stat ) );
-}
-
-static int do_reset_coda_vfs_stats( ctl_table * table, int write,
-				    struct file * filp, void __user * buffer,
-				    size_t * lenp, loff_t * ppos )
-{
-	if ( write ) {
-		reset_coda_vfs_stats();
-
-		*ppos += *lenp;
-	} else {
-		*lenp = 0;
-	}
-
-	return 0;
-}
-
-static int do_reset_coda_cache_inv_stats( ctl_table * table, int write,
-					  struct file * filp,
-					  void __user * buffer,
-					  size_t * lenp, loff_t * ppos )
-{
-	if ( write ) {
-		reset_coda_cache_inv_stats();
-
-		*ppos += *lenp;
-	} else {
-		*lenp = 0;
-	}
-  
-	return 0;
-}
-
-static int proc_vfs_stats_show(struct seq_file *m, void *v)
-{
-	struct coda_vfs_stats * ps = & coda_vfs_stat;
-  
-	seq_printf(m,
-			"Coda VFS statistics\n"
-			"===================\n\n"
-			"File Operations:\n"
-			"\topen\t\t%9d\n"
-			"\tflush\t\t%9d\n"
-			"\trelease\t\t%9d\n"
-			"\tfsync\t\t%9d\n\n"
-			"Dir Operations:\n"
-			"\treaddir\t\t%9d\n\n"
-			"Inode Operations\n"
-			"\tcreate\t\t%9d\n"
-			"\tlookup\t\t%9d\n"
-			"\tlink\t\t%9d\n"
-			"\tunlink\t\t%9d\n"
-			"\tsymlink\t\t%9d\n"
-			"\tmkdir\t\t%9d\n"
-			"\trmdir\t\t%9d\n"
-			"\trename\t\t%9d\n"
-			"\tpermission\t%9d\n",
-
-			/* file operations */
-			ps->open,
-			ps->flush,
-			ps->release,
-			ps->fsync,
-
-			/* dir operations */
-			ps->readdir,
-		  
-			/* inode operations */
-			ps->create,
-			ps->lookup,
-			ps->link,
-			ps->unlink,
-			ps->symlink,
-			ps->mkdir,
-			ps->rmdir,
-			ps->rename,
-			ps->permission); 
-	return 0;
-}
-
-static int proc_cache_inv_stats_show(struct seq_file *m, void *v)
-{
-	struct coda_cache_inv_stats * ps = & coda_cache_inv_stat;
-  
-	seq_printf(m,
-			"Coda cache invalidation statistics\n"
-			"==================================\n\n"
-			"flush\t\t%9d\n"
-			"purge user\t%9d\n"
-			"zap_dir\t\t%9d\n"
-			"zap_file\t%9d\n"
-			"zap_vnode\t%9d\n"
-			"purge_fid\t%9d\n"
-			"replace\t\t%9d\n",
-			ps->flush,
-			ps->purge_user,
-			ps->zap_dir,
-			ps->zap_file,
-			ps->zap_vnode,
-			ps->purge_fid,
-			ps->replace );
-	return 0;
-}
-
-static int proc_vfs_stats_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, proc_vfs_stats_show, NULL);
-}
-
-static int proc_cache_inv_stats_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, proc_cache_inv_stats_show, NULL);
-}
-
-static const struct file_operations proc_vfs_stats_fops = {
-	.owner		= THIS_MODULE,
-	.open		= proc_vfs_stats_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static const struct file_operations proc_cache_inv_stats_fops = {
-	.owner		= THIS_MODULE,
-	.open		= proc_cache_inv_stats_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
 static ctl_table coda_table[] = {
 	{
 		.ctl_name	= CTL_UNNUMBERED,
@@ -199,22 +32,6 @@
 	},
 	{
 		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "vfs_stats",
-		.data		= NULL,
-		.maxlen		= 0,
-		.mode		= 0644,
-		.proc_handler	= &do_reset_coda_vfs_stats
-	},
-	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "cache_inv_stats",
-		.data		= NULL,
-		.maxlen		= 0,
-		.mode		= 0644,
-		.proc_handler	= &do_reset_coda_cache_inv_stats
-	},
-	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "fake_statfs",
 		.data		= &coda_fake_statfs,
 		.maxlen		= sizeof(int),
@@ -235,59 +52,20 @@
 };
 
 
-#ifdef CONFIG_PROC_FS
-
-/*
- target directory structure:
-   /proc/fs  (see linux/fs/proc/root.c)
-   /proc/fs/coda
-   /proc/fs/coda/{vfs_stats,
-
-*/
-
-static struct proc_dir_entry* proc_fs_coda;
-
-#endif
-
 void coda_sysctl_init(void)
 {
-	reset_coda_vfs_stats();
-	reset_coda_cache_inv_stats();
-
-#ifdef CONFIG_PROC_FS
-	proc_fs_coda = proc_mkdir("coda", proc_root_fs);
-	if (proc_fs_coda) {
-		struct proc_dir_entry *pde;
-
-		proc_fs_coda->owner = THIS_MODULE;
-		pde = create_proc_entry("vfs_stats", 0, proc_fs_coda);
-		if (pde)
-			pde->proc_fops = &proc_vfs_stats_fops;
-		pde = create_proc_entry("cache_inv_stats", 0, proc_fs_coda);
-		if (pde)
-			pde->proc_fops = &proc_cache_inv_stats_fops;
-	}
-#endif
-
 #ifdef CONFIG_SYSCTL
 	if ( !fs_table_header )
 		fs_table_header = register_sysctl_table(fs_table);
-#endif 
+#endif
 }
 
-void coda_sysctl_clean(void) 
+void coda_sysctl_clean(void)
 {
-
 #ifdef CONFIG_SYSCTL
 	if ( fs_table_header ) {
 		unregister_sysctl_table(fs_table_header);
 		fs_table_header = NULL;
 	}
 #endif
-
-#ifdef CONFIG_PROC_FS
-        remove_proc_entry("cache_inv_stats", proc_fs_coda);
-        remove_proc_entry("vfs_stats", proc_fs_coda);
-	remove_proc_entry("coda", proc_root_fs);
-#endif 
 }
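
The sysctl.c rewrite above removes the /proc/fs/coda statistics files and their reset handlers, leaving only the sysctl registration. For reference, a minimal sketch of the registration pattern that remains; names such as my_knob, my_table and my_header are illustrative only, and the real file nests its table under fs/coda rather than registering it flat:

#include <linux/sysctl.h>

static int my_knob;

static ctl_table my_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "my_knob",
		.data		= &my_knob,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= &proc_dointvec
	},
	{}
};

static struct ctl_table_header *my_header;

/* register once; register_sysctl_table() returns NULL on failure */
static void my_sysctl_init(void)
{
	if (!my_header)
		my_header = register_sysctl_table(my_table);
}

static void my_sysctl_clean(void)
{
	if (my_header) {
		unregister_sysctl_table(my_header);
		my_header = NULL;
	}
}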
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index 5faacdb..e4e766e 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -35,12 +35,10 @@
 #include <linux/coda_psdev.h>
 #include <linux/coda_fs_i.h>
 #include <linux/coda_cache.h>
-#include <linux/coda_proc.h> 
 
-#define upc_alloc() kmalloc(sizeof(struct upc_req), GFP_KERNEL)
-#define upc_free(r) kfree(r)
+#include "coda_int.h"
 
-static int coda_upcall(struct coda_sb_info *mntinfo, int inSize, int *outSize, 
+static int coda_upcall(struct venus_comm *vc, int inSize, int *outSize,
 		       union inputArgs *buffer);
 
 static void *alloc_upcall(int opcode, int size)
@@ -86,13 +84,9 @@
         insize = SIZE(root);
         UPARG(CODA_ROOT);
 
-	error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
-	
-	if (error) {
-	        printk("coda_get_rootfid: error %d\n", error);
-	} else {
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+	if (!error)
 		*fidp = outp->coda_root.VFid;
-	}
 
 	CODA_FREE(inp, insize);
 	return error;
@@ -109,9 +103,9 @@
 	UPARG(CODA_GETATTR);
         inp->coda_getattr.VFid = *fid;
 
-        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
-	
-	*attr = outp->coda_getattr.attr;
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+	if (!error)
+		*attr = outp->coda_getattr.attr;
 
 	CODA_FREE(inp, insize);
         return error;
@@ -130,7 +124,7 @@
         inp->coda_setattr.VFid = *fid;
 	inp->coda_setattr.attr = *vattr;
 
-        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
 
         CODA_FREE(inp, insize);
         return error;
@@ -156,10 +150,11 @@
         memcpy((char *)(inp) + offset, name, length);
         *((char *)inp + offset + length) = '\0';
 
-        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
-
-	*resfid = outp->coda_lookup.VFid;
-	*type = outp->coda_lookup.vtype;
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+	if (!error) {
+		*resfid = outp->coda_lookup.VFid;
+		*type = outp->coda_lookup.vtype;
+	}
 
 	CODA_FREE(inp, insize);
 	return error;
@@ -188,7 +183,7 @@
         inp->coda_store.VFid = *fid;
         inp->coda_store.flags = flags;
 
-        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
 
 	CODA_FREE(inp, insize);
         return error;
@@ -206,7 +201,7 @@
 	inp->coda_release.VFid = *fid;
 	inp->coda_release.flags = flags;
 
-	error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
 
 	CODA_FREE(inp, insize);
 	return error;
@@ -235,7 +230,7 @@
         inp->coda_close.VFid = *fid;
         inp->coda_close.flags = flags;
 
-        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
 
 	CODA_FREE(inp, insize);
         return error;
@@ -251,12 +246,12 @@
 	insize = SIZE(open_by_fd);
 	UPARG(CODA_OPEN_BY_FD);
 
-        inp->coda_open.VFid = *fid;
-        inp->coda_open.flags = flags;
+	inp->coda_open_by_fd.VFid = *fid;
+	inp->coda_open_by_fd.flags = flags;
 
-        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
-
-	*fh = outp->coda_open_by_fd.fh;
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+	if (!error)
+		*fh = outp->coda_open_by_fd.fh;
 
 	CODA_FREE(inp, insize);
 	return error;
@@ -281,11 +276,12 @@
         /* Venus must get null terminated string */
         memcpy((char *)(inp) + offset, name, length);
         *((char *)inp + offset + length) = '\0';
-        
-        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
 
-	*attrs = outp->coda_mkdir.attr;
-	*newfid = outp->coda_mkdir.VFid;
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+	if (!error) {
+		*attrs = outp->coda_mkdir.attr;
+		*newfid = outp->coda_mkdir.VFid;
+	}
 
 	CODA_FREE(inp, insize);
 	return error;        
@@ -323,7 +319,7 @@
         memcpy((char *)(inp) + offset, new_name, new_length);
         *((char *)inp + offset + new_length) = '\0';
 
-        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
 
 	CODA_FREE(inp, insize);
 	return error;
@@ -351,11 +347,12 @@
         /* Venus must get null terminated string */
         memcpy((char *)(inp) + offset, name, length);
         *((char *)inp + offset + length) = '\0';
-                
-        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
 
-	*attrs = outp->coda_create.attr;
-	*newfid = outp->coda_create.VFid;
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+	if (!error) {
+		*attrs = outp->coda_create.attr;
+		*newfid = outp->coda_create.VFid;
+	}
 
 	CODA_FREE(inp, insize);
 	return error;        
@@ -377,8 +374,8 @@
         inp->coda_rmdir.name = offset;
         memcpy((char *)(inp) + offset, name, length);
 	*((char *)inp + offset + length) = '\0';
-        
-        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
 
 	CODA_FREE(inp, insize);
 	return error;
@@ -399,8 +396,8 @@
         inp->coda_remove.name = offset;
         memcpy((char *)(inp) + offset, name, length);
 	*((char *)inp + offset + length) = '\0';
-        
-        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
 
 	CODA_FREE(inp, insize);
 	return error;
@@ -420,19 +417,18 @@
 	UPARG(CODA_READLINK);
 
         inp->coda_readlink.VFid = *fid;
-    
-        error =  coda_upcall(coda_sbp(sb), insize, &outsize, inp);
-	
-	if (! error) {
-                retlen = outp->coda_readlink.count;
+
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+	if (!error) {
+		retlen = outp->coda_readlink.count;
 		if ( retlen > *length )
-		        retlen = *length;
+			retlen = *length;
 		*length = retlen;
 		result =  (char *)outp + (long)outp->coda_readlink.data;
 		memcpy(buffer, result, retlen);
 		*(buffer + retlen) = '\0';
 	}
-        
+
         CODA_FREE(inp, insize);
         return error;
 }
@@ -458,8 +454,8 @@
         /* make sure strings are null terminated */
         memcpy((char *)(inp) + offset, name, len);
         *((char *)inp + offset + len) = '\0';
-        
-        error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
 
 	CODA_FREE(inp, insize);
         return error;
@@ -494,7 +490,7 @@
         memcpy((char *)(inp) + offset, name, len);
         *((char *)inp + offset + len) = '\0';
 
-	error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
 
 	CODA_FREE(inp, insize);
         return error;
@@ -509,9 +505,9 @@
 	insize=SIZE(fsync);
 	UPARG(CODA_FSYNC);
 
-        inp->coda_fsync.VFid = *fid;
-        error = coda_upcall(coda_sbp(sb), sizeof(union inputArgs), 
-                            &outsize, inp);
+	inp->coda_fsync.VFid = *fid;
+	error = coda_upcall(coda_vcp(sb), sizeof(union inputArgs),
+			    &outsize, inp);
 
 	CODA_FREE(inp, insize);
 	return error;
@@ -529,7 +525,7 @@
         inp->coda_access.VFid = *fid;
         inp->coda_access.flags = mask;
 
-	error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
 
 	CODA_FREE(inp, insize);
 	return error;
@@ -578,9 +574,9 @@
 	        goto exit;
 	}
 
-        error = coda_upcall(coda_sbp(sb), SIZE(ioctl) + data->vi.in_size,
-                            &outsize, inp);
-        
+	error = coda_upcall(coda_vcp(sb), SIZE(ioctl) + data->vi.in_size,
+			    &outsize, inp);
+
         if (error) {
 	        printk("coda_pioctl: Venus returns: %d for %s\n", 
 		       error, coda_f2s(fid));
@@ -620,16 +616,13 @@
 	insize = max_t(unsigned int, INSIZE(statfs), OUTSIZE(statfs));
 	UPARG(CODA_STATFS);
 
-        error = coda_upcall(coda_sbp(dentry->d_sb), insize, &outsize, inp);
-	
-        if (!error) {
+	error = coda_upcall(coda_vcp(dentry->d_sb), insize, &outsize, inp);
+	if (!error) {
 		sfs->f_blocks = outp->coda_statfs.stat.f_blocks;
 		sfs->f_bfree  = outp->coda_statfs.stat.f_bfree;
 		sfs->f_bavail = outp->coda_statfs.stat.f_bavail;
 		sfs->f_files  = outp->coda_statfs.stat.f_files;
 		sfs->f_ffree  = outp->coda_statfs.stat.f_ffree;
-	} else {
-		printk("coda_statfs: Venus returns: %d\n", error);
 	}
 
         CODA_FREE(inp, insize);
@@ -638,96 +631,129 @@
 
 /*
  * coda_upcall and coda_downcall routines.
- * 
  */
+static void block_signals(sigset_t *old)
+{
+	spin_lock_irq(&current->sighand->siglock);
+	*old = current->blocked;
 
-static inline void coda_waitfor_upcall(struct upc_req *vmp,
-				       struct venus_comm *vcommp)
+	sigfillset(&current->blocked);
+	sigdelset(&current->blocked, SIGKILL);
+	sigdelset(&current->blocked, SIGSTOP);
+	sigdelset(&current->blocked, SIGINT);
+
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+}
+
+static void unblock_signals(sigset_t *old)
+{
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = *old;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+}
+
+/* Don't allow signals to interrupt the following upcalls before venus
+ * has seen them,
+ * - CODA_CLOSE or CODA_RELEASE upcall  (to avoid reference count problems)
+ * - CODA_STORE				(to avoid data loss)
+ */
+#define CODA_INTERRUPTIBLE(r) (!coda_hard && \
+			       (((r)->uc_opcode != CODA_CLOSE && \
+				 (r)->uc_opcode != CODA_STORE && \
+				 (r)->uc_opcode != CODA_RELEASE) || \
+				(r)->uc_flags & REQ_READ))
+
+static inline void coda_waitfor_upcall(struct upc_req *req)
 {
 	DECLARE_WAITQUEUE(wait, current);
+	unsigned long timeout = jiffies + coda_timeout * HZ;
+	sigset_t old;
+	int blocked;
 
-	vmp->uc_posttime = jiffies;
+	block_signals(&old);
+	blocked = 1;
 
-	add_wait_queue(&vmp->uc_sleep, &wait);
+	add_wait_queue(&req->uc_sleep, &wait);
 	for (;;) {
-		if ( !coda_hard && vmp->uc_opcode != CODA_CLOSE ) 
+		if (CODA_INTERRUPTIBLE(req))
 			set_current_state(TASK_INTERRUPTIBLE);
 		else
 			set_current_state(TASK_UNINTERRUPTIBLE);
 
-                /* venus died */
-                if ( !vcommp->vc_inuse )
-                        break;
-
 		/* got a reply */
-		if ( vmp->uc_flags & ( REQ_WRITE | REQ_ABORT ) )
+		if (req->uc_flags & (REQ_WRITE | REQ_ABORT))
 			break;
 
-		if ( !coda_hard && vmp->uc_opcode != CODA_CLOSE && signal_pending(current) ) {
-			/* if this process really wants to die, let it go */
-			if ( sigismember(&(current->pending.signal), SIGKILL) ||
-			     sigismember(&(current->pending.signal), SIGINT) )
-				break;
-			/* signal is present: after timeout always return 
-			   really smart idea, probably useless ... */
-			if ( jiffies - vmp->uc_posttime > coda_timeout * HZ )
-				break; 
+		if (blocked && time_after(jiffies, timeout) &&
+		    CODA_INTERRUPTIBLE(req))
+		{
+			unblock_signals(&old);
+			blocked = 0;
 		}
-		schedule();
-	}
-	remove_wait_queue(&vmp->uc_sleep, &wait);
-	set_current_state(TASK_RUNNING);
 
-	return;
+		if (signal_pending(current)) {
+			list_del(&req->uc_chain);
+			break;
+		}
+
+		if (blocked)
+			schedule_timeout(HZ);
+		else
+			schedule();
+	}
+	if (blocked)
+		unblock_signals(&old);
+
+	remove_wait_queue(&req->uc_sleep, &wait);
+	set_current_state(TASK_RUNNING);
 }
 
 
-/* 
- * coda_upcall will return an error in the case of 
+/*
+ * coda_upcall will return an error in the case of
  * failed communication with Venus _or_ will peek at Venus
  * reply and return Venus' error.
  *
  * As venus has 2 types of errors, normal errors (positive) and internal
  * errors (negative), normal errors are negated, while internal errors
  * are all mapped to -EINTR, while showing a nice warning message. (jh)
- * 
  */
-static int coda_upcall(struct coda_sb_info *sbi, 
-		int inSize, int *outSize, 
-		union inputArgs *buffer) 
+static int coda_upcall(struct venus_comm *vcp,
+		       int inSize, int *outSize,
+		       union inputArgs *buffer)
 {
-	struct venus_comm *vcommp;
 	union outputArgs *out;
-	struct upc_req *req;
+	union inputArgs *sig_inputArgs;
+	struct upc_req *req, *sig_req;
 	int error = 0;
 
-	vcommp = sbi->sbi_vcomm;
-	if ( !vcommp->vc_inuse ) {
-		printk("No pseudo device in upcall comms at %p\n", vcommp);
-                return -ENXIO;
+	if (!vcp->vc_inuse) {
+		printk(KERN_NOTICE "coda: Venus dead, not sending upcall\n");
+		return -ENXIO;
 	}
 
 	/* Format the request message. */
-	req = upc_alloc();
-	if (!req) {
-		printk("Failed to allocate upc_req structure\n");
+	req = kmalloc(sizeof(struct upc_req), GFP_KERNEL);
+	if (!req)
 		return -ENOMEM;
-	}
+
 	req->uc_data = (void *)buffer;
 	req->uc_flags = 0;
 	req->uc_inSize = inSize;
 	req->uc_outSize = *outSize ? *outSize : inSize;
 	req->uc_opcode = ((union inputArgs *)buffer)->ih.opcode;
-	req->uc_unique = ++vcommp->vc_seq;
+	req->uc_unique = ++vcp->vc_seq;
 	init_waitqueue_head(&req->uc_sleep);
-	
+
 	/* Fill in the common input args. */
 	((union inputArgs *)buffer)->ih.unique = req->uc_unique;
 
 	/* Append msg to pending queue and poke Venus. */
-	list_add_tail(&(req->uc_chain), &vcommp->vc_pending);
-        
-	wake_up_interruptible(&vcommp->vc_waitq);
+	list_add_tail(&req->uc_chain, &vcp->vc_pending);
+
+	wake_up_interruptible(&vcp->vc_waitq);
 	/* We can be interrupted while we wait for Venus to process
 	 * our request.  If the interrupt occurs before Venus has read
 	 * the request, we dequeue and return. If it occurs after the
@@ -738,67 +764,60 @@
 	 * ENODEV.  */
 
 	/* Go to sleep.  Wake up on signals only after the timeout. */
-	coda_waitfor_upcall(req, vcommp);
+	coda_waitfor_upcall(req);
 
-	if (vcommp->vc_inuse) {      /* i.e. Venus is still alive */
-	    /* Op went through, interrupt or not... */
-	    if (req->uc_flags & REQ_WRITE) {
+	/* Op went through, interrupt or not... */
+	if (req->uc_flags & REQ_WRITE) {
 		out = (union outputArgs *)req->uc_data;
 		/* here we map positive Venus errors to kernel errors */
 		error = -out->oh.result;
 		*outSize = req->uc_outSize;
 		goto exit;
-	    }
-	    if ( !(req->uc_flags & REQ_READ) && signal_pending(current)) { 
-		/* Interrupted before venus read it. */
-		list_del(&(req->uc_chain));
-		/* perhaps the best way to convince the app to
-		   give up? */
-		error = -EINTR;
-		goto exit;
-	    } 
-	    if ( (req->uc_flags & REQ_READ) && signal_pending(current) ) {
-		    /* interrupted after Venus did its read, send signal */
-		    union inputArgs *sig_inputArgs;
-		    struct upc_req *sig_req;
-		    
-		    list_del(&(req->uc_chain));
-		    error = -ENOMEM;
-		    sig_req = upc_alloc();
-		    if (!sig_req) goto exit;
-
-		    CODA_ALLOC((sig_req->uc_data), char *, sizeof(struct coda_in_hdr));
-		    if (!sig_req->uc_data) {
-			upc_free(sig_req);
-			goto exit;
-		    }
-		    
-		    error = -EINTR;
-		    sig_inputArgs = (union inputArgs *)sig_req->uc_data;
-		    sig_inputArgs->ih.opcode = CODA_SIGNAL;
-		    sig_inputArgs->ih.unique = req->uc_unique;
-		    
-		    sig_req->uc_flags = REQ_ASYNC;
-		    sig_req->uc_opcode = sig_inputArgs->ih.opcode;
-		    sig_req->uc_unique = sig_inputArgs->ih.unique;
-		    sig_req->uc_inSize = sizeof(struct coda_in_hdr);
-		    sig_req->uc_outSize = sizeof(struct coda_in_hdr);
-		    
-		    /* insert at head of queue! */
-		    list_add(&(sig_req->uc_chain), &vcommp->vc_pending);
-		    wake_up_interruptible(&vcommp->vc_waitq);
-	    } else {
-		    printk("Coda: Strange interruption..\n");
-		    error = -EINTR;
-	    }
-	} else {	/* If venus died i.e. !VC_OPEN(vcommp) */
-	        printk("coda_upcall: Venus dead on (op,un) (%d.%d) flags %d\n",
-		       req->uc_opcode, req->uc_unique, req->uc_flags);
-		error = -ENODEV;
 	}
 
- exit:
-	upc_free(req);
+	error = -EINTR;
+	if ((req->uc_flags & REQ_ABORT) || !signal_pending(current)) {
+		printk(KERN_WARNING "coda: Unexpected interruption.\n");
+		goto exit;
+	}
+
+	/* Interrupted before venus read it. */
+	if (!(req->uc_flags & REQ_READ))
+		goto exit;
+
+	/* Venus saw the upcall, make sure we can send interrupt signal */
+	if (!vcp->vc_inuse) {
+		printk(KERN_INFO "coda: Venus dead, not sending signal.\n");
+		goto exit;
+	}
+
+	error = -ENOMEM;
+	sig_req = kmalloc(sizeof(struct upc_req), GFP_KERNEL);
+	if (!sig_req) goto exit;
+
+	CODA_ALLOC((sig_req->uc_data), char *, sizeof(struct coda_in_hdr));
+	if (!sig_req->uc_data) {
+		kfree(sig_req);
+		goto exit;
+	}
+
+	error = -EINTR;
+	sig_inputArgs = (union inputArgs *)sig_req->uc_data;
+	sig_inputArgs->ih.opcode = CODA_SIGNAL;
+	sig_inputArgs->ih.unique = req->uc_unique;
+
+	sig_req->uc_flags = REQ_ASYNC;
+	sig_req->uc_opcode = sig_inputArgs->ih.opcode;
+	sig_req->uc_unique = sig_inputArgs->ih.unique;
+	sig_req->uc_inSize = sizeof(struct coda_in_hdr);
+	sig_req->uc_outSize = sizeof(struct coda_in_hdr);
+
+	/* insert at head of queue! */
+	list_add(&(sig_req->uc_chain), &vcp->vc_pending);
+	wake_up_interruptible(&vcp->vc_waitq);
+
+exit:
+	kfree(req);
 	return error;
 }
 
@@ -838,77 +857,66 @@
 
 int coda_downcall(int opcode, union outputArgs * out, struct super_block *sb)
 {
+	struct inode *inode = NULL;
+	struct CodaFid *fid, *newfid;
+
 	/* Handle invalidation requests. */
-          if ( !sb || !sb->s_root || !sb->s_root->d_inode)
-		  return 0; 
+	if ( !sb || !sb->s_root)
+		return 0;
 
-	  switch (opcode) {
+	switch (opcode) {
+	case CODA_FLUSH:
+		coda_cache_clear_all(sb);
+		shrink_dcache_sb(sb);
+		if (sb->s_root->d_inode)
+		    coda_flag_inode(sb->s_root->d_inode, C_FLUSH);
+		break;
 
-	  case CODA_FLUSH : {
-		   coda_cache_clear_all(sb);
-		   shrink_dcache_sb(sb);
-		   coda_flag_inode(sb->s_root->d_inode, C_FLUSH);
-		   return(0);
-	  }
+	case CODA_PURGEUSER:
+		coda_cache_clear_all(sb);
+		break;
 
-	  case CODA_PURGEUSER : {
-		   coda_cache_clear_all(sb);
-		   return(0);
-	  }
+	case CODA_ZAPDIR:
+		fid = &out->coda_zapdir.CodaFid;
+		inode = coda_fid_to_inode(fid, sb);
+		if (inode) {
+			coda_flag_inode_children(inode, C_PURGE);
+			coda_flag_inode(inode, C_VATTR);
+		}
+		break;
 
-	  case CODA_ZAPDIR : {
-	          struct inode *inode;
-		  struct CodaFid *fid = &out->coda_zapdir.CodaFid;
+	case CODA_ZAPFILE:
+		fid = &out->coda_zapfile.CodaFid;
+		inode = coda_fid_to_inode(fid, sb);
+		if (inode)
+			coda_flag_inode(inode, C_VATTR);
+		break;
 
-		  inode = coda_fid_to_inode(fid, sb);
-		  if (inode) {
-			  coda_flag_inode_children(inode, C_PURGE);
-	                  coda_flag_inode(inode, C_VATTR);
-			  iput(inode);
-		  }
-		  
-		  return(0);
-	  }
-
-	  case CODA_ZAPFILE : {
-	          struct inode *inode;
-		  struct CodaFid *fid = &out->coda_zapfile.CodaFid;
-		  inode = coda_fid_to_inode(fid, sb);
-		  if ( inode ) {
-	                  coda_flag_inode(inode, C_VATTR);
-			  iput(inode);
-		  }
-		  return 0;
-	  }
-
-	  case CODA_PURGEFID : {
-	          struct inode *inode;
-		  struct CodaFid *fid = &out->coda_purgefid.CodaFid;
-		  inode = coda_fid_to_inode(fid, sb);
-		  if ( inode ) { 
+	case CODA_PURGEFID:
+		fid = &out->coda_purgefid.CodaFid;
+		inode = coda_fid_to_inode(fid, sb);
+		if (inode) {
 			coda_flag_inode_children(inode, C_PURGE);
 
 			/* catch the dentries later if some are still busy */
 			coda_flag_inode(inode, C_PURGE);
 			d_prune_aliases(inode);
 
-			iput(inode);
-		  }
-		  return 0;
-	  }
+		}
+		break;
 
-	  case CODA_REPLACE : {
-	          struct inode *inode;
-		  struct CodaFid *oldfid = &out->coda_replace.OldFid;
-		  struct CodaFid *newfid = &out->coda_replace.NewFid;
-		  inode = coda_fid_to_inode(oldfid, sb);
-		  if ( inode ) { 
-			  coda_replace_fid(inode, oldfid, newfid);
-			  iput(inode);
-		  }
-		  return 0;
-	  }
-	  }
-	  return 0;
+	case CODA_REPLACE:
+		fid = &out->coda_replace.OldFid;
+		newfid = &out->coda_replace.NewFid;
+		inode = coda_fid_to_inode(fid, sb);
+		if (inode)
+			coda_replace_fid(inode, fid, newfid);
+		break;
+	}
+
+	if (inode)
+		iput(inode);
+
+	return 0;
 }
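
The rewritten coda_waitfor_upcall() above wraps the wait in block_signals()/unblock_signals(): everything except SIGKILL, SIGSTOP and SIGINT stays masked for the first coda_timeout seconds, then the saved mask is restored so ordinary signals can interrupt the upcall (CODA_CLOSE, CODA_STORE and CODA_RELEASE stay uninterruptible via CODA_INTERRUPTIBLE()). A rough userspace analogue of the same save/mask/restore pattern, using POSIX sigprocmask() rather than the in-kernel siglock helpers, purely for illustration:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Mask all catchable signals except SIGINT, saving the previous mask.
 * SIGKILL and SIGSTOP cannot be blocked, so they need no special casing. */
static void block_signals(sigset_t *old)
{
	sigset_t all;

	sigfillset(&all);
	sigdelset(&all, SIGINT);
	sigprocmask(SIG_SETMASK, &all, old);
}

/* Restore the mask saved by block_signals(). */
static void unblock_signals(const sigset_t *old)
{
	sigprocmask(SIG_SETMASK, old, NULL);
}

int main(void)
{
	sigset_t old;

	block_signals(&old);
	sleep(5);	/* the "upcall": only SIGINT can interrupt it */
	unblock_signals(&old);
	puts("upcall finished, normal signal delivery restored");
	return 0;
}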
 
diff --git a/fs/compat.c b/fs/compat.c
index 4db6216..15078ce 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1257,6 +1257,7 @@
 {
 	struct page *kmapped_page = NULL;
 	char *kaddr = NULL;
+	unsigned long kpos = 0;
 	int ret;
 
 	while (argc-- > 0) {
@@ -1265,92 +1266,84 @@
 		unsigned long pos;
 
 		if (get_user(str, argv+argc) ||
-			!(len = strnlen_user(compat_ptr(str), bprm->p))) {
+		    !(len = strnlen_user(compat_ptr(str), MAX_ARG_STRLEN))) {
 			ret = -EFAULT;
 			goto out;
 		}
 
-		if (bprm->p < len)  {
+		if (len > MAX_ARG_STRLEN) {
 			ret = -E2BIG;
 			goto out;
 		}
 
-		bprm->p -= len;
-		/* XXX: add architecture specific overflow check here. */
+		/* We're going to work our way backwards. */
 		pos = bprm->p;
+		str += len;
+		bprm->p -= len;
 
 		while (len > 0) {
-			int i, new, err;
 			int offset, bytes_to_copy;
-			struct page *page;
 
 			offset = pos % PAGE_SIZE;
-			i = pos/PAGE_SIZE;
-			page = bprm->page[i];
-			new = 0;
-			if (!page) {
-				page = alloc_page(GFP_HIGHUSER);
-				bprm->page[i] = page;
-				if (!page) {
-					ret = -ENOMEM;
+			if (offset == 0)
+				offset = PAGE_SIZE;
+
+			bytes_to_copy = offset;
+			if (bytes_to_copy > len)
+				bytes_to_copy = len;
+
+			offset -= bytes_to_copy;
+			pos -= bytes_to_copy;
+			str -= bytes_to_copy;
+			len -= bytes_to_copy;
+
+			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
+				struct page *page;
+
+#ifdef CONFIG_STACK_GROWSUP
+				ret = expand_stack_downwards(bprm->vma, pos);
+				if (ret < 0) {
+					/* We've exceeded the stack rlimit. */
+					ret = -E2BIG;
 					goto out;
 				}
-				new = 1;
-			}
+#endif
+				ret = get_user_pages(current, bprm->mm, pos,
+						     1, 1, 1, &page, NULL);
+				if (ret <= 0) {
+					/* We've exceeded the stack rlimit. */
+					ret = -E2BIG;
+					goto out;
+				}
 
-			if (page != kmapped_page) {
-				if (kmapped_page)
+				if (kmapped_page) {
+					flush_kernel_dcache_page(kmapped_page);
 					kunmap(kmapped_page);
+					put_page(kmapped_page);
+				}
 				kmapped_page = page;
 				kaddr = kmap(kmapped_page);
+				kpos = pos & PAGE_MASK;
+				flush_cache_page(bprm->vma, kpos,
+						 page_to_pfn(kmapped_page));
 			}
-			if (new && offset)
-				memset(kaddr, 0, offset);
-			bytes_to_copy = PAGE_SIZE - offset;
-			if (bytes_to_copy > len) {
-				bytes_to_copy = len;
-				if (new)
-					memset(kaddr+offset+len, 0,
-						PAGE_SIZE-offset-len);
-			}
-			err = copy_from_user(kaddr+offset, compat_ptr(str),
-						bytes_to_copy);
-			if (err) {
+			if (copy_from_user(kaddr+offset, compat_ptr(str),
+						bytes_to_copy)) {
 				ret = -EFAULT;
 				goto out;
 			}
-
-			pos += bytes_to_copy;
-			str += bytes_to_copy;
-			len -= bytes_to_copy;
 		}
 	}
 	ret = 0;
 out:
-	if (kmapped_page)
+	if (kmapped_page) {
+		flush_kernel_dcache_page(kmapped_page);
 		kunmap(kmapped_page);
+		put_page(kmapped_page);
+	}
 	return ret;
 }
 
-#ifdef CONFIG_MMU
-
-#define free_arg_pages(bprm) do { } while (0)
-
-#else
-
-static inline void free_arg_pages(struct linux_binprm *bprm)
-{
-	int i;
-
-	for (i = 0; i < MAX_ARG_PAGES; i++) {
-		if (bprm->page[i])
-			__free_page(bprm->page[i]);
-		bprm->page[i] = NULL;
-	}
-}
-
-#endif /* CONFIG_MMU */
-
 /*
  * compat_do_execve() is mostly a copy of do_execve(), with the exception
  * that it processes 32 bit argv and envp pointers.
@@ -1363,7 +1356,6 @@
 	struct linux_binprm *bprm;
 	struct file *file;
 	int retval;
-	int i;
 
 	retval = -ENOMEM;
 	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
@@ -1377,24 +1369,19 @@
 
 	sched_exec();
 
-	bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
 	bprm->file = file;
 	bprm->filename = filename;
 	bprm->interp = filename;
-	bprm->mm = mm_alloc();
-	retval = -ENOMEM;
-	if (!bprm->mm)
+
+	retval = bprm_mm_init(bprm);
+	if (retval)
 		goto out_file;
 
-	retval = init_new_context(current, bprm->mm);
-	if (retval < 0)
-		goto out_mm;
-
-	bprm->argc = compat_count(argv, bprm->p / sizeof(compat_uptr_t));
+	bprm->argc = compat_count(argv, MAX_ARG_STRINGS);
 	if ((retval = bprm->argc) < 0)
 		goto out_mm;
 
-	bprm->envc = compat_count(envp, bprm->p / sizeof(compat_uptr_t));
+	bprm->envc = compat_count(envp, MAX_ARG_STRINGS);
 	if ((retval = bprm->envc) < 0)
 		goto out_mm;
 
@@ -1421,8 +1408,6 @@
 
 	retval = search_binary_handler(bprm, regs);
 	if (retval >= 0) {
-		free_arg_pages(bprm);
-
 		/* execve success */
 		security_bprm_free(bprm);
 		acct_update_integrals(current);
@@ -1431,19 +1416,12 @@
 	}
 
 out:
-	/* Something went wrong, return the inode and free the argument pages*/
-	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
-		struct page * page = bprm->page[i];
-		if (page)
-			__free_page(page);
-	}
-
 	if (bprm->security)
 		security_bprm_free(bprm);
 
 out_mm:
 	if (bprm->mm)
-		mmdrop(bprm->mm);
+		mmput(bprm->mm);
 
 out_file:
 	if (bprm->file) {
diff --git a/fs/dcache.c b/fs/dcache.c
index 0e73aa0..cb9d050 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -883,6 +883,11 @@
 	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
+static struct shrinker dcache_shrinker = {
+	.shrink = shrink_dcache_memory,
+	.seeks = DEFAULT_SEEKS,
+};
+
 /**
  * d_alloc	-	allocate a dcache entry
  * @parent: parent of entry to allocate
@@ -2115,7 +2120,7 @@
 	dentry_cache = KMEM_CACHE(dentry,
 		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
 	
-	set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
+	register_shrinker(&dcache_shrinker);
 
 	/* Hash may have been set up in dcache_init_early */
 	if (!hashdist)
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 1d533a2..11be8a3 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -345,11 +345,6 @@
 			switch (dentry->d_inode->i_mode & S_IFMT) {
 			case S_IFDIR:
 				ret = simple_rmdir(parent->d_inode, dentry);
-				if (ret)
-					printk(KERN_ERR
-						"DebugFS rmdir on %s failed : "
-						"directory not empty.\n",
-						dentry->d_name.name);
 				break;
 			case S_IFLNK:
 				kfree(dentry->d_inode->i_private);
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index f858fef..fb9e2ee 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -39,9 +39,7 @@
 {
 	char *p;
 
-	p = kmalloc(ls->ls_lvblen, GFP_KERNEL);
-	if (p)
-		memset(p, 0, ls->ls_lvblen);
+	p = kzalloc(ls->ls_lvblen, GFP_KERNEL);
 	return p;
 }
 
@@ -59,9 +57,7 @@
 
 	DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,);
 
-	r = kmalloc(sizeof(*r) + namelen, GFP_KERNEL);
-	if (r)
-		memset(r, 0, sizeof(*r) + namelen);
+	r = kzalloc(sizeof(*r) + namelen, GFP_KERNEL);
 	return r;
 }
 
@@ -101,9 +97,7 @@
 	DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,
 		   printk("namelen = %d\n", namelen););
 
-	de = kmalloc(sizeof(*de) + namelen, GFP_KERNEL);
-	if (de)
-		memset(de, 0, sizeof(*de) + namelen);
+	de = kzalloc(sizeof(*de) + namelen, GFP_KERNEL);
 	return de;
 }
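
The dlm/memory.c hunks above fold each kmalloc()+memset() pair into a single kzalloc() call. A before/after sketch of the idiom, with hypothetical helper names not tied to the dlm structures:

#include <linux/slab.h>
#include <linux/string.h>

/* Old style: allocate, then zero by hand if the allocation succeeded. */
static void *alloc_zeroed_old(size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (p)
		memset(p, 0, len);
	return p;
}

/* New style: kzalloc() allocates and zeroes in one step; callers still
 * have to check for NULL. */
static void *alloc_zeroed_new(size_t len)
{
	return kzalloc(len, GFP_KERNEL);
}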
 
diff --git a/fs/dquot.c b/fs/dquot.c
index 8819d28..7e27315 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -538,6 +538,11 @@
 	return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
 }
 
+static struct shrinker dqcache_shrinker = {
+	.shrink = shrink_dqcache_memory,
+	.seeks = DEFAULT_SEEKS,
+};
+
 /*
  * Put reference to dquot
  * NOTE: If you change this function please check whether dqput_blocks() works right...
@@ -1870,7 +1875,7 @@
 	printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
 			nr_hash, order, (PAGE_SIZE << order));
 
-	set_shrinker(DEFAULT_SEEKS, shrink_dqcache_memory);
+	register_shrinker(&dqcache_shrinker);
 
 	return 0;
 }
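
Both the dcache and dquot hunks above move from set_shrinker(DEFAULT_SEEKS, fn) to a caller-owned struct shrinker handed to register_shrinker(). A minimal sketch of that pattern; my_shrink() and my_cache_shrinker are illustrative names, and the callback signature is assumed to be the int (nr_to_scan, gfp_mask) form this kernel uses:

#include <linux/init.h>
#include <linux/mm.h>

/* With nr_to_scan == 0 the callback only reports how many objects could be
 * freed; otherwise it tries to free that many and reports what is left. */
static int my_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	return 0;	/* nothing cached in this sketch */
}

static struct shrinker my_cache_shrinker = {
	.shrink	= my_shrink,
	.seeks	= DEFAULT_SEEKS,
};

static int __init my_cache_init(void)
{
	register_shrinker(&my_cache_shrinker);
	return 0;
}

static void __exit my_cache_exit(void)
{
	unregister_shrinker(&my_cache_shrinker);
}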
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 83e94fe..0a50942 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -282,7 +282,7 @@
 	struct dentry *lower_dentry;
 	struct vfsmount *lower_mnt;
 	char *encoded_name;
-	unsigned int encoded_namelen;
+	int encoded_namelen;
 	struct ecryptfs_crypt_stat *crypt_stat = NULL;
 	struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
 	char *page_virt = NULL;
@@ -473,7 +473,7 @@
 	struct dentry *lower_dir_dentry;
 	umode_t mode;
 	char *encoded_symname;
-	unsigned int encoded_symlen;
+	int encoded_symlen;
 	struct ecryptfs_crypt_stat *crypt_stat = NULL;
 
 	lower_dentry = ecryptfs_dentry_to_lower(dentry);
@@ -902,8 +902,9 @@
 	mutex_lock(&crypt_stat->cs_mutex);
 	if (S_ISDIR(dentry->d_inode->i_mode))
 		crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
-	else if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
-		 || !(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
+	else if (S_ISREG(dentry->d_inode->i_mode)
+		 && (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
+		     || !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) {
 		struct vfsmount *lower_mnt;
 		struct file *lower_file = NULL;
 		struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
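
The ecryptfs change above makes encoded_namelen and encoded_symlen plain int rather than unsigned int, so a negative error return from the encoding helper is still negative when the caller tests it. A small standalone illustration of why the unsigned type hides the error; encode_name() is a made-up stand-in for the real helper:

#include <stdio.h>
#include <string.h>

/* Returns a length on success or a negative errno-style value on failure. */
static int encode_name(const char *name)
{
	if (!name)
		return -22;	/* -EINVAL */
	return (int)strlen(name);
}

int main(void)
{
	unsigned int ulen = encode_name(NULL);	/* -22 wraps to a huge value */
	int len = encode_name(NULL);		/* stays negative */

	printf("unsigned: %u  signed: %d\n", ulen, len);
	if (len < 0)
		puts("only the signed variable lets the error check fire");
	return 0;
}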
diff --git a/fs/efs/namei.c b/fs/efs/namei.c
index ed4a207..5276b19 100644
--- a/fs/efs/namei.c
+++ b/fs/efs/namei.c
@@ -75,6 +75,38 @@
 	return NULL;
 }
 
+struct dentry *efs_get_dentry(struct super_block *sb, void *vobjp)
+{
+	__u32 *objp = vobjp;
+	unsigned long ino = objp[0];
+	__u32 generation = objp[1];
+	struct inode *inode;
+	struct dentry *result;
+
+	if (ino == 0)
+		return ERR_PTR(-ESTALE);
+	inode = iget(sb, ino);
+	if (inode == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	if (is_bad_inode(inode) ||
+	    (generation && inode->i_generation != generation)) {
+	    	result = ERR_PTR(-ESTALE);
+		goto out_iput;
+	}
+
+	result = d_alloc_anon(inode);
+	if (!result) {
+		result = ERR_PTR(-ENOMEM);
+		goto out_iput;
+	}
+	return result;
+
+ out_iput:
+	iput(inode);
+	return result;
+}
+
 struct dentry *efs_get_parent(struct dentry *child)
 {
 	struct dentry *parent;
diff --git a/fs/efs/super.c b/fs/efs/super.c
index e0a6839..d360c81 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -11,6 +11,7 @@
 #include <linux/efs_fs.h>
 #include <linux/efs_vh.h>
 #include <linux/efs_fs_sb.h>
+#include <linux/exportfs.h>
 #include <linux/slab.h>
 #include <linux/buffer_head.h>
 #include <linux/vfs.h>
@@ -113,6 +114,7 @@
 };
 
 static struct export_operations efs_export_ops = {
+	.get_dentry	= efs_get_dentry,
 	.get_parent	= efs_get_parent,
 };
 
diff --git a/fs/exec.c b/fs/exec.c
index f20561f..7bdea79 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -54,6 +54,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
+#include <asm/tlb.h>
 
 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
@@ -178,6 +179,207 @@
 	goto out;
 }
 
+#ifdef CONFIG_MMU
+
+static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+		int write)
+{
+	struct page *page;
+	int ret;
+
+#ifdef CONFIG_STACK_GROWSUP
+	if (write) {
+		ret = expand_stack_downwards(bprm->vma, pos);
+		if (ret < 0)
+			return NULL;
+	}
+#endif
+	ret = get_user_pages(current, bprm->mm, pos,
+			1, write, 1, &page, NULL);
+	if (ret <= 0)
+		return NULL;
+
+	if (write) {
+		struct rlimit *rlim = current->signal->rlim;
+		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+
+		/*
+		 * Limit to 1/4-th the stack size for the argv+env strings.
+		 * This ensures that:
+		 *  - the remaining binfmt code will not run out of stack space,
+		 *  - the program will have a reasonable amount of stack left
+		 *    to work from.
+		 */
+		if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
+			put_page(page);
+			return NULL;
+		}
+	}
+
+	return page;
+}
+
+static void put_arg_page(struct page *page)
+{
+	put_page(page);
+}
+
+static void free_arg_page(struct linux_binprm *bprm, int i)
+{
+}
+
+static void free_arg_pages(struct linux_binprm *bprm)
+{
+}
+
+static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
+		struct page *page)
+{
+	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
+}
+
+static int __bprm_mm_init(struct linux_binprm *bprm)
+{
+	int err = -ENOMEM;
+	struct vm_area_struct *vma = NULL;
+	struct mm_struct *mm = bprm->mm;
+
+	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	if (!vma)
+		goto err;
+
+	down_write(&mm->mmap_sem);
+	vma->vm_mm = mm;
+
+	/*
+	 * Place the stack at the largest stack address the architecture
+	 * supports. Later, we'll move this to an appropriate place. We don't
+	 * use STACK_TOP because that can depend on attributes which aren't
+	 * configured yet.
+	 */
+	vma->vm_end = STACK_TOP_MAX;
+	vma->vm_start = vma->vm_end - PAGE_SIZE;
+
+	vma->vm_flags = VM_STACK_FLAGS;
+	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
+	err = insert_vm_struct(mm, vma);
+	if (err) {
+		up_write(&mm->mmap_sem);
+		goto err;
+	}
+
+	mm->stack_vm = mm->total_vm = 1;
+	up_write(&mm->mmap_sem);
+
+	bprm->p = vma->vm_end - sizeof(void *);
+
+	return 0;
+
+err:
+	if (vma) {
+		bprm->vma = NULL;
+		kmem_cache_free(vm_area_cachep, vma);
+	}
+
+	return err;
+}
+
+static bool valid_arg_len(struct linux_binprm *bprm, long len)
+{
+	return len <= MAX_ARG_STRLEN;
+}
+
+#else
+
+static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+		int write)
+{
+	struct page *page;
+
+	page = bprm->page[pos / PAGE_SIZE];
+	if (!page && write) {
+		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
+		if (!page)
+			return NULL;
+		bprm->page[pos / PAGE_SIZE] = page;
+	}
+
+	return page;
+}
+
+static void put_arg_page(struct page *page)
+{
+}
+
+static void free_arg_page(struct linux_binprm *bprm, int i)
+{
+	if (bprm->page[i]) {
+		__free_page(bprm->page[i]);
+		bprm->page[i] = NULL;
+	}
+}
+
+static void free_arg_pages(struct linux_binprm *bprm)
+{
+	int i;
+
+	for (i = 0; i < MAX_ARG_PAGES; i++)
+		free_arg_page(bprm, i);
+}
+
+static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
+		struct page *page)
+{
+}
+
+static int __bprm_mm_init(struct linux_binprm *bprm)
+{
+	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
+	return 0;
+}
+
+static bool valid_arg_len(struct linux_binprm *bprm, long len)
+{
+	return len <= bprm->p;
+}
+
+#endif /* CONFIG_MMU */
+
+/*
+ * Create a new mm_struct and populate it with a temporary stack
+ * vm_area_struct.  We don't have enough context at this point to set the stack
+ * flags, permissions, and offset, so we use temporary values.  We'll update
+ * them later in setup_arg_pages().
+ */
+int bprm_mm_init(struct linux_binprm *bprm)
+{
+	int err;
+	struct mm_struct *mm = NULL;
+
+	bprm->mm = mm = mm_alloc();
+	err = -ENOMEM;
+	if (!mm)
+		goto err;
+
+	err = init_new_context(current, mm);
+	if (err)
+		goto err;
+
+	err = __bprm_mm_init(bprm);
+	if (err)
+		goto err;
+
+	return 0;
+
+err:
+	if (mm) {
+		bprm->mm = NULL;
+		mmdrop(mm);
+	}
+
+	return err;
+}
+
 /*
  * count() counts the number of strings in array ARGV.
  */
@@ -203,15 +405,16 @@
 }
 
 /*
- * 'copy_strings()' copies argument/environment strings from user
- * memory to free pages in kernel mem. These are in a format ready
- * to be put directly into the top of new user memory.
+ * 'copy_strings()' copies argument/environment strings from the old
+ * process's memory to the new process's stack.  The call to get_user_pages()
+ * ensures the destination page is created and not swapped out.
  */
 static int copy_strings(int argc, char __user * __user * argv,
 			struct linux_binprm *bprm)
 {
 	struct page *kmapped_page = NULL;
 	char *kaddr = NULL;
+	unsigned long kpos = 0;
 	int ret;
 
 	while (argc-- > 0) {
@@ -220,69 +423,69 @@
 		unsigned long pos;
 
 		if (get_user(str, argv+argc) ||
-				!(len = strnlen_user(str, bprm->p))) {
+				!(len = strnlen_user(str, MAX_ARG_STRLEN))) {
 			ret = -EFAULT;
 			goto out;
 		}
 
-		if (bprm->p < len)  {
+		if (!valid_arg_len(bprm, len)) {
 			ret = -E2BIG;
 			goto out;
 		}
 
-		bprm->p -= len;
-		/* XXX: add architecture specific overflow check here. */
+		/* We're going to work our way backwards. */
 		pos = bprm->p;
+		str += len;
+		bprm->p -= len;
 
 		while (len > 0) {
-			int i, new, err;
 			int offset, bytes_to_copy;
-			struct page *page;
 
 			offset = pos % PAGE_SIZE;
-			i = pos/PAGE_SIZE;
-			page = bprm->page[i];
-			new = 0;
-			if (!page) {
-				page = alloc_page(GFP_HIGHUSER);
-				bprm->page[i] = page;
+			if (offset == 0)
+				offset = PAGE_SIZE;
+
+			bytes_to_copy = offset;
+			if (bytes_to_copy > len)
+				bytes_to_copy = len;
+
+			offset -= bytes_to_copy;
+			pos -= bytes_to_copy;
+			str -= bytes_to_copy;
+			len -= bytes_to_copy;
+
+			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
+				struct page *page;
+
+				page = get_arg_page(bprm, pos, 1);
 				if (!page) {
-					ret = -ENOMEM;
+					ret = -E2BIG;
 					goto out;
 				}
-				new = 1;
-			}
 
-			if (page != kmapped_page) {
-				if (kmapped_page)
+				if (kmapped_page) {
+					flush_kernel_dcache_page(kmapped_page);
 					kunmap(kmapped_page);
+					put_arg_page(kmapped_page);
+				}
 				kmapped_page = page;
 				kaddr = kmap(kmapped_page);
+				kpos = pos & PAGE_MASK;
+				flush_arg_page(bprm, kpos, kmapped_page);
 			}
-			if (new && offset)
-				memset(kaddr, 0, offset);
-			bytes_to_copy = PAGE_SIZE - offset;
-			if (bytes_to_copy > len) {
-				bytes_to_copy = len;
-				if (new)
-					memset(kaddr+offset+len, 0,
-						PAGE_SIZE-offset-len);
-			}
-			err = copy_from_user(kaddr+offset, str, bytes_to_copy);
-			if (err) {
+			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
 				ret = -EFAULT;
 				goto out;
 			}
-
-			pos += bytes_to_copy;
-			str += bytes_to_copy;
-			len -= bytes_to_copy;
 		}
 	}
 	ret = 0;
 out:
-	if (kmapped_page)
+	if (kmapped_page) {
+		flush_kernel_dcache_page(kmapped_page);
 		kunmap(kmapped_page);
+		put_arg_page(kmapped_page);
+	}
 	return ret;
 }
 
@@ -298,181 +501,172 @@
 	set_fs(oldfs);
 	return r;
 }
-
 EXPORT_SYMBOL(copy_strings_kernel);
 
 #ifdef CONFIG_MMU
+
 /*
- * This routine is used to map in a page into an address space: needed by
- * execve() for the initial stack and environment pages.
+ * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
+ * the binfmt code determines where the new stack should reside, we shift it to
+ * its final location.  The process proceeds as follows:
  *
- * vma->vm_mm->mmap_sem is held for writing.
+ * 1) Use shift to calculate the new vma endpoints.
+ * 2) Extend vma to cover both the old and new ranges.  This ensures the
+ *    arguments passed to subsequent functions are consistent.
+ * 3) Move vma's page tables to the new range.
+ * 4) Free up any cleared pgd range.
+ * 5) Shrink the vma to cover only the new range.
  */
-void install_arg_page(struct vm_area_struct *vma,
-			struct page *page, unsigned long address)
+static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	pte_t * pte;
-	spinlock_t *ptl;
+	unsigned long old_start = vma->vm_start;
+	unsigned long old_end = vma->vm_end;
+	unsigned long length = old_end - old_start;
+	unsigned long new_start = old_start - shift;
+	unsigned long new_end = old_end - shift;
+	struct mmu_gather *tlb;
 
-	if (unlikely(anon_vma_prepare(vma)))
-		goto out;
+	BUG_ON(new_start > new_end);
 
-	flush_dcache_page(page);
-	pte = get_locked_pte(mm, address, &ptl);
-	if (!pte)
-		goto out;
-	if (!pte_none(*pte)) {
-		pte_unmap_unlock(pte, ptl);
-		goto out;
+	/*
+	 * ensure there are no vmas between where we want to go
+	 * and where we are
+	 */
+	if (vma != find_vma(mm, new_start))
+		return -EFAULT;
+
+	/*
+	 * cover the whole range: [new_start, old_end)
+	 */
+	vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);
+
+	/*
+	 * move the page tables downwards, on failure we rely on
+	 * process cleanup to remove whatever mess we made.
+	 */
+	if (length != move_page_tables(vma, old_start,
+				       vma, new_start, length))
+		return -ENOMEM;
+
+	lru_add_drain();
+	tlb = tlb_gather_mmu(mm, 0);
+	if (new_end > old_start) {
+		/*
+		 * when the old and new regions overlap, clear from new_end.
+		 */
+		free_pgd_range(&tlb, new_end, old_end, new_end,
+			vma->vm_next ? vma->vm_next->vm_start : 0);
+	} else {
+		/*
+		 * otherwise, clean from old_start; this is done to not touch
+		 * the address space in [new_end, old_start); some architectures
+		 * have constraints on va-space that make this illegal (IA64) -
+		 * for the others it's just a little faster.
+		 */
+		free_pgd_range(&tlb, old_start, old_end, new_end,
+			vma->vm_next ? vma->vm_next->vm_start : 0);
 	}
-	inc_mm_counter(mm, anon_rss);
-	lru_cache_add_active(page);
-	set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
-					page, vma->vm_page_prot))));
-	page_add_new_anon_rmap(page, vma, address);
-	pte_unmap_unlock(pte, ptl);
+	tlb_finish_mmu(tlb, new_end, old_end);
 
-	/* no need for flush_tlb */
-	return;
-out:
-	__free_page(page);
-	force_sig(SIGKILL, current);
+	/*
+	 * shrink the vma to just the new range.
+	 */
+	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
+
+	return 0;
 }
 
 #define EXTRA_STACK_VM_PAGES	20	/* random */
 
+/*
+ * Finalizes the stack vm_area_struct. The flags and permissions are updated,
+ * the stack is optionally relocated, and some extra space is added.
+ */
 int setup_arg_pages(struct linux_binprm *bprm,
 		    unsigned long stack_top,
 		    int executable_stack)
 {
-	unsigned long stack_base;
-	struct vm_area_struct *mpnt;
+	unsigned long ret;
+	unsigned long stack_shift;
 	struct mm_struct *mm = current->mm;
-	int i, ret;
-	long arg_size;
+	struct vm_area_struct *vma = bprm->vma;
+	struct vm_area_struct *prev = NULL;
+	unsigned long vm_flags;
+	unsigned long stack_base;
 
 #ifdef CONFIG_STACK_GROWSUP
-	/* Move the argument and environment strings to the bottom of the
-	 * stack space.
-	 */
-	int offset, j;
-	char *to, *from;
-
-	/* Start by shifting all the pages down */
-	i = 0;
-	for (j = 0; j < MAX_ARG_PAGES; j++) {
-		struct page *page = bprm->page[j];
-		if (!page)
-			continue;
-		bprm->page[i++] = page;
-	}
-
-	/* Now move them within their pages */
-	offset = bprm->p % PAGE_SIZE;
-	to = kmap(bprm->page[0]);
-	for (j = 1; j < i; j++) {
-		memmove(to, to + offset, PAGE_SIZE - offset);
-		from = kmap(bprm->page[j]);
-		memcpy(to + PAGE_SIZE - offset, from, offset);
-		kunmap(bprm->page[j - 1]);
-		to = from;
-	}
-	memmove(to, to + offset, PAGE_SIZE - offset);
-	kunmap(bprm->page[j - 1]);
-
 	/* Limit stack size to 1GB */
 	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
 	if (stack_base > (1 << 30))
 		stack_base = 1 << 30;
-	stack_base = PAGE_ALIGN(stack_top - stack_base);
 
-	/* Adjust bprm->p to point to the end of the strings. */
-	bprm->p = stack_base + PAGE_SIZE * i - offset;
-
-	mm->arg_start = stack_base;
-	arg_size = i << PAGE_SHIFT;
-
-	/* zero pages that were copied above */
-	while (i < MAX_ARG_PAGES)
-		bprm->page[i++] = NULL;
-#else
-	stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
-	stack_base = PAGE_ALIGN(stack_base);
-	bprm->p += stack_base;
-	mm->arg_start = bprm->p;
-	arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
-#endif
-
-	arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;
-
-	if (bprm->loader)
-		bprm->loader += stack_base;
-	bprm->exec += stack_base;
-
-	mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-	if (!mpnt)
+	/* Make sure we didn't let the argument array grow too large. */
+	if (vma->vm_end - vma->vm_start > stack_base)
 		return -ENOMEM;
 
-	down_write(&mm->mmap_sem);
-	{
-		mpnt->vm_mm = mm;
-#ifdef CONFIG_STACK_GROWSUP
-		mpnt->vm_start = stack_base;
-		mpnt->vm_end = stack_base + arg_size;
+	stack_base = PAGE_ALIGN(stack_top - stack_base);
+
+	stack_shift = vma->vm_start - stack_base;
+	mm->arg_start = bprm->p - stack_shift;
+	bprm->p = vma->vm_end - stack_shift;
 #else
-		mpnt->vm_end = stack_top;
-		mpnt->vm_start = mpnt->vm_end - arg_size;
+	stack_top = arch_align_stack(stack_top);
+	stack_top = PAGE_ALIGN(stack_top);
+	stack_shift = vma->vm_end - stack_top;
+
+	bprm->p -= stack_shift;
+	mm->arg_start = bprm->p;
 #endif
-		/* Adjust stack execute permissions; explicitly enable
-		 * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
-		 * and leave alone (arch default) otherwise. */
-		if (unlikely(executable_stack == EXSTACK_ENABLE_X))
-			mpnt->vm_flags = VM_STACK_FLAGS |  VM_EXEC;
-		else if (executable_stack == EXSTACK_DISABLE_X)
-			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
-		else
-			mpnt->vm_flags = VM_STACK_FLAGS;
-		mpnt->vm_flags |= mm->def_flags;
-		mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
-		if ((ret = insert_vm_struct(mm, mpnt))) {
+
+	if (bprm->loader)
+		bprm->loader -= stack_shift;
+	bprm->exec -= stack_shift;
+
+	down_write(&mm->mmap_sem);
+	vm_flags = vma->vm_flags;
+
+	/*
+	 * Adjust stack execute permissions; explicitly enable for
+	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
+	 * (arch default) otherwise.
+	 */
+	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
+		vm_flags |= VM_EXEC;
+	else if (executable_stack == EXSTACK_DISABLE_X)
+		vm_flags &= ~VM_EXEC;
+	vm_flags |= mm->def_flags;
+
+	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
+			vm_flags);
+	if (ret)
+		goto out_unlock;
+	BUG_ON(prev != vma);
+
+	/* Move stack pages down in memory. */
+	if (stack_shift) {
+		ret = shift_arg_pages(vma, stack_shift);
+		if (ret) {
 			up_write(&mm->mmap_sem);
-			kmem_cache_free(vm_area_cachep, mpnt);
 			return ret;
 		}
-		mm->stack_vm = mm->total_vm = vma_pages(mpnt);
 	}
 
-	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
-		struct page *page = bprm->page[i];
-		if (page) {
-			bprm->page[i] = NULL;
-			install_arg_page(mpnt, page, stack_base);
-		}
-		stack_base += PAGE_SIZE;
-	}
+#ifdef CONFIG_STACK_GROWSUP
+	stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+#else
+	stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+#endif
+	ret = expand_stack(vma, stack_base);
+	if (ret)
+		ret = -EFAULT;
+
+out_unlock:
 	up_write(&mm->mmap_sem);
-	
 	return 0;
 }
-
 EXPORT_SYMBOL(setup_arg_pages);
 
-#define free_arg_pages(bprm) do { } while (0)
-
-#else
-
-static inline void free_arg_pages(struct linux_binprm *bprm)
-{
-	int i;
-
-	for (i = 0; i < MAX_ARG_PAGES; i++) {
-		if (bprm->page[i])
-			__free_page(bprm->page[i]);
-		bprm->page[i] = NULL;
-	}
-}
-
 #endif /* CONFIG_MMU */
 
 struct file *open_exec(const char *name)
@@ -864,9 +1058,9 @@
 	current->sas_ss_sp = current->sas_ss_size = 0;
 
 	if (current->euid == current->uid && current->egid == current->gid)
-		current->mm->dumpable = 1;
+		set_dumpable(current->mm, 1);
 	else
-		current->mm->dumpable = suid_dumpable;
+		set_dumpable(current->mm, suid_dumpable);
 
 	name = bprm->filename;
 
@@ -894,7 +1088,7 @@
 	    file_permission(bprm->file, MAY_READ) ||
 	    (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
 		suid_keys(current);
-		current->mm->dumpable = suid_dumpable;
+		set_dumpable(current->mm, suid_dumpable);
 	}
 
 	/* An exec changes our domain. We are no longer part of the thread
@@ -1000,43 +1194,42 @@
  * points to; chop off the first by relocating bprm->p to right after
  * the first '\0' encountered.
  */
-void remove_arg_zero(struct linux_binprm *bprm)
+int remove_arg_zero(struct linux_binprm *bprm)
 {
-	if (bprm->argc) {
-		char ch;
+	int ret = 0;
+	unsigned long offset;
+	char *kaddr;
+	struct page *page;
 
-		do {
-			unsigned long offset;
-			unsigned long index;
-			char *kaddr;
-			struct page *page;
+	if (!bprm->argc)
+		return 0;
 
-			offset = bprm->p & ~PAGE_MASK;
-			index = bprm->p >> PAGE_SHIFT;
+	do {
+		offset = bprm->p & ~PAGE_MASK;
+		page = get_arg_page(bprm, bprm->p, 0);
+		if (!page) {
+			ret = -EFAULT;
+			goto out;
+		}
+		kaddr = kmap_atomic(page, KM_USER0);
 
-			page = bprm->page[index];
-			kaddr = kmap_atomic(page, KM_USER0);
+		for (; offset < PAGE_SIZE && kaddr[offset];
+				offset++, bprm->p++)
+			;
 
-			/* run through page until we reach end or find NUL */
-			do {
-				ch = *(kaddr + offset);
+		kunmap_atomic(kaddr, KM_USER0);
+		put_arg_page(page);
 
-				/* discard that character... */
-				bprm->p++;
-				offset++;
-			} while (offset < PAGE_SIZE && ch != '\0');
+		if (offset == PAGE_SIZE)
+			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
+	} while (offset == PAGE_SIZE);
 
-			kunmap_atomic(kaddr, KM_USER0);
+	bprm->p++;
+	bprm->argc--;
+	ret = 0;
 
-			/* free the old page */
-			if (offset == PAGE_SIZE) {
-				__free_page(page);
-				bprm->page[index] = NULL;
-			}
-		} while (ch != '\0');
-
-		bprm->argc--;
-	}
+out:
+	return ret;
 }
 EXPORT_SYMBOL(remove_arg_zero);
 
@@ -1062,7 +1255,7 @@
 		fput(bprm->file);
 		bprm->file = NULL;
 
-	        loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
+		loader = bprm->vma->vm_end - sizeof(void *);
 
 		file = open_exec("/sbin/loader");
 		retval = PTR_ERR(file);
@@ -1154,8 +1347,8 @@
 {
 	struct linux_binprm *bprm;
 	struct file *file;
+	unsigned long env_p;
 	int retval;
-	int i;
 
 	retval = -ENOMEM;
 	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
@@ -1169,25 +1362,19 @@
 
 	sched_exec();
 
-	bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
-
 	bprm->file = file;
 	bprm->filename = filename;
 	bprm->interp = filename;
-	bprm->mm = mm_alloc();
-	retval = -ENOMEM;
-	if (!bprm->mm)
+
+	retval = bprm_mm_init(bprm);
+	if (retval)
 		goto out_file;
 
-	retval = init_new_context(current, bprm->mm);
-	if (retval < 0)
-		goto out_mm;
-
-	bprm->argc = count(argv, bprm->p / sizeof(void *));
+	bprm->argc = count(argv, MAX_ARG_STRINGS);
 	if ((retval = bprm->argc) < 0)
 		goto out_mm;
 
-	bprm->envc = count(envp, bprm->p / sizeof(void *));
+	bprm->envc = count(envp, MAX_ARG_STRINGS);
 	if ((retval = bprm->envc) < 0)
 		goto out_mm;
 
@@ -1208,15 +1395,16 @@
 	if (retval < 0)
 		goto out;
 
+	env_p = bprm->p;
 	retval = copy_strings(bprm->argc, argv, bprm);
 	if (retval < 0)
 		goto out;
+	bprm->argv_len = env_p - bprm->p;
 
 	retval = search_binary_handler(bprm,regs);
 	if (retval >= 0) {
-		free_arg_pages(bprm);
-
 		/* execve success */
+		free_arg_pages(bprm);
 		security_bprm_free(bprm);
 		acct_update_integrals(current);
 		kfree(bprm);
@@ -1224,26 +1412,19 @@
 	}
 
 out:
-	/* Something went wrong, return the inode and free the argument pages*/
-	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
-		struct page * page = bprm->page[i];
-		if (page)
-			__free_page(page);
-	}
-
+	free_arg_pages(bprm);
 	if (bprm->security)
 		security_bprm_free(bprm);
 
 out_mm:
 	if (bprm->mm)
-		mmdrop(bprm->mm);
+		mmput (bprm->mm);
 
 out_file:
 	if (bprm->file) {
 		allow_write_access(bprm->file);
 		fput(bprm->file);
 	}
-
 out_kfree:
 	kfree(bprm);
 
@@ -1484,6 +1665,56 @@
 	return core_waiters;
 }
 
+/*
+ * set_dumpable converts traditional three-value dumpable to two flags and
+ * stores them into mm->flags.  It modifies the lower two bits of mm->flags, but
+ * these bits are not changed atomically, so get_dumpable can observe an
+ * intermediate state.  To avoid unexpected behavior, the bits are set and
+ * cleared in an order that guarantees get_dumpable returns either the old
+ * value or the new one.
+ *
+ * dumpable |   mm->flags (binary)
+ * old  new | initial interim  final
+ * ---------+-----------------------
+ *  0    1  |   00      01      01
+ *  0    2  |   00      10(*)   11
+ *  1    0  |   01      00      00
+ *  1    2  |   01      11      11
+ *  2    0  |   11      10(*)   00
+ *  2    1  |   11      11      01
+ *
+ * (*) get_dumpable regards interim value of 10 as 11.
+ */
+void set_dumpable(struct mm_struct *mm, int value)
+{
+	switch (value) {
+	case 0:
+		clear_bit(MMF_DUMPABLE, &mm->flags);
+		smp_wmb();
+		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
+		break;
+	case 1:
+		set_bit(MMF_DUMPABLE, &mm->flags);
+		smp_wmb();
+		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
+		break;
+	case 2:
+		set_bit(MMF_DUMP_SECURELY, &mm->flags);
+		smp_wmb();
+		set_bit(MMF_DUMPABLE, &mm->flags);
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(set_dumpable);
+
+int get_dumpable(struct mm_struct *mm)
+{
+	int ret;
+
+	ret = mm->flags & 0x3;
+	return (ret >= 2) ? 2 : ret;
+}
+
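To make the ordering guarantee above concrete, a small illustration (assuming MMF_DUMPABLE is bit 0 and MMF_DUMP_SECURELY is bit 1 of mm->flags, which is what the 0x3 mask in get_dumpable() implies):

	/*
	 * Illustration only: a get_dumpable() racing with set_dumpable(mm, 2)
	 * when the old value was 0 can observe
	 *
	 *	mm->flags (binary):   00  ->  10  ->  11
	 *	get_dumpable():        0  ->   2  ->   2
	 *
	 * i.e. only the old value (0) or the new value (2), never a third one,
	 * because the interim pattern 10 is clamped to 2 by the "ret >= 2" test.
	 */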
 int do_coredump(long signr, int exit_code, struct pt_regs * regs)
 {
 	char corename[CORENAME_MAX_SIZE + 1];
@@ -1502,7 +1733,7 @@
 	if (!binfmt || !binfmt->core_dump)
 		goto fail;
 	down_write(&mm->mmap_sem);
-	if (!mm->dumpable) {
+	if (!get_dumpable(mm)) {
 		up_write(&mm->mmap_sem);
 		goto fail;
 	}
@@ -1512,11 +1743,11 @@
 	 *	process nor do we know its entire history. We only know it
 	 *	was tainted so we dump it as root in mode 2.
 	 */
-	if (mm->dumpable == 2) {	/* Setuid core dump mode */
+	if (get_dumpable(mm) == 2) {	/* Setuid core dump mode */
 		flag = O_EXCL;		/* Stop rewrite attacks */
 		current->fsuid = 0;	/* Dump root private */
 	}
-	mm->dumpable = 0;
+	set_dumpable(mm, 0);
 
 	retval = coredump_wait(exit_code);
 	if (retval < 0)
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index e98f6cd..8adb32a 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -1,15 +1,45 @@
 
+#include <linux/exportfs.h>
 #include <linux/fs.h>
 #include <linux/file.h>
 #include <linux/module.h>
+#include <linux/mount.h>
 #include <linux/namei.h>
 
-struct export_operations export_op_default;
-
-#define	CALL(ops,fun) ((ops->fun)?(ops->fun):export_op_default.fun)
-
 #define dprintk(fmt, args...) do{}while(0)
 
+
+static int get_name(struct dentry *dentry, char *name,
+		struct dentry *child);
+
+
+static struct dentry *exportfs_get_dentry(struct super_block *sb, void *obj)
+{
+	struct dentry *result = ERR_PTR(-ESTALE);
+
+	if (sb->s_export_op->get_dentry) {
+		result = sb->s_export_op->get_dentry(sb, obj);
+		if (!result)
+			result = ERR_PTR(-ESTALE);
+	}
+
+	return result;
+}
+
+static int exportfs_get_name(struct dentry *dir, char *name,
+		struct dentry *child)
+{
+	struct export_operations *nop = dir->d_sb->s_export_op;
+
+	if (nop->get_name)
+		return nop->get_name(dir, name, child);
+	else
+		return get_name(dir, name, child);
+}
+
+/*
+ * Check if the dentry or any of its aliases is acceptable.
+ */
 static struct dentry *
 find_acceptable_alias(struct dentry *result,
 		int (*acceptable)(void *context, struct dentry *dentry),
@@ -17,6 +47,9 @@
 {
 	struct dentry *dentry, *toput = NULL;
 
+	if (acceptable(context, result))
+		return result;
+
 	spin_lock(&dcache_lock);
 	list_for_each_entry(dentry, &result->d_inode->i_dentry, d_alias) {
 		dget_locked(dentry);
@@ -37,6 +70,150 @@
 	return NULL;
 }
 
+/*
+ * Find root of a disconnected subtree and return a reference to it.
+ */
+static struct dentry *
+find_disconnected_root(struct dentry *dentry)
+{
+	dget(dentry);
+	spin_lock(&dentry->d_lock);
+	while (!IS_ROOT(dentry) &&
+	       (dentry->d_parent->d_flags & DCACHE_DISCONNECTED)) {
+		struct dentry *parent = dentry->d_parent;
+		dget(parent);
+		spin_unlock(&dentry->d_lock);
+		dput(dentry);
+		dentry = parent;
+		spin_lock(&dentry->d_lock);
+	}
+	spin_unlock(&dentry->d_lock);
+	return dentry;
+}
+
+
+/*
+ * Make sure target_dir is fully connected to the dentry tree.
+ *
+ * It may already be, as the flag isn't always updated when connection happens.
+ */
+static int
+reconnect_path(struct super_block *sb, struct dentry *target_dir)
+{
+	char nbuf[NAME_MAX+1];
+	int noprogress = 0;
+	int err = -ESTALE;
+
+	/*
+	 * It is possible that a confused file system might not let us complete
+	 * the path to the root.  For example, if get_parent returns a directory
+	 * in which we cannot find a name for the child.  While this implies a
+	 * very sick filesystem we don't want it to cause knfsd to spin.  Hence
+	 * the noprogress counter.  If we go through the loop 10 times (2 is
+	 * probably enough) without getting anywhere, we just give up
+	 */
+	while (target_dir->d_flags & DCACHE_DISCONNECTED && noprogress++ < 10) {
+		struct dentry *pd = find_disconnected_root(target_dir);
+
+		if (!IS_ROOT(pd)) {
+			/* must have found a connected parent - great */
+			spin_lock(&pd->d_lock);
+			pd->d_flags &= ~DCACHE_DISCONNECTED;
+			spin_unlock(&pd->d_lock);
+			noprogress = 0;
+		} else if (pd == sb->s_root) {
+			printk(KERN_ERR "export: Eeek filesystem root is not connected, impossible\n");
+			spin_lock(&pd->d_lock);
+			pd->d_flags &= ~DCACHE_DISCONNECTED;
+			spin_unlock(&pd->d_lock);
+			noprogress = 0;
+		} else {
+			/*
+			 * We have hit the top of a disconnected path, try to
+			 * find parent and connect.
+			 *
+			 * Racing with some other process renaming a directory
+			 * isn't much of a problem here.  If someone renames
+			 * the directory, it will end up properly connected,
+			 * which is what we want
+			 *
+			 * Getting the parent can't be supported generically,
+			 * the locking is too icky.
+			 *
+			 * Instead we just return EACCES.  If server reboots
+			 * or inodes get flushed, you lose
+			 */
+			struct dentry *ppd = ERR_PTR(-EACCES);
+			struct dentry *npd;
+
+			mutex_lock(&pd->d_inode->i_mutex);
+			if (sb->s_export_op->get_parent)
+				ppd = sb->s_export_op->get_parent(pd);
+			mutex_unlock(&pd->d_inode->i_mutex);
+
+			if (IS_ERR(ppd)) {
+				err = PTR_ERR(ppd);
+				dprintk("%s: get_parent of %ld failed, err %d\n",
+					__FUNCTION__, pd->d_inode->i_ino, err);
+				dput(pd);
+				break;
+			}
+
+			dprintk("%s: find name of %lu in %lu\n", __FUNCTION__,
+				pd->d_inode->i_ino, ppd->d_inode->i_ino);
+			err = exportfs_get_name(ppd, nbuf, pd);
+			if (err) {
+				dput(ppd);
+				dput(pd);
+				if (err == -ENOENT)
+					/* some race between get_parent and
+					 * get_name?  just try again
+					 */
+					continue;
+				break;
+			}
+			dprintk("%s: found name: %s\n", __FUNCTION__, nbuf);
+			mutex_lock(&ppd->d_inode->i_mutex);
+			npd = lookup_one_len(nbuf, ppd, strlen(nbuf));
+			mutex_unlock(&ppd->d_inode->i_mutex);
+			if (IS_ERR(npd)) {
+				err = PTR_ERR(npd);
+				dprintk("%s: lookup failed: %d\n",
+					__FUNCTION__, err);
+				dput(ppd);
+				dput(pd);
+				break;
+			}
+			/* we didn't really want npd, we really wanted
+			 * a side-effect of the lookup.
+			 * hopefully, npd == pd, though it isn't really
+			 * a problem if it isn't
+			 */
+			if (npd == pd)
+				noprogress = 0;
+			else
+				printk("%s: npd != pd\n", __FUNCTION__);
+			dput(npd);
+			dput(ppd);
+			if (IS_ROOT(pd)) {
+				/* something went wrong, we have to give up */
+				dput(pd);
+				break;
+			}
+		}
+		dput(pd);
+	}
+
+	if (target_dir->d_flags & DCACHE_DISCONNECTED) {
+		/* something went wrong - oh-well */
+		if (!err)
+			err = -ESTALE;
+		return err;
+	}
+
+	return 0;
+}
+
 /**
  * find_exported_dentry - helper routine to implement export_operations->decode_fh
  * @sb:		The &super_block identifying the filesystem
@@ -74,184 +251,58 @@
 		     int (*acceptable)(void *context, struct dentry *de),
 		     void *context)
 {
-	struct dentry *result = NULL;
-	struct dentry *target_dir;
-	int err;
-	struct export_operations *nops = sb->s_export_op;
-	struct dentry *alias;
-	int noprogress;
-	char nbuf[NAME_MAX+1];
+	struct dentry *result, *alias;
+	int err = -ESTALE;
 
 	/*
 	 * Attempt to find the inode.
 	 */
-	result = CALL(sb->s_export_op,get_dentry)(sb,obj);
-	err = -ESTALE;
-	if (result == NULL)
-		goto err_out;
-	if (IS_ERR(result)) {
-		err = PTR_ERR(result);
-		goto err_out;
-	}
-	if (S_ISDIR(result->d_inode->i_mode) &&
-	    (result->d_flags & DCACHE_DISCONNECTED)) {
-		/* it is an unconnected directory, we must connect it */
-		;
-	} else {
-		if (acceptable(context, result))
-			return result;
-		if (S_ISDIR(result->d_inode->i_mode)) {
+	result = exportfs_get_dentry(sb, obj);
+	if (IS_ERR(result))
+		return result;
+
+	if (S_ISDIR(result->d_inode->i_mode)) {
+		if (!(result->d_flags & DCACHE_DISCONNECTED)) {
+			if (acceptable(context, result))
+				return result;
 			err = -EACCES;
 			goto err_result;
 		}
 
+		err = reconnect_path(sb, result);
+		if (err)
+			goto err_result;
+	} else {
+		struct dentry *target_dir, *nresult;
+		char nbuf[NAME_MAX+1];
+
 		alias = find_acceptable_alias(result, acceptable, context);
 		if (alias)
 			return alias;
-	}			
 
-	/* It's a directory, or we are required to confirm the file's
-	 * location in the tree based on the parent information
- 	 */
-	dprintk("find_exported_dentry: need to look harder for %s/%d\n",sb->s_id,*(int*)obj);
-	if (S_ISDIR(result->d_inode->i_mode))
-		target_dir = dget(result);
-	else {
 		if (parent == NULL)
 			goto err_result;
 
-		target_dir = CALL(sb->s_export_op,get_dentry)(sb,parent);
-		if (IS_ERR(target_dir))
+		target_dir = exportfs_get_dentry(sb,parent);
+		if (IS_ERR(target_dir)) {
 			err = PTR_ERR(target_dir);
-		if (target_dir == NULL || IS_ERR(target_dir))
 			goto err_result;
-	}
-	/*
-	 * Now we need to make sure that target_dir is properly connected.
-	 * It may already be, as the flag isn't always updated when connection
-	 * happens.
-	 * So, we walk up parent links until we find a connected directory,
-	 * or we run out of directories.  Then we find the parent, find
-	 * the name of the child in that parent, and do a lookup.
-	 * This should connect the child into the parent
-	 * We then repeat.
-	 */
-
-	/* it is possible that a confused file system might not let us complete 
-	 * the path to the root.  For example, if get_parent returns a directory
-	 * in which we cannot find a name for the child.  While this implies a
-	 * very sick filesystem we don't want it to cause knfsd to spin.  Hence
-	 * the noprogress counter.  If we go through the loop 10 times (2 is
-	 * probably enough) without getting anywhere, we just give up
-	 */
-	noprogress= 0;
-	while (target_dir->d_flags & DCACHE_DISCONNECTED && noprogress++ < 10) {
-		struct dentry *pd = target_dir;
-
-		dget(pd);
-		spin_lock(&pd->d_lock);
-		while (!IS_ROOT(pd) &&
-				(pd->d_parent->d_flags&DCACHE_DISCONNECTED)) {
-			struct dentry *parent = pd->d_parent;
-
-			dget(parent);
-			spin_unlock(&pd->d_lock);
-			dput(pd);
-			pd = parent;
-			spin_lock(&pd->d_lock);
 		}
-		spin_unlock(&pd->d_lock);
 
-		if (!IS_ROOT(pd)) {
-			/* must have found a connected parent - great */
-			spin_lock(&pd->d_lock);
-			pd->d_flags &= ~DCACHE_DISCONNECTED;
-			spin_unlock(&pd->d_lock);
-			noprogress = 0;
-		} else if (pd == sb->s_root) {
-			printk(KERN_ERR "export: Eeek filesystem root is not connected, impossible\n");
-			spin_lock(&pd->d_lock);
-			pd->d_flags &= ~DCACHE_DISCONNECTED;
-			spin_unlock(&pd->d_lock);
-			noprogress = 0;
-		} else {
-			/* we have hit the top of a disconnected path.  Try
-			 * to find parent and connect
-			 * note: racing with some other process renaming a
-			 * directory isn't much of a problem here.  If someone
-			 * renames the directory, it will end up properly
-			 * connected, which is what we want
-			 */
-			struct dentry *ppd;
-			struct dentry *npd;
-
-			mutex_lock(&pd->d_inode->i_mutex);
-			ppd = CALL(nops,get_parent)(pd);
-			mutex_unlock(&pd->d_inode->i_mutex);
-
-			if (IS_ERR(ppd)) {
-				err = PTR_ERR(ppd);
-				dprintk("find_exported_dentry: get_parent of %ld failed, err %d\n",
-					pd->d_inode->i_ino, err);
-				dput(pd);
-				break;
-			}
-			dprintk("find_exported_dentry: find name of %lu in %lu\n", pd->d_inode->i_ino, ppd->d_inode->i_ino);
-			err = CALL(nops,get_name)(ppd, nbuf, pd);
-			if (err) {
-				dput(ppd);
-				dput(pd);
-				if (err == -ENOENT)
-					/* some race between get_parent and
-					 * get_name?  just try again
-					 */
-					continue;
-				break;
-			}
-			dprintk("find_exported_dentry: found name: %s\n", nbuf);
-			mutex_lock(&ppd->d_inode->i_mutex);
-			npd = lookup_one_len(nbuf, ppd, strlen(nbuf));
-			mutex_unlock(&ppd->d_inode->i_mutex);
-			if (IS_ERR(npd)) {
-				err = PTR_ERR(npd);
-				dprintk("find_exported_dentry: lookup failed: %d\n", err);
-				dput(ppd);
-				dput(pd);
-				break;
-			}
-			/* we didn't really want npd, we really wanted
-			 * a side-effect of the lookup.
-			 * hopefully, npd == pd, though it isn't really
-			 * a problem if it isn't
-			 */
-			if (npd == pd)
-				noprogress = 0;
-			else
-				printk("find_exported_dentry: npd != pd\n");
-			dput(npd);
-			dput(ppd);
-			if (IS_ROOT(pd)) {
-				/* something went wrong, we have to give up */
-				dput(pd);
-				break;
-			}
+		err = reconnect_path(sb, target_dir);
+		if (err) {
+			dput(target_dir);
+			goto err_result;
 		}
-		dput(pd);
-	}
 
-	if (target_dir->d_flags & DCACHE_DISCONNECTED) {
-		/* something went wrong - oh-well */
-		if (!err)
-			err = -ESTALE;
-		goto err_target;
-	}
-	/* if we weren't after a directory, have one more step to go */
-	if (result != target_dir) {
-		struct dentry *nresult;
-		err = CALL(nops,get_name)(target_dir, nbuf, result);
+		/*
+		 * As we weren't after a directory, have one more step to go.
+		 */
+		err = exportfs_get_name(target_dir, nbuf, result);
 		if (!err) {
 			mutex_lock(&target_dir->d_inode->i_mutex);
-			nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
+			nresult = lookup_one_len(nbuf, target_dir,
+						 strlen(nbuf));
 			mutex_unlock(&target_dir->d_inode->i_mutex);
 			if (!IS_ERR(nresult)) {
 				if (nresult->d_inode) {
@@ -261,11 +312,8 @@
 					dput(nresult);
 			}
 		}
+		dput(target_dir);
 	}
-	dput(target_dir);
-	/* now result is properly connected, it is our best bet */
-	if (acceptable(context, result))
-		return result;
 
 	alias = find_acceptable_alias(result, acceptable, context);
 	if (alias)
@@ -275,32 +323,16 @@
 	dput(result);
 	/* It might be justifiable to return ESTALE here,
 	 * but the filehandle at-least looks reasonable good
-	 * and it just be a permission problem, so returning
+	 * and it may just be a permission problem, so returning
 	 * -EACCESS is safer
 	 */
 	return ERR_PTR(-EACCES);
 
- err_target:
-	dput(target_dir);
  err_result:
 	dput(result);
- err_out:
 	return ERR_PTR(err);
 }
 
-
-
-static struct dentry *get_parent(struct dentry *child)
-{
-	/* get_parent cannot be supported generically, the locking
-	 * is too icky.
-	 * instead, we just return EACCES.  If server reboots or inodes
-	 * get flushed, you lose
-	 */
-	return ERR_PTR(-EACCES);
-}
-
-
 struct getdents_callback {
 	char *name;		/* name that was found. It already points to a
 				   buffer NAME_MAX+1 is size */
@@ -390,61 +422,6 @@
 	return error;
 }
 
-
-static struct dentry *export_iget(struct super_block *sb, unsigned long ino, __u32 generation)
-{
-
-	/* iget isn't really right if the inode is currently unallocated!!
-	 * This should really all be done inside each filesystem
-	 *
-	 * ext2fs' read_inode has been strengthed to return a bad_inode if
-	 * the inode had been deleted.
-	 *
-	 * Currently we don't know the generation for parent directory, so
-	 * a generation of 0 means "accept any"
-	 */
-	struct inode *inode;
-	struct dentry *result;
-	if (ino == 0)
-		return ERR_PTR(-ESTALE);
-	inode = iget(sb, ino);
-	if (inode == NULL)
-		return ERR_PTR(-ENOMEM);
-	if (is_bad_inode(inode)
-	    || (generation && inode->i_generation != generation)
-		) {
-		/* we didn't find the right inode.. */
-		dprintk("fh_verify: Inode %lu, Bad count: %d %d or version  %u %u\n",
-			inode->i_ino,
-			inode->i_nlink, atomic_read(&inode->i_count),
-			inode->i_generation,
-			generation);
-
-		iput(inode);
-		return ERR_PTR(-ESTALE);
-	}
-	/* now to find a dentry.
-	 * If possible, get a well-connected one
-	 */
-	result = d_alloc_anon(inode);
-	if (!result) {
-		iput(inode);
-		return ERR_PTR(-ENOMEM);
-	}
-	return result;
-}
-
-
-static struct dentry *get_object(struct super_block *sb, void *vobjp)
-{
-	__u32 *objp = vobjp;
-	unsigned long ino = objp[0];
-	__u32 generation = objp[1];
-
-	return export_iget(sb, ino, generation);
-}
-
-
 /**
  * export_encode_fh - default export_operations->encode_fh function
  * @dentry:  the dentry to encode
@@ -517,16 +494,40 @@
 				   acceptable, context);
 }
 
-struct export_operations export_op_default = {
-	.decode_fh	= export_decode_fh,
-	.encode_fh	= export_encode_fh,
+int exportfs_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len,
+		int connectable)
+{
+ 	struct export_operations *nop = dentry->d_sb->s_export_op;
+	int error;
 
-	.get_name	= get_name,
-	.get_parent	= get_parent,
-	.get_dentry	= get_object,
-};
+	if (nop->encode_fh)
+		error = nop->encode_fh(dentry, fh, max_len, connectable);
+	else
+		error = export_encode_fh(dentry, fh, max_len, connectable);
 
-EXPORT_SYMBOL(export_op_default);
+	return error;
+}
+EXPORT_SYMBOL_GPL(exportfs_encode_fh);
+
+struct dentry *exportfs_decode_fh(struct vfsmount *mnt, __u32 *fh, int fh_len,
+		int fileid_type, int (*acceptable)(void *, struct dentry *),
+		void *context)
+{
+	struct export_operations *nop = mnt->mnt_sb->s_export_op;
+	struct dentry *result;
+
+	if (nop->decode_fh) {
+		result = nop->decode_fh(mnt->mnt_sb, fh, fh_len, fileid_type,
+			acceptable, context);
+	} else {
+		result = export_decode_fh(mnt->mnt_sb, fh, fh_len, fileid_type,
+			acceptable, context);
+	}
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(exportfs_decode_fh);
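A minimal sketch of how a consumer such as knfsd might drive this pair of helpers; the buffer size, the trivial acceptable() callback and the round-trip framing are assumptions for illustration, not part of this patch:

	static int accept_any(void *context, struct dentry *dentry)
	{
		/* illustrative: accept whatever dentry the decode finds */
		return 1;
	}

	static struct dentry *fh_roundtrip(struct vfsmount *mnt,
					   struct dentry *dentry)
	{
		__u32 fh[16];		/* arbitrary size for the example */
		int max_len = 16;	/* in 32-bit words */
		int type;

		/* returns the fileid type used and updates max_len;
		 * error handling elided for brevity */
		type = exportfs_encode_fh(dentry, fh, &max_len, 0);

		/* hand the opaque handle back and look the dentry up again */
		return exportfs_decode_fh(mnt, fh, max_len, type,
					  accept_any, NULL);
	}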
+
 EXPORT_SYMBOL(find_exported_dentry);
 
 MODULE_LICENSE("GPL");
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 7c420b8..e58669e 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -464,7 +464,7 @@
 
 	if (!test_opt(inode->i_sb, POSIX_ACL))
 		return -EOPNOTSUPP;
-	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+	if (!is_owner_or_cap(inode))
 		return -EPERM;
 
 	if (value) {
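The repeated open-coded owner-or-CAP_FOWNER test in this and the following ext2/ext3/ext4 hunks is replaced by is_owner_or_cap(); judging only from the lines it replaces, the helper presumably boils down to the sketch below (an assumption, its definition is not part of this diff):

	/* Assumed shape of the helper, inferred from the checks it replaces */
	#define is_owner_or_cap(inode)	\
		((current->fsuid == (inode)->i_uid) || capable(CAP_FOWNER))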
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index e85c482..3bcd254 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -36,7 +36,7 @@
 		if (IS_RDONLY(inode))
 			return -EROFS;
 
-		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			return -EACCES;
 
 		if (get_user(flags, (int __user *) arg))
@@ -74,7 +74,7 @@
 	case EXT2_IOC_GETVERSION:
 		return put_user(inode->i_generation, (int __user *) arg);
 	case EXT2_IOC_SETVERSION:
-		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			return -EPERM;
 		if (IS_RDONLY(inode))
 			return -EROFS;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index b2efd90..a6b1072 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -25,6 +25,7 @@
 #include <linux/parser.h>
 #include <linux/random.h>
 #include <linux/buffer_head.h>
+#include <linux/exportfs.h>
 #include <linux/smp_lock.h>
 #include <linux/vfs.h>
 #include <linux/seq_file.h>
@@ -882,13 +883,11 @@
 		goto failed_mount;
 	}
 	bgl_lock_init(&sbi->s_blockgroup_lock);
-	sbi->s_debts = kmalloc(sbi->s_groups_count * sizeof(*sbi->s_debts),
-			       GFP_KERNEL);
+	sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
 	if (!sbi->s_debts) {
 		printk ("EXT2-fs: not enough memory\n");
 		goto failed_mount_group_desc;
 	}
-	memset(sbi->s_debts, 0, sbi->s_groups_count * sizeof(*sbi->s_debts));
 	for (i = 0; i < db_count; i++) {
 		block = descriptor_loc(sb, logic_sb_block, i);
 		sbi->s_group_desc[i] = sb_bread(sb, block);
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 1e5038d..d34e996 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -489,7 +489,7 @@
 
 	if (!test_opt(inode->i_sb, POSIX_ACL))
 		return -EOPNOTSUPP;
-	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+	if (!is_owner_or_cap(inode))
 		return -EPERM;
 
 	if (value) {
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 8528698..c00723a 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -136,12 +136,14 @@
 		err = ext3_get_blocks_handle(NULL, inode, blk, 1,
 						&map_bh, 0, 0);
 		if (err > 0) {
-			page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
-				&filp->f_ra,
-				filp,
-				map_bh.b_blocknr >>
-					(PAGE_CACHE_SHIFT - inode->i_blkbits),
-				1);
+			pgoff_t index = map_bh.b_blocknr >>
+					(PAGE_CACHE_SHIFT - inode->i_blkbits);
+			if (!ra_has_index(&filp->f_ra, index))
+				page_cache_sync_readahead(
+					sb->s_bdev->bd_inode->i_mapping,
+					&filp->f_ra, filp,
+					index, 1);
+			filp->f_ra.prev_index = index;
 			bh = ext3_bread(NULL, inode, blk, 0, &err);
 		}
 
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 965006d..4a2a02c 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -41,7 +41,7 @@
 		if (IS_RDONLY(inode))
 			return -EROFS;
 
-		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			return -EACCES;
 
 		if (get_user(flags, (int __user *) arg))
@@ -122,7 +122,7 @@
 		__u32 generation;
 		int err;
 
-		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			return -EPERM;
 		if (IS_RDONLY(inode))
 			return -EROFS;
@@ -181,7 +181,7 @@
 		if (IS_RDONLY(inode))
 			return -EROFS;
 
-		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			return -EACCES;
 
 		if (get_user(rsv_window_size, (int __user *)arg))
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 51d1c45..4f84dc8 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -29,6 +29,7 @@
 #include <linux/parser.h>
 #include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
+#include <linux/exportfs.h>
 #include <linux/vfs.h>
 #include <linux/random.h>
 #include <linux/mount.h>
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 9e88254..a8bae8c 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -489,7 +489,7 @@
 
 	if (!test_opt(inode->i_sb, POSIX_ACL))
 		return -EOPNOTSUPP;
-	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+	if (!is_owner_or_cap(inode))
 		return -EPERM;
 
 	if (value) {
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 9de54ae..e53b4af 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -517,7 +517,7 @@
 		/*
 		 * An HJ special.  This is expensive...
 		 */
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
 		jbd_unlock_bh_state(bitmap_bh);
 		{
 			struct buffer_head *debug_bh;
@@ -1597,7 +1597,7 @@
 
 	performed_allocation = 1;
 
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
 	{
 		struct buffer_head *debug_bh;
 
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index e8ad06e..3ab01c0 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -135,12 +135,14 @@
 		map_bh.b_state = 0;
 		err = ext4_get_blocks_wrap(NULL, inode, blk, 1, &map_bh, 0, 0);
 		if (err > 0) {
-			page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
-				&filp->f_ra,
-				filp,
-				map_bh.b_blocknr >>
-					(PAGE_CACHE_SHIFT - inode->i_blkbits),
-				1);
+			pgoff_t index = map_bh.b_blocknr >>
+					(PAGE_CACHE_SHIFT - inode->i_blkbits);
+			if (!ra_has_index(&filp->f_ra, index))
+				page_cache_sync_readahead(
+					sb->s_bdev->bd_inode->i_mapping,
+					&filp->f_ra, filp,
+					index, 1);
+			filp->f_ra.prev_index = index;
 			bh = ext4_bread(NULL, inode, blk, 0, &err);
 		}
 
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index b9ce2412..750c46f 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -39,6 +39,7 @@
 #include <linux/quotaops.h>
 #include <linux/string.h>
 #include <linux/slab.h>
+#include <linux/falloc.h>
 #include <linux/ext4_fs_extents.h>
 #include <asm/uaccess.h>
 
@@ -91,36 +92,6 @@
 	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
 }
 
-static int ext4_ext_check_header(const char *function, struct inode *inode,
-				struct ext4_extent_header *eh)
-{
-	const char *error_msg = NULL;
-
-	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
-		error_msg = "invalid magic";
-		goto corrupted;
-	}
-	if (unlikely(eh->eh_max == 0)) {
-		error_msg = "invalid eh_max";
-		goto corrupted;
-	}
-	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
-		error_msg = "invalid eh_entries";
-		goto corrupted;
-	}
-	return 0;
-
-corrupted:
-	ext4_error(inode->i_sb, function,
-			"bad header in inode #%lu: %s - magic %x, "
-			"entries %u, max %u, depth %u",
-			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
-			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
-			le16_to_cpu(eh->eh_depth));
-
-	return -EIO;
-}
-
 static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
 {
 	int err;
@@ -269,6 +240,70 @@
 	return size;
 }
 
+static int
+ext4_ext_max_entries(struct inode *inode, int depth)
+{
+	int max;
+
+	if (depth == ext_depth(inode)) {
+		if (depth == 0)
+			max = ext4_ext_space_root(inode);
+		else
+			max = ext4_ext_space_root_idx(inode);
+	} else {
+		if (depth == 0)
+			max = ext4_ext_space_block(inode);
+		else
+			max = ext4_ext_space_block_idx(inode);
+	}
+
+	return max;
+}
+
+static int __ext4_ext_check_header(const char *function, struct inode *inode,
+					struct ext4_extent_header *eh,
+					int depth)
+{
+	const char *error_msg;
+	int max = 0;
+
+	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
+		error_msg = "invalid magic";
+		goto corrupted;
+	}
+	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
+		error_msg = "unexpected eh_depth";
+		goto corrupted;
+	}
+	if (unlikely(eh->eh_max == 0)) {
+		error_msg = "invalid eh_max";
+		goto corrupted;
+	}
+	max = ext4_ext_max_entries(inode, depth);
+	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
+		error_msg = "too large eh_max";
+		goto corrupted;
+	}
+	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
+		error_msg = "invalid eh_entries";
+		goto corrupted;
+	}
+	return 0;
+
+corrupted:
+	ext4_error(inode->i_sb, function,
+			"bad header in inode #%lu: %s - magic %x, "
+			"entries %u, max %u(%u), depth %u(%u)",
+			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
+			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
+			max, le16_to_cpu(eh->eh_depth), depth);
+
+	return -EIO;
+}
+
+#define ext4_ext_check_header(inode, eh, depth)	\
+	__ext4_ext_check_header(__FUNCTION__, inode, eh, depth)
+
 #ifdef EXT_DEBUG
 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
 {
@@ -282,7 +317,7 @@
 		} else if (path->p_ext) {
 			ext_debug("  %d:%d:%llu ",
 				  le32_to_cpu(path->p_ext->ee_block),
-				  le16_to_cpu(path->p_ext->ee_len),
+				  ext4_ext_get_actual_len(path->p_ext),
 				  ext_pblock(path->p_ext));
 		} else
 			ext_debug("  []");
@@ -305,7 +340,7 @@
 
 	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
 		ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
-			  le16_to_cpu(ex->ee_len), ext_pblock(ex));
+			  ext4_ext_get_actual_len(ex), ext_pblock(ex));
 	}
 	ext_debug("\n");
 }
@@ -329,6 +364,7 @@
 /*
  * ext4_ext_binsearch_idx:
  * binary search for the closest index of the given block
+ * the header must be checked before calling this
  */
 static void
 ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
@@ -336,27 +372,25 @@
 	struct ext4_extent_header *eh = path->p_hdr;
 	struct ext4_extent_idx *r, *l, *m;
 
-	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
-	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
-	BUG_ON(le16_to_cpu(eh->eh_entries) <= 0);
 
 	ext_debug("binsearch for %d(idx):  ", block);
 
 	l = EXT_FIRST_INDEX(eh) + 1;
-	r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;
+	r = EXT_LAST_INDEX(eh);
 	while (l <= r) {
 		m = l + (r - l) / 2;
 		if (block < le32_to_cpu(m->ei_block))
 			r = m - 1;
 		else
 			l = m + 1;
-		ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ei_block,
-				m, m->ei_block, r, r->ei_block);
+		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
+				m, le32_to_cpu(m->ei_block),
+				r, le32_to_cpu(r->ei_block));
 	}
 
 	path->p_idx = l - 1;
 	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
-		  idx_block(path->p_idx));
+		  idx_pblock(path->p_idx));
 
 #ifdef CHECK_BINSEARCH
 	{
@@ -388,6 +422,7 @@
 /*
  * ext4_ext_binsearch:
  * binary search for closest extent of the given block
+ * the header must be checked before calling this
  */
 static void
 ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
@@ -395,9 +430,6 @@
 	struct ext4_extent_header *eh = path->p_hdr;
 	struct ext4_extent *r, *l, *m;
 
-	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
-	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
-
 	if (eh->eh_entries == 0) {
 		/*
 		 * this leaf is empty:
@@ -409,7 +441,7 @@
 	ext_debug("binsearch for %d:  ", block);
 
 	l = EXT_FIRST_EXTENT(eh) + 1;
-	r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1;
+	r = EXT_LAST_EXTENT(eh);
 
 	while (l <= r) {
 		m = l + (r - l) / 2;
@@ -417,15 +449,16 @@
 			r = m - 1;
 		else
 			l = m + 1;
-		ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ee_block,
-				m, m->ee_block, r, r->ee_block);
+		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
+				m, le32_to_cpu(m->ee_block),
+				r, le32_to_cpu(r->ee_block));
 	}
 
 	path->p_ext = l - 1;
 	ext_debug("  -> %d:%llu:%d ",
 			le32_to_cpu(path->p_ext->ee_block),
 			ext_pblock(path->p_ext),
-			le16_to_cpu(path->p_ext->ee_len));
+			ext4_ext_get_actual_len(path->p_ext));
 
 #ifdef CHECK_BINSEARCH
 	{
@@ -468,11 +501,10 @@
 	short int depth, i, ppos = 0, alloc = 0;
 
 	eh = ext_inode_hdr(inode);
-	BUG_ON(eh == NULL);
-	if (ext4_ext_check_header(__FUNCTION__, inode, eh))
+	depth = ext_depth(inode);
+	if (ext4_ext_check_header(inode, eh, depth))
 		return ERR_PTR(-EIO);
 
-	i = depth = ext_depth(inode);
 
 	/* account possible depth increase */
 	if (!path) {
@@ -484,10 +516,12 @@
 	}
 	path[0].p_hdr = eh;
 
+	i = depth;
 	/* walk through the tree */
 	while (i) {
 		ext_debug("depth %d: num %d, max %d\n",
 			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
+
 		ext4_ext_binsearch_idx(inode, path + ppos, block);
 		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
 		path[ppos].p_depth = i;
@@ -504,7 +538,7 @@
 		path[ppos].p_hdr = eh;
 		i--;
 
-		if (ext4_ext_check_header(__FUNCTION__, inode, eh))
+		if (ext4_ext_check_header(inode, eh, i))
 			goto err;
 	}
 
@@ -513,9 +547,6 @@
 	path[ppos].p_ext = NULL;
 	path[ppos].p_idx = NULL;
 
-	if (ext4_ext_check_header(__FUNCTION__, inode, eh))
-		goto err;
-
 	/* find extent */
 	ext4_ext_binsearch(inode, path + ppos, block);
 
@@ -553,7 +584,7 @@
 		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
 			len = (len - 1) * sizeof(struct ext4_extent_idx);
 			len = len < 0 ? 0 : len;
-			ext_debug("insert new index %d after: %d. "
+			ext_debug("insert new index %d after: %llu. "
 					"move %d from 0x%p to 0x%p\n",
 					logical, ptr, len,
 					(curp->p_idx + 1), (curp->p_idx + 2));
@@ -564,7 +595,7 @@
 		/* insert before */
 		len = len * sizeof(struct ext4_extent_idx);
 		len = len < 0 ? 0 : len;
-		ext_debug("insert new index %d before: %d. "
+		ext_debug("insert new index %d before: %llu. "
 				"move %d from 0x%p to 0x%p\n",
 				logical, ptr, len,
 				curp->p_idx, (curp->p_idx + 1));
@@ -686,7 +717,7 @@
 		ext_debug("move %d:%llu:%d in new leaf %llu\n",
 				le32_to_cpu(path[depth].p_ext->ee_block),
 				ext_pblock(path[depth].p_ext),
-				le16_to_cpu(path[depth].p_ext->ee_len),
+				ext4_ext_get_actual_len(path[depth].p_ext),
 				newblock);
 		/*memmove(ex++, path[depth].p_ext++,
 				sizeof(struct ext4_extent));
@@ -764,7 +795,7 @@
 		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
 				EXT_LAST_INDEX(path[i].p_hdr));
 		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
-			ext_debug("%d: move %d:%d in new index %llu\n", i,
+			ext_debug("%d: move %d:%llu in new index %llu\n", i,
 					le32_to_cpu(path[i].p_idx->ei_block),
 					idx_pblock(path[i].p_idx),
 					newblock);
@@ -893,8 +924,13 @@
 	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
 	curp->p_hdr->eh_entries = cpu_to_le16(1);
 	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
-	/* FIXME: it works, but actually path[0] can be index */
-	curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
+
+	if (path[0].p_hdr->eh_depth)
+		curp->p_idx->ei_block =
+			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
+	else
+		curp->p_idx->ei_block =
+			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
 	ext4_idx_store_pblock(curp->p_idx, newblock);
 
 	neh = ext_inode_hdr(inode);
@@ -1106,7 +1142,24 @@
 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
 				struct ext4_extent *ex2)
 {
-	if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len) !=
+	unsigned short ext1_ee_len, ext2_ee_len, max_len;
+
+	/*
+	 * Make sure that either both extents are uninitialized, or
+	 * both are _not_.
+	 */
+	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
+		return 0;
+
+	if (ext4_ext_is_uninitialized(ex1))
+		max_len = EXT_UNINIT_MAX_LEN;
+	else
+		max_len = EXT_INIT_MAX_LEN;
+
+	ext1_ee_len = ext4_ext_get_actual_len(ex1);
+	ext2_ee_len = ext4_ext_get_actual_len(ex2);
+
+	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
 			le32_to_cpu(ex2->ee_block))
 		return 0;
 
@@ -1115,19 +1168,66 @@
 	 * as an RO_COMPAT feature, refuse to merge to extents if
 	 * this can result in the top bit of ee_len being set.
 	 */
-	if (le16_to_cpu(ex1->ee_len) + le16_to_cpu(ex2->ee_len) > EXT_MAX_LEN)
+	if (ext1_ee_len + ext2_ee_len > max_len)
 		return 0;
 #ifdef AGGRESSIVE_TEST
 	if (le16_to_cpu(ex1->ee_len) >= 4)
 		return 0;
 #endif
 
-	if (ext_pblock(ex1) + le16_to_cpu(ex1->ee_len) == ext_pblock(ex2))
+	if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
 		return 1;
 	return 0;
 }
 
 /*
+ * This function tries to merge the "ex" extent to the next extent in the tree.
+ * It always tries to merge towards right. If you want to merge towards
+ * left, pass "ex - 1" as argument instead of "ex".
+ * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
+ * 1 if they got merged.
+ */
+int ext4_ext_try_to_merge(struct inode *inode,
+			  struct ext4_ext_path *path,
+			  struct ext4_extent *ex)
+{
+	struct ext4_extent_header *eh;
+	unsigned int depth, len;
+	int merge_done = 0;
+	int uninitialized = 0;
+
+	depth = ext_depth(inode);
+	BUG_ON(path[depth].p_hdr == NULL);
+	eh = path[depth].p_hdr;
+
+	while (ex < EXT_LAST_EXTENT(eh)) {
+		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
+			break;
+		/* merge with next extent! */
+		if (ext4_ext_is_uninitialized(ex))
+			uninitialized = 1;
+		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+				+ ext4_ext_get_actual_len(ex + 1));
+		if (uninitialized)
+			ext4_ext_mark_uninitialized(ex);
+
+		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
+			len = (EXT_LAST_EXTENT(eh) - ex - 1)
+				* sizeof(struct ext4_extent);
+			memmove(ex + 1, ex + 2, len);
+		}
+		eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) - 1);
+		merge_done = 1;
+		WARN_ON(eh->eh_entries == 0);
+		if (!eh->eh_entries)
+			ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
+			   "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
+	}
+
+	return merge_done;
+}
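A concrete illustration of a successful merge (block numbers invented for the example):

	/*
	 * Example: ex covers logical blocks 100..104 (ee_len = 5) at physical
	 * block 500, and ex + 1 covers 105..107 (ee_len = 3) at physical
	 * block 505; both are initialized.  ext4_ext_try_to_merge() rewrites
	 * ex as
	 *
	 *	ee_block = 100, ee_len = 8, physical start = 500
	 *
	 * shifts the remaining extents left over the slot of ex + 1, and
	 * decrements eh_entries.
	 */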
+
+/*
  * check if a portion of the "newext" extent overlaps with an
  * existing extent.
  *
@@ -1144,7 +1244,7 @@
 	unsigned int ret = 0;
 
 	b1 = le32_to_cpu(newext->ee_block);
-	len1 = le16_to_cpu(newext->ee_len);
+	len1 = ext4_ext_get_actual_len(newext);
 	depth = ext_depth(inode);
 	if (!path[depth].p_ext)
 		goto out;
@@ -1191,8 +1291,9 @@
 	struct ext4_extent *nearex; /* nearest extent */
 	struct ext4_ext_path *npath = NULL;
 	int depth, len, err, next;
+	unsigned uninitialized = 0;
 
-	BUG_ON(newext->ee_len == 0);
+	BUG_ON(ext4_ext_get_actual_len(newext) == 0);
 	depth = ext_depth(inode);
 	ex = path[depth].p_ext;
 	BUG_ON(path[depth].p_hdr == NULL);
@@ -1200,14 +1301,24 @@
 	/* try to insert block into found extent and return */
 	if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
 		ext_debug("append %d block to %d:%d (from %llu)\n",
-				le16_to_cpu(newext->ee_len),
+				ext4_ext_get_actual_len(newext),
 				le32_to_cpu(ex->ee_block),
-				le16_to_cpu(ex->ee_len), ext_pblock(ex));
+				ext4_ext_get_actual_len(ex), ext_pblock(ex));
 		err = ext4_ext_get_access(handle, inode, path + depth);
 		if (err)
 			return err;
-		ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
-					 + le16_to_cpu(newext->ee_len));
+
+		/*
+		 * ext4_can_extents_be_merged should have checked that either
+		 * both extents are uninitialized, or both aren't. Thus we
+		 * need to check only one of them here.
+		 */
+		if (ext4_ext_is_uninitialized(ex))
+			uninitialized = 1;
+		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+					+ ext4_ext_get_actual_len(newext));
+		if (uninitialized)
+			ext4_ext_mark_uninitialized(ex);
 		eh = path[depth].p_hdr;
 		nearex = ex;
 		goto merge;
@@ -1263,7 +1374,7 @@
 		ext_debug("first extent in the leaf: %d:%llu:%d\n",
 				le32_to_cpu(newext->ee_block),
 				ext_pblock(newext),
-				le16_to_cpu(newext->ee_len));
+				ext4_ext_get_actual_len(newext));
 		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
 	} else if (le32_to_cpu(newext->ee_block)
 			   > le32_to_cpu(nearex->ee_block)) {
@@ -1276,7 +1387,7 @@
 					"move %d from 0x%p to 0x%p\n",
 					le32_to_cpu(newext->ee_block),
 					ext_pblock(newext),
-					le16_to_cpu(newext->ee_len),
+					ext4_ext_get_actual_len(newext),
 					nearex, len, nearex + 1, nearex + 2);
 			memmove(nearex + 2, nearex + 1, len);
 		}
@@ -1289,7 +1400,7 @@
 				"move %d from 0x%p to 0x%p\n",
 				le32_to_cpu(newext->ee_block),
 				ext_pblock(newext),
-				le16_to_cpu(newext->ee_len),
+				ext4_ext_get_actual_len(newext),
 				nearex, len, nearex + 1, nearex + 2);
 		memmove(nearex + 1, nearex, len);
 		path[depth].p_ext = nearex;
@@ -1304,20 +1415,7 @@
 
 merge:
 	/* try to merge extents to the right */
-	while (nearex < EXT_LAST_EXTENT(eh)) {
-		if (!ext4_can_extents_be_merged(inode, nearex, nearex + 1))
-			break;
-		/* merge with next extent! */
-		nearex->ee_len = cpu_to_le16(le16_to_cpu(nearex->ee_len)
-					     + le16_to_cpu(nearex[1].ee_len));
-		if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
-			len = (EXT_LAST_EXTENT(eh) - nearex - 1)
-					* sizeof(struct ext4_extent);
-			memmove(nearex + 1, nearex + 2, len);
-		}
-		eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
-		BUG_ON(eh->eh_entries == 0);
-	}
+	ext4_ext_try_to_merge(inode, path, nearex);
 
 	/* try to merge extents to the left */
 
@@ -1379,8 +1477,8 @@
 			end = le32_to_cpu(ex->ee_block);
 			if (block + num < end)
 				end = block + num;
-		} else if (block >=
-			     le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)) {
+		} else if (block >= le32_to_cpu(ex->ee_block)
+					+ ext4_ext_get_actual_len(ex)) {
 			/* need to allocate space after found extent */
 			start = block;
 			end = block + num;
@@ -1392,7 +1490,8 @@
 			 * by found extent
 			 */
 			start = block;
-			end = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len);
+			end = le32_to_cpu(ex->ee_block)
+				+ ext4_ext_get_actual_len(ex);
 			if (block + num < end)
 				end = block + num;
 			exists = 1;
@@ -1408,7 +1507,7 @@
 			cbex.ec_type = EXT4_EXT_CACHE_GAP;
 		} else {
 			cbex.ec_block = le32_to_cpu(ex->ee_block);
-			cbex.ec_len = le16_to_cpu(ex->ee_len);
+			cbex.ec_len = ext4_ext_get_actual_len(ex);
 			cbex.ec_start = ext_pblock(ex);
 			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
 		}
@@ -1481,15 +1580,15 @@
 		ext_debug("cache gap(before): %lu [%lu:%lu]",
 				(unsigned long) block,
 				(unsigned long) le32_to_cpu(ex->ee_block),
-				(unsigned long) le16_to_cpu(ex->ee_len));
+				(unsigned long) ext4_ext_get_actual_len(ex));
 	} else if (block >= le32_to_cpu(ex->ee_block)
-			    + le16_to_cpu(ex->ee_len)) {
+			+ ext4_ext_get_actual_len(ex)) {
 		lblock = le32_to_cpu(ex->ee_block)
-			 + le16_to_cpu(ex->ee_len);
+			+ ext4_ext_get_actual_len(ex);
 		len = ext4_ext_next_allocated_block(path);
 		ext_debug("cache gap(after): [%lu:%lu] %lu",
 				(unsigned long) le32_to_cpu(ex->ee_block),
-				(unsigned long) le16_to_cpu(ex->ee_len),
+				(unsigned long) ext4_ext_get_actual_len(ex),
 				(unsigned long) block);
 		BUG_ON(len == lblock);
 		len = len - lblock;
@@ -1619,12 +1718,12 @@
 				unsigned long from, unsigned long to)
 {
 	struct buffer_head *bh;
+	unsigned short ee_len =  ext4_ext_get_actual_len(ex);
 	int i;
 
 #ifdef EXTENTS_STATS
 	{
 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-		unsigned short ee_len =  le16_to_cpu(ex->ee_len);
 		spin_lock(&sbi->s_ext_stats_lock);
 		sbi->s_ext_blocks += ee_len;
 		sbi->s_ext_extents++;
@@ -1638,12 +1737,12 @@
 	}
 #endif
 	if (from >= le32_to_cpu(ex->ee_block)
-	    && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
+	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
 		/* tail removal */
 		unsigned long num;
 		ext4_fsblk_t start;
-		num = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - from;
-		start = ext_pblock(ex) + le16_to_cpu(ex->ee_len) - num;
+		num = le32_to_cpu(ex->ee_block) + ee_len - from;
+		start = ext_pblock(ex) + ee_len - num;
 		ext_debug("free last %lu blocks starting %llu\n", num, start);
 		for (i = 0; i < num; i++) {
 			bh = sb_find_get_block(inode->i_sb, start + i);
@@ -1651,12 +1750,12 @@
 		}
 		ext4_free_blocks(handle, inode, start, num);
 	} else if (from == le32_to_cpu(ex->ee_block)
-		   && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
+		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
 		printk("strange request: removal %lu-%lu from %u:%u\n",
-		       from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
+			from, to, le32_to_cpu(ex->ee_block), ee_len);
 	} else {
 		printk("strange request: removal(2) %lu-%lu from %u:%u\n",
-		       from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
+			from, to, le32_to_cpu(ex->ee_block), ee_len);
 	}
 	return 0;
 }
@@ -1671,21 +1770,23 @@
 	unsigned a, b, block, num;
 	unsigned long ex_ee_block;
 	unsigned short ex_ee_len;
+	unsigned uninitialized = 0;
 	struct ext4_extent *ex;
 
+	/* the header must have been checked already in ext4_ext_remove_space() */
 	ext_debug("truncate since %lu in leaf\n", start);
 	if (!path[depth].p_hdr)
 		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
 	eh = path[depth].p_hdr;
 	BUG_ON(eh == NULL);
-	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
-	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
 
 	/* find where to start removing */
 	ex = EXT_LAST_EXTENT(eh);
 
 	ex_ee_block = le32_to_cpu(ex->ee_block);
-	ex_ee_len = le16_to_cpu(ex->ee_len);
+	if (ext4_ext_is_uninitialized(ex))
+		uninitialized = 1;
+	ex_ee_len = ext4_ext_get_actual_len(ex);
 
 	while (ex >= EXT_FIRST_EXTENT(eh) &&
 			ex_ee_block + ex_ee_len > start) {
@@ -1753,6 +1854,12 @@
 
 		ex->ee_block = cpu_to_le32(block);
 		ex->ee_len = cpu_to_le16(num);
+		/*
+		 * Do not mark uninitialized if all the blocks in the
+		 * extent have been removed.
+		 */
+		if (uninitialized && num)
+			ext4_ext_mark_uninitialized(ex);
 
 		err = ext4_ext_dirty(handle, inode, path + depth);
 		if (err)
@@ -1762,7 +1869,7 @@
 				ext_pblock(ex));
 		ex--;
 		ex_ee_block = le32_to_cpu(ex->ee_block);
-		ex_ee_len = le16_to_cpu(ex->ee_len);
+		ex_ee_len = ext4_ext_get_actual_len(ex);
 	}
 
 	if (correct_index && eh->eh_entries)
@@ -1825,7 +1932,7 @@
 		return -ENOMEM;
 	}
 	path[0].p_hdr = ext_inode_hdr(inode);
-	if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
+	if (ext4_ext_check_header(inode, path[0].p_hdr, depth)) {
 		err = -EIO;
 		goto out;
 	}
@@ -1846,17 +1953,8 @@
 		if (!path[i].p_hdr) {
 			ext_debug("initialize header\n");
 			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
-			if (ext4_ext_check_header(__FUNCTION__, inode,
-							path[i].p_hdr)) {
-				err = -EIO;
-				goto out;
-			}
 		}
 
-		BUG_ON(le16_to_cpu(path[i].p_hdr->eh_entries)
-			   > le16_to_cpu(path[i].p_hdr->eh_max));
-		BUG_ON(path[i].p_hdr->eh_magic != EXT4_EXT_MAGIC);
-
 		if (!path[i].p_idx) {
 			/* this level hasn't been touched yet */
 			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
@@ -1873,17 +1971,27 @@
 				i, EXT_FIRST_INDEX(path[i].p_hdr),
 				path[i].p_idx);
 		if (ext4_ext_more_to_rm(path + i)) {
+			struct buffer_head *bh;
 			/* go to the next level */
 			ext_debug("move to level %d (block %llu)\n",
 				  i + 1, idx_pblock(path[i].p_idx));
 			memset(path + i + 1, 0, sizeof(*path));
-			path[i+1].p_bh =
-				sb_bread(sb, idx_pblock(path[i].p_idx));
-			if (!path[i+1].p_bh) {
+			bh = sb_bread(sb, idx_pblock(path[i].p_idx));
+			if (!bh) {
 				/* should we reset i_size? */
 				err = -EIO;
 				break;
 			}
+			if (WARN_ON(i + 1 > depth)) {
+				err = -EIO;
+				break;
+			}
+			if (ext4_ext_check_header(inode, ext_block_hdr(bh),
+							depth - i - 1)) {
+				err = -EIO;
+				break;
+			}
+			path[i + 1].p_bh = bh;
 
 			/* save actual number of indexes since this
 			 * number is changed at the next iteration */
@@ -1977,15 +2085,158 @@
 #endif
 }
 
+/*
+ * This function is called by ext4_ext_get_blocks() if someone tries to write
+ * to an uninitialized extent. It may result in splitting the uninitialized
+ * extent into multiple extents (up to three: one initialized and two
+ * uninitialized).
+ * There are three possibilities:
+ *   a> There is no split required: Entire extent should be initialized
+ *   b> Splits in two extents: Write is happening at either end of the extent
+ *   c> Splits in three extents: Someone is writing in the middle of the extent
+ */
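A worked illustration of case c (block numbers invented for the example):

	/*
	 * Example for case c: an uninitialized extent [ee_block = 100,
	 * ee_len = 10] receives a write of max_blocks = 3 at iblock = 103.
	 * The extent is carved into
	 *
	 *	ex1 = [100, len = 3]   uninitialized
	 *	ex2 = [103, len = 3]   initialized   (returned as "allocated")
	 *	ex3 = [106, len = 4]   uninitialized
	 */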
+int ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode,
+					struct ext4_ext_path *path,
+					ext4_fsblk_t iblock,
+					unsigned long max_blocks)
+{
+	struct ext4_extent *ex, newex;
+	struct ext4_extent *ex1 = NULL;
+	struct ext4_extent *ex2 = NULL;
+	struct ext4_extent *ex3 = NULL;
+	struct ext4_extent_header *eh;
+	unsigned int allocated, ee_block, ee_len, depth;
+	ext4_fsblk_t newblock;
+	int err = 0;
+	int ret = 0;
+
+	depth = ext_depth(inode);
+	eh = path[depth].p_hdr;
+	ex = path[depth].p_ext;
+	ee_block = le32_to_cpu(ex->ee_block);
+	ee_len = ext4_ext_get_actual_len(ex);
+	allocated = ee_len - (iblock - ee_block);
+	newblock = iblock - ee_block + ext_pblock(ex);
+	ex2 = ex;
+
+	/* ex1: ee_block to iblock - 1 : uninitialized */
+	if (iblock > ee_block) {
+		ex1 = ex;
+		ex1->ee_len = cpu_to_le16(iblock - ee_block);
+		ext4_ext_mark_uninitialized(ex1);
+		ex2 = &newex;
+	}
+	/*
+	 * for sanity, update the length of the ex2 extent before
+	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
+	 * overlap of blocks.
+	 */
+	if (!ex1 && allocated > max_blocks)
+		ex2->ee_len = cpu_to_le16(max_blocks);
+	/* ex3: iblock + max_blocks to ee_block + ee_len : uninitialized */
+	if (allocated > max_blocks) {
+		unsigned int newdepth;
+		ex3 = &newex;
+		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
+		ext4_ext_store_pblock(ex3, newblock + max_blocks);
+		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
+		ext4_ext_mark_uninitialized(ex3);
+		err = ext4_ext_insert_extent(handle, inode, path, ex3);
+		if (err)
+			goto out;
+		/*
+		 * The depth, and hence eh & ex might change
+		 * as part of the insert above.
+		 */
+		newdepth = ext_depth(inode);
+		if (newdepth != depth) {
+			depth = newdepth;
+			path = ext4_ext_find_extent(inode, iblock, NULL);
+			if (IS_ERR(path)) {
+				err = PTR_ERR(path);
+				path = NULL;
+				goto out;
+			}
+			eh = path[depth].p_hdr;
+			ex = path[depth].p_ext;
+			if (ex2 != &newex)
+				ex2 = ex;
+		}
+		allocated = max_blocks;
+	}
+	/*
+	 * If there was a change of depth as part of the
+	 * insertion of ex3 above, we need to update the length
+	 * of the ex1 extent again here
+	 */
+	if (ex1 && ex1 != ex) {
+		ex1 = ex;
+		ex1->ee_len = cpu_to_le16(iblock - ee_block);
+		ext4_ext_mark_uninitialized(ex1);
+		ex2 = &newex;
+	}
+	/* ex2: iblock to iblock + maxblocks-1 : initialised */
+	ex2->ee_block = cpu_to_le32(iblock);
+	ex2->ee_start = cpu_to_le32(newblock);
+	ext4_ext_store_pblock(ex2, newblock);
+	ex2->ee_len = cpu_to_le16(allocated);
+	if (ex2 != ex)
+		goto insert;
+	err = ext4_ext_get_access(handle, inode, path + depth);
+	if (err)
+		goto out;
+	/*
+	 * New (initialized) extent starts from the first block
+	 * in the current extent. i.e., ex2 == ex
+	 * We have to see if it can be merged with the extent
+	 * on the left.
+	 */
+	if (ex2 > EXT_FIRST_EXTENT(eh)) {
+		/*
+		 * To merge left, pass "ex2 - 1" to try_to_merge(),
+		 * since it merges towards right _only_.
+		 */
+		ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
+		if (ret) {
+			err = ext4_ext_correct_indexes(handle, inode, path);
+			if (err)
+				goto out;
+			depth = ext_depth(inode);
+			ex2--;
+		}
+	}
+	/*
+	 * Try to merge towards the right. This might be required
+	 * only when the whole extent is being written to.
+	 * i.e. ex2 == ex and ex3 == NULL.
+	 */
+	if (!ex3) {
+		ret = ext4_ext_try_to_merge(inode, path, ex2);
+		if (ret) {
+			err = ext4_ext_correct_indexes(handle, inode, path);
+			if (err)
+				goto out;
+		}
+	}
+	/* Mark modified extent as dirty */
+	err = ext4_ext_dirty(handle, inode, path + depth);
+	goto out;
+insert:
+	err = ext4_ext_insert_extent(handle, inode, path, &newex);
+out:
+	return err ? err : allocated;
+}
+
 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 			ext4_fsblk_t iblock,
 			unsigned long max_blocks, struct buffer_head *bh_result,
 			int create, int extend_disksize)
 {
 	struct ext4_ext_path *path = NULL;
+	struct ext4_extent_header *eh;
 	struct ext4_extent newex, *ex;
 	ext4_fsblk_t goal, newblock;
-	int err = 0, depth;
+	int err = 0, depth, ret;
 	unsigned long allocated = 0;
 
 	__clear_bit(BH_New, &bh_result->b_state);
@@ -1998,8 +2249,10 @@
 	if (goal) {
 		if (goal == EXT4_EXT_CACHE_GAP) {
 			if (!create) {
-				/* block isn't allocated yet and
-				 * user doesn't want to allocate it */
+				/*
+				 * block isn't allocated yet and
+				 * user doesn't want to allocate it
+				 */
 				goto out2;
 			}
 			/* we should allocate requested block */
@@ -2033,21 +2286,19 @@
 	 * this is why assert can't be put in ext4_ext_find_extent()
 	 */
 	BUG_ON(path[depth].p_ext == NULL && depth != 0);
+	eh = path[depth].p_hdr;
 
 	ex = path[depth].p_ext;
 	if (ex) {
 		unsigned long ee_block = le32_to_cpu(ex->ee_block);
 		ext4_fsblk_t ee_start = ext_pblock(ex);
-		unsigned short ee_len  = le16_to_cpu(ex->ee_len);
+		unsigned short ee_len;
 
 		/*
-		 * Allow future support for preallocated extents to be added
-		 * as an RO_COMPAT feature:
 		 * Uninitialized extents are treated as holes, except that
-		 * we avoid (fail) allocating new blocks during a write.
+		 * we split out initialized portions during a write.
 		 */
-		if (ee_len > EXT_MAX_LEN)
-			goto out2;
+		ee_len = ext4_ext_get_actual_len(ex);
 		/* if found extent covers block, simply return it */
 		if (iblock >= ee_block && iblock < ee_block + ee_len) {
 			newblock = iblock - ee_block + ee_start;
@@ -2055,9 +2306,27 @@
 			allocated = ee_len - (iblock - ee_block);
 			ext_debug("%d fit into %lu:%d -> %llu\n", (int) iblock,
 					ee_block, ee_len, newblock);
-			ext4_ext_put_in_cache(inode, ee_block, ee_len,
-						ee_start, EXT4_EXT_CACHE_EXTENT);
-			goto out;
+
+			/* Do not put uninitialized extent in the cache */
+			if (!ext4_ext_is_uninitialized(ex)) {
+				ext4_ext_put_in_cache(inode, ee_block,
+							ee_len, ee_start,
+							EXT4_EXT_CACHE_EXTENT);
+				goto out;
+			}
+			if (create == EXT4_CREATE_UNINITIALIZED_EXT)
+				goto out;
+			if (!create)
+				goto out2;
+
+			ret = ext4_ext_convert_to_initialized(handle, inode,
+								path, iblock,
+								max_blocks);
+			if (ret <= 0)
+				goto out2;
+			else
+				allocated = ret;
+			goto outnew;
 		}
 	}
 
@@ -2066,8 +2335,10 @@
 	 * we couldn't try to create block if create flag is zero
 	 */
 	if (!create) {
-		/* put just found gap into cache to speed up
-		 * subsequent requests */
+		/*
+		 * put just found gap into cache to speed up
+		 * subsequent requests
+		 */
 		ext4_ext_put_gap_in_cache(inode, path, iblock);
 		goto out2;
 	}
@@ -2081,6 +2352,19 @@
 	/* allocate new block */
 	goal = ext4_ext_find_goal(inode, path, iblock);
 
+	/*
+	 * See if request is beyond maximum number of blocks we can have in
+	 * a single extent. For an initialized extent this limit is
+	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
+	 * EXT_UNINIT_MAX_LEN.
+	 */
+	if (max_blocks > EXT_INIT_MAX_LEN &&
+	    create != EXT4_CREATE_UNINITIALIZED_EXT)
+		max_blocks = EXT_INIT_MAX_LEN;
+	else if (max_blocks > EXT_UNINIT_MAX_LEN &&
+		 create == EXT4_CREATE_UNINITIALIZED_EXT)
+		max_blocks = EXT_UNINIT_MAX_LEN;
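The asymmetry between the two limits follows from the on-disk ee_len encoding, where the top bit of the 16-bit length marks an uninitialized extent; the constants presumably work out as below (an assumption, their definitions are not part of this hunk):

	/* Assumed values, from the 16-bit ee_len with its high bit reserved
	 * as the "uninitialized" marker */
	#define EXT_INIT_MAX_LEN	(1UL << 15)		/* 32768 blocks */
	#define EXT_UNINIT_MAX_LEN	(EXT_INIT_MAX_LEN - 1)	/* 32767 blocks */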
+
 	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
 	newex.ee_block = cpu_to_le32(iblock);
 	newex.ee_len = cpu_to_le16(max_blocks);
@@ -2098,6 +2382,8 @@
 	/* try to insert new extent into found leaf and return */
 	ext4_ext_store_pblock(&newex, newblock);
 	newex.ee_len = cpu_to_le16(allocated);
+	if (create == EXT4_CREATE_UNINITIALIZED_EXT)  /* Mark uninitialized */
+		ext4_ext_mark_uninitialized(&newex);
 	err = ext4_ext_insert_extent(handle, inode, path, &newex);
 	if (err) {
 		/* free data blocks we just allocated */
@@ -2111,10 +2397,13 @@
 
 	/* previous routine could use block we allocated */
 	newblock = ext_pblock(&newex);
+outnew:
 	__set_bit(BH_New, &bh_result->b_state);
 
-	ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
-				EXT4_EXT_CACHE_EXTENT);
+	/* Cache only when it is _not_ an uninitialized extent */
+	if (create != EXT4_CREATE_UNINITIALIZED_EXT)
+		ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
+						EXT4_EXT_CACHE_EXTENT);
 out:
 	if (allocated > max_blocks)
 		allocated = max_blocks;
@@ -2178,7 +2467,8 @@
 	err = ext4_ext_remove_space(inode, last_block);
 
 	/* In a multi-transaction truncate, we only make the final
-	 * transaction synchronous. */
+	 * transaction synchronous.
+	 */
 	if (IS_SYNC(inode))
 		handle->h_sync = 1;
 
@@ -2217,3 +2507,127 @@
 
 	return needed;
 }
+
+/*
+ * Preallocate space for a file. This implements ext4's fallocate inode
+ * operation, which gets called from the sys_fallocate system call.
+ * For block-mapped files, posix_fallocate should fall back to the method
+ * of writing zeroes to the required new blocks (the same behavior that is
+ * expected of file systems which do not support the fallocate() system call).
+ */
+long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
+{
+	handle_t *handle;
+	ext4_fsblk_t block, max_blocks;
+	ext4_fsblk_t nblocks = 0;
+	int ret = 0;
+	int ret2 = 0;
+	int retries = 0;
+	struct buffer_head map_bh;
+	unsigned int credits, blkbits = inode->i_blkbits;
+
+	/*
+	 * currently supporting (pre)allocate mode for extent-based
+	 * files _only_
+	 */
+	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
+		return -EOPNOTSUPP;
+
+	/* preallocation to directories is currently not supported */
+	if (S_ISDIR(inode->i_mode))
+		return -ENODEV;
+
+	block = offset >> blkbits;
+	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
+			- block;
+
+	/*
+	 * credits to insert 1 extent into extent tree + buffers to be able to
+	 * modify 1 super block, 1 block bitmap and 1 group descriptor.
+	 */
+	credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3;
+retry:
+	while (ret >= 0 && ret < max_blocks) {
+		block = block + ret;
+		max_blocks = max_blocks - ret;
+		handle = ext4_journal_start(inode, credits);
+		if (IS_ERR(handle)) {
+			ret = PTR_ERR(handle);
+			break;
+		}
+
+		ret = ext4_ext_get_blocks(handle, inode, block,
+					  max_blocks, &map_bh,
+					  EXT4_CREATE_UNINITIALIZED_EXT, 0);
+		WARN_ON(!ret);
+		if (!ret) {
+			ext4_error(inode->i_sb, "ext4_fallocate",
+				   "ext4_ext_get_blocks returned 0! inode#%lu"
+				   ", block=%llu, max_blocks=%llu",
+				   inode->i_ino, block, max_blocks);
+			ret = -EIO;
+			ext4_mark_inode_dirty(handle, inode);
+			ret2 = ext4_journal_stop(handle);
+			break;
+		}
+		if (ret > 0) {
+			/* check wrap through sign-bit/zero here */
+			if ((block + ret) < 0 || (block + ret) < block) {
+				ret = -EIO;
+				ext4_mark_inode_dirty(handle, inode);
+				ret2 = ext4_journal_stop(handle);
+				break;
+			}
+			if (buffer_new(&map_bh) && ((block + ret) >
+			    (EXT4_BLOCK_ALIGN(i_size_read(inode), blkbits)
+			    >> blkbits)))
+					nblocks = nblocks + ret;
+		}
+
+		/* Update ctime if new blocks get allocated */
+		if (nblocks) {
+			struct timespec now;
+
+			now = current_fs_time(inode->i_sb);
+			if (!timespec_equal(&inode->i_ctime, &now))
+				inode->i_ctime = now;
+		}
+
+		ext4_mark_inode_dirty(handle, inode);
+		ret2 = ext4_journal_stop(handle);
+		if (ret2)
+			break;
+	}
+
+	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+		goto retry;
+
+	/*
+	 * Time to update the file size.
+	 * Update only when preallocation was requested beyond the file size.
+	 */
+	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+	    (offset + len) > i_size_read(inode)) {
+		if (ret > 0) {
+			/*
+			 * if no error, we assume preallocation succeeded
+			 * completely
+			 */
+			mutex_lock(&inode->i_mutex);
+			i_size_write(inode, offset + len);
+			EXT4_I(inode)->i_disksize = i_size_read(inode);
+			mutex_unlock(&inode->i_mutex);
+		} else if (ret < 0 && nblocks) {
+			/* Handle partial allocation scenario */
+			loff_t newsize;
+
+			mutex_lock(&inode->i_mutex);
+			newsize  = (nblocks << blkbits) + i_size_read(inode);
+			i_size_write(inode, EXT4_BLOCK_ALIGN(newsize, blkbits));
+			EXT4_I(inode)->i_disksize = i_size_read(inode);
+			mutex_unlock(&inode->i_mutex);
+		}
+	}
+
+	return ret > 0 ? ret2 : ret;
+}
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index d4c8186..1a81cd6 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -134,5 +134,6 @@
 	.removexattr	= generic_removexattr,
 #endif
 	.permission	= ext4_permission,
+	.fallocate	= ext4_fallocate,
 };
 
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index c88b439..427f830 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -563,7 +563,8 @@
 	inode->i_ino = ino;
 	/* This is the optimal IO size (for stat), not the fs block size */
 	inode->i_blocks = 0;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
+						       ext4_current_time(inode);
 
 	memset(ei->i_data, 0, sizeof(ei->i_data));
 	ei->i_dir_start_lookup = 0;
@@ -595,9 +596,8 @@
 	spin_unlock(&sbi->s_next_gen_lock);
 
 	ei->i_state = EXT4_STATE_NEW;
-	ei->i_extra_isize =
-		(EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) ?
-		sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE : 0;
+
+	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
 
 	ret = inode;
 	if(DQUOT_ALLOC_INODE(inode)) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 8416fa2..a4848e0 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -726,7 +726,7 @@
 
 	/* We are done with atomic stuff, now do the rest of housekeeping */
 
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = ext4_current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
 
 	/* had we spliced it onto indirect block? */
@@ -1766,7 +1766,6 @@
 	struct inode *inode = mapping->host;
 	struct buffer_head *bh;
 	int err = 0;
-	void *kaddr;
 
 	blocksize = inode->i_sb->s_blocksize;
 	length = blocksize - (offset & (blocksize - 1));
@@ -1778,10 +1777,7 @@
 	 */
 	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
 	     ext4_should_writeback_data(inode) && PageUptodate(page)) {
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr + offset, 0, length);
-		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user_page(page, offset, length, KM_USER0);
 		set_page_dirty(page);
 		goto unlock;
 	}
@@ -1834,10 +1830,7 @@
 			goto unlock;
 	}
 
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr + offset, 0, length);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	zero_user_page(page, offset, length, KM_USER0);
 
 	BUFFER_TRACE(bh, "zeroed end of block");
 
@@ -2375,7 +2368,7 @@
 	ext4_discard_reservation(inode);
 
 	mutex_unlock(&ei->truncate_mutex);
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
 
 	/*
@@ -2583,6 +2576,25 @@
 		inode->i_flags |= S_DIRSYNC;
 }
 
+/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
+void ext4_get_inode_flags(struct ext4_inode_info *ei)
+{
+	unsigned int flags = ei->vfs_inode.i_flags;
+
+	ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
+			EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
+	if (flags & S_SYNC)
+		ei->i_flags |= EXT4_SYNC_FL;
+	if (flags & S_APPEND)
+		ei->i_flags |= EXT4_APPEND_FL;
+	if (flags & S_IMMUTABLE)
+		ei->i_flags |= EXT4_IMMUTABLE_FL;
+	if (flags & S_NOATIME)
+		ei->i_flags |= EXT4_NOATIME_FL;
+	if (flags & S_DIRSYNC)
+		ei->i_flags |= EXT4_DIRSYNC_FL;
+}
+
 void ext4_read_inode(struct inode * inode)
 {
 	struct ext4_iloc iloc;
@@ -2610,10 +2622,6 @@
 	}
 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
 	inode->i_size = le32_to_cpu(raw_inode->i_size);
-	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
-	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
-	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
-	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
 
 	ei->i_state = 0;
 	ei->i_dir_start_lookup = 0;
@@ -2691,6 +2699,11 @@
 	} else
 		ei->i_extra_isize = 0;
 
+	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
+	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
+	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
+	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
+
 	if (S_ISREG(inode->i_mode)) {
 		inode->i_op = &ext4_file_inode_operations;
 		inode->i_fop = &ext4_file_operations;
@@ -2744,6 +2757,7 @@
 	if (ei->i_state & EXT4_STATE_NEW)
 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
 
+	ext4_get_inode_flags(ei);
 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
 	if(!(test_opt(inode->i_sb, NO_UID32))) {
 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
@@ -2771,9 +2785,12 @@
 	}
 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
 	raw_inode->i_size = cpu_to_le32(ei->i_disksize);
-	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
-	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
-	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
+
+	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
+	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
+	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
+	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
+
 	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
 	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
@@ -2886,7 +2903,7 @@
 		return 0;
 
 	if (ext4_journal_current_handle()) {
-		jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
+		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
 		dump_stack();
 		return -EIO;
 	}
@@ -3082,6 +3099,39 @@
 }
 
 /*
+ * Expand an inode by new_extra_isize bytes.
+ * Returns 0 on success or negative error number on failure.
+ */
+int ext4_expand_extra_isize(struct inode *inode, unsigned int new_extra_isize,
+			struct ext4_iloc iloc, handle_t *handle)
+{
+	struct ext4_inode *raw_inode;
+	struct ext4_xattr_ibody_header *header;
+	struct ext4_xattr_entry *entry;
+
+	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
+		return 0;
+
+	raw_inode = ext4_raw_inode(&iloc);
+
+	header = IHDR(inode, raw_inode);
+	entry = IFIRST(header);
+
+	/* No extended attributes present */
+	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
+		header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
+		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
+			new_extra_isize);
+		EXT4_I(inode)->i_extra_isize = new_extra_isize;
+		return 0;
+	}
+
+	/* try to expand with EAs present */
+	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
+					  raw_inode, handle);
+}
+
+/*
  * What we do here is to mark the in-core inode as clean with respect to inode
  * dirtiness (it may still be data-dirty).
  * This means that the in-core inode may be reaped by prune_icache
@@ -3105,10 +3155,38 @@
 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
 {
 	struct ext4_iloc iloc;
-	int err;
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	static unsigned int mnt_count;
+	int err, ret;
 
 	might_sleep();
 	err = ext4_reserve_inode_write(handle, inode, &iloc);
+	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
+	    !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
+		/*
+		 * We need extra buffer credits since we may write into EA block
+		 * with this same handle. If journal_extend fails, then it will
+		 * only result in a minor loss of functionality for that inode.
+		 * If this is felt to be critical, then e2fsck should be run to
+		 * force a large enough s_min_extra_isize.
+		 */
+		if ((jbd2_journal_extend(handle,
+			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
+			ret = ext4_expand_extra_isize(inode,
+						      sbi->s_want_extra_isize,
+						      iloc, handle);
+			if (ret) {
+				EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
+				if (mnt_count != sbi->s_es->s_mnt_count) {
+					ext4_warning(inode->i_sb, __FUNCTION__,
+					"Unable to expand inode %lu. Delete"
+					" some EAs or run e2fsck.",
+					inode->i_ino);
+					mnt_count = sbi->s_es->s_mnt_count;
+				}
+			}
+		}
+	}
 	if (!err)
 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 	return err;
@@ -3197,7 +3275,7 @@
 	 */
 
 	journal = EXT4_JOURNAL(inode);
-	if (is_journal_aborted(journal) || IS_RDONLY(inode))
+	if (is_journal_aborted(journal))
 		return -EROFS;
 
 	jbd2_journal_lock_updates(journal);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 500567d..c04c7cc 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -28,6 +28,7 @@
 
 	switch (cmd) {
 	case EXT4_IOC_GETFLAGS:
+		ext4_get_inode_flags(ei);
 		flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
 		return put_user(flags, (int __user *) arg);
 	case EXT4_IOC_SETFLAGS: {
@@ -40,7 +41,7 @@
 		if (IS_RDONLY(inode))
 			return -EROFS;
 
-		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			return -EACCES;
 
 		if (get_user(flags, (int __user *) arg))
@@ -96,7 +97,7 @@
 		ei->i_flags = flags;
 
 		ext4_set_inode_flags(inode);
-		inode->i_ctime = CURRENT_TIME_SEC;
+		inode->i_ctime = ext4_current_time(inode);
 
 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 flags_err:
@@ -121,7 +122,7 @@
 		__u32 generation;
 		int err;
 
-		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			return -EPERM;
 		if (IS_RDONLY(inode))
 			return -EROFS;
@@ -133,14 +134,14 @@
 			return PTR_ERR(handle);
 		err = ext4_reserve_inode_write(handle, inode, &iloc);
 		if (err == 0) {
-			inode->i_ctime = CURRENT_TIME_SEC;
+			inode->i_ctime = ext4_current_time(inode);
 			inode->i_generation = generation;
 			err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 		}
 		ext4_journal_stop(handle);
 		return err;
 	}
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
 	case EXT4_IOC_WAIT_FOR_READONLY:
 		/*
 		 * This is racy - by the time we're woken up and running,
@@ -180,7 +181,7 @@
 		if (IS_RDONLY(inode))
 			return -EROFS;
 
-		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			return -EACCES;
 
 		if (get_user(rsv_window_size, (int __user *)arg))
@@ -282,7 +283,7 @@
 	case EXT4_IOC32_SETVERSION_OLD:
 		cmd = EXT4_IOC_SETVERSION_OLD;
 		break;
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
 	case EXT4_IOC32_WAIT_FOR_READONLY:
 		cmd = EXT4_IOC_WAIT_FOR_READONLY;
 		break;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 2de339d..da22497 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1295,7 +1295,7 @@
 	 * happen is that the times are slightly out of date
 	 * and/or different from the directory change time.
 	 */
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
 	ext4_update_dx_flag(dir);
 	dir->i_version++;
 	ext4_mark_inode_dirty(handle, dir);
@@ -1629,6 +1629,35 @@
 	return -ENOENT;
 }
 
+/*
+ * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
+ * since this indicates that nlinks count was previously 1.
+ */
+static void ext4_inc_count(handle_t *handle, struct inode *inode)
+{
+	inc_nlink(inode);
+	if (is_dx(inode) && inode->i_nlink > 1) {
+		/* limit is 16-bit i_links_count */
+		if (inode->i_nlink >= EXT4_LINK_MAX || inode->i_nlink == 2) {
+			inode->i_nlink = 1;
+			EXT4_SET_RO_COMPAT_FEATURE(inode->i_sb,
+					      EXT4_FEATURE_RO_COMPAT_DIR_NLINK);
+		}
+	}
+}
+
+/*
+ * If a directory already has nlink == 1, leave it at 1. This indicates that
+ * the directory has more than EXT4_LINK_MAX subdirectories.
+ */
+static void ext4_dec_count(handle_t *handle, struct inode *inode)
+{
+	drop_nlink(inode);
+	if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
+		inc_nlink(inode);
+}
+
+
 static int ext4_add_nondir(handle_t *handle,
 		struct dentry *dentry, struct inode *inode)
 {
@@ -1725,7 +1754,7 @@
 	struct ext4_dir_entry_2 * de;
 	int err, retries = 0;
 
-	if (dir->i_nlink >= EXT4_LINK_MAX)
+	if (EXT4_DIR_LINK_MAX(dir))
 		return -EMLINK;
 
 retry:
@@ -1748,7 +1777,7 @@
 	inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
 	dir_block = ext4_bread (handle, inode, 0, 1, &err);
 	if (!dir_block) {
-		drop_nlink(inode); /* is this nlink == 0? */
+		ext4_dec_count(handle, inode); /* is this nlink == 0? */
 		ext4_mark_inode_dirty(handle, inode);
 		iput (inode);
 		goto out_stop;
@@ -1780,7 +1809,7 @@
 		iput (inode);
 		goto out_stop;
 	}
-	inc_nlink(dir);
+	ext4_inc_count(handle, dir);
 	ext4_update_dx_flag(dir);
 	ext4_mark_inode_dirty(handle, dir);
 	d_instantiate(dentry, inode);
@@ -2045,9 +2074,9 @@
 	retval = ext4_delete_entry(handle, dir, de, bh);
 	if (retval)
 		goto end_rmdir;
-	if (inode->i_nlink != 2)
+	if (!EXT4_DIR_LINK_EMPTY(inode))
 		ext4_warning (inode->i_sb, "ext4_rmdir",
-			      "empty directory has nlink!=2 (%d)",
+			      "empty directory has too many links (%d)",
 			      inode->i_nlink);
 	inode->i_version++;
 	clear_nlink(inode);
@@ -2056,9 +2085,9 @@
 	 * recovery. */
 	inode->i_size = 0;
 	ext4_orphan_add(handle, inode);
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+	inode->i_ctime = dir->i_ctime = dir->i_mtime = ext4_current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
-	drop_nlink(dir);
+	ext4_dec_count(handle, dir);
 	ext4_update_dx_flag(dir);
 	ext4_mark_inode_dirty(handle, dir);
 
@@ -2106,13 +2135,13 @@
 	retval = ext4_delete_entry(handle, dir, de, bh);
 	if (retval)
 		goto end_unlink;
-	dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+	dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
 	ext4_update_dx_flag(dir);
 	ext4_mark_inode_dirty(handle, dir);
-	drop_nlink(inode);
+	ext4_dec_count(handle, inode);
 	if (!inode->i_nlink)
 		ext4_orphan_add(handle, inode);
-	inode->i_ctime = dir->i_ctime;
+	inode->i_ctime = ext4_current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
 	retval = 0;
 
@@ -2159,7 +2188,7 @@
 		err = __page_symlink(inode, symname, l,
 				mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 		if (err) {
-			drop_nlink(inode);
+			ext4_dec_count(handle, inode);
 			ext4_mark_inode_dirty(handle, inode);
 			iput (inode);
 			goto out_stop;
@@ -2185,8 +2214,9 @@
 	struct inode *inode = old_dentry->d_inode;
 	int err, retries = 0;
 
-	if (inode->i_nlink >= EXT4_LINK_MAX)
+	if (EXT4_DIR_LINK_MAX(inode))
 		return -EMLINK;
+
 	/*
 	 * Return -ENOENT if we've raced with unlink and i_nlink is 0.  Doing
 	 * otherwise has the potential to corrupt the orphan inode list.
@@ -2203,8 +2233,8 @@
 	if (IS_DIRSYNC(dir))
 		handle->h_sync = 1;
 
-	inode->i_ctime = CURRENT_TIME_SEC;
-	inc_nlink(inode);
+	inode->i_ctime = ext4_current_time(inode);
+	ext4_inc_count(handle, inode);
 	atomic_inc(&inode->i_count);
 
 	err = ext4_add_nondir(handle, dentry, inode);
@@ -2305,7 +2335,7 @@
 	 * Like most other Unix systems, set the ctime for inodes on a
 	 * rename.
 	 */
-	old_inode->i_ctime = CURRENT_TIME_SEC;
+	old_inode->i_ctime = ext4_current_time(old_inode);
 	ext4_mark_inode_dirty(handle, old_inode);
 
 	/*
@@ -2337,10 +2367,10 @@
 	}
 
 	if (new_inode) {
-		drop_nlink(new_inode);
-		new_inode->i_ctime = CURRENT_TIME_SEC;
+		ext4_dec_count(handle, new_inode);
+		new_inode->i_ctime = ext4_current_time(new_inode);
 	}
-	old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
+	old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir);
 	ext4_update_dx_flag(old_dir);
 	if (dir_bh) {
 		BUFFER_TRACE(dir_bh, "get_write_access");
@@ -2348,11 +2378,13 @@
 		PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
 		BUFFER_TRACE(dir_bh, "call ext4_journal_dirty_metadata");
 		ext4_journal_dirty_metadata(handle, dir_bh);
-		drop_nlink(old_dir);
+		ext4_dec_count(handle, old_dir);
 		if (new_inode) {
-			drop_nlink(new_inode);
+			/* checked empty_dir above, can't have another parent,
+			 * ext4_dec_count() won't work for many-linked dirs */
+			new_inode->i_nlink = 0;
 		} else {
-			inc_nlink(new_dir);
+			ext4_inc_count(handle, new_dir);
 			ext4_update_dx_flag(new_dir);
 			ext4_mark_inode_dirty(handle, new_dir);
 		}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d0d8c76..6dcbb28 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -29,12 +29,14 @@
 #include <linux/parser.h>
 #include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
+#include <linux/exportfs.h>
 #include <linux/vfs.h>
 #include <linux/random.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/quotaops.h>
 #include <linux/seq_file.h>
+#include <linux/log2.h>
 
 #include <asm/uaccess.h>
 
@@ -733,7 +735,7 @@
 	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
 	Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
-	Opt_grpquota, Opt_extents,
+	Opt_grpquota, Opt_extents, Opt_noextents,
 };
 
 static match_table_t tokens = {
@@ -784,6 +786,7 @@
 	{Opt_usrquota, "usrquota"},
 	{Opt_barrier, "barrier=%u"},
 	{Opt_extents, "extents"},
+	{Opt_noextents, "noextents"},
 	{Opt_err, NULL},
 	{Opt_resize, "resize"},
 };
@@ -1119,6 +1122,9 @@
 		case Opt_extents:
 			set_opt (sbi->s_mount_opt, EXTENTS);
 			break;
+		case Opt_noextents:
+			clear_opt (sbi->s_mount_opt, EXTENTS);
+			break;
 		default:
 			printk (KERN_ERR
 				"EXT4-fs: Unrecognized mount option \"%s\" "
@@ -1550,6 +1556,12 @@
 
 	set_opt(sbi->s_mount_opt, RESERVATION);
 
+	/*
+	 * turn on extents feature by default in ext4 filesystem
+	 * Use -o noextents to turn it off
+	 */
+	set_opt(sbi->s_mount_opt, EXTENTS);
+
 	if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
 			    NULL, 0))
 		goto failed_mount;
@@ -1633,13 +1645,15 @@
 		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
 		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
 		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
-		    (sbi->s_inode_size & (sbi->s_inode_size - 1)) ||
+		    (!is_power_of_2(sbi->s_inode_size)) ||
 		    (sbi->s_inode_size > blocksize)) {
 			printk (KERN_ERR
 				"EXT4-fs: unsupported inode size: %d\n",
 				sbi->s_inode_size);
 			goto failed_mount;
 		}
+		if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
+			sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
 	}
 	sbi->s_frag_size = EXT4_MIN_FRAG_SIZE <<
 				   le32_to_cpu(es->s_log_frag_size);
@@ -1802,6 +1816,13 @@
 		goto failed_mount3;
 	}
 
+	if (ext4_blocks_count(es) > 0xffffffffULL &&
+	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
+				       JBD2_FEATURE_INCOMPAT_64BIT)) {
+		printk(KERN_ERR "ext4: Failed to set 64-bit journal feature\n");
+		goto failed_mount4;
+	}
+
 	/* We have now updated the journal if required, so we can
 	 * validate the data journaling mode. */
 	switch (test_opt(sb, DATA_FLAGS)) {
@@ -1856,6 +1877,32 @@
 	}
 
 	ext4_setup_super (sb, es, sb->s_flags & MS_RDONLY);
+
+	/* determine the minimum size of new large inodes, if present */
+	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
+		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+						     EXT4_GOOD_OLD_INODE_SIZE;
+		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+				       EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
+			if (sbi->s_want_extra_isize <
+			    le16_to_cpu(es->s_want_extra_isize))
+				sbi->s_want_extra_isize =
+					le16_to_cpu(es->s_want_extra_isize);
+			if (sbi->s_want_extra_isize <
+			    le16_to_cpu(es->s_min_extra_isize))
+				sbi->s_want_extra_isize =
+					le16_to_cpu(es->s_min_extra_isize);
+		}
+	}
+	/* Check if enough inode space is available */
+	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
+							sbi->s_inode_size) {
+		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+						       EXT4_GOOD_OLD_INODE_SIZE;
+		printk(KERN_INFO "EXT4-fs: required extra inode space not "
+			"available.\n");
+	}
+
 	/*
 	 * akpm: core read_super() calls in here with the superblock locked.
 	 * That deadlocks, because orphan cleanup needs to lock the superblock
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index e832e96..b10d68f 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -66,13 +66,6 @@
 #define BFIRST(bh) ENTRY(BHDR(bh)+1)
 #define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
 
-#define IHDR(inode, raw_inode) \
-	((struct ext4_xattr_ibody_header *) \
-		((void *)raw_inode + \
-		 EXT4_GOOD_OLD_INODE_SIZE + \
-		 EXT4_I(inode)->i_extra_isize))
-#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
-
 #ifdef EXT4_XATTR_DEBUG
 # define ea_idebug(inode, f...) do { \
 		printk(KERN_DEBUG "inode %s:%lu: ", \
@@ -508,6 +501,24 @@
 	return;
 }
 
+/*
+ * Find the available free space for EAs. This also returns the total number of
+ * bytes used by EA entries.
+ */
+static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
+				    size_t *min_offs, void *base, int *total)
+{
+	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+		*total += EXT4_XATTR_LEN(last->e_name_len);
+		if (!last->e_value_block && last->e_value_size) {
+			size_t offs = le16_to_cpu(last->e_value_offs);
+			if (offs < *min_offs)
+				*min_offs = offs;
+		}
+	}
+	return (*min_offs - ((void *)last - base) - sizeof(__u32));
+}
+
 struct ext4_xattr_info {
 	int name_index;
 	const char *name;
@@ -1013,7 +1024,9 @@
 	}
 	if (!error) {
 		ext4_xattr_update_super_block(handle, inode->i_sb);
-		inode->i_ctime = CURRENT_TIME_SEC;
+		inode->i_ctime = ext4_current_time(inode);
+		if (!value)
+			EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND;
 		error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
 		/*
 		 * The bh is consumed by ext4_mark_iloc_dirty, even with
@@ -1067,6 +1080,253 @@
 }
 
 /*
+ * Shift the EA entries in the inode to create space for the increased
+ * i_extra_isize.
+ */
+static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
+				     int value_offs_shift, void *to,
+				     void *from, size_t n, int blocksize)
+{
+	struct ext4_xattr_entry *last = entry;
+	int new_offs;
+
+	/* Adjust the value offsets of the entries */
+	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+		if (!last->e_value_block && last->e_value_size) {
+			new_offs = le16_to_cpu(last->e_value_offs) +
+							value_offs_shift;
+			BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
+				 > blocksize);
+			last->e_value_offs = cpu_to_le16(new_offs);
+		}
+	}
+	/* Shift the entries by n bytes */
+	memmove(to, from, n);
+}
+
+/*
+ * Expand an inode by new_extra_isize bytes when EAs are present.
+ * Returns 0 on success or negative error number on failure.
+ */
+int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
+			       struct ext4_inode *raw_inode, handle_t *handle)
+{
+	struct ext4_xattr_ibody_header *header;
+	struct ext4_xattr_entry *entry, *last, *first;
+	struct buffer_head *bh = NULL;
+	struct ext4_xattr_ibody_find *is = NULL;
+	struct ext4_xattr_block_find *bs = NULL;
+	char *buffer = NULL, *b_entry_name = NULL;
+	size_t min_offs, free;
+	int total_ino, total_blk;
+	void *base, *start, *end;
+	int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
+	int s_min_extra_isize = EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize;
+
+	down_write(&EXT4_I(inode)->xattr_sem);
+retry:
+	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
+		up_write(&EXT4_I(inode)->xattr_sem);
+		return 0;
+	}
+
+	header = IHDR(inode, raw_inode);
+	entry = IFIRST(header);
+
+	/*
+	 * Check if enough free space is available in the inode to shift the
+	 * entries ahead by new_extra_isize.
+	 */
+
+	base = start = entry;
+	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+	min_offs = end - base;
+	last = entry;
+	total_ino = sizeof(struct ext4_xattr_ibody_header);
+
+	free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
+	if (free >= new_extra_isize) {
+		entry = IFIRST(header);
+		ext4_xattr_shift_entries(entry,	EXT4_I(inode)->i_extra_isize
+				- new_extra_isize, (void *)raw_inode +
+				EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
+				(void *)header, total_ino,
+				inode->i_sb->s_blocksize);
+		EXT4_I(inode)->i_extra_isize = new_extra_isize;
+		error = 0;
+		goto cleanup;
+	}
+
+	/*
+	 * Enough free space isn't available in the inode, check if
+	 * EA block can hold new_extra_isize bytes.
+	 */
+	if (EXT4_I(inode)->i_file_acl) {
+		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
+		error = -EIO;
+		if (!bh)
+			goto cleanup;
+		if (ext4_xattr_check_block(bh)) {
+			ext4_error(inode->i_sb, __FUNCTION__,
+				"inode %lu: bad block %llu", inode->i_ino,
+				EXT4_I(inode)->i_file_acl);
+			error = -EIO;
+			goto cleanup;
+		}
+		base = BHDR(bh);
+		first = BFIRST(bh);
+		end = bh->b_data + bh->b_size;
+		min_offs = end - base;
+		free = ext4_xattr_free_space(first, &min_offs, base,
+					     &total_blk);
+		if (free < new_extra_isize) {
+			if (!tried_min_extra_isize && s_min_extra_isize) {
+				tried_min_extra_isize++;
+				new_extra_isize = s_min_extra_isize;
+				brelse(bh);
+				goto retry;
+			}
+			error = -1;
+			goto cleanup;
+		}
+	} else {
+		free = inode->i_sb->s_blocksize;
+	}
+
+	while (new_extra_isize > 0) {
+		size_t offs, size, entry_size;
+		struct ext4_xattr_entry *small_entry = NULL;
+		struct ext4_xattr_info i = {
+			.value = NULL,
+			.value_len = 0,
+		};
+		unsigned int total_size;  /* EA entry size + value size */
+		unsigned int shift_bytes; /* No. of bytes to shift EAs by? */
+		unsigned int min_total_size = ~0U;
+
+		is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
+		bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
+		if (!is || !bs) {
+			error = -ENOMEM;
+			goto cleanup;
+		}
+
+		is->s.not_found = -ENODATA;
+		bs->s.not_found = -ENODATA;
+		is->iloc.bh = NULL;
+		bs->bh = NULL;
+
+		last = IFIRST(header);
+		/* Find the entry best suited to be pushed into EA block */
+		entry = NULL;
+		for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+			total_size =
+			EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
+					EXT4_XATTR_LEN(last->e_name_len);
+			if (total_size <= free && total_size < min_total_size) {
+				if (total_size < new_extra_isize) {
+					small_entry = last;
+				} else {
+					entry = last;
+					min_total_size = total_size;
+				}
+			}
+		}
+
+		if (entry == NULL) {
+			if (small_entry) {
+				entry = small_entry;
+			} else {
+				if (!tried_min_extra_isize &&
+				    s_min_extra_isize) {
+					tried_min_extra_isize++;
+					new_extra_isize = s_min_extra_isize;
+					goto retry;
+				}
+				error = -1;
+				goto cleanup;
+			}
+		}
+		offs = le16_to_cpu(entry->e_value_offs);
+		size = le32_to_cpu(entry->e_value_size);
+		entry_size = EXT4_XATTR_LEN(entry->e_name_len);
+		i.name_index = entry->e_name_index;
+		buffer = kmalloc(EXT4_XATTR_SIZE(size), GFP_NOFS);
+		b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
+		if (!buffer || !b_entry_name) {
+			error = -ENOMEM;
+			goto cleanup;
+		}
+		/* Save the entry name and the entry value */
+		memcpy(buffer, (void *)IFIRST(header) + offs,
+		       EXT4_XATTR_SIZE(size));
+		memcpy(b_entry_name, entry->e_name, entry->e_name_len);
+		b_entry_name[entry->e_name_len] = '\0';
+		i.name = b_entry_name;
+
+		error = ext4_get_inode_loc(inode, &is->iloc);
+		if (error)
+			goto cleanup;
+
+		error = ext4_xattr_ibody_find(inode, &i, is);
+		if (error)
+			goto cleanup;
+
+		/* Remove the chosen entry from the inode */
+		error = ext4_xattr_ibody_set(handle, inode, &i, is);
+
+		entry = IFIRST(header);
+		if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
+			shift_bytes = new_extra_isize;
+		else
+			shift_bytes = entry_size + size;
+		/* Adjust the offsets and shift the remaining entries ahead */
+		ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize -
+			shift_bytes, (void *)raw_inode +
+			EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes,
+			(void *)header, total_ino - entry_size,
+			inode->i_sb->s_blocksize);
+
+		extra_isize += shift_bytes;
+		new_extra_isize -= shift_bytes;
+		EXT4_I(inode)->i_extra_isize = extra_isize;
+
+		i.name = b_entry_name;
+		i.value = buffer;
+		i.value_len = cpu_to_le32(size);
+		error = ext4_xattr_block_find(inode, &i, bs);
+		if (error)
+			goto cleanup;
+
+		/* Add entry which was removed from the inode into the block */
+		error = ext4_xattr_block_set(handle, inode, &i, bs);
+		if (error)
+			goto cleanup;
+		kfree(b_entry_name);
+		kfree(buffer);
+		brelse(is->iloc.bh);
+		kfree(is);
+		kfree(bs);
+	}
+	brelse(bh);
+	up_write(&EXT4_I(inode)->xattr_sem);
+	return 0;
+
+cleanup:
+	kfree(b_entry_name);
+	kfree(buffer);
+	if (is)
+		brelse(is->iloc.bh);
+	kfree(is);
+	kfree(bs);
+	brelse(bh);
+	up_write(&EXT4_I(inode)->xattr_sem);
+	return error;
+}
+
+
+
+/*
  * ext4_xattr_delete_inode()
  *
  * Free extended attribute resources associated with this inode. This
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 79432b3..d7f5d6a 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -56,6 +56,13 @@
 #define EXT4_XATTR_SIZE(size) \
 	(((size) + EXT4_XATTR_ROUND) & ~EXT4_XATTR_ROUND)
 
+#define IHDR(inode, raw_inode) \
+	((struct ext4_xattr_ibody_header *) \
+		((void *)raw_inode + \
+		EXT4_GOOD_OLD_INODE_SIZE + \
+		EXT4_I(inode)->i_extra_isize))
+#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
+
 # ifdef CONFIG_EXT4DEV_FS_XATTR
 
 extern struct xattr_handler ext4_xattr_user_handler;
@@ -74,6 +81,9 @@
 extern void ext4_xattr_delete_inode(handle_t *, struct inode *);
 extern void ext4_xattr_put_super(struct super_block *);
 
+extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
+			    struct ext4_inode *raw_inode, handle_t *handle);
+
 extern int init_ext4_xattr(void);
 extern void exit_ext4_xattr(void);
 
@@ -129,6 +139,13 @@
 {
 }
 
+static inline int
+ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
+			    struct ext4_inode *raw_inode, handle_t *handle)
+{
+	return -EOPNOTSUPP;
+}
+
 #define ext4_xattr_handlers	NULL
 
 # endif  /* CONFIG_EXT4DEV_FS_XATTR */
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index cfaf587..0a7ddb3 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -20,6 +20,7 @@
 #include <linux/pagemap.h>
 #include <linux/mpage.h>
 #include <linux/buffer_head.h>
+#include <linux/exportfs.h>
 #include <linux/mount.h>
 #include <linux/vfs.h>
 #include <linux/parser.h>
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 8e382a5..3f22e9f 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -215,7 +215,7 @@
 
 	/* O_NOATIME can only be set by the owner or superuser */
 	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
-		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			return -EPERM;
 
 	/* required for strict SunOS emulation */
diff --git a/fs/generic_acl.c b/fs/generic_acl.c
index 9ccb789..995d63b 100644
--- a/fs/generic_acl.c
+++ b/fs/generic_acl.c
@@ -78,7 +78,7 @@
 
 	if (S_ISLNK(inode->i_mode))
 		return -EOPNOTSUPP;
-	if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
+	if (!is_owner_or_cap(inode))
 		return -EPERM;
 	if (value) {
 		acl = posix_acl_from_xattr(value, size);
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 6e80844..1047a8c 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -74,7 +74,7 @@
 {
 	if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
 		return -EOPNOTSUPP;
-	if (current->fsuid != ip->i_inode.i_uid && !capable(CAP_FOWNER))
+	if (!is_owner_or_cap(&ip->i_inode))
 		return -EPERM;
 	if (S_ISLNK(ip->i_inode.i_mode))
 		return -EOPNOTSUPP;
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 26c8888..ce90032 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -251,7 +251,7 @@
 		if (file) {
 			gf = file->private_data;
 			if (test_bit(GFF_EXLOCK, &gf->f_flags))
-				/* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */
+				/* gfs2_sharewrite_fault has grabbed the ip->i_gl already */
 				goto skip_lock;
 		}
 		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index 99ea565..b8312ed 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -11,6 +11,7 @@
 #include <linux/spinlock.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
+#include <linux/exportfs.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/crc32.h>
 #include <linux/lm_interface.h>
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index 196d832..1a5e8e8 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -489,6 +489,29 @@
 }
 
 /**
+ * gfs2_setlease - acquire/release a file lease
+ * @file: the file pointer
+ * @arg: lease type
+ * @fl: file lock
+ *
+ * Returns: errno
+ */
+
+static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
+{
+	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
+
+	/*
+	 * We don't currently have a way to enforce a lease across the whole
+	 * cluster; until we do, disable leases (by just returning -EINVAL),
+	 * unless the administrator has requested purely local locking.
+	 */
+	if (!sdp->sd_args.ar_localflocks)
+		return -EINVAL;
+	return setlease(file, arg, fl);
+}
+
+/**
  * gfs2_lock - acquire/release a posix lock on a file
  * @file: the file pointer
  * @cmd: either modify or retrieve lock state, possibly wait
@@ -638,6 +661,7 @@
 	.flock		= gfs2_flock,
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= generic_file_splice_write,
+	.setlease	= gfs2_setlease,
 };
 
 const struct file_operations gfs2_dir_fops = {
diff --git a/fs/gfs2/ops_vm.c b/fs/gfs2/ops_vm.c
index 404b7cc..927d739 100644
--- a/fs/gfs2/ops_vm.c
+++ b/fs/gfs2/ops_vm.c
@@ -27,13 +27,12 @@
 #include "trans.h"
 #include "util.h"
 
-static struct page *gfs2_private_nopage(struct vm_area_struct *area,
-					unsigned long address, int *type)
+static int gfs2_private_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	struct gfs2_inode *ip = GFS2_I(area->vm_file->f_mapping->host);
+	struct gfs2_inode *ip = GFS2_I(vma->vm_file->f_mapping->host);
 
 	set_bit(GIF_PAGED, &ip->i_flags);
-	return filemap_nopage(area, address, type);
+	return filemap_fault(vma, vmf);
 }
 
 static int alloc_page_backing(struct gfs2_inode *ip, struct page *page)
@@ -104,58 +103,67 @@
 	return error;
 }
 
-static struct page *gfs2_sharewrite_nopage(struct vm_area_struct *area,
-					   unsigned long address, int *type)
+static int gfs2_sharewrite_fault(struct vm_area_struct *vma,
+						struct vm_fault *vmf)
 {
-	struct file *file = area->vm_file;
+	struct file *file = vma->vm_file;
 	struct gfs2_file *gf = file->private_data;
 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
 	struct gfs2_holder i_gh;
-	struct page *result = NULL;
-	unsigned long index = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) +
-			      area->vm_pgoff;
 	int alloc_required;
 	int error;
+	int ret = 0;
 
 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 	if (error)
-		return NULL;
+		goto out;
 
 	set_bit(GIF_PAGED, &ip->i_flags);
 	set_bit(GIF_SW_PAGED, &ip->i_flags);
 
-	error = gfs2_write_alloc_required(ip, (u64)index << PAGE_CACHE_SHIFT,
-					  PAGE_CACHE_SIZE, &alloc_required);
-	if (error)
-		goto out;
-
-	set_bit(GFF_EXLOCK, &gf->f_flags);
-	result = filemap_nopage(area, address, type);
-	clear_bit(GFF_EXLOCK, &gf->f_flags);
-	if (!result || result == NOPAGE_OOM)
-		goto out;
-
-	if (alloc_required) {
-		error = alloc_page_backing(ip, result);
-		if (error) {
-			page_cache_release(result);
-			result = NULL;
-			goto out;
-		}
-		set_page_dirty(result);
+	error = gfs2_write_alloc_required(ip,
+					(u64)vmf->pgoff << PAGE_CACHE_SHIFT,
+					PAGE_CACHE_SIZE, &alloc_required);
+	if (error) {
+		ret = VM_FAULT_OOM; /* XXX: are these right? */
+		goto out_unlock;
 	}
 
-out:
-	gfs2_glock_dq_uninit(&i_gh);
+	set_bit(GFF_EXLOCK, &gf->f_flags);
+	ret = filemap_fault(vma, vmf);
+	clear_bit(GFF_EXLOCK, &gf->f_flags);
+	if (ret & VM_FAULT_ERROR)
+		goto out_unlock;
 
-	return result;
+	if (alloc_required) {
+		/* XXX: do we need to drop page lock around alloc_page_backing? */
+		error = alloc_page_backing(ip, vmf->page);
+		if (error) {
+			/*
+			 * VM_FAULT_LOCKED should always be the case for
+			 * filemap_fault, but it may not be in a future
+			 * implementation.
+			 */
+			if (ret & VM_FAULT_LOCKED)
+				unlock_page(vmf->page);
+			page_cache_release(vmf->page);
+			ret = VM_FAULT_OOM;
+			goto out_unlock;
+		}
+		set_page_dirty(vmf->page);
+	}
+
+out_unlock:
+	gfs2_glock_dq_uninit(&i_gh);
+out:
+	return ret;
 }
 
 struct vm_operations_struct gfs2_vm_ops_private = {
-	.nopage = gfs2_private_nopage,
+	.fault = gfs2_private_fault,
 };
 
 struct vm_operations_struct gfs2_vm_ops_sharewrite = {
-	.nopage = gfs2_sharewrite_nopage,
+	.fault = gfs2_sharewrite_fault,
 };
 
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index 79fd104..b60c0af 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -38,7 +38,7 @@
 		if (IS_RDONLY(inode))
 			return -EROFS;
 
-		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			return -EACCES;
 
 		if (get_user(flags, (int __user *)arg))
diff --git a/fs/inode.c b/fs/inode.c
index 9a012cc..320e088 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -145,7 +145,7 @@
 		mapping->a_ops = &empty_aops;
  		mapping->host = inode;
 		mapping->flags = 0;
-		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
+		mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
 		mapping->assoc_mapping = NULL;
 		mapping->backing_dev_info = &default_backing_dev_info;
 
@@ -462,6 +462,11 @@
 	return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
+static struct shrinker icache_shrinker = {
+	.shrink = shrink_icache_memory,
+	.seeks = DEFAULT_SEEKS,
+};
+
 static void __wait_on_freeing_inode(struct inode *inode);
 /*
  * Called with the inode lock held.
@@ -519,7 +524,13 @@
  *	new_inode 	- obtain an inode
  *	@sb: superblock
  *
- *	Allocates a new inode for given superblock.
+ *	Allocates a new inode for the given superblock. The default gfp_mask
+ *	for allocations related to inode->i_mapping is GFP_HIGHUSER_PAGECACHE.
+ *	If HIGHMEM pages are unsuitable or it is known that pages allocated
+ *	for the page cache are not reclaimable or migratable,
+ *	mapping_set_gfp_mask() must be called with suitable flags on the
+ *	newly created inode's mapping.
+ *
  */
 struct inode *new_inode(struct super_block *sb)
 {
@@ -1379,7 +1390,7 @@
 					 SLAB_MEM_SPREAD),
 					 init_once,
 					 NULL);
-	set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
+	register_shrinker(&icache_shrinker);
 
 	/* Hash may have been set up in inode_init_early */
 	if (!hashdist)
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index efe2872..a07e67b 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -1,5 +1,6 @@
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
+#include <linux/exportfs.h>
 #include <linux/iso_fs.h>
 #include <asm/unaligned.h>
 
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 78d63b8..f290cb7 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -35,6 +35,7 @@
 #include <linux/kthread.h>
 #include <linux/poison.h>
 #include <linux/proc_fs.h>
+#include <linux/debugfs.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -528,7 +529,7 @@
 {
 	int err = 0;
 
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
 	spin_lock(&journal->j_state_lock);
 	if (!tid_geq(journal->j_commit_request, tid)) {
 		printk(KERN_EMERG
@@ -1709,7 +1710,7 @@
  * Journal_head storage management
  */
 static struct kmem_cache *jbd2_journal_head_cache;
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
 #endif
 
@@ -1747,7 +1748,7 @@
 	struct journal_head *ret;
 	static unsigned long last_warning;
 
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
 	atomic_inc(&nr_journal_heads);
 #endif
 	ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
@@ -1768,7 +1769,7 @@
 
 static void journal_free_journal_head(struct journal_head *jh)
 {
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
 	atomic_dec(&nr_journal_heads);
 	memset(jh, JBD_POISON_FREE, sizeof(*jh));
 #endif
@@ -1951,63 +1952,49 @@
 }
 
 /*
- * /proc tunables
+ * debugfs tunables
  */
-#if defined(CONFIG_JBD_DEBUG)
-int jbd2_journal_enable_debug;
+#if defined(CONFIG_JBD2_DEBUG)
+u8 jbd2_journal_enable_debug;
 EXPORT_SYMBOL(jbd2_journal_enable_debug);
 #endif
 
-#if defined(CONFIG_JBD_DEBUG) && defined(CONFIG_PROC_FS)
+#if defined(CONFIG_JBD2_DEBUG) && defined(CONFIG_DEBUG_FS)
 
-static struct proc_dir_entry *proc_jbd_debug;
+#define JBD2_DEBUG_NAME "jbd2-debug"
 
-static int read_jbd_debug(char *page, char **start, off_t off,
-			  int count, int *eof, void *data)
+struct dentry *jbd2_debugfs_dir, *jbd2_debug;
+
+static void __init jbd2_create_debugfs_entry(void)
 {
-	int ret;
-
-	ret = sprintf(page + off, "%d\n", jbd2_journal_enable_debug);
-	*eof = 1;
-	return ret;
+	jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL);
+	if (jbd2_debugfs_dir)
+		jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME, S_IRUGO,
+					       jbd2_debugfs_dir,
+					       &jbd2_journal_enable_debug);
 }
 
-static int write_jbd_debug(struct file *file, const char __user *buffer,
-			   unsigned long count, void *data)
+static void __exit jbd2_remove_debugfs_entry(void)
 {
-	char buf[32];
-
-	if (count > ARRAY_SIZE(buf) - 1)
-		count = ARRAY_SIZE(buf) - 1;
-	if (copy_from_user(buf, buffer, count))
-		return -EFAULT;
-	buf[ARRAY_SIZE(buf) - 1] = '\0';
-	jbd2_journal_enable_debug = simple_strtoul(buf, NULL, 10);
-	return count;
-}
-
-#define JBD_PROC_NAME "sys/fs/jbd2-debug"
-
-static void __init create_jbd_proc_entry(void)
-{
-	proc_jbd_debug = create_proc_entry(JBD_PROC_NAME, 0644, NULL);
-	if (proc_jbd_debug) {
-		/* Why is this so hard? */
-		proc_jbd_debug->read_proc = read_jbd_debug;
-		proc_jbd_debug->write_proc = write_jbd_debug;
-	}
-}
-
-static void __exit jbd2_remove_jbd_proc_entry(void)
-{
-	if (proc_jbd_debug)
-		remove_proc_entry(JBD_PROC_NAME, NULL);
+	if (jbd2_debug)
+		debugfs_remove(jbd2_debug);
+	if (jbd2_debugfs_dir)
+		debugfs_remove(jbd2_debugfs_dir);
 }
 
 #else
 
-#define create_jbd_proc_entry() do {} while (0)
-#define jbd2_remove_jbd_proc_entry() do {} while (0)
+static void __init jbd2_create_debugfs_entry(void)
+{
+	do {
+	} while (0);
+}
+
+static void __exit jbd2_remove_debugfs_entry(void)
+{
+	do {
+	} while (0);
+}
 
 #endif
 
@@ -2067,18 +2054,18 @@
 	ret = journal_init_caches();
 	if (ret != 0)
 		jbd2_journal_destroy_caches();
-	create_jbd_proc_entry();
+	jbd2_create_debugfs_entry();
 	return ret;
 }
 
 static void __exit journal_exit(void)
 {
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
 	int n = atomic_read(&nr_journal_heads);
 	if (n)
 		printk(KERN_EMERG "JBD: leaked %d journal_heads!\n", n);
 #endif
-	jbd2_remove_jbd_proc_entry();
+	jbd2_remove_debugfs_entry();
 	jbd2_journal_destroy_caches();
 }
 
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 395c92a..b50be8a 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -251,10 +251,10 @@
 	if (!err)
 		err = do_one_pass(journal, &info, PASS_REPLAY);
 
-	jbd_debug(0, "JBD: recovery, exit status %d, "
+	jbd_debug(1, "JBD: recovery, exit status %d, "
 		  "recovered transactions %u to %u\n",
 		  err, info.start_transaction, info.end_transaction);
-	jbd_debug(0, "JBD: Replayed %d and revoked %d/%d blocks\n",
+	jbd_debug(1, "JBD: Replayed %d and revoked %d/%d blocks\n",
 		  info.nr_replays, info.nr_revoke_hits, info.nr_revokes);
 
 	/* Restart the log at the next transaction ID, thus invalidating
@@ -295,10 +295,10 @@
 		printk(KERN_ERR "JBD: error %d scanning journal\n", err);
 		++journal->j_transaction_sequence;
 	} else {
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
 		int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence);
 #endif
-		jbd_debug(0,
+		jbd_debug(1,
 			  "JBD: ignoring %d transaction%s from the journal.\n",
 			  dropped, (dropped == 1) ? "" : "s");
 		journal->j_transaction_sequence = ++info.end_transaction;
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index a46101e..65b3a1b 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -435,7 +435,7 @@
 	struct posix_acl *acl;
 	int rc;
 
-	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+	if (!is_owner_or_cap(inode))
 		return -EPERM;
 
 	if (value) {
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 0c82dfc..143c553 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -81,6 +81,7 @@
 
 	set_user_nice(current, 10);
 
+	set_freezable();
 	for (;;) {
 		allow_signal(SIGHUP);
 
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index fe063af..3c8663b 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -69,7 +69,7 @@
 		if (IS_RDONLY(inode))
 			return -EROFS;
 
-		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			return -EACCES;
 
 		if (get_user(flags, (int __user *) arg))
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 2374b59..f0ec72b 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -32,6 +32,7 @@
 extern void jfs_free_zero_link(struct inode *);
 extern struct dentry *jfs_get_parent(struct dentry *dentry);
 extern void jfs_get_inode_flags(struct jfs_inode_info *);
+extern struct dentry *jfs_get_dentry(struct super_block *sb, void *vobjp);
 extern void jfs_set_inode_flags(struct inode *);
 extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
 
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 25161c4..932797b 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1477,6 +1477,38 @@
 	return dentry;
 }
 
+struct dentry *jfs_get_dentry(struct super_block *sb, void *vobjp)
+{
+	__u32 *objp = vobjp;
+	unsigned long ino = objp[0];
+	__u32 generation = objp[1];
+	struct inode *inode;
+	struct dentry *result;
+
+	if (ino == 0)
+		return ERR_PTR(-ESTALE);
+	inode = iget(sb, ino);
+	if (inode == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	if (is_bad_inode(inode) ||
+	    (generation && inode->i_generation != generation)) {
+	    	result = ERR_PTR(-ESTALE);
+		goto out_iput;
+	}
+
+	result = d_alloc_anon(inode);
+	if (!result) {
+		result = ERR_PTR(-ENOMEM);
+		goto out_iput;
+	}
+	return result;
+
+ out_iput:
+	iput(inode);
+	return result;
+}
+
 struct dentry *jfs_get_parent(struct dentry *dentry)
 {
 	struct super_block *sb = dentry->d_inode->i_sb;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 20e4ac1..929fcec 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -27,6 +27,7 @@
 #include <linux/kthread.h>
 #include <linux/posix_acl.h>
 #include <linux/buffer_head.h>
+#include <linux/exportfs.h>
 #include <asm/uaccess.h>
 #include <linux/seq_file.h>
 
@@ -737,6 +738,7 @@
 };
 
 static struct export_operations jfs_export_operations = {
+	.get_dentry	= jfs_get_dentry,
 	.get_parent	= jfs_get_parent,
 };
 
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index b2375f0..9b7f2cd 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -697,7 +697,7 @@
 	struct posix_acl *acl;
 	int rc;
 
-	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+	if (!is_owner_or_cap(inode))
 		return -EPERM;
 
 	/*
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 2680932..82e2192 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -25,6 +25,7 @@
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/stats.h>
@@ -75,18 +76,31 @@
 
 static struct ctl_table_header * nlm_sysctl_table;
 
-static unsigned long set_grace_period(void)
+static unsigned long get_lockd_grace_period(void)
 {
-	unsigned long grace_period;
-
 	/* Note: nlm_timeout should always be nonzero */
 	if (nlm_grace_period)
-		grace_period = ((nlm_grace_period + nlm_timeout - 1)
-				/ nlm_timeout) * nlm_timeout * HZ;
+		return roundup(nlm_grace_period, nlm_timeout) * HZ;
 	else
-		grace_period = nlm_timeout * 5 * HZ;
+		return nlm_timeout * 5 * HZ;
+}
+
+unsigned long get_nfs_grace_period(void)
+{
+	unsigned long lockdgrace = get_lockd_grace_period();
+	unsigned long nfsdgrace = 0;
+
+	if (nlmsvc_ops)
+		nfsdgrace = nlmsvc_ops->get_grace_period();
+
+	return max(lockdgrace, nfsdgrace);
+}
+EXPORT_SYMBOL(get_nfs_grace_period);
+
+static unsigned long set_grace_period(void)
+{
 	nlmsvc_grace_period = 1;
-	return grace_period + jiffies;
+	return get_nfs_grace_period() + jiffies;
 }
 
 static inline void clear_grace_period(void)
@@ -119,6 +133,7 @@
 	complete(&lockd_start_done);
 
 	daemonize("lockd");
+	set_freezable();
 
 	/* Process request with signals blocked, but allow SIGKILL.  */
 	allow_signal(SIGKILL);
diff --git a/fs/locks.c b/fs/locks.c
index 431a8b8..4f2d749 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -458,22 +458,20 @@
 }
 
 /* Allocate a file_lock initialised to this type of lease */
-static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
+static struct file_lock *lease_alloc(struct file *filp, int type)
 {
 	struct file_lock *fl = locks_alloc_lock();
 	int error = -ENOMEM;
 
 	if (fl == NULL)
-		goto out;
+		return ERR_PTR(error);
 
 	error = lease_init(filp, type, fl);
 	if (error) {
 		locks_free_lock(fl);
-		fl = NULL;
+		return ERR_PTR(error);
 	}
-out:
-	*flp = fl;
-	return error;
+	return fl;
 }
 
 /* Check if two locks overlap each other.
@@ -661,7 +659,7 @@
 	return result;
 }
 
-int
+void
 posix_test_lock(struct file *filp, struct file_lock *fl)
 {
 	struct file_lock *cfl;
@@ -673,14 +671,12 @@
 		if (posix_locks_conflict(cfl, fl))
 			break;
 	}
-	if (cfl) {
+	if (cfl)
 		__locks_copy_lock(fl, cfl);
-		unlock_kernel();
-		return 1;
-	} else
+	else
 		fl->fl_type = F_UNLCK;
 	unlock_kernel();
-	return 0;
+	return;
 }
 
 EXPORT_SYMBOL(posix_test_lock);
@@ -1169,9 +1165,9 @@
  *	@inode: the inode of the file to return
  *	@mode: the open mode (read or write)
  *
- *	break_lease (inlined for speed) has checked there already
- *	is a lease on this file.  Leases are broken on a call to open()
- *	or truncate().  This function can sleep unless you
+ *	break_lease (inlined for speed) has checked there already is at least
+ *	some kind of lock (maybe a lease) on this file.  Leases are broken on
+ *	a call to open() or truncate().  This function can sleep unless you
  *	specified %O_NONBLOCK to your open().
  */
 int __break_lease(struct inode *inode, unsigned int mode)
@@ -1179,12 +1175,10 @@
 	int error = 0, future;
 	struct file_lock *new_fl, *flock;
 	struct file_lock *fl;
-	int alloc_err;
 	unsigned long break_time;
 	int i_have_this_lease = 0;
 
-	alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK,
-			&new_fl);
+	new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK);
 
 	lock_kernel();
 
@@ -1212,8 +1206,9 @@
 		goto out;
 	}
 
-	if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) {
-		error = alloc_err;
+	if (IS_ERR(new_fl) && !i_have_this_lease
+			&& ((mode & O_NONBLOCK) == 0)) {
+		error = PTR_ERR(new_fl);
 		goto out;
 	}
 
@@ -1260,7 +1255,7 @@
 
 out:
 	unlock_kernel();
-	if (!alloc_err)
+	if (!IS_ERR(new_fl))
 		locks_free_lock(new_fl);
 	return error;
 }
@@ -1329,7 +1324,7 @@
 }
 
 /**
- *	__setlease	-	sets a lease on an open file
+ *	setlease	-	sets a lease on an open file
  *	@filp: file pointer
  *	@arg: type of lease to obtain
  *	@flp: input - file_lock to use, output - file_lock inserted
@@ -1339,18 +1334,24 @@
  *
  *	Called with kernel lock held.
  */
-static int __setlease(struct file *filp, long arg, struct file_lock **flp)
+int setlease(struct file *filp, long arg, struct file_lock **flp)
 {
 	struct file_lock *fl, **before, **my_before = NULL, *lease;
 	struct dentry *dentry = filp->f_path.dentry;
 	struct inode *inode = dentry->d_inode;
 	int error, rdlease_count = 0, wrlease_count = 0;
 
+	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
+		return -EACCES;
+	if (!S_ISREG(inode->i_mode))
+		return -EINVAL;
+	error = security_file_lock(filp, arg);
+	if (error)
+		return error;
+
 	time_out_leases(inode);
 
-	error = -EINVAL;
-	if (!flp || !(*flp) || !(*flp)->fl_lmops || !(*flp)->fl_lmops->fl_break)
-		goto out;
+	BUG_ON(!(*flp)->fl_lmops->fl_break);
 
 	lease = *flp;
 
@@ -1418,39 +1419,49 @@
 out:
 	return error;
 }
+EXPORT_SYMBOL(setlease);
 
  /**
- *	setlease        -       sets a lease on an open file
+ *	vfs_setlease        -       sets a lease on an open file
  *	@filp: file pointer
  *	@arg: type of lease to obtain
  *	@lease: file_lock to use
  *
  *	Call this to establish a lease on the file.
- *	The fl_lmops fl_break function is required by break_lease
+ *	The (*lease)->fl_lmops->fl_break operation must be set; if not,
+ *	break_lease will oops!
+ *
+ *	This will call the filesystem's setlease file method, if
+ *	defined.  Note that there is no getlease method; instead, the
+ *	filesystem setlease method should call back to setlease() to
+ *	add a lease to the inode's lease list, where fcntl_getlease() can
+ *	find it.  Since fcntl_getlease() only reports whether the current
+ *	task holds a lease, a cluster filesystem need only do this for
+ *	leases held by processes on this node.
+ *
+ *	There is also no break_lease method; filesystems that
+ *	handle their own leases should break leases themselves from the
+ *	filesystem's open, create, and (on truncate) setattr methods.
+ *
+ *	Warning: the only current setlease methods exist only to disable
+ *	leases in certain cases.  More vfs changes may be required to
+ *	allow a full filesystem lease implementation.
  */
 
-int setlease(struct file *filp, long arg, struct file_lock **lease)
+int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
 {
-	struct dentry *dentry = filp->f_path.dentry;
-	struct inode *inode = dentry->d_inode;
 	int error;
 
-	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
-		return -EACCES;
-	if (!S_ISREG(inode->i_mode))
-		return -EINVAL;
-	error = security_file_lock(filp, arg);
-	if (error)
-		return error;
-
 	lock_kernel();
-	error = __setlease(filp, arg, lease);
+	if (filp->f_op && filp->f_op->setlease)
+		error = filp->f_op->setlease(filp, arg, lease);
+	else
+		error = setlease(filp, arg, lease);
 	unlock_kernel();
 
 	return error;
 }
-
-EXPORT_SYMBOL(setlease);
+EXPORT_SYMBOL_GPL(vfs_setlease);
 
 /**
  *	fcntl_setlease	-	sets a lease on an open file
@@ -1469,14 +1480,6 @@
 	struct inode *inode = dentry->d_inode;
 	int error;
 
-	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
-		return -EACCES;
-	if (!S_ISREG(inode->i_mode))
-		return -EINVAL;
-	error = security_file_lock(filp, arg);
-	if (error)
-		return error;
-
 	locks_init_lock(&fl);
 	error = lease_init(filp, arg, &fl);
 	if (error)
@@ -1484,15 +1487,15 @@
 
 	lock_kernel();
 
-	error = __setlease(filp, arg, &flp);
+	error = vfs_setlease(filp, arg, &flp);
 	if (error || arg == F_UNLCK)
 		goto out_unlock;
 
 	error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
 	if (error < 0) {
-		/* remove lease just inserted by __setlease */
+		/* remove lease just inserted by setlease */
 		flp->fl_type = F_UNLCK | F_INPROGRESS;
-		flp->fl_break_time = jiffies- 10;
+		flp->fl_break_time = jiffies - 10;
 		time_out_leases(inode);
 		goto out_unlock;
 	}
@@ -1597,8 +1600,7 @@
 /**
  * vfs_test_lock - test file byte range lock
  * @filp: The file to test lock for
- * @fl: The lock to test
- * @conf: Place to return a copy of the conflicting lock, if found
+ * @fl: The lock to test; also used to hold result
  *
  * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
  * setting conf->fl_type to something other than F_UNLCK.
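A minimal sketch (not taken from the patch, and assuming a hypothetical "examplefs") of how a filesystem could supply the new ->setlease file operation documented above while still calling back into the generic setlease(), so the lease ends up on the inode's lease list where fcntl_getlease() can find it:

	/* hypothetical filesystem hook; only the generic setlease() call is from the patch */
	static int examplefs_setlease(struct file *filp, long arg,
				      struct file_lock **flp)
	{
		/* e.g. refuse write leases this filesystem cannot revoke cluster-wide */
		if (arg == F_WRLCK)
			return -EINVAL;
		return setlease(filp, arg, flp);	/* generic VFS implementation */
	}

	static const struct file_operations examplefs_file_ops = {
		.setlease	= examplefs_setlease,
		/* other methods omitted */
	};
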
diff --git a/fs/mbcache.c b/fs/mbcache.c
index deeb9dc..fbb1d02 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -100,7 +100,6 @@
 static LIST_HEAD(mb_cache_list);
 static LIST_HEAD(mb_cache_lru_list);
 static DEFINE_SPINLOCK(mb_cache_spinlock);
-static struct shrinker *mb_shrinker;
 
 static inline int
 mb_cache_indexes(struct mb_cache *cache)
@@ -118,6 +117,10 @@
 
 static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
 
+static struct shrinker mb_cache_shrinker = {
+	.shrink = mb_cache_shrink_fn,
+	.seeks = DEFAULT_SEEKS,
+};
 
 static inline int
 __mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
@@ -662,13 +665,13 @@
 
 static int __init init_mbcache(void)
 {
-	mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
+	register_shrinker(&mb_cache_shrinker);
 	return 0;
 }
 
 static void __exit exit_mbcache(void)
 {
-	remove_shrinker(mb_shrinker);
+	unregister_shrinker(&mb_cache_shrinker);
 }
 
 module_init(init_mbcache)
diff --git a/fs/namei.c b/fs/namei.c
index 5e2d98d..a83160a 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -107,6 +107,8 @@
  * any extra contention...
  */
 
+static int fastcall link_path_walk(const char *name, struct nameidata *nd);
+
 /* In order to reduce some races, while at the same time doing additional
  * checking and hopefully speeding things up, we copy filenames to the
  * kernel data space before using them..
@@ -998,7 +1000,7 @@
  * Retry the whole path once, forcing real lookup requests
  * instead of relying on the dcache.
  */
-int fastcall link_path_walk(const char *name, struct nameidata *nd)
+static int fastcall link_path_walk(const char *name, struct nameidata *nd)
 {
 	struct nameidata save = *nd;
 	int result;
@@ -1022,7 +1024,7 @@
 	return result;
 }
 
-int fastcall path_walk(const char * name, struct nameidata *nd)
+static int fastcall path_walk(const char * name, struct nameidata *nd)
 {
 	current->total_link_count = 0;
 	return link_path_walk(name, nd);
@@ -1172,6 +1174,37 @@
 	return do_path_lookup(AT_FDCWD, name, flags, nd);
 }
 
+/**
+ * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
+ * @dentry:  pointer to dentry of the base directory
+ * @mnt: pointer to vfs mount of the base directory
+ * @name: pointer to file name
+ * @flags: lookup flags
+ * @nd: pointer to nameidata
+ */
+int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
+		    const char *name, unsigned int flags,
+		    struct nameidata *nd)
+{
+	int retval;
+
+	/* same as do_path_lookup */
+	nd->last_type = LAST_ROOT;
+	nd->flags = flags;
+	nd->depth = 0;
+
+	nd->mnt = mntget(mnt);
+	nd->dentry = dget(dentry);
+
+	retval = path_walk(name, nd);
+	if (unlikely(!retval && !audit_dummy_context() && nd->dentry &&
+				nd->dentry->d_inode))
+		audit_inode(name, nd->dentry->d_inode);
+
+	return retval;
+
+}
+
 static int __path_lookup_intent_open(int dfd, const char *name,
 		unsigned int lookup_flags, struct nameidata *nd,
 		int open_flags, int create_mode)
@@ -1576,7 +1609,7 @@
 
 	/* O_NOATIME can only be set by the owner or superuser */
 	if (flag & O_NOATIME)
-		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			return -EPERM;
 
 	/*
@@ -2774,8 +2807,8 @@
 EXPORT_SYMBOL(page_symlink);
 EXPORT_SYMBOL(page_symlink_inode_operations);
 EXPORT_SYMBOL(path_lookup);
+EXPORT_SYMBOL(vfs_path_lookup);
 EXPORT_SYMBOL(path_release);
-EXPORT_SYMBOL(path_walk);
 EXPORT_SYMBOL(permission);
 EXPORT_SYMBOL(vfs_permission);
 EXPORT_SYMBOL(file_permission);
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index 70a6911..a94473d 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -24,31 +24,35 @@
 
 /*
  * Fill in the supplied page for mmap
+ * XXX: how are we excluding truncate/invalidate here? Maybe need to lock
+ * page?
  */
-static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area,
-				     unsigned long address, int *type)
+static int ncp_file_mmap_fault(struct vm_area_struct *area,
+					struct vm_fault *vmf)
 {
 	struct file *file = area->vm_file;
 	struct dentry *dentry = file->f_path.dentry;
 	struct inode *inode = dentry->d_inode;
-	struct page* page;
 	char *pg_addr;
 	unsigned int already_read;
 	unsigned int count;
 	int bufsize;
-	int pos;
+	int pos; /* XXX: loff_t ? */
 
-	page = alloc_page(GFP_HIGHUSER); /* ncpfs has nothing against high pages
-	           as long as recvmsg and memset works on it */
-	if (!page)
-		return page;
-	pg_addr = kmap(page);
-	address &= PAGE_MASK;
-	pos = address - area->vm_start + (area->vm_pgoff << PAGE_SHIFT);
+	/*
+	 * ncpfs has nothing against high pages as long
+	 * as recvmsg and memset works on it
+	 */
+	vmf->page = alloc_page(GFP_HIGHUSER);
+	if (!vmf->page)
+		return VM_FAULT_OOM;
+	pg_addr = kmap(vmf->page);
+	pos = vmf->pgoff << PAGE_SHIFT;
 
 	count = PAGE_SIZE;
-	if (address + PAGE_SIZE > area->vm_end) {
-		count = area->vm_end - address;
+	if ((unsigned long)vmf->virtual_address + PAGE_SIZE > area->vm_end) {
+		WARN_ON(1); /* shouldn't happen? */
+		count = area->vm_end - (unsigned long)vmf->virtual_address;
 	}
 	/* what we can read in one go */
 	bufsize = NCP_SERVER(inode)->buffer_size;
@@ -83,23 +87,21 @@
 
 	if (already_read < PAGE_SIZE)
 		memset(pg_addr + already_read, 0, PAGE_SIZE - already_read);
-	flush_dcache_page(page);
-	kunmap(page);
+	flush_dcache_page(vmf->page);
+	kunmap(vmf->page);
 
 	/*
 	 * If I understand ncp_read_kernel() properly, the above always
 	 * fetches from the network, here the analogue of disk.
 	 * -- wli
 	 */
-	if (type)
-		*type = VM_FAULT_MAJOR;
 	count_vm_event(PGMAJFAULT);
-	return page;
+	return VM_FAULT_MAJOR;
 }
 
 static struct vm_operations_struct ncp_file_mmap =
 {
-	.nopage	= ncp_file_mmap_nopage,
+	.fault = ncp_file_mmap_fault,
 };
 
 
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 75f309c..a796be5 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -14,6 +14,7 @@
 #include <linux/sunrpc/svcsock.h>
 #include <linux/nfs_fs.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 #include <net/inet_sock.h>
 
@@ -67,6 +68,7 @@
 	daemonize("nfsv4-svc");
 	/* Process request with signals blocked, but allow SIGKILL.  */
 	allow_signal(SIGKILL);
+	set_freezable();
 
 	complete(&nfs_callback_info.started);
 
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 8689b73..c87dc71 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -53,6 +53,7 @@
 static int nfs_check_flags(int flags);
 static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
 static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
+static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
 
 const struct file_operations nfs_file_operations = {
 	.llseek		= nfs_file_llseek,
@@ -69,6 +70,7 @@
 	.flock		= nfs_flock,
 	.splice_read	= nfs_file_splice_read,
 	.check_flags	= nfs_check_flags,
+	.setlease	= nfs_setlease,
 };
 
 const struct inode_operations nfs_file_inode_operations = {
@@ -400,7 +402,9 @@
 
 	lock_kernel();
 	/* Try local locking first */
-	if (posix_test_lock(filp, fl)) {
+	posix_test_lock(filp, fl);
+	if (fl->fl_type != F_UNLCK) {
+		/* found a conflict */
 		goto out;
 	}
 
@@ -558,3 +562,13 @@
 		return do_unlk(filp, cmd, fl);
 	return do_setlk(filp, cmd, fl);
 }
+
+static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
+{
+	/*
+	 * There is no protocol support for leases, so we have no way
+	 * to implement them correctly in the face of opens by other
+	 * clients.
+	 */
+	return -EINVAL;
+}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index a2b1af8..adffe16 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -300,7 +300,10 @@
 };
 #endif
 
-static struct shrinker *acl_shrinker;
+static struct shrinker acl_shrinker = {
+	.shrink		= nfs_access_cache_shrinker,
+	.seeks		= DEFAULT_SEEKS,
+};
 
 /*
  * Register the NFS filesystems
@@ -321,7 +324,7 @@
 	if (ret < 0)
 		goto error_2;
 #endif
-	acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker);
+	register_shrinker(&acl_shrinker);
 	return 0;
 
 #ifdef CONFIG_NFS_V4
@@ -339,8 +342,7 @@
  */
 void __exit unregister_nfs_fs(void)
 {
-	if (acl_shrinker != NULL)
-		remove_shrinker(acl_shrinker);
+	unregister_shrinker(&acl_shrinker);
 #ifdef CONFIG_NFS_V4
 	unregister_filesystem(&nfs4_fs_type);
 	nfs_unregister_sysctl();
diff --git a/fs/nfsctl.c b/fs/nfsctl.c
index c043136a..51f1b31 100644
--- a/fs/nfsctl.c
+++ b/fs/nfsctl.c
@@ -23,19 +23,15 @@
 static struct file *do_open(char *name, int flags)
 {
 	struct nameidata nd;
+	struct vfsmount *mnt;
 	int error;
 
-	nd.mnt = do_kern_mount("nfsd", 0, "nfsd", NULL);
+	mnt = do_kern_mount("nfsd", 0, "nfsd", NULL);
+	if (IS_ERR(mnt))
+		return (struct file *)mnt;
 
-	if (IS_ERR(nd.mnt))
-		return (struct file *)nd.mnt;
-
-	nd.dentry = dget(nd.mnt->mnt_root);
-	nd.last_type = LAST_ROOT;
-	nd.flags = 0;
-	nd.depth = 0;
-
-	error = path_walk(name, &nd);
+	error = vfs_path_lookup(mnt->mnt_root, mnt, name, 0, &nd);
+	mntput(mnt);	/* drop do_kern_mount reference */
 	if (error)
 		return ERR_PTR(error);
 
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 6e92b0f..2192805 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -9,20 +9,35 @@
 #include <linux/sunrpc/svc.h>
 #include <linux/sunrpc/svcauth.h>
 #include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/export.h>
 
 #define	CAP_NFSD_MASK (CAP_FS_MASK|CAP_TO_MASK(CAP_SYS_RESOURCE))
 
+int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
+{
+	struct exp_flavor_info *f;
+	struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
+
+	for (f = exp->ex_flavors; f < end; f++) {
+		if (f->pseudoflavor == rqstp->rq_flavor)
+			return f->flags;
+	}
+	return exp->ex_flags;
+
+}
+
 int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
 {
 	struct svc_cred	cred = rqstp->rq_cred;
 	int i;
+	int flags = nfsexp_flags(rqstp, exp);
 	int ret;
 
-	if (exp->ex_flags & NFSEXP_ALLSQUASH) {
+	if (flags & NFSEXP_ALLSQUASH) {
 		cred.cr_uid = exp->ex_anon_uid;
 		cred.cr_gid = exp->ex_anon_gid;
 		cred.cr_group_info = groups_alloc(0);
-	} else if (exp->ex_flags & NFSEXP_ROOTSQUASH) {
+	} else if (flags & NFSEXP_ROOTSQUASH) {
 		struct group_info *gi;
 		if (!cred.cr_uid)
 			cred.cr_uid = exp->ex_anon_uid;
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 79bd03b..6ab8de4 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -26,12 +26,15 @@
 #include <linux/mount.h>
 #include <linux/hash.h>
 #include <linux/module.h>
+#include <linux/exportfs.h>
 
 #include <linux/sunrpc/svc.h>
 #include <linux/nfsd/nfsd.h>
 #include <linux/nfsd/nfsfh.h>
 #include <linux/nfsd/syscall.h>
 #include <linux/lockd/bind.h>
+#include <linux/sunrpc/msg_prot.h>
+#include <linux/sunrpc/gss_api.h>
 
 #define NFSDDBG_FACILITY	NFSDDBG_EXPORT
 
@@ -451,8 +454,48 @@
 	return err;
 }
 
+static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp)
+{
+	int listsize, err;
+	struct exp_flavor_info *f;
+
+	err = get_int(mesg, &listsize);
+	if (err)
+		return err;
+	if (listsize < 0 || listsize > MAX_SECINFO_LIST)
+		return -EINVAL;
+
+	for (f = exp->ex_flavors; f < exp->ex_flavors + listsize; f++) {
+		err = get_int(mesg, &f->pseudoflavor);
+		if (err)
+			return err;
+		/*
+		 * Just a quick sanity check; we could also try to check
+		 * whether this pseudoflavor is supported, but at worst
+		 * an unsupported pseudoflavor on the export would just
+		 * be a pseudoflavor that won't match the flavor of any
+		 * authenticated request.  The administrator will
+		 * probably discover the problem when someone fails to
+		 * authenticate.
+		 */
+		if (f->pseudoflavor < 0)
+			return -EINVAL;
+		err = get_int(mesg, &f->flags);
+		if (err)
+			return err;
+		/* Only some flags are allowed to differ between flavors: */
+		if (~NFSEXP_SECINFO_FLAGS & (f->flags ^ exp->ex_flags))
+			return -EINVAL;
+	}
+	exp->ex_nflavors = listsize;
+	return 0;
+}
+
 #else /* CONFIG_NFSD_V4 */
-static inline int fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc) { return 0; }
+static inline int
+fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc){return 0;}
+static inline int
+secinfo_parse(char **mesg, char *buf, struct svc_export *exp) { return 0; }
 #endif
 
 static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
@@ -476,6 +519,9 @@
 
 	exp.ex_uuid = NULL;
 
+	/* secinfo */
+	exp.ex_nflavors = 0;
+
 	if (mesg[mlen-1] != '\n')
 		return -EINVAL;
 	mesg[mlen-1] = 0;
@@ -553,7 +599,9 @@
 					if (exp.ex_uuid == NULL)
 						err = -ENOMEM;
 				}
-			} else
+			} else if (strcmp(buf, "secinfo") == 0)
+				err = secinfo_parse(&mesg, buf, &exp);
+			else
 				/* quietly ignore unknown words and anything
 				 * following. Newer user-space can try to set
 				 * new values, then see what the result was.
@@ -593,6 +641,7 @@
 
 static void exp_flags(struct seq_file *m, int flag, int fsid,
 		uid_t anonu, uid_t anong, struct nfsd4_fs_locations *fslocs);
+static void show_secinfo(struct seq_file *m, struct svc_export *exp);
 
 static int svc_export_show(struct seq_file *m,
 			   struct cache_detail *cd,
@@ -622,6 +671,7 @@
 				seq_printf(m, "%02x", exp->ex_uuid[i]);
 			}
 		}
+		show_secinfo(m, exp);
 	}
 	seq_puts(m, ")\n");
 	return 0;
@@ -654,6 +704,7 @@
 {
 	struct svc_export *new = container_of(cnew, struct svc_export, h);
 	struct svc_export *item = container_of(citem, struct svc_export, h);
+	int i;
 
 	new->ex_flags = item->ex_flags;
 	new->ex_anon_uid = item->ex_anon_uid;
@@ -669,6 +720,10 @@
 	item->ex_fslocs.locations_count = 0;
 	new->ex_fslocs.migrated = item->ex_fslocs.migrated;
 	item->ex_fslocs.migrated = 0;
+	new->ex_nflavors = item->ex_nflavors;
+	for (i = 0; i < MAX_SECINFO_LIST; i++) {
+		new->ex_flavors[i] = item->ex_flavors[i];
+	}
 }
 
 static struct cache_head *svc_export_alloc(void)
@@ -738,16 +793,18 @@
 	int err;
 	
 	if (!clp)
-		return NULL;
+		return ERR_PTR(-ENOENT);
 
 	key.ek_client = clp;
 	key.ek_fsidtype = fsid_type;
 	memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
 
 	ek = svc_expkey_lookup(&key);
-	if (ek != NULL)
-		if ((err = cache_check(&svc_expkey_cache, &ek->h, reqp)))
-			ek = ERR_PTR(err);
+	if (ek == NULL)
+		return ERR_PTR(-ENOMEM);
+	err = cache_check(&svc_expkey_cache, &ek->h, reqp);
+	if (err)
+		return ERR_PTR(err);
 	return ek;
 }
 
@@ -808,30 +865,21 @@
 		struct cache_req *reqp)
 {
 	struct svc_export *exp, key;
+	int err;
 	
 	if (!clp)
-		return NULL;
+		return ERR_PTR(-ENOENT);
 
 	key.ex_client = clp;
 	key.ex_mnt = mnt;
 	key.ex_dentry = dentry;
 
 	exp = svc_export_lookup(&key);
-	if (exp != NULL)  {
-		int err;
-
-		err = cache_check(&svc_export_cache, &exp->h, reqp);
-		switch (err) {
-		case 0: break;
-		case -EAGAIN:
-		case -ETIMEDOUT:
-			exp = ERR_PTR(err);
-			break;
-		default:
-			exp = NULL;
-		}
-	}
-
+	if (exp == NULL)
+		return ERR_PTR(-ENOMEM);
+	err = cache_check(&svc_export_cache, &exp->h, reqp);
+	if (err)
+		return ERR_PTR(err);
 	return exp;
 }
 
@@ -847,7 +895,7 @@
 	dget(dentry);
 	exp = exp_get_by_name(clp, mnt, dentry, reqp);
 
-	while (exp == NULL && !IS_ROOT(dentry)) {
+	while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(dentry)) {
 		struct dentry *parent;
 
 		parent = dget_parent(dentry);
@@ -900,7 +948,7 @@
 		return;
 
 	ek = exp_get_fsid_key(exp->ex_client, exp->ex_fsid);
-	if (ek && !IS_ERR(ek)) {
+	if (!IS_ERR(ek)) {
 		ek->h.expiry_time = get_seconds()-1;
 		cache_put(&ek->h, &svc_expkey_cache);
 	}
@@ -938,7 +986,7 @@
 	struct inode *inode = exp->ex_dentry->d_inode;
 
 	ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino);
-	if (ek && !IS_ERR(ek)) {
+	if (!IS_ERR(ek)) {
 		ek->h.expiry_time = get_seconds()-1;
 		cache_put(&ek->h, &svc_expkey_cache);
 	}
@@ -989,13 +1037,12 @@
 
 	/* must make sure there won't be an ex_fsid clash */
 	if ((nxp->ex_flags & NFSEXP_FSID) &&
-	    (fsid_key = exp_get_fsid_key(clp, nxp->ex_dev)) &&
-	    !IS_ERR(fsid_key) &&
+	    (!IS_ERR(fsid_key = exp_get_fsid_key(clp, nxp->ex_dev))) &&
 	    fsid_key->ek_mnt &&
 	    (fsid_key->ek_mnt != nd.mnt || fsid_key->ek_dentry != nd.dentry) )
 		goto finish;
 
-	if (exp) {
+	if (!IS_ERR(exp)) {
 		/* just a flags/id/fsid update */
 
 		exp_fsid_unhash(exp);
@@ -1104,7 +1151,7 @@
 	err = -EINVAL;
 	exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL);
 	path_release(&nd);
-	if (!exp)
+	if (IS_ERR(exp))
 		goto out_domain;
 
 	exp_do_unexport(exp);
@@ -1149,10 +1196,6 @@
 		err = PTR_ERR(exp);
 		goto out;
 	}
-	if (!exp) {
-		dprintk("nfsd: exp_rootfh export not found.\n");
-		goto out;
-	}
 
 	/*
 	 * fh must be initialized before calling fh_compose
@@ -1176,17 +1219,130 @@
 {
 	struct svc_export *exp;
 	struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
-	if (!ek || IS_ERR(ek))
+	if (IS_ERR(ek))
 		return ERR_PTR(PTR_ERR(ek));
 
 	exp = exp_get_by_name(clp, ek->ek_mnt, ek->ek_dentry, reqp);
 	cache_put(&ek->h, &svc_expkey_cache);
 
-	if (!exp || IS_ERR(exp))
+	if (IS_ERR(exp))
 		return ERR_PTR(PTR_ERR(exp));
 	return exp;
 }
 
+__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
+{
+	struct exp_flavor_info *f;
+	struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
+
+	/* legacy gss-only clients are always OK: */
+	if (exp->ex_client == rqstp->rq_gssclient)
+		return 0;
+	/* ip-address based client; check sec= export option: */
+	for (f = exp->ex_flavors; f < end; f++) {
+		if (f->pseudoflavor == rqstp->rq_flavor)
+			return 0;
+	}
+	/* defaults in absence of sec= options: */
+	if (exp->ex_nflavors == 0) {
+		if (rqstp->rq_flavor == RPC_AUTH_NULL ||
+		    rqstp->rq_flavor == RPC_AUTH_UNIX)
+			return 0;
+	}
+	return nfserr_wrongsec;
+}
+
+/*
+ * Uses rq_client and rq_gssclient to find an export; uses rq_client (an
+ * auth_unix client) if it's available and has secinfo information;
+ * otherwise, will try to use rq_gssclient.
+ *
+ * Called from functions that handle requests; functions that do work on
+ * behalf of mountd are passed a single client name to use, and should
+ * use exp_get_by_name() or exp_find().
+ */
+struct svc_export *
+rqst_exp_get_by_name(struct svc_rqst *rqstp, struct vfsmount *mnt,
+		struct dentry *dentry)
+{
+	struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
+
+	if (rqstp->rq_client == NULL)
+		goto gss;
+
+	/* First try the auth_unix client: */
+	exp = exp_get_by_name(rqstp->rq_client, mnt, dentry,
+						&rqstp->rq_chandle);
+	if (PTR_ERR(exp) == -ENOENT)
+		goto gss;
+	if (IS_ERR(exp))
+		return exp;
+	/* If it has secinfo, assume there are no gss/... clients */
+	if (exp->ex_nflavors > 0)
+		return exp;
+gss:
+	/* Otherwise, try falling back on gss client */
+	if (rqstp->rq_gssclient == NULL)
+		return exp;
+	gssexp = exp_get_by_name(rqstp->rq_gssclient, mnt, dentry,
+						&rqstp->rq_chandle);
+	if (PTR_ERR(gssexp) == -ENOENT)
+		return exp;
+	if (!IS_ERR(exp))
+		exp_put(exp);
+	return gssexp;
+}
+
+struct svc_export *
+rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
+{
+	struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
+
+	if (rqstp->rq_client == NULL)
+		goto gss;
+
+	/* First try the auth_unix client: */
+	exp = exp_find(rqstp->rq_client, fsid_type, fsidv, &rqstp->rq_chandle);
+	if (PTR_ERR(exp) == -ENOENT)
+		goto gss;
+	if (IS_ERR(exp))
+		return exp;
+	/* If it has secinfo, assume there are no gss/... clients */
+	if (exp->ex_nflavors > 0)
+		return exp;
+gss:
+	/* Otherwise, try falling back on gss client */
+	if (rqstp->rq_gssclient == NULL)
+		return exp;
+	gssexp = exp_find(rqstp->rq_gssclient, fsid_type, fsidv,
+						&rqstp->rq_chandle);
+	if (PTR_ERR(gssexp) == -ENOENT)
+		return exp;
+	if (!IS_ERR(exp))
+		exp_put(exp);
+	return gssexp;
+}
+
+struct svc_export *
+rqst_exp_parent(struct svc_rqst *rqstp, struct vfsmount *mnt,
+		struct dentry *dentry)
+{
+	struct svc_export *exp;
+
+	dget(dentry);
+	exp = rqst_exp_get_by_name(rqstp, mnt, dentry);
+
+	while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(dentry)) {
+		struct dentry *parent;
+
+		parent = dget_parent(dentry);
+		dput(dentry);
+		dentry = parent;
+		exp = rqst_exp_get_by_name(rqstp, mnt, dentry);
+	}
+	dput(dentry);
+	return exp;
+}
 
 /*
  * Called when we need the filehandle for the root of the pseudofs,
@@ -1194,8 +1350,7 @@
  * export point with fsid==0
  */
 __be32
-exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp,
-	       struct cache_req *creq)
+exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
 {
 	struct svc_export *exp;
 	__be32 rv;
@@ -1203,12 +1358,16 @@
 
 	mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
 
-	exp = exp_find(clp, FSID_NUM, fsidv, creq);
+	exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
+	if (PTR_ERR(exp) == -ENOENT)
+		return nfserr_perm;
 	if (IS_ERR(exp))
 		return nfserrno(PTR_ERR(exp));
-	if (exp == NULL)
-		return nfserr_perm;
 	rv = fh_compose(fhp, exp, exp->ex_dentry, NULL);
+	if (rv)
+		goto out;
+	rv = check_nfsd_access(exp, rqstp);
+out:
 	exp_put(exp);
 	return rv;
 }
@@ -1296,28 +1455,62 @@
 	{ 0, {"", ""}}
 };
 
-static void exp_flags(struct seq_file *m, int flag, int fsid,
-		uid_t anonu, uid_t anong, struct nfsd4_fs_locations *fsloc)
+static void show_expflags(struct seq_file *m, int flags, int mask)
 {
-	int first = 0;
 	struct flags *flg;
+	int state, first = 0;
 
 	for (flg = expflags; flg->flag; flg++) {
-		int state = (flg->flag & flag)?0:1;
+		if (flg->flag & ~mask)
+			continue;
+		state = (flg->flag & flags) ? 0 : 1;
 		if (*flg->name[state])
 			seq_printf(m, "%s%s", first++?",":"", flg->name[state]);
 	}
+}
+
+static void show_secinfo_flags(struct seq_file *m, int flags)
+{
+	seq_printf(m, ",");
+	show_expflags(m, flags, NFSEXP_SECINFO_FLAGS);
+}
+
+static void show_secinfo(struct seq_file *m, struct svc_export *exp)
+{
+	struct exp_flavor_info *f;
+	struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
+	int lastflags = 0, first = 0;
+
+	if (exp->ex_nflavors == 0)
+		return;
+	for (f = exp->ex_flavors; f < end; f++) {
+		if (first || f->flags != lastflags) {
+			if (!first)
+				show_secinfo_flags(m, lastflags);
+			seq_printf(m, ",sec=%d", f->pseudoflavor);
+			lastflags = f->flags;
+		} else {
+			seq_printf(m, ":%d", f->pseudoflavor);
+		}
+	}
+	show_secinfo_flags(m, lastflags);
+}
+
+static void exp_flags(struct seq_file *m, int flag, int fsid,
+		uid_t anonu, uid_t anong, struct nfsd4_fs_locations *fsloc)
+{
+	show_expflags(m, flag, NFSEXP_ALLFLAGS);
 	if (flag & NFSEXP_FSID)
-		seq_printf(m, "%sfsid=%d", first++?",":"", fsid);
+		seq_printf(m, ",fsid=%d", fsid);
 	if (anonu != (uid_t)-2 && anonu != (0x10000-2))
-		seq_printf(m, "%sanonuid=%d", first++?",":"", anonu);
+		seq_printf(m, ",anonuid=%d", anonu);
 	if (anong != (gid_t)-2 && anong != (0x10000-2))
-		seq_printf(m, "%sanongid=%d", first++?",":"", anong);
+		seq_printf(m, ",anongid=%d", anong);
 	if (fsloc && fsloc->locations_count > 0) {
 		char *loctype = (fsloc->migrated) ? "refer" : "replicas";
 		int i;
 
-		seq_printf(m, "%s%s=", first++?",":"", loctype);
+		seq_printf(m, ",%s=", loctype);
 		seq_escape(m, fsloc->locations[0].path, ",;@ \t\n\\");
 		seq_putc(m, '@');
 		seq_escape(m, fsloc->locations[0].hosts, ",;@ \t\n\\");
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index 221acd1f..9e4a568 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -65,6 +65,7 @@
 static struct nlmsvc_binding	nfsd_nlm_ops = {
 	.fopen		= nlm_fopen,		/* open file for locking */
 	.fclose		= nlm_fclose,		/* close file */
+	.get_grace_period = get_nfs4_grace_period,
 };
 
 void
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index cc3b7ba..b6ed383 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -183,8 +183,13 @@
 summarize_posix_acl(struct posix_acl *acl, struct posix_acl_summary *pas)
 {
 	struct posix_acl_entry *pa, *pe;
-	pas->users = 0;
-	pas->groups = 0;
+
+	/*
+	 * Only pas.users and pas.groups need initialization; previous
+	 * posix_acl_valid() calls ensure that the other fields will be
+	 * initialized in the following loop.  But, just to placate gcc:
+	 */
+	memset(pas, 0, sizeof(*pas));
 	pas->mask = 07;
 
 	pe = acl->a_entries + acl->a_count;
@@ -732,13 +737,16 @@
 	*pacl = posix_state_to_acl(&effective_acl_state, flags);
 	if (IS_ERR(*pacl)) {
 		ret = PTR_ERR(*pacl);
+		*pacl = NULL;
 		goto out_dstate;
 	}
 	*dpacl = posix_state_to_acl(&default_acl_state,
 						flags | NFS4_ACL_TYPE_DEFAULT);
 	if (IS_ERR(*dpacl)) {
 		ret = PTR_ERR(*dpacl);
+		*dpacl = NULL;
 		posix_acl_release(*pacl);
+		*pacl = NULL;
 		goto out_dstate;
 	}
 	sort_pacl(*pacl);
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 5443c52..31d6633 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -75,7 +75,7 @@
 #define op_enc_sz			1
 #define op_dec_sz			2
 #define enc_nfs4_fh_sz			(1 + (NFS4_FHSIZE >> 2))
-#define enc_stateid_sz			16
+#define enc_stateid_sz			(NFS4_STATEID_SIZE >> 2)
 #define NFS4_enc_cb_recall_sz		(cb_compound_enc_hdr_sz +       \
 					1 + enc_stateid_sz +            \
 					enc_nfs4_fh_sz)
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 45aa21c..2cf9a9a 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -587,6 +587,15 @@
 	return ret;
 }
 
+static char *
+rqst_authname(struct svc_rqst *rqstp)
+{
+	struct auth_domain *clp;
+
+	clp = rqstp->rq_gssclient ? rqstp->rq_gssclient : rqstp->rq_client;
+	return clp->name;
+}
+
 static int
 idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen,
 		uid_t *id)
@@ -600,7 +609,7 @@
 		return -EINVAL;
 	memcpy(key.name, name, namelen);
 	key.name[namelen] = '\0';
-	strlcpy(key.authname, rqstp->rq_client->name, sizeof(key.authname));
+	strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
 	ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item);
 	if (ret == -ENOENT)
 		ret = -ESRCH; /* nfserr_badname */
@@ -620,7 +629,7 @@
 	};
 	int ret;
 
-	strlcpy(key.authname, rqstp->rq_client->name, sizeof(key.authname));
+	strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
 	ret = idmap_lookup(rqstp, idtoname_lookup, &key, &idtoname_cache, &item);
 	if (ret == -ENOENT)
 		return sprintf(name, "%u", id);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 8522729..3c62712 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -47,6 +47,7 @@
 #include <linux/nfsd/state.h>
 #include <linux/nfsd/xdr4.h>
 #include <linux/nfs4_acl.h>
+#include <linux/sunrpc/gss_api.h>
 
 #define NFSDDBG_FACILITY		NFSDDBG_PROC
 
@@ -286,8 +287,7 @@
 	__be32 status;
 
 	fh_put(&cstate->current_fh);
-	status = exp_pseudoroot(rqstp->rq_client, &cstate->current_fh,
-			      &rqstp->rq_chandle);
+	status = exp_pseudoroot(rqstp, &cstate->current_fh);
 	return status;
 }
 
@@ -474,8 +474,8 @@
 	__be32 ret;
 
 	fh_init(&tmp_fh, NFS4_FHSIZE);
-	if((ret = exp_pseudoroot(rqstp->rq_client, &tmp_fh,
-			      &rqstp->rq_chandle)) != 0)
+	ret = exp_pseudoroot(rqstp, &tmp_fh);
+	if (ret)
 		return ret;
 	if (tmp_fh.fh_dentry == cstate->current_fh.fh_dentry) {
 		fh_put(&tmp_fh);
@@ -611,6 +611,30 @@
 }
 
 static __be32
+nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+	      struct nfsd4_secinfo *secinfo)
+{
+	struct svc_fh resfh;
+	struct svc_export *exp;
+	struct dentry *dentry;
+	__be32 err;
+
+	fh_init(&resfh, NFS4_FHSIZE);
+	err = nfsd_lookup_dentry(rqstp, &cstate->current_fh,
+				    secinfo->si_name, secinfo->si_namelen,
+				    &exp, &dentry);
+	if (err)
+		return err;
+	if (dentry->d_inode == NULL) {
+		exp_put(exp);
+		err = nfserr_noent;
+	} else
+		secinfo->si_exp = exp;
+	dput(dentry);
+	return err;
+}
+
+static __be32
 nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	      struct nfsd4_setattr *setattr)
 {
@@ -1009,6 +1033,9 @@
 	[OP_SAVEFH] = {
 		.op_func = (nfsd4op_func)nfsd4_savefh,
 	},
+	[OP_SECINFO] = {
+		.op_func = (nfsd4op_func)nfsd4_secinfo,
+	},
 	[OP_SETATTR] = {
 		.op_func = (nfsd4op_func)nfsd4_setattr,
 	},
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 8c52913..6284807 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -49,8 +49,10 @@
 #include <linux/nfsd/state.h>
 #include <linux/nfsd/xdr4.h>
 #include <linux/namei.h>
+#include <linux/swap.h>
 #include <linux/mutex.h>
 #include <linux/lockd/bind.h>
+#include <linux/module.h>
 
 #define NFSDDBG_FACILITY                NFSDDBG_PROC
 
@@ -149,6 +151,7 @@
 }
 
 static int num_delegations;
+unsigned int max_delegations;
 
 /*
  * Open owner state (share locks)
@@ -192,7 +195,9 @@
 	struct nfs4_callback *cb = &stp->st_stateowner->so_client->cl_callback;
 
 	dprintk("NFSD alloc_init_deleg\n");
-	if (num_delegations > STATEID_HASH_SIZE * 4)
+	if (fp->fi_had_conflict)
+		return NULL;
+	if (num_delegations > max_delegations)
 		return NULL;
 	dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
 	if (dp == NULL)
@@ -251,7 +256,7 @@
 	/* The following nfsd_close may not actually close the file,
 	 * but we want to remove the lease in any case. */
 	if (dp->dl_flock)
-		setlease(filp, F_UNLCK, &dp->dl_flock);
+		vfs_setlease(filp, F_UNLCK, &dp->dl_flock);
 	nfsd_close(filp);
 }
 
@@ -999,6 +1004,7 @@
 		list_add(&fp->fi_hash, &file_hashtbl[hashval]);
 		fp->fi_inode = igrab(ino);
 		fp->fi_id = current_fileid++;
+		fp->fi_had_conflict = false;
 		return fp;
 	}
 	return NULL;
@@ -1325,6 +1331,7 @@
 {
 	struct nfs4_delegation *dp = __dp;
 
+	dp->dl_file->fi_had_conflict = true;
 	nfsd4_cb_recall(dp);
 	return 0;
 }
@@ -1395,7 +1402,7 @@
 /*
  * Set the delegation file_lock back pointer.
  *
- * Called from __setlease() with lock_kernel() held.
+ * Called from setlease() with lock_kernel() held.
  */
 static
 void nfsd_copy_lock_deleg_cb(struct file_lock *new, struct file_lock *fl)
@@ -1409,7 +1416,7 @@
 }
 
 /*
- * Called from __setlease() with lock_kernel() held
+ * Called from setlease() with lock_kernel() held
  */
 static
 int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try)
@@ -1709,10 +1716,10 @@
 	fl.fl_file = stp->st_vfs_file;
 	fl.fl_pid = current->tgid;
 
-	/* setlease checks to see if delegation should be handed out.
+	/* vfs_setlease checks to see if delegation should be handed out.
 	 * the lock_manager callbacks fl_mylease and fl_change are used
 	 */
-	if ((status = setlease(stp->st_vfs_file,
+	if ((status = vfs_setlease(stp->st_vfs_file,
 		flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK, &flp))) {
 		dprintk("NFSD: setlease failed [%d], no delegation\n", status);
 		unhash_delegation(dp);
@@ -3190,20 +3197,49 @@
 		printk("NFSD: Failure reading reboot recovery data\n");
 }
 
+unsigned long
+get_nfs4_grace_period(void)
+{
+	return max(user_lease_time, lease_time) * HZ;
+}
+
+/*
+ * Since the lifetime of a delegation isn't limited to that of an open, a
+ * client may quite reasonably hang on to a delegation as long as it has
+ * the inode cached.  This becomes an obvious problem the first time a
+ * client's inode cache approaches the size of the server's total memory.
+ *
+ * For now we avoid this problem by imposing a hard limit on the number
+ * of delegations, which varies according to the server's memory size.
+ */
+static void
+set_max_delegations(void)
+{
+	/*
+	 * Allow at most 4 delegations per megabyte of RAM.  Quick
+	 * estimates suggest that in the worst case (where every delegation
+	 * is for a different inode), a delegation could take about 1.5K,
+	 * giving a worst case usage of about 6% of memory.
+	 */
+	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
+}
+
 /* initialization to perform when the nfsd service is started: */
 
 static void
 __nfs4_state_start(void)
 {
-	time_t grace_time;
+	unsigned long grace_time;
 
 	boot_time = get_seconds();
-	grace_time = max(user_lease_time, lease_time);
+	grace_time = get_nfs_grace_period();
 	lease_time = user_lease_time;
 	in_grace = 1;
-	printk("NFSD: starting %ld-second grace period\n", grace_time);
+	printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
+	       grace_time/HZ);
 	laundry_wq = create_singlethread_workqueue("nfsd4");
-	queue_delayed_work(laundry_wq, &laundromat_work, grace_time*HZ);
+	queue_delayed_work(laundry_wq, &laundromat_work, grace_time);
+	set_max_delegations();
 }
 
 int
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 15809df..b3d55c6 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -56,6 +56,8 @@
 #include <linux/nfsd_idmap.h>
 #include <linux/nfs4.h>
 #include <linux/nfs4_acl.h>
+#include <linux/sunrpc/gss_api.h>
+#include <linux/sunrpc/svcauth_gss.h>
 
 #define NFSDDBG_FACILITY		NFSDDBG_XDR
 
@@ -819,6 +821,23 @@
 }
 
 static __be32
+nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp,
+		     struct nfsd4_secinfo *secinfo)
+{
+	DECODE_HEAD;
+
+	READ_BUF(4);
+	READ32(secinfo->si_namelen);
+	READ_BUF(secinfo->si_namelen);
+	SAVEMEM(secinfo->si_name, secinfo->si_namelen);
+	status = check_filename(secinfo->si_name, secinfo->si_namelen,
+								nfserr_noent);
+	if (status)
+		return status;
+	DECODE_TAIL;
+}
+
+static __be32
 nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr)
 {
 	DECODE_HEAD;
@@ -1131,6 +1150,9 @@
 		case OP_SAVEFH:
 			op->status = nfs_ok;
 			break;
+		case OP_SECINFO:
+			op->status = nfsd4_decode_secinfo(argp, &op->u.secinfo);
+			break;
 		case OP_SETATTR:
 			op->status = nfsd4_decode_setattr(argp, &op->u.setattr);
 			break;
@@ -1296,7 +1318,7 @@
 	char *path, *rootpath;
 
 	fh_init(&tmp_fh, NFS4_FHSIZE);
-	*stat = exp_pseudoroot(rqstp->rq_client, &tmp_fh, &rqstp->rq_chandle);
+	*stat = exp_pseudoroot(rqstp, &tmp_fh);
 	if (*stat)
 		return NULL;
 	rootpath = tmp_fh.fh_export->ex_path;
@@ -1847,11 +1869,19 @@
 	if (d_mountpoint(dentry)) {
 		int err;
 
+		/*
+		 * Why the heck aren't we just using nfsd_lookup??
+		 * Different "."/".." handling?  Something else?
+		 * At least, add a comment here to explain....
+		 */
 		err = nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp);
 		if (err) {
 			nfserr = nfserrno(err);
 			goto out_put;
 		}
+		nfserr = check_nfsd_access(exp, cd->rd_rqstp);
+		if (nfserr)
+			goto out_put;
 
 	}
 	nfserr = nfsd4_encode_fattr(NULL, exp, dentry, p, buflen, cd->rd_bmval,
@@ -2419,6 +2449,72 @@
 	}
 }
 
+static void
+nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, int nfserr,
+		     struct nfsd4_secinfo *secinfo)
+{
+	int i = 0;
+	struct svc_export *exp = secinfo->si_exp;
+	u32 nflavs;
+	struct exp_flavor_info *flavs;
+	struct exp_flavor_info def_flavs[2];
+	ENCODE_HEAD;
+
+	if (nfserr)
+		goto out;
+	if (exp->ex_nflavors) {
+		flavs = exp->ex_flavors;
+		nflavs = exp->ex_nflavors;
+	} else { /* Handling of some defaults in absence of real secinfo: */
+		flavs = def_flavs;
+		if (exp->ex_client->flavour->flavour == RPC_AUTH_UNIX) {
+			nflavs = 2;
+			flavs[0].pseudoflavor = RPC_AUTH_UNIX;
+			flavs[1].pseudoflavor = RPC_AUTH_NULL;
+		} else if (exp->ex_client->flavour->flavour == RPC_AUTH_GSS) {
+			nflavs = 1;
+			flavs[0].pseudoflavor
+					= svcauth_gss_flavor(exp->ex_client);
+		} else {
+			nflavs = 1;
+			flavs[0].pseudoflavor
+					= exp->ex_client->flavour->flavour;
+		}
+	}
+
+	RESERVE_SPACE(4);
+	WRITE32(nflavs);
+	ADJUST_ARGS();
+	for (i = 0; i < nflavs; i++) {
+		u32 flav = flavs[i].pseudoflavor;
+		struct gss_api_mech *gm = gss_mech_get_by_pseudoflavor(flav);
+
+		if (gm) {
+			RESERVE_SPACE(4);
+			WRITE32(RPC_AUTH_GSS);
+			ADJUST_ARGS();
+			RESERVE_SPACE(4 + gm->gm_oid.len);
+			WRITE32(gm->gm_oid.len);
+			WRITEMEM(gm->gm_oid.data, gm->gm_oid.len);
+			ADJUST_ARGS();
+			RESERVE_SPACE(4);
+			WRITE32(0); /* qop */
+			ADJUST_ARGS();
+			RESERVE_SPACE(4);
+			WRITE32(gss_pseudoflavor_to_service(gm, flav));
+			ADJUST_ARGS();
+			gss_mech_put(gm);
+		} else {
+			RESERVE_SPACE(4);
+			WRITE32(flav);
+			ADJUST_ARGS();
+		}
+	}
+out:
+	if (exp)
+		exp_put(exp);
+}
+
 /*
  * The SETATTR encode routine is special -- it always encodes a bitmap,
  * regardless of the error status.
@@ -2559,6 +2655,9 @@
 		break;
 	case OP_SAVEFH:
 		break;
+	case OP_SECINFO:
+		nfsd4_encode_secinfo(resp, op->status, &op->u.secinfo);
+		break;
 	case OP_SETATTR:
 		nfsd4_encode_setattr(resp, op->status, &op->u.setattr);
 		break;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 71c686d..baac89d 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -35,7 +35,6 @@
 #include <linux/nfsd/cache.h>
 #include <linux/nfsd/xdr.h>
 #include <linux/nfsd/syscall.h>
-#include <linux/nfsd/interface.h>
 
 #include <asm/uaccess.h>
 
@@ -245,7 +244,7 @@
 	}
 	exp_readunlock();
 	if (err == 0)
-		err = res->fh_size + (int)&((struct knfsd_fh*)0)->fh_base;
+		err = res->fh_size + offsetof(struct knfsd_fh, fh_base);
  out:
 	return err;
 }
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 6ca2d24..0eb464a 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -15,10 +15,12 @@
 #include <linux/string.h>
 #include <linux/stat.h>
 #include <linux/dcache.h>
+#include <linux/exportfs.h>
 #include <linux/mount.h>
 
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/svcauth_gss.h>
 #include <linux/nfsd/nfsd.h>
 
 #define NFSDDBG_FACILITY		NFSDDBG_FH
@@ -27,10 +29,6 @@
 static int nfsd_nr_verified;
 static int nfsd_nr_put;
 
-extern struct export_operations export_op_default;
-
-#define	CALL(ops,fun) ((ops->fun)?(ops->fun):export_op_default.fun)
-
 /*
  * our acceptability function.
  * if NOSUBTREECHECK, accept anything
@@ -123,8 +121,6 @@
 		int data_left = fh->fh_size/4;
 
 		error = nfserr_stale;
-		if (rqstp->rq_client == NULL)
-			goto out;
 		if (rqstp->rq_vers > 2)
 			error = nfserr_badhandle;
 		if (rqstp->rq_vers == 4 && fh->fh_size == 0)
@@ -148,7 +144,7 @@
 				fh->fh_fsid[1] = fh->fh_fsid[2];
 			}
 			if ((data_left -= len)<0) goto out;
-			exp = exp_find(rqstp->rq_client, fh->fh_fsid_type, datap, &rqstp->rq_chandle);
+			exp = rqst_exp_find(rqstp, fh->fh_fsid_type, datap);
 			datap += len;
 		} else {
 			dev_t xdev;
@@ -159,20 +155,18 @@
 			xdev = old_decode_dev(fh->ofh_xdev);
 			xino = u32_to_ino_t(fh->ofh_xino);
 			mk_fsid(FSID_DEV, tfh, xdev, xino, 0, NULL);
-			exp = exp_find(rqstp->rq_client, FSID_DEV, tfh,
-				       &rqstp->rq_chandle);
+			exp = rqst_exp_find(rqstp, FSID_DEV, tfh);
 		}
 
-		if (IS_ERR(exp) && (PTR_ERR(exp) == -EAGAIN
-				|| PTR_ERR(exp) == -ETIMEDOUT)) {
+		error = nfserr_stale;
+		if (PTR_ERR(exp) == -ENOENT)
+			goto out;
+
+		if (IS_ERR(exp)) {
 			error = nfserrno(PTR_ERR(exp));
 			goto out;
 		}
 
-		error = nfserr_stale; 
-		if (!exp || IS_ERR(exp))
-			goto out;
-
 		/* Check if the request originated from a secure port. */
 		error = nfserr_perm;
 		if (!rqstp->rq_secure && EX_SECURE(exp)) {
@@ -211,11 +205,9 @@
 		if (fileid_type == 0)
 			dentry = dget(exp->ex_dentry);
 		else {
-			struct export_operations *nop = exp->ex_mnt->mnt_sb->s_export_op;
-			dentry = CALL(nop,decode_fh)(exp->ex_mnt->mnt_sb,
-						     datap, data_left,
-						     fileid_type,
-						     nfsd_acceptable, exp);
+			dentry = exportfs_decode_fh(exp->ex_mnt, datap,
+					data_left, fileid_type,
+					nfsd_acceptable, exp);
 		}
 		if (dentry == NULL)
 			goto out;
@@ -257,8 +249,19 @@
 	if (error)
 		goto out;
 
+	if (!(access & MAY_LOCK)) {
+		/*
+		 * pseudoflavor restrictions are not enforced on NLM,
+		 * which clients virtually always use auth_sys for,
+		 * even while using RPCSEC_GSS for NFS.
+		 */
+		error = check_nfsd_access(exp, rqstp);
+		if (error)
+			goto out;
+	}
+
 	/* Finally, check access permissions. */
-	error = nfsd_permission(exp, dentry, access);
+	error = nfsd_permission(rqstp, exp, dentry, access);
 
 	if (error) {
 		dprintk("fh_verify: %s/%s permission failure, "
@@ -286,15 +289,13 @@
 static inline int _fh_update(struct dentry *dentry, struct svc_export *exp,
 			     __u32 *datap, int *maxsize)
 {
-	struct export_operations *nop = exp->ex_mnt->mnt_sb->s_export_op;
-
 	if (dentry == exp->ex_dentry) {
 		*maxsize = 0;
 		return 0;
 	}
 
-	return CALL(nop,encode_fh)(dentry, datap, maxsize,
-			  !(exp->ex_flags&NFSEXP_NOSUBTREECHECK));
+	return exportfs_encode_fh(dentry, datap, maxsize,
+			  !(exp->ex_flags & NFSEXP_NOSUBTREECHECK));
 }
 
 /*
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index b2c7147..977a71f 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -278,7 +278,8 @@
 					 *   echo thing > device-special-file-or-pipe
 					 * by doing a CREATE with type==0
 					 */
-					nfserr = nfsd_permission(newfhp->fh_export,
+					nfserr = nfsd_permission(rqstp,
+								 newfhp->fh_export,
 								 newfhp->fh_dentry,
 								 MAY_WRITE|MAY_LOCAL_ACCESS);
 					if (nfserr && nfserr != nfserr_rofs)
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index ff55950..a8c89ae 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -19,6 +19,7 @@
 #include <linux/slab.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
+#include <linux/freezer.h>
 #include <linux/fs_struct.h>
 
 #include <linux/sunrpc/types.h>
@@ -432,6 +433,7 @@
 	 * dirty pages.
 	 */
 	current->flags |= PF_LESS_THROTTLE;
+	set_freezable();
 
 	/*
 	 * The main request loop
@@ -492,6 +494,15 @@
 	module_put_and_exit(0);
 }
 
+static __be32 map_new_errors(u32 vers, __be32 nfserr)
+{
+	if (nfserr == nfserr_jukebox && vers == 2)
+		return nfserr_dropit;
+	if (nfserr == nfserr_wrongsec && vers < 4)
+		return nfserr_acces;
+	return nfserr;
+}
+
 int
 nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
 {
@@ -534,6 +545,7 @@
 
 	/* Now call the procedure handler, and encode NFS status. */
 	nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
+	nfserr = map_new_errors(rqstp->rq_vers, nfserr);
 	if (nfserr == nfserr_jukebox && rqstp->rq_vers == 2)
 		nfserr = nfserr_dropit;
 	if (nfserr == nfserr_dropit) {
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 945b1ce..ee96a89 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -113,21 +113,21 @@
 
 	while (follow_down(&mnt,&mounts)&&d_mountpoint(mounts));
 
-	exp2 = exp_get_by_name(exp->ex_client, mnt, mounts, &rqstp->rq_chandle);
+	exp2 = rqst_exp_get_by_name(rqstp, mnt, mounts);
 	if (IS_ERR(exp2)) {
 		err = PTR_ERR(exp2);
 		dput(mounts);
 		mntput(mnt);
 		goto out;
 	}
-	if (exp2 && ((exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2))) {
+	if ((exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) {
 		/* successfully crossed mount point */
 		exp_put(exp);
 		*expp = exp2;
 		dput(dentry);
 		*dpp = mounts;
 	} else {
-		if (exp2) exp_put(exp2);
+		exp_put(exp2);
 		dput(mounts);
 	}
 	mntput(mnt);
@@ -135,21 +135,10 @@
 	return err;
 }
 
-/*
- * Look up one component of a pathname.
- * N.B. After this call _both_ fhp and resfh need an fh_put
- *
- * If the lookup would cross a mountpoint, and the mounted filesystem
- * is exported to the client with NFSEXP_NOHIDE, then the lookup is
- * accepted as it stands and the mounted directory is
- * returned. Otherwise the covered directory is returned.
- * NOTE: this mountpoint crossing is not supported properly by all
- *   clients and is explicitly disallowed for NFSv3
- *      NeilBrown <neilb@cse.unsw.edu.au>
- */
 __be32
-nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
-					int len, struct svc_fh *resfh)
+nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
+		   const char *name, int len,
+		   struct svc_export **exp_ret, struct dentry **dentry_ret)
 {
 	struct svc_export	*exp;
 	struct dentry		*dparent;
@@ -168,8 +157,6 @@
 	exp  = fhp->fh_export;
 	exp_get(exp);
 
-	err = nfserr_acces;
-
 	/* Lookup the name, but don't follow links */
 	if (isdotent(name, len)) {
 		if (len==1)
@@ -190,17 +177,15 @@
 			dput(dentry);
 			dentry = dp;
 
-			exp2 = exp_parent(exp->ex_client, mnt, dentry,
-					  &rqstp->rq_chandle);
-			if (IS_ERR(exp2)) {
+			exp2 = rqst_exp_parent(rqstp, mnt, dentry);
+			if (PTR_ERR(exp2) == -ENOENT) {
+				dput(dentry);
+				dentry = dget(dparent);
+			} else if (IS_ERR(exp2)) {
 				host_err = PTR_ERR(exp2);
 				dput(dentry);
 				mntput(mnt);
 				goto out_nfserr;
-			}
-			if (!exp2) {
-				dput(dentry);
-				dentry = dget(dparent);
 			} else {
 				exp_put(exp);
 				exp = exp2;
@@ -223,6 +208,41 @@
 			}
 		}
 	}
+	*dentry_ret = dentry;
+	*exp_ret = exp;
+	return 0;
+
+out_nfserr:
+	exp_put(exp);
+	return nfserrno(host_err);
+}
+
+/*
+ * Look up one component of a pathname.
+ * N.B. After this call _both_ fhp and resfh need an fh_put
+ *
+ * If the lookup would cross a mountpoint, and the mounted filesystem
+ * is exported to the client with NFSEXP_NOHIDE, then the lookup is
+ * accepted as it stands and the mounted directory is
+ * returned. Otherwise the covered directory is returned.
+ * NOTE: this mountpoint crossing is not supported properly by all
+ *   clients and is explicitly disallowed for NFSv3
+ *      NeilBrown <neilb@cse.unsw.edu.au>
+ */
+__be32
+nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
+					int len, struct svc_fh *resfh)
+{
+	struct svc_export	*exp;
+	struct dentry		*dentry;
+	__be32 err;
+
+	err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry);
+	if (err)
+		return err;
+	err = check_nfsd_access(exp, rqstp);
+	if (err)
+		goto out;
 	/*
 	 * Note: we compose the file handle now, but as the
 	 * dentry may be negative, it may need to be updated.
@@ -230,16 +250,13 @@
 	err = fh_compose(resfh, exp, dentry, fhp);
 	if (!err && !dentry->d_inode)
 		err = nfserr_noent;
-	dput(dentry);
 out:
+	dput(dentry);
 	exp_put(exp);
 	return err;
-
-out_nfserr:
-	err = nfserrno(host_err);
-	goto out;
 }
 
+
 /*
  * Set various file attributes.
  * N.B. After this call fhp needs an fh_put
@@ -311,7 +328,7 @@
 	/* The size case is special. It changes the file as well as the attributes.  */
 	if (iap->ia_valid & ATTR_SIZE) {
 		if (iap->ia_size < inode->i_size) {
-			err = nfsd_permission(fhp->fh_export, dentry, MAY_TRUNC|MAY_OWNER_OVERRIDE);
+			err = nfsd_permission(rqstp, fhp->fh_export, dentry, MAY_TRUNC|MAY_OWNER_OVERRIDE);
 			if (err)
 				goto out;
 		}
@@ -435,7 +452,7 @@
 	/* Get inode */
 	error = fh_verify(rqstp, fhp, 0 /* S_IFREG */, MAY_SATTR);
 	if (error)
-		goto out;
+		return error;
 
 	dentry = fhp->fh_dentry;
 	inode = dentry->d_inode;
@@ -444,33 +461,25 @@
 
 	host_error = nfs4_acl_nfsv4_to_posix(acl, &pacl, &dpacl, flags);
 	if (host_error == -EINVAL) {
-		error = nfserr_attrnotsupp;
-		goto out;
+		return nfserr_attrnotsupp;
 	} else if (host_error < 0)
 		goto out_nfserr;
 
 	host_error = set_nfsv4_acl_one(dentry, pacl, POSIX_ACL_XATTR_ACCESS);
 	if (host_error < 0)
-		goto out_nfserr;
+		goto out_release;
 
-	if (S_ISDIR(inode->i_mode)) {
+	if (S_ISDIR(inode->i_mode))
 		host_error = set_nfsv4_acl_one(dentry, dpacl, POSIX_ACL_XATTR_DEFAULT);
-		if (host_error < 0)
-			goto out_nfserr;
-	}
 
-	error = nfs_ok;
-
-out:
+out_release:
 	posix_acl_release(pacl);
 	posix_acl_release(dpacl);
-	return (error);
 out_nfserr:
 	if (host_error == -EOPNOTSUPP)
-		error = nfserr_attrnotsupp;
+		return nfserr_attrnotsupp;
 	else
-		error = nfserrno(host_error);
-	goto out;
+		return nfserrno(host_error);
 }
 
 static struct posix_acl *
@@ -607,7 +616,7 @@
 
 			sresult |= map->access;
 
-			err2 = nfsd_permission(export, dentry, map->how);
+			err2 = nfsd_permission(rqstp, export, dentry, map->how);
 			switch (err2) {
 			case nfs_ok:
 				result |= map->access;
@@ -1034,7 +1043,7 @@
 	__be32		err;
 
 	if (file) {
-		err = nfsd_permission(fhp->fh_export, fhp->fh_dentry,
+		err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
 				MAY_READ|MAY_OWNER_OVERRIDE);
 		if (err)
 			goto out;
@@ -1063,7 +1072,7 @@
 	__be32			err = 0;
 
 	if (file) {
-		err = nfsd_permission(fhp->fh_export, fhp->fh_dentry,
+		err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
 				MAY_WRITE|MAY_OWNER_OVERRIDE);
 		if (err)
 			goto out;
@@ -1788,11 +1797,17 @@
 	return err;
 }
 
+static int exp_rdonly(struct svc_rqst *rqstp, struct svc_export *exp)
+{
+	return nfsexp_flags(rqstp, exp) & NFSEXP_READONLY;
+}
+
 /*
  * Check for a user's access permissions to this inode.
  */
 __be32
-nfsd_permission(struct svc_export *exp, struct dentry *dentry, int acc)
+nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
+					struct dentry *dentry, int acc)
 {
 	struct inode	*inode = dentry->d_inode;
 	int		err;
@@ -1823,7 +1838,7 @@
 	 */
 	if (!(acc & MAY_LOCAL_ACCESS))
 		if (acc & (MAY_WRITE | MAY_SATTR | MAY_TRUNC)) {
-			if (EX_RDONLY(exp) || IS_RDONLY(inode))
+			if (exp_rdonly(rqstp, exp) || IS_RDONLY(inode))
 				return nfserr_rofs;
 			if (/* (acc & MAY_WRITE) && */ IS_IMMUTABLE(inode))
 				return nfserr_perm;
@@ -1906,7 +1921,7 @@
 		raparm_hash[i].pb_head = NULL;
 		spin_lock_init(&raparm_hash[i].pb_lock);
 	}
-	nperbucket = cache_size >> RAPARM_HASH_BITS;
+	nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE);
 	for (i = 0; i < cache_size - 1; i++) {
 		if (i % nperbucket == 0)
 			raparm_hash[j++].pb_head = raparml + i;
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index bff01a5..e93c614 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -21,6 +21,7 @@
  */
 
 #include <linux/dcache.h>
+#include <linux/exportfs.h>
 #include <linux/security.h>
 
 #include "attrib.h"
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 84bf6e7..460d440 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -232,7 +232,7 @@
 	 * might now be discovering a truncate that hit on another node.
 	 * block_read_full_page->get_block freaks out if it is asked to read
 	 * beyond the end of a file, so we check here.  Callers
-	 * (generic_file_read, fault->nopage) are clever enough to check i_size
+	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
 	 * and notice that the page they just read isn't needed.
 	 *
 	 * XXX sys_readahead() seems to get that wrong?
diff --git a/fs/ocfs2/export.h b/fs/ocfs2/export.h
index 5b77ee7..e08bed9 100644
--- a/fs/ocfs2/export.h
+++ b/fs/ocfs2/export.h
@@ -26,6 +26,8 @@
 #ifndef OCFS2_EXPORT_H
 #define OCFS2_EXPORT_H
 
+#include <linux/exportfs.h>
+
 extern struct export_operations ocfs2_export_ops;
 
 #endif /* OCFS2_EXPORT_H */
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index f04c7aa..5727cd1 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -34,6 +34,7 @@
 #include <linux/splice.h>
 #include <linux/mount.h>
 #include <linux/writeback.h>
+#include <linux/falloc.h>
 
 #define MLOG_MASK_PREFIX ML_INODE
 #include <cluster/masklog.h>
@@ -1504,30 +1505,19 @@
 /*
  * Parts of this function taken from xfs_change_file_space()
  */
-int ocfs2_change_file_space(struct file *file, unsigned int cmd,
-			    struct ocfs2_space_resv *sr)
+static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+				     loff_t f_pos, unsigned int cmd,
+				     struct ocfs2_space_resv *sr,
+				     int change_size)
 {
 	int ret;
 	s64 llen;
-	struct inode *inode = file->f_path.dentry->d_inode;
+	loff_t size;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct buffer_head *di_bh = NULL;
 	handle_t *handle;
 	unsigned long long max_off = ocfs2_max_file_offset(inode->i_sb->s_blocksize_bits);
 
-	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
-	    !ocfs2_writes_unwritten_extents(osb))
-		return -ENOTTY;
-	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
-		 !ocfs2_sparse_alloc(osb))
-		return -ENOTTY;
-
-	if (!S_ISREG(inode->i_mode))
-		return -EINVAL;
-
-	if (!(file->f_mode & FMODE_WRITE))
-		return -EBADF;
-
 	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
 		return -EROFS;
 
@@ -1557,7 +1547,7 @@
 	case 0: /*SEEK_SET*/
 		break;
 	case 1: /*SEEK_CUR*/
-		sr->l_start += file->f_pos;
+		sr->l_start += f_pos;
 		break;
 	case 2: /*SEEK_END*/
 		sr->l_start += i_size_read(inode);
@@ -1577,6 +1567,7 @@
 		ret = -EINVAL;
 		goto out_meta_unlock;
 	}
+	size = sr->l_start + sr->l_len;
 
 	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) {
 		if (sr->l_len <= 0) {
@@ -1585,7 +1576,7 @@
 		}
 	}
 
-	if (should_remove_suid(file->f_path.dentry)) {
+	if (file && should_remove_suid(file->f_path.dentry)) {
 		ret = __ocfs2_write_remove_suid(inode, di_bh);
 		if (ret) {
 			mlog_errno(ret);
@@ -1628,6 +1619,9 @@
 		goto out_meta_unlock;
 	}
 
+	if (change_size && i_size_read(inode) < size)
+		i_size_write(inode, size);
+
 	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
 	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
 	if (ret < 0)
@@ -1646,6 +1640,52 @@
 	return ret;
 }
 
+int ocfs2_change_file_space(struct file *file, unsigned int cmd,
+			    struct ocfs2_space_resv *sr)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
+	    !ocfs2_writes_unwritten_extents(osb))
+		return -ENOTTY;
+	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
+		 !ocfs2_sparse_alloc(osb))
+		return -ENOTTY;
+
+	if (!S_ISREG(inode->i_mode))
+		return -EINVAL;
+
+	if (!(file->f_mode & FMODE_WRITE))
+		return -EBADF;
+
+	return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
+}
+
+static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset,
+			    loff_t len)
+{
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	struct ocfs2_space_resv sr;
+	int change_size = 1;
+
+	if (!ocfs2_writes_unwritten_extents(osb))
+		return -EOPNOTSUPP;
+
+	if (S_ISDIR(inode->i_mode))
+		return -ENODEV;
+
+	if (mode & FALLOC_FL_KEEP_SIZE)
+		change_size = 0;
+
+	sr.l_whence = 0;
+	sr.l_start = (s64)offset;
+	sr.l_len = (s64)len;
+
+	return __ocfs2_change_file_space(NULL, inode, offset,
+					 OCFS2_IOC_RESVSP64, &sr, change_size);
+}
+
 static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
 					 loff_t *ppos,
 					 size_t count,
@@ -1867,7 +1907,8 @@
 	loff_t pos;
 	const struct iovec *cur_iov = iov;
 	struct page *user_page, *page;
-	char *buf, *dst;
+	char * uninitialized_var(buf);
+	char *dst;
 	void *fsdata;
 
 	/*
@@ -2311,6 +2352,7 @@
 	.setattr	= ocfs2_setattr,
 	.getattr	= ocfs2_getattr,
 	.permission	= ocfs2_permission,
+	.fallocate	= ocfs2_fallocate,
 };
 
 const struct inode_operations ocfs2_special_file_iops = {
diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c
index 352eb4a..c4c3617 100644
--- a/fs/ocfs2/heartbeat.c
+++ b/fs/ocfs2/heartbeat.c
@@ -209,7 +209,7 @@
 	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
 	envp[2] = NULL;
 
-	ret = call_usermodehelper(argv[0], argv, envp, 1);
+	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
 	if (ret < 0)
 		mlog_errno(ret);
 }
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index bd68c3f..87dcece 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -63,7 +63,7 @@
 		goto bail_unlock;
 
 	status = -EACCES;
-	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+	if (!is_owner_or_cap(inode))
 		goto bail_unlock;
 
 	if (!S_ISDIR(inode->i_mode))
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index d79aa12..ee64749 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -60,31 +60,28 @@
 	return sigprocmask(SIG_SETMASK, oldset, NULL);
 }
 
-static struct page *ocfs2_nopage(struct vm_area_struct * area,
-				 unsigned long address,
-				 int *type)
+static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 {
-	struct page *page = NOPAGE_SIGBUS;
 	sigset_t blocked, oldset;
-	int ret;
+	int error, ret;
 
-	mlog_entry("(area=%p, address=%lu, type=%p)\n", area, address,
-		   type);
+	mlog_entry("(area=%p, page offset=%lu)\n", area, vmf->pgoff);
 
-	ret = ocfs2_vm_op_block_sigs(&blocked, &oldset);
-	if (ret < 0) {
-		mlog_errno(ret);
+	error = ocfs2_vm_op_block_sigs(&blocked, &oldset);
+	if (error < 0) {
+		mlog_errno(error);
+		ret = VM_FAULT_SIGBUS;
 		goto out;
 	}
 
-	page = filemap_nopage(area, address, type);
+	ret = filemap_fault(area, vmf);
 
-	ret = ocfs2_vm_op_unblock_sigs(&oldset);
-	if (ret < 0)
-		mlog_errno(ret);
+	error = ocfs2_vm_op_unblock_sigs(&oldset);
+	if (error < 0)
+		mlog_errno(error);
 out:
-	mlog_exit_ptr(page);
-	return page;
+	mlog_exit_ptr(vmf->page);
+	return ret;
 }
 
 static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
@@ -209,7 +206,7 @@
 }
 
 static struct vm_operations_struct ocfs2_file_vm_ops = {
-	.nopage		= ocfs2_nopage,
+	.fault		= ocfs2_fault,
 	.page_mkwrite	= ocfs2_page_mkwrite,
 };
 
@@ -226,6 +223,7 @@
 	ocfs2_meta_unlock(file->f_dentry->d_inode, lock_level);
 out:
 	vma->vm_ops = &ocfs2_file_vm_ops;
+	vma->vm_flags |= VM_CAN_NONLINEAR;
 	return 0;
 }
 
diff --git a/fs/open.c b/fs/open.c
index be6a457..a6b054e 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -26,6 +26,7 @@
 #include <linux/syscalls.h>
 #include <linux/rcupdate.h>
 #include <linux/audit.h>
+#include <linux/falloc.h>
 
 int vfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
@@ -352,6 +353,64 @@
 }
 #endif
 
+asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len)
+{
+	struct file *file;
+	struct inode *inode;
+	long ret = -EINVAL;
+
+	if (offset < 0 || len <= 0)
+		goto out;
+
+	/* Return error if mode is not supported */
+	ret = -EOPNOTSUPP;
+	if (mode && !(mode & FALLOC_FL_KEEP_SIZE))
+		goto out;
+
+	ret = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+	if (!(file->f_mode & FMODE_WRITE))
+		goto out_fput;
+	/*
+	 * Revalidate the write permissions, in case security policy has
+	 * changed since the file was opened.
+	 */
+	ret = security_file_permission(file, MAY_WRITE);
+	if (ret)
+		goto out_fput;
+
+	inode = file->f_path.dentry->d_inode;
+
+	ret = -ESPIPE;
+	if (S_ISFIFO(inode->i_mode))
+		goto out_fput;
+
+	ret = -ENODEV;
+	/*
+	 * Let the individual file system decide whether it supports
+	 * preallocation for directories.
+	 */
+	if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
+		goto out_fput;
+
+	ret = -EFBIG;
+	/* Check for wrap through zero too */
+	if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
+		goto out_fput;
+
+	if (inode->i_op && inode->i_op->fallocate)
+		ret = inode->i_op->fallocate(inode, mode, offset, len);
+	else
+		ret = -ENOSYS;
+
+out_fput:
+	fput(file);
+out:
+	return ret;
+}
+
 /*
  * access() needs to use the real uid/gid, not the effective uid/gid.
  * We do this by temporarily clearing all FS-related capabilities and
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 98e0b85..783c57e 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -372,11 +372,10 @@
 {
 	struct hd_struct *p;
 
-	p = kmalloc(sizeof(*p), GFP_KERNEL);
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
 	if (!p)
 		return;
 	
-	memset(p, 0, sizeof(*p));
 	p->start_sect = start;
 	p->nr_sects = len;
 	p->partno = part;
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index 99873a2..e7dd1d4 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -677,15 +677,24 @@
  * Return:  -1 Error, the calculated offset exceeded the size of the buffer
  *           n OK, a range-checked offset into buffer
  */
-static int ldm_relative (const u8 *buffer, int buflen, int base, int offset)
+static int ldm_relative(const u8 *buffer, int buflen, int base, int offset)
 {
 
 	base += offset;
-	if ((!buffer) || (offset < 0) || (base > buflen))
+	if (!buffer || offset < 0 || base > buflen) {
+		if (!buffer)
+			ldm_error("!buffer");
+		if (offset < 0)
+			ldm_error("offset (%d) < 0", offset);
+		if (base > buflen)
+			ldm_error("base (%d) > buflen (%d)", base, buflen);
 		return -1;
-	if ((base + buffer[base]) >= buflen)
+	}
+	if (base + buffer[base] >= buflen) {
+		ldm_error("base (%d) + buffer[base] (%d) >= buflen (%d)", base,
+				buffer[base], buflen);
 		return -1;
-
+	}
 	return buffer[base] + offset + 1;
 }
 
@@ -1054,60 +1063,98 @@
  * Return:  'true'   @vb contains a Volume VBLK
  *          'false'  @vb contents are not defined
  */
-static bool ldm_parse_vol5 (const u8 *buffer, int buflen, struct vblk *vb)
+static bool ldm_parse_vol5(const u8 *buffer, int buflen, struct vblk *vb)
 {
-	int r_objid, r_name, r_vtype, r_child, r_size, r_id1, r_id2, r_size2;
-	int r_drive, len;
+	int r_objid, r_name, r_vtype, r_disable_drive_letter, r_child, r_size;
+	int r_id1, r_id2, r_size2, r_drive, len;
 	struct vblk_volu *volu;
 
-	BUG_ON (!buffer || !vb);
-
-	r_objid  = ldm_relative (buffer, buflen, 0x18, 0);
-	r_name   = ldm_relative (buffer, buflen, 0x18, r_objid);
-	r_vtype  = ldm_relative (buffer, buflen, 0x18, r_name);
-	r_child  = ldm_relative (buffer, buflen, 0x2E, r_vtype);
-	r_size   = ldm_relative (buffer, buflen, 0x3E, r_child);
-
-	if (buffer[0x12] & VBLK_FLAG_VOLU_ID1)
-		r_id1 = ldm_relative (buffer, buflen, 0x53, r_size);
-	else
+	BUG_ON(!buffer || !vb);
+	r_objid = ldm_relative(buffer, buflen, 0x18, 0);
+	if (r_objid < 0) {
+		ldm_error("r_objid %d < 0", r_objid);
+		return false;
+	}
+	r_name = ldm_relative(buffer, buflen, 0x18, r_objid);
+	if (r_name < 0) {
+		ldm_error("r_name %d < 0", r_name);
+		return false;
+	}
+	r_vtype = ldm_relative(buffer, buflen, 0x18, r_name);
+	if (r_vtype < 0) {
+		ldm_error("r_vtype %d < 0", r_vtype);
+		return false;
+	}
+	r_disable_drive_letter = ldm_relative(buffer, buflen, 0x18, r_vtype);
+	if (r_disable_drive_letter < 0) {
+		ldm_error("r_disable_drive_letter %d < 0",
+				r_disable_drive_letter);
+		return false;
+	}
+	r_child = ldm_relative(buffer, buflen, 0x2D, r_disable_drive_letter);
+	if (r_child < 0) {
+		ldm_error("r_child %d < 0", r_child);
+		return false;
+	}
+	r_size = ldm_relative(buffer, buflen, 0x3D, r_child);
+	if (r_size < 0) {
+		ldm_error("r_size %d < 0", r_size);
+		return false;
+	}
+	if (buffer[0x12] & VBLK_FLAG_VOLU_ID1) {
+		r_id1 = ldm_relative(buffer, buflen, 0x52, r_size);
+		if (r_id1 < 0) {
+			ldm_error("r_id1 %d < 0", r_id1);
+			return false;
+		}
+	} else
 		r_id1 = r_size;
-
-	if (buffer[0x12] & VBLK_FLAG_VOLU_ID2)
-		r_id2 = ldm_relative (buffer, buflen, 0x53, r_id1);
-	else
+	if (buffer[0x12] & VBLK_FLAG_VOLU_ID2) {
+		r_id2 = ldm_relative(buffer, buflen, 0x52, r_id1);
+		if (r_id2 < 0) {
+			ldm_error("r_id2 %d < 0", r_id2);
+			return false;
+		}
+	} else
 		r_id2 = r_id1;
-
-	if (buffer[0x12] & VBLK_FLAG_VOLU_SIZE)
-		r_size2 = ldm_relative (buffer, buflen, 0x53, r_id2);
-	else
+	if (buffer[0x12] & VBLK_FLAG_VOLU_SIZE) {
+		r_size2 = ldm_relative(buffer, buflen, 0x52, r_id2);
+		if (r_size2 < 0) {
+			ldm_error("r_size2 %d < 0", r_size2);
+			return false;
+		}
+	} else
 		r_size2 = r_id2;
-
-	if (buffer[0x12] & VBLK_FLAG_VOLU_DRIVE)
-		r_drive = ldm_relative (buffer, buflen, 0x53, r_size2);
-	else
-		r_drive = r_size2;
-
-	len = r_drive;
-	if (len < 0)
-		return false;
-
-	len += VBLK_SIZE_VOL5;
-	if (len != BE32 (buffer + 0x14))
-		return false;
-
-	volu = &vb->vblk.volu;
-
-	ldm_get_vstr (buffer + 0x18 + r_name,  volu->volume_type,
-		sizeof (volu->volume_type));
-	memcpy (volu->volume_state, buffer + 0x19 + r_vtype,
-			sizeof (volu->volume_state));
-	volu->size = ldm_get_vnum (buffer + 0x3E + r_child);
-	volu->partition_type = buffer[0x42 + r_size];
-	memcpy (volu->guid, buffer + 0x43 + r_size,  sizeof (volu->guid));
 	if (buffer[0x12] & VBLK_FLAG_VOLU_DRIVE) {
-		ldm_get_vstr (buffer + 0x53 + r_size,  volu->drive_hint,
-			sizeof (volu->drive_hint));
+		r_drive = ldm_relative(buffer, buflen, 0x52, r_size2);
+		if (r_drive < 0) {
+			ldm_error("r_drive %d < 0", r_drive);
+			return false;
+		}
+	} else
+		r_drive = r_size2;
+	len = r_drive;
+	if (len < 0) {
+		ldm_error("len %d < 0", len);
+		return false;
+	}
+	len += VBLK_SIZE_VOL5;
+	if (len > BE32(buffer + 0x14)) {
+		ldm_error("len %d > BE32(buffer + 0x14) %d", len,
+				BE32(buffer + 0x14));
+		return false;
+	}
+	volu = &vb->vblk.volu;
+	ldm_get_vstr(buffer + 0x18 + r_name, volu->volume_type,
+			sizeof(volu->volume_type));
+	memcpy(volu->volume_state, buffer + 0x18 + r_disable_drive_letter,
+			sizeof(volu->volume_state));
+	volu->size = ldm_get_vnum(buffer + 0x3D + r_child);
+	volu->partition_type = buffer[0x41 + r_size];
+	memcpy(volu->guid, buffer + 0x42 + r_size, sizeof(volu->guid));
+	if (buffer[0x12] & VBLK_FLAG_VOLU_DRIVE) {
+		ldm_get_vstr(buffer + 0x52 + r_size, volu->drive_hint,
+				sizeof(volu->drive_hint));
 	}
 	return true;
 }
diff --git a/fs/partitions/ldm.h b/fs/partitions/ldm.h
index d2e6a30..80f63b5 100644
--- a/fs/partitions/ldm.h
+++ b/fs/partitions/ldm.h
@@ -68,7 +68,7 @@
 #define VBLK_SIZE_DSK3		12
 #define VBLK_SIZE_DSK4		45
 #define VBLK_SIZE_PRT3		28
-#define VBLK_SIZE_VOL5		59
+#define VBLK_SIZE_VOL5		58
 
 /* component types */
 #define COMP_STRIPE		0x01		/* Stripe-set */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index ae36273..3c77d5a 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -72,6 +72,7 @@
 #include <linux/poll.h>
 #include <linux/nsproxy.h>
 #include <linux/oom.h>
+#include <linux/elf.h>
 #include "internal.h"
 
 /* NOTE:
@@ -283,7 +284,7 @@
 static int proc_pid_wchan(struct task_struct *task, char *buffer)
 {
 	unsigned long wchan;
-	char symname[KSYM_NAME_LEN+1];
+	char symname[KSYM_NAME_LEN];
 
 	wchan = get_wchan(task);
 
@@ -1014,7 +1015,7 @@
 	task_lock(task);
 	mm = task->mm;
 	if (mm)
-		dumpable = mm->dumpable;
+		dumpable = get_dumpable(mm);
 	task_unlock(task);
 	if(dumpable == 1)
 		return 1;
@@ -1785,6 +1786,91 @@
 
 #endif
 
+#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
+					 size_t count, loff_t *ppos)
+{
+	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+	struct mm_struct *mm;
+	char buffer[PROC_NUMBUF];
+	size_t len;
+	int ret;
+
+	if (!task)
+		return -ESRCH;
+
+	ret = 0;
+	mm = get_task_mm(task);
+	if (mm) {
+		len = snprintf(buffer, sizeof(buffer), "%08lx\n",
+			       ((mm->flags & MMF_DUMP_FILTER_MASK) >>
+				MMF_DUMP_FILTER_SHIFT));
+		mmput(mm);
+		ret = simple_read_from_buffer(buf, count, ppos, buffer, len);
+	}
+
+	put_task_struct(task);
+
+	return ret;
+}
+
+static ssize_t proc_coredump_filter_write(struct file *file,
+					  const char __user *buf,
+					  size_t count,
+					  loff_t *ppos)
+{
+	struct task_struct *task;
+	struct mm_struct *mm;
+	char buffer[PROC_NUMBUF], *end;
+	unsigned int val;
+	int ret;
+	int i;
+	unsigned long mask;
+
+	ret = -EFAULT;
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count))
+		goto out_no_task;
+
+	ret = -EINVAL;
+	val = (unsigned int)simple_strtoul(buffer, &end, 0);
+	if (*end == '\n')
+		end++;
+	if (end - buffer == 0)
+		goto out_no_task;
+
+	ret = -ESRCH;
+	task = get_proc_task(file->f_dentry->d_inode);
+	if (!task)
+		goto out_no_task;
+
+	ret = end - buffer;
+	mm = get_task_mm(task);
+	if (!mm)
+		goto out_no_mm;
+
+	for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) {
+		if (val & mask)
+			set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
+		else
+			clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
+	}
+
+	mmput(mm);
+ out_no_mm:
+	put_task_struct(task);
+ out_no_task:
+	return ret;
+}
+
+static const struct file_operations proc_coredump_filter_operations = {
+	.read		= proc_coredump_filter_read,
+	.write		= proc_coredump_filter_write,
+};
+#endif
+
 /*
  * /proc/self:
  */
@@ -2005,6 +2091,9 @@
 #ifdef CONFIG_FAULT_INJECTION
 	REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
 #endif
+#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+	REG("coredump_filter", S_IRUGO|S_IWUSR, coredump_filter),
+#endif
 #ifdef CONFIG_TASK_IO_ACCOUNTING
 	INF("io",	S_IRUGO, pid_io_accounting),
 #endif
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index d24b8d4..f133afe 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -445,6 +445,11 @@
 	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
 	u64 sum = 0;
 	struct timespec boottime;
+	unsigned int *per_irq_sum;
+
+	per_irq_sum = kzalloc(sizeof(unsigned int)*NR_IRQS, GFP_KERNEL);
+	if (!per_irq_sum)
+		return -ENOMEM;
 
 	user = nice = system = idle = iowait =
 		irq = softirq = steal = cputime64_zero;
@@ -462,8 +467,11 @@
 		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
 		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
 		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
-		for (j = 0 ; j < NR_IRQS ; j++)
-			sum += kstat_cpu(i).irqs[j];
+		for (j = 0; j < NR_IRQS; j++) {
+			unsigned int temp = kstat_cpu(i).irqs[j];
+			sum += temp;
+			per_irq_sum[j] += temp;
+		}
 	}
 
 	seq_printf(p, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu\n",
@@ -501,7 +509,7 @@
 
 #if !defined(CONFIG_PPC64) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
 	for (i = 0; i < NR_IRQS; i++)
-		seq_printf(p, " %u", kstat_irqs(i));
+		seq_printf(p, " %u", per_irq_sum[i]);
 #endif
 
 	seq_printf(p,
@@ -516,6 +524,7 @@
 		nr_running(),
 		nr_iowait());
 
+	kfree(per_irq_sum);
 	return 0;
 }
 
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index d40d22b..ef2b46d 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -60,6 +60,7 @@
 		inode->i_blocks = 0;
 		inode->i_mapping->a_ops = &ramfs_aops;
 		inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
+		mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		switch (mode & S_IFMT) {
 		default:
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 1272d11..ddde489 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -7,6 +7,7 @@
 #include <linux/reiserfs_fs.h>
 #include <linux/reiserfs_acl.h>
 #include <linux/reiserfs_xattr.h>
+#include <linux/exportfs.h>
 #include <linux/smp_lock.h>
 #include <linux/pagemap.h>
 #include <linux/highmem.h>
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index b484d29..11a0fcc 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -51,8 +51,7 @@
 			if (IS_RDONLY(inode))
 				return -EROFS;
 
-			if ((current->fsuid != inode->i_uid)
-			    && !capable(CAP_FOWNER))
+			if (!is_owner_or_cap(inode))
 				return -EPERM;
 
 			if (get_user(flags, (int __user *)arg))
@@ -81,7 +80,7 @@
 	case REISERFS_IOC_GETVERSION:
 		return put_user(inode->i_generation, (int __user *)arg);
 	case REISERFS_IOC_SETVERSION:
-		if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		if (!is_owner_or_cap(inode))
 			return -EPERM;
 		if (IS_RDONLY(inode))
 			return -EROFS;
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index b4ac911..5a93cfe 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/buffer_head.h>
+#include <linux/exportfs.h>
 #include <linux/vfs.h>
 #include <linux/mnt_namespace.h>
 #include <linux/mount.h>
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 5296a29..b7e4fa4 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -21,7 +21,7 @@
 
 	if (!reiserfs_posixacl(inode->i_sb))
 		return -EOPNOTSUPP;
-	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+	if (!is_owner_or_cap(inode))
 		return -EPERM;
 
 	if (value) {
diff --git a/fs/splice.c b/fs/splice.c
index 53fc208..22496d2 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -265,7 +265,7 @@
 			   unsigned int flags)
 {
 	struct address_space *mapping = in->f_mapping;
-	unsigned int loff, nr_pages;
+	unsigned int loff, nr_pages, req_pages;
 	struct page *pages[PIPE_BUFFERS];
 	struct partial_page partial[PIPE_BUFFERS];
 	struct page *page;
@@ -281,28 +281,24 @@
 
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	loff = *ppos & ~PAGE_CACHE_MASK;
-	nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
-	if (nr_pages > PIPE_BUFFERS)
-		nr_pages = PIPE_BUFFERS;
-
-	/*
-	 * Don't try to 2nd guess the read-ahead logic, call into
-	 * page_cache_readahead() like the page cache reads would do.
-	 */
-	page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);
+	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	nr_pages = min(req_pages, (unsigned)PIPE_BUFFERS);
 
 	/*
 	 * Lookup the (hopefully) full range of pages we need.
 	 */
 	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);
+	index += spd.nr_pages;
 
 	/*
 	 * If find_get_pages_contig() returned fewer pages than we needed,
-	 * allocate the rest and fill in the holes.
+	 * readahead/allocate the rest and fill in the holes.
 	 */
+	if (spd.nr_pages < nr_pages)
+		page_cache_sync_readahead(mapping, &in->f_ra, in,
+				index, req_pages - spd.nr_pages);
+
 	error = 0;
-	index += spd.nr_pages;
 	while (spd.nr_pages < nr_pages) {
 		/*
 		 * Page could be there, find_get_pages_contig() breaks on
@@ -311,12 +307,6 @@
 		page = find_get_page(mapping, index);
 		if (!page) {
 			/*
-			 * Make sure the read-ahead engine is notified
-			 * about this failure.
-			 */
-			handle_ra_miss(mapping, &in->f_ra, index);
-
-			/*
 			 * page didn't exist, allocate one.
 			 */
 			page = page_cache_alloc_cold(mapping);
@@ -361,6 +351,10 @@
 		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
 		page = pages[page_nr];
 
+		if (PageReadahead(page))
+			page_cache_async_readahead(mapping, &in->f_ra, in,
+					page, index, req_pages - page_nr);
+
 		/*
 		 * If the page isn't uptodate, we may need to start io on it
 		 */
@@ -453,6 +447,7 @@
 	 */
 	while (page_nr < nr_pages)
 		page_cache_release(pages[page_nr++]);
+	in->f_ra.prev_index = index;
 
 	if (spd.nr_pages)
 		return splice_to_pipe(pipe, &spd);
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index aee966c..048e605 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -361,20 +361,20 @@
 struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type)
 {
 	char *dup_name = NULL;
-	struct sysfs_dirent *sd = NULL;
+	struct sysfs_dirent *sd;
 
 	if (type & SYSFS_COPY_NAME) {
 		name = dup_name = kstrdup(name, GFP_KERNEL);
 		if (!name)
-			goto err_out;
+			return NULL;
 	}
 
 	sd = kmem_cache_zalloc(sysfs_dir_cachep, GFP_KERNEL);
 	if (!sd)
-		goto err_out;
+		goto err_out1;
 
 	if (sysfs_alloc_ino(&sd->s_ino))
-		goto err_out;
+		goto err_out2;
 
 	atomic_set(&sd->s_count, 1);
 	atomic_set(&sd->s_active, 0);
@@ -386,9 +386,10 @@
 
 	return sd;
 
- err_out:
-	kfree(dup_name);
+ err_out2:
 	kmem_cache_free(sysfs_dir_cachep, sd);
+ err_out1:
+	kfree(dup_name);
 	return NULL;
 }
 
@@ -698,17 +699,19 @@
 
 	/* link in */
 	sysfs_addrm_start(&acxt, parent_sd);
+
 	if (!sysfs_find_dirent(parent_sd, name)) {
 		sysfs_add_one(&acxt, sd);
 		sysfs_link_sibling(sd);
 	}
-	if (sysfs_addrm_finish(&acxt)) {
-		*p_sd = sd;
-		return 0;
+
+	if (!sysfs_addrm_finish(&acxt)) {
+		sysfs_put(sd);
+		return -EEXIST;
 	}
 
-	sysfs_put(sd);
-	return -EEXIST;
+	*p_sd = sd;
+	return 0;
 }
 
 int sysfs_create_subdir(struct kobject *kobj, const char *name,
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index cc49799..3e1cc06 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -410,11 +410,12 @@
 		sysfs_link_sibling(sd);
 	}
 
-	if (sysfs_addrm_finish(&acxt))
-		return 0;
+	if (!sysfs_addrm_finish(&acxt)) {
+		sysfs_put(sd);
+		return -EEXIST;
+	}
 
-	sysfs_put(sd);
-	return -EEXIST;
+	return 0;
 }
 
 
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 3756e15..10d1b52 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -133,7 +133,7 @@
  */
 static struct lock_class_key sysfs_inode_imutex_key;
 
-void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
+static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
 {
 	inode->i_blocks = 0;
 	inode->i_mapping->a_ops = &sysfs_aops;
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 402cc35..60714d0 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -43,19 +43,19 @@
 	sb->s_time_gran = 1;
 	sysfs_sb = sb;
 
-	inode = new_inode(sysfs_sb);
+	/* get root inode, initialize and unlock it */
+	inode = sysfs_get_inode(&sysfs_root);
 	if (!inode) {
 		pr_debug("sysfs: could not get root inode\n");
 		return -ENOMEM;
 	}
 
-	sysfs_init_inode(&sysfs_root, inode);
-
 	inode->i_op = &sysfs_dir_inode_operations;
 	inode->i_fop = &sysfs_dir_operations;
-	/* directory inodes start off with i_nlink == 2 (for "." entry) */
-	inc_nlink(inode);
+	inc_nlink(inode); /* directory, account for "." */
+	unlock_new_inode(inode);
 
+	/* instantiate and link root dentry */
 	root = d_alloc_root(inode);
 	if (!root) {
 		pr_debug("%s: could not get root dentry!\n",__FUNCTION__);
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 2f86e04..4ce687f 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -86,7 +86,9 @@
 	sd = sysfs_new_dirent(name, S_IFLNK|S_IRWXUGO, SYSFS_KOBJ_LINK);
 	if (!sd)
 		goto out_put;
+
 	sd->s_elem.symlink.target_sd = target_sd;
+	target_sd = NULL;	/* reference is now owned by the symlink */
 
 	sysfs_addrm_start(&acxt, parent_sd);
 
@@ -95,11 +97,13 @@
 		sysfs_link_sibling(sd);
 	}
 
-	if (sysfs_addrm_finish(&acxt))
-		return 0;
+	if (!sysfs_addrm_finish(&acxt)) {
+		error = -EEXIST;
+		goto out_put;
+	}
 
-	error = -EEXIST;
-	/* fall through */
+	return 0;
+
  out_put:
 	sysfs_put(target_sd);
 	sysfs_put(sd);
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 6a37f23..6b8c8d7 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -71,7 +71,6 @@
 extern int sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt);
 
 extern void sysfs_delete_inode(struct inode *inode);
-extern void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode);
 extern struct inode * sysfs_get_inode(struct sysfs_dirent *sd);
 extern void sysfs_instantiate(struct dentry *dentry, struct inode *inode);
 
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 4cec910..ef48d09 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -41,18 +41,17 @@
 #define uint(x) xuint(x)
 #define xuint(x) __le ## x
 
-static inline int find_next_one_bit (void * addr, int size, int offset)
+static inline int find_next_one_bit(void *addr, int size, int offset)
 {
-	uintBPL_t * p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
-	int result = offset & ~(BITS_PER_LONG-1);
+	uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
+	int result = offset & ~(BITS_PER_LONG - 1);
 	unsigned long tmp;
 
 	if (offset >= size)
 		return size;
 	size -= result;
-	offset &= (BITS_PER_LONG-1);
-	if (offset)
-	{
+	offset &= (BITS_PER_LONG - 1);
+	if (offset) {
 		tmp = leBPL_to_cpup(p++);
 		tmp &= ~0UL << offset;
 		if (size < BITS_PER_LONG)
@@ -62,8 +61,7 @@
 		size -= BITS_PER_LONG;
 		result += BITS_PER_LONG;
 	}
-	while (size & ~(BITS_PER_LONG-1))
-	{
+	while (size & ~(BITS_PER_LONG - 1)) {
 		if ((tmp = leBPL_to_cpup(p++)))
 			goto found_middle;
 		result += BITS_PER_LONG;
@@ -72,17 +70,18 @@
 	if (!size)
 		return result;
 	tmp = leBPL_to_cpup(p);
-found_first:
-	tmp &= ~0UL >> (BITS_PER_LONG-size);
-found_middle:
+      found_first:
+	tmp &= ~0UL >> (BITS_PER_LONG - size);
+      found_middle:
 	return result + ffz(~tmp);
 }
 
 #define find_first_one_bit(addr, size)\
 	find_next_one_bit((addr), (size), 0)
 
-static int read_block_bitmap(struct super_block * sb,
-	struct udf_bitmap *bitmap, unsigned int block, unsigned long bitmap_nr)
+static int read_block_bitmap(struct super_block *sb,
+			     struct udf_bitmap *bitmap, unsigned int block,
+			     unsigned long bitmap_nr)
 {
 	struct buffer_head *bh = NULL;
 	int retval = 0;
@@ -92,38 +91,39 @@
 	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);
 
 	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
-	if (!bh)
-	{
+	if (!bh) {
 		retval = -EIO;
 	}
 	bitmap->s_block_bitmap[bitmap_nr] = bh;
 	return retval;
 }
 
-static int __load_block_bitmap(struct super_block * sb,
-	struct udf_bitmap *bitmap, unsigned int block_group)
+static int __load_block_bitmap(struct super_block *sb,
+			       struct udf_bitmap *bitmap,
+			       unsigned int block_group)
 {
 	int retval = 0;
 	int nr_groups = bitmap->s_nr_groups;
 
-	if (block_group >= nr_groups)
-	{
-		udf_debug("block_group (%d) > nr_groups (%d)\n", block_group, nr_groups);
+	if (block_group >= nr_groups) {
+		udf_debug("block_group (%d) > nr_groups (%d)\n", block_group,
+			  nr_groups);
 	}
 
 	if (bitmap->s_block_bitmap[block_group])
 		return block_group;
-	else
-	{
-		retval = read_block_bitmap(sb, bitmap, block_group, block_group);
+	else {
+		retval =
+		    read_block_bitmap(sb, bitmap, block_group, block_group);
 		if (retval < 0)
 			return retval;
 		return block_group;
 	}
 }
 
-static inline int load_block_bitmap(struct super_block * sb,
-	struct udf_bitmap *bitmap, unsigned int block_group)
+static inline int load_block_bitmap(struct super_block *sb,
+				    struct udf_bitmap *bitmap,
+				    unsigned int block_group)
 {
 	int slot;
 
@@ -138,13 +138,14 @@
 	return slot;
 }
 
-static void udf_bitmap_free_blocks(struct super_block * sb,
-	struct inode * inode,
-	struct udf_bitmap *bitmap,
-	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
+static void udf_bitmap_free_blocks(struct super_block *sb,
+				   struct inode *inode,
+				   struct udf_bitmap *bitmap,
+				   kernel_lb_addr bloc, uint32_t offset,
+				   uint32_t count)
 {
 	struct udf_sb_info *sbi = UDF_SB(sb);
-	struct buffer_head * bh = NULL;
+	struct buffer_head *bh = NULL;
 	unsigned long block;
 	unsigned long block_group;
 	unsigned long bit;
@@ -154,17 +155,22 @@
 
 	mutex_lock(&sbi->s_alloc_mutex);
 	if (bloc.logicalBlockNum < 0 ||
-		(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
+	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb,
+							    bloc.
+							    partitionReferenceNum))
 	{
-		udf_debug("%d < %d || %d + %d > %d\n",
-			bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
-			UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
+		udf_debug("%d < %d || %d + %d > %d\n", bloc.logicalBlockNum, 0,
+			  bloc.logicalBlockNum, count, UDF_SB_PARTLEN(sb,
+								      bloc.
+								      partitionReferenceNum));
 		goto error_return;
 	}
 
-	block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3);
+	block =
+	    bloc.logicalBlockNum + offset +
+	    (sizeof(struct spaceBitmapDesc) << 3);
 
-do_more:
+      do_more:
 	overflow = 0;
 	block_group = block >> (sb->s_blocksize_bits + 3);
 	bit = block % (sb->s_blocksize << 3);
@@ -172,8 +178,7 @@
 	/*
 	 * Check to see if we are freeing blocks across a group boundary.
 	 */
-	if (bit + count > (sb->s_blocksize << 3))
-	{
+	if (bit + count > (sb->s_blocksize << 3)) {
 		overflow = bit + count - (sb->s_blocksize << 3);
 		count -= overflow;
 	}
@@ -182,32 +187,31 @@
 		goto error_return;
 
 	bh = bitmap->s_block_bitmap[bitmap_nr];
-	for (i=0; i < count; i++)
-	{
-		if (udf_set_bit(bit + i, bh->b_data))
-		{
+	for (i = 0; i < count; i++) {
+		if (udf_set_bit(bit + i, bh->b_data)) {
 			udf_debug("bit %ld already set\n", bit + i);
-			udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]);
-		}
-		else
-		{
+			udf_debug("byte=%2x\n",
+				  ((char *)bh->b_data)[(bit + i) >> 3]);
+		} else {
 			if (inode)
 				DQUOT_FREE_BLOCK(inode, 1);
-			if (UDF_SB_LVIDBH(sb))
-			{
-				UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
-					cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)])+1);
+			if (UDF_SB_LVIDBH(sb)) {
+				UDF_SB_LVID(sb)->
+				    freeSpaceTable[UDF_SB_PARTITION(sb)] =
+				    cpu_to_le32(le32_to_cpu
+						(UDF_SB_LVID(sb)->
+						 freeSpaceTable[UDF_SB_PARTITION
+								(sb)]) + 1);
 			}
 		}
 	}
 	mark_buffer_dirty(bh);
-	if (overflow)
-	{
+	if (overflow) {
 		block += count;
 		count = overflow;
 		goto do_more;
 	}
-error_return:
+      error_return:
 	sb->s_dirt = 1;
 	if (UDF_SB_LVIDBH(sb))
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
@@ -215,10 +219,11 @@
 	return;
 }
 
-static int udf_bitmap_prealloc_blocks(struct super_block * sb,
-	struct inode * inode,
-	struct udf_bitmap *bitmap, uint16_t partition, uint32_t first_block,
-	uint32_t block_count)
+static int udf_bitmap_prealloc_blocks(struct super_block *sb,
+				      struct inode *inode,
+				      struct udf_bitmap *bitmap,
+				      uint16_t partition, uint32_t first_block,
+				      uint32_t block_count)
 {
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	int alloc_count = 0;
@@ -233,9 +238,10 @@
 	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
 		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;
 
-repeat:
+      repeat:
 	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
-		(sizeof(struct spaceBitmapDesc) << 3) + (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
+		     (sizeof(struct spaceBitmapDesc) << 3) +
+		     (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
 	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
 	block_group = block >> (sb->s_blocksize_bits + 3);
 	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
@@ -247,31 +253,30 @@
 
 	bit = block % (sb->s_blocksize << 3);
 
-	while (bit < (sb->s_blocksize << 3) && block_count > 0)
-	{
+	while (bit < (sb->s_blocksize << 3) && block_count > 0) {
 		if (!udf_test_bit(bit, bh->b_data))
 			goto out;
 		else if (DQUOT_PREALLOC_BLOCK(inode, 1))
 			goto out;
-		else if (!udf_clear_bit(bit, bh->b_data))
-		{
+		else if (!udf_clear_bit(bit, bh->b_data)) {
 			udf_debug("bit already cleared for block %d\n", bit);
 			DQUOT_FREE_BLOCK(inode, 1);
 			goto out;
 		}
-		block_count --;
-		alloc_count ++;
-		bit ++;
-		block ++;
+		block_count--;
+		alloc_count++;
+		bit++;
+		block++;
 	}
 	mark_buffer_dirty(bh);
 	if (block_count > 0)
 		goto repeat;
-out:
-	if (UDF_SB_LVIDBH(sb))
-	{
+      out:
+	if (UDF_SB_LVIDBH(sb)) {
 		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-alloc_count);
+		    cpu_to_le32(le32_to_cpu
+				(UDF_SB_LVID(sb)->freeSpaceTable[partition]) -
+				alloc_count);
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 	}
 	sb->s_dirt = 1;
@@ -279,12 +284,13 @@
 	return alloc_count;
 }
 
-static int udf_bitmap_new_block(struct super_block * sb,
-	struct inode * inode,
-	struct udf_bitmap *bitmap, uint16_t partition, uint32_t goal, int *err)
+static int udf_bitmap_new_block(struct super_block *sb,
+				struct inode *inode,
+				struct udf_bitmap *bitmap, uint16_t partition,
+				uint32_t goal, int *err)
 {
 	struct udf_sb_info *sbi = UDF_SB(sb);
-	int newbit, bit=0, block, block_group, group_start;
+	int newbit, bit = 0, block, block_group, group_start;
 	int end_goal, nr_groups, bitmap_nr, i;
 	struct buffer_head *bh = NULL;
 	char *ptr;
@@ -293,7 +299,7 @@
 	*err = -ENOSPC;
 	mutex_lock(&sbi->s_alloc_mutex);
 
-repeat:
+      repeat:
 	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
 		goal = 0;
 
@@ -306,38 +312,39 @@
 	if (bitmap_nr < 0)
 		goto error_return;
 	bh = bitmap->s_block_bitmap[bitmap_nr];
-	ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);
+	ptr =
+	    memscan((char *)bh->b_data + group_start, 0xFF,
+		    sb->s_blocksize - group_start);
 
-	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
-	{
+	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
 		bit = block % (sb->s_blocksize << 3);
 
-		if (udf_test_bit(bit, bh->b_data))
-		{
+		if (udf_test_bit(bit, bh->b_data)) {
 			goto got_block;
 		}
 		end_goal = (bit + 63) & ~63;
 		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
 		if (bit < end_goal)
 			goto got_block;
-		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3));
+		ptr =
+		    memscan((char *)bh->b_data + (bit >> 3), 0xFF,
+			    sb->s_blocksize - ((bit + 7) >> 3));
 		newbit = (ptr - ((char *)bh->b_data)) << 3;
-		if (newbit < sb->s_blocksize << 3)
-		{
+		if (newbit < sb->s_blocksize << 3) {
 			bit = newbit;
 			goto search_back;
 		}
-		newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit);
-		if (newbit < sb->s_blocksize << 3)
-		{
+		newbit =
+		    udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
+					  bit);
+		if (newbit < sb->s_blocksize << 3) {
 			bit = newbit;
 			goto got_block;
 		}
 	}
 
-	for (i=0; i<(nr_groups*2); i++)
-	{
-		block_group ++;
+	for (i = 0; i < (nr_groups * 2); i++) {
+		block_group++;
 		if (block_group >= nr_groups)
 			block_group = 0;
 		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
@@ -346,67 +353,69 @@
 		if (bitmap_nr < 0)
 			goto error_return;
 		bh = bitmap->s_block_bitmap[bitmap_nr];
-		if (i < nr_groups)
-		{
-			ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);
-			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
-			{
+		if (i < nr_groups) {
+			ptr =
+			    memscan((char *)bh->b_data + group_start, 0xFF,
+				    sb->s_blocksize - group_start);
+			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
 				bit = (ptr - ((char *)bh->b_data)) << 3;
 				break;
 			}
-		}
-		else
-		{
-			bit = udf_find_next_one_bit((char *)bh->b_data, sb->s_blocksize << 3, group_start << 3);
+		} else {
+			bit =
+			    udf_find_next_one_bit((char *)bh->b_data,
+						  sb->s_blocksize << 3,
+						  group_start << 3);
 			if (bit < sb->s_blocksize << 3)
 				break;
 		}
 	}
-	if (i >= (nr_groups*2))
-	{
+	if (i >= (nr_groups * 2)) {
 		mutex_unlock(&sbi->s_alloc_mutex);
 		return newblock;
 	}
 	if (bit < sb->s_blocksize << 3)
 		goto search_back;
 	else
-		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
-	if (bit >= sb->s_blocksize << 3)
-	{
+		bit =
+		    udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
+					  group_start << 3);
+	if (bit >= sb->s_blocksize << 3) {
 		mutex_unlock(&sbi->s_alloc_mutex);
 		return 0;
 	}
 
-search_back:
-	for (i=0; i<7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--);
+      search_back:
+	for (i = 0;
+	     i < 7 && bit > (group_start << 3)
+	     && udf_test_bit(bit - 1, bh->b_data); i++, bit--) ;
 
-got_block:
+      got_block:
 
 	/*
 	 * Check quota for allocation of this block.
 	 */
-	if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
-	{
+	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
 		mutex_unlock(&sbi->s_alloc_mutex);
 		*err = -EDQUOT;
 		return 0;
 	}
 
 	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
-		(sizeof(struct spaceBitmapDesc) << 3);
+	    (sizeof(struct spaceBitmapDesc) << 3);
 
-	if (!udf_clear_bit(bit, bh->b_data))
-	{
+	if (!udf_clear_bit(bit, bh->b_data)) {
 		udf_debug("bit already cleared for block %d\n", bit);
 		goto repeat;
 	}
 
 	mark_buffer_dirty(bh);
 
-	if (UDF_SB_LVIDBH(sb))
-	{
+	if (UDF_SB_LVIDBH(sb)) {
 		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-1);
+		    cpu_to_le32(le32_to_cpu
+				(UDF_SB_LVID(sb)->freeSpaceTable[partition]) -
+				1);
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 	}
 	sb->s_dirt = 1;
@@ -414,16 +423,17 @@
 	*err = 0;
 	return newblock;
 
-error_return:
+      error_return:
 	*err = -EIO;
 	mutex_unlock(&sbi->s_alloc_mutex);
 	return 0;
 }
 
-static void udf_table_free_blocks(struct super_block * sb,
-	struct inode * inode,
-	struct inode * table,
-	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
+static void udf_table_free_blocks(struct super_block *sb,
+				  struct inode *inode,
+				  struct inode *table,
+				  kernel_lb_addr bloc, uint32_t offset,
+				  uint32_t count)
 {
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	uint32_t start, end;
@@ -435,11 +445,14 @@
 
 	mutex_lock(&sbi->s_alloc_mutex);
 	if (bloc.logicalBlockNum < 0 ||
-		(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
+	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb,
+							    bloc.
+							    partitionReferenceNum))
 	{
-		udf_debug("%d < %d || %d + %d > %d\n",
-			bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
-			UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
+		udf_debug("%d < %d || %d + %d > %d\n", bloc.logicalBlockNum, 0,
+			  bloc.logicalBlockNum, count, UDF_SB_PARTLEN(sb,
+								      bloc.
+								      partitionReferenceNum));
 		goto error_return;
 	}
 
@@ -447,10 +460,11 @@
 	   but.. oh well */
 	if (inode)
 		DQUOT_FREE_BLOCK(inode, count);
-	if (UDF_SB_LVIDBH(sb))
-	{
+	if (UDF_SB_LVIDBH(sb)) {
 		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)])+count);
+		    cpu_to_le32(le32_to_cpu
+				(UDF_SB_LVID(sb)->
+				 freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 	}
 
@@ -463,73 +477,75 @@
 	epos.bh = oepos.bh = NULL;
 
 	while (count && (etype =
-		udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
-	{
+			 udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
 		if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) ==
-			start))
-		{
-			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
-			{
-				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
-				start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
-				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
-			}
-			else
-			{
+		     start)) {
+			if ((0x3FFFFFFF - elen) <
+			    (count << sb->s_blocksize_bits)) {
+				count -=
+				    ((0x3FFFFFFF -
+				      elen) >> sb->s_blocksize_bits);
+				start +=
+				    ((0x3FFFFFFF -
+				      elen) >> sb->s_blocksize_bits);
+				elen =
+				    (etype << 30) | (0x40000000 -
+						     sb->s_blocksize);
+			} else {
 				elen = (etype << 30) |
-					(elen + (count << sb->s_blocksize_bits));
+				    (elen + (count << sb->s_blocksize_bits));
 				start += count;
 				count = 0;
 			}
 			udf_write_aext(table, &oepos, eloc, elen, 1);
-		}
-		else if (eloc.logicalBlockNum == (end + 1))
-		{
-			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
-			{
-				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
-				end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
+		} else if (eloc.logicalBlockNum == (end + 1)) {
+			if ((0x3FFFFFFF - elen) <
+			    (count << sb->s_blocksize_bits)) {
+				count -=
+				    ((0x3FFFFFFF -
+				      elen) >> sb->s_blocksize_bits);
+				end -=
+				    ((0x3FFFFFFF -
+				      elen) >> sb->s_blocksize_bits);
 				eloc.logicalBlockNum -=
-					((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
-				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
-			}
-			else
-			{
+				    ((0x3FFFFFFF -
+				      elen) >> sb->s_blocksize_bits);
+				elen =
+				    (etype << 30) | (0x40000000 -
+						     sb->s_blocksize);
+			} else {
 				eloc.logicalBlockNum = start;
 				elen = (etype << 30) |
-					(elen + (count << sb->s_blocksize_bits));
+				    (elen + (count << sb->s_blocksize_bits));
 				end -= count;
 				count = 0;
 			}
 			udf_write_aext(table, &oepos, eloc, elen, 1);
 		}
 
-		if (epos.bh != oepos.bh)
-		{
+		if (epos.bh != oepos.bh) {
 			i = -1;
 			oepos.block = epos.block;
 			brelse(oepos.bh);
 			get_bh(epos.bh);
 			oepos.bh = epos.bh;
 			oepos.offset = 0;
-		}
-		else
+		} else
 			oepos.offset = epos.offset;
 	}
 
-	if (count)
-	{
+	if (count) {
 		/* NOTE: we CANNOT use udf_add_aext here, as it can try to allocate
-				 a new block, and since we hold the super block lock already
-				 very bad things would happen :)
+		   a new block, and since we hold the super block lock already
+		   very bad things would happen :)
 
-				 We copy the behavior of udf_add_aext, but instead of
-				 trying to allocate a new block close to the existing one,
-				 we just steal a block from the extent we are trying to add.
+		   We copy the behavior of udf_add_aext, but instead of
+		   trying to allocate a new block close to the existing one,
+		   we just steal a block from the extent we are trying to add.
 
-				 It would be nice if the blocks were close together, but it
-				 isn't required.
-		*/
+		   It would be nice if the blocks were close together, but it
+		   isn't required.
+		 */
 
 		int adsize;
 		short_ad *sad = NULL;
@@ -537,121 +553,124 @@
 		struct allocExtDesc *aed;
 
 		eloc.logicalBlockNum = start;
-		elen = EXT_RECORDED_ALLOCATED |
-			(count << sb->s_blocksize_bits);
+		elen = EXT_RECORDED_ALLOCATED | (count << sb->s_blocksize_bits);
 
 		if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
 			adsize = sizeof(short_ad);
 		else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
 			adsize = sizeof(long_ad);
-		else
-		{
+		else {
 			brelse(oepos.bh);
 			brelse(epos.bh);
 			goto error_return;
 		}
 
-		if (epos.offset + (2 * adsize) > sb->s_blocksize)
-		{
+		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
 			char *sptr, *dptr;
 			int loffset;
-	
+
 			brelse(oepos.bh);
 			oepos = epos;
 
 			/* Steal a block from the extent being free'd */
 			epos.block.logicalBlockNum = eloc.logicalBlockNum;
-			eloc.logicalBlockNum ++;
+			eloc.logicalBlockNum++;
 			elen -= sb->s_blocksize;
 
 			if (!(epos.bh = udf_tread(sb,
-				udf_get_lb_pblock(sb, epos.block, 0))))
-			{
+						  udf_get_lb_pblock(sb,
+								    epos.block,
+								    0)))) {
 				brelse(oepos.bh);
 				goto error_return;
 			}
 			aed = (struct allocExtDesc *)(epos.bh->b_data);
-			aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum);
-			if (epos.offset + adsize > sb->s_blocksize)
-			{
+			aed->previousAllocExtLocation =
+			    cpu_to_le32(oepos.block.logicalBlockNum);
+			if (epos.offset + adsize > sb->s_blocksize) {
 				loffset = epos.offset;
 				aed->lengthAllocDescs = cpu_to_le32(adsize);
 				sptr = UDF_I_DATA(inode) + epos.offset -
-					udf_file_entry_alloc_offset(inode) +
-					UDF_I_LENEATTR(inode) - adsize;
-				dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
+				    udf_file_entry_alloc_offset(inode) +
+				    UDF_I_LENEATTR(inode) - adsize;
+				dptr =
+				    epos.bh->b_data +
+				    sizeof(struct allocExtDesc);
 				memcpy(dptr, sptr, adsize);
-				epos.offset = sizeof(struct allocExtDesc) + adsize;
-			}
-			else
-			{
+				epos.offset =
+				    sizeof(struct allocExtDesc) + adsize;
+			} else {
 				loffset = epos.offset + adsize;
 				aed->lengthAllocDescs = cpu_to_le32(0);
 				sptr = oepos.bh->b_data + epos.offset;
 				epos.offset = sizeof(struct allocExtDesc);
 
-				if (oepos.bh)
-				{
-					aed = (struct allocExtDesc *)oepos.bh->b_data;
+				if (oepos.bh) {
+					aed =
+					    (struct allocExtDesc *)oepos.bh->
+					    b_data;
 					aed->lengthAllocDescs =
-						cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
-				}
-				else
-				{
+					    cpu_to_le32(le32_to_cpu
+							(aed->
+							 lengthAllocDescs) +
+							adsize);
+				} else {
 					UDF_I_LENALLOC(table) += adsize;
 					mark_inode_dirty(table);
 				}
 			}
 			if (UDF_SB_UDFREV(sb) >= 0x0200)
-				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
-					epos.block.logicalBlockNum, sizeof(tag));
+				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3,
+					    1, epos.block.logicalBlockNum,
+					    sizeof(tag));
 			else
-				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1,
-					epos.block.logicalBlockNum, sizeof(tag));
-			switch (UDF_I_ALLOCTYPE(table))
-			{
-				case ICBTAG_FLAG_AD_SHORT:
+				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2,
+					    1, epos.block.logicalBlockNum,
+					    sizeof(tag));
+			switch (UDF_I_ALLOCTYPE(table)) {
+			case ICBTAG_FLAG_AD_SHORT:
 				{
-					sad = (short_ad *)sptr;
-					sad->extLength = cpu_to_le32(
-						EXT_NEXT_EXTENT_ALLOCDECS |
-						sb->s_blocksize);
-					sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum);
+					sad = (short_ad *) sptr;
+					sad->extLength =
+					    cpu_to_le32
+					    (EXT_NEXT_EXTENT_ALLOCDECS | sb->
+					     s_blocksize);
+					sad->extPosition =
+					    cpu_to_le32(epos.block.
+							logicalBlockNum);
 					break;
 				}
-				case ICBTAG_FLAG_AD_LONG:
+			case ICBTAG_FLAG_AD_LONG:
 				{
-					lad = (long_ad *)sptr;
-					lad->extLength = cpu_to_le32(
-						EXT_NEXT_EXTENT_ALLOCDECS |
-						sb->s_blocksize);
-					lad->extLocation = cpu_to_lelb(epos.block);
+					lad = (long_ad *) sptr;
+					lad->extLength =
+					    cpu_to_le32
+					    (EXT_NEXT_EXTENT_ALLOCDECS | sb->
+					     s_blocksize);
+					lad->extLocation =
+					    cpu_to_lelb(epos.block);
 					break;
 				}
 			}
-			if (oepos.bh)
-			{
+			if (oepos.bh) {
 				udf_update_tag(oepos.bh->b_data, loffset);
 				mark_buffer_dirty(oepos.bh);
-			}
-			else
+			} else
 				mark_inode_dirty(table);
 		}
 
-		if (elen) /* It's possible that stealing the block emptied the extent */
-		{
+		if (elen) {	/* It's possible that stealing the block emptied the extent */
 			udf_write_aext(table, &epos, eloc, elen, 1);
 
-			if (!epos.bh)
-			{
+			if (!epos.bh) {
 				UDF_I_LENALLOC(table) += adsize;
 				mark_inode_dirty(table);
-			}
-			else
-			{
+			} else {
 				aed = (struct allocExtDesc *)epos.bh->b_data;
 				aed->lengthAllocDescs =
-					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
+				    cpu_to_le32(le32_to_cpu
+						(aed->lengthAllocDescs) +
+						adsize);
 				udf_update_tag(epos.bh->b_data, epos.offset);
 				mark_buffer_dirty(epos.bh);
 			}
@@ -661,16 +680,16 @@
 	brelse(epos.bh);
 	brelse(oepos.bh);
 
-error_return:
+      error_return:
 	sb->s_dirt = 1;
 	mutex_unlock(&sbi->s_alloc_mutex);
 	return;
 }
 
-static int udf_table_prealloc_blocks(struct super_block * sb,
-	struct inode * inode,
-	struct inode *table, uint16_t partition, uint32_t first_block,
-	uint32_t block_count)
+static int udf_table_prealloc_blocks(struct super_block *sb,
+				     struct inode *inode,
+				     struct inode *table, uint16_t partition,
+				     uint32_t first_block, uint32_t block_count)
 {
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	int alloc_count = 0;
@@ -696,39 +715,46 @@
 	eloc.logicalBlockNum = 0xFFFFFFFF;
 
 	while (first_block != eloc.logicalBlockNum && (etype =
-		udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
-	{
+						       udf_next_aext(table,
+								     &epos,
+								     &eloc,
+								     &elen,
+								     1)) !=
+	       -1) {
 		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
-			eloc.logicalBlockNum, elen, first_block);
-		; /* empty loop body */
+			  eloc.logicalBlockNum, elen, first_block);
+		;		/* empty loop body */
 	}
 
-	if (first_block == eloc.logicalBlockNum)
-	{
+	if (first_block == eloc.logicalBlockNum) {
 		epos.offset -= adsize;
 
 		alloc_count = (elen >> sb->s_blocksize_bits);
-		if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count))
+		if (inode
+		    && DQUOT_PREALLOC_BLOCK(inode,
+					    alloc_count >
+					    block_count ? block_count :
+					    alloc_count))
 			alloc_count = 0;
-		else if (alloc_count > block_count)
-		{
+		else if (alloc_count > block_count) {
 			alloc_count = block_count;
 			eloc.logicalBlockNum += alloc_count;
 			elen -= (alloc_count << sb->s_blocksize_bits);
-			udf_write_aext(table, &epos, eloc, (etype << 30) | elen, 1);
-		}
-		else
-			udf_delete_aext(table, epos, eloc, (etype << 30) | elen);
-	}
-	else
+			udf_write_aext(table, &epos, eloc, (etype << 30) | elen,
+				       1);
+		} else
+			udf_delete_aext(table, epos, eloc,
+					(etype << 30) | elen);
+	} else
 		alloc_count = 0;
 
 	brelse(epos.bh);
 
-	if (alloc_count && UDF_SB_LVIDBH(sb))
-	{
+	if (alloc_count && UDF_SB_LVIDBH(sb)) {
 		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-alloc_count);
+		    cpu_to_le32(le32_to_cpu
+				(UDF_SB_LVID(sb)->freeSpaceTable[partition]) -
+				alloc_count);
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 		sb->s_dirt = 1;
 	}
@@ -736,9 +762,10 @@
 	return alloc_count;
 }
 
-static int udf_table_new_block(struct super_block * sb,
-	struct inode * inode,
-	struct inode *table, uint16_t partition, uint32_t goal, int *err)
+static int udf_table_new_block(struct super_block *sb,
+			       struct inode *inode,
+			       struct inode *table, uint16_t partition,
+			       uint32_t goal, int *err)
 {
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
@@ -765,30 +792,27 @@
 	   we stop. Otherwise we keep going till we run out of extents.
 	   We store the buffer_head, bloc, and extoffset of the current closest
 	   match and use that when we are done.
-	*/
+	 */
 	epos.offset = sizeof(struct unallocSpaceEntry);
 	epos.block = UDF_I_LOCATION(table);
 	epos.bh = goal_epos.bh = NULL;
 
 	while (spread && (etype =
-		udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
-	{
-		if (goal >= eloc.logicalBlockNum)
-		{
-			if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits))
+			  udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
+		if (goal >= eloc.logicalBlockNum) {
+			if (goal <
+			    eloc.logicalBlockNum +
+			    (elen >> sb->s_blocksize_bits))
 				nspread = 0;
 			else
 				nspread = goal - eloc.logicalBlockNum -
-					(elen >> sb->s_blocksize_bits);
-		}
-		else
+				    (elen >> sb->s_blocksize_bits);
+		} else
 			nspread = eloc.logicalBlockNum - goal;
 
-		if (nspread < spread)
-		{
+		if (nspread < spread) {
 			spread = nspread;
-			if (goal_epos.bh != epos.bh)
-			{
+			if (goal_epos.bh != epos.bh) {
 				brelse(goal_epos.bh);
 				goal_epos.bh = epos.bh;
 				get_bh(goal_epos.bh);
@@ -802,8 +826,7 @@
 
 	brelse(epos.bh);
 
-	if (spread == 0xFFFFFFFF)
-	{
+	if (spread == 0xFFFFFFFF) {
 		brelse(goal_epos.bh);
 		mutex_unlock(&sbi->s_alloc_mutex);
 		return 0;
@@ -815,11 +838,10 @@
 	/* This works, but very poorly.... */
 
 	newblock = goal_eloc.logicalBlockNum;
-	goal_eloc.logicalBlockNum ++;
+	goal_eloc.logicalBlockNum++;
 	goal_elen -= sb->s_blocksize;
 
-	if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
-	{
+	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
 		brelse(goal_epos.bh);
 		mutex_unlock(&sbi->s_alloc_mutex);
 		*err = -EDQUOT;
@@ -832,10 +854,11 @@
 		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
 	brelse(goal_epos.bh);
 
-	if (UDF_SB_LVIDBH(sb))
-	{
+	if (UDF_SB_LVIDBH(sb)) {
 		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-1);
+		    cpu_to_le32(le32_to_cpu
+				(UDF_SB_LVID(sb)->freeSpaceTable[partition]) -
+				1);
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 	}
 
@@ -845,105 +868,99 @@
 	return newblock;
 }
 
-inline void udf_free_blocks(struct super_block * sb,
-	struct inode * inode,
-	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
+inline void udf_free_blocks(struct super_block *sb,
+			    struct inode *inode,
+			    kernel_lb_addr bloc, uint32_t offset,
+			    uint32_t count)
 {
 	uint16_t partition = bloc.partitionReferenceNum;
 
-	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
-	{
+	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		return udf_bitmap_free_blocks(sb, inode,
-			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
-			bloc, offset, count);
-	}
-	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
-	{
+					      UDF_SB_PARTMAPS(sb)[partition].
+					      s_uspace.s_bitmap, bloc, offset,
+					      count);
+	} else if (UDF_SB_PARTFLAGS(sb, partition) &
+		   UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_free_blocks(sb, inode,
-			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
-			bloc, offset, count);
-	}
-	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
-	{
+					     UDF_SB_PARTMAPS(sb)[partition].
+					     s_uspace.s_table, bloc, offset,
+					     count);
+	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_free_blocks(sb, inode,
-			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
-			bloc, offset, count);
-	}
-	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
-	{
+					      UDF_SB_PARTMAPS(sb)[partition].
+					      s_fspace.s_bitmap, bloc, offset,
+					      count);
+	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_free_blocks(sb, inode,
-			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
-			bloc, offset, count);
-	}
-	else
+					     UDF_SB_PARTMAPS(sb)[partition].
+					     s_fspace.s_table, bloc, offset,
+					     count);
+	} else
 		return;
 }
 
-inline int udf_prealloc_blocks(struct super_block * sb,
-	struct inode * inode,
-	uint16_t partition, uint32_t first_block, uint32_t block_count)
+inline int udf_prealloc_blocks(struct super_block *sb,
+			       struct inode *inode,
+			       uint16_t partition, uint32_t first_block,
+			       uint32_t block_count)
 {
-	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
-	{
+	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		return udf_bitmap_prealloc_blocks(sb, inode,
-			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
-			partition, first_block, block_count);
-	}
-	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
-	{
+						  UDF_SB_PARTMAPS(sb)
+						  [partition].s_uspace.s_bitmap,
+						  partition, first_block,
+						  block_count);
+	} else if (UDF_SB_PARTFLAGS(sb, partition) &
+		   UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_prealloc_blocks(sb, inode,
-			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
-			partition, first_block, block_count);
-	}
-	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
-	{
+						 UDF_SB_PARTMAPS(sb)[partition].
+						 s_uspace.s_table, partition,
+						 first_block, block_count);
+	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_prealloc_blocks(sb, inode,
-			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
-			partition, first_block, block_count);
-	}
-	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
-	{
+						  UDF_SB_PARTMAPS(sb)
+						  [partition].s_fspace.s_bitmap,
+						  partition, first_block,
+						  block_count);
+	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_prealloc_blocks(sb, inode,
-			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
-			partition, first_block, block_count);
-	}
-	else
+						 UDF_SB_PARTMAPS(sb)[partition].
+						 s_fspace.s_table, partition,
+						 first_block, block_count);
+	} else
 		return 0;
 }
 
-inline int udf_new_block(struct super_block * sb,
-	struct inode * inode,
-	uint16_t partition, uint32_t goal, int *err)
+inline int udf_new_block(struct super_block *sb,
+			 struct inode *inode,
+			 uint16_t partition, uint32_t goal, int *err)
 {
 	int ret;
 
-	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
-	{
+	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		ret = udf_bitmap_new_block(sb, inode,
-			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
-			partition, goal, err);
+					   UDF_SB_PARTMAPS(sb)[partition].
+					   s_uspace.s_bitmap, partition, goal,
+					   err);
 		return ret;
-	}
-	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
-	{
+	} else if (UDF_SB_PARTFLAGS(sb, partition) &
+		   UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_new_block(sb, inode,
-			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
-			partition, goal, err);
-	}
-	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
-	{
+					   UDF_SB_PARTMAPS(sb)[partition].
+					   s_uspace.s_table, partition, goal,
+					   err);
+	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_new_block(sb, inode,
-			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
-			partition, goal, err);
-	}
-	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
-	{
+					    UDF_SB_PARTMAPS(sb)[partition].
+					    s_fspace.s_bitmap, partition, goal,
+					    err);
+	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_new_block(sb, inode,
-			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
-			partition, goal, err);
-	}
-	else
-	{
+					   UDF_SB_PARTMAPS(sb)[partition].
+					   s_fspace.s_table, partition, goal,
+					   err);
+	} else {
 		*err = -EIO;
 		return 0;
 	}
diff --git a/fs/udf/crc.c b/fs/udf/crc.c
index ef2bfaa..ae3d497 100644
--- a/fs/udf/crc.c
+++ b/fs/udf/crc.c
@@ -79,8 +79,7 @@
  *	July 21, 1997 - Andrew E. Mileski
  *	Adapted from OSTA-UDF(tm) 1.50 standard.
  */
-uint16_t
-udf_crc(uint8_t *data, uint32_t size, uint16_t crc)
+uint16_t udf_crc(uint8_t * data, uint32_t size, uint16_t crc)
 {
 	while (size--)
 		crc = crc_table[(crc >> 8 ^ *(data++)) & 0xffU] ^ (crc << 8);
@@ -112,7 +111,7 @@
 	return 0;
 }
 
-#endif /* defined(TEST) */
+#endif				/* defined(TEST) */
 
 /****************************************************************************/
 #if defined(GENERATE)
@@ -138,7 +137,7 @@
 
 	/* Get the polynomial */
 	sscanf(argv[1], "%lo", &poly);
-	if (poly & 0xffff0000U){
+	if (poly & 0xffff0000U) {
 		fprintf(stderr, "polynomial is too large\n");
 		exit(1);
 	}
@@ -147,22 +146,22 @@
 
 	/* Create a table */
 	printf("static unsigned short crc_table[256] = {\n");
-	for (n = 0; n < 256; n++){
+	for (n = 0; n < 256; n++) {
 		if (n % 8 == 0)
 			printf("\t");
 		crc = n << 8;
-		for (i = 0; i < 8; i++){
-			if(crc & 0x8000U)
+		for (i = 0; i < 8; i++) {
+			if (crc & 0x8000U)
 				crc = (crc << 1) ^ poly;
 			else
 				crc <<= 1;
-		crc &= 0xFFFFU;
+			crc &= 0xFFFFU;
 		}
 		if (n == 255)
 			printf("0x%04xU ", crc);
 		else
 			printf("0x%04xU, ", crc);
-		if(n % 8 == 7)
+		if (n % 8 == 7)
 			printf("\n");
 	}
 	printf("};\n");
@@ -170,4 +169,4 @@
 	return 0;
 }
 
-#endif /* defined(GENERATE) */
+#endif				/* defined(GENERATE) */
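
The table-driven CRC that udf_crc() applies above can be exercised outside the kernel. What follows is a minimal standalone sketch, not part of the patch: it rebuilds the 256-entry table with the same shift-and-xor loop as the GENERATE branch, assuming the CRC-CCITT generator polynomial 0x1021 (the polynomial the UDF table is taken to be generated from), and then folds data bytes exactly the way udf_crc() indexes crc_table.

#include <stdio.h>
#include <stdint.h>

static uint16_t crc_table[256];

/* Build the 256-entry lookup table, mirroring the GENERATE branch above
 * with the CCITT polynomial 0x1021 filled in. */
static void build_crc_table(void)
{
	unsigned int n, i, crc;

	for (n = 0; n < 256; n++) {
		crc = n << 8;
		for (i = 0; i < 8; i++) {
			if (crc & 0x8000U)
				crc = (crc << 1) ^ 0x1021U;
			else
				crc <<= 1;
			crc &= 0xFFFFU;
		}
		crc_table[n] = crc;
	}
}

/* Same byte-at-a-time folding as udf_crc(): index the table with the high
 * CRC byte xor'ed with the next data byte, shift the low byte up. */
static uint16_t crc16_ccitt(const uint8_t *data, size_t size, uint16_t crc)
{
	while (size--)
		crc = crc_table[((crc >> 8) ^ *data++) & 0xffU] ^ (crc << 8);
	return crc;
}

int main(void)
{
	const uint8_t bytes[] = { 0x70, 0x6A, 0x77 };	/* arbitrary sample input */

	build_crc_table();
	printf("crc = 0x%04x\n", crc16_ccitt(bytes, sizeof(bytes), 0));
	return 0;
}
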
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index e45f86b..79bab9f 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -43,10 +43,10 @@
 /* readdir and lookup functions */
 
 const struct file_operations udf_dir_operations = {
-	.read			= generic_read_dir,
-	.readdir		= udf_readdir,
-	.ioctl			= udf_ioctl,
-	.fsync			= udf_fsync_file,
+	.read = generic_read_dir,
+	.readdir = udf_readdir,
+	.ioctl = udf_ioctl,
+	.fsync = udf_fsync_file,
 };
 
 /*
@@ -82,26 +82,26 @@
 
 	lock_kernel();
 
-	if ( filp->f_pos == 0 ) 
-	{
-		if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0)
-		{
+	if (filp->f_pos == 0) {
+		if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) <
+		    0) {
 			unlock_kernel();
 			return 0;
 		}
-		filp->f_pos ++;
+		filp->f_pos++;
 	}
 
 	result = do_udf_readdir(dir, filp, filldir, dirent);
 	unlock_kernel();
- 	return result;
+	return result;
 }
 
-static int 
-do_udf_readdir(struct inode * dir, struct file *filp, filldir_t filldir, void *dirent)
+static int
+do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir,
+	       void *dirent)
 {
 	struct udf_fileident_bh fibh;
-	struct fileIdentDesc *fi=NULL;
+	struct fileIdentDesc *fi = NULL;
 	struct fileIdentDesc cfi;
 	int block, iblock;
 	loff_t nf_pos = filp->f_pos - 1;
@@ -117,7 +117,7 @@
 	sector_t offset;
 	int i, num;
 	unsigned int dt_type;
-	struct extent_position epos = { NULL, 0, {0, 0}};
+	struct extent_position epos = { NULL, 0, {0, 0} };
 
 	if (nf_pos >= size)
 		return 0;
@@ -125,65 +125,61 @@
 	if (nf_pos == 0)
 		nf_pos = (udf_ext0_offset(dir) >> 2);
 
-	fibh.soffset = fibh.eoffset = (nf_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
+	fibh.soffset = fibh.eoffset =
+	    (nf_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
 	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
 		fibh.sbh = fibh.ebh = NULL;
 	else if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2),
-		&epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
-	{
+			    &epos, &eloc, &elen,
+			    &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
 		block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
-		if ((++offset << dir->i_sb->s_blocksize_bits) < elen)
-		{
+		if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
 			if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
 				epos.offset -= sizeof(short_ad);
 			else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
 				epos.offset -= sizeof(long_ad);
-		}
-		else
+		} else
 			offset = 0;
 
-		if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
-		{
+		if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
 			brelse(epos.bh);
 			return -EIO;
 		}
-	
-		if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9))-1)))
-		{
+
+		if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
 			i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
-			if (i+offset > (elen >> dir->i_sb->s_blocksize_bits))
-				i = (elen >> dir->i_sb->s_blocksize_bits)-offset;
-			for (num=0; i>0; i--)
-			{
-				block = udf_get_lb_pblock(dir->i_sb, eloc, offset+i);
+			if (i + offset > (elen >> dir->i_sb->s_blocksize_bits))
+				i = (elen >> dir->i_sb->s_blocksize_bits) -
+				    offset;
+			for (num = 0; i > 0; i--) {
+				block =
+				    udf_get_lb_pblock(dir->i_sb, eloc,
+						      offset + i);
 				tmp = udf_tgetblk(dir->i_sb, block);
-				if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
+				if (tmp && !buffer_uptodate(tmp)
+				    && !buffer_locked(tmp))
 					bha[num++] = tmp;
 				else
 					brelse(tmp);
 			}
-			if (num)
-			{
+			if (num) {
 				ll_rw_block(READA, num, bha);
-				for (i=0; i<num; i++)
+				for (i = 0; i < num; i++)
 					brelse(bha[i]);
 			}
 		}
-	}
-	else
-	{
+	} else {
 		brelse(epos.bh);
 		return -ENOENT;
 	}
 
-	while ( nf_pos < size )
-	{
+	while (nf_pos < size) {
 		filp->f_pos = nf_pos + 1;
 
-		fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc, &elen, &offset);
+		fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
+					&elen, &offset);
 
-		if (!fi)
-		{
+		if (!fi) {
 			if (fibh.sbh != fibh.ebh)
 				brelse(fibh.ebh);
 			brelse(fibh.sbh);
@@ -196,43 +192,41 @@
 
 		if (fibh.sbh == fibh.ebh)
 			nameptr = fi->fileIdent + liu;
-		else
-		{
+		else {
 			int poffset;	/* Unpadded ending offset */
 
-			poffset = fibh.soffset + sizeof(struct fileIdentDesc) + liu + lfi;
+			poffset =
+			    fibh.soffset + sizeof(struct fileIdentDesc) + liu +
+			    lfi;
 
 			if (poffset >= lfi)
-				nameptr = (char *)(fibh.ebh->b_data + poffset - lfi);
-			else
-			{
+				nameptr =
+				    (char *)(fibh.ebh->b_data + poffset - lfi);
+			else {
 				nameptr = fname;
-				memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
-				memcpy(nameptr + lfi - poffset, fibh.ebh->b_data, poffset);
+				memcpy(nameptr, fi->fileIdent + liu,
+				       lfi - poffset);
+				memcpy(nameptr + lfi - poffset,
+				       fibh.ebh->b_data, poffset);
 			}
 		}
 
-		if ( (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
-		{
-			if ( !UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE) )
-				continue;
-		}
-		
-		if ( (cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0 )
-		{
-			if ( !UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE) )
+		if ((cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
+			if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
 				continue;
 		}
 
-		if ( cfi.fileCharacteristics & FID_FILE_CHAR_PARENT )
-		{
+		if ((cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
+			if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
+				continue;
+		}
+
+		if (cfi.fileCharacteristics & FID_FILE_CHAR_PARENT) {
 			iblock = parent_ino(filp->f_path.dentry);
 			flen = 2;
 			memcpy(fname, "..", flen);
 			dt_type = DT_DIR;
-		}
-		else
-		{
+		} else {
 			kernel_lb_addr tloc = lelb_to_cpu(cfi.icb.extLocation);
 
 			iblock = udf_get_lb_pblock(dir->i_sb, tloc, 0);
@@ -240,18 +234,18 @@
 			dt_type = DT_UNKNOWN;
 		}
 
-		if (flen)
-		{
-			if (filldir(dirent, fname, flen, filp->f_pos, iblock, dt_type) < 0)
-			{
+		if (flen) {
+			if (filldir
+			    (dirent, fname, flen, filp->f_pos, iblock,
+			     dt_type) < 0) {
 				if (fibh.sbh != fibh.ebh)
 					brelse(fibh.ebh);
 				brelse(fibh.sbh);
 				brelse(epos.bh);
-	 			return 0;
+				return 0;
 			}
 		}
-	} /* end while */
+	}			/* end while */
 
 	filp->f_pos = nf_pos + 1;
 
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index 198caa3..8adc77c 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -19,10 +19,10 @@
 #include <linux/buffer_head.h>
 
 #if 0
-static uint8_t *
-udf_filead_read(struct inode *dir, uint8_t *tmpad, uint8_t ad_size,
-		kernel_lb_addr fe_loc, int *pos, int *offset,
-		struct buffer_head **bh, int *error)
+static uint8_t *udf_filead_read(struct inode *dir, uint8_t * tmpad,
+				uint8_t ad_size, kernel_lb_addr fe_loc,
+				int *pos, int *offset, struct buffer_head **bh,
+				int *error)
 {
 	int loffset = *offset;
 	int block;
@@ -31,31 +31,27 @@
 
 	*error = 0;
 
-	ad = (uint8_t *)(*bh)->b_data + *offset;
+	ad = (uint8_t *) (*bh)->b_data + *offset;
 	*offset += ad_size;
 
-	if (!ad)
-	{
+	if (!ad) {
 		brelse(*bh);
 		*error = 1;
 		return NULL;
 	}
 
-	if (*offset == dir->i_sb->s_blocksize)
-	{
+	if (*offset == dir->i_sb->s_blocksize) {
 		brelse(*bh);
 		block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos);
 		if (!block)
 			return NULL;
 		if (!(*bh = udf_tread(dir->i_sb, block)))
 			return NULL;
-	}
-	else if (*offset > dir->i_sb->s_blocksize)
-	{
+	} else if (*offset > dir->i_sb->s_blocksize) {
 		ad = tmpad;
 
 		remainder = dir->i_sb->s_blocksize - loffset;
-		memcpy((uint8_t *)ad, (*bh)->b_data + loffset, remainder);
+		memcpy((uint8_t *) ad, (*bh)->b_data + loffset, remainder);
 
 		brelse(*bh);
 		block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos);
@@ -64,56 +60,56 @@
 		if (!((*bh) = udf_tread(dir->i_sb, block)))
 			return NULL;
 
-		memcpy((uint8_t *)ad + remainder, (*bh)->b_data, ad_size - remainder);
+		memcpy((uint8_t *) ad + remainder, (*bh)->b_data,
+		       ad_size - remainder);
 		*offset = ad_size - remainder;
 	}
 	return ad;
 }
 #endif
 
-struct fileIdentDesc *
-udf_fileident_read(struct inode *dir, loff_t *nf_pos,
-	struct udf_fileident_bh *fibh,
-	struct fileIdentDesc *cfi,
-	struct extent_position *epos,
-	kernel_lb_addr *eloc, uint32_t *elen,
-	sector_t *offset)
+struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos,
+					 struct udf_fileident_bh *fibh,
+					 struct fileIdentDesc *cfi,
+					 struct extent_position *epos,
+					 kernel_lb_addr * eloc, uint32_t * elen,
+					 sector_t * offset)
 {
 	struct fileIdentDesc *fi;
 	int i, num, block;
-	struct buffer_head * tmp, * bha[16];
+	struct buffer_head *tmp, *bha[16];
 
 	fibh->soffset = fibh->eoffset;
 
-	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
-	{
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
 		fi = udf_get_fileident(UDF_I_DATA(dir) -
-			(UDF_I_EFE(dir) ?
-				sizeof(struct extendedFileEntry) :
-				sizeof(struct fileEntry)),
-			dir->i_sb->s_blocksize, &(fibh->eoffset));
+				       (UDF_I_EFE(dir) ?
+					sizeof(struct extendedFileEntry) :
+					sizeof(struct fileEntry)),
+				       dir->i_sb->s_blocksize,
+				       &(fibh->eoffset));
 
 		if (!fi)
 			return NULL;
 
 		*nf_pos += ((fibh->eoffset - fibh->soffset) >> 2);
 
-		memcpy((uint8_t *)cfi, (uint8_t *)fi, sizeof(struct fileIdentDesc));
+		memcpy((uint8_t *) cfi, (uint8_t *) fi,
+		       sizeof(struct fileIdentDesc));
 
 		return fi;
 	}
 
-	if (fibh->eoffset == dir->i_sb->s_blocksize)
-	{
+	if (fibh->eoffset == dir->i_sb->s_blocksize) {
 		int lextoffset = epos->offset;
 
 		if (udf_next_aext(dir, epos, eloc, elen, 1) !=
-			(EXT_RECORDED_ALLOCATED >> 30))
+		    (EXT_RECORDED_ALLOCATED >> 30))
 			return NULL;
 
 		block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset);
 
-		(*offset) ++;
+		(*offset)++;
 
 		if ((*offset << dir->i_sb->s_blocksize_bits) >= *elen)
 			*offset = 0;
@@ -125,57 +121,57 @@
 			return NULL;
 		fibh->soffset = fibh->eoffset = 0;
 
-		if (!(*offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9))-1)))
+		if (!
+		    (*offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1)))
 		{
 			i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
-			if (i+*offset > (*elen >> dir->i_sb->s_blocksize_bits))
-				i = (*elen >> dir->i_sb->s_blocksize_bits)-*offset;
-			for (num=0; i>0; i--)
-			{
-				block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset+i);
+			if (i + *offset >
+			    (*elen >> dir->i_sb->s_blocksize_bits))
+				i = (*elen >> dir->i_sb->s_blocksize_bits) -
+				    *offset;
+			for (num = 0; i > 0; i--) {
+				block =
+				    udf_get_lb_pblock(dir->i_sb, *eloc,
+						      *offset + i);
 				tmp = udf_tgetblk(dir->i_sb, block);
-				if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
+				if (tmp && !buffer_uptodate(tmp)
+				    && !buffer_locked(tmp))
 					bha[num++] = tmp;
 				else
 					brelse(tmp);
 			}
-			if (num)
-			{
+			if (num) {
 				ll_rw_block(READA, num, bha);
-				for (i=0; i<num; i++)
+				for (i = 0; i < num; i++)
 					brelse(bha[i]);
 			}
 		}
-	}
-	else if (fibh->sbh != fibh->ebh)
-	{
+	} else if (fibh->sbh != fibh->ebh) {
 		brelse(fibh->sbh);
 		fibh->sbh = fibh->ebh;
 	}
 
 	fi = udf_get_fileident(fibh->sbh->b_data, dir->i_sb->s_blocksize,
-		&(fibh->eoffset));
+			       &(fibh->eoffset));
 
 	if (!fi)
 		return NULL;
 
 	*nf_pos += ((fibh->eoffset - fibh->soffset) >> 2);
 
-	if (fibh->eoffset <= dir->i_sb->s_blocksize)
-	{
-		memcpy((uint8_t *)cfi, (uint8_t *)fi, sizeof(struct fileIdentDesc));
-	}
-	else if (fibh->eoffset > dir->i_sb->s_blocksize)
-	{
+	if (fibh->eoffset <= dir->i_sb->s_blocksize) {
+		memcpy((uint8_t *) cfi, (uint8_t *) fi,
+		       sizeof(struct fileIdentDesc));
+	} else if (fibh->eoffset > dir->i_sb->s_blocksize) {
 		int lextoffset = epos->offset;
 
 		if (udf_next_aext(dir, epos, eloc, elen, 1) !=
-			(EXT_RECORDED_ALLOCATED >> 30))
+		    (EXT_RECORDED_ALLOCATED >> 30))
 			return NULL;
 
 		block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset);
 
-		(*offset) ++;
+		(*offset)++;
 
 		if ((*offset << dir->i_sb->s_blocksize_bits) >= *elen)
 			*offset = 0;
@@ -188,62 +184,62 @@
 		if (!(fibh->ebh = udf_tread(dir->i_sb, block)))
 			return NULL;
 
-		if (sizeof(struct fileIdentDesc) > - fibh->soffset)
-		{
+		if (sizeof(struct fileIdentDesc) > -fibh->soffset) {
 			int fi_len;
 
-			memcpy((uint8_t *)cfi, (uint8_t *)fi, - fibh->soffset);
-			memcpy((uint8_t *)cfi - fibh->soffset, fibh->ebh->b_data,
-				sizeof(struct fileIdentDesc) + fibh->soffset);
+			memcpy((uint8_t *) cfi, (uint8_t *) fi, -fibh->soffset);
+			memcpy((uint8_t *) cfi - fibh->soffset,
+			       fibh->ebh->b_data,
+			       sizeof(struct fileIdentDesc) + fibh->soffset);
 
-			fi_len = (sizeof(struct fileIdentDesc) + cfi->lengthFileIdent +
-				le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
+			fi_len =
+			    (sizeof(struct fileIdentDesc) +
+			     cfi->lengthFileIdent +
+			     le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
 
-			*nf_pos += ((fi_len - (fibh->eoffset - fibh->soffset)) >> 2);
+			*nf_pos +=
+			    ((fi_len - (fibh->eoffset - fibh->soffset)) >> 2);
 			fibh->eoffset = fibh->soffset + fi_len;
-		}
-		else
-		{
-			memcpy((uint8_t *)cfi, (uint8_t *)fi, sizeof(struct fileIdentDesc));
+		} else {
+			memcpy((uint8_t *) cfi, (uint8_t *) fi,
+			       sizeof(struct fileIdentDesc));
 		}
 	}
 	return fi;
 }
 
-struct fileIdentDesc * 
-udf_get_fileident(void * buffer, int bufsize, int * offset)
+struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize, int *offset)
 {
 	struct fileIdentDesc *fi;
 	int lengthThisIdent;
-	uint8_t * ptr;
+	uint8_t *ptr;
 	int padlen;
 
-	if ( (!buffer) || (!offset) ) {
-		udf_debug("invalidparms\n, buffer=%p, offset=%p\n", buffer, offset);
+	if ((!buffer) || (!offset)) {
+		udf_debug("invalidparms\n, buffer=%p, offset=%p\n", buffer,
+			  offset);
 		return NULL;
 	}
 
 	ptr = buffer;
 
-	if ( (*offset > 0) && (*offset < bufsize) ) {
+	if ((*offset > 0) && (*offset < bufsize)) {
 		ptr += *offset;
 	}
-	fi=(struct fileIdentDesc *)ptr;
-	if (le16_to_cpu(fi->descTag.tagIdent) != TAG_IDENT_FID)
-	{
+	fi = (struct fileIdentDesc *)ptr;
+	if (le16_to_cpu(fi->descTag.tagIdent) != TAG_IDENT_FID) {
 		udf_debug("0x%x != TAG_IDENT_FID\n",
-			le16_to_cpu(fi->descTag.tagIdent));
+			  le16_to_cpu(fi->descTag.tagIdent));
 		udf_debug("offset: %u sizeof: %lu bufsize: %u\n",
-			*offset, (unsigned long)sizeof(struct fileIdentDesc), bufsize);
+			  *offset, (unsigned long)sizeof(struct fileIdentDesc),
+			  bufsize);
 		return NULL;
 	}
-	if ( (*offset + sizeof(struct fileIdentDesc)) > bufsize )
-	{
+	if ((*offset + sizeof(struct fileIdentDesc)) > bufsize) {
 		lengthThisIdent = sizeof(struct fileIdentDesc);
-	}
-	else
+	} else
 		lengthThisIdent = sizeof(struct fileIdentDesc) +
-			fi->lengthFileIdent + le16_to_cpu(fi->lengthOfImpUse);
+		    fi->lengthFileIdent + le16_to_cpu(fi->lengthOfImpUse);
 
 	/* we need to figure padding, too! */
 	padlen = lengthThisIdent % UDF_NAME_PAD;
@@ -255,56 +251,53 @@
 }
 
 #if 0
-static extent_ad *
-udf_get_fileextent(void * buffer, int bufsize, int * offset)
+static extent_ad *udf_get_fileextent(void *buffer, int bufsize, int *offset)
 {
-	extent_ad * ext;
+	extent_ad *ext;
 	struct fileEntry *fe;
-	uint8_t * ptr;
+	uint8_t *ptr;
 
-	if ( (!buffer) || (!offset) )
-	{
+	if ((!buffer) || (!offset)) {
 		printk(KERN_ERR "udf: udf_get_fileextent() invalidparms\n");
 		return NULL;
 	}
 
 	fe = (struct fileEntry *)buffer;
 
-	if ( le16_to_cpu(fe->descTag.tagIdent) != TAG_IDENT_FE )
-	{
+	if (le16_to_cpu(fe->descTag.tagIdent) != TAG_IDENT_FE) {
 		udf_debug("0x%x != TAG_IDENT_FE\n",
-			le16_to_cpu(fe->descTag.tagIdent));
+			  le16_to_cpu(fe->descTag.tagIdent));
 		return NULL;
 	}
 
-	ptr=(uint8_t *)(fe->extendedAttr) + le32_to_cpu(fe->lengthExtendedAttr);
+	ptr =
+	    (uint8_t *) (fe->extendedAttr) +
+	    le32_to_cpu(fe->lengthExtendedAttr);
 
-	if ( (*offset > 0) && (*offset < le32_to_cpu(fe->lengthAllocDescs)) )
-	{
+	if ((*offset > 0) && (*offset < le32_to_cpu(fe->lengthAllocDescs))) {
 		ptr += *offset;
 	}
 
-	ext = (extent_ad *)ptr;
+	ext = (extent_ad *) ptr;
 
 	*offset = *offset + sizeof(extent_ad);
 	return ext;
 }
 #endif
 
-short_ad *
-udf_get_fileshortad(uint8_t *ptr, int maxoffset, int *offset, int inc)
+short_ad *udf_get_fileshortad(uint8_t * ptr, int maxoffset, int *offset,
+			      int inc)
 {
 	short_ad *sa;
 
-	if ( (!ptr) || (!offset) )
-	{
+	if ((!ptr) || (!offset)) {
 		printk(KERN_ERR "udf: udf_get_fileshortad() invalidparms\n");
 		return NULL;
 	}
 
-	if ( (*offset < 0) || ((*offset + sizeof(short_ad)) > maxoffset) )
+	if ((*offset < 0) || ((*offset + sizeof(short_ad)) > maxoffset))
 		return NULL;
-	else if ((sa = (short_ad *)ptr)->extLength == 0)
+	else if ((sa = (short_ad *) ptr)->extLength == 0)
 		return NULL;
 
 	if (inc)
@@ -312,20 +305,18 @@
 	return sa;
 }
 
-long_ad *
-udf_get_filelongad(uint8_t *ptr, int maxoffset, int * offset, int inc)
+long_ad *udf_get_filelongad(uint8_t * ptr, int maxoffset, int *offset, int inc)
 {
 	long_ad *la;
 
-	if ( (!ptr) || (!offset) ) 
-	{
+	if ((!ptr) || (!offset)) {
 		printk(KERN_ERR "udf: udf_get_filelongad() invalidparms\n");
 		return NULL;
 	}
 
-	if ( (*offset < 0) || ((*offset + sizeof(long_ad)) > maxoffset) )
+	if ((*offset < 0) || ((*offset + sizeof(long_ad)) > maxoffset))
 		return NULL;
-	else if ((la = (long_ad *)ptr)->extLength == 0)
+	else if ((la = (long_ad *) ptr)->extLength == 0)
 		return NULL;
 
 	if (inc)
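
udf_get_fileident() computes each entry's length as the fixed descriptor header plus the identifier and implementation-use bytes, rounded up to a UDF_NAME_PAD boundary before advancing *offset. A minimal sketch of that arithmetic, assuming UDF_NAME_PAD is 4 as in the UDF headers:

#include <stdio.h>

#define UDF_NAME_PAD 4	/* assumed value of the kernel macro */

/* Mirror the padlen adjustment in udf_get_fileident(): total the pieces of
 * a file identifier descriptor and round up to the next pad boundary. */
static int fid_padded_len(int header_len, int ident_len, int impuse_len)
{
	int len = header_len + ident_len + impuse_len;
	int padlen = len % UDF_NAME_PAD;

	if (padlen)
		len += UDF_NAME_PAD - padlen;
	return len;		/* equivalently: (len + 3) & ~3 */
}

int main(void)
{
	/* e.g. a 38-byte fixed part, a 5-byte name, no imp-use area -> 44 */
	printf("%d\n", fid_padded_len(38, 5, 0));
	return 0;
}
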
diff --git a/fs/udf/ecma_167.h b/fs/udf/ecma_167.h
index f81f2eb..294ce2d 100644
--- a/fs/udf/ecma_167.h
+++ b/fs/udf/ecma_167.h
@@ -38,10 +38,9 @@
 #define _ECMA_167_H 1
 
 /* Character set specification (ECMA 167r3 1/7.2.1) */
-typedef struct
-{
-	uint8_t		charSetType;
-	uint8_t		charSetInfo[63];
+typedef struct {
+	uint8_t charSetType;
+	uint8_t charSetInfo[63];
 } __attribute__ ((packed)) charspec;
 
 /* Character Set Type (ECMA 167r3 1/7.2.1.1) */
@@ -55,35 +54,33 @@
 #define CHARSPEC_TYPE_CS7		0x07	/* (1/7.2.9) */
 #define CHARSPEC_TYPE_CS8		0x08	/* (1/7.2.10) */
 
-typedef uint8_t		dstring;
+typedef uint8_t dstring;
 
 /* Timestamp (ECMA 167r3 1/7.3) */
-typedef struct
-{
-	__le16		typeAndTimezone;
-	__le16		year;
-	uint8_t		month;
-	uint8_t		day;
-	uint8_t		hour;
-	uint8_t		minute;
-	uint8_t		second;
-	uint8_t		centiseconds;
-	uint8_t		hundredsOfMicroseconds;
-	uint8_t		microseconds;
+typedef struct {
+	__le16 typeAndTimezone;
+	__le16 year;
+	uint8_t month;
+	uint8_t day;
+	uint8_t hour;
+	uint8_t minute;
+	uint8_t second;
+	uint8_t centiseconds;
+	uint8_t hundredsOfMicroseconds;
+	uint8_t microseconds;
 } __attribute__ ((packed)) timestamp;
 
-typedef struct
-{
-	uint16_t	typeAndTimezone;
-	int16_t		year;
-	uint8_t		month;
-	uint8_t		day;
-	uint8_t		hour;
-	uint8_t		minute;
-	uint8_t		second;
-	uint8_t		centiseconds;
-	uint8_t		hundredsOfMicroseconds;
-	uint8_t		microseconds;
+typedef struct {
+	uint16_t typeAndTimezone;
+	int16_t year;
+	uint8_t month;
+	uint8_t day;
+	uint8_t hour;
+	uint8_t minute;
+	uint8_t second;
+	uint8_t centiseconds;
+	uint8_t hundredsOfMicroseconds;
+	uint8_t microseconds;
 } __attribute__ ((packed)) kernel_timestamp;
 
 /* Type and Time Zone (ECMA 167r3 1/7.3.1) */
@@ -94,11 +91,10 @@
 #define TIMESTAMP_TIMEZONE_MASK		0x0FFF
 
 /* Entity identifier (ECMA 167r3 1/7.4) */
-typedef struct
-{
-	uint8_t		flags;
-	uint8_t		ident[23];
-	uint8_t		identSuffix[8];
+typedef struct {
+	uint8_t flags;
+	uint8_t ident[23];
+	uint8_t identSuffix[8];
 } __attribute__ ((packed)) regid;
 
 /* Flags (ECMA 167r3 1/7.4.1) */
@@ -107,12 +103,11 @@
 
 /* Volume Structure Descriptor (ECMA 167r3 2/9.1) */
 #define VSD_STD_ID_LEN			5
-struct volStructDesc
-{
-	uint8_t		structType;
-	uint8_t		stdIdent[VSD_STD_ID_LEN];
-	uint8_t		structVersion;
-	uint8_t		structData[2041];
+struct volStructDesc {
+	uint8_t structType;
+	uint8_t stdIdent[VSD_STD_ID_LEN];
+	uint8_t structVersion;
+	uint8_t structData[2041];
 } __attribute__ ((packed));
 
 /* Standard Identifier (ECMA 167r2 2/9.1.2) */
@@ -127,69 +122,63 @@
 #define VSD_STD_ID_TEA01		"TEA01"	/* (2/9.3) */
 
 /* Beginning Extended Area Descriptor (ECMA 167r3 2/9.2) */
-struct beginningExtendedAreaDesc
-{
-	uint8_t		structType;
-	uint8_t		stdIdent[VSD_STD_ID_LEN];
-	uint8_t		structVersion;
-	uint8_t		structData[2041];
+struct beginningExtendedAreaDesc {
+	uint8_t structType;
+	uint8_t stdIdent[VSD_STD_ID_LEN];
+	uint8_t structVersion;
+	uint8_t structData[2041];
 } __attribute__ ((packed));
 
 /* Terminating Extended Area Descriptor (ECMA 167r3 2/9.3) */
-struct terminatingExtendedAreaDesc
-{
-	uint8_t		structType;
-	uint8_t		stdIdent[VSD_STD_ID_LEN];
-	uint8_t		structVersion;
-	uint8_t		structData[2041];
+struct terminatingExtendedAreaDesc {
+	uint8_t structType;
+	uint8_t stdIdent[VSD_STD_ID_LEN];
+	uint8_t structVersion;
+	uint8_t structData[2041];
 } __attribute__ ((packed));
 
 /* Boot Descriptor (ECMA 167r3 2/9.4) */
-struct bootDesc
-{
-	uint8_t		structType;
-	uint8_t		stdIdent[VSD_STD_ID_LEN];
-	uint8_t		structVersion;
-	uint8_t		reserved1;
-	regid		archType;
-	regid		bootIdent;
-	__le32		bootExtLocation;
-	__le32		bootExtLength;
-	__le64		loadAddress;
-	__le64		startAddress;
-	timestamp	descCreationDateAndTime;
-	__le16		flags;
-	uint8_t		reserved2[32];
-	uint8_t		bootUse[1906];
+struct bootDesc {
+	uint8_t structType;
+	uint8_t stdIdent[VSD_STD_ID_LEN];
+	uint8_t structVersion;
+	uint8_t reserved1;
+	regid archType;
+	regid bootIdent;
+	__le32 bootExtLocation;
+	__le32 bootExtLength;
+	__le64 loadAddress;
+	__le64 startAddress;
+	timestamp descCreationDateAndTime;
+	__le16 flags;
+	uint8_t reserved2[32];
+	uint8_t bootUse[1906];
 } __attribute__ ((packed));
 
 /* Flags (ECMA 167r3 2/9.4.12) */
 #define BOOT_FLAGS_ERASE		0x01
 
 /* Extent Descriptor (ECMA 167r3 3/7.1) */
-typedef struct
-{
-	__le32		extLength;
-	__le32		extLocation;
+typedef struct {
+	__le32 extLength;
+	__le32 extLocation;
 } __attribute__ ((packed)) extent_ad;
 
-typedef struct
-{
-	uint32_t	extLength;
-	uint32_t	extLocation;
+typedef struct {
+	uint32_t extLength;
+	uint32_t extLocation;
 } kernel_extent_ad;
 
 /* Descriptor Tag (ECMA 167r3 3/7.2) */
-typedef struct
-{
-	__le16		tagIdent;
-	__le16		descVersion;
-	uint8_t		tagChecksum;
-	uint8_t		reserved;
-	__le16		tagSerialNum;
-	__le16		descCRC;
-	__le16		descCRCLength;
-	__le32		tagLocation;
+typedef struct {
+	__le16 tagIdent;
+	__le16 descVersion;
+	uint8_t tagChecksum;
+	uint8_t reserved;
+	__le16 tagSerialNum;
+	__le16 descCRC;
+	__le16 descCRCLength;
+	__le32 tagLocation;
 } __attribute__ ((packed)) tag;
 
 /* Tag Identifier (ECMA 167r3 3/7.2.1) */
@@ -204,87 +193,81 @@
 #define TAG_IDENT_LVID			0x0009
 
 /* NSR Descriptor (ECMA 167r3 3/9.1) */
-struct NSRDesc
-{
-	uint8_t		structType;
-	uint8_t		stdIdent[VSD_STD_ID_LEN];
-	uint8_t		structVersion;
-	uint8_t		reserved;
-	uint8_t		structData[2040];
+struct NSRDesc {
+	uint8_t structType;
+	uint8_t stdIdent[VSD_STD_ID_LEN];
+	uint8_t structVersion;
+	uint8_t reserved;
+	uint8_t structData[2040];
 } __attribute__ ((packed));
-	
+
 /* Primary Volume Descriptor (ECMA 167r3 3/10.1) */
-struct primaryVolDesc
-{
-	tag		descTag;
-	__le32		volDescSeqNum;
-	__le32		primaryVolDescNum;
-	dstring		volIdent[32];
-	__le16		volSeqNum;
-	__le16		maxVolSeqNum;
-	__le16		interchangeLvl;
-	__le16		maxInterchangeLvl;
-	__le32		charSetList;
-	__le32		maxCharSetList;
-	dstring		volSetIdent[128];
-	charspec	descCharSet;
-	charspec	explanatoryCharSet;
-	extent_ad	volAbstract;
-	extent_ad	volCopyright;
-	regid		appIdent;
-	timestamp	recordingDateAndTime;
-	regid		impIdent;
-	uint8_t		impUse[64];
-	__le32		predecessorVolDescSeqLocation;
-	__le16		flags;
-	uint8_t		reserved[22];
+struct primaryVolDesc {
+	tag descTag;
+	__le32 volDescSeqNum;
+	__le32 primaryVolDescNum;
+	dstring volIdent[32];
+	__le16 volSeqNum;
+	__le16 maxVolSeqNum;
+	__le16 interchangeLvl;
+	__le16 maxInterchangeLvl;
+	__le32 charSetList;
+	__le32 maxCharSetList;
+	dstring volSetIdent[128];
+	charspec descCharSet;
+	charspec explanatoryCharSet;
+	extent_ad volAbstract;
+	extent_ad volCopyright;
+	regid appIdent;
+	timestamp recordingDateAndTime;
+	regid impIdent;
+	uint8_t impUse[64];
+	__le32 predecessorVolDescSeqLocation;
+	__le16 flags;
+	uint8_t reserved[22];
 } __attribute__ ((packed));
 
 /* Flags (ECMA 167r3 3/10.1.21) */
 #define PVD_FLAGS_VSID_COMMON		0x0001
 
 /* Anchor Volume Descriptor Pointer (ECMA 167r3 3/10.2) */
-struct anchorVolDescPtr
-{
-	tag		descTag;
-	extent_ad	mainVolDescSeqExt;
-	extent_ad	reserveVolDescSeqExt;
-	uint8_t	 	reserved[480];
+struct anchorVolDescPtr {
+	tag descTag;
+	extent_ad mainVolDescSeqExt;
+	extent_ad reserveVolDescSeqExt;
+	uint8_t reserved[480];
 } __attribute__ ((packed));
 
 /* Volume Descriptor Pointer (ECMA 167r3 3/10.3) */
-struct volDescPtr
-{
-	tag		descTag;
-	__le32		volDescSeqNum;
-	extent_ad	nextVolDescSeqExt;
-	uint8_t		reserved[484];
+struct volDescPtr {
+	tag descTag;
+	__le32 volDescSeqNum;
+	extent_ad nextVolDescSeqExt;
+	uint8_t reserved[484];
 } __attribute__ ((packed));
 
 /* Implementation Use Volume Descriptor (ECMA 167r3 3/10.4) */
-struct impUseVolDesc
-{
-	tag		descTag;
-	__le32		volDescSeqNum;
-	regid		impIdent;
-	uint8_t		impUse[460];
+struct impUseVolDesc {
+	tag descTag;
+	__le32 volDescSeqNum;
+	regid impIdent;
+	uint8_t impUse[460];
 } __attribute__ ((packed));
 
 /* Partition Descriptor (ECMA 167r3 3/10.5) */
-struct partitionDesc
-{
-	tag		descTag;
-	__le32		volDescSeqNum;
-	__le16		partitionFlags;
-	__le16		partitionNumber;
-	regid		partitionContents;
-	uint8_t		partitionContentsUse[128];
-	__le32		accessType;
-	__le32		partitionStartingLocation;
-	__le32		partitionLength;
-	regid		impIdent;
-	uint8_t		impUse[128];
-	uint8_t		reserved[156];
+struct partitionDesc {
+	tag descTag;
+	__le32 volDescSeqNum;
+	__le16 partitionFlags;
+	__le16 partitionNumber;
+	regid partitionContents;
+	uint8_t partitionContentsUse[128];
+	__le32 accessType;
+	__le32 partitionStartingLocation;
+	__le32 partitionLength;
+	regid impIdent;
+	uint8_t impUse[128];
+	uint8_t reserved[156];
 } __attribute__ ((packed));
 
 /* Partition Flags (ECMA 167r3 3/10.5.3) */
@@ -307,29 +290,27 @@
 #define PD_ACCESS_TYPE_OVERWRITABLE	0x00000004
 
 /* Logical Volume Descriptor (ECMA 167r3 3/10.6) */
-struct logicalVolDesc
-{
-	tag		descTag;
-	__le32		volDescSeqNum;
-	charspec	descCharSet;
-	dstring		logicalVolIdent[128];
-	__le32		logicalBlockSize;
-	regid		domainIdent;
-	uint8_t		logicalVolContentsUse[16];
-	__le32		mapTableLength;
-	__le32		numPartitionMaps;
-	regid		impIdent;
-	uint8_t		impUse[128];
-	extent_ad	integritySeqExt;
-	uint8_t		partitionMaps[0];
+struct logicalVolDesc {
+	tag descTag;
+	__le32 volDescSeqNum;
+	charspec descCharSet;
+	dstring logicalVolIdent[128];
+	__le32 logicalBlockSize;
+	regid domainIdent;
+	uint8_t logicalVolContentsUse[16];
+	__le32 mapTableLength;
+	__le32 numPartitionMaps;
+	regid impIdent;
+	uint8_t impUse[128];
+	extent_ad integritySeqExt;
+	uint8_t partitionMaps[0];
 } __attribute__ ((packed));
 
 /* Generic Partition Map (ECMA 167r3 3/10.7.1) */
-struct genericPartitionMap
-{
-	uint8_t		partitionMapType;
-	uint8_t		partitionMapLength;
-	uint8_t		partitionMapping[0];
+struct genericPartitionMap {
+	uint8_t partitionMapType;
+	uint8_t partitionMapLength;
+	uint8_t partitionMapping[0];
 } __attribute__ ((packed));
 
 /* Partition Map Type (ECMA 167r3 3/10.7.1.1) */
@@ -338,51 +319,46 @@
 #define GP_PARTITION_MAP_TYPE_2		0x02
 
 /* Type 1 Partition Map (ECMA 167r3 3/10.7.2) */
-struct genericPartitionMap1
-{
-	uint8_t		partitionMapType;
-	uint8_t		partitionMapLength;
-	__le16		volSeqNum;
-	__le16		partitionNum;
+struct genericPartitionMap1 {
+	uint8_t partitionMapType;
+	uint8_t partitionMapLength;
+	__le16 volSeqNum;
+	__le16 partitionNum;
 } __attribute__ ((packed));
 
 /* Type 2 Partition Map (ECMA 167r3 3/10.7.3) */
-struct genericPartitionMap2
-{
-	uint8_t		partitionMapType;
-	uint8_t		partitionMapLength; 
-	uint8_t		partitionIdent[62];
+struct genericPartitionMap2 {
+	uint8_t partitionMapType;
+	uint8_t partitionMapLength;
+	uint8_t partitionIdent[62];
 } __attribute__ ((packed));
 
 /* Unallocated Space Descriptor (ECMA 167r3 3/10.8) */
-struct unallocSpaceDesc
-{
-	tag		descTag;
-	__le32		volDescSeqNum;
-	__le32		numAllocDescs;
-	extent_ad	allocDescs[0];
+struct unallocSpaceDesc {
+	tag descTag;
+	__le32 volDescSeqNum;
+	__le32 numAllocDescs;
+	extent_ad allocDescs[0];
 } __attribute__ ((packed));
 
 /* Terminating Descriptor (ECMA 167r3 3/10.9) */
-struct terminatingDesc
-{
-	tag		descTag;
-	uint8_t		reserved[496];
+struct terminatingDesc {
+	tag descTag;
+	uint8_t reserved[496];
 } __attribute__ ((packed));
 
 /* Logical Volume Integrity Descriptor (ECMA 167r3 3/10.10) */
-struct logicalVolIntegrityDesc
-{
-	tag		descTag;
-	timestamp	recordingDateAndTime;
-	__le32		integrityType;
-	extent_ad	nextIntegrityExt;
-	uint8_t		logicalVolContentsUse[32];
-	__le32		numOfPartitions;
-	__le32		lengthOfImpUse;
-	__le32		freeSpaceTable[0];
-	__le32		sizeTable[0];
-	uint8_t		impUse[0];
+struct logicalVolIntegrityDesc {
+	tag descTag;
+	timestamp recordingDateAndTime;
+	__le32 integrityType;
+	extent_ad nextIntegrityExt;
+	uint8_t logicalVolContentsUse[32];
+	__le32 numOfPartitions;
+	__le32 lengthOfImpUse;
+	__le32 freeSpaceTable[0];
+	__le32 sizeTable[0];
+	uint8_t impUse[0];
 } __attribute__ ((packed));
 
 /* Integrity Type (ECMA 167r3 3/10.10.3) */
@@ -390,56 +366,49 @@
 #define LVID_INTEGRITY_TYPE_CLOSE	0x00000001
 
 /* Recorded Address (ECMA 167r3 4/7.1) */
-typedef struct 
-{
-	__le32		logicalBlockNum;
-	__le16	 	partitionReferenceNum;
+typedef struct {
+	__le32 logicalBlockNum;
+	__le16 partitionReferenceNum;
 } __attribute__ ((packed)) lb_addr;
 
 /* ... and its in-core analog */
-typedef struct 
-{
-	uint32_t		logicalBlockNum;
-	uint16_t	 	partitionReferenceNum;
+typedef struct {
+	uint32_t logicalBlockNum;
+	uint16_t partitionReferenceNum;
 } kernel_lb_addr;
 
 /* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */
-typedef struct
-{
-        __le32		extLength;
-        __le32		extPosition;
+typedef struct {
+	__le32 extLength;
+	__le32 extPosition;
 } __attribute__ ((packed)) short_ad;
 
 /* Long Allocation Descriptor (ECMA 167r3 4/14.14.2) */
-typedef struct
-{
-	__le32		extLength;
-	lb_addr		extLocation;
-	uint8_t		impUse[6];
+typedef struct {
+	__le32 extLength;
+	lb_addr extLocation;
+	uint8_t impUse[6];
 } __attribute__ ((packed)) long_ad;
 
-typedef struct
-{
-	uint32_t	extLength;
-	kernel_lb_addr	extLocation;
-	uint8_t		impUse[6];
+typedef struct {
+	uint32_t extLength;
+	kernel_lb_addr extLocation;
+	uint8_t impUse[6];
 } kernel_long_ad;
 
 /* Extended Allocation Descriptor (ECMA 167r3 4/14.14.3) */
-typedef struct
-{
-	__le32		extLength;
-	__le32		recordedLength;
-	__le32		informationLength;
-	lb_addr		extLocation;
+typedef struct {
+	__le32 extLength;
+	__le32 recordedLength;
+	__le32 informationLength;
+	lb_addr extLocation;
 } __attribute__ ((packed)) ext_ad;
 
-typedef struct
-{
-	uint32_t	extLength;
-	uint32_t	recordedLength;
-	uint32_t	informationLength;
-	kernel_lb_addr	extLocation;
+typedef struct {
+	uint32_t extLength;
+	uint32_t recordedLength;
+	uint32_t informationLength;
+	kernel_lb_addr extLocation;
 } kernel_ext_ad;
 
 /* Descriptor Tag (ECMA 167r3 4/7.2 - See 3/7.2) */
@@ -458,52 +427,49 @@
 #define TAG_IDENT_EFE			0x010A
 
 /* File Set Descriptor (ECMA 167r3 4/14.1) */
-struct fileSetDesc
-{
-	tag		descTag;
-	timestamp	recordingDateAndTime;
-	__le16		interchangeLvl;
-	__le16		maxInterchangeLvl;
-	__le32		charSetList;
-	__le32		maxCharSetList;
-	__le32		fileSetNum;
-	__le32		fileSetDescNum;
-	charspec	logicalVolIdentCharSet;
-	dstring		logicalVolIdent[128];
-	charspec	fileSetCharSet;
-	dstring		fileSetIdent[32];
-	dstring		copyrightFileIdent[32];
-	dstring		abstractFileIdent[32];
-	long_ad		rootDirectoryICB;
-	regid		domainIdent;
-	long_ad		nextExt;
-	long_ad		streamDirectoryICB;
-	uint8_t		reserved[32];
+struct fileSetDesc {
+	tag descTag;
+	timestamp recordingDateAndTime;
+	__le16 interchangeLvl;
+	__le16 maxInterchangeLvl;
+	__le32 charSetList;
+	__le32 maxCharSetList;
+	__le32 fileSetNum;
+	__le32 fileSetDescNum;
+	charspec logicalVolIdentCharSet;
+	dstring logicalVolIdent[128];
+	charspec fileSetCharSet;
+	dstring fileSetIdent[32];
+	dstring copyrightFileIdent[32];
+	dstring abstractFileIdent[32];
+	long_ad rootDirectoryICB;
+	regid domainIdent;
+	long_ad nextExt;
+	long_ad streamDirectoryICB;
+	uint8_t reserved[32];
 } __attribute__ ((packed));
 
 /* Partition Header Descriptor (ECMA 167r3 4/14.3) */
-struct partitionHeaderDesc
-{
-	short_ad	unallocSpaceTable;
-	short_ad	unallocSpaceBitmap;
-	short_ad	partitionIntegrityTable;
-	short_ad	freedSpaceTable;
-	short_ad	freedSpaceBitmap;
-	uint8_t		reserved[88];
+struct partitionHeaderDesc {
+	short_ad unallocSpaceTable;
+	short_ad unallocSpaceBitmap;
+	short_ad partitionIntegrityTable;
+	short_ad freedSpaceTable;
+	short_ad freedSpaceBitmap;
+	uint8_t reserved[88];
 } __attribute__ ((packed));
 
 /* File Identifier Descriptor (ECMA 167r3 4/14.4) */
-struct fileIdentDesc
-{
-	tag		descTag;
-	__le16		fileVersionNum;
-	uint8_t		fileCharacteristics;
-	uint8_t		lengthFileIdent;
-	long_ad		icb;
-	__le16		lengthOfImpUse;
-	uint8_t		impUse[0];
-	uint8_t		fileIdent[0];
-	uint8_t		padding[0];
+struct fileIdentDesc {
+	tag descTag;
+	__le16 fileVersionNum;
+	uint8_t fileCharacteristics;
+	uint8_t lengthFileIdent;
+	long_ad icb;
+	__le16 lengthOfImpUse;
+	uint8_t impUse[0];
+	uint8_t fileIdent[0];
+	uint8_t padding[0];
 } __attribute__ ((packed));
 
 /* File Characteristics (ECMA 167r3 4/14.4.3) */
@@ -514,24 +480,22 @@
 #define FID_FILE_CHAR_METADATA		0x10
 
 /* Allocation Ext Descriptor (ECMA 167r3 4/14.5) */
-struct allocExtDesc
-{
-	tag		descTag;
-	__le32		previousAllocExtLocation;
-	__le32		lengthAllocDescs;
+struct allocExtDesc {
+	tag descTag;
+	__le32 previousAllocExtLocation;
+	__le32 lengthAllocDescs;
 } __attribute__ ((packed));
 
 /* ICB Tag (ECMA 167r3 4/14.6) */
-typedef struct
-{
-	__le32		priorRecordedNumDirectEntries;
-	__le16		strategyType;
-	__le16		strategyParameter;
-	__le16		numEntries;
-	uint8_t		reserved;
-	uint8_t		fileType;
-	lb_addr		parentICBLocation;
-	__le16		flags;
+typedef struct {
+	__le32 priorRecordedNumDirectEntries;
+	__le16 strategyType;
+	__le16 strategyParameter;
+	__le16 numEntries;
+	uint8_t reserved;
+	uint8_t fileType;
+	lb_addr parentICBLocation;
+	__le16 flags;
 } __attribute__ ((packed)) icbtag;
 
 /* Strategy Type (ECMA 167r3 4/14.6.2) */
@@ -576,45 +540,42 @@
 #define ICBTAG_FLAG_STREAM		0x2000
 
 /* Indirect Entry (ECMA 167r3 4/14.7) */
-struct indirectEntry
-{
-	tag		descTag;
-	icbtag		icbTag;
-	long_ad		indirectICB;
+struct indirectEntry {
+	tag descTag;
+	icbtag icbTag;
+	long_ad indirectICB;
 } __attribute__ ((packed));
 
 /* Terminal Entry (ECMA 167r3 4/14.8) */
-struct terminalEntry
-{
-	tag		descTag;
-	icbtag		icbTag;
+struct terminalEntry {
+	tag descTag;
+	icbtag icbTag;
 } __attribute__ ((packed));
 
 /* File Entry (ECMA 167r3 4/14.9) */
-struct fileEntry
-{
-	tag		descTag;
-	icbtag		icbTag;
-	__le32		uid;
-	__le32		gid;
-	__le32		permissions;
-	__le16		fileLinkCount;
-	uint8_t		recordFormat;
-	uint8_t		recordDisplayAttr;
-	__le32		recordLength;
-	__le64		informationLength;
-	__le64		logicalBlocksRecorded;
-	timestamp	accessTime;
-	timestamp	modificationTime;
-	timestamp	attrTime;
-	__le32		checkpoint;
-	long_ad		extendedAttrICB;
-	regid		impIdent;
-	__le64		uniqueID;
-	__le32		lengthExtendedAttr;
-	__le32		lengthAllocDescs;
-	uint8_t		extendedAttr[0];
-	uint8_t		allocDescs[0];
+struct fileEntry {
+	tag descTag;
+	icbtag icbTag;
+	__le32 uid;
+	__le32 gid;
+	__le32 permissions;
+	__le16 fileLinkCount;
+	uint8_t recordFormat;
+	uint8_t recordDisplayAttr;
+	__le32 recordLength;
+	__le64 informationLength;
+	__le64 logicalBlocksRecorded;
+	timestamp accessTime;
+	timestamp modificationTime;
+	timestamp attrTime;
+	__le32 checkpoint;
+	long_ad extendedAttrICB;
+	regid impIdent;
+	__le64 uniqueID;
+	__le32 lengthExtendedAttr;
+	__le32 lengthAllocDescs;
+	uint8_t extendedAttr[0];
+	uint8_t allocDescs[0];
 } __attribute__ ((packed));
 
 /* Permissions (ECMA 167r3 4/14.9.5) */
@@ -655,57 +616,52 @@
 #define FE_RECORD_DISPLAY_ATTR_3	0x03
 
 /* Extended Attribute Header Descriptor (ECMA 167r3 4/14.10.1) */
-struct extendedAttrHeaderDesc
-{
-	tag		descTag;
-	__le32		impAttrLocation;
-	__le32		appAttrLocation;
+struct extendedAttrHeaderDesc {
+	tag descTag;
+	__le32 impAttrLocation;
+	__le32 appAttrLocation;
 } __attribute__ ((packed));
 
 /* Generic Format (ECMA 167r3 4/14.10.2) */
-struct genericFormat
-{
-	__le32		attrType;
-	uint8_t		attrSubtype;
-	uint8_t		reserved[3];
-	__le32		attrLength;
-	uint8_t		attrData[0];
+struct genericFormat {
+	__le32 attrType;
+	uint8_t attrSubtype;
+	uint8_t reserved[3];
+	__le32 attrLength;
+	uint8_t attrData[0];
 } __attribute__ ((packed));
 
 /* Character Set Information (ECMA 167r3 4/14.10.3) */
-struct charSetInfo
-{
-	__le32		attrType;
-	uint8_t		attrSubtype;
-	uint8_t		reserved[3];
-	__le32		attrLength;
-	__le32		escapeSeqLength;
-	uint8_t		charSetType;
-	uint8_t		escapeSeq[0];
+struct charSetInfo {
+	__le32 attrType;
+	uint8_t attrSubtype;
+	uint8_t reserved[3];
+	__le32 attrLength;
+	__le32 escapeSeqLength;
+	uint8_t charSetType;
+	uint8_t escapeSeq[0];
 } __attribute__ ((packed));
 
 /* Alternate Permissions (ECMA 167r3 4/14.10.4) */
-struct altPerms
-{
-	__le32		attrType;
-	uint8_t		attrSubtype;
-	uint8_t		reserved[3];
-	__le32		attrLength;
-	__le16		ownerIdent;
-	__le16		groupIdent;
-	__le16		permission;
+struct altPerms {
+	__le32 attrType;
+	uint8_t attrSubtype;
+	uint8_t reserved[3];
+	__le32 attrLength;
+	__le16 ownerIdent;
+	__le16 groupIdent;
+	__le16 permission;
 } __attribute__ ((packed));
 
 /* File Times Extended Attribute (ECMA 167r3 4/14.10.5) */
-struct fileTimesExtAttr
-{
-	__le32		attrType;
-	uint8_t		attrSubtype;
-	uint8_t		reserved[3];
-	__le32		attrLength;
-	__le32		dataLength;
-	__le32		fileTimeExistence;
-	uint8_t		fileTimes;
+struct fileTimesExtAttr {
+	__le32 attrType;
+	uint8_t attrSubtype;
+	uint8_t reserved[3];
+	__le32 attrLength;
+	__le32 dataLength;
+	__le32 fileTimeExistence;
+	uint8_t fileTimes;
 } __attribute__ ((packed));
 
 /* FileTimeExistence (ECMA 167r3 4/14.10.5.6) */
@@ -715,52 +671,48 @@
 #define FTE_BACKUP			0x00000002
 
 /* Information Times Extended Attribute (ECMA 167r3 4/14.10.6) */
-struct infoTimesExtAttr
-{
-	__le32		attrType;
-	uint8_t		attrSubtype;
-	uint8_t		reserved[3];
-	__le32		attrLength;
-	__le32		dataLength;
-	__le32		infoTimeExistence;
-	uint8_t		infoTimes[0];
+struct infoTimesExtAttr {
+	__le32 attrType;
+	uint8_t attrSubtype;
+	uint8_t reserved[3];
+	__le32 attrLength;
+	__le32 dataLength;
+	__le32 infoTimeExistence;
+	uint8_t infoTimes[0];
 } __attribute__ ((packed));
 
 /* Device Specification (ECMA 167r3 4/14.10.7) */
-struct deviceSpec
-{
-	__le32		attrType;
-	uint8_t		attrSubtype;
-	uint8_t		reserved[3];
-	__le32		attrLength;
-	__le32		impUseLength;
-	__le32		majorDeviceIdent;
-	__le32		minorDeviceIdent;
-	uint8_t		impUse[0];
+struct deviceSpec {
+	__le32 attrType;
+	uint8_t attrSubtype;
+	uint8_t reserved[3];
+	__le32 attrLength;
+	__le32 impUseLength;
+	__le32 majorDeviceIdent;
+	__le32 minorDeviceIdent;
+	uint8_t impUse[0];
 } __attribute__ ((packed));
 
 /* Implementation Use Extended Attr (ECMA 167r3 4/14.10.8) */
-struct impUseExtAttr
-{
-	__le32		attrType;
-	uint8_t		attrSubtype;
-	uint8_t		reserved[3];
-	__le32		attrLength;
-	__le32		impUseLength;
-	regid		impIdent;
-	uint8_t		impUse[0];
+struct impUseExtAttr {
+	__le32 attrType;
+	uint8_t attrSubtype;
+	uint8_t reserved[3];
+	__le32 attrLength;
+	__le32 impUseLength;
+	regid impIdent;
+	uint8_t impUse[0];
 } __attribute__ ((packed));
 
 /* Application Use Extended Attribute (ECMA 167r3 4/14.10.9) */
-struct appUseExtAttr
-{
-	__le32		attrType;
-	uint8_t		attrSubtype;
-	uint8_t		reserved[3];
-	__le32		attrLength;
-	__le32		appUseLength;
-	regid		appIdent;
-	uint8_t		appUse[0];
+struct appUseExtAttr {
+	__le32 attrType;
+	uint8_t attrSubtype;
+	uint8_t reserved[3];
+	__le32 attrLength;
+	__le32 appUseLength;
+	regid appIdent;
+	uint8_t appUse[0];
 } __attribute__ ((packed));
 
 #define EXTATTR_CHAR_SET		1
@@ -771,35 +723,31 @@
 #define EXTATTR_IMP_USE			2048
 #define EXTATTR_APP_USE			65536
 
-
 /* Unallocated Space Entry (ECMA 167r3 4/14.11) */
-struct unallocSpaceEntry
-{
-	tag		descTag;
-	icbtag		icbTag;
-	__le32		lengthAllocDescs;
-	uint8_t		allocDescs[0];
+struct unallocSpaceEntry {
+	tag descTag;
+	icbtag icbTag;
+	__le32 lengthAllocDescs;
+	uint8_t allocDescs[0];
 } __attribute__ ((packed));
 
 /* Space Bitmap Descriptor (ECMA 167r3 4/14.12) */
-struct spaceBitmapDesc
-{
-	tag		descTag;
-	__le32		numOfBits;
-	__le32		numOfBytes;
-	uint8_t		bitmap[0];
+struct spaceBitmapDesc {
+	tag descTag;
+	__le32 numOfBits;
+	__le32 numOfBytes;
+	uint8_t bitmap[0];
 } __attribute__ ((packed));
 
 /* Partition Integrity Entry (ECMA 167r3 4/14.13) */
-struct partitionIntegrityEntry
-{
-	tag		descTag;
-	icbtag		icbTag;
-	timestamp	recordingDateAndTime;
-	uint8_t		integrityType;
-	uint8_t		reserved[175];
-	regid		impIdent;
-	uint8_t		impUse[256];
+struct partitionIntegrityEntry {
+	tag descTag;
+	icbtag icbTag;
+	timestamp recordingDateAndTime;
+	uint8_t integrityType;
+	uint8_t reserved[175];
+	regid impIdent;
+	uint8_t impUse[256];
 } __attribute__ ((packed));
 
 /* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */
@@ -815,50 +763,47 @@
 /* Extended Allocation Descriptor (ECMA 167r3 4/14.14.3) */
 
 /* Logical Volume Header Descriptor (ECMA 167r3 4/14.15) */
-struct logicalVolHeaderDesc
-{
-	__le64		uniqueID;
-	uint8_t		reserved[24];
+struct logicalVolHeaderDesc {
+	__le64 uniqueID;
+	uint8_t reserved[24];
 } __attribute__ ((packed));
 
 /* Path Component (ECMA 167r3 4/14.16.1) */
-struct pathComponent
-{
-	uint8_t		componentType;
-	uint8_t		lengthComponentIdent;
-	__le16		componentFileVersionNum;
-	dstring		componentIdent[0];
+struct pathComponent {
+	uint8_t componentType;
+	uint8_t lengthComponentIdent;
+	__le16 componentFileVersionNum;
+	dstring componentIdent[0];
 } __attribute__ ((packed));
 
 /* File Entry (ECMA 167r3 4/14.17) */
-struct extendedFileEntry
-{
-	tag		descTag;
-	icbtag		icbTag;
-	__le32		uid;
-	__le32		gid;
-	__le32		permissions;
-	__le16		fileLinkCount;
-	uint8_t		recordFormat;
-	uint8_t		recordDisplayAttr;
-	__le32		recordLength;
-	__le64		informationLength;
-	__le64		objectSize;
-	__le64		logicalBlocksRecorded;
-	timestamp	accessTime;
-	timestamp	modificationTime;
-	timestamp	createTime;
-	timestamp	attrTime;
-	__le32		checkpoint;
-	__le32		reserved;
-	long_ad		extendedAttrICB;
-	long_ad		streamDirectoryICB;
-	regid		impIdent;
-	__le64		uniqueID;
-	__le32		lengthExtendedAttr;
-	__le32		lengthAllocDescs;
-	uint8_t		extendedAttr[0];
-	uint8_t		allocDescs[0];
+struct extendedFileEntry {
+	tag descTag;
+	icbtag icbTag;
+	__le32 uid;
+	__le32 gid;
+	__le32 permissions;
+	__le16 fileLinkCount;
+	uint8_t recordFormat;
+	uint8_t recordDisplayAttr;
+	__le32 recordLength;
+	__le64 informationLength;
+	__le64 objectSize;
+	__le64 logicalBlocksRecorded;
+	timestamp accessTime;
+	timestamp modificationTime;
+	timestamp createTime;
+	timestamp attrTime;
+	__le32 checkpoint;
+	__le32 reserved;
+	long_ad extendedAttrICB;
+	long_ad streamDirectoryICB;
+	regid impIdent;
+	__le64 uniqueID;
+	__le32 lengthExtendedAttr;
+	__le32 lengthAllocDescs;
+	uint8_t extendedAttr[0];
+	uint8_t allocDescs[0];
 } __attribute__ ((packed));
 
-#endif /* _ECMA_167_H */
+#endif				/* _ECMA_167_H */
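
All of the structures in ecma_167.h are packed little-endian on-disk records, each beginning with the 16-byte descriptor tag defined above. The sketch below is a standalone illustration rather than kernel code: it parses such a tag from a raw byte buffer using the field offsets implied by the struct, and recomputes the checksum as the byte sum over the tag with the checksum byte itself (offset 4) excluded, which is the ECMA-167 tag checksum rule as assumed here.

#include <stdio.h>
#include <stdint.h>

/* Little-endian field readers for the packed on-disk layout. */
static uint16_t get_le16(const uint8_t *p) { return p[0] | (p[1] << 8); }
static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Sum of the 16 tag bytes, skipping byte 4 (the checksum field itself). */
static uint8_t tag_checksum(const uint8_t *t)
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < 16; i++)
		if (i != 4)
			sum += t[i];
	return sum;
}

static void dump_tag(const uint8_t *t)
{
	printf("tagIdent     %u\n", get_le16(t + 0));
	printf("descVersion  %u\n", get_le16(t + 2));
	printf("checksum     0x%02x (computed 0x%02x)\n", t[4], tag_checksum(t));
	printf("serialNum    %u\n", get_le16(t + 6));
	printf("descCRC      0x%04x\n", get_le16(t + 8));
	printf("descCRCLen   %u\n", get_le16(t + 10));
	printf("tagLocation  %u\n", get_le32(t + 12));
}

int main(void)
{
	uint8_t buf[16] = { 0x09, 0x00 };	/* tagIdent 9 = TAG_IDENT_LVID */

	buf[4] = tag_checksum(buf);
	dump_tag(buf);
	return 0;
}
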
diff --git a/fs/udf/file.c b/fs/udf/file.c
index df070be..67bf36b 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -30,7 +30,7 @@
 #include <linux/udf_fs.h>
 #include <asm/uaccess.h>
 #include <linux/kernel.h>
-#include <linux/string.h> /* memset */
+#include <linux/string.h>	/* memset */
 #include <linux/capability.h>
 #include <linux/errno.h>
 #include <linux/smp_lock.h>
@@ -41,7 +41,7 @@
 #include "udf_i.h"
 #include "udf_sb.h"
 
-static int udf_adinicb_readpage(struct file *file, struct page * page)
+static int udf_adinicb_readpage(struct file *file, struct page *page)
 {
 	struct inode *inode = page->mapping->host;
 	char *kaddr;
@@ -58,7 +58,8 @@
 	return 0;
 }
 
-static int udf_adinicb_writepage(struct page *page, struct writeback_control *wbc)
+static int udf_adinicb_writepage(struct page *page,
+				 struct writeback_control *wbc)
 {
 	struct inode *inode = page->mapping->host;
 	char *kaddr;
@@ -74,19 +75,21 @@
 	return 0;
 }
 
-static int udf_adinicb_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+static int udf_adinicb_prepare_write(struct file *file, struct page *page,
+				     unsigned offset, unsigned to)
 {
 	kmap(page);
 	return 0;
 }
 
-static int udf_adinicb_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+static int udf_adinicb_commit_write(struct file *file, struct page *page,
+				    unsigned offset, unsigned to)
 {
 	struct inode *inode = page->mapping->host;
 	char *kaddr = page_address(page);
 
 	memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset,
-		kaddr + offset, to - offset);
+	       kaddr + offset, to - offset);
 	mark_inode_dirty(inode);
 	SetPageUptodate(page);
 	kunmap(page);
@@ -97,15 +100,15 @@
 }
 
 const struct address_space_operations udf_adinicb_aops = {
-	.readpage		= udf_adinicb_readpage,
-	.writepage		= udf_adinicb_writepage,
-	.sync_page		= block_sync_page,
-	.prepare_write		= udf_adinicb_prepare_write,
-	.commit_write		= udf_adinicb_commit_write,
+	.readpage = udf_adinicb_readpage,
+	.writepage = udf_adinicb_writepage,
+	.sync_page = block_sync_page,
+	.prepare_write = udf_adinicb_prepare_write,
+	.commit_write = udf_adinicb_commit_write,
 };
 
 static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-			      unsigned long nr_segs, loff_t ppos)
+				  unsigned long nr_segs, loff_t ppos)
 {
 	ssize_t retval;
 	struct file *file = iocb->ki_filp;
@@ -113,25 +116,20 @@
 	int err, pos;
 	size_t count = iocb->ki_left;
 
-	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
-	{
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
 		if (file->f_flags & O_APPEND)
 			pos = inode->i_size;
 		else
 			pos = ppos;
 
-		if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
-			pos + count))
-		{
+		if (inode->i_sb->s_blocksize <
+		    (udf_file_entry_alloc_offset(inode) + pos + count)) {
 			udf_expand_file_adinicb(inode, pos + count, &err);
-			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
-			{
+			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
 				udf_debug("udf_expand_adinicb: err=%d\n", err);
 				return err;
 			}
-		}
-		else
-		{
+		} else {
 			if (pos + count > inode->i_size)
 				UDF_I_LENALLOC(inode) = pos + count;
 			else
@@ -181,48 +179,47 @@
  *	Written, tested, and released.
  */
 int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
-	unsigned long arg)
+	      unsigned long arg)
 {
 	int result = -EINVAL;
 
-	if ( file_permission(filp, MAY_READ) != 0 )
-	{
-		udf_debug("no permission to access inode %lu\n",
-						inode->i_ino);
+	if (file_permission(filp, MAY_READ) != 0) {
+		udf_debug("no permission to access inode %lu\n", inode->i_ino);
 		return -EPERM;
 	}
 
-	if ( !arg )
-	{
+	if (!arg) {
 		udf_debug("invalid argument to udf_ioctl\n");
 		return -EINVAL;
 	}
 
-	switch (cmd)
-	{
-		case UDF_GETVOLIDENT:
-			return copy_to_user((char __user *)arg,
-				UDF_SB_VOLIDENT(inode->i_sb), 32) ? -EFAULT : 0;
-		case UDF_RELOCATE_BLOCKS:
+	switch (cmd) {
+	case UDF_GETVOLIDENT:
+		return copy_to_user((char __user *)arg,
+				    UDF_SB_VOLIDENT(inode->i_sb),
+				    32) ? -EFAULT : 0;
+	case UDF_RELOCATE_BLOCKS:
 		{
 			long old, new;
 
-			if (!capable(CAP_SYS_ADMIN)) return -EACCES;
-			if (get_user(old, (long __user *)arg)) return -EFAULT;
+			if (!capable(CAP_SYS_ADMIN))
+				return -EACCES;
+			if (get_user(old, (long __user *)arg))
+				return -EFAULT;
 			if ((result = udf_relocate_blocks(inode->i_sb,
-					old, &new)) == 0)
+							  old, &new)) == 0)
 				result = put_user(new, (long __user *)arg);
 
 			return result;
 		}
-		case UDF_GETEASIZE:
-			result = put_user(UDF_I_LENEATTR(inode), (int __user *)arg);
-			break;
+	case UDF_GETEASIZE:
+		result = put_user(UDF_I_LENEATTR(inode), (int __user *)arg);
+		break;
 
-		case UDF_GETEABLOCK:
-			result = copy_to_user((char __user *)arg, UDF_I_DATA(inode),
-				UDF_I_LENEATTR(inode)) ? -EFAULT : 0;
-			break;
+	case UDF_GETEABLOCK:
+		result = copy_to_user((char __user *)arg, UDF_I_DATA(inode),
+				      UDF_I_LENEATTR(inode)) ? -EFAULT : 0;
+		break;
 	}
 
 	return result;
@@ -240,10 +237,9 @@
  * HISTORY
  *
  */
-static int udf_release_file(struct inode * inode, struct file * filp)
+static int udf_release_file(struct inode *inode, struct file *filp)
 {
-	if (filp->f_mode & FMODE_WRITE)
-	{
+	if (filp->f_mode & FMODE_WRITE) {
 		lock_kernel();
 		udf_discard_prealloc(inode);
 		unlock_kernel();
@@ -252,18 +248,18 @@
 }
 
 const struct file_operations udf_file_operations = {
-	.read			= do_sync_read,
-	.aio_read		= generic_file_aio_read,
-	.ioctl			= udf_ioctl,
-	.open			= generic_file_open,
-	.mmap			= generic_file_mmap,
-	.write			= do_sync_write,
-	.aio_write		= udf_file_aio_write,
-	.release		= udf_release_file,
-	.fsync			= udf_fsync_file,
-	.splice_read		= generic_file_splice_read,
+	.read = do_sync_read,
+	.aio_read = generic_file_aio_read,
+	.ioctl = udf_ioctl,
+	.open = generic_file_open,
+	.mmap = generic_file_mmap,
+	.write = do_sync_write,
+	.aio_write = udf_file_aio_write,
+	.release = udf_release_file,
+	.fsync = udf_fsync_file,
+	.splice_read = generic_file_splice_read,
 };
 
 const struct inode_operations udf_file_inode_operations = {
-	.truncate		= udf_truncate,
+	.truncate = udf_truncate,
 };
diff --git a/fs/udf/fsync.c b/fs/udf/fsync.c
index 6ded93e..7f0901c 100644
--- a/fs/udf/fsync.c
+++ b/fs/udf/fsync.c
@@ -29,7 +29,7 @@
  *	even pass file to fsync ?
  */
 
-int udf_fsync_file(struct file * file, struct dentry *dentry, int datasync)
+int udf_fsync_file(struct file *file, struct dentry *dentry, int datasync)
 {
 	struct inode *inode = dentry->d_inode;
 	return udf_fsync_inode(inode, datasync);
@@ -45,6 +45,6 @@
 	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
 		return err;
 
-	err |= udf_sync_inode (inode);
+	err |= udf_sync_inode(inode);
 	return err ? -EIO : 0;
 }
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 10f3188..2eb50380 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -28,7 +28,7 @@
 #include "udf_i.h"
 #include "udf_sb.h"
 
-void udf_free_inode(struct inode * inode)
+void udf_free_inode(struct inode *inode)
 {
 	struct super_block *sb = inode->i_sb;
 	struct udf_sb_info *sbi = UDF_SB(sb);
@@ -46,10 +46,12 @@
 	if (sbi->s_lvidbh) {
 		if (S_ISDIR(inode->i_mode))
 			UDF_SB_LVIDIU(sb)->numDirs =
-				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) - 1);
+			    cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs)
+					- 1);
 		else
 			UDF_SB_LVIDIU(sb)->numFiles =
-				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) - 1);
+			    cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles)
+					- 1);
 
 		mark_buffer_dirty(sbi->s_lvidbh);
 	}
@@ -58,18 +60,17 @@
 	udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1);
 }
 
-struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
+struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
 {
 	struct super_block *sb = dir->i_sb;
 	struct udf_sb_info *sbi = UDF_SB(sb);
-	struct inode * inode;
+	struct inode *inode;
 	int block;
 	uint32_t start = UDF_I_LOCATION(dir).logicalBlockNum;
 
 	inode = new_inode(sb);
 
-	if (!inode)
-	{
+	if (!inode) {
 		*err = -ENOMEM;
 		return NULL;
 	}
@@ -81,26 +82,30 @@
 	UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
 	UDF_I_STRAT4096(inode) = 0;
 
-	block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum,
-		start, err);
-	if (*err)
-	{
+	block =
+	    udf_new_block(dir->i_sb, NULL,
+			  UDF_I_LOCATION(dir).partitionReferenceNum, start,
+			  err);
+	if (*err) {
 		iput(inode);
 		return NULL;
 	}
 
 	mutex_lock(&sbi->s_alloc_mutex);
-	if (UDF_SB_LVIDBH(sb))
-	{
+	if (UDF_SB_LVIDBH(sb)) {
 		struct logicalVolHeaderDesc *lvhd;
 		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(sb)->logicalVolContentsUse);
+		lvhd =
+		    (struct logicalVolHeaderDesc *)(UDF_SB_LVID(sb)->
+						    logicalVolContentsUse);
 		if (S_ISDIR(mode))
 			UDF_SB_LVIDIU(sb)->numDirs =
-				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) + 1);
+			    cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs)
+					+ 1);
 		else
 			UDF_SB_LVIDIU(sb)->numFiles =
-				cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) + 1);
+			    cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles)
+					+ 1);
 		UDF_I_UNIQUE(inode) = uniqueID = le64_to_cpu(lvhd->uniqueID);
 		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
 			uniqueID += 16;
@@ -109,35 +114,34 @@
 	}
 	inode->i_mode = mode;
 	inode->i_uid = current->fsuid;
-	if (dir->i_mode & S_ISGID)
-	{
+	if (dir->i_mode & S_ISGID) {
 		inode->i_gid = dir->i_gid;
 		if (S_ISDIR(mode))
 			mode |= S_ISGID;
-	}
-	else
+	} else
 		inode->i_gid = current->fsgid;
 
 	UDF_I_LOCATION(inode).logicalBlockNum = block;
-	UDF_I_LOCATION(inode).partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum;
+	UDF_I_LOCATION(inode).partitionReferenceNum =
+	    UDF_I_LOCATION(dir).partitionReferenceNum;
 	inode->i_ino = udf_get_lb_pblock(sb, UDF_I_LOCATION(inode), 0);
 	inode->i_blocks = 0;
 	UDF_I_LENEATTR(inode) = 0;
 	UDF_I_LENALLOC(inode) = 0;
 	UDF_I_USE(inode) = 0;
-	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE))
-	{
+	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
 		UDF_I_EFE(inode) = 1;
 		UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE);
-		UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
-	}
-	else
-	{
+		UDF_I_DATA(inode) =
+		    kzalloc(inode->i_sb->s_blocksize -
+			    sizeof(struct extendedFileEntry), GFP_KERNEL);
+	} else {
 		UDF_I_EFE(inode) = 0;
-		UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
+		UDF_I_DATA(inode) =
+		    kzalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry),
+			    GFP_KERNEL);
 	}
-	if (!UDF_I_DATA(inode))
-	{
+	if (!UDF_I_DATA(inode)) {
 		iput(inode);
 		*err = -ENOMEM;
 		mutex_unlock(&sbi->s_alloc_mutex);
@@ -150,13 +154,12 @@
 	else
 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
 	inode->i_mtime = inode->i_atime = inode->i_ctime =
-		UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb);
+	    UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb);
 	insert_inode_hash(inode);
 	mark_inode_dirty(inode);
 	mutex_unlock(&sbi->s_alloc_mutex);
 
-	if (DQUOT_ALLOC_INODE(inode))
-	{
+	if (DQUOT_ALLOC_INODE(inode)) {
 		DQUOT_DROP(inode);
 		inode->i_flags |= S_NOQUOTA;
 		inode->i_nlink = 0;
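
The uniqueID handling in udf_new_inode() hands out the current logical-volume-header value, increments it, and skips 16 values whenever the low 32 bits wrap to zero. A small standalone sketch of just that counter logic; the reservation of the first 16 post-wrap values is taken from the code above, not restated from the specification.

#include <stdio.h>
#include <stdint.h>

/* Mirror the uniqueID advance in udf_new_inode(): return the current value,
 * bump the counter, and if its low 32 bits wrapped to zero skip 16 values. */
static uint64_t next_unique_id(uint64_t *counter)
{
	uint64_t id = *counter;

	if (!(++(*counter) & 0x00000000FFFFFFFFULL))
		*counter += 16;
	return id;
}

int main(void)
{
	uint64_t counter = 0x00000000FFFFFFFEULL;	/* near the 32-bit wrap */
	int i;

	for (i = 0; i < 3; i++)
		printf("unique id %llu -> counter now %llu\n",
		       (unsigned long long)next_unique_id(&counter),
		       (unsigned long long)counter);
	return 0;
}
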
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 5b82e48..be6326f 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -51,18 +51,18 @@
 static void udf_fill_inode(struct inode *, struct buffer_head *);
 static int udf_alloc_i_data(struct inode *inode, size_t size);
 static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
-	long *, int *);
+					long *, int *);
 static int8_t udf_insert_aext(struct inode *, struct extent_position,
-	kernel_lb_addr, uint32_t);
+			      kernel_lb_addr, uint32_t);
 static void udf_split_extents(struct inode *, int *, int, int,
-	kernel_long_ad [EXTENT_MERGE_SIZE], int *);
+			      kernel_long_ad[EXTENT_MERGE_SIZE], int *);
 static void udf_prealloc_extents(struct inode *, int, int,
-	 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
+				 kernel_long_ad[EXTENT_MERGE_SIZE], int *);
 static void udf_merge_extents(struct inode *,
-	 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
+			      kernel_long_ad[EXTENT_MERGE_SIZE], int *);
 static void udf_update_extents(struct inode *,
-	kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
-	struct extent_position *);
+			       kernel_long_ad[EXTENT_MERGE_SIZE], int, int,
+			       struct extent_position *);
 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
 
 /*
@@ -81,7 +81,7 @@
  *
  *  Called at the last iput() if i_nlink is zero.
  */
-void udf_delete_inode(struct inode * inode)
+void udf_delete_inode(struct inode *inode)
 {
 	truncate_inode_pages(&inode->i_data, 0);
 
@@ -97,7 +97,7 @@
 
 	unlock_kernel();
 	return;
-no_delete:
+      no_delete:
 	clear_inode(inode);
 }
 
@@ -132,26 +132,27 @@
 	return block_read_full_page(page, udf_get_block);
 }
 
-static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+static int udf_prepare_write(struct file *file, struct page *page,
+			     unsigned from, unsigned to)
 {
 	return block_prepare_write(page, from, to, udf_get_block);
 }
 
 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
 {
-	return generic_block_bmap(mapping,block,udf_get_block);
+	return generic_block_bmap(mapping, block, udf_get_block);
 }
 
 const struct address_space_operations udf_aops = {
-	.readpage		= udf_readpage,
-	.writepage		= udf_writepage,
-	.sync_page		= block_sync_page,
-	.prepare_write		= udf_prepare_write,
-	.commit_write		= generic_commit_write,
-	.bmap			= udf_bmap,
+	.readpage = udf_readpage,
+	.writepage = udf_writepage,
+	.sync_page = block_sync_page,
+	.prepare_write = udf_prepare_write,
+	.commit_write = generic_commit_write,
+	.bmap = udf_bmap,
 };
 
-void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
+void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
 {
 	struct page *page;
 	char *kaddr;
@@ -163,8 +164,7 @@
 	/* from now on we have normal address_space methods */
 	inode->i_data.a_ops = &udf_aops;
 
-	if (!UDF_I_LENALLOC(inode))
-	{
+	if (!UDF_I_LENALLOC(inode)) {
 		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
 			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
 		else
@@ -176,19 +176,18 @@
 	page = grab_cache_page(inode->i_mapping, 0);
 	BUG_ON(!PageLocked(page));
 
-	if (!PageUptodate(page))
-	{
+	if (!PageUptodate(page)) {
 		kaddr = kmap(page);
 		memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
-			PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
+		       PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
 		memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
-			UDF_I_LENALLOC(inode));
+		       UDF_I_LENALLOC(inode));
 		flush_dcache_page(page);
 		SetPageUptodate(page);
 		kunmap(page);
 	}
 	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
-		UDF_I_LENALLOC(inode));
+	       UDF_I_LENALLOC(inode));
 	UDF_I_LENALLOC(inode) = 0;
 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
@@ -201,7 +200,8 @@
 	mark_inode_dirty(inode);
 }
 
-struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
+struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
+					   int *err)
 {
 	int newblock;
 	struct buffer_head *dbh = NULL;
@@ -220,8 +220,7 @@
 	else
 		alloctype = ICBTAG_FLAG_AD_LONG;
 
-	if (!inode->i_size)
-	{
+	if (!inode->i_size) {
 		UDF_I_ALLOCTYPE(inode) = alloctype;
 		mark_inode_dirty(inode);
 		return NULL;
@@ -229,13 +228,14 @@
 
 	/* alloc block, and copy data to it */
 	*block = udf_new_block(inode->i_sb, inode,
-		UDF_I_LOCATION(inode).partitionReferenceNum,
-		UDF_I_LOCATION(inode).logicalBlockNum, err);
+			       UDF_I_LOCATION(inode).partitionReferenceNum,
+			       UDF_I_LOCATION(inode).logicalBlockNum, err);
 
 	if (!(*block))
 		return NULL;
 	newblock = udf_get_pblock(inode->i_sb, *block,
-		UDF_I_LOCATION(inode).partitionReferenceNum, 0);
+				  UDF_I_LOCATION(inode).partitionReferenceNum,
+				  0);
 	if (!newblock)
 		return NULL;
 	dbh = udf_tgetblk(inode->i_sb, newblock);
@@ -247,16 +247,17 @@
 	unlock_buffer(dbh);
 	mark_buffer_dirty_inode(dbh, inode);
 
-	sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
+	sfibh.soffset = sfibh.eoffset =
+	    (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
 	sfibh.sbh = sfibh.ebh = NULL;
 	dfibh.soffset = dfibh.eoffset = 0;
 	dfibh.sbh = dfibh.ebh = dbh;
-	while ( (f_pos < size) )
-	{
+	while ((f_pos < size)) {
 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
-		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL);
-		if (!sfi)
-		{
+		sfi =
+		    udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL,
+				       NULL, NULL);
+		if (!sfi) {
 			brelse(dbh);
 			return NULL;
 		}
@@ -266,8 +267,8 @@
 		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
 		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
 		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
-			sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
-		{
+				 sfi->fileIdent +
+				 le16_to_cpu(sfi->lengthOfImpUse))) {
 			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
 			brelse(dbh);
 			return NULL;
@@ -275,10 +276,12 @@
 	}
 	mark_buffer_dirty_inode(dbh, inode);
 
-	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
+	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0,
+	       UDF_I_LENALLOC(inode));
 	UDF_I_LENALLOC(inode) = 0;
 	eloc.logicalBlockNum = *block;
-	eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
+	eloc.partitionReferenceNum =
+	    UDF_I_LOCATION(inode).partitionReferenceNum;
 	elen = inode->i_size;
 	UDF_I_LENEXTENTS(inode) = elen;
 	epos.bh = NULL;
@@ -292,14 +295,14 @@
 	return dbh;
 }
 
-static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
+static int udf_get_block(struct inode *inode, sector_t block,
+			 struct buffer_head *bh_result, int create)
 {
 	int err, new;
 	struct buffer_head *bh;
 	unsigned long phys;
 
-	if (!create)
-	{
+	if (!create) {
 		phys = udf_block_map(inode, block);
 		if (phys)
 			map_bh(bh_result, inode->i_sb, phys);
@@ -315,10 +318,9 @@
 	if (block < 0)
 		goto abort_negative;
 
-	if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
-	{
-		UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
-		UDF_I_NEXT_ALLOC_GOAL(inode) ++;
+	if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1) {
+		UDF_I_NEXT_ALLOC_BLOCK(inode)++;
+		UDF_I_NEXT_ALLOC_GOAL(inode)++;
 	}
 
 	err = 0;
@@ -332,29 +334,27 @@
 	if (new)
 		set_buffer_new(bh_result);
 	map_bh(bh_result, inode->i_sb, phys);
-abort:
+      abort:
 	unlock_kernel();
 	return err;
 
-abort_negative:
+      abort_negative:
 	udf_warning(inode->i_sb, "udf_get_block", "block < 0");
 	goto abort;
 }
 
-static struct buffer_head *
-udf_getblk(struct inode *inode, long block, int create, int *err)
+static struct buffer_head *udf_getblk(struct inode *inode, long block,
+				      int create, int *err)
 {
 	struct buffer_head dummy;
 
 	dummy.b_state = 0;
 	dummy.b_blocknr = -1000;
 	*err = udf_get_block(inode, block, &dummy, create);
-	if (!*err && buffer_mapped(&dummy))
-	{
+	if (!*err && buffer_mapped(&dummy)) {
 		struct buffer_head *bh;
 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
-		if (buffer_new(&dummy))
-		{
+		if (buffer_new(&dummy)) {
 			lock_buffer(bh);
 			memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
 			set_buffer_uptodate(bh);
@@ -368,12 +368,12 @@
 
 /* Extend the file by 'blocks' blocks, return the number of extents added */
 int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
-	kernel_long_ad *last_ext, sector_t blocks)
+		    kernel_long_ad * last_ext, sector_t blocks)
 {
 	sector_t add;
 	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
 	struct super_block *sb = inode->i_sb;
-	kernel_lb_addr prealloc_loc = {0, 0};
+	kernel_lb_addr prealloc_loc = { 0, 0 };
 	int prealloc_len = 0;
 
 	/* The previous extent is fake and we should not extend by anything
@@ -383,28 +383,32 @@
 	/* Round the last extent up to a multiple of block size */
 	if (last_ext->extLength & (sb->s_blocksize - 1)) {
 		last_ext->extLength =
-			(last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
-			(((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
-				sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
+		    (last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
+		    (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
+		      sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
 		UDF_I_LENEXTENTS(inode) =
-			(UDF_I_LENEXTENTS(inode) + sb->s_blocksize - 1) &
-				~(sb->s_blocksize - 1);
+		    (UDF_I_LENEXTENTS(inode) + sb->s_blocksize - 1) &
+		    ~(sb->s_blocksize - 1);
 	}
 	/* Last extent are just preallocated blocks? */
-	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) {
+	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
+	    EXT_NOT_RECORDED_ALLOCATED) {
 		/* Save the extent so that we can reattach it to the end */
 		prealloc_loc = last_ext->extLocation;
 		prealloc_len = last_ext->extLength;
 		/* Mark the extent as a hole */
 		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
-			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
+		    (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
 		last_ext->extLocation.logicalBlockNum = 0;
-       		last_ext->extLocation.partitionReferenceNum = 0;
+		last_ext->extLocation.partitionReferenceNum = 0;
 	}
 	/* Can we merge with the previous extent? */
-	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) {
-		add = ((1<<30) - sb->s_blocksize - (last_ext->extLength &
-			UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits;
+	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
+	    EXT_NOT_RECORDED_NOT_ALLOCATED) {
+		add =
+		    ((1 << 30) - sb->s_blocksize -
+		     (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >> sb->
+		    s_blocksize_bits;
 		if (add > blocks)
 			add = blocks;
 		blocks -= add;
@@ -413,40 +417,42 @@
 
 	if (fake) {
 		udf_add_aext(inode, last_pos, last_ext->extLocation,
-			last_ext->extLength, 1);
+			     last_ext->extLength, 1);
 		count++;
-	}
-	else
-		udf_write_aext(inode, last_pos, last_ext->extLocation, last_ext->extLength, 1);
+	} else
+		udf_write_aext(inode, last_pos, last_ext->extLocation,
+			       last_ext->extLength, 1);
 	/* Managed to do everything necessary? */
 	if (!blocks)
 		goto out;
 
 	/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
 	last_ext->extLocation.logicalBlockNum = 0;
-       	last_ext->extLocation.partitionReferenceNum = 0;
-	add = (1 << (30-sb->s_blocksize_bits)) - 1;
-	last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits);
+	last_ext->extLocation.partitionReferenceNum = 0;
+	add = (1 << (30 - sb->s_blocksize_bits)) - 1;
+	last_ext->extLength =
+	    EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits);
 	/* Create enough extents to cover the whole hole */
 	while (blocks > add) {
 		blocks -= add;
 		if (udf_add_aext(inode, last_pos, last_ext->extLocation,
-			last_ext->extLength, 1) == -1)
+				 last_ext->extLength, 1) == -1)
 			return -1;
 		count++;
 	}
 	if (blocks) {
 		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
-			(blocks << sb->s_blocksize_bits);
+		    (blocks << sb->s_blocksize_bits);
 		if (udf_add_aext(inode, last_pos, last_ext->extLocation,
-			last_ext->extLength, 1) == -1)
+				 last_ext->extLength, 1) == -1)
 			return -1;
 		count++;
 	}
-out:
+      out:
 	/* Do we have some preallocated blocks saved? */
 	if (prealloc_len) {
-		if (udf_add_aext(inode, last_pos, prealloc_loc, prealloc_len, 1) == -1)
+		if (udf_add_aext(inode, last_pos, prealloc_loc, prealloc_len, 1)
+		    == -1)
 			return -1;
 		last_ext->extLocation = prealloc_loc;
 		last_ext->extLength = prealloc_len;
@@ -462,8 +468,8 @@
 	return count;
 }
 
-static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
-	int *err, long *phys, int *new)
+static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
+					int *err, long *phys, int *new)
 {
 	static sector_t last_block;
 	struct buffer_head *result = NULL;
@@ -484,21 +490,18 @@
 	prev_epos.block = UDF_I_LOCATION(inode);
 	prev_epos.bh = NULL;
 	cur_epos = next_epos = prev_epos;
-	b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
+	b_off = (loff_t) block << inode->i_sb->s_blocksize_bits;
 
 	/* find the extent which contains the block we are looking for.
-       alternate between laarr[0] and laarr[1] for locations of the
-       current extent, and the previous extent */
-	do
-	{
-		if (prev_epos.bh != cur_epos.bh)
-		{
+	   alternate between laarr[0] and laarr[1] for locations of the
+	   current extent, and the previous extent */
+	do {
+		if (prev_epos.bh != cur_epos.bh) {
 			brelse(prev_epos.bh);
 			get_bh(cur_epos.bh);
 			prev_epos.bh = cur_epos.bh;
 		}
-		if (cur_epos.bh != next_epos.bh)
-		{
+		if (cur_epos.bh != next_epos.bh) {
 			brelse(cur_epos.bh);
 			get_bh(next_epos.bh);
 			cur_epos.bh = next_epos.bh;
@@ -512,7 +515,8 @@
 		prev_epos.offset = cur_epos.offset;
 		cur_epos.offset = next_epos.offset;
 
-		if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1)
+		if ((etype =
+		     udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1)
 			break;
 
 		c = !c;
@@ -522,10 +526,10 @@
 
 		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
 			pgoal = eloc.logicalBlockNum +
-				((elen + inode->i_sb->s_blocksize - 1) >>
-				inode->i_sb->s_blocksize_bits);
+			    ((elen + inode->i_sb->s_blocksize - 1) >>
+			     inode->i_sb->s_blocksize_bits);
 
-		count ++;
+		count++;
 	} while (lbcount + elen <= b_off);
 
 	b_off -= lbcount;
@@ -538,15 +542,13 @@
 	udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
 
 	/* if the extent is allocated and recorded, return the block
-       if the extent is not a multiple of the blocksize, round up */
+	   if the extent is not a multiple of the blocksize, round up */
 
-	if (etype == (EXT_RECORDED_ALLOCATED >> 30))
-	{
-		if (elen & (inode->i_sb->s_blocksize - 1))
-		{
+	if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
+		if (elen & (inode->i_sb->s_blocksize - 1)) {
 			elen = EXT_RECORDED_ALLOCATED |
-				((elen + inode->i_sb->s_blocksize - 1) &
-				~(inode->i_sb->s_blocksize - 1));
+			    ((elen + inode->i_sb->s_blocksize - 1) &
+			     ~(inode->i_sb->s_blocksize - 1));
 			etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1);
 		}
 		brelse(prev_epos.bh);
@@ -559,18 +561,17 @@
 
 	last_block = block;
 	/* Are we beyond EOF? */
-	if (etype == -1)
-	{
+	if (etype == -1) {
 		int ret;
 
 		if (count) {
 			if (c)
 				laarr[0] = laarr[1];
 			startnum = 1;
-		}
-		else {
+		} else {
 			/* Create a fake extent when there's not one */
-			memset(&laarr[0].extLocation, 0x00, sizeof(kernel_lb_addr));
+			memset(&laarr[0].extLocation, 0x00,
+			       sizeof(kernel_lb_addr));
 			laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
 			/* Will udf_extend_file() create real extent from a fake one? */
 			startnum = (offset > 0);
@@ -590,26 +591,26 @@
 		offset = 0;
 		count += ret;
 		/* We are not covered by a preallocated extent? */
-		if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != EXT_NOT_RECORDED_ALLOCATED) {
+		if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
+		    EXT_NOT_RECORDED_ALLOCATED) {
 			/* Is there any real extent? - otherwise we overwrite
 			 * the fake one... */
 			if (count)
 				c = !c;
 			laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
-				inode->i_sb->s_blocksize;
-			memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
-			count ++;
-			endnum ++;
+			    inode->i_sb->s_blocksize;
+			memset(&laarr[c].extLocation, 0x00,
+			       sizeof(kernel_lb_addr));
+			count++;
+			endnum++;
 		}
-		endnum = c+1;
+		endnum = c + 1;
 		lastblock = 1;
-	}
-	else {
+	} else {
 		endnum = startnum = ((count > 2) ? 2 : count);
 
 		/* if the current extent is in position 0, swap it with the previous */
-		if (!c && count != 1)
-		{
+		if (!c && count != 1) {
 			laarr[2] = laarr[0];
 			laarr[0] = laarr[1];
 			laarr[1] = laarr[2];
@@ -617,37 +618,37 @@
 		}
 
 		/* if the current block is located in an extent, read the next extent */
-		if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1)
-		{
-			laarr[c+1].extLength = (etype << 30) | elen;
-			laarr[c+1].extLocation = eloc;
-			count ++;
-			startnum ++;
-			endnum ++;
-		}
-		else {
+		if ((etype =
+		     udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1) {
+			laarr[c + 1].extLength = (etype << 30) | elen;
+			laarr[c + 1].extLocation = eloc;
+			count++;
+			startnum++;
+			endnum++;
+		} else {
 			lastblock = 1;
 		}
 	}
 
 	/* if the current extent is not recorded but allocated, get the
-		block in the extent corresponding to the requested block */
+	   block in the extent corresponding to the requested block */
 	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
 		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
-	else /* otherwise, allocate a new block */
-	{
+	else {			/* otherwise, allocate a new block */
+
 		if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
 			goal = UDF_I_NEXT_ALLOC_GOAL(inode);
 
-		if (!goal)
-		{
+		if (!goal) {
 			if (!(goal = pgoal))
-				goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
+				goal =
+				    UDF_I_LOCATION(inode).logicalBlockNum + 1;
 		}
 
 		if (!(newblocknum = udf_new_block(inode->i_sb, inode,
-			UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
-		{
+						  UDF_I_LOCATION(inode).
+						  partitionReferenceNum, goal,
+						  err))) {
 			brelse(prev_epos.bh);
 			*err = -ENOSPC;
 			return NULL;
@@ -656,8 +657,8 @@
 	}
 
 	/* if the extent the requested block is located in contains multiple blocks,
-       split the extent into at most three extents. blocks prior to requested
-       block, requested block, and blocks after requested block */
+	   split the extent into at most three extents. blocks prior to requested
+	   block, requested block, and blocks after requested block */
 	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
 
 #ifdef UDF_PREALLOCATE
@@ -669,15 +670,15 @@
 	udf_merge_extents(inode, laarr, &endnum);
 
 	/* write back the new extents, inserting new extents if the new number
-	of extents is greater than the old number, and deleting extents if
-	the new number of extents is less than the old number */
+	   of extents is greater than the old number, and deleting extents if
+	   the new number of extents is less than the old number */
 	udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
 
 	brelse(prev_epos.bh);
 
 	if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
-		UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
-	{
+					UDF_I_LOCATION(inode).
+					partitionReferenceNum, 0))) {
 		return NULL;
 	}
 	*phys = newblock;
@@ -694,283 +695,329 @@
 	return result;
 }
 
-static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
-	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
+static void udf_split_extents(struct inode *inode, int *c, int offset,
+			      int newblocknum,
+			      kernel_long_ad laarr[EXTENT_MERGE_SIZE],
+			      int *endnum)
 {
 	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
-		(laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
-	{
+	    (laarr[*c].extLength >> 30) ==
+	    (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
 		int curr = *c;
 		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
-			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
+			    inode->i_sb->s_blocksize -
+			    1) >> inode->i_sb->s_blocksize_bits;
 		int8_t etype = (laarr[curr].extLength >> 30);
 
-		if (blen == 1)
-			;
-		else if (!offset || blen == offset + 1)
-		{
-			laarr[curr+2] = laarr[curr+1];
-			laarr[curr+1] = laarr[curr];
-		}
-		else
-		{
-			laarr[curr+3] = laarr[curr+1];
-			laarr[curr+2] = laarr[curr+1] = laarr[curr];
+		if (blen == 1) ;
+		else if (!offset || blen == offset + 1) {
+			laarr[curr + 2] = laarr[curr + 1];
+			laarr[curr + 1] = laarr[curr];
+		} else {
+			laarr[curr + 3] = laarr[curr + 1];
+			laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
 		}
 
-		if (offset)
-		{
-			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
-			{
-				udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
-				laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
-					(offset << inode->i_sb->s_blocksize_bits);
+		if (offset) {
+			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
+				udf_free_blocks(inode->i_sb, inode,
+						laarr[curr].extLocation, 0,
+						offset);
+				laarr[curr].extLength =
+				    EXT_NOT_RECORDED_NOT_ALLOCATED | (offset <<
+								      inode->
+								      i_sb->
+								      s_blocksize_bits);
 				laarr[curr].extLocation.logicalBlockNum = 0;
-				laarr[curr].extLocation.partitionReferenceNum = 0;
-			}
-			else
+				laarr[curr].extLocation.partitionReferenceNum =
+				    0;
+			} else
 				laarr[curr].extLength = (etype << 30) |
-					(offset << inode->i_sb->s_blocksize_bits);
-			curr ++;
-			(*c) ++;
-			(*endnum) ++;
+				    (offset << inode->i_sb->s_blocksize_bits);
+			curr++;
+			(*c)++;
+			(*endnum)++;
 		}
 
 		laarr[curr].extLocation.logicalBlockNum = newblocknum;
 		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
 			laarr[curr].extLocation.partitionReferenceNum =
-				UDF_I_LOCATION(inode).partitionReferenceNum;
+			    UDF_I_LOCATION(inode).partitionReferenceNum;
 		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
-			inode->i_sb->s_blocksize;
-		curr ++;
+		    inode->i_sb->s_blocksize;
+		curr++;
 
-		if (blen != offset + 1)
-		{
+		if (blen != offset + 1) {
 			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
-				laarr[curr].extLocation.logicalBlockNum += (offset + 1);
-			laarr[curr].extLength = (etype << 30) |
-				((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
-			curr ++;
-			(*endnum) ++;
+				laarr[curr].extLocation.logicalBlockNum +=
+				    (offset + 1);
+			laarr[curr].extLength =
+			    (etype << 30) | ((blen - (offset + 1)) << inode->
+					     i_sb->s_blocksize_bits);
+			curr++;
+			(*endnum)++;
 		}
 	}
 }
 
 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
-	 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
+				 kernel_long_ad laarr[EXTENT_MERGE_SIZE],
+				 int *endnum)
 {
 	int start, length = 0, currlength = 0, i;
 
-	if (*endnum >= (c+1))
-	{
+	if (*endnum >= (c + 1)) {
 		if (!lastblock)
 			return;
 		else
 			start = c;
-	}
-	else
-	{
-		if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
-		{
-			start = c+1;
-			length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
-				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
-		}
-		else
+	} else {
+		if ((laarr[c + 1].extLength >> 30) ==
+		    (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
+			start = c + 1;
+			length = currlength =
+			    (((laarr[c + 1].
+			       extLength & UDF_EXTENT_LENGTH_MASK) +
+			      inode->i_sb->s_blocksize -
+			      1) >> inode->i_sb->s_blocksize_bits);
+		} else
 			start = c;
 	}
 
-	for (i=start+1; i<=*endnum; i++)
-	{
-		if (i == *endnum)
-		{
+	for (i = start + 1; i <= *endnum; i++) {
+		if (i == *endnum) {
 			if (lastblock)
 				length += UDF_DEFAULT_PREALLOC_BLOCKS;
-		}
-		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
-			length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
-				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
+		} else if ((laarr[i].extLength >> 30) ==
+			   (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
+			length +=
+			    (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+			      inode->i_sb->s_blocksize -
+			      1) >> inode->i_sb->s_blocksize_bits);
 		else
 			break;
 	}
 
-	if (length)
-	{
+	if (length) {
 		int next = laarr[start].extLocation.logicalBlockNum +
-			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
-			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
+		    (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
+		      inode->i_sb->s_blocksize -
+		      1) >> inode->i_sb->s_blocksize_bits);
 		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
-			laarr[start].extLocation.partitionReferenceNum,
-			next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
-				UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
+						   laarr[start].extLocation.
+						   partitionReferenceNum,
+						   next,
+						   (UDF_DEFAULT_PREALLOC_BLOCKS
+						    >
+						    length ? length :
+						    UDF_DEFAULT_PREALLOC_BLOCKS)
+						   - currlength);
 
-		if (numalloc)
-		{
-			if (start == (c+1))
+		if (numalloc) {
+			if (start == (c + 1))
 				laarr[start].extLength +=
-					(numalloc << inode->i_sb->s_blocksize_bits);
-			else
-			{
-				memmove(&laarr[c+2], &laarr[c+1],
-					sizeof(long_ad) * (*endnum - (c+1)));
-				(*endnum) ++;
-				laarr[c+1].extLocation.logicalBlockNum = next;
-				laarr[c+1].extLocation.partitionReferenceNum =
-					laarr[c].extLocation.partitionReferenceNum;
-				laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
-					(numalloc << inode->i_sb->s_blocksize_bits);
-				start = c+1;
+				    (numalloc << inode->i_sb->s_blocksize_bits);
+			else {
+				memmove(&laarr[c + 2], &laarr[c + 1],
+					sizeof(long_ad) * (*endnum - (c + 1)));
+				(*endnum)++;
+				laarr[c + 1].extLocation.logicalBlockNum = next;
+				laarr[c + 1].extLocation.partitionReferenceNum =
+				    laarr[c].extLocation.partitionReferenceNum;
+				laarr[c + 1].extLength =
+				    EXT_NOT_RECORDED_ALLOCATED | (numalloc <<
+								  inode->i_sb->
+								  s_blocksize_bits);
+				start = c + 1;
 			}
 
-			for (i=start+1; numalloc && i<*endnum; i++)
-			{
-				int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
-					inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
+			for (i = start + 1; numalloc && i < *endnum; i++) {
+				int elen =
+				    ((laarr[i].
+				      extLength & UDF_EXTENT_LENGTH_MASK) +
+				     inode->i_sb->s_blocksize -
+				     1) >> inode->i_sb->s_blocksize_bits;
 
-				if (elen > numalloc)
-				{
+				if (elen > numalloc) {
 					laarr[i].extLength -=
-						(numalloc << inode->i_sb->s_blocksize_bits);
+					    (numalloc << inode->i_sb->
+					     s_blocksize_bits);
 					numalloc = 0;
-				}
-				else
-				{
+				} else {
 					numalloc -= elen;
-					if (*endnum > (i+1))
-						memmove(&laarr[i], &laarr[i+1],
-							sizeof(long_ad) * (*endnum - (i+1)));
-					i --;
-					(*endnum) --;
+					if (*endnum > (i + 1))
+						memmove(&laarr[i],
+							&laarr[i + 1],
+							sizeof(long_ad) *
+							(*endnum - (i + 1)));
+					i--;
+					(*endnum)--;
 				}
 			}
-			UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
+			UDF_I_LENEXTENTS(inode) +=
+			    numalloc << inode->i_sb->s_blocksize_bits;
 		}
 	}
 }
 
 static void udf_merge_extents(struct inode *inode,
-	 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
+			      kernel_long_ad laarr[EXTENT_MERGE_SIZE],
+			      int *endnum)
 {
 	int i;
 
-	for (i=0; i<(*endnum-1); i++)
-	{
-		if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
-		{
-			if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
-				((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
-				(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
-				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
-			{
-				if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
-					(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
-					inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
-				{
-					laarr[i+1].extLength = (laarr[i+1].extLength -
-						(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
-						UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
-					laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
-						(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
-					laarr[i+1].extLocation.logicalBlockNum =
-						laarr[i].extLocation.logicalBlockNum +
-						((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
-							inode->i_sb->s_blocksize_bits);
-				}
-				else
-				{
-					laarr[i].extLength = laarr[i+1].extLength +
-						(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
-						inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
-					if (*endnum > (i+2))
-						memmove(&laarr[i+1], &laarr[i+2],
-							sizeof(long_ad) * (*endnum - (i+2)));
-					i --;
-					(*endnum) --;
+	for (i = 0; i < (*endnum - 1); i++) {
+		if ((laarr[i].extLength >> 30) ==
+		    (laarr[i + 1].extLength >> 30)) {
+			if (((laarr[i].extLength >> 30) ==
+			     (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
+			    ||
+			    ((laarr[i + 1].extLocation.logicalBlockNum -
+			      laarr[i].extLocation.logicalBlockNum) ==
+			     (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+			       inode->i_sb->s_blocksize -
+			       1) >> inode->i_sb->s_blocksize_bits))) {
+				if (((laarr[i].
+				      extLength & UDF_EXTENT_LENGTH_MASK) +
+				     (laarr[i + 1].
+				      extLength & UDF_EXTENT_LENGTH_MASK) +
+				     inode->i_sb->s_blocksize -
+				     1) & ~UDF_EXTENT_LENGTH_MASK) {
+					laarr[i + 1].extLength =
+					    (laarr[i + 1].extLength -
+					     (laarr[i].
+					      extLength &
+					      UDF_EXTENT_LENGTH_MASK) +
+					     UDF_EXTENT_LENGTH_MASK) & ~(inode->
+									 i_sb->
+									 s_blocksize
+									 - 1);
+					laarr[i].extLength =
+					    (laarr[i].
+					     extLength & UDF_EXTENT_FLAG_MASK) +
+					    (UDF_EXTENT_LENGTH_MASK + 1) -
+					    inode->i_sb->s_blocksize;
+					laarr[i +
+					      1].extLocation.logicalBlockNum =
+					    laarr[i].extLocation.
+					    logicalBlockNum +
+					    ((laarr[i].
+					      extLength &
+					      UDF_EXTENT_LENGTH_MASK) >> inode->
+					     i_sb->s_blocksize_bits);
+				} else {
+					laarr[i].extLength =
+					    laarr[i + 1].extLength +
+					    (((laarr[i].
+					       extLength &
+					       UDF_EXTENT_LENGTH_MASK) +
+					      inode->i_sb->s_blocksize -
+					      1) & ~(inode->i_sb->s_blocksize -
+						     1));
+					if (*endnum > (i + 2))
+						memmove(&laarr[i + 1],
+							&laarr[i + 2],
+							sizeof(long_ad) *
+							(*endnum - (i + 2)));
+					i--;
+					(*endnum)--;
 				}
 			}
-		}
-		else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
-			((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
-		{
-			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
-				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
-				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
+		} else
+		    if (((laarr[i].extLength >> 30) ==
+			 (EXT_NOT_RECORDED_ALLOCATED >> 30))
+			&& ((laarr[i + 1].extLength >> 30) ==
+			    (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
+			udf_free_blocks(inode->i_sb, inode,
+					laarr[i].extLocation, 0,
+					((laarr[i].
+					  extLength & UDF_EXTENT_LENGTH_MASK) +
+					 inode->i_sb->s_blocksize -
+					 1) >> inode->i_sb->s_blocksize_bits);
 			laarr[i].extLocation.logicalBlockNum = 0;
 			laarr[i].extLocation.partitionReferenceNum = 0;
 
 			if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
-				(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
-				inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
-			{
-				laarr[i+1].extLength = (laarr[i+1].extLength -
-					(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
-					UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
-				laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
-					(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
+			     (laarr[i + 1].extLength & UDF_EXTENT_LENGTH_MASK) +
+			     inode->i_sb->s_blocksize -
+			     1) & ~UDF_EXTENT_LENGTH_MASK) {
+				laarr[i + 1].extLength =
+				    (laarr[i + 1].extLength -
+				     (laarr[i].
+				      extLength & UDF_EXTENT_LENGTH_MASK) +
+				     UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->
+								 s_blocksize -
+								 1);
+				laarr[i].extLength =
+				    (laarr[i].
+				     extLength & UDF_EXTENT_FLAG_MASK) +
+				    (UDF_EXTENT_LENGTH_MASK + 1) -
+				    inode->i_sb->s_blocksize;
+			} else {
+				laarr[i].extLength = laarr[i + 1].extLength +
+				    (((laarr[i].
+				       extLength & UDF_EXTENT_LENGTH_MASK) +
+				      inode->i_sb->s_blocksize -
+				      1) & ~(inode->i_sb->s_blocksize - 1));
+				if (*endnum > (i + 2))
+					memmove(&laarr[i + 1], &laarr[i + 2],
+						sizeof(long_ad) * (*endnum -
+								   (i + 2)));
+				i--;
+				(*endnum)--;
 			}
-			else
-			{
-				laarr[i].extLength = laarr[i+1].extLength +
-					(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
-					inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
-				if (*endnum > (i+2))
-					memmove(&laarr[i+1], &laarr[i+2],
-						sizeof(long_ad) * (*endnum - (i+2)));
-				i --;
-				(*endnum) --;
-			}
-		}
-		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
-		{
-			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
-				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
-			       inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
+		} else if ((laarr[i].extLength >> 30) ==
+			   (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
+			udf_free_blocks(inode->i_sb, inode,
+					laarr[i].extLocation, 0,
+					((laarr[i].
+					  extLength & UDF_EXTENT_LENGTH_MASK) +
+					 inode->i_sb->s_blocksize -
+					 1) >> inode->i_sb->s_blocksize_bits);
 			laarr[i].extLocation.logicalBlockNum = 0;
 			laarr[i].extLocation.partitionReferenceNum = 0;
-			laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
-				EXT_NOT_RECORDED_NOT_ALLOCATED;
+			laarr[i].extLength =
+			    (laarr[i].
+			     extLength & UDF_EXTENT_LENGTH_MASK) |
+			    EXT_NOT_RECORDED_NOT_ALLOCATED;
 		}
 	}
 }
 
 static void udf_update_extents(struct inode *inode,
-	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
-	struct extent_position *epos)
+			       kernel_long_ad laarr[EXTENT_MERGE_SIZE],
+			       int startnum, int endnum,
+			       struct extent_position *epos)
 {
 	int start = 0, i;
 	kernel_lb_addr tmploc;
 	uint32_t tmplen;
 
-	if (startnum > endnum)
-	{
-		for (i=0; i<(startnum-endnum); i++)
+	if (startnum > endnum) {
+		for (i = 0; i < (startnum - endnum); i++)
 			udf_delete_aext(inode, *epos, laarr[i].extLocation,
-				laarr[i].extLength);
-	}
-	else if (startnum < endnum)
-	{
-		for (i=0; i<(endnum-startnum); i++)
-		{
+					laarr[i].extLength);
+	} else if (startnum < endnum) {
+		for (i = 0; i < (endnum - startnum); i++) {
 			udf_insert_aext(inode, *epos, laarr[i].extLocation,
-				laarr[i].extLength);
+					laarr[i].extLength);
 			udf_next_aext(inode, epos, &laarr[i].extLocation,
-				&laarr[i].extLength, 1);
-			start ++;
+				      &laarr[i].extLength, 1);
+			start++;
 		}
 	}
 
-	for (i=start; i<endnum; i++)
-	{
+	for (i = start; i < endnum; i++) {
 		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
 		udf_write_aext(inode, epos, laarr[i].extLocation,
-			laarr[i].extLength, 1);
+			       laarr[i].extLength, 1);
 	}
 }
 
-struct buffer_head * udf_bread(struct inode * inode, int block,
-	int create, int * err)
+struct buffer_head *udf_bread(struct inode *inode, int block,
+			      int create, int *err)
 {
-	struct buffer_head * bh = NULL;
+	struct buffer_head *bh = NULL;
 
 	bh = udf_getblk(inode, block, create, err);
 	if (!bh)
@@ -987,56 +1034,51 @@
 	return NULL;
 }
 
-void udf_truncate(struct inode * inode)
+void udf_truncate(struct inode *inode)
 {
 	int offset;
 	int err;
 
 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-			S_ISLNK(inode->i_mode)))
+	      S_ISLNK(inode->i_mode)))
 		return;
 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
 		return;
 
 	lock_kernel();
-	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
-	{
-		if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
-			inode->i_size))
-		{
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
+		if (inode->i_sb->s_blocksize <
+		    (udf_file_entry_alloc_offset(inode) + inode->i_size)) {
 			udf_expand_file_adinicb(inode, inode->i_size, &err);
-			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
-			{
+			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
 				inode->i_size = UDF_I_LENALLOC(inode);
 				unlock_kernel();
 				return;
-			}
-			else
+			} else
 				udf_truncate_extents(inode);
-		}
-		else
-		{
+		} else {
 			offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
-			memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
+			memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) +
+			       offset, 0x00,
+			       inode->i_sb->s_blocksize - offset -
+			       udf_file_entry_alloc_offset(inode));
 			UDF_I_LENALLOC(inode) = inode->i_size;
 		}
-	}
-	else
-	{
-		block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
+	} else {
+		block_truncate_page(inode->i_mapping, inode->i_size,
+				    udf_get_block);
 		udf_truncate_extents(inode);
 	}
 
 	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
 	if (IS_SYNC(inode))
-		udf_sync_inode (inode);
+		udf_sync_inode(inode);
 	else
 		mark_inode_dirty(inode);
 	unlock_kernel();
 }
 
-static void
-__udf_read_inode(struct inode *inode)
+static void __udf_read_inode(struct inode *inode)
 {
 	struct buffer_head *bh = NULL;
 	struct fileEntry *fe;
@@ -1056,19 +1098,18 @@
 	 */
 	bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
 
-	if (!bh)
-	{
+	if (!bh) {
 		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
-			inode->i_ino);
+		       inode->i_ino);
 		make_bad_inode(inode);
 		return;
 	}
 
 	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
-		ident != TAG_IDENT_USE)
-	{
-		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
-			inode->i_ino, ident);
+	    ident != TAG_IDENT_USE) {
+		printk(KERN_ERR
+		       "udf: udf_read_inode(ino %ld) failed ident=%d\n",
+		       inode->i_ino, ident);
 		brelse(bh);
 		make_bad_inode(inode);
 		return;
@@ -1076,51 +1117,46 @@
 
 	fe = (struct fileEntry *)bh->b_data;
 
-	if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
-	{
+	if (le16_to_cpu(fe->icbTag.strategyType) == 4096) {
 		struct buffer_head *ibh = NULL, *nbh = NULL;
 		struct indirectEntry *ie;
 
-		ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
-		if (ident == TAG_IDENT_IE)
-		{
-			if (ibh)
-			{
+		ibh =
+		    udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1,
+				     &ident);
+		if (ident == TAG_IDENT_IE) {
+			if (ibh) {
 				kernel_lb_addr loc;
 				ie = (struct indirectEntry *)ibh->b_data;
 
 				loc = lelb_to_cpu(ie->indirectICB.extLocation);
 
 				if (ie->indirectICB.extLength &&
-					(nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
-				{
-					if (ident == TAG_IDENT_FE ||
-						ident == TAG_IDENT_EFE)
-					{
-						memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
+				    (nbh =
+				     udf_read_ptagged(inode->i_sb, loc, 0,
+						      &ident))) {
+					if (ident == TAG_IDENT_FE
+					    || ident == TAG_IDENT_EFE) {
+						memcpy(&UDF_I_LOCATION(inode),
+						       &loc,
+						       sizeof(kernel_lb_addr));
 						brelse(bh);
 						brelse(ibh);
 						brelse(nbh);
 						__udf_read_inode(inode);
 						return;
-					}
-					else
-					{
+					} else {
 						brelse(nbh);
 						brelse(ibh);
 					}
-				}
-				else
+				} else
 					brelse(ibh);
 			}
-		}
-		else
+		} else
 			brelse(ibh);
-	}
-	else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
-	{
+	} else if (le16_to_cpu(fe->icbTag.strategyType) != 4) {
 		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
-			le16_to_cpu(fe->icbTag.strategyType));
+		       le16_to_cpu(fe->icbTag.strategyType));
 		brelse(bh);
 		make_bad_inode(inode);
 		return;
@@ -1143,62 +1179,70 @@
 
 	if (le16_to_cpu(fe->icbTag.strategyType) == 4)
 		UDF_I_STRAT4096(inode) = 0;
-	else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
+	else			/* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
 		UDF_I_STRAT4096(inode) = 1;
 
-	UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
+	UDF_I_ALLOCTYPE(inode) =
+	    le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
 	UDF_I_UNIQUE(inode) = 0;
 	UDF_I_LENEATTR(inode) = 0;
 	UDF_I_LENEXTENTS(inode) = 0;
 	UDF_I_LENALLOC(inode) = 0;
 	UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
 	UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
-	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
-	{
+	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE) {
 		UDF_I_EFE(inode) = 1;
 		UDF_I_USE(inode) = 0;
-		if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)))
-		{
+		if (udf_alloc_i_data
+		    (inode,
+		     inode->i_sb->s_blocksize -
+		     sizeof(struct extendedFileEntry))) {
 			make_bad_inode(inode);
 			return;
 		}
-		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
-	}
-	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
-	{
+		memcpy(UDF_I_DATA(inode),
+		       bh->b_data + sizeof(struct extendedFileEntry),
+		       inode->i_sb->s_blocksize -
+		       sizeof(struct extendedFileEntry));
+	} else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE) {
 		UDF_I_EFE(inode) = 0;
 		UDF_I_USE(inode) = 0;
-		if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct fileEntry)))
-		{
+		if (udf_alloc_i_data
+		    (inode,
+		     inode->i_sb->s_blocksize - sizeof(struct fileEntry))) {
 			make_bad_inode(inode);
 			return;
 		}
-		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
-	}
-	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
-	{
+		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry),
+		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
+	} else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE) {
 		UDF_I_EFE(inode) = 0;
 		UDF_I_USE(inode) = 1;
 		UDF_I_LENALLOC(inode) =
-			le32_to_cpu(
-				((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
-		if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)))
-		{
+		    le32_to_cpu(((struct unallocSpaceEntry *)bh->b_data)->
+				lengthAllocDescs);
+		if (udf_alloc_i_data
+		    (inode,
+		     inode->i_sb->s_blocksize -
+		     sizeof(struct unallocSpaceEntry))) {
 			make_bad_inode(inode);
 			return;
 		}
-		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
+		memcpy(UDF_I_DATA(inode),
+		       bh->b_data + sizeof(struct unallocSpaceEntry),
+		       inode->i_sb->s_blocksize -
+		       sizeof(struct unallocSpaceEntry));
 		return;
 	}
 
 	inode->i_uid = le32_to_cpu(fe->uid);
 	if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
-					UDF_FLAG_UID_IGNORE))
+						 UDF_FLAG_UID_IGNORE))
 		inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
 
 	inode->i_gid = le32_to_cpu(fe->gid);
 	if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
-					UDF_FLAG_GID_IGNORE))
+						 UDF_FLAG_GID_IGNORE))
 		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
 
 	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
@@ -1211,41 +1255,31 @@
 	inode->i_mode = udf_convert_permissions(fe);
 	inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
 
-	if (UDF_I_EFE(inode) == 0)
-	{
+	if (UDF_I_EFE(inode) == 0) {
 		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
-			(inode->i_sb->s_blocksize_bits - 9);
+		    (inode->i_sb->s_blocksize_bits - 9);
 
-		if ( udf_stamp_to_time(&convtime, &convtime_usec,
-			lets_to_cpu(fe->accessTime)) )
-		{
+		if (udf_stamp_to_time(&convtime, &convtime_usec,
+				      lets_to_cpu(fe->accessTime))) {
 			inode->i_atime.tv_sec = convtime;
 			inode->i_atime.tv_nsec = convtime_usec * 1000;
-		}
-		else
-		{
+		} else {
 			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
 		}
 
-		if ( udf_stamp_to_time(&convtime, &convtime_usec,
-			lets_to_cpu(fe->modificationTime)) )
-		{
+		if (udf_stamp_to_time(&convtime, &convtime_usec,
+				      lets_to_cpu(fe->modificationTime))) {
 			inode->i_mtime.tv_sec = convtime;
 			inode->i_mtime.tv_nsec = convtime_usec * 1000;
-		}
-		else
-		{
+		} else {
 			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
 		}
 
-		if ( udf_stamp_to_time(&convtime, &convtime_usec,
-			lets_to_cpu(fe->attrTime)) )
-		{
+		if (udf_stamp_to_time(&convtime, &convtime_usec,
+				      lets_to_cpu(fe->attrTime))) {
 			inode->i_ctime.tv_sec = convtime;
 			inode->i_ctime.tv_nsec = convtime_usec * 1000;
-		}
-		else
-		{
+		} else {
 			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
 		}
 
@@ -1253,65 +1287,51 @@
 		UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
 		UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
 		offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
-	}
-	else
-	{
+	} else {
 		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
-			(inode->i_sb->s_blocksize_bits - 9);
+		    (inode->i_sb->s_blocksize_bits - 9);
 
-		if ( udf_stamp_to_time(&convtime, &convtime_usec,
-			lets_to_cpu(efe->accessTime)) )
-		{
+		if (udf_stamp_to_time(&convtime, &convtime_usec,
+				      lets_to_cpu(efe->accessTime))) {
 			inode->i_atime.tv_sec = convtime;
 			inode->i_atime.tv_nsec = convtime_usec * 1000;
-		}
-		else
-		{
+		} else {
 			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
 		}
 
-		if ( udf_stamp_to_time(&convtime, &convtime_usec,
-			lets_to_cpu(efe->modificationTime)) )
-		{
+		if (udf_stamp_to_time(&convtime, &convtime_usec,
+				      lets_to_cpu(efe->modificationTime))) {
 			inode->i_mtime.tv_sec = convtime;
 			inode->i_mtime.tv_nsec = convtime_usec * 1000;
-		}
-		else
-		{
+		} else {
 			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
 		}
 
-		if ( udf_stamp_to_time(&convtime, &convtime_usec,
-			lets_to_cpu(efe->createTime)) )
-		{
+		if (udf_stamp_to_time(&convtime, &convtime_usec,
+				      lets_to_cpu(efe->createTime))) {
 			UDF_I_CRTIME(inode).tv_sec = convtime;
 			UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
-		}
-		else
-		{
+		} else {
 			UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
 		}
 
-		if ( udf_stamp_to_time(&convtime, &convtime_usec,
-			lets_to_cpu(efe->attrTime)) )
-		{
+		if (udf_stamp_to_time(&convtime, &convtime_usec,
+				      lets_to_cpu(efe->attrTime))) {
 			inode->i_ctime.tv_sec = convtime;
 			inode->i_ctime.tv_nsec = convtime_usec * 1000;
-		}
-		else
-		{
+		} else {
 			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
 		}
 
 		UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
 		UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
 		UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
-		offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
+		offset =
+		    sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
 	}
 
-	switch (fe->icbTag.fileType)
-	{
-		case ICBTAG_FILE_TYPE_DIRECTORY:
+	switch (fe->icbTag.fileType) {
+	case ICBTAG_FILE_TYPE_DIRECTORY:
 		{
 			inode->i_op = &udf_dir_inode_operations;
 			inode->i_fop = &udf_dir_operations;
@@ -1319,9 +1339,9 @@
 			inc_nlink(inode);
 			break;
 		}
-		case ICBTAG_FILE_TYPE_REALTIME:
-		case ICBTAG_FILE_TYPE_REGULAR:
-		case ICBTAG_FILE_TYPE_UNDEF:
+	case ICBTAG_FILE_TYPE_REALTIME:
+	case ICBTAG_FILE_TYPE_REGULAR:
+	case ICBTAG_FILE_TYPE_UNDEF:
 		{
 			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
 				inode->i_data.a_ops = &udf_adinicb_aops;
@@ -1332,56 +1352,54 @@
 			inode->i_mode |= S_IFREG;
 			break;
 		}
-		case ICBTAG_FILE_TYPE_BLOCK:
+	case ICBTAG_FILE_TYPE_BLOCK:
 		{
 			inode->i_mode |= S_IFBLK;
 			break;
 		}
-		case ICBTAG_FILE_TYPE_CHAR:
+	case ICBTAG_FILE_TYPE_CHAR:
 		{
 			inode->i_mode |= S_IFCHR;
 			break;
 		}
-		case ICBTAG_FILE_TYPE_FIFO:
+	case ICBTAG_FILE_TYPE_FIFO:
 		{
 			init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
 			break;
 		}
-		case ICBTAG_FILE_TYPE_SOCKET:
+	case ICBTAG_FILE_TYPE_SOCKET:
 		{
 			init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
 			break;
 		}
-		case ICBTAG_FILE_TYPE_SYMLINK:
+	case ICBTAG_FILE_TYPE_SYMLINK:
 		{
 			inode->i_data.a_ops = &udf_symlink_aops;
 			inode->i_op = &page_symlink_inode_operations;
-			inode->i_mode = S_IFLNK|S_IRWXUGO;
+			inode->i_mode = S_IFLNK | S_IRWXUGO;
 			break;
 		}
-		default:
+	default:
 		{
-			printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
-				inode->i_ino, fe->icbTag.fileType);
+			printk(KERN_ERR
+			       "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
+			       inode->i_ino, fe->icbTag.fileType);
 			make_bad_inode(inode);
 			return;
 		}
 	}
-	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
-	{
-		struct deviceSpec *dsea =
-			(struct deviceSpec *)
-				udf_get_extendedattr(inode, 12, 1);
+	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+		struct deviceSpec *dsea = (struct deviceSpec *)
+		    udf_get_extendedattr(inode, 12, 1);
 
-		if (dsea)
-		{
-			init_special_inode(inode, inode->i_mode, MKDEV(
-				le32_to_cpu(dsea->majorDeviceIdent),
-				le32_to_cpu(dsea->minorDeviceIdent)));
+		if (dsea) {
+			init_special_inode(inode, inode->i_mode,
+					   MKDEV(le32_to_cpu
+						 (dsea->majorDeviceIdent),
+						 le32_to_cpu(dsea->
+							     minorDeviceIdent)));
 			/* Developer ID ??? */
-		}
-		else
-		{
+		} else {
 			make_bad_inode(inode);
 		}
 	}
@@ -1391,9 +1409,9 @@
 {
 	UDF_I_DATA(inode) = kmalloc(size, GFP_KERNEL);
 
-	if (!UDF_I_DATA(inode))
-	{
-		printk(KERN_ERR "udf:udf_alloc_i_data (ino %ld) no free memory\n",
+	if (!UDF_I_DATA(inode)) {
+		printk(KERN_ERR
+		       "udf:udf_alloc_i_data (ino %ld) no free memory\n",
 		       inode->i_ino);
 		return -ENOMEM;
 	}
@@ -1401,8 +1419,7 @@
 	return 0;
 }
 
-static mode_t
-udf_convert_permissions(struct fileEntry *fe)
+static mode_t udf_convert_permissions(struct fileEntry *fe)
 {
 	mode_t mode;
 	uint32_t permissions;
@@ -1411,12 +1428,12 @@
 	permissions = le32_to_cpu(fe->permissions);
 	flags = le16_to_cpu(fe->icbTag.flags);
 
-	mode =	(( permissions      ) & S_IRWXO) |
-		(( permissions >> 2 ) & S_IRWXG) |
-		(( permissions >> 4 ) & S_IRWXU) |
-		(( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
-		(( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
-		(( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
+	mode = ((permissions) & S_IRWXO) |
+	    ((permissions >> 2) & S_IRWXG) |
+	    ((permissions >> 4) & S_IRWXU) |
+	    ((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
+	    ((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
+	    ((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
 
 	return mode;
 }
@@ -1436,7 +1453,7 @@
  *	Written, tested, and released.
  */
 
-int udf_write_inode(struct inode * inode, int sync)
+int udf_write_inode(struct inode *inode, int sync)
 {
 	int ret;
 	lock_kernel();
@@ -1445,13 +1462,12 @@
 	return ret;
 }
 
-int udf_sync_inode(struct inode * inode)
+int udf_sync_inode(struct inode *inode)
 {
 	return udf_update_inode(inode, 1);
 }
 
-static int
-udf_update_inode(struct inode *inode, int do_sync)
+static int udf_update_inode(struct inode *inode, int do_sync)
 {
 	struct buffer_head *bh = NULL;
 	struct fileEntry *fe;
@@ -1464,10 +1480,10 @@
 	int err = 0;
 
 	bh = udf_tread(inode->i_sb,
-		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
+		       udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode),
+					 0));
 
-	if (!bh)
-	{
+	if (!bh) {
 		udf_debug("bread failure\n");
 		return -EIO;
 	}
@@ -1477,23 +1493,29 @@
 	fe = (struct fileEntry *)bh->b_data;
 	efe = (struct extendedFileEntry *)bh->b_data;
 
-	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
-	{
+	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE) {
 		struct unallocSpaceEntry *use =
-			(struct unallocSpaceEntry *)bh->b_data;
+		    (struct unallocSpaceEntry *)bh->b_data;
 
 		use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
-		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
-		crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
-			sizeof(tag);
-		use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
+		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
+		       UDF_I_DATA(inode),
+		       inode->i_sb->s_blocksize -
+		       sizeof(struct unallocSpaceEntry));
+		crclen =
+		    sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
+		    sizeof(tag);
+		use->descTag.tagLocation =
+		    cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
 		use->descTag.descCRCLength = cpu_to_le16(crclen);
-		use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
+		use->descTag.descCRC =
+		    cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
 
 		use->descTag.tagChecksum = 0;
-		for (i=0; i<16; i++)
+		for (i = 0; i < 16; i++)
 			if (i != 4)
-				use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
+				use->descTag.tagChecksum +=
+				    ((uint8_t *) & (use->descTag))[i];
 
 		mark_buffer_dirty(bh);
 		brelse(bh);
@@ -1502,20 +1524,21 @@
 
 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
 		fe->uid = cpu_to_le32(-1);
-	else fe->uid = cpu_to_le32(inode->i_uid);
+	else
+		fe->uid = cpu_to_le32(inode->i_uid);
 
 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
 		fe->gid = cpu_to_le32(-1);
-	else fe->gid = cpu_to_le32(inode->i_gid);
+	else
+		fe->gid = cpu_to_le32(inode->i_gid);
 
-	udfperms =	((inode->i_mode & S_IRWXO)     ) |
-			((inode->i_mode & S_IRWXG) << 2) |
-			((inode->i_mode & S_IRWXU) << 4);
+	udfperms = ((inode->i_mode & S_IRWXO)) |
+	    ((inode->i_mode & S_IRWXG) << 2) | ((inode->i_mode & S_IRWXU) << 4);
 
-	udfperms |=	(le32_to_cpu(fe->permissions) &
-			(FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
-			 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
-			 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
+	udfperms |= (le32_to_cpu(fe->permissions) &
+		     (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
+		      FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
+		      FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
 	fe->permissions = cpu_to_le32(udfperms);
 
 	if (S_ISDIR(inode->i_mode))
@@ -1525,26 +1548,24 @@
 
 	fe->informationLength = cpu_to_le64(inode->i_size);
 
-	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
-	{
+	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
 		regid *eid;
-		struct deviceSpec *dsea =
-			(struct deviceSpec *)
-				udf_get_extendedattr(inode, 12, 1);
+		struct deviceSpec *dsea = (struct deviceSpec *)
+		    udf_get_extendedattr(inode, 12, 1);
 
-		if (!dsea)
-		{
+		if (!dsea) {
 			dsea = (struct deviceSpec *)
-				udf_add_extendedattr(inode,
-					sizeof(struct deviceSpec) +
-					sizeof(regid), 12, 0x3);
+			    udf_add_extendedattr(inode,
+						 sizeof(struct deviceSpec) +
+						 sizeof(regid), 12, 0x3);
 			dsea->attrType = cpu_to_le32(12);
 			dsea->attrSubtype = 1;
-			dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
-				sizeof(regid));
+			dsea->attrLength =
+			    cpu_to_le32(sizeof(struct deviceSpec) +
+					sizeof(regid));
 			dsea->impUseLength = cpu_to_le32(sizeof(regid));
 		}
-		eid = (regid *)dsea->impUse;
+		eid = (regid *) dsea->impUse;
 		memset(eid, 0, sizeof(regid));
 		strcpy(eid->ident, UDF_ID_DEVELOPER);
 		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
@@ -1553,12 +1574,13 @@
 		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
 	}
 
-	if (UDF_I_EFE(inode) == 0)
-	{
-		memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
-		fe->logicalBlocksRecorded = cpu_to_le64(
-			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
-			(inode->i_sb->s_blocksize_bits - 9));
+	if (UDF_I_EFE(inode) == 0) {
+		memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode),
+		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
+		fe->logicalBlocksRecorded =
+		    cpu_to_le64((inode->i_blocks +
+				 (1 << (inode->i_sb->s_blocksize_bits - 9)) -
+				 1) >> (inode->i_sb->s_blocksize_bits - 9));
 
 		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
 			fe->accessTime = cpu_to_lets(cpu_time);
@@ -1575,31 +1597,34 @@
 		fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
 		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
 		crclen = sizeof(struct fileEntry);
-	}
-	else
-	{
-		memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
+	} else {
+		memcpy(bh->b_data + sizeof(struct extendedFileEntry),
+		       UDF_I_DATA(inode),
+		       inode->i_sb->s_blocksize -
+		       sizeof(struct extendedFileEntry));
 		efe->objectSize = cpu_to_le64(inode->i_size);
-		efe->logicalBlocksRecorded = cpu_to_le64(
-			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
-			(inode->i_sb->s_blocksize_bits - 9));
+		efe->logicalBlocksRecorded = cpu_to_le64((inode->i_blocks +
+							  (1 <<
+							   (inode->i_sb->
+							    s_blocksize_bits -
+							    9)) -
+							  1) >> (inode->i_sb->
+								 s_blocksize_bits
+								 - 9));
 
 		if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
-			(UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
-			 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
-		{
+		    (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
+		     UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec)) {
 			UDF_I_CRTIME(inode) = inode->i_atime;
 		}
 		if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
-			(UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
-			 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
-		{
+		    (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
+		     UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec)) {
 			UDF_I_CRTIME(inode) = inode->i_mtime;
 		}
 		if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
-			(UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
-			 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
-		{
+		    (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
+		     UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec)) {
 			UDF_I_CRTIME(inode) = inode->i_ctime;
 		}
 
@@ -1622,14 +1647,11 @@
 		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
 		crclen = sizeof(struct extendedFileEntry);
 	}
-	if (UDF_I_STRAT4096(inode))
-	{
+	if (UDF_I_STRAT4096(inode)) {
 		fe->icbTag.strategyType = cpu_to_le16(4096);
 		fe->icbTag.strategyParameter = cpu_to_le16(1);
 		fe->icbTag.numEntries = cpu_to_le16(2);
-	}
-	else
-	{
+	} else {
 		fe->icbTag.strategyType = cpu_to_le16(4);
 		fe->icbTag.numEntries = cpu_to_le16(1);
 	}
@@ -1649,13 +1671,13 @@
 	else if (S_ISSOCK(inode->i_mode))
 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
 
-	icbflags =	UDF_I_ALLOCTYPE(inode) |
-			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
-			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
-			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
-			(le16_to_cpu(fe->icbTag.flags) &
-				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
-				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
+	icbflags = UDF_I_ALLOCTYPE(inode) |
+	    ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
+	    ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
+	    ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
+	    (le16_to_cpu(fe->icbTag.flags) &
+	     ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
+	       ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
 
 	fe->icbTag.flags = cpu_to_le16(icbflags);
 	if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
@@ -1663,25 +1685,26 @@
 	else
 		fe->descTag.descVersion = cpu_to_le16(2);
 	fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
-	fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
+	fe->descTag.tagLocation =
+	    cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
 	crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
 	fe->descTag.descCRCLength = cpu_to_le16(crclen);
-	fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
+	fe->descTag.descCRC =
+	    cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
 
 	fe->descTag.tagChecksum = 0;
-	for (i=0; i<16; i++)
+	for (i = 0; i < 16; i++)
 		if (i != 4)
-			fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
+			fe->descTag.tagChecksum +=
+			    ((uint8_t *) & (fe->descTag))[i];
 
 	/* write the data blocks */
 	mark_buffer_dirty(bh);
-	if (do_sync)
-	{
+	if (do_sync) {
 		sync_dirty_buffer(bh);
-		if (buffer_req(bh) && !buffer_uptodate(bh))
-		{
+		if (buffer_req(bh) && !buffer_uptodate(bh)) {
 			printk("IO error syncing udf inode [%s:%08lx]\n",
-				inode->i_sb->s_id, inode->i_ino);
+			       inode->i_sb->s_id, inode->i_ino);
 			err = -EIO;
 		}
 	}
@@ -1689,8 +1712,7 @@
 	return err;
 }
 
-struct inode *
-udf_iget(struct super_block *sb, kernel_lb_addr ino)
+struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino)
 {
 	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
 	struct inode *inode = iget_locked(sb, block);
@@ -1707,22 +1729,23 @@
 	if (is_bad_inode(inode))
 		goto out_iput;
 
-	if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
+	if (ino.logicalBlockNum >=
+	    UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
 		udf_debug("block=%d, partition=%d out of range\n",
-			ino.logicalBlockNum, ino.partitionReferenceNum);
+			  ino.logicalBlockNum, ino.partitionReferenceNum);
 		make_bad_inode(inode);
 		goto out_iput;
 	}
 
 	return inode;
 
- out_iput:
+      out_iput:
 	iput(inode);
 	return NULL;
 }
 
-int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
-	kernel_lb_addr eloc, uint32_t elen, int inc)
+int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
+		    kernel_lb_addr eloc, uint32_t elen, int inc)
 {
 	int adsize;
 	short_ad *sad = NULL;
@@ -1732,7 +1755,9 @@
 	uint8_t *ptr;
 
 	if (!epos->bh)
-		ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
+		ptr =
+		    UDF_I_DATA(inode) + epos->offset -
+		    udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
 	else
 		ptr = epos->bh->b_data + epos->offset;
 
@@ -1743,21 +1768,24 @@
 	else
 		return -1;
 
-	if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize)
-	{
+	if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
 		char *sptr, *dptr;
 		struct buffer_head *nbh;
 		int err, loffset;
 		kernel_lb_addr obloc = epos->block;
 
-		if (!(epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
-			obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
-		{
+		if (!
+		    (epos->block.logicalBlockNum =
+		     udf_new_block(inode->i_sb, NULL,
+				   obloc.partitionReferenceNum,
+				   obloc.logicalBlockNum, &err))) {
 			return -1;
 		}
-		if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
-			epos->block, 0))))
-		{
+		if (!
+		    (nbh =
+		     udf_tgetblk(inode->i_sb,
+				 udf_get_lb_pblock(inode->i_sb, epos->block,
+						   0)))) {
 			return -1;
 		}
 		lock_buffer(nbh);
@@ -1768,144 +1796,142 @@
 
 		aed = (struct allocExtDesc *)(nbh->b_data);
 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
-			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
-		if (epos->offset + adsize > inode->i_sb->s_blocksize)
-		{
+			aed->previousAllocExtLocation =
+			    cpu_to_le32(obloc.logicalBlockNum);
+		if (epos->offset + adsize > inode->i_sb->s_blocksize) {
 			loffset = epos->offset;
 			aed->lengthAllocDescs = cpu_to_le32(adsize);
 			sptr = ptr - adsize;
 			dptr = nbh->b_data + sizeof(struct allocExtDesc);
 			memcpy(dptr, sptr, adsize);
 			epos->offset = sizeof(struct allocExtDesc) + adsize;
-		}
-		else
-		{
+		} else {
 			loffset = epos->offset + adsize;
 			aed->lengthAllocDescs = cpu_to_le32(0);
 			sptr = ptr;
 			epos->offset = sizeof(struct allocExtDesc);
 
-			if (epos->bh)
-			{
+			if (epos->bh) {
 				aed = (struct allocExtDesc *)epos->bh->b_data;
 				aed->lengthAllocDescs =
-					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
-			}
-			else
-			{
+				    cpu_to_le32(le32_to_cpu
+						(aed->lengthAllocDescs) +
+						adsize);
+			} else {
 				UDF_I_LENALLOC(inode) += adsize;
 				mark_inode_dirty(inode);
 			}
 		}
 		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
 			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
-				epos->block.logicalBlockNum, sizeof(tag));
+				    epos->block.logicalBlockNum, sizeof(tag));
 		else
 			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
-				epos->block.logicalBlockNum, sizeof(tag));
-		switch (UDF_I_ALLOCTYPE(inode))
-		{
-			case ICBTAG_FLAG_AD_SHORT:
+				    epos->block.logicalBlockNum, sizeof(tag));
+		switch (UDF_I_ALLOCTYPE(inode)) {
+		case ICBTAG_FLAG_AD_SHORT:
 			{
-				sad = (short_ad *)sptr;
-				sad->extLength = cpu_to_le32(
-					EXT_NEXT_EXTENT_ALLOCDECS |
-					inode->i_sb->s_blocksize);
-				sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum);
+				sad = (short_ad *) sptr;
+				sad->extLength =
+				    cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
+						inode->i_sb->s_blocksize);
+				sad->extPosition =
+				    cpu_to_le32(epos->block.logicalBlockNum);
 				break;
 			}
-			case ICBTAG_FLAG_AD_LONG:
+		case ICBTAG_FLAG_AD_LONG:
 			{
-				lad = (long_ad *)sptr;
-				lad->extLength = cpu_to_le32(
-					EXT_NEXT_EXTENT_ALLOCDECS |
-					inode->i_sb->s_blocksize);
+				lad = (long_ad *) sptr;
+				lad->extLength =
+				    cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
+						inode->i_sb->s_blocksize);
 				lad->extLocation = cpu_to_lelb(epos->block);
 				memset(lad->impUse, 0x00, sizeof(lad->impUse));
 				break;
 			}
 		}
-		if (epos->bh)
-		{
-			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+		if (epos->bh) {
+			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)
+			    || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
 				udf_update_tag(epos->bh->b_data, loffset);
 			else
-				udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
+				udf_update_tag(epos->bh->b_data,
+					       sizeof(struct allocExtDesc));
 			mark_buffer_dirty_inode(epos->bh, inode);
 			brelse(epos->bh);
-		}
-		else
+		} else
 			mark_inode_dirty(inode);
 		epos->bh = nbh;
 	}
 
 	etype = udf_write_aext(inode, epos, eloc, elen, inc);
 
-	if (!epos->bh)
-	{
+	if (!epos->bh) {
 		UDF_I_LENALLOC(inode) += adsize;
 		mark_inode_dirty(inode);
-	}
-	else
-	{
+	} else {
 		aed = (struct allocExtDesc *)epos->bh->b_data;
 		aed->lengthAllocDescs =
-			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
-		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
-			udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 0 : adsize));
+		    cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
+		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)
+		    || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+			udf_update_tag(epos->bh->b_data,
+				       epos->offset + (inc ? 0 : adsize));
 		else
-			udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
+			udf_update_tag(epos->bh->b_data,
+				       sizeof(struct allocExtDesc));
 		mark_buffer_dirty_inode(epos->bh, inode);
 	}
 
 	return etype;
 }
 
-int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
-    kernel_lb_addr eloc, uint32_t elen, int inc)
+int8_t udf_write_aext(struct inode * inode, struct extent_position * epos,
+		      kernel_lb_addr eloc, uint32_t elen, int inc)
 {
 	int adsize;
 	uint8_t *ptr;
 
 	if (!epos->bh)
-		ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
+		ptr =
+		    UDF_I_DATA(inode) + epos->offset -
+		    udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
 	else
 		ptr = epos->bh->b_data + epos->offset;
 
-	switch (UDF_I_ALLOCTYPE(inode))
-	{
-		case ICBTAG_FLAG_AD_SHORT:
+	switch (UDF_I_ALLOCTYPE(inode)) {
+	case ICBTAG_FLAG_AD_SHORT:
 		{
-			short_ad *sad = (short_ad *)ptr;
+			short_ad *sad = (short_ad *) ptr;
 			sad->extLength = cpu_to_le32(elen);
 			sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
 			adsize = sizeof(short_ad);
 			break;
 		}
-		case ICBTAG_FLAG_AD_LONG:
+	case ICBTAG_FLAG_AD_LONG:
 		{
-			long_ad *lad = (long_ad *)ptr;
+			long_ad *lad = (long_ad *) ptr;
 			lad->extLength = cpu_to_le32(elen);
 			lad->extLocation = cpu_to_lelb(eloc);
 			memset(lad->impUse, 0x00, sizeof(lad->impUse));
 			adsize = sizeof(long_ad);
 			break;
 		}
-		default:
-			return -1;
+	default:
+		return -1;
 	}
 
-	if (epos->bh)
-	{
-		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
-		{
-			struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data;
+	if (epos->bh) {
+		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)
+		    || UDF_SB_UDFREV(inode->i_sb) >= 0x0201) {
+			struct allocExtDesc *aed =
+			    (struct allocExtDesc *)epos->bh->b_data;
 			udf_update_tag(epos->bh->b_data,
-				le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
+				       le32_to_cpu(aed->lengthAllocDescs) +
+				       sizeof(struct allocExtDesc));
 		}
 		mark_buffer_dirty_inode(epos->bh, inode);
-	}
-	else
+	} else
 		mark_inode_dirty(inode);
 
 	if (inc)
@@ -1913,21 +1939,24 @@
 	return (elen >> 30);
 }
 
-int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
-	kernel_lb_addr *eloc, uint32_t *elen, int inc)
+int8_t udf_next_aext(struct inode * inode, struct extent_position * epos,
+		     kernel_lb_addr * eloc, uint32_t * elen, int inc)
 {
 	int8_t etype;
 
 	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
-		(EXT_NEXT_EXTENT_ALLOCDECS >> 30))
-	{
+	       (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
 		epos->block = *eloc;
 		epos->offset = sizeof(struct allocExtDesc);
 		brelse(epos->bh);
-		if (!(epos->bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, epos->block, 0))))
-		{
+		if (!
+		    (epos->bh =
+		     udf_tread(inode->i_sb,
+			       udf_get_lb_pblock(inode->i_sb, epos->block,
+						 0)))) {
 			udf_debug("reading block %d failed!\n",
-				udf_get_lb_pblock(inode->i_sb, epos->block, 0));
+				  udf_get_lb_pblock(inode->i_sb, epos->block,
+						    0));
 			return -1;
 		}
 	}
@@ -1935,58 +1964,71 @@
 	return etype;
 }
 
-int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
-	kernel_lb_addr *eloc, uint32_t *elen, int inc)
+int8_t udf_current_aext(struct inode * inode, struct extent_position * epos,
+			kernel_lb_addr * eloc, uint32_t * elen, int inc)
 {
 	int alen;
 	int8_t etype;
 	uint8_t *ptr;
 
-	if (!epos->bh)
-	{
+	if (!epos->bh) {
 		if (!epos->offset)
 			epos->offset = udf_file_entry_alloc_offset(inode);
-		ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
-		alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
-	}
-	else
-	{
+		ptr =
+		    UDF_I_DATA(inode) + epos->offset -
+		    udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
+		alen =
+		    udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
+	} else {
 		if (!epos->offset)
 			epos->offset = sizeof(struct allocExtDesc);
 		ptr = epos->bh->b_data + epos->offset;
-		alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->lengthAllocDescs);
+		alen =
+		    sizeof(struct allocExtDesc) +
+		    le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
+				lengthAllocDescs);
 	}
 
-	switch (UDF_I_ALLOCTYPE(inode))
-	{
-		case ICBTAG_FLAG_AD_SHORT:
+	switch (UDF_I_ALLOCTYPE(inode)) {
+	case ICBTAG_FLAG_AD_SHORT:
 		{
 			short_ad *sad;
 
-			if (!(sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc)))
+			if (!
+			    (sad =
+			     udf_get_fileshortad(ptr, alen, &epos->offset,
+						 inc)))
 				return -1;
 
 			etype = le32_to_cpu(sad->extLength) >> 30;
 			eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
-			eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
-			*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
+			eloc->partitionReferenceNum =
+			    UDF_I_LOCATION(inode).partitionReferenceNum;
+			*elen =
+			    le32_to_cpu(sad->
+					extLength) & UDF_EXTENT_LENGTH_MASK;
 			break;
 		}
-		case ICBTAG_FLAG_AD_LONG:
+	case ICBTAG_FLAG_AD_LONG:
 		{
 			long_ad *lad;
 
-			if (!(lad = udf_get_filelongad(ptr, alen, &epos->offset, inc)))
+			if (!
+			    (lad =
+			     udf_get_filelongad(ptr, alen, &epos->offset, inc)))
 				return -1;
 
 			etype = le32_to_cpu(lad->extLength) >> 30;
 			*eloc = lelb_to_cpu(lad->extLocation);
-			*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
+			*elen =
+			    le32_to_cpu(lad->
+					extLength) & UDF_EXTENT_LENGTH_MASK;
 			break;
 		}
-		default:
+	default:
 		{
-			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
+			udf_debug("alloc_type = %d unsupported\n",
+				  UDF_I_ALLOCTYPE(inode));
 			return -1;
 		}
 	}
@@ -2005,8 +2047,7 @@
 	if (epos.bh)
 		get_bh(epos.bh);
 
-	while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1)
-	{
+	while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
 		udf_write_aext(inode, &epos, neloc, nelen, 1);
 
 		neloc = oeloc;
@@ -2017,16 +2058,15 @@
 	return (nelen >> 30);
 }
 
-int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
-	kernel_lb_addr eloc, uint32_t elen)
+int8_t udf_delete_aext(struct inode * inode, struct extent_position epos,
+		       kernel_lb_addr eloc, uint32_t elen)
 {
 	struct extent_position oepos;
 	int adsize;
 	int8_t etype;
 	struct allocExtDesc *aed;
 
-	if (epos.bh)
-	{
+	if (epos.bh) {
 		get_bh(epos.bh);
 		get_bh(epos.bh);
 	}
@@ -2042,11 +2082,9 @@
 	if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
 		return -1;
 
-	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1)
-	{
+	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
 		udf_write_aext(inode, &oepos, eloc, (etype << 30) | elen, 1);
-		if (oepos.bh != epos.bh)
-		{
+		if (oepos.bh != epos.bh) {
 			oepos.block = epos.block;
 			brelse(oepos.bh);
 			get_bh(epos.bh);
@@ -2057,45 +2095,44 @@
 	memset(&eloc, 0x00, sizeof(kernel_lb_addr));
 	elen = 0;
 
-	if (epos.bh != oepos.bh)
-	{
+	if (epos.bh != oepos.bh) {
 		udf_free_blocks(inode->i_sb, inode, epos.block, 0, 1);
 		udf_write_aext(inode, &oepos, eloc, elen, 1);
 		udf_write_aext(inode, &oepos, eloc, elen, 1);
-		if (!oepos.bh)
-		{
+		if (!oepos.bh) {
 			UDF_I_LENALLOC(inode) -= (adsize * 2);
 			mark_inode_dirty(inode);
-		}
-		else
-		{
+		} else {
 			aed = (struct allocExtDesc *)oepos.bh->b_data;
 			aed->lengthAllocDescs =
-				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
-			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
-				udf_update_tag(oepos.bh->b_data, oepos.offset - (2*adsize));
+			    cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) -
+					(2 * adsize));
+			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)
+			    || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+				udf_update_tag(oepos.bh->b_data,
+					       oepos.offset - (2 * adsize));
 			else
-				udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
+				udf_update_tag(oepos.bh->b_data,
+					       sizeof(struct allocExtDesc));
 			mark_buffer_dirty_inode(oepos.bh, inode);
 		}
-	}
-	else
-	{
+	} else {
 		udf_write_aext(inode, &oepos, eloc, elen, 1);
-		if (!oepos.bh)
-		{
+		if (!oepos.bh) {
 			UDF_I_LENALLOC(inode) -= adsize;
 			mark_inode_dirty(inode);
-		}
-		else
-		{
+		} else {
 			aed = (struct allocExtDesc *)oepos.bh->b_data;
 			aed->lengthAllocDescs =
-				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
-			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
-				udf_update_tag(oepos.bh->b_data, epos.offset - adsize);
+			    cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) -
+					adsize);
+			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)
+			    || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+				udf_update_tag(oepos.bh->b_data,
+					       epos.offset - adsize);
 			else
-				udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
+				udf_update_tag(oepos.bh->b_data,
+					       sizeof(struct allocExtDesc));
 			mark_buffer_dirty_inode(oepos.bh, inode);
 		}
 	}
@@ -2105,14 +2142,15 @@
 	return (elen >> 30);
 }
 
-int8_t inode_bmap(struct inode *inode, sector_t block, struct extent_position *pos,
-	kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset)
+int8_t inode_bmap(struct inode * inode, sector_t block,
+		  struct extent_position * pos, kernel_lb_addr * eloc,
+		  uint32_t * elen, sector_t * offset)
 {
-	loff_t lbcount = 0, bcount = (loff_t)block << inode->i_sb->s_blocksize_bits;
+	loff_t lbcount = 0, bcount =
+	    (loff_t) block << inode->i_sb->s_blocksize_bits;
 	int8_t etype;
 
-	if (block < 0)
-	{
+	if (block < 0) {
 		printk(KERN_ERR "udf: inode_bmap: block < 0\n");
 		return -1;
 	}
@@ -2122,11 +2160,10 @@
 	pos->bh = NULL;
 	*elen = 0;
 
-	do
-	{
-		if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1)
-		{
-			*offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
+	do {
+		if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1) {
+			*offset =
+			    (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
 			UDF_I_LENEXTENTS(inode) = lbcount;
 			return -1;
 		}
@@ -2143,12 +2180,13 @@
 	kernel_lb_addr eloc;
 	uint32_t elen;
 	sector_t offset;
-	struct extent_position epos = { NULL, 0, { 0, 0}};
+	struct extent_position epos = { NULL, 0, {0, 0} };
 	int ret;
 
 	lock_kernel();
 
-	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
+	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
+	    (EXT_RECORDED_ALLOCATED >> 30))
 		ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
 	else
 		ret = 0;
diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c
index 0842161..4826c36 100644
--- a/fs/udf/lowlevel.c
+++ b/fs/udf/lowlevel.c
@@ -26,43 +26,38 @@
 #include <linux/udf_fs.h>
 #include "udf_sb.h"
 
-unsigned int 
-udf_get_last_session(struct super_block *sb)
+unsigned int udf_get_last_session(struct super_block *sb)
 {
 	struct cdrom_multisession ms_info;
 	unsigned int vol_desc_start;
 	struct block_device *bdev = sb->s_bdev;
 	int i;
 
-	vol_desc_start=0;
-	ms_info.addr_format=CDROM_LBA;
-	i = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long) &ms_info);
+	vol_desc_start = 0;
+	ms_info.addr_format = CDROM_LBA;
+	i = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long)&ms_info);
 
 #define WE_OBEY_THE_WRITTEN_STANDARDS 1
 
-	if (i == 0)
-	{
+	if (i == 0) {
 		udf_debug("XA disk: %s, vol_desc_start=%d\n",
-			(ms_info.xa_flag ? "yes" : "no"), ms_info.addr.lba);
+			  (ms_info.xa_flag ? "yes" : "no"), ms_info.addr.lba);
 #if WE_OBEY_THE_WRITTEN_STANDARDS
-		if (ms_info.xa_flag) /* necessary for a valid ms_info.addr */
+		if (ms_info.xa_flag)	/* necessary for a valid ms_info.addr */
 #endif
 			vol_desc_start = ms_info.addr.lba;
-	}
-	else
-	{
+	} else {
 		udf_debug("CDROMMULTISESSION not supported: rc=%d\n", i);
 	}
 	return vol_desc_start;
 }
 
-unsigned long
-udf_get_last_block(struct super_block *sb)
+unsigned long udf_get_last_block(struct super_block *sb)
 {
 	struct block_device *bdev = sb->s_bdev;
 	unsigned long lblock = 0;
 
-	if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long) &lblock))
+	if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long)&lblock))
 		lblock = bdev->bd_inode->i_size >> sb->s_blocksize_bits;
 
 	if (lblock)
diff --git a/fs/udf/misc.c b/fs/udf/misc.c
index a2b2a98..a7f5727 100644
--- a/fs/udf/misc.c
+++ b/fs/udf/misc.c
@@ -29,8 +29,7 @@
 #include "udf_i.h"
 #include "udf_sb.h"
 
-struct buffer_head *
-udf_tgetblk(struct super_block *sb, int block)
+struct buffer_head *udf_tgetblk(struct super_block *sb, int block)
 {
 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
 		return sb_getblk(sb, udf_fixed_to_variable(block));
@@ -38,8 +37,7 @@
 		return sb_getblk(sb, block);
 }
 
-struct buffer_head *
-udf_tread(struct super_block *sb, int block)
+struct buffer_head *udf_tread(struct super_block *sb, int block)
 {
 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
 		return sb_bread(sb, udf_fixed_to_variable(block));
@@ -47,9 +45,8 @@
 		return sb_bread(sb, block);
 }
 
-struct genericFormat *
-udf_add_extendedattr(struct inode * inode, uint32_t size, uint32_t type,
-	uint8_t loc)
+struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
+					   uint32_t type, uint8_t loc)
 {
 	uint8_t *ea = NULL, *ad = NULL;
 	int offset;
@@ -59,78 +56,76 @@
 	ea = UDF_I_DATA(inode);
 	if (UDF_I_LENEATTR(inode))
 		ad = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode);
-	else
-	{
+	else {
 		ad = ea;
 		size += sizeof(struct extendedAttrHeaderDesc);
 	}
 
 	offset = inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode) -
-		UDF_I_LENALLOC(inode);
+	    UDF_I_LENALLOC(inode);
 
 	/* TODO - Check for FreeEASpace */
 
-	if (loc & 0x01 && offset >= size)
-	{
+	if (loc & 0x01 && offset >= size) {
 		struct extendedAttrHeaderDesc *eahd;
 		eahd = (struct extendedAttrHeaderDesc *)ea;
 
-		if (UDF_I_LENALLOC(inode))
-		{
+		if (UDF_I_LENALLOC(inode)) {
 			memmove(&ad[size], ad, UDF_I_LENALLOC(inode));
 		}
 
-		if (UDF_I_LENEATTR(inode))
-		{
+		if (UDF_I_LENEATTR(inode)) {
 			/* check checksum/crc */
-			if (le16_to_cpu(eahd->descTag.tagIdent) != TAG_IDENT_EAHD ||
-				le32_to_cpu(eahd->descTag.tagLocation) != UDF_I_LOCATION(inode).logicalBlockNum)
-			{
+			if (le16_to_cpu(eahd->descTag.tagIdent) !=
+			    TAG_IDENT_EAHD
+			    || le32_to_cpu(eahd->descTag.tagLocation) !=
+			    UDF_I_LOCATION(inode).logicalBlockNum) {
 				return NULL;
 			}
-		}
-		else
-		{
+		} else {
 			size -= sizeof(struct extendedAttrHeaderDesc);
-			UDF_I_LENEATTR(inode) += sizeof(struct extendedAttrHeaderDesc);
+			UDF_I_LENEATTR(inode) +=
+			    sizeof(struct extendedAttrHeaderDesc);
 			eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD);
 			if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
 				eahd->descTag.descVersion = cpu_to_le16(3);
 			else
 				eahd->descTag.descVersion = cpu_to_le16(2);
-			eahd->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
-			eahd->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
+			eahd->descTag.tagSerialNum =
+			    cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
+			eahd->descTag.tagLocation =
+			    cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
 			eahd->impAttrLocation = cpu_to_le32(0xFFFFFFFF);
 			eahd->appAttrLocation = cpu_to_le32(0xFFFFFFFF);
 		}
 
 		offset = UDF_I_LENEATTR(inode);
-		if (type < 2048)
-		{
-			if (le32_to_cpu(eahd->appAttrLocation) < UDF_I_LENEATTR(inode))
-			{
-				uint32_t aal = le32_to_cpu(eahd->appAttrLocation);
-				memmove(&ea[offset - aal + size],
-					&ea[aal], offset - aal);
+		if (type < 2048) {
+			if (le32_to_cpu(eahd->appAttrLocation) <
+			    UDF_I_LENEATTR(inode)) {
+				uint32_t aal =
+				    le32_to_cpu(eahd->appAttrLocation);
+				memmove(&ea[offset - aal + size], &ea[aal],
+					offset - aal);
 				offset -= aal;
 				eahd->appAttrLocation = cpu_to_le32(aal + size);
 			}
-			if (le32_to_cpu(eahd->impAttrLocation) < UDF_I_LENEATTR(inode))
-			{
-				uint32_t ial = le32_to_cpu(eahd->impAttrLocation);
-				memmove(&ea[offset - ial + size],
-					&ea[ial], offset - ial);
+			if (le32_to_cpu(eahd->impAttrLocation) <
+			    UDF_I_LENEATTR(inode)) {
+				uint32_t ial =
+				    le32_to_cpu(eahd->impAttrLocation);
+				memmove(&ea[offset - ial + size], &ea[ial],
+					offset - ial);
 				offset -= ial;
 				eahd->impAttrLocation = cpu_to_le32(ial + size);
 			}
-		}
-		else if (type < 65536)
-		{
-			if (le32_to_cpu(eahd->appAttrLocation) < UDF_I_LENEATTR(inode))
-			{
-				uint32_t aal = le32_to_cpu(eahd->appAttrLocation);
-				memmove(&ea[offset - aal + size],
-					&ea[aal], offset - aal);
+		} else if (type < 65536) {
+			if (le32_to_cpu(eahd->appAttrLocation) <
+			    UDF_I_LENEATTR(inode)) {
+				uint32_t aal =
+				    le32_to_cpu(eahd->appAttrLocation);
+				memmove(&ea[offset - aal + size], &ea[aal],
+					offset - aal);
 				offset -= aal;
 				eahd->appAttrLocation = cpu_to_le32(aal + size);
 			}
@@ -138,22 +133,23 @@
 		/* rewrite CRC + checksum of eahd */
 		crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag);
 		eahd->descTag.descCRCLength = cpu_to_le16(crclen);
-		eahd->descTag.descCRC = cpu_to_le16(udf_crc((char *)eahd + sizeof(tag), crclen, 0));
+		eahd->descTag.descCRC =
+		    cpu_to_le16(udf_crc((char *)eahd + sizeof(tag), crclen, 0));
 		eahd->descTag.tagChecksum = 0;
-		for (i=0; i<16; i++)
+		for (i = 0; i < 16; i++)
 			if (i != 4)
-				eahd->descTag.tagChecksum += ((uint8_t *)&(eahd->descTag))[i];
+				eahd->descTag.tagChecksum +=
+				    ((uint8_t *) & (eahd->descTag))[i];
 		UDF_I_LENEATTR(inode) += size;
 		return (struct genericFormat *)&ea[offset];
 	}
-	if (loc & 0x02)
-	{
+	if (loc & 0x02) {
 	}
 	return NULL;
 }
 
-struct genericFormat *
-udf_get_extendedattr(struct inode *inode, uint32_t type, uint8_t subtype)
+struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
+					   uint8_t subtype)
 {
 	struct genericFormat *gaf;
 	uint8_t *ea = NULL;
@@ -161,18 +157,17 @@
 
 	ea = UDF_I_DATA(inode);
 
-	if (UDF_I_LENEATTR(inode))
-	{
+	if (UDF_I_LENEATTR(inode)) {
 		struct extendedAttrHeaderDesc *eahd;
 		eahd = (struct extendedAttrHeaderDesc *)ea;
 
 		/* check checksum/crc */
 		if (le16_to_cpu(eahd->descTag.tagIdent) != TAG_IDENT_EAHD ||
-			le32_to_cpu(eahd->descTag.tagLocation) != UDF_I_LOCATION(inode).logicalBlockNum)
-		{
+		    le32_to_cpu(eahd->descTag.tagLocation) !=
+		    UDF_I_LOCATION(inode).logicalBlockNum) {
 			return NULL;
 		}
-	
+
 		if (type < 2048)
 			offset = sizeof(struct extendedAttrHeaderDesc);
 		else if (type < 65536)
@@ -180,10 +175,10 @@
 		else
 			offset = le32_to_cpu(eahd->appAttrLocation);
 
-		while (offset < UDF_I_LENEATTR(inode))
-		{
+		while (offset < UDF_I_LENEATTR(inode)) {
 			gaf = (struct genericFormat *)&ea[offset];
-			if (le32_to_cpu(gaf->attrType) == type && gaf->attrSubtype == subtype)
+			if (le32_to_cpu(gaf->attrType) == type
+			    && gaf->attrSubtype == subtype)
 				return gaf;
 			else
 				offset += le32_to_cpu(gaf->attrLength);
@@ -202,8 +197,8 @@
  *	July 1, 1997 - Andrew E. Mileski
  *	Written, tested, and released.
  */
-struct buffer_head *
-udf_read_tagged(struct super_block *sb, uint32_t block, uint32_t location, uint16_t *ident)
+struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
+				    uint32_t location, uint16_t * ident)
 {
 	tag *tag_p;
 	struct buffer_head *bh = NULL;
@@ -215,29 +210,29 @@
 		return NULL;
 
 	bh = udf_tread(sb, block + UDF_SB_SESSION(sb));
-	if (!bh)
-	{
-		udf_debug("block=%d, location=%d: read failed\n", block + UDF_SB_SESSION(sb), location);
+	if (!bh) {
+		udf_debug("block=%d, location=%d: read failed\n",
+			  block + UDF_SB_SESSION(sb), location);
 		return NULL;
 	}
 
-	tag_p = (tag *)(bh->b_data);
+	tag_p = (tag *) (bh->b_data);
 
 	*ident = le16_to_cpu(tag_p->tagIdent);
 
-	if ( location != le32_to_cpu(tag_p->tagLocation) )
-	{
+	if (location != le32_to_cpu(tag_p->tagLocation)) {
 		udf_debug("location mismatch block %u, tag %u != %u\n",
-			block + UDF_SB_SESSION(sb), le32_to_cpu(tag_p->tagLocation), location);
+			  block + UDF_SB_SESSION(sb),
+			  le32_to_cpu(tag_p->tagLocation), location);
 		goto error_out;
 	}
-	
+
 	/* Verify the tag checksum */
 	checksum = 0U;
 	for (i = 0; i < 4; i++)
-		checksum += (uint8_t)(bh->b_data[i]);
+		checksum += (uint8_t) (bh->b_data[i]);
 	for (i = 5; i < 16; i++)
-		checksum += (uint8_t)(bh->b_data[i]);
+		checksum += (uint8_t) (bh->b_data[i]);
 	if (checksum != tag_p->tagChecksum) {
 		printk(KERN_ERR "udf: tag checksum failed block %d\n", block);
 		goto error_out;
@@ -245,38 +240,39 @@
 
 	/* Verify the tag version */
 	if (le16_to_cpu(tag_p->descVersion) != 0x0002U &&
-		le16_to_cpu(tag_p->descVersion) != 0x0003U)
-	{
+	    le16_to_cpu(tag_p->descVersion) != 0x0003U) {
 		udf_debug("tag version 0x%04x != 0x0002 || 0x0003 block %d\n",
-			le16_to_cpu(tag_p->descVersion), block);
+			  le16_to_cpu(tag_p->descVersion), block);
 		goto error_out;
 	}
 
 	/* Verify the descriptor CRC */
 	if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize ||
-		le16_to_cpu(tag_p->descCRC) == udf_crc(bh->b_data + sizeof(tag),
-			le16_to_cpu(tag_p->descCRCLength), 0))
-	{
+	    le16_to_cpu(tag_p->descCRC) == udf_crc(bh->b_data + sizeof(tag),
+						   le16_to_cpu(tag_p->
+							       descCRCLength),
+						   0)) {
 		return bh;
 	}
 	udf_debug("Crc failure block %d: crc = %d, crclen = %d\n",
-		block + UDF_SB_SESSION(sb), le16_to_cpu(tag_p->descCRC), le16_to_cpu(tag_p->descCRCLength));
+		  block + UDF_SB_SESSION(sb), le16_to_cpu(tag_p->descCRC),
+		  le16_to_cpu(tag_p->descCRCLength));
 
-error_out:
+      error_out:
 	brelse(bh);
 	return NULL;
 }
 
-struct buffer_head *
-udf_read_ptagged(struct super_block *sb, kernel_lb_addr loc, uint32_t offset, uint16_t *ident)
+struct buffer_head *udf_read_ptagged(struct super_block *sb, kernel_lb_addr loc,
+				     uint32_t offset, uint16_t * ident)
 {
 	return udf_read_tagged(sb, udf_get_lb_pblock(sb, loc, offset),
-		loc.logicalBlockNum + offset, ident);
+			       loc.logicalBlockNum + offset, ident);
 }
 
 void udf_update_tag(char *data, int length)
 {
-	tag *tptr = (tag *)data;
+	tag *tptr = (tag *) data;
 	int i;
 
 	length -= sizeof(tag);
@@ -285,15 +281,15 @@
 	tptr->descCRCLength = cpu_to_le16(length);
 	tptr->descCRC = cpu_to_le16(udf_crc(data + sizeof(tag), length, 0));
 
-	for (i=0; i<16; i++)
+	for (i = 0; i < 16; i++)
 		if (i != 4)
-			tptr->tagChecksum += (uint8_t)(data[i]);
+			tptr->tagChecksum += (uint8_t) (data[i]);
 }
 
 void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
-	uint32_t loc, int length)
+		 uint32_t loc, int length)
 {
-	tag *tptr = (tag *)data;
+	tag *tptr = (tag *) data;
 	tptr->tagIdent = cpu_to_le16(ident);
 	tptr->descVersion = cpu_to_le16(version);
 	tptr->tagSerialNum = cpu_to_le16(snum);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 51fe307..334d363 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -32,7 +32,8 @@
 #include <linux/buffer_head.h>
 #include <linux/sched.h>
 
-static inline int udf_match(int len1, const char *name1, int len2, const char *name2)
+static inline int udf_match(int len1, const char *name1, int len2,
+			    const char *name2)
 {
 	if (len1 != len2)
 		return 0;
@@ -40,8 +41,8 @@
 }
 
 int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
-	struct fileIdentDesc *sfi, struct udf_fileident_bh *fibh,
-	uint8_t *impuse, uint8_t *fileident)
+		 struct fileIdentDesc *sfi, struct udf_fileident_bh *fibh,
+		 uint8_t * impuse, uint8_t * fileident)
 {
 	uint16_t crclen = fibh->eoffset - fibh->soffset - sizeof(tag);
 	uint16_t crc;
@@ -51,7 +52,7 @@
 	uint16_t liu = le16_to_cpu(cfi->lengthOfImpUse);
 	uint8_t lfi = cfi->lengthFileIdent;
 	int padlen = fibh->eoffset - fibh->soffset - liu - lfi -
-		sizeof(struct fileIdentDesc);
+	    sizeof(struct fileIdentDesc);
 	int adinicb = 0;
 
 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
@@ -59,83 +60,86 @@
 
 	offset = fibh->soffset + sizeof(struct fileIdentDesc);
 
-	if (impuse)
-	{
+	if (impuse) {
 		if (adinicb || (offset + liu < 0))
-			memcpy((uint8_t *)sfi->impUse, impuse, liu);
+			memcpy((uint8_t *) sfi->impUse, impuse, liu);
 		else if (offset >= 0)
 			memcpy(fibh->ebh->b_data + offset, impuse, liu);
-		else
-		{
-			memcpy((uint8_t *)sfi->impUse, impuse, -offset);
-			memcpy(fibh->ebh->b_data, impuse - offset, liu + offset);
+		else {
+			memcpy((uint8_t *) sfi->impUse, impuse, -offset);
+			memcpy(fibh->ebh->b_data, impuse - offset,
+			       liu + offset);
 		}
 	}
 
 	offset += liu;
 
-	if (fileident)
-	{
+	if (fileident) {
 		if (adinicb || (offset + lfi < 0))
-			memcpy((uint8_t *)sfi->fileIdent + liu, fileident, lfi);
+			memcpy((uint8_t *) sfi->fileIdent + liu, fileident,
+			       lfi);
 		else if (offset >= 0)
 			memcpy(fibh->ebh->b_data + offset, fileident, lfi);
-		else
-		{
-			memcpy((uint8_t *)sfi->fileIdent + liu, fileident, -offset);
-			memcpy(fibh->ebh->b_data, fileident - offset, lfi + offset);
+		else {
+			memcpy((uint8_t *) sfi->fileIdent + liu, fileident,
+			       -offset);
+			memcpy(fibh->ebh->b_data, fileident - offset,
+			       lfi + offset);
 		}
 	}
 
 	offset += lfi;
 
 	if (adinicb || (offset + padlen < 0))
-		memset((uint8_t *)sfi->padding + liu + lfi, 0x00, padlen);
+		memset((uint8_t *) sfi->padding + liu + lfi, 0x00, padlen);
 	else if (offset >= 0)
 		memset(fibh->ebh->b_data + offset, 0x00, padlen);
-	else
-	{
-		memset((uint8_t *)sfi->padding + liu + lfi, 0x00, -offset);
+	else {
+		memset((uint8_t *) sfi->padding + liu + lfi, 0x00, -offset);
 		memset(fibh->ebh->b_data, 0x00, padlen + offset);
 	}
 
-	crc = udf_crc((uint8_t *)cfi + sizeof(tag), sizeof(struct fileIdentDesc) -
-		sizeof(tag), 0);
+	crc =
+	    udf_crc((uint8_t *) cfi + sizeof(tag),
+		    sizeof(struct fileIdentDesc) - sizeof(tag), 0);
 
 	if (fibh->sbh == fibh->ebh)
-		crc = udf_crc((uint8_t *)sfi->impUse,
-			crclen + sizeof(tag) - sizeof(struct fileIdentDesc), crc);
+		crc = udf_crc((uint8_t *) sfi->impUse,
+			      crclen + sizeof(tag) -
+			      sizeof(struct fileIdentDesc), crc);
 	else if (sizeof(struct fileIdentDesc) >= -fibh->soffset)
-		crc = udf_crc(fibh->ebh->b_data + sizeof(struct fileIdentDesc) + fibh->soffset,
-			crclen + sizeof(tag) - sizeof(struct fileIdentDesc), crc);
-	else
-	{
-		crc = udf_crc((uint8_t *)sfi->impUse,
-			-fibh->soffset - sizeof(struct fileIdentDesc), crc);
+		crc =
+		    udf_crc(fibh->ebh->b_data + sizeof(struct fileIdentDesc) +
+			    fibh->soffset,
+			    crclen + sizeof(tag) - sizeof(struct fileIdentDesc),
+			    crc);
+	else {
+		crc = udf_crc((uint8_t *) sfi->impUse,
+			      -fibh->soffset - sizeof(struct fileIdentDesc),
+			      crc);
 		crc = udf_crc(fibh->ebh->b_data, fibh->eoffset, crc);
 	}
 
 	cfi->descTag.descCRC = cpu_to_le16(crc);
 	cfi->descTag.descCRCLength = cpu_to_le16(crclen);
 
-	for (i=0; i<16; i++)
+	for (i = 0; i < 16; i++)
 		if (i != 4)
-			checksum += ((uint8_t *)&cfi->descTag)[i];
+			checksum += ((uint8_t *) & cfi->descTag)[i];
 
 	cfi->descTag.tagChecksum = checksum;
 	if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset))
-		memcpy((uint8_t *)sfi, (uint8_t *)cfi, sizeof(struct fileIdentDesc));
-	else
-	{
-		memcpy((uint8_t *)sfi, (uint8_t *)cfi, -fibh->soffset);
-		memcpy(fibh->ebh->b_data, (uint8_t *)cfi - fibh->soffset,
-			sizeof(struct fileIdentDesc) + fibh->soffset);
+		memcpy((uint8_t *) sfi, (uint8_t *) cfi,
+		       sizeof(struct fileIdentDesc));
+	else {
+		memcpy((uint8_t *) sfi, (uint8_t *) cfi, -fibh->soffset);
+		memcpy(fibh->ebh->b_data, (uint8_t *) cfi - fibh->soffset,
+		       sizeof(struct fileIdentDesc) + fibh->soffset);
 	}
 
 	if (adinicb)
 		mark_inode_dirty(inode);
-	else
-	{
+	else {
 		if (fibh->sbh != fibh->ebh)
 			mark_buffer_dirty_inode(fibh->ebh, inode);
 		mark_buffer_dirty_inode(fibh->sbh, inode);
@@ -143,12 +147,12 @@
 	return 0;
 }
 
-static struct fileIdentDesc *
-udf_find_entry(struct inode *dir, struct dentry *dentry,
-	struct udf_fileident_bh *fibh,
-	struct fileIdentDesc *cfi)
+static struct fileIdentDesc *udf_find_entry(struct inode *dir,
+					    struct dentry *dentry,
+					    struct udf_fileident_bh *fibh,
+					    struct fileIdentDesc *cfi)
 {
-	struct fileIdentDesc *fi=NULL;
+	struct fileIdentDesc *fi = NULL;
 	loff_t f_pos;
 	int block, flen;
 	char fname[UDF_NAME_LEN];
@@ -159,46 +163,41 @@
 	kernel_lb_addr eloc;
 	uint32_t elen;
 	sector_t offset;
-	struct extent_position epos = { NULL, 0, { 0, 0}};
+	struct extent_position epos = { NULL, 0, {0, 0} };
 
 	size = (udf_ext0_offset(dir) + dir->i_size) >> 2;
 	f_pos = (udf_ext0_offset(dir) >> 2);
 
-	fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
+	fibh->soffset = fibh->eoffset =
+	    (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
 	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
 		fibh->sbh = fibh->ebh = NULL;
 	else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
-		&epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
-	{
+			    &epos, &eloc, &elen,
+			    &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
 		block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
-		if ((++offset << dir->i_sb->s_blocksize_bits) < elen)
-		{
+		if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
 			if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
 				epos.offset -= sizeof(short_ad);
 			else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
 				epos.offset -= sizeof(long_ad);
-		}
-		else
+		} else
 			offset = 0;
 
-		if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
-		{
+		if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block))) {
 			brelse(epos.bh);
 			return NULL;
 		}
-	}
-	else
-	{
+	} else {
 		brelse(epos.bh);
 		return NULL;
 	}
 
-	while ( (f_pos < size) )
-	{
-		fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, &elen, &offset);
+	while ((f_pos < size)) {
+		fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc,
+					&elen, &offset);
 
-		if (!fi)
-		{
+		if (!fi) {
 			if (fibh->sbh != fibh->ebh)
 				brelse(fibh->ebh);
 			brelse(fibh->sbh);
@@ -209,45 +208,45 @@
 		liu = le16_to_cpu(cfi->lengthOfImpUse);
 		lfi = cfi->lengthFileIdent;
 
-		if (fibh->sbh == fibh->ebh)
-		{
+		if (fibh->sbh == fibh->ebh) {
 			nameptr = fi->fileIdent + liu;
-		}
-		else
-		{
+		} else {
 			int poffset;	/* Unpaded ending offset */
 
-			poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi;
+			poffset =
+			    fibh->soffset + sizeof(struct fileIdentDesc) + liu +
+			    lfi;
 
 			if (poffset >= lfi)
-				nameptr = (uint8_t *)(fibh->ebh->b_data + poffset - lfi);
-			else
-			{
+				nameptr =
+				    (uint8_t *) (fibh->ebh->b_data + poffset -
+						 lfi);
+			else {
 				nameptr = fname;
-				memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
-				memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset);
+				memcpy(nameptr, fi->fileIdent + liu,
+				       lfi - poffset);
+				memcpy(nameptr + lfi - poffset,
+				       fibh->ebh->b_data, poffset);
 			}
 		}
 
-		if ( (cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
-		{
-			if ( !UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE) )
+		if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
+			if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
 				continue;
 		}
-	    
-		if ( (cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0 )
-		{
-			if ( !UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE) )
+
+		if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
+			if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
 				continue;
 		}
 
 		if (!lfi)
 			continue;
 
-		if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)))
-		{
-			if (udf_match(flen, fname, dentry->d_name.len, dentry->d_name.name))
-			{
+		if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi))) {
+			if (udf_match
+			    (flen, fname, dentry->d_name.len,
+			     dentry->d_name.name)) {
 				brelse(epos.bh);
 				return fi;
 			}
@@ -293,41 +292,37 @@
  *	Written, tested, and released.
  */
 
-static struct dentry *
-udf_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
+				 struct nameidata *nd)
 {
 	struct inode *inode = NULL;
 	struct fileIdentDesc cfi;
 	struct udf_fileident_bh fibh;
 
-	if (dentry->d_name.len > UDF_NAME_LEN-2)
+	if (dentry->d_name.len > UDF_NAME_LEN - 2)
 		return ERR_PTR(-ENAMETOOLONG);
 
 	lock_kernel();
 #ifdef UDF_RECOVERY
 	/* temporary shorthand for specifying files by inode number */
-	if (!strncmp(dentry->d_name.name, ".B=", 3) )
-	{
-		kernel_lb_addr lb = { 0, simple_strtoul(dentry->d_name.name+3, NULL, 0) };
+	if (!strncmp(dentry->d_name.name, ".B=", 3)) {
+		kernel_lb_addr lb =
+		    { 0, simple_strtoul(dentry->d_name.name + 3, NULL, 0) };
 		inode = udf_iget(dir->i_sb, lb);
-		if (!inode)
-		{
+		if (!inode) {
 			unlock_kernel();
 			return ERR_PTR(-EACCES);
 		}
-	}
-	else
-#endif /* UDF_RECOVERY */
+	} else
+#endif				/* UDF_RECOVERY */
 
-	if (udf_find_entry(dir, dentry, &fibh, &cfi))
-	{
+	if (udf_find_entry(dir, dentry, &fibh, &cfi)) {
 		if (fibh.sbh != fibh.ebh)
 			brelse(fibh.ebh);
 		brelse(fibh.sbh);
 
 		inode = udf_iget(dir->i_sb, lelb_to_cpu(cfi.icb.extLocation));
-		if ( !inode )
-		{
+		if (!inode) {
 			unlock_kernel();
 			return ERR_PTR(-EACCES);
 		}
@@ -337,13 +332,13 @@
 	return NULL;
 }
 
-static struct fileIdentDesc *
-udf_add_entry(struct inode *dir, struct dentry *dentry,
-	struct udf_fileident_bh *fibh,
-	struct fileIdentDesc *cfi, int *err)
+static struct fileIdentDesc *udf_add_entry(struct inode *dir,
+					   struct dentry *dentry,
+					   struct udf_fileident_bh *fibh,
+					   struct fileIdentDesc *cfi, int *err)
 {
 	struct super_block *sb;
-	struct fileIdentDesc *fi=NULL;
+	struct fileIdentDesc *fi = NULL;
 	char name[UDF_NAME_LEN], fname[UDF_NAME_LEN];
 	int namelen;
 	loff_t f_pos;
@@ -357,50 +352,47 @@
 	kernel_lb_addr eloc;
 	uint32_t elen;
 	sector_t offset;
-	struct extent_position epos = { NULL, 0, { 0, 0 }};
+	struct extent_position epos = { NULL, 0, {0, 0} };
 
 	sb = dir->i_sb;
 
-	if (dentry)
-	{
-		if (!dentry->d_name.len)
-		{
+	if (dentry) {
+		if (!dentry->d_name.len) {
 			*err = -EINVAL;
 			return NULL;
 		}
 
-		if ( !(namelen = udf_put_filename(sb, dentry->d_name.name, name, dentry->d_name.len)))
-		{
+		if (!
+		    (namelen =
+		     udf_put_filename(sb, dentry->d_name.name, name,
+				      dentry->d_name.len))) {
 			*err = -ENAMETOOLONG;
 			return NULL;
 		}
-	}
-	else
+	} else
 		namelen = 0;
 
 	nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
 
 	f_pos = (udf_ext0_offset(dir) >> 2);
 
-	fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
+	fibh->soffset = fibh->eoffset =
+	    (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
 	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
 		fibh->sbh = fibh->ebh = NULL;
 	else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
-		&epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
-	{
+			    &epos, &eloc, &elen,
+			    &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
 		block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
-		if ((++offset << dir->i_sb->s_blocksize_bits) < elen)
-		{
+		if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
 			if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
 				epos.offset -= sizeof(short_ad);
 			else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
 				epos.offset -= sizeof(long_ad);
-		}
-		else
+		} else
 			offset = 0;
 
-		if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
-		{
+		if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block))) {
 			brelse(epos.bh);
 			*err = -EIO;
 			return NULL;
@@ -408,21 +400,18 @@
 
 		block = UDF_I_LOCATION(dir).logicalBlockNum;
 
-	}
-	else
-	{
+	} else {
 		block = udf_get_lb_pblock(dir->i_sb, UDF_I_LOCATION(dir), 0);
 		fibh->sbh = fibh->ebh = NULL;
 		fibh->soffset = fibh->eoffset = sb->s_blocksize;
 		goto add;
 	}
 
-	while ( (f_pos < size) )
-	{
-		fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, &elen, &offset);
+	while ((f_pos < size)) {
+		fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc,
+					&elen, &offset);
 
-		if (!fi)
-		{
+		if (!fi) {
 			if (fibh->sbh != fibh->ebh)
 				brelse(fibh->ebh);
 			brelse(fibh->sbh);
@@ -436,36 +425,38 @@
 
 		if (fibh->sbh == fibh->ebh)
 			nameptr = fi->fileIdent + liu;
-		else
-		{
+		else {
 			int poffset;	/* Unpaded ending offset */
 
-			poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi;
+			poffset =
+			    fibh->soffset + sizeof(struct fileIdentDesc) + liu +
+			    lfi;
 
 			if (poffset >= lfi)
-				nameptr = (char *)(fibh->ebh->b_data + poffset - lfi);
-			else
-			{
+				nameptr =
+				    (char *)(fibh->ebh->b_data + poffset - lfi);
+			else {
 				nameptr = fname;
-				memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
-				memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset);
+				memcpy(nameptr, fi->fileIdent + liu,
+				       lfi - poffset);
+				memcpy(nameptr + lfi - poffset,
+				       fibh->ebh->b_data, poffset);
 			}
 		}
 
-		if ( (cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
-		{
-			if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen)
-			{
+		if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
+			if (((sizeof(struct fileIdentDesc) + liu + lfi +
+			      3) & ~3) == nfidlen) {
 				brelse(epos.bh);
 				cfi->descTag.tagSerialNum = cpu_to_le16(1);
 				cfi->fileVersionNum = cpu_to_le16(1);
 				cfi->fileCharacteristics = 0;
 				cfi->lengthFileIdent = namelen;
 				cfi->lengthOfImpUse = cpu_to_le16(0);
-				if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name))
+				if (!udf_write_fi
+				    (dir, cfi, fi, fibh, NULL, name))
 					return fi;
-				else
-				{
+				else {
 					*err = -EIO;
 					return NULL;
 				}
@@ -476,8 +467,8 @@
 			continue;
 
 		if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)) &&
-			udf_match(flen, fname, dentry->d_name.len, dentry->d_name.name))
-		{
+		    udf_match(flen, fname, dentry->d_name.len,
+			      dentry->d_name.name)) {
 			if (fibh->sbh != fibh->ebh)
 				brelse(fibh->ebh);
 			brelse(fibh->sbh);
@@ -487,12 +478,11 @@
 		}
 	}
 
-add:
+      add:
 	f_pos += nfidlen;
 
 	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB &&
-		sb->s_blocksize - fibh->eoffset < nfidlen)
-	{
+	    sb->s_blocksize - fibh->eoffset < nfidlen) {
 		brelse(epos.bh);
 		epos.bh = NULL;
 		fibh->soffset -= udf_ext0_offset(dir);
@@ -501,11 +491,14 @@
 		if (fibh->sbh != fibh->ebh)
 			brelse(fibh->ebh);
 		brelse(fibh->sbh);
-		if (!(fibh->sbh = fibh->ebh = udf_expand_dir_adinicb(dir, &block, err)))
+		if (!
+		    (fibh->sbh = fibh->ebh =
+		     udf_expand_dir_adinicb(dir, &block, err)))
 			return NULL;
 		epos.block = UDF_I_LOCATION(dir);
 		eloc.logicalBlockNum = block;
-		eloc.partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum;
+		eloc.partitionReferenceNum =
+		    UDF_I_LOCATION(dir).partitionReferenceNum;
 		elen = dir->i_sb->s_blocksize;
 		epos.offset = udf_file_entry_alloc_offset(dir);
 		if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
@@ -514,89 +507,84 @@
 			epos.offset += sizeof(long_ad);
 	}
 
-	if (sb->s_blocksize - fibh->eoffset >= nfidlen)
-	{
+	if (sb->s_blocksize - fibh->eoffset >= nfidlen) {
 		fibh->soffset = fibh->eoffset;
 		fibh->eoffset += nfidlen;
-		if (fibh->sbh != fibh->ebh)
-		{
+		if (fibh->sbh != fibh->ebh) {
 			brelse(fibh->sbh);
 			fibh->sbh = fibh->ebh;
 		}
 
-		if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
-		{
+		if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
 			block = UDF_I_LOCATION(dir).logicalBlockNum;
-			fi = (struct fileIdentDesc *)(UDF_I_DATA(dir) + fibh->soffset - udf_ext0_offset(dir) + UDF_I_LENEATTR(dir));
-		}
-		else
-		{
+			fi = (struct fileIdentDesc *)(UDF_I_DATA(dir) +
+						      fibh->soffset -
+						      udf_ext0_offset(dir) +
+						      UDF_I_LENEATTR(dir));
+		} else {
 			block = eloc.logicalBlockNum + ((elen - 1) >>
-				dir->i_sb->s_blocksize_bits);
-			fi = (struct fileIdentDesc *)(fibh->sbh->b_data + fibh->soffset);
+							dir->i_sb->
+							s_blocksize_bits);
+			fi = (struct fileIdentDesc *)(fibh->sbh->b_data +
+						      fibh->soffset);
 		}
-	}
-	else
-	{
+	} else {
 		fibh->soffset = fibh->eoffset - sb->s_blocksize;
 		fibh->eoffset += nfidlen - sb->s_blocksize;
-		if (fibh->sbh != fibh->ebh)
-		{
+		if (fibh->sbh != fibh->ebh) {
 			brelse(fibh->sbh);
 			fibh->sbh = fibh->ebh;
 		}
 
 		block = eloc.logicalBlockNum + ((elen - 1) >>
-			dir->i_sb->s_blocksize_bits);
+						dir->i_sb->s_blocksize_bits);
 
-		if (!(fibh->ebh = udf_bread(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), 1, err)))
-		{
+		if (!
+		    (fibh->ebh =
+		     udf_bread(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
+			       1, err))) {
 			brelse(epos.bh);
 			brelse(fibh->sbh);
 			return NULL;
 		}
 
-		if (!(fibh->soffset))
-		{
+		if (!(fibh->soffset)) {
 			if (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
-				(EXT_RECORDED_ALLOCATED >> 30))
-			{
+			    (EXT_RECORDED_ALLOCATED >> 30)) {
 				block = eloc.logicalBlockNum + ((elen - 1) >>
-					dir->i_sb->s_blocksize_bits);
-			}
-			else
-				block ++;
+								dir->i_sb->
+								s_blocksize_bits);
+			} else
+				block++;
 
 			brelse(fibh->sbh);
 			fibh->sbh = fibh->ebh;
 			fi = (struct fileIdentDesc *)(fibh->sbh->b_data);
-		}
-		else
-		{
+		} else {
 			fi = (struct fileIdentDesc *)
-				(fibh->sbh->b_data + sb->s_blocksize + fibh->soffset);
+			    (fibh->sbh->b_data + sb->s_blocksize +
+			     fibh->soffset);
 		}
 	}
 
 	memset(cfi, 0, sizeof(struct fileIdentDesc));
 	if (UDF_SB_UDFREV(sb) >= 0x0200)
-		udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block, sizeof(tag));
+		udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block,
+			    sizeof(tag));
 	else
-		udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block, sizeof(tag));
+		udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block,
+			    sizeof(tag));
 	cfi->fileVersionNum = cpu_to_le16(1);
 	cfi->lengthFileIdent = namelen;
 	cfi->lengthOfImpUse = cpu_to_le16(0);
-	if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name))
-	{
+	if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) {
 		brelse(epos.bh);
 		dir->i_size += nfidlen;
 		if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
 			UDF_I_LENALLOC(dir) += nfidlen;
 		mark_inode_dirty(dir);
 		return fi;
-	}
-	else
-	{
+	} else {
 		brelse(epos.bh);
 		if (fibh->sbh != fibh->ebh)
 			brelse(fibh->ebh);
@@ -607,7 +595,8 @@
 }
 
 static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
-	struct udf_fileident_bh *fibh, struct fileIdentDesc *cfi)
+			    struct udf_fileident_bh *fibh,
+			    struct fileIdentDesc *cfi)
 {
 	cfi->fileCharacteristics |= FID_FILE_CHAR_DELETED;
 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
@@ -615,7 +604,8 @@
 	return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL);
 }
 
-static int udf_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
+		      struct nameidata *nd)
 {
 	struct udf_fileident_bh fibh;
 	struct inode *inode;
@@ -624,8 +614,7 @@
 
 	lock_kernel();
 	inode = udf_new_inode(dir, mode, &err);
-	if (!inode)
-	{
+	if (!inode) {
 		unlock_kernel();
 		return err;
 	}
@@ -639,9 +628,8 @@
 	inode->i_mode = mode;
 	mark_inode_dirty(inode);
 
-	if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err)))
-	{
-		inode->i_nlink --;
+	if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) {
+		inode->i_nlink--;
 		mark_inode_dirty(inode);
 		iput(inode);
 		unlock_kernel();
@@ -649,11 +637,10 @@
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
-	*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-		cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL);
+	*(__le32 *) ((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
+	    cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL);
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
-	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
-	{
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
 		mark_inode_dirty(dir);
 	}
 	if (fibh.sbh != fibh.ebh)
@@ -664,9 +651,10 @@
 	return 0;
 }
 
-static int udf_mknod(struct inode * dir, struct dentry * dentry, int mode, dev_t rdev)
+static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
+		     dev_t rdev)
 {
-	struct inode * inode;
+	struct inode *inode;
 	struct udf_fileident_bh fibh;
 	struct fileIdentDesc cfi, *fi;
 	int err;
@@ -682,9 +670,8 @@
 
 	inode->i_uid = current->fsuid;
 	init_special_inode(inode, mode, rdev);
-	if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err)))
-	{
-		inode->i_nlink --;
+	if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) {
+		inode->i_nlink--;
 		mark_inode_dirty(inode);
 		iput(inode);
 		unlock_kernel();
@@ -692,11 +679,10 @@
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
-	*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-		cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL);
+	*(__le32 *) ((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
+	    cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL);
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
-	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
-	{
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
 		mark_inode_dirty(dir);
 	}
 	mark_inode_dirty(inode);
@@ -706,21 +692,21 @@
 	brelse(fibh.sbh);
 	d_instantiate(dentry, inode);
 	err = 0;
-out:
+      out:
 	unlock_kernel();
 	return err;
 }
 
-static int udf_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 {
-	struct inode * inode;
+	struct inode *inode;
 	struct udf_fileident_bh fibh;
 	struct fileIdentDesc cfi, *fi;
 	int err;
 
 	lock_kernel();
 	err = -EMLINK;
-	if (dir->i_nlink >= (256<<sizeof(dir->i_nlink))-1)
+	if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1)
 		goto out;
 
 	err = -EIO;
@@ -730,8 +716,7 @@
 
 	inode->i_op = &udf_dir_inode_operations;
 	inode->i_fop = &udf_dir_operations;
-	if (!(fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err)))
-	{
+	if (!(fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err))) {
 		inode->i_nlink--;
 		mark_inode_dirty(inode);
 		iput(inode);
@@ -740,9 +725,10 @@
 	inode->i_nlink = 2;
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(dir));
-	*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-		cpu_to_le32(UDF_I_UNIQUE(dir) & 0x00000000FFFFFFFFUL);
-	cfi.fileCharacteristics = FID_FILE_CHAR_DIRECTORY | FID_FILE_CHAR_PARENT;
+	*(__le32 *) ((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
+	    cpu_to_le32(UDF_I_UNIQUE(dir) & 0x00000000FFFFFFFFUL);
+	cfi.fileCharacteristics =
+	    FID_FILE_CHAR_DIRECTORY | FID_FILE_CHAR_PARENT;
 	udf_write_fi(inode, &cfi, fi, &fibh, NULL, NULL);
 	brelse(fibh.sbh);
 	inode->i_mode = S_IFDIR | mode;
@@ -750,8 +736,7 @@
 		inode->i_mode |= S_ISGID;
 	mark_inode_dirty(inode);
 
-	if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err)))
-	{
+	if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) {
 		inode->i_nlink = 0;
 		mark_inode_dirty(inode);
 		iput(inode);
@@ -759,8 +744,8 @@
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
-	*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-		cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL);
+	*(__le32 *) ((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
+	    cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL);
 	cfi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY;
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
 	inc_nlink(dir);
@@ -770,7 +755,7 @@
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
 	err = 0;
-out:
+      out:
 	unlock_kernel();
 	return err;
 }
@@ -785,47 +770,41 @@
 	kernel_lb_addr eloc;
 	uint32_t elen;
 	sector_t offset;
-	struct extent_position epos = { NULL, 0, { 0, 0}};
+	struct extent_position epos = { NULL, 0, {0, 0} };
 
 	f_pos = (udf_ext0_offset(dir) >> 2);
 
-	fibh.soffset = fibh.eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
+	fibh.soffset = fibh.eoffset =
+	    (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
 
 	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
 		fibh.sbh = fibh.ebh = NULL;
 	else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
-		&epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
-	{
+			    &epos, &eloc, &elen,
+			    &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
 		block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
-		if ((++offset << dir->i_sb->s_blocksize_bits) < elen)
-		{
+		if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
 			if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
 				epos.offset -= sizeof(short_ad);
 			else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
 				epos.offset -= sizeof(long_ad);
-		}
-		else
+		} else
 			offset = 0;
 
-		if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
-		{
+		if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
 			brelse(epos.bh);
 			return 0;
 		}
-	}
-	else
-	{
+	} else {
 		brelse(epos.bh);
 		return 0;
 	}
 
+	while ((f_pos < size)) {
+		fi = udf_fileident_read(dir, &f_pos, &fibh, &cfi, &epos, &eloc,
+					&elen, &offset);
 
-	while ( (f_pos < size) )
-	{
-		fi = udf_fileident_read(dir, &f_pos, &fibh, &cfi, &epos, &eloc, &elen, &offset);
-
-		if (!fi)
-		{
+		if (!fi) {
 			if (fibh.sbh != fibh.ebh)
 				brelse(fibh.ebh);
 			brelse(fibh.sbh);
@@ -833,8 +812,8 @@
 			return 0;
 		}
 
-		if (cfi.lengthFileIdent && (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) == 0)
-		{
+		if (cfi.lengthFileIdent
+		    && (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) == 0) {
 			if (fibh.sbh != fibh.ebh)
 				brelse(fibh.ebh);
 			brelse(fibh.sbh);
@@ -849,10 +828,10 @@
 	return 1;
 }
 
-static int udf_rmdir(struct inode * dir, struct dentry * dentry)
+static int udf_rmdir(struct inode *dir, struct dentry *dentry)
 {
 	int retval;
-	struct inode * inode = dentry->d_inode;
+	struct inode *inode = dentry->d_inode;
 	struct udf_fileident_bh fibh;
 	struct fileIdentDesc *fi, cfi;
 	kernel_lb_addr tloc;
@@ -875,27 +854,28 @@
 		goto end_rmdir;
 	if (inode->i_nlink != 2)
 		udf_warning(inode->i_sb, "udf_rmdir",
-			"empty directory has nlink != 2 (%d)",
-			inode->i_nlink);
+			    "empty directory has nlink != 2 (%d)",
+			    inode->i_nlink);
 	clear_nlink(inode);
 	inode->i_size = 0;
 	inode_dec_link_count(dir);
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
+	inode->i_ctime = dir->i_ctime = dir->i_mtime =
+	    current_fs_time(dir->i_sb);
 	mark_inode_dirty(dir);
 
-end_rmdir:
+      end_rmdir:
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
-out:
+      out:
 	unlock_kernel();
 	return retval;
 }
 
-static int udf_unlink(struct inode * dir, struct dentry * dentry)
+static int udf_unlink(struct inode *dir, struct dentry *dentry)
 {
 	int retval;
-	struct inode * inode = dentry->d_inode;
+	struct inode *inode = dentry->d_inode;
 	struct udf_fileident_bh fibh;
 	struct fileIdentDesc *fi;
 	struct fileIdentDesc cfi;
@@ -912,10 +892,9 @@
 	if (udf_get_lb_pblock(dir->i_sb, tloc, 0) != inode->i_ino)
 		goto end_unlink;
 
-	if (!inode->i_nlink)
-	{
+	if (!inode->i_nlink) {
 		udf_debug("Deleting nonexistent file (%lu), %d\n",
-			inode->i_ino, inode->i_nlink);
+			  inode->i_ino, inode->i_nlink);
 		inode->i_nlink = 1;
 	}
 	retval = udf_delete_entry(dir, fi, &fibh, &cfi);
@@ -927,22 +906,23 @@
 	inode->i_ctime = dir->i_ctime;
 	retval = 0;
 
-end_unlink:
+      end_unlink:
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
-out:
+      out:
 	unlock_kernel();
 	return retval;
 }
 
-static int udf_symlink(struct inode * dir, struct dentry * dentry, const char * symname)
+static int udf_symlink(struct inode *dir, struct dentry *dentry,
+		       const char *symname)
 {
-	struct inode * inode;
+	struct inode *inode;
 	struct pathComponent *pc;
 	char *compstart;
 	struct udf_fileident_bh fibh;
-	struct extent_position epos = { NULL,  0, {0, 0}};
+	struct extent_position epos = { NULL, 0, {0, 0} };
 	int eoffset, elen = 0;
 	struct fileIdentDesc *fi;
 	struct fileIdentDesc cfi;
@@ -960,28 +940,31 @@
 	inode->i_data.a_ops = &udf_symlink_aops;
 	inode->i_op = &page_symlink_inode_operations;
 
-	if (UDF_I_ALLOCTYPE(inode) != ICBTAG_FLAG_AD_IN_ICB)
-	{
+	if (UDF_I_ALLOCTYPE(inode) != ICBTAG_FLAG_AD_IN_ICB) {
 		kernel_lb_addr eloc;
 		uint32_t elen;
 
 		block = udf_new_block(inode->i_sb, inode,
-			UDF_I_LOCATION(inode).partitionReferenceNum,
-			UDF_I_LOCATION(inode).logicalBlockNum, &err);
+				      UDF_I_LOCATION(inode).
+				      partitionReferenceNum,
+				      UDF_I_LOCATION(inode).logicalBlockNum,
+				      &err);
 		if (!block)
 			goto out_no_entry;
 		epos.block = UDF_I_LOCATION(inode);
 		epos.offset = udf_file_entry_alloc_offset(inode);
 		epos.bh = NULL;
 		eloc.logicalBlockNum = block;
-		eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
+		eloc.partitionReferenceNum =
+		    UDF_I_LOCATION(inode).partitionReferenceNum;
 		elen = inode->i_sb->s_blocksize;
 		UDF_I_LENEXTENTS(inode) = elen;
 		udf_add_aext(inode, &epos, eloc, elen, 0);
 		brelse(epos.bh);
 
 		block = udf_get_pblock(inode->i_sb, block,
-			UDF_I_LOCATION(inode).partitionReferenceNum, 0);
+				       UDF_I_LOCATION(inode).
+				       partitionReferenceNum, 0);
 		epos.bh = udf_tread(inode->i_sb, block);
 		lock_buffer(epos.bh);
 		memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize);
@@ -989,17 +972,14 @@
 		unlock_buffer(epos.bh);
 		mark_buffer_dirty_inode(epos.bh, inode);
 		ea = epos.bh->b_data + udf_ext0_offset(inode);
-	}
-	else
+	} else
 		ea = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode);
 
 	eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode);
 	pc = (struct pathComponent *)ea;
 
-	if (*symname == '/')
-	{
-		do
-		{
+	if (*symname == '/') {
+		do {
 			symname++;
 		} while (*symname == '/');
 
@@ -1012,8 +992,7 @@
 
 	err = -ENAMETOOLONG;
 
-	while (*symname)
-	{
+	while (*symname) {
 		if (elen + sizeof(struct pathComponent) > eoffset)
 			goto out_no_entry;
 
@@ -1021,28 +1000,30 @@
 
 		compstart = (char *)symname;
 
-		do
-		{
+		do {
 			symname++;
 		} while (*symname && *symname != '/');
 
 		pc->componentType = 5;
 		pc->lengthComponentIdent = 0;
 		pc->componentFileVersionNum = 0;
-		if (compstart[0] == '.')
-		{
-			if ((symname-compstart) == 1)
+		if (compstart[0] == '.') {
+			if ((symname - compstart) == 1)
 				pc->componentType = 4;
-			else if ((symname-compstart) == 2 && compstart[1] == '.')
+			else if ((symname - compstart) == 2
+				 && compstart[1] == '.')
 				pc->componentType = 3;
 		}
 
-		if (pc->componentType == 5)
-		{
-			if ( !(namelen = udf_put_filename(inode->i_sb, compstart, name, symname-compstart)))
+		if (pc->componentType == 5) {
+			if (!
+			    (namelen =
+			     udf_put_filename(inode->i_sb, compstart, name,
+					      symname - compstart)))
 				goto out_no_entry;
 
-			if (elen + sizeof(struct pathComponent) + namelen > eoffset)
+			if (elen + sizeof(struct pathComponent) + namelen >
+			    eoffset)
 				goto out_no_entry;
 			else
 				pc->lengthComponentIdent = namelen;
@@ -1052,10 +1033,8 @@
 
 		elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
 
-		if (*symname)
-		{
-			do
-			{
+		if (*symname) {
+			do {
 				symname++;
 			} while (*symname == '/');
 		}
@@ -1071,22 +1050,22 @@
 		goto out_no_entry;
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
-	if (UDF_SB_LVIDBH(inode->i_sb))
-	{
+	if (UDF_SB_LVIDBH(inode->i_sb)) {
 		struct logicalVolHeaderDesc *lvhd;
 		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->logicalVolContentsUse);
+		lvhd =
+		    (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->
+						    logicalVolContentsUse);
 		uniqueID = le64_to_cpu(lvhd->uniqueID);
-		*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-			cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
+		*(__le32 *) ((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
+		    cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
 		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
 			uniqueID += 16;
 		lvhd->uniqueID = cpu_to_le64(uniqueID);
 		mark_buffer_dirty(UDF_SB_LVIDBH(inode->i_sb));
 	}
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
-	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
-	{
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
 		mark_inode_dirty(dir);
 	}
 	if (fibh.sbh != fibh.ebh)
@@ -1095,18 +1074,18 @@
 	d_instantiate(dentry, inode);
 	err = 0;
 
-out:
+      out:
 	unlock_kernel();
 	return err;
 
-out_no_entry:
+      out_no_entry:
 	inode_dec_link_count(inode);
 	iput(inode);
 	goto out;
 }
 
-static int udf_link(struct dentry * old_dentry, struct inode * dir,
-	 struct dentry *dentry)
+static int udf_link(struct dentry *old_dentry, struct inode *dir,
+		    struct dentry *dentry)
 {
 	struct inode *inode = old_dentry->d_inode;
 	struct udf_fileident_bh fibh;
@@ -1114,35 +1093,33 @@
 	int err;
 
 	lock_kernel();
-	if (inode->i_nlink >= (256<<sizeof(inode->i_nlink))-1)
-	{
+	if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) {
 		unlock_kernel();
 		return -EMLINK;
 	}
 
-	if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err)))
-	{
+	if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) {
 		unlock_kernel();
 		return err;
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
-	if (UDF_SB_LVIDBH(inode->i_sb))
-	{
+	if (UDF_SB_LVIDBH(inode->i_sb)) {
 		struct logicalVolHeaderDesc *lvhd;
 		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->logicalVolContentsUse);
+		lvhd =
+		    (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->
+						    logicalVolContentsUse);
 		uniqueID = le64_to_cpu(lvhd->uniqueID);
-		*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-			cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
+		*(__le32 *) ((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
+		    cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
 		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
 			uniqueID += 16;
 		lvhd->uniqueID = cpu_to_le64(uniqueID);
 		mark_buffer_dirty(UDF_SB_LVIDBH(inode->i_sb));
 	}
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
-	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
-	{
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
 		mark_inode_dirty(dir);
 	}
 	if (fibh.sbh != fibh.ebh)
@@ -1160,80 +1137,80 @@
 /* Anybody can rename anything with this: the permission checks are left to the
  * higher-level routines.
  */
-static int udf_rename (struct inode * old_dir, struct dentry * old_dentry,
-	struct inode * new_dir, struct dentry * new_dentry)
+static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
+		      struct inode *new_dir, struct dentry *new_dentry)
 {
-	struct inode * old_inode = old_dentry->d_inode;
-	struct inode * new_inode = new_dentry->d_inode;
+	struct inode *old_inode = old_dentry->d_inode;
+	struct inode *new_inode = new_dentry->d_inode;
 	struct udf_fileident_bh ofibh, nfibh;
-	struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi = NULL, ocfi, ncfi;
+	struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi =
+	    NULL, ocfi, ncfi;
 	struct buffer_head *dir_bh = NULL;
 	int retval = -ENOENT;
 	kernel_lb_addr tloc;
 
 	lock_kernel();
-	if ((ofi = udf_find_entry(old_dir, old_dentry, &ofibh, &ocfi)))
-	{
+	if ((ofi = udf_find_entry(old_dir, old_dentry, &ofibh, &ocfi))) {
 		if (ofibh.sbh != ofibh.ebh)
 			brelse(ofibh.ebh);
 		brelse(ofibh.sbh);
 	}
 	tloc = lelb_to_cpu(ocfi.icb.extLocation);
 	if (!ofi || udf_get_lb_pblock(old_dir->i_sb, tloc, 0)
-					!= old_inode->i_ino)
+	    != old_inode->i_ino)
 		goto end_rename;
 
 	nfi = udf_find_entry(new_dir, new_dentry, &nfibh, &ncfi);
-	if (nfi)
-	{
-		if (!new_inode)
-		{
+	if (nfi) {
+		if (!new_inode) {
 			if (nfibh.sbh != nfibh.ebh)
 				brelse(nfibh.ebh);
 			brelse(nfibh.sbh);
 			nfi = NULL;
 		}
 	}
-	if (S_ISDIR(old_inode->i_mode))
-	{
+	if (S_ISDIR(old_inode->i_mode)) {
 		uint32_t offset = udf_ext0_offset(old_inode);
 
-		if (new_inode)
-		{
+		if (new_inode) {
 			retval = -ENOTEMPTY;
 			if (!empty_dir(new_inode))
 				goto end_rename;
 		}
 		retval = -EIO;
-		if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB)
-		{
+		if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB) {
 			dir_fi = udf_get_fileident(UDF_I_DATA(old_inode) -
-				(UDF_I_EFE(old_inode) ?
-					sizeof(struct extendedFileEntry) :
-					sizeof(struct fileEntry)),
-				old_inode->i_sb->s_blocksize, &offset);
-		}
-		else
-		{
+						   (UDF_I_EFE(old_inode) ?
+						    sizeof(struct
+							   extendedFileEntry) :
+						    sizeof(struct fileEntry)),
+						   old_inode->i_sb->s_blocksize,
+						   &offset);
+		} else {
 			dir_bh = udf_bread(old_inode, 0, 0, &retval);
 			if (!dir_bh)
 				goto end_rename;
-			dir_fi = udf_get_fileident(dir_bh->b_data, old_inode->i_sb->s_blocksize, &offset);
+			dir_fi =
+			    udf_get_fileident(dir_bh->b_data,
+					      old_inode->i_sb->s_blocksize,
+					      &offset);
 		}
 		if (!dir_fi)
 			goto end_rename;
 		tloc = lelb_to_cpu(dir_fi->icb.extLocation);
 		if (udf_get_lb_pblock(old_inode->i_sb, tloc, 0)
-					!= old_dir->i_ino)
+		    != old_dir->i_ino)
 			goto end_rename;
 
 		retval = -EMLINK;
-		if (!new_inode && new_dir->i_nlink >= (256<<sizeof(new_dir->i_nlink))-1)
+		if (!new_inode
+		    && new_dir->i_nlink >=
+		    (256 << sizeof(new_dir->i_nlink)) - 1)
 			goto end_rename;
 	}
-	if (!nfi)
-	{
-		nfi = udf_add_entry(new_dir, new_dentry, &nfibh, &ncfi, &retval);
+	if (!nfi) {
+		nfi =
+		    udf_add_entry(new_dir, new_dentry, &nfibh, &ncfi, &retval);
 		if (!nfi)
 			goto end_rename;
 	}
@@ -1257,39 +1234,33 @@
 	ofi = udf_find_entry(old_dir, old_dentry, &ofibh, &ocfi);
 	udf_delete_entry(old_dir, ofi, &ofibh, &ocfi);
 
-	if (new_inode)
-	{
+	if (new_inode) {
 		new_inode->i_ctime = current_fs_time(new_inode->i_sb);
 		inode_dec_link_count(new_inode);
 	}
 	old_dir->i_ctime = old_dir->i_mtime = current_fs_time(old_dir->i_sb);
 	mark_inode_dirty(old_dir);
 
-	if (dir_fi)
-	{
+	if (dir_fi) {
 		dir_fi->icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(new_dir));
 		udf_update_tag((char *)dir_fi, (sizeof(struct fileIdentDesc) +
-			le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3);
-		if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB)
-		{
+						le16_to_cpu(dir_fi->
+							    lengthOfImpUse) +
+						3) & ~3);
+		if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB) {
 			mark_inode_dirty(old_inode);
-		}
-		else
+		} else
 			mark_buffer_dirty_inode(dir_bh, old_inode);
 		inode_dec_link_count(old_dir);
-		if (new_inode)
-		{
+		if (new_inode) {
 			inode_dec_link_count(new_inode);
-		}
-		else
-		{
+		} else {
 			inc_nlink(new_dir);
 			mark_inode_dirty(new_dir);
 		}
 	}
 
-	if (ofi)
-	{
+	if (ofi) {
 		if (ofibh.sbh != ofibh.ebh)
 			brelse(ofibh.ebh);
 		brelse(ofibh.sbh);
@@ -1297,10 +1268,9 @@
 
 	retval = 0;
 
-end_rename:
+      end_rename:
 	brelse(dir_bh);
-	if (nfi)
-	{
+	if (nfi) {
 		if (nfibh.sbh != nfibh.ebh)
 			brelse(nfibh.ebh);
 		brelse(nfibh.sbh);
@@ -1310,13 +1280,13 @@
 }
 
 const struct inode_operations udf_dir_inode_operations = {
-	.lookup				= udf_lookup,
-	.create				= udf_create,
-	.link				= udf_link,
-	.unlink				= udf_unlink,
-	.symlink			= udf_symlink,
-	.mkdir				= udf_mkdir,
-	.rmdir				= udf_rmdir,
-	.mknod				= udf_mknod,
-	.rename				= udf_rename,
+	.lookup = udf_lookup,
+	.create = udf_create,
+	.link = udf_link,
+	.unlink = udf_unlink,
+	.symlink = udf_symlink,
+	.mkdir = udf_mkdir,
+	.rmdir = udf_rmdir,
+	.mknod = udf_mknod,
+	.rename = udf_rename,
 };
diff --git a/fs/udf/osta_udf.h b/fs/udf/osta_udf.h
index e82aae6..bec5d34 100644
--- a/fs/udf/osta_udf.h
+++ b/fs/udf/osta_udf.h
@@ -65,153 +65,140 @@
 #define IS_DF_HARD_WRITE_PROTECT	0x01
 #define IS_DF_SOFT_WRITE_PROTECT	0x02
 
-struct UDFIdentSuffix
-{
-	__le16		UDFRevision;
-	uint8_t		OSClass;
-	uint8_t		OSIdentifier;
-	uint8_t		reserved[4];
+struct UDFIdentSuffix {
+	__le16 UDFRevision;
+	uint8_t OSClass;
+	uint8_t OSIdentifier;
+	uint8_t reserved[4];
 } __attribute__ ((packed));
 
-struct impIdentSuffix
-{
-	uint8_t		OSClass;
-	uint8_t		OSIdentifier;
-	uint8_t		reserved[6];
+struct impIdentSuffix {
+	uint8_t OSClass;
+	uint8_t OSIdentifier;
+	uint8_t reserved[6];
 } __attribute__ ((packed));
 
-struct appIdentSuffix
-{
-	uint8_t		impUse[8];
+struct appIdentSuffix {
+	uint8_t impUse[8];
 } __attribute__ ((packed));
 
 /* Logical Volume Integrity Descriptor (UDF 2.50 2.2.6) */
 /* Implementation Use (UDF 2.50 2.2.6.4) */
-struct logicalVolIntegrityDescImpUse
-{
-	regid		impIdent;
-	__le32		numFiles;
-	__le32		numDirs;
-	__le16		minUDFReadRev;
-	__le16		minUDFWriteRev;
-	__le16		maxUDFWriteRev;
-	uint8_t		impUse[0];
+struct logicalVolIntegrityDescImpUse {
+	regid impIdent;
+	__le32 numFiles;
+	__le32 numDirs;
+	__le16 minUDFReadRev;
+	__le16 minUDFWriteRev;
+	__le16 maxUDFWriteRev;
+	uint8_t impUse[0];
 } __attribute__ ((packed));
 
 /* Implementation Use Volume Descriptor (UDF 2.50 2.2.7) */
 /* Implementation Use (UDF 2.50 2.2.7.2) */
-struct impUseVolDescImpUse
-{
-	charspec	LVICharset;
-	dstring		logicalVolIdent[128];
-	dstring		LVInfo1[36];
-	dstring		LVInfo2[36];
-	dstring		LVInfo3[36];
-	regid		impIdent;
-	uint8_t		impUse[128];
+struct impUseVolDescImpUse {
+	charspec LVICharset;
+	dstring logicalVolIdent[128];
+	dstring LVInfo1[36];
+	dstring LVInfo2[36];
+	dstring LVInfo3[36];
+	regid impIdent;
+	uint8_t impUse[128];
 } __attribute__ ((packed));
 
-struct udfPartitionMap2
-{
-	uint8_t		partitionMapType;
-	uint8_t		partitionMapLength;
-	uint8_t		reserved1[2];
-	regid		partIdent;
-	__le16		volSeqNum;
-	__le16		partitionNum;
+struct udfPartitionMap2 {
+	uint8_t partitionMapType;
+	uint8_t partitionMapLength;
+	uint8_t reserved1[2];
+	regid partIdent;
+	__le16 volSeqNum;
+	__le16 partitionNum;
 } __attribute__ ((packed));
 
 /* Virtual Partition Map (UDF 2.50 2.2.8) */
-struct virtualPartitionMap
-{
-	uint8_t		partitionMapType;
-	uint8_t		partitionMapLength;
-	uint8_t		reserved1[2];
-	regid		partIdent;
-	__le16		volSeqNum;
-	__le16		partitionNum;
-	uint8_t		reserved2[24];
+struct virtualPartitionMap {
+	uint8_t partitionMapType;
+	uint8_t partitionMapLength;
+	uint8_t reserved1[2];
+	regid partIdent;
+	__le16 volSeqNum;
+	__le16 partitionNum;
+	uint8_t reserved2[24];
 } __attribute__ ((packed));
 
 /* Sparable Partition Map (UDF 2.50 2.2.9) */
-struct sparablePartitionMap
-{
-	uint8_t		partitionMapType;
-	uint8_t		partitionMapLength;
-	uint8_t		reserved1[2];
-	regid		partIdent;
-	__le16		volSeqNum;
-	__le16		partitionNum;
-	__le16		packetLength;
-	uint8_t		numSparingTables;
-	uint8_t		reserved2[1];
-	__le32		sizeSparingTable;
-	__le32		locSparingTable[4];
+struct sparablePartitionMap {
+	uint8_t partitionMapType;
+	uint8_t partitionMapLength;
+	uint8_t reserved1[2];
+	regid partIdent;
+	__le16 volSeqNum;
+	__le16 partitionNum;
+	__le16 packetLength;
+	uint8_t numSparingTables;
+	uint8_t reserved2[1];
+	__le32 sizeSparingTable;
+	__le32 locSparingTable[4];
 } __attribute__ ((packed));
 
 /* Metadata Partition Map (UDF 2.4.0 2.2.10) */
-struct metadataPartitionMap
-{
-	uint8_t		partitionMapType;
-	uint8_t		partitionMapLength;
-	uint8_t		reserved1[2];
-	regid		partIdent;
-	__le16		volSeqNum;
-	__le16		partitionNum;
-	__le32		metadataFileLoc;
-	__le32		metadataMirrorFileLoc;
-	__le32		metadataBitmapFileLoc;
-	__le32		allocUnitSize;
-	__le16		alignUnitSize;
-	uint8_t		flags;
-	uint8_t		reserved2[5];
+struct metadataPartitionMap {
+	uint8_t partitionMapType;
+	uint8_t partitionMapLength;
+	uint8_t reserved1[2];
+	regid partIdent;
+	__le16 volSeqNum;
+	__le16 partitionNum;
+	__le32 metadataFileLoc;
+	__le32 metadataMirrorFileLoc;
+	__le32 metadataBitmapFileLoc;
+	__le32 allocUnitSize;
+	__le16 alignUnitSize;
+	uint8_t flags;
+	uint8_t reserved2[5];
 } __attribute__ ((packed));
 
 /* Virtual Allocation Table (UDF 1.5 2.2.10) */
-struct virtualAllocationTable15
-{
-	__le32		VirtualSector[0];
-	regid		vatIdent;
-	__le32		previousVATICBLoc;
-} __attribute__ ((packed));  
+struct virtualAllocationTable15 {
+	__le32 VirtualSector[0];
+	regid vatIdent;
+	__le32 previousVATICBLoc;
+} __attribute__ ((packed));
 
 #define ICBTAG_FILE_TYPE_VAT15		0x00U
 
 /* Virtual Allocation Table (UDF 2.50 2.2.11) */
-struct virtualAllocationTable20
-{
-	__le16		lengthHeader;
-	__le16		lengthImpUse;
-	dstring		logicalVolIdent[128];
-	__le32		previousVATICBLoc;
-	__le32		numFiles;
-	__le32		numDirs;
-	__le16		minReadRevision;
-	__le16		minWriteRevision;
-	__le16		maxWriteRevision;
-	__le16		reserved;
-	uint8_t		impUse[0];
-	__le32		vatEntry[0];
+struct virtualAllocationTable20 {
+	__le16 lengthHeader;
+	__le16 lengthImpUse;
+	dstring logicalVolIdent[128];
+	__le32 previousVATICBLoc;
+	__le32 numFiles;
+	__le32 numDirs;
+	__le16 minReadRevision;
+	__le16 minWriteRevision;
+	__le16 maxWriteRevision;
+	__le16 reserved;
+	uint8_t impUse[0];
+	__le32 vatEntry[0];
 } __attribute__ ((packed));
 
 #define ICBTAG_FILE_TYPE_VAT20		0xF8U
 
 /* Sparing Table (UDF 2.50 2.2.12) */
-struct sparingEntry
-{
-	__le32		origLocation;
-	__le32		mappedLocation;
+struct sparingEntry {
+	__le32 origLocation;
+	__le32 mappedLocation;
 } __attribute__ ((packed));
 
-struct sparingTable
-{
-	tag 		descTag;
-	regid		sparingIdent;
-	__le16		reallocationTableLen;
-	__le16		reserved;
-	__le32		sequenceNum;
+struct sparingTable {
+	tag descTag;
+	regid sparingIdent;
+	__le16 reallocationTableLen;
+	__le16 reserved;
+	__le32 sequenceNum;
 	struct sparingEntry
-			mapEntry[0];
+	 mapEntry[0];
 } __attribute__ ((packed));
 
 /* Metadata File (and Metadata Mirror File) (UDF 2.50 2.2.13.1) */
@@ -220,10 +207,9 @@
 #define ICBTAG_FILE_TYPE_BITMAP		0xFC
 
 /* struct long_ad ICB - ADImpUse (UDF 2.50 2.2.4.3) */
-struct allocDescImpUse
-{
-	__le16		flags;
-	uint8_t		impUse[4];
+struct allocDescImpUse {
+	__le16 flags;
+	uint8_t impUse[4];
 } __attribute__ ((packed));
 
 #define AD_IU_EXT_ERASED		0x0001
@@ -233,27 +219,24 @@
 
 /* Implementation Use Extended Attribute (UDF 2.50 3.3.4.5) */
 /* FreeEASpace (UDF 2.50 3.3.4.5.1.1) */
-struct freeEaSpace
-{
-	__le16		headerChecksum;
-	uint8_t		freeEASpace[0];
+struct freeEaSpace {
+	__le16 headerChecksum;
+	uint8_t freeEASpace[0];
 } __attribute__ ((packed));
 
 /* DVD Copyright Management Information (UDF 2.50 3.3.4.5.1.2) */
-struct DVDCopyrightImpUse 
-{
-	__le16		headerChecksum;
-	uint8_t		CGMSInfo;
-	uint8_t		dataType;
-	uint8_t		protectionSystemInfo[4];
+struct DVDCopyrightImpUse {
+	__le16 headerChecksum;
+	uint8_t CGMSInfo;
+	uint8_t dataType;
+	uint8_t protectionSystemInfo[4];
 } __attribute__ ((packed));
 
 /* Application Use Extended Attribute (UDF 2.50 3.3.4.6) */
 /* FreeAppEASpace (UDF 2.50 3.3.4.6.1) */
-struct freeAppEASpace
-{
-	__le16		headerChecksum;
-	uint8_t		freeEASpace[0];
+struct freeAppEASpace {
+	__le16 headerChecksum;
+	uint8_t freeEASpace[0];
 } __attribute__ ((packed));
 
 /* UDF Defined System Stream (UDF 2.50 3.3.7) */
@@ -293,4 +276,4 @@
 #define UDF_OS_ID_BEOS			0x00U
 #define UDF_OS_ID_WINCE			0x00U
 
-#endif /* _OSTA_UDF_H */
+#endif				/* _OSTA_UDF_H */
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index 467a261..a95d830 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -28,106 +28,120 @@
 #include <linux/slab.h>
 #include <linux/buffer_head.h>
 
-inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
+inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
+			       uint16_t partition, uint32_t offset)
 {
-	if (partition >= UDF_SB_NUMPARTS(sb))
-	{
-		udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n",
-			block, partition, offset);
+	if (partition >= UDF_SB_NUMPARTS(sb)) {
+		udf_debug
+		    ("block=%d, partition=%d, offset=%d: invalid partition\n",
+		     block, partition, offset);
 		return 0xFFFFFFFF;
 	}
 	if (UDF_SB_PARTFUNC(sb, partition))
-		return UDF_SB_PARTFUNC(sb, partition)(sb, block, partition, offset);
+		return UDF_SB_PARTFUNC(sb, partition) (sb, block, partition,
+						       offset);
 	else
 		return UDF_SB_PARTROOT(sb, partition) + block + offset;
 }
 
-uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
+uint32_t udf_get_pblock_virt15(struct super_block * sb, uint32_t block,
+			       uint16_t partition, uint32_t offset)
 {
 	struct buffer_head *bh = NULL;
 	uint32_t newblock;
 	uint32_t index;
 	uint32_t loc;
 
-	index = (sb->s_blocksize - UDF_SB_TYPEVIRT(sb,partition).s_start_offset) / sizeof(uint32_t);
+	index =
+	    (sb->s_blocksize -
+	     UDF_SB_TYPEVIRT(sb, partition).s_start_offset) / sizeof(uint32_t);
 
-	if (block > UDF_SB_TYPEVIRT(sb,partition).s_num_entries)
-	{
-		udf_debug("Trying to access block beyond end of VAT (%d max %d)\n",
-			block, UDF_SB_TYPEVIRT(sb,partition).s_num_entries);
+	if (block > UDF_SB_TYPEVIRT(sb, partition).s_num_entries) {
+		udf_debug
+		    ("Trying to access block beyond end of VAT (%d max %d)\n",
+		     block, UDF_SB_TYPEVIRT(sb, partition).s_num_entries);
 		return 0xFFFFFFFF;
 	}
 
-	if (block >= index)
-	{
+	if (block >= index) {
 		block -= index;
 		newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
 		index = block % (sb->s_blocksize / sizeof(uint32_t));
-	}
-	else
-	{
+	} else {
 		newblock = 0;
-		index = UDF_SB_TYPEVIRT(sb,partition).s_start_offset / sizeof(uint32_t) + block;
+		index =
+		    UDF_SB_TYPEVIRT(sb,
+				    partition).s_start_offset /
+		    sizeof(uint32_t) + block;
 	}
 
 	loc = udf_block_map(UDF_SB_VAT(sb), newblock);
 
-	if (!(bh = sb_bread(sb, loc)))
-	{
+	if (!(bh = sb_bread(sb, loc))) {
 		udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
-			sb, block, partition, loc, index);
+			  sb, block, partition, loc, index);
 		return 0xFFFFFFFF;
 	}
 
-	loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);
+	loc = le32_to_cpu(((__le32 *) bh->b_data)[index]);
 
 	brelse(bh);
 
-	if (UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum == partition)
-	{
+	if (UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum == partition) {
 		udf_debug("recursive call to udf_get_pblock!\n");
 		return 0xFFFFFFFF;
 	}
 
-	return udf_get_pblock(sb, loc, UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum, offset);
+	return udf_get_pblock(sb, loc,
+			      UDF_I_LOCATION(UDF_SB_VAT(sb)).
+			      partitionReferenceNum, offset);
 }
 
-inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
+inline uint32_t udf_get_pblock_virt20(struct super_block * sb, uint32_t block,
+				      uint16_t partition, uint32_t offset)
 {
 	return udf_get_pblock_virt15(sb, block, partition, offset);
 }
 
-uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
+uint32_t udf_get_pblock_spar15(struct super_block * sb, uint32_t block,
+			       uint16_t partition, uint32_t offset)
 {
 	int i;
 	struct sparingTable *st = NULL;
-	uint32_t packet = (block + offset) & ~(UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1);
+	uint32_t packet =
+	    (block + offset) & ~(UDF_SB_TYPESPAR(sb, partition).s_packet_len -
+				 1);
 
-	for (i=0; i<4; i++)
-	{
-		if (UDF_SB_TYPESPAR(sb,partition).s_spar_map[i] != NULL)
-		{
-			st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,partition).s_spar_map[i]->b_data;
+	for (i = 0; i < 4; i++) {
+		if (UDF_SB_TYPESPAR(sb, partition).s_spar_map[i] != NULL) {
+			st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,
+								    partition).
+			    s_spar_map[i]->b_data;
 			break;
 		}
 	}
 
-	if (st)
-	{
-		for (i=0; i<le16_to_cpu(st->reallocationTableLen); i++)
-		{
-			if (le32_to_cpu(st->mapEntry[i].origLocation) >= 0xFFFFFFF0)
+	if (st) {
+		for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
+			if (le32_to_cpu(st->mapEntry[i].origLocation) >=
+			    0xFFFFFFF0)
 				break;
-			else if (le32_to_cpu(st->mapEntry[i].origLocation) == packet)
-			{
-				return le32_to_cpu(st->mapEntry[i].mappedLocation) +
-					((block + offset) & (UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1));
-			}
-			else if (le32_to_cpu(st->mapEntry[i].origLocation) > packet)
+			else if (le32_to_cpu(st->mapEntry[i].origLocation) ==
+				 packet) {
+				return le32_to_cpu(st->mapEntry[i].
+						   mappedLocation) + ((block +
+								       offset) &
+								      (UDF_SB_TYPESPAR
+								       (sb,
+									partition).
+								       s_packet_len
+								       - 1));
+			} else if (le32_to_cpu(st->mapEntry[i].origLocation) >
+				   packet)
 				break;
 		}
 	}
-	return UDF_SB_PARTROOT(sb,partition) + block + offset;
+	return UDF_SB_PARTROOT(sb, partition) + block + offset;
 }
 
 int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
@@ -138,19 +152,21 @@
 	uint32_t packet;
 	int i, j, k, l;
 
-	for (i=0; i<UDF_SB_NUMPARTS(sb); i++)
-	{
-		if (old_block > UDF_SB_PARTROOT(sb,i) &&
-		    old_block < UDF_SB_PARTROOT(sb,i) + UDF_SB_PARTLEN(sb,i))
+	for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) {
+		if (old_block > UDF_SB_PARTROOT(sb, i) &&
+		    old_block < UDF_SB_PARTROOT(sb, i) + UDF_SB_PARTLEN(sb, i))
 		{
-			sdata = &UDF_SB_TYPESPAR(sb,i);
-			packet = (old_block - UDF_SB_PARTROOT(sb,i)) & ~(sdata->s_packet_len - 1);
+			sdata = &UDF_SB_TYPESPAR(sb, i);
+			packet =
+			    (old_block -
+			     UDF_SB_PARTROOT(sb,
+					     i)) & ~(sdata->s_packet_len - 1);
 
-			for (j=0; j<4; j++)
-			{
-				if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL)
-				{
-					st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
+			for (j = 0; j < 4; j++) {
+				if (UDF_SB_TYPESPAR(sb, i).s_spar_map[j] !=
+				    NULL) {
+					st = (struct sparingTable *)sdata->
+					    s_spar_map[j]->b_data;
 					break;
 				}
 			}
@@ -158,60 +174,123 @@
 			if (!st)
 				return 1;
 
-			for (k=0; k<le16_to_cpu(st->reallocationTableLen); k++)
-			{
-				if (le32_to_cpu(st->mapEntry[k].origLocation) == 0xFFFFFFFF)
-				{
-					for (; j<4; j++)
-					{
-						if (sdata->s_spar_map[j])
-						{
-							st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
-							st->mapEntry[k].origLocation = cpu_to_le32(packet);
-							udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
-							mark_buffer_dirty(sdata->s_spar_map[j]);
+			for (k = 0; k < le16_to_cpu(st->reallocationTableLen);
+			     k++) {
+				if (le32_to_cpu(st->mapEntry[k].origLocation) ==
+				    0xFFFFFFFF) {
+					for (; j < 4; j++) {
+						if (sdata->s_spar_map[j]) {
+							st = (struct
+							      sparingTable *)
+							    sdata->
+							    s_spar_map[j]->
+							    b_data;
+							st->mapEntry[k].
+							    origLocation =
+							    cpu_to_le32(packet);
+							udf_update_tag((char *)
+								       st,
+								       sizeof
+								       (struct
+									sparingTable)
+								       +
+								       le16_to_cpu
+								       (st->
+									reallocationTableLen)
+								       *
+								       sizeof
+								       (struct
+									sparingEntry));
+							mark_buffer_dirty
+							    (sdata->
+							     s_spar_map[j]);
 						}
 					}
-					*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
-						((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
+					*new_block =
+					    le32_to_cpu(st->mapEntry[k].
+							mappedLocation) +
+					    ((old_block -
+					      UDF_SB_PARTROOT(sb,
+							      i)) & (sdata->
+								     s_packet_len
+								     - 1));
 					return 0;
-				}
-				else if (le32_to_cpu(st->mapEntry[k].origLocation) == packet)
-				{
-					*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
-						((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
+				} else
+				    if (le32_to_cpu
+					(st->mapEntry[k].origLocation) ==
+					packet) {
+					*new_block =
+					    le32_to_cpu(st->mapEntry[k].
+							mappedLocation) +
+					    ((old_block -
+					      UDF_SB_PARTROOT(sb,
+							      i)) & (sdata->
+								     s_packet_len
+								     - 1));
 					return 0;
-				}
-				else if (le32_to_cpu(st->mapEntry[k].origLocation) > packet)
+				} else
+				    if (le32_to_cpu
+					(st->mapEntry[k].origLocation) > packet)
 					break;
 			}
-			for (l=k; l<le16_to_cpu(st->reallocationTableLen); l++)
-			{
-				if (le32_to_cpu(st->mapEntry[l].origLocation) == 0xFFFFFFFF)
-				{
-					for (; j<4; j++)
-					{
-						if (sdata->s_spar_map[j])
-						{
-							st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
-							mapEntry = st->mapEntry[l];
-							mapEntry.origLocation = cpu_to_le32(packet);
-							memmove(&st->mapEntry[k+1], &st->mapEntry[k], (l-k)*sizeof(struct sparingEntry));
-							st->mapEntry[k] = mapEntry;
-							udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
-							mark_buffer_dirty(sdata->s_spar_map[j]);
+			for (l = k; l < le16_to_cpu(st->reallocationTableLen);
+			     l++) {
+				if (le32_to_cpu(st->mapEntry[l].origLocation) ==
+				    0xFFFFFFFF) {
+					for (; j < 4; j++) {
+						if (sdata->s_spar_map[j]) {
+							st = (struct
+							      sparingTable *)
+							    sdata->
+							    s_spar_map[j]->
+							    b_data;
+							mapEntry =
+							    st->mapEntry[l];
+							mapEntry.origLocation =
+							    cpu_to_le32(packet);
+							memmove(&st->
+								mapEntry[k + 1],
+								&st->
+								mapEntry[k],
+								(l -
+								 k) *
+								sizeof(struct
+								       sparingEntry));
+							st->mapEntry[k] =
+							    mapEntry;
+							udf_update_tag((char *)
+								       st,
+								       sizeof
+								       (struct
+									sparingTable)
+								       +
+								       le16_to_cpu
+								       (st->
+									reallocationTableLen)
+								       *
+								       sizeof
+								       (struct
+									sparingEntry));
+							mark_buffer_dirty
+							    (sdata->
+							     s_spar_map[j]);
 						}
 					}
-					*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
-						((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
+					*new_block =
+					    le32_to_cpu(st->mapEntry[k].
+							mappedLocation) +
+					    ((old_block -
+					      UDF_SB_PARTROOT(sb,
+							      i)) & (sdata->
+								     s_packet_len
+								     - 1));
 					return 0;
 				}
 			}
 			return 1;
 		}
 	}
-	if (i == UDF_SB_NUMPARTS(sb))
-	{
+	if (i == UDF_SB_NUMPARTS(sb)) {
 		/* outside of partitions */
 		/* for now, fail =) */
 		return 1;
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 6658afb..911387a 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -38,7 +38,7 @@
  *  12/20/98      find the free space bitmap (if it exists)
  */
 
-#include "udfdecl.h"    
+#include "udfdecl.h"
 
 #include <linux/blkdev.h>
 #include <linux/slab.h>
@@ -80,12 +80,15 @@
 static int udf_check_valid(struct super_block *, int, int);
 static int udf_vrs(struct super_block *sb, int silent);
 static int udf_load_partition(struct super_block *, kernel_lb_addr *);
-static int udf_load_logicalvol(struct super_block *, struct buffer_head *, kernel_lb_addr *);
+static int udf_load_logicalvol(struct super_block *, struct buffer_head *,
+			       kernel_lb_addr *);
 static void udf_load_logicalvolint(struct super_block *, kernel_extent_ad);
 static void udf_find_anchor(struct super_block *);
-static int udf_find_fileset(struct super_block *, kernel_lb_addr *, kernel_lb_addr *);
+static int udf_find_fileset(struct super_block *, kernel_lb_addr *,
+			    kernel_lb_addr *);
 static void udf_load_pvoldesc(struct super_block *, struct buffer_head *);
-static void udf_load_fileset(struct super_block *, struct buffer_head *, kernel_lb_addr *);
+static void udf_load_fileset(struct super_block *, struct buffer_head *,
+			     kernel_lb_addr *);
 static void udf_load_partdesc(struct super_block *, struct buffer_head *);
 static void udf_open_lvid(struct super_block *);
 static void udf_close_lvid(struct super_block *);
@@ -94,25 +97,27 @@
 
 /* UDF filesystem type */
 static int udf_get_sb(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+		      int flags, const char *dev_name, void *data,
+		      struct vfsmount *mnt)
 {
 	return get_sb_bdev(fs_type, flags, dev_name, data, udf_fill_super, mnt);
 }
 
 static struct file_system_type udf_fstype = {
-	.owner		= THIS_MODULE,
-	.name		= "udf",
-	.get_sb		= udf_get_sb,
-	.kill_sb	= kill_block_super,
-	.fs_flags	= FS_REQUIRES_DEV,
+	.owner = THIS_MODULE,
+	.name = "udf",
+	.get_sb = udf_get_sb,
+	.kill_sb = kill_block_super,
+	.fs_flags = FS_REQUIRES_DEV,
 };
 
-static struct kmem_cache * udf_inode_cachep;
+static struct kmem_cache *udf_inode_cachep;
 
 static struct inode *udf_alloc_inode(struct super_block *sb)
 {
 	struct udf_inode_info *ei;
-	ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
+	ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep,
+						       GFP_KERNEL);
 	if (!ei)
 		return NULL;
 
@@ -130,9 +135,9 @@
 	kmem_cache_free(udf_inode_cachep, UDF_I(inode));
 }
 
-static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
-	struct udf_inode_info *ei = (struct udf_inode_info *) foo;
+	struct udf_inode_info *ei = (struct udf_inode_info *)foo;
 
 	ei->i_ext.i_data = NULL;
 	inode_init_once(&ei->vfs_inode);
@@ -142,8 +147,8 @@
 {
 	udf_inode_cachep = kmem_cache_create("udf_inode_cache",
 					     sizeof(struct udf_inode_info),
-					     0, (SLAB_RECLAIM_ACCOUNT|
-						SLAB_MEM_SPREAD),
+					     0, (SLAB_RECLAIM_ACCOUNT |
+						 SLAB_MEM_SPREAD),
 					     init_once, NULL);
 	if (udf_inode_cachep == NULL)
 		return -ENOMEM;
@@ -157,19 +162,18 @@
 
 /* Superblock operations */
 static const struct super_operations udf_sb_ops = {
-	.alloc_inode		= udf_alloc_inode,
-	.destroy_inode		= udf_destroy_inode,
-	.write_inode		= udf_write_inode,
-	.delete_inode		= udf_delete_inode,
-	.clear_inode		= udf_clear_inode,
-	.put_super		= udf_put_super,
-	.write_super		= udf_write_super,
-	.statfs			= udf_statfs,
-	.remount_fs		= udf_remount_fs,
+	.alloc_inode = udf_alloc_inode,
+	.destroy_inode = udf_destroy_inode,
+	.write_inode = udf_write_inode,
+	.delete_inode = udf_delete_inode,
+	.clear_inode = udf_clear_inode,
+	.put_super = udf_put_super,
+	.write_super = udf_write_super,
+	.statfs = udf_statfs,
+	.remount_fs = udf_remount_fs,
 };
 
-struct udf_options
-{
+struct udf_options {
 	unsigned char novrs;
 	unsigned int blocksize;
 	unsigned int session;
@@ -196,9 +200,9 @@
 	if (err)
 		goto out;
 	return 0;
-out:
+      out:
 	destroy_inodecache();
-out1:
+      out1:
 	return err;
 }
 
@@ -209,7 +213,7 @@
 }
 
 module_init(init_udf_fs)
-module_exit(exit_udf_fs)
+    module_exit(exit_udf_fs)
 
 /*
  * udf_parse_options
@@ -264,7 +268,6 @@
  *	July 1, 1997 - Andrew E. Mileski
  *	Written, tested, and released.
  */
-
 enum {
 	Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
 	Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
@@ -303,8 +306,7 @@
 	{Opt_err, NULL}
 };
 
-static int
-udf_parse_options(char *options, struct udf_options *uopt)
+static int udf_parse_options(char *options, struct udf_options *uopt)
 {
 	char *p;
 	int option;
@@ -323,126 +325,123 @@
 	if (!options)
 		return 1;
 
-	while ((p = strsep(&options, ",")) != NULL)
-	{
+	while ((p = strsep(&options, ",")) != NULL) {
 		substring_t args[MAX_OPT_ARGS];
 		int token;
 		if (!*p)
 			continue;
 
 		token = match_token(p, tokens, args);
-		switch (token)
-		{
-			case Opt_novrs:
-				uopt->novrs = 1;
-			case Opt_bs:
-				if (match_int(&args[0], &option))
-					return 0;
-				uopt->blocksize = option;
-				break;
-			case Opt_unhide:
-				uopt->flags |= (1 << UDF_FLAG_UNHIDE);
-				break;
-			case Opt_undelete:
-				uopt->flags |= (1 << UDF_FLAG_UNDELETE);
-				break;
-			case Opt_noadinicb:
-				uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
-				break;
-			case Opt_adinicb:
-				uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
-				break;
-			case Opt_shortad:
-				uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
-				break;
-			case Opt_longad:
-				uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
-				break;
-			case Opt_gid:
-				if (match_int(args, &option))
-					return 0;
-				uopt->gid = option;
-				break;
-			case Opt_uid:
-				if (match_int(args, &option))
-					return 0;
-				uopt->uid = option;
-				break;
-			case Opt_umask:
-				if (match_octal(args, &option))
-					return 0;
-				uopt->umask = option;
-				break;
-			case Opt_nostrict:
-				uopt->flags &= ~(1 << UDF_FLAG_STRICT);
-				break;
-			case Opt_session:
-				if (match_int(args, &option))
-					return 0;
-				uopt->session = option;
-				break;
-			case Opt_lastblock:
-				if (match_int(args, &option))
-					return 0;
-				uopt->lastblock = option;
-				break;
-			case Opt_anchor:
-				if (match_int(args, &option))
-					return 0;
-				uopt->anchor = option;
-				break;
-			case Opt_volume:
-				if (match_int(args, &option))
-					return 0;
-				uopt->volume = option;
-				break;
-			case Opt_partition:
-				if (match_int(args, &option))
-					return 0;
-				uopt->partition = option;
-				break;
-			case Opt_fileset:
-				if (match_int(args, &option))
-					return 0;
-				uopt->fileset = option;
-				break;
-			case Opt_rootdir:
-				if (match_int(args, &option))
-					return 0;
-				uopt->rootdir = option;
-				break;
-			case Opt_utf8:
-				uopt->flags |= (1 << UDF_FLAG_UTF8);
-				break;
+		switch (token) {
+		case Opt_novrs:
+			uopt->novrs = 1;
+		case Opt_bs:
+			if (match_int(&args[0], &option))
+				return 0;
+			uopt->blocksize = option;
+			break;
+		case Opt_unhide:
+			uopt->flags |= (1 << UDF_FLAG_UNHIDE);
+			break;
+		case Opt_undelete:
+			uopt->flags |= (1 << UDF_FLAG_UNDELETE);
+			break;
+		case Opt_noadinicb:
+			uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
+			break;
+		case Opt_adinicb:
+			uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
+			break;
+		case Opt_shortad:
+			uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
+			break;
+		case Opt_longad:
+			uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
+			break;
+		case Opt_gid:
+			if (match_int(args, &option))
+				return 0;
+			uopt->gid = option;
+			break;
+		case Opt_uid:
+			if (match_int(args, &option))
+				return 0;
+			uopt->uid = option;
+			break;
+		case Opt_umask:
+			if (match_octal(args, &option))
+				return 0;
+			uopt->umask = option;
+			break;
+		case Opt_nostrict:
+			uopt->flags &= ~(1 << UDF_FLAG_STRICT);
+			break;
+		case Opt_session:
+			if (match_int(args, &option))
+				return 0;
+			uopt->session = option;
+			break;
+		case Opt_lastblock:
+			if (match_int(args, &option))
+				return 0;
+			uopt->lastblock = option;
+			break;
+		case Opt_anchor:
+			if (match_int(args, &option))
+				return 0;
+			uopt->anchor = option;
+			break;
+		case Opt_volume:
+			if (match_int(args, &option))
+				return 0;
+			uopt->volume = option;
+			break;
+		case Opt_partition:
+			if (match_int(args, &option))
+				return 0;
+			uopt->partition = option;
+			break;
+		case Opt_fileset:
+			if (match_int(args, &option))
+				return 0;
+			uopt->fileset = option;
+			break;
+		case Opt_rootdir:
+			if (match_int(args, &option))
+				return 0;
+			uopt->rootdir = option;
+			break;
+		case Opt_utf8:
+			uopt->flags |= (1 << UDF_FLAG_UTF8);
+			break;
 #ifdef CONFIG_UDF_NLS
-			case Opt_iocharset:
-				uopt->nls_map = load_nls(args[0].from);
-				uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
-				break;
+		case Opt_iocharset:
+			uopt->nls_map = load_nls(args[0].from);
+			uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
+			break;
 #endif
-			case Opt_uignore:
-				uopt->flags |= (1 << UDF_FLAG_UID_IGNORE);
-				break;
-			case Opt_uforget:
-				uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
-				break;
-			case Opt_gignore:
-			    uopt->flags |= (1 << UDF_FLAG_GID_IGNORE);
-				break;
-			case Opt_gforget:
-			    uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
-				break;
-			default:
-				printk(KERN_ERR "udf: bad mount option \"%s\" "
-						"or missing value\n", p);
+		case Opt_uignore:
+			uopt->flags |= (1 << UDF_FLAG_UID_IGNORE);
+			break;
+		case Opt_uforget:
+			uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
+			break;
+		case Opt_gignore:
+			uopt->flags |= (1 << UDF_FLAG_GID_IGNORE);
+			break;
+		case Opt_gforget:
+			uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
+			break;
+		default:
+			printk(KERN_ERR "udf: bad mount option \"%s\" "
+			       "or missing value\n", p);
 			return 0;
 		}
 	}
 	return 1;
 }
 
-void
-udf_write_super(struct super_block *sb)
+void udf_write_super(struct super_block *sb)
 {
 	lock_kernel();
 	if (!(sb->s_flags & MS_RDONLY))
@@ -451,22 +450,21 @@
 	unlock_kernel();
 }
 
-static int
-udf_remount_fs(struct super_block *sb, int *flags, char *options)
+static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
 {
 	struct udf_options uopt;
 
-	uopt.flags = UDF_SB(sb)->s_flags ;
-	uopt.uid   = UDF_SB(sb)->s_uid ;
-	uopt.gid   = UDF_SB(sb)->s_gid ;
-	uopt.umask = UDF_SB(sb)->s_umask ;
+	uopt.flags = UDF_SB(sb)->s_flags;
+	uopt.uid = UDF_SB(sb)->s_uid;
+	uopt.gid = UDF_SB(sb)->s_gid;
+	uopt.umask = UDF_SB(sb)->s_umask;
 
-	if ( !udf_parse_options(options, &uopt) )
+	if (!udf_parse_options(options, &uopt))
 		return -EINVAL;
 
 	UDF_SB(sb)->s_flags = uopt.flags;
-	UDF_SB(sb)->s_uid   = uopt.uid;
-	UDF_SB(sb)->s_gid   = uopt.gid;
+	UDF_SB(sb)->s_uid = uopt.uid;
+	UDF_SB(sb)->s_gid = uopt.gid;
 	UDF_SB(sb)->s_umask = uopt.umask;
 
 	if (UDF_SB_LVIDBH(sb)) {
@@ -512,8 +510,7 @@
  *	July 1, 1997 - Andrew E. Mileski
  *	Written, tested, and released.
  */
-static  int
-udf_set_blocksize(struct super_block *sb, int bsize)
+static int udf_set_blocksize(struct super_block *sb, int bsize)
 {
 	if (!sb_min_blocksize(sb, bsize)) {
 		udf_debug("Bad block size (%d)\n", bsize);
@@ -523,16 +520,15 @@
 	return sb->s_blocksize;
 }
 
-static int
-udf_vrs(struct super_block *sb, int silent)
+static int udf_vrs(struct super_block *sb, int silent)
 {
 	struct volStructDesc *vsd = NULL;
 	int sector = 32768;
 	int sectorsize;
 	struct buffer_head *bh = NULL;
-	int iso9660=0;
-	int nsr02=0;
-	int nsr03=0;
+	int iso9660 = 0;
+	int nsr02 = 0;
+	int nsr03 = 0;
 
 	/* Block size must be a multiple of 512 */
 	if (sb->s_blocksize & 511)
@@ -546,10 +542,9 @@
 	sector += (UDF_SB_SESSION(sb) << sb->s_blocksize_bits);
 
 	udf_debug("Starting at sector %u (%ld byte sectors)\n",
-		(sector >> sb->s_blocksize_bits), sb->s_blocksize);
+		  (sector >> sb->s_blocksize_bits), sb->s_blocksize);
 	/* Process the sequence (if applicable) */
-	for (;!nsr02 && !nsr03; sector += sectorsize)
-	{
+	for (; !nsr02 && !nsr03; sector += sectorsize) {
 		/* Read a block */
 		bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
 		if (!bh)
@@ -557,52 +552,56 @@
 
 		/* Look for ISO  descriptors */
 		vsd = (struct volStructDesc *)(bh->b_data +
-			(sector & (sb->s_blocksize - 1)));
+					       (sector &
+						(sb->s_blocksize - 1)));
 
-		if (vsd->stdIdent[0] == 0)
-		{
+		if (vsd->stdIdent[0] == 0) {
 			brelse(bh);
 			break;
-		}
-		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN))
-		{
+		} else
+		    if (!strncmp
+			(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) {
 			iso9660 = sector;
-			switch (vsd->structType)
-			{
-				case 0: 
-					udf_debug("ISO9660 Boot Record found\n");
-					break;
-				case 1: 
-					udf_debug("ISO9660 Primary Volume Descriptor found\n");
-					break;
-				case 2: 
-					udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
-					break;
-				case 3: 
-					udf_debug("ISO9660 Volume Partition Descriptor found\n");
-					break;
-				case 255: 
-					udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
-					break;
-				default: 
-					udf_debug("ISO9660 VRS (%u) found\n", vsd->structType);
-					break;
+			switch (vsd->structType) {
+			case 0:
+				udf_debug("ISO9660 Boot Record found\n");
+				break;
+			case 1:
+				udf_debug
+				    ("ISO9660 Primary Volume Descriptor found\n");
+				break;
+			case 2:
+				udf_debug
+				    ("ISO9660 Supplementary Volume Descriptor found\n");
+				break;
+			case 3:
+				udf_debug
+				    ("ISO9660 Volume Partition Descriptor found\n");
+				break;
+			case 255:
+				udf_debug
+				    ("ISO9660 Volume Descriptor Set Terminator found\n");
+				break;
+			default:
+				udf_debug("ISO9660 VRS (%u) found\n",
+					  vsd->structType);
+				break;
 			}
-		}
-		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN))
-		{
-		}
-		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01, VSD_STD_ID_LEN))
-		{
+		} else
+		    if (!strncmp
+			(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN)) {
+		} else
+		    if (!strncmp
+			(vsd->stdIdent, VSD_STD_ID_TEA01, VSD_STD_ID_LEN)) {
 			brelse(bh);
 			break;
-		}
-		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN))
-		{
+		} else
+		    if (!strncmp
+			(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN)) {
 			nsr02 = sector;
-		}
-		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN))
-		{
+		} else
+		    if (!strncmp
+			(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN)) {
 			nsr03 = sector;
 		}
 		brelse(bh);
@@ -635,8 +634,7 @@
  *	July 1, 1997 - Andrew E. Mileski
  *	Written, tested, and released.
  */
-static void
-udf_find_anchor(struct super_block *sb)
+static void udf_find_anchor(struct super_block *sb)
 {
 	int lastblock = UDF_SB_LASTBLOCK(sb);
 	struct buffer_head *bh = NULL;
@@ -644,13 +642,13 @@
 	uint32_t location;
 	int i;
 
-	if (lastblock)
-	{
+	if (lastblock) {
 		int varlastblock = udf_variable_to_fixed(lastblock);
-		int last[] =  { lastblock, lastblock - 2,
-				lastblock - 150, lastblock - 152,
-				varlastblock, varlastblock - 2,
-				varlastblock - 150, varlastblock - 152 };
+		int last[] = { lastblock, lastblock - 2,
+			lastblock - 150, lastblock - 152,
+			varlastblock, varlastblock - 2,
+			varlastblock - 150, varlastblock - 152
+		};
 
 		lastblock = 0;
 
@@ -663,90 +661,103 @@
 		 *  however, if the disc isn't closed, it could be 512 */
 
 		for (i = 0; !lastblock && i < ARRAY_SIZE(last); i++) {
-			if (last[i] < 0 || !(bh = sb_bread(sb, last[i])))
-			{
+			if (last[i] < 0 || !(bh = sb_bread(sb, last[i]))) {
 				ident = location = 0;
-			}
-			else
-			{
-				ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
-				location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
+			} else {
+				ident =
+				    le16_to_cpu(((tag *) bh->b_data)->tagIdent);
+				location =
+				    le32_to_cpu(((tag *) bh->b_data)->
+						tagLocation);
 				brelse(bh);
 			}
 
-			if (ident == TAG_IDENT_AVDP)
-			{
-				if (location == last[i] - UDF_SB_SESSION(sb))
-				{
-					lastblock = UDF_SB_ANCHOR(sb)[0] = last[i] - UDF_SB_SESSION(sb);
-					UDF_SB_ANCHOR(sb)[1] = last[i] - 256 - UDF_SB_SESSION(sb);
-				}
-				else if (location == udf_variable_to_fixed(last[i]) - UDF_SB_SESSION(sb))
-				{
+			if (ident == TAG_IDENT_AVDP) {
+				if (location == last[i] - UDF_SB_SESSION(sb)) {
+					lastblock = UDF_SB_ANCHOR(sb)[0] =
+					    last[i] - UDF_SB_SESSION(sb);
+					UDF_SB_ANCHOR(sb)[1] =
+					    last[i] - 256 - UDF_SB_SESSION(sb);
+				} else if (location ==
+					   udf_variable_to_fixed(last[i]) -
+					   UDF_SB_SESSION(sb)) {
 					UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
-					lastblock = UDF_SB_ANCHOR(sb)[0] = udf_variable_to_fixed(last[i]) - UDF_SB_SESSION(sb);
-					UDF_SB_ANCHOR(sb)[1] = lastblock - 256 - UDF_SB_SESSION(sb);
-				}
-				else
-					udf_debug("Anchor found at block %d, location mismatch %d.\n",
-						last[i], location);
-			}
-			else if (ident == TAG_IDENT_FE || ident == TAG_IDENT_EFE)
-			{
+					lastblock = UDF_SB_ANCHOR(sb)[0] =
+					    udf_variable_to_fixed(last[i]) -
+					    UDF_SB_SESSION(sb);
+					UDF_SB_ANCHOR(sb)[1] =
+					    lastblock - 256 -
+					    UDF_SB_SESSION(sb);
+				} else
+					udf_debug
+					    ("Anchor found at block %d, location mismatch %d.\n",
+					     last[i], location);
+			} else if (ident == TAG_IDENT_FE
+				   || ident == TAG_IDENT_EFE) {
 				lastblock = last[i];
 				UDF_SB_ANCHOR(sb)[3] = 512;
-			}
-			else
-			{
-				if (last[i] < 256 || !(bh = sb_bread(sb, last[i] - 256)))
-				{
+			} else {
+				if (last[i] < 256
+				    || !(bh = sb_bread(sb, last[i] - 256))) {
 					ident = location = 0;
-				}
-				else
-				{
-					ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
-					location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
+				} else {
+					ident =
+					    le16_to_cpu(((tag *) bh->b_data)->
+							tagIdent);
+					location =
+					    le32_to_cpu(((tag *) bh->b_data)->
+							tagLocation);
 					brelse(bh);
 				}
-	
+
 				if (ident == TAG_IDENT_AVDP &&
-					location == last[i] - 256 - UDF_SB_SESSION(sb))
-				{
+				    location ==
+				    last[i] - 256 - UDF_SB_SESSION(sb)) {
 					lastblock = last[i];
 					UDF_SB_ANCHOR(sb)[1] = last[i] - 256;
-				}
-				else
-				{
-					if (last[i] < 312 + UDF_SB_SESSION(sb) || !(bh = sb_bread(sb, last[i] - 312 - UDF_SB_SESSION(sb))))
+				} else {
+					if (last[i] < 312 + UDF_SB_SESSION(sb)
+					    || !(bh =
+						 sb_bread(sb,
+							  last[i] - 312 -
+							  UDF_SB_SESSION(sb))))
 					{
 						ident = location = 0;
-					}
-					else
-					{
-						ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
-						location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
+					} else {
+						ident =
+						    le16_to_cpu(((tag *) bh->
+								 b_data)->
+								tagIdent);
+						location =
+						    le32_to_cpu(((tag *) bh->
+								 b_data)->
+								tagLocation);
 						brelse(bh);
 					}
-	
+
 					if (ident == TAG_IDENT_AVDP &&
-						location == udf_variable_to_fixed(last[i]) - 256)
-					{
-						UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
-						lastblock = udf_variable_to_fixed(last[i]);
-						UDF_SB_ANCHOR(sb)[1] = lastblock - 256;
+					    location ==
+					    udf_variable_to_fixed(last[i]) -
+					    256) {
+						UDF_SET_FLAG(sb,
+							     UDF_FLAG_VARCONV);
+						lastblock =
+						    udf_variable_to_fixed(last
+									  [i]);
+						UDF_SB_ANCHOR(sb)[1] =
+						    lastblock - 256;
 					}
 				}
 			}
 		}
 	}
 
-	if (!lastblock)
-	{
+	if (!lastblock) {
 		/* We havn't found the lastblock. check 312 */
-		if ((bh = sb_bread(sb, 312 + UDF_SB_SESSION(sb))))
-		{
-			ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
-			location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
+		if ((bh = sb_bread(sb, 312 + UDF_SB_SESSION(sb)))) {
+			ident = le16_to_cpu(((tag *) bh->b_data)->tagIdent);
+			location =
+			    le32_to_cpu(((tag *) bh->b_data)->tagLocation);
 			brelse(bh);
 
 			if (ident == TAG_IDENT_AVDP && location == 256)
@@ -755,18 +766,19 @@
 	}
 
 	for (i = 0; i < ARRAY_SIZE(UDF_SB_ANCHOR(sb)); i++) {
-		if (UDF_SB_ANCHOR(sb)[i])
-		{
+		if (UDF_SB_ANCHOR(sb)[i]) {
 			if (!(bh = udf_read_tagged(sb,
-				UDF_SB_ANCHOR(sb)[i], UDF_SB_ANCHOR(sb)[i], &ident)))
-			{
+						   UDF_SB_ANCHOR(sb)[i],
+						   UDF_SB_ANCHOR(sb)[i],
+						   &ident))) {
 				UDF_SB_ANCHOR(sb)[i] = 0;
-			}
-			else
-			{
+			} else {
 				brelse(bh);
 				if ((ident != TAG_IDENT_AVDP) && (i ||
-					(ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE)))
+								  (ident !=
+								   TAG_IDENT_FE
+								   && ident !=
+								   TAG_IDENT_EFE)))
 				{
 					UDF_SB_ANCHOR(sb)[i] = 0;
 				}
@@ -777,72 +789,75 @@
 	UDF_SB_LASTBLOCK(sb) = lastblock;
 }
 
-static int 
-udf_find_fileset(struct super_block *sb, kernel_lb_addr *fileset, kernel_lb_addr *root)
+static int
+udf_find_fileset(struct super_block *sb, kernel_lb_addr * fileset,
+		 kernel_lb_addr * root)
 {
 	struct buffer_head *bh = NULL;
 	long lastblock;
 	uint16_t ident;
 
 	if (fileset->logicalBlockNum != 0xFFFFFFFF ||
-		fileset->partitionReferenceNum != 0xFFFF)
-	{
+	    fileset->partitionReferenceNum != 0xFFFF) {
 		bh = udf_read_ptagged(sb, *fileset, 0, &ident);
 
 		if (!bh)
 			return 1;
-		else if (ident != TAG_IDENT_FSD)
-		{
+		else if (ident != TAG_IDENT_FSD) {
 			brelse(bh);
 			return 1;
 		}
-			
+
 	}
 
-	if (!bh) /* Search backwards through the partitions */
-	{
+	if (!bh) {		/* Search backwards through the partitions */
 		kernel_lb_addr newfileset;
 
 		return 1;
-		
-		for (newfileset.partitionReferenceNum=UDF_SB_NUMPARTS(sb)-1;
-			(newfileset.partitionReferenceNum != 0xFFFF &&
-				fileset->logicalBlockNum == 0xFFFFFFFF &&
-				fileset->partitionReferenceNum == 0xFFFF);
-			newfileset.partitionReferenceNum--)
-		{
-			lastblock = UDF_SB_PARTLEN(sb, newfileset.partitionReferenceNum);
+
+		for (newfileset.partitionReferenceNum = UDF_SB_NUMPARTS(sb) - 1;
+		     (newfileset.partitionReferenceNum != 0xFFFF &&
+		      fileset->logicalBlockNum == 0xFFFFFFFF &&
+		      fileset->partitionReferenceNum == 0xFFFF);
+		     newfileset.partitionReferenceNum--) {
+			lastblock =
+			    UDF_SB_PARTLEN(sb,
+					   newfileset.partitionReferenceNum);
 			newfileset.logicalBlockNum = 0;
 
-			do
-			{
-				bh = udf_read_ptagged(sb, newfileset, 0, &ident);
-				if (!bh)
-				{
-					newfileset.logicalBlockNum ++;
+			do {
+				bh = udf_read_ptagged(sb, newfileset, 0,
+						      &ident);
+				if (!bh) {
+					newfileset.logicalBlockNum++;
 					continue;
 				}
 
-				switch (ident)
-				{
-					case TAG_IDENT_SBD:
+				switch (ident) {
+				case TAG_IDENT_SBD:
 					{
 						struct spaceBitmapDesc *sp;
-						sp = (struct spaceBitmapDesc *)bh->b_data;
-						newfileset.logicalBlockNum += 1 +
-							((le32_to_cpu(sp->numOfBytes) + sizeof(struct spaceBitmapDesc) - 1)
-								>> sb->s_blocksize_bits);
+						sp = (struct spaceBitmapDesc *)
+						    bh->b_data;
+						newfileset.logicalBlockNum +=
+						    1 +
+						    ((le32_to_cpu
+						      (sp->numOfBytes) +
+						      sizeof(struct
+							     spaceBitmapDesc) -
+						      1)
+						     >> sb->s_blocksize_bits);
 						brelse(bh);
 						break;
 					}
-					case TAG_IDENT_FSD:
+				case TAG_IDENT_FSD:
 					{
 						*fileset = newfileset;
 						break;
 					}
-					default:
+				default:
 					{
-						newfileset.logicalBlockNum ++;
+						newfileset.logicalBlockNum++;
 						brelse(bh);
 						bh = NULL;
 						break;
@@ -850,16 +865,16 @@
 				}
 			}
 			while (newfileset.logicalBlockNum < lastblock &&
-				fileset->logicalBlockNum == 0xFFFFFFFF &&
-				fileset->partitionReferenceNum == 0xFFFF);
+			       fileset->logicalBlockNum == 0xFFFFFFFF &&
+			       fileset->partitionReferenceNum == 0xFFFF);
 		}
 	}
 
 	if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
-		fileset->partitionReferenceNum != 0xFFFF) && bh)
-	{
+	     fileset->partitionReferenceNum != 0xFFFF) && bh) {
 		udf_debug("Fileset at block=%d, partition=%d\n",
-			fileset->logicalBlockNum, fileset->partitionReferenceNum);
+			  fileset->logicalBlockNum,
+			  fileset->partitionReferenceNum);
 
 		UDF_SB_PARTITION(sb) = fileset->partitionReferenceNum;
 		udf_load_fileset(sb, bh, root);
@@ -869,8 +884,7 @@
 	return 1;
 }
 
-static void 
-udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh)
+static void udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh)
 {
 	struct primaryVolDesc *pvoldesc;
 	time_t recording;
@@ -880,37 +894,35 @@
 
 	pvoldesc = (struct primaryVolDesc *)bh->b_data;
 
-	if ( udf_stamp_to_time(&recording, &recording_usec,
-		lets_to_cpu(pvoldesc->recordingDateAndTime)) )
-	{
+	if (udf_stamp_to_time(&recording, &recording_usec,
+			      lets_to_cpu(pvoldesc->recordingDateAndTime))) {
 		kernel_timestamp ts;
 		ts = lets_to_cpu(pvoldesc->recordingDateAndTime);
-		udf_debug("recording time %ld/%ld, %04u/%02u/%02u %02u:%02u (%x)\n",
-			recording, recording_usec,
-			ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.typeAndTimezone);
+		udf_debug
+		    ("recording time %ld/%ld, %04u/%02u/%02u %02u:%02u (%x)\n",
+		     recording, recording_usec, ts.year, ts.month, ts.day,
+		     ts.hour, ts.minute, ts.typeAndTimezone);
 		UDF_SB_RECORDTIME(sb).tv_sec = recording;
 		UDF_SB_RECORDTIME(sb).tv_nsec = recording_usec * 1000;
 	}
 
-	if ( !udf_build_ustr(&instr, pvoldesc->volIdent, 32) )
-	{
-		if (udf_CS0toUTF8(&outstr, &instr))
-		{
-			strncpy( UDF_SB_VOLIDENT(sb), outstr.u_name,
+	if (!udf_build_ustr(&instr, pvoldesc->volIdent, 32)) {
+		if (udf_CS0toUTF8(&outstr, &instr)) {
+			strncpy(UDF_SB_VOLIDENT(sb), outstr.u_name,
 				outstr.u_len > 31 ? 31 : outstr.u_len);
 			udf_debug("volIdent[] = '%s'\n", UDF_SB_VOLIDENT(sb));
 		}
 	}
 
-	if ( !udf_build_ustr(&instr, pvoldesc->volSetIdent, 128) )
-	{
+	if (!udf_build_ustr(&instr, pvoldesc->volSetIdent, 128)) {
 		if (udf_CS0toUTF8(&outstr, &instr))
 			udf_debug("volSetIdent[] = '%s'\n", outstr.u_name);
 	}
 }
 
-static void 
-udf_load_fileset(struct super_block *sb, struct buffer_head *bh, kernel_lb_addr *root)
+static void
+udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
+		 kernel_lb_addr * root)
 {
 	struct fileSetDesc *fset;
 
@@ -920,109 +932,154 @@
 
 	UDF_SB_SERIALNUM(sb) = le16_to_cpu(fset->descTag.tagSerialNum);
 
-	udf_debug("Rootdir at block=%d, partition=%d\n", 
-		root->logicalBlockNum, root->partitionReferenceNum);
+	udf_debug("Rootdir at block=%d, partition=%d\n",
+		  root->logicalBlockNum, root->partitionReferenceNum);
 }
 
-static void 
-udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
+static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
 {
 	struct partitionDesc *p;
 	int i;
 
 	p = (struct partitionDesc *)bh->b_data;
 
-	for (i=0; i<UDF_SB_NUMPARTS(sb); i++)
-	{
-		udf_debug("Searching map: (%d == %d)\n", 
-			UDF_SB_PARTMAPS(sb)[i].s_partition_num, le16_to_cpu(p->partitionNumber));
-		if (UDF_SB_PARTMAPS(sb)[i].s_partition_num == le16_to_cpu(p->partitionNumber))
-		{
-			UDF_SB_PARTLEN(sb,i) = le32_to_cpu(p->partitionLength); /* blocks */
-			UDF_SB_PARTROOT(sb,i) = le32_to_cpu(p->partitionStartingLocation);
-			if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_READ_ONLY)
-				UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_READ_ONLY;
-			if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_WRITE_ONCE)
-				UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_WRITE_ONCE;
-			if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_REWRITABLE)
-				UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_REWRITABLE;
-			if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_OVERWRITABLE)
-				UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_OVERWRITABLE;
+	for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) {
+		udf_debug("Searching map: (%d == %d)\n",
+			  UDF_SB_PARTMAPS(sb)[i].s_partition_num,
+			  le16_to_cpu(p->partitionNumber));
+		if (UDF_SB_PARTMAPS(sb)[i].s_partition_num ==
+		    le16_to_cpu(p->partitionNumber)) {
+			UDF_SB_PARTLEN(sb, i) = le32_to_cpu(p->partitionLength);	/* blocks */
+			UDF_SB_PARTROOT(sb, i) =
+			    le32_to_cpu(p->partitionStartingLocation);
+			if (le32_to_cpu(p->accessType) ==
+			    PD_ACCESS_TYPE_READ_ONLY)
+				UDF_SB_PARTFLAGS(sb, i) |=
+				    UDF_PART_FLAG_READ_ONLY;
+			if (le32_to_cpu(p->accessType) ==
+			    PD_ACCESS_TYPE_WRITE_ONCE)
+				UDF_SB_PARTFLAGS(sb, i) |=
+				    UDF_PART_FLAG_WRITE_ONCE;
+			if (le32_to_cpu(p->accessType) ==
+			    PD_ACCESS_TYPE_REWRITABLE)
+				UDF_SB_PARTFLAGS(sb, i) |=
+				    UDF_PART_FLAG_REWRITABLE;
+			if (le32_to_cpu(p->accessType) ==
+			    PD_ACCESS_TYPE_OVERWRITABLE)
+				UDF_SB_PARTFLAGS(sb, i) |=
+				    UDF_PART_FLAG_OVERWRITABLE;
 
-			if (!strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) ||
-				!strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
-			{
+			if (!strcmp
+			    (p->partitionContents.ident,
+			     PD_PARTITION_CONTENTS_NSR02)
+			    || !strcmp(p->partitionContents.ident,
+				       PD_PARTITION_CONTENTS_NSR03)) {
 				struct partitionHeaderDesc *phd;
 
-				phd = (struct partitionHeaderDesc *)(p->partitionContentsUse);
-				if (phd->unallocSpaceTable.extLength)
-				{
-					kernel_lb_addr loc = { le32_to_cpu(phd->unallocSpaceTable.extPosition), i };
+				phd =
+				    (struct partitionHeaderDesc *)(p->
+								   partitionContentsUse);
+				if (phd->unallocSpaceTable.extLength) {
+					kernel_lb_addr loc =
+					    { le32_to_cpu(phd->
+							  unallocSpaceTable.
+							  extPosition), i };
 
-					UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table =
-						udf_iget(sb, loc);
-					UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_TABLE;
-					udf_debug("unallocSpaceTable (part %d) @ %ld\n",
-						i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table->i_ino);
+					UDF_SB_PARTMAPS(sb)[i].s_uspace.
+					    s_table = udf_iget(sb, loc);
+					UDF_SB_PARTFLAGS(sb, i) |=
+					    UDF_PART_FLAG_UNALLOC_TABLE;
+					udf_debug
+					    ("unallocSpaceTable (part %d) @ %ld\n",
+					     i,
+					     UDF_SB_PARTMAPS(sb)[i].s_uspace.
+					     s_table->i_ino);
 				}
-				if (phd->unallocSpaceBitmap.extLength)
-				{
+				if (phd->unallocSpaceBitmap.extLength) {
 					UDF_SB_ALLOC_BITMAP(sb, i, s_uspace);
-					if (UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap != NULL)
-					{
-						UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extLength =
-							le32_to_cpu(phd->unallocSpaceBitmap.extLength);
-						UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extPosition =
-							le32_to_cpu(phd->unallocSpaceBitmap.extPosition);
-						UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_BITMAP;
-						udf_debug("unallocSpaceBitmap (part %d) @ %d\n",
-							i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extPosition);
+					if (UDF_SB_PARTMAPS(sb)[i].s_uspace.
+					    s_bitmap != NULL) {
+						UDF_SB_PARTMAPS(sb)[i].s_uspace.
+						    s_bitmap->s_extLength =
+						    le32_to_cpu(phd->
+								unallocSpaceBitmap.
+								extLength);
+						UDF_SB_PARTMAPS(sb)[i].s_uspace.
+						    s_bitmap->s_extPosition =
+						    le32_to_cpu(phd->
+								unallocSpaceBitmap.
+								extPosition);
+						UDF_SB_PARTFLAGS(sb, i) |=
+						    UDF_PART_FLAG_UNALLOC_BITMAP;
+						udf_debug
+						    ("unallocSpaceBitmap (part %d) @ %d\n",
+						     i,
+						     UDF_SB_PARTMAPS(sb)[i].
+						     s_uspace.s_bitmap->
+						     s_extPosition);
 					}
 				}
 				if (phd->partitionIntegrityTable.extLength)
-					udf_debug("partitionIntegrityTable (part %d)\n", i);
-				if (phd->freedSpaceTable.extLength)
-				{
-					kernel_lb_addr loc = { le32_to_cpu(phd->freedSpaceTable.extPosition), i };
+					udf_debug
+					    ("partitionIntegrityTable (part %d)\n",
+					     i);
+				if (phd->freedSpaceTable.extLength) {
+					kernel_lb_addr loc =
+					    { le32_to_cpu(phd->freedSpaceTable.
+							  extPosition), i };
 
-					UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table =
-						udf_iget(sb, loc);
-					UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_TABLE;
-					udf_debug("freedSpaceTable (part %d) @ %ld\n",
-						i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table->i_ino);
+					UDF_SB_PARTMAPS(sb)[i].s_fspace.
+					    s_table = udf_iget(sb, loc);
+					UDF_SB_PARTFLAGS(sb, i) |=
+					    UDF_PART_FLAG_FREED_TABLE;
+					udf_debug
+					    ("freedSpaceTable (part %d) @ %ld\n",
+					     i,
+					     UDF_SB_PARTMAPS(sb)[i].s_fspace.
+					     s_table->i_ino);
 				}
-				if (phd->freedSpaceBitmap.extLength)
-				{
+				if (phd->freedSpaceBitmap.extLength) {
 					UDF_SB_ALLOC_BITMAP(sb, i, s_fspace);
-					if (UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap != NULL)
-					{
-						UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extLength =
-							le32_to_cpu(phd->freedSpaceBitmap.extLength);
-						UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extPosition =
-							le32_to_cpu(phd->freedSpaceBitmap.extPosition);
-						UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_BITMAP;
-						udf_debug("freedSpaceBitmap (part %d) @ %d\n",
-							i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extPosition);
+					if (UDF_SB_PARTMAPS(sb)[i].s_fspace.
+					    s_bitmap != NULL) {
+						UDF_SB_PARTMAPS(sb)[i].s_fspace.
+						    s_bitmap->s_extLength =
+						    le32_to_cpu(phd->
+								freedSpaceBitmap.
+								extLength);
+						UDF_SB_PARTMAPS(sb)[i].s_fspace.
+						    s_bitmap->s_extPosition =
+						    le32_to_cpu(phd->
+								freedSpaceBitmap.
+								extPosition);
+						UDF_SB_PARTFLAGS(sb, i) |=
+						    UDF_PART_FLAG_FREED_BITMAP;
+						udf_debug
+						    ("freedSpaceBitmap (part %d) @ %d\n",
+						     i,
+						     UDF_SB_PARTMAPS(sb)[i].
+						     s_fspace.s_bitmap->
+						     s_extPosition);
 					}
 				}
 			}
 			break;
 		}
 	}
-	if (i == UDF_SB_NUMPARTS(sb))
-	{
-		udf_debug("Partition (%d) not found in partition map\n", le16_to_cpu(p->partitionNumber));
-	}
-	else
-	{
-		udf_debug("Partition (%d:%d type %x) starts at physical %d, block length %d\n",
-			le16_to_cpu(p->partitionNumber), i, UDF_SB_PARTTYPE(sb,i),
-			UDF_SB_PARTROOT(sb,i), UDF_SB_PARTLEN(sb,i));
+	if (i == UDF_SB_NUMPARTS(sb)) {
+		udf_debug("Partition (%d) not found in partition map\n",
+			  le16_to_cpu(p->partitionNumber));
+	} else {
+		udf_debug
+		    ("Partition (%d:%d type %x) starts at physical %d, block length %d\n",
+		     le16_to_cpu(p->partitionNumber), i, UDF_SB_PARTTYPE(sb, i),
+		     UDF_SB_PARTROOT(sb, i), UDF_SB_PARTLEN(sb, i));
 	}
 }
 
-static int 
-udf_load_logicalvol(struct super_block *sb, struct buffer_head * bh, kernel_lb_addr *fileset)
+static int
+udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
+		    kernel_lb_addr * fileset)
 {
 	struct logicalVolDesc *lvd;
 	int i, j, offset;
@@ -1032,82 +1089,114 @@
 
 	UDF_SB_ALLOC_PARTMAPS(sb, le32_to_cpu(lvd->numPartitionMaps));
 
-	for (i=0,offset=0;
-		 i<UDF_SB_NUMPARTS(sb) && offset<le32_to_cpu(lvd->mapTableLength);
-		 i++,offset+=((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->partitionMapLength)
-	{
-		type = ((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->partitionMapType;
-		if (type == 1)
-		{
-			struct genericPartitionMap1 *gpm1 = (struct genericPartitionMap1 *)&(lvd->partitionMaps[offset]);
-			UDF_SB_PARTTYPE(sb,i) = UDF_TYPE1_MAP15;
-			UDF_SB_PARTVSN(sb,i) = le16_to_cpu(gpm1->volSeqNum);
-			UDF_SB_PARTNUM(sb,i) = le16_to_cpu(gpm1->partitionNum);
-			UDF_SB_PARTFUNC(sb,i) = NULL;
-		}
-		else if (type == 2)
-		{
-			struct udfPartitionMap2 *upm2 = (struct udfPartitionMap2 *)&(lvd->partitionMaps[offset]);
-			if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL, strlen(UDF_ID_VIRTUAL)))
-			{
-				if (le16_to_cpu(((__le16 *)upm2->partIdent.identSuffix)[0]) == 0x0150)
-				{
-					UDF_SB_PARTTYPE(sb,i) = UDF_VIRTUAL_MAP15;
-					UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_virt15;
+	for (i = 0, offset = 0;
+	     i < UDF_SB_NUMPARTS(sb)
+	     && offset < le32_to_cpu(lvd->mapTableLength);
+	     i++, offset +=
+	     ((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->
+	     partitionMapLength) {
+		type =
+		    ((struct genericPartitionMap *)
+		     &(lvd->partitionMaps[offset]))->partitionMapType;
+		if (type == 1) {
+			struct genericPartitionMap1 *gpm1 =
+			    (struct genericPartitionMap1 *)&(lvd->
+							     partitionMaps
+							     [offset]);
+			UDF_SB_PARTTYPE(sb, i) = UDF_TYPE1_MAP15;
+			UDF_SB_PARTVSN(sb, i) = le16_to_cpu(gpm1->volSeqNum);
+			UDF_SB_PARTNUM(sb, i) = le16_to_cpu(gpm1->partitionNum);
+			UDF_SB_PARTFUNC(sb, i) = NULL;
+		} else if (type == 2) {
+			struct udfPartitionMap2 *upm2 =
+			    (struct udfPartitionMap2 *)&(lvd->
+							 partitionMaps[offset]);
+			if (!strncmp
+			    (upm2->partIdent.ident, UDF_ID_VIRTUAL,
+			     strlen(UDF_ID_VIRTUAL))) {
+				if (le16_to_cpu
+				    (((__le16 *) upm2->partIdent.
+				      identSuffix)[0]) == 0x0150) {
+					UDF_SB_PARTTYPE(sb, i) =
+					    UDF_VIRTUAL_MAP15;
+					UDF_SB_PARTFUNC(sb, i) =
+					    udf_get_pblock_virt15;
+				} else
+				    if (le16_to_cpu
+					(((__le16 *) upm2->partIdent.
+					  identSuffix)[0]) == 0x0200) {
+					UDF_SB_PARTTYPE(sb, i) =
+					    UDF_VIRTUAL_MAP20;
+					UDF_SB_PARTFUNC(sb, i) =
+					    udf_get_pblock_virt20;
 				}
-				else if (le16_to_cpu(((__le16 *)upm2->partIdent.identSuffix)[0]) == 0x0200)
-				{
-					UDF_SB_PARTTYPE(sb,i) = UDF_VIRTUAL_MAP20;
-					UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_virt20;
-				}
-			}
-			else if (!strncmp(upm2->partIdent.ident, UDF_ID_SPARABLE, strlen(UDF_ID_SPARABLE)))
-			{
+			} else
+			    if (!strncmp
+				(upm2->partIdent.ident, UDF_ID_SPARABLE,
+				 strlen(UDF_ID_SPARABLE))) {
 				uint32_t loc;
 				uint16_t ident;
 				struct sparingTable *st;
-				struct sparablePartitionMap *spm = (struct sparablePartitionMap *)&(lvd->partitionMaps[offset]);
+				struct sparablePartitionMap *spm =
+				    (struct sparablePartitionMap *)&(lvd->
+								     partitionMaps
+								     [offset]);
 
-				UDF_SB_PARTTYPE(sb,i) = UDF_SPARABLE_MAP15;
-				UDF_SB_TYPESPAR(sb,i).s_packet_len = le16_to_cpu(spm->packetLength);
-				for (j=0; j<spm->numSparingTables; j++)
-				{
-					loc = le32_to_cpu(spm->locSparingTable[j]);
-					UDF_SB_TYPESPAR(sb,i).s_spar_map[j] =
-						udf_read_tagged(sb, loc, loc, &ident);
-					if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL)
-					{
-						st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,i).s_spar_map[j]->b_data;
-						if (ident != 0 ||
-							strncmp(st->sparingIdent.ident, UDF_ID_SPARING, strlen(UDF_ID_SPARING)))
+				UDF_SB_PARTTYPE(sb, i) = UDF_SPARABLE_MAP15;
+				UDF_SB_TYPESPAR(sb, i).s_packet_len =
+				    le16_to_cpu(spm->packetLength);
+				for (j = 0; j < spm->numSparingTables; j++) {
+					loc =
+					    le32_to_cpu(spm->
+							locSparingTable[j]);
+					UDF_SB_TYPESPAR(sb, i).s_spar_map[j] =
+					    udf_read_tagged(sb, loc, loc,
+							    &ident);
+					if (UDF_SB_TYPESPAR(sb, i).
+					    s_spar_map[j] != NULL) {
+						st = (struct sparingTable *)
+						    UDF_SB_TYPESPAR(sb,
+								    i).
+						    s_spar_map[j]->b_data;
+						if (ident != 0
+						    || strncmp(st->sparingIdent.
+							       ident,
+							       UDF_ID_SPARING,
+							       strlen
+							       (UDF_ID_SPARING)))
 						{
-							brelse(UDF_SB_TYPESPAR(sb,i).s_spar_map[j]);
-							UDF_SB_TYPESPAR(sb,i).s_spar_map[j] = NULL;
+							brelse(UDF_SB_TYPESPAR
+							       (sb,
+								i).
+							       s_spar_map[j]);
+							UDF_SB_TYPESPAR(sb,
+									i).
+							    s_spar_map[j] =
+							    NULL;
 						}
 					}
 				}
-				UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_spar15;
-			}
-			else
-			{
-				udf_debug("Unknown ident: %s\n", upm2->partIdent.ident);
+				UDF_SB_PARTFUNC(sb, i) = udf_get_pblock_spar15;
+			} else {
+				udf_debug("Unknown ident: %s\n",
+					  upm2->partIdent.ident);
 				continue;
 			}
-			UDF_SB_PARTVSN(sb,i) = le16_to_cpu(upm2->volSeqNum);
-			UDF_SB_PARTNUM(sb,i) = le16_to_cpu(upm2->partitionNum);
+			UDF_SB_PARTVSN(sb, i) = le16_to_cpu(upm2->volSeqNum);
+			UDF_SB_PARTNUM(sb, i) = le16_to_cpu(upm2->partitionNum);
 		}
 		udf_debug("Partition (%d:%d) type %d on volume %d\n",
-			i, UDF_SB_PARTNUM(sb,i), type, UDF_SB_PARTVSN(sb,i));
+			  i, UDF_SB_PARTNUM(sb, i), type, UDF_SB_PARTVSN(sb,
+									 i));
 	}
 
-	if (fileset)
-	{
-		long_ad *la = (long_ad *)&(lvd->logicalVolContentsUse[0]);
+	if (fileset) {
+		long_ad *la = (long_ad *) & (lvd->logicalVolContentsUse[0]);
 
 		*fileset = lelb_to_cpu(la->extLocation);
-		udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n",
-			fileset->logicalBlockNum,
-			fileset->partitionReferenceNum);
+		udf_debug
+		    ("FileSet found in LogicalVolDesc at block=%d, partition=%d\n",
+		     fileset->logicalBlockNum, fileset->partitionReferenceNum);
 	}
 	if (lvd->integritySeqExt.extLength)
 		udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
@@ -1118,26 +1207,26 @@
  * udf_load_logicalvolint
  *
  */
-static void
-udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc)
+static void udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc)
 {
 	struct buffer_head *bh = NULL;
 	uint16_t ident;
 
 	while (loc.extLength > 0 &&
-		(bh = udf_read_tagged(sb, loc.extLocation,
-			loc.extLocation, &ident)) &&
-		ident == TAG_IDENT_LVID)
-	{
+	       (bh = udf_read_tagged(sb, loc.extLocation,
+				     loc.extLocation, &ident)) &&
+	       ident == TAG_IDENT_LVID) {
 		UDF_SB_LVIDBH(sb) = bh;
-		
+
 		if (UDF_SB_LVID(sb)->nextIntegrityExt.extLength)
-			udf_load_logicalvolint(sb, leea_to_cpu(UDF_SB_LVID(sb)->nextIntegrityExt));
-		
+			udf_load_logicalvolint(sb,
+					       leea_to_cpu(UDF_SB_LVID(sb)->
+							   nextIntegrityExt));
+
 		if (UDF_SB_LVIDBH(sb) != bh)
 			brelse(bh);
 		loc.extLength -= sb->s_blocksize;
-		loc.extLocation ++;
+		loc.extLocation++;
 	}
 	if (UDF_SB_LVIDBH(sb) != bh)
 		brelse(bh);
@@ -1158,15 +1247,16 @@
  *	July 1, 1997 - Andrew E. Mileski
  *	Written, tested, and released.
  */
-static  int
-udf_process_sequence(struct super_block *sb, long block, long lastblock, kernel_lb_addr *fileset)
+static int
+udf_process_sequence(struct super_block *sb, long block, long lastblock,
+		     kernel_lb_addr * fileset)
 {
 	struct buffer_head *bh = NULL;
 	struct udf_vds_record vds[VDS_POS_LENGTH];
 	struct generic_desc *gd;
 	struct volDescPtr *vdp;
-	int done=0;
-	int i,j;
+	int done = 0;
+	int i, j;
 	uint32_t vdsn;
 	uint16_t ident;
 	long next_s = 0, next_e = 0;
@@ -1174,93 +1264,92 @@
 	memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
 
 	/* Read the main descriptor sequence */
-	for (;(!done && block <= lastblock); block++)
-	{
+	for (; (!done && block <= lastblock); block++) {
 
 		bh = udf_read_tagged(sb, block, block, &ident);
-		if (!bh) 
+		if (!bh)
 			break;
 
 		/* Process each descriptor (ISO 13346 3/8.3-8.4) */
 		gd = (struct generic_desc *)bh->b_data;
 		vdsn = le32_to_cpu(gd->volDescSeqNum);
-		switch (ident)
-		{
-			case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
-				if (vdsn >= vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum)
-				{
-					vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum = vdsn;
-					vds[VDS_POS_PRIMARY_VOL_DESC].block = block;
-				}
-				break;
-			case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
-				if (vdsn >= vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum)
-				{
-					vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum = vdsn;
-					vds[VDS_POS_VOL_DESC_PTR].block = block;
+		switch (ident) {
+		case TAG_IDENT_PVD:	/* ISO 13346 3/10.1 */
+			if (vdsn >= vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum) {
+				vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum =
+				    vdsn;
+				vds[VDS_POS_PRIMARY_VOL_DESC].block = block;
+			}
+			break;
+		case TAG_IDENT_VDP:	/* ISO 13346 3/10.3 */
+			if (vdsn >= vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum) {
+				vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum = vdsn;
+				vds[VDS_POS_VOL_DESC_PTR].block = block;
 
-					vdp = (struct volDescPtr *)bh->b_data;
-					next_s = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
-					next_e = le32_to_cpu(vdp->nextVolDescSeqExt.extLength);
-					next_e = next_e >> sb->s_blocksize_bits;
-					next_e += next_s;
-				}
-				break;
-			case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
-				if (vdsn >= vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum)
-				{
-					vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum = vdsn;
-					vds[VDS_POS_IMP_USE_VOL_DESC].block = block;
-				}
-				break;
-			case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
-				if (!vds[VDS_POS_PARTITION_DESC].block)
-					vds[VDS_POS_PARTITION_DESC].block = block;
-				break;
-			case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
-				if (vdsn >= vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum)
-				{
-					vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum = vdsn;
-					vds[VDS_POS_LOGICAL_VOL_DESC].block = block;
-				}
-				break;
-			case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
-				if (vdsn >= vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum)
-				{
-					vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum = vdsn;
-					vds[VDS_POS_UNALLOC_SPACE_DESC].block = block;
-				}
-				break;
-			case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
-				vds[VDS_POS_TERMINATING_DESC].block = block;
-				if (next_e)
-				{
-					block = next_s;
-					lastblock = next_e;
-					next_s = next_e = 0;
-				}
-				else
-					done = 1;
-				break;
+				vdp = (struct volDescPtr *)bh->b_data;
+				next_s =
+				    le32_to_cpu(vdp->nextVolDescSeqExt.
+						extLocation);
+				next_e =
+				    le32_to_cpu(vdp->nextVolDescSeqExt.
+						extLength);
+				next_e = next_e >> sb->s_blocksize_bits;
+				next_e += next_s;
+			}
+			break;
+		case TAG_IDENT_IUVD:	/* ISO 13346 3/10.4 */
+			if (vdsn >= vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum) {
+				vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum =
+				    vdsn;
+				vds[VDS_POS_IMP_USE_VOL_DESC].block = block;
+			}
+			break;
+		case TAG_IDENT_PD:	/* ISO 13346 3/10.5 */
+			if (!vds[VDS_POS_PARTITION_DESC].block)
+				vds[VDS_POS_PARTITION_DESC].block = block;
+			break;
+		case TAG_IDENT_LVD:	/* ISO 13346 3/10.6 */
+			if (vdsn >= vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum) {
+				vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum =
+				    vdsn;
+				vds[VDS_POS_LOGICAL_VOL_DESC].block = block;
+			}
+			break;
+		case TAG_IDENT_USD:	/* ISO 13346 3/10.8 */
+			if (vdsn >=
+			    vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum) {
+				vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum =
+				    vdsn;
+				vds[VDS_POS_UNALLOC_SPACE_DESC].block = block;
+			}
+			break;
+		case TAG_IDENT_TD:	/* ISO 13346 3/10.9 */
+			vds[VDS_POS_TERMINATING_DESC].block = block;
+			if (next_e) {
+				block = next_s;
+				lastblock = next_e;
+				next_s = next_e = 0;
+			} else
+				done = 1;
+			break;
 		}
 		brelse(bh);
 	}
-	for (i=0; i<VDS_POS_LENGTH; i++)
-	{
-		if (vds[i].block)
-		{
-			bh = udf_read_tagged(sb, vds[i].block, vds[i].block, &ident);
+	for (i = 0; i < VDS_POS_LENGTH; i++) {
+		if (vds[i].block) {
+			bh = udf_read_tagged(sb, vds[i].block, vds[i].block,
+					     &ident);
 
 			if (i == VDS_POS_PRIMARY_VOL_DESC)
 				udf_load_pvoldesc(sb, bh);
 			else if (i == VDS_POS_LOGICAL_VOL_DESC)
 				udf_load_logicalvol(sb, bh, fileset);
-			else if (i == VDS_POS_PARTITION_DESC)
-			{
+			else if (i == VDS_POS_PARTITION_DESC) {
 				struct buffer_head *bh2 = NULL;
 				udf_load_partdesc(sb, bh);
-				for (j=vds[i].block+1; j<vds[VDS_POS_TERMINATING_DESC].block; j++)
-				{
+				for (j = vds[i].block + 1;
+				     j < vds[VDS_POS_TERMINATING_DESC].block;
+				     j++) {
 					bh2 = udf_read_tagged(sb, j, j, &ident);
 					gd = (struct generic_desc *)bh2->b_data;
 					if (ident == TAG_IDENT_PD)
@@ -1278,31 +1367,27 @@
 /*
  * udf_check_valid()
  */
-static int
-udf_check_valid(struct super_block *sb, int novrs, int silent)
+static int udf_check_valid(struct super_block *sb, int novrs, int silent)
 {
 	long block;
 
-	if (novrs)
-	{
+	if (novrs) {
 		udf_debug("Validity check skipped because of novrs option\n");
 		return 0;
 	}
 	/* Check that it is NSR02 compliant */
 	/* Process any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
-	else if ((block = udf_vrs(sb, silent)) == -1)
-	{
-		udf_debug("Failed to read byte 32768. Assuming open disc. Skipping validity check\n");
+	else if ((block = udf_vrs(sb, silent)) == -1) {
+		udf_debug
+		    ("Failed to read byte 32768. Assuming open disc. Skipping validity check\n");
 		if (!UDF_SB_LASTBLOCK(sb))
 			UDF_SB_LASTBLOCK(sb) = udf_get_last_block(sb);
 		return 0;
-	}
-	else 
+	} else
 		return !block;
 }
 
-static int
-udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
+static int udf_load_partition(struct super_block *sb, kernel_lb_addr * fileset)
 {
 	struct anchorVolDescPtr *anchor;
 	uint16_t ident;
@@ -1315,19 +1400,27 @@
 
 	for (i = 0; i < ARRAY_SIZE(UDF_SB_ANCHOR(sb)); i++) {
 		if (UDF_SB_ANCHOR(sb)[i] && (bh = udf_read_tagged(sb,
-			UDF_SB_ANCHOR(sb)[i], UDF_SB_ANCHOR(sb)[i], &ident)))
-		{
+								  UDF_SB_ANCHOR
+								  (sb)[i],
+								  UDF_SB_ANCHOR
+								  (sb)[i],
+								  &ident))) {
 			anchor = (struct anchorVolDescPtr *)bh->b_data;
 
 			/* Locate the main sequence */
-			main_s = le32_to_cpu( anchor->mainVolDescSeqExt.extLocation );
-			main_e = le32_to_cpu( anchor->mainVolDescSeqExt.extLength );
+			main_s =
+			    le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
+			main_e =
+			    le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
 			main_e = main_e >> sb->s_blocksize_bits;
 			main_e += main_s;
 
 			/* Locate the reserve sequence */
-			reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
-			reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
+			reserve_s =
+			    le32_to_cpu(anchor->reserveVolDescSeqExt.
+					extLocation);
+			reserve_e =
+			    le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
 			reserve_e = reserve_e >> sb->s_blocksize_bits;
 			reserve_e += reserve_s;
 
@@ -1335,9 +1428,10 @@
 
 			/* Process the main & reserve sequences */
 			/* responsible for finding the PartitionDesc(s) */
-			if (!(udf_process_sequence(sb, main_s, main_e, fileset) &&
-				udf_process_sequence(sb, reserve_s, reserve_e, fileset)))
-			{
+			if (!
+			    (udf_process_sequence(sb, main_s, main_e, fileset)
+			     && udf_process_sequence(sb, reserve_s, reserve_e,
+						     fileset))) {
 				break;
 			}
 		}
@@ -1349,36 +1443,37 @@
 	} else
 		udf_debug("Using anchor in block %d\n", UDF_SB_ANCHOR(sb)[i]);
 
-	for (i=0; i<UDF_SB_NUMPARTS(sb); i++)
-	{
-		switch (UDF_SB_PARTTYPE(sb, i))
-		{
-			case UDF_VIRTUAL_MAP15:
-			case UDF_VIRTUAL_MAP20:
+	for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) {
+		switch (UDF_SB_PARTTYPE(sb, i)) {
+		case UDF_VIRTUAL_MAP15:
+		case UDF_VIRTUAL_MAP20:
 			{
-				kernel_lb_addr ino;
+				kernel_lb_addr uninitialized_var(ino);
 
-				if (!UDF_SB_LASTBLOCK(sb))
-				{
-					UDF_SB_LASTBLOCK(sb) = udf_get_last_block(sb);
+				if (!UDF_SB_LASTBLOCK(sb)) {
+					UDF_SB_LASTBLOCK(sb) =
+					    udf_get_last_block(sb);
 					udf_find_anchor(sb);
 				}
 
-				if (!UDF_SB_LASTBLOCK(sb))
-				{
-					udf_debug("Unable to determine Lastblock (For Virtual Partition)\n");
+				if (!UDF_SB_LASTBLOCK(sb)) {
+					udf_debug
+					    ("Unable to determine Lastblock (For Virtual Partition)\n");
 					return 1;
 				}
 
-				for (j=0; j<UDF_SB_NUMPARTS(sb); j++)
-				{
+				for (j = 0; j < UDF_SB_NUMPARTS(sb); j++) {
 					if (j != i &&
-						UDF_SB_PARTVSN(sb,i) == UDF_SB_PARTVSN(sb,j) &&
-						UDF_SB_PARTNUM(sb,i) == UDF_SB_PARTNUM(sb,j))
-					{
+					    UDF_SB_PARTVSN(sb,
+							   i) ==
+					    UDF_SB_PARTVSN(sb, j)
+					    && UDF_SB_PARTNUM(sb,
+							      i) ==
+					    UDF_SB_PARTNUM(sb, j)) {
 						ino.partitionReferenceNum = j;
-						ino.logicalBlockNum = UDF_SB_LASTBLOCK(sb) -
-							UDF_SB_PARTROOT(sb,j);
+						ino.logicalBlockNum =
+						    UDF_SB_LASTBLOCK(sb) -
+						    UDF_SB_PARTROOT(sb, j);
 						break;
 					}
 				}
@@ -1389,13 +1484,13 @@
 				if (!(UDF_SB_VAT(sb) = udf_iget(sb, ino)))
 					return 1;
 
-				if (UDF_SB_PARTTYPE(sb,i) == UDF_VIRTUAL_MAP15)
-				{
-					UDF_SB_TYPEVIRT(sb,i).s_start_offset = udf_ext0_offset(UDF_SB_VAT(sb));
-					UDF_SB_TYPEVIRT(sb,i).s_num_entries = (UDF_SB_VAT(sb)->i_size - 36) >> 2;
-				}
-				else if (UDF_SB_PARTTYPE(sb,i) == UDF_VIRTUAL_MAP20)
-				{
+				if (UDF_SB_PARTTYPE(sb, i) == UDF_VIRTUAL_MAP15) {
+					UDF_SB_TYPEVIRT(sb, i).s_start_offset =
+					    udf_ext0_offset(UDF_SB_VAT(sb));
+					UDF_SB_TYPEVIRT(sb, i).s_num_entries =
+					    (UDF_SB_VAT(sb)->i_size - 36) >> 2;
+				} else if (UDF_SB_PARTTYPE(sb, i) ==
+					   UDF_VIRTUAL_MAP20) {
 					struct buffer_head *bh = NULL;
 					uint32_t pos;
 
@@ -1403,15 +1498,26 @@
 					bh = sb_bread(sb, pos);
 					if (!bh)
 						return 1;
-					UDF_SB_TYPEVIRT(sb,i).s_start_offset =
-						le16_to_cpu(((struct virtualAllocationTable20 *)bh->b_data + udf_ext0_offset(UDF_SB_VAT(sb)))->lengthHeader) +
-							udf_ext0_offset(UDF_SB_VAT(sb));
-					UDF_SB_TYPEVIRT(sb,i).s_num_entries = (UDF_SB_VAT(sb)->i_size -
-						UDF_SB_TYPEVIRT(sb,i).s_start_offset) >> 2;
+					UDF_SB_TYPEVIRT(sb, i).s_start_offset =
+					    le16_to_cpu(((struct
+							  virtualAllocationTable20
+							  *)bh->b_data +
+							 udf_ext0_offset
+							 (UDF_SB_VAT(sb)))->
+							lengthHeader) +
+					    udf_ext0_offset(UDF_SB_VAT(sb));
+					UDF_SB_TYPEVIRT(sb, i).s_num_entries =
+					    (UDF_SB_VAT(sb)->i_size -
+					     UDF_SB_TYPEVIRT(sb,
+							     i).
+					     s_start_offset) >> 2;
 					brelse(bh);
 				}
-				UDF_SB_PARTROOT(sb,i) = udf_get_pblock(sb, 0, i, 0);
-				UDF_SB_PARTLEN(sb,i) = UDF_SB_PARTLEN(sb,ino.partitionReferenceNum);
+				UDF_SB_PARTROOT(sb, i) =
+				    udf_get_pblock(sb, 0, i, 0);
+				UDF_SB_PARTLEN(sb, i) =
+				    UDF_SB_PARTLEN(sb,
+						   ino.partitionReferenceNum);
 			}
 		}
 	}
@@ -1420,26 +1526,28 @@
 
 static void udf_open_lvid(struct super_block *sb)
 {
-	if (UDF_SB_LVIDBH(sb))
-	{
+	if (UDF_SB_LVIDBH(sb)) {
 		int i;
 		kernel_timestamp cpu_time;
 
 		UDF_SB_LVIDIU(sb)->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
 		UDF_SB_LVIDIU(sb)->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
 		if (udf_time_to_stamp(&cpu_time, CURRENT_TIME))
-			UDF_SB_LVID(sb)->recordingDateAndTime = cpu_to_lets(cpu_time);
+			UDF_SB_LVID(sb)->recordingDateAndTime =
+			    cpu_to_lets(cpu_time);
 		UDF_SB_LVID(sb)->integrityType = LVID_INTEGRITY_TYPE_OPEN;
 
 		UDF_SB_LVID(sb)->descTag.descCRC =
-			cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag),
-			le16_to_cpu(UDF_SB_LVID(sb)->descTag.descCRCLength), 0));
+		    cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag),
+					le16_to_cpu(UDF_SB_LVID(sb)->descTag.
+						    descCRCLength), 0));
 
 		UDF_SB_LVID(sb)->descTag.tagChecksum = 0;
-		for (i=0; i<16; i++)
+		for (i = 0; i < 16; i++)
 			if (i != 4)
 				UDF_SB_LVID(sb)->descTag.tagChecksum +=
-					((uint8_t *)&(UDF_SB_LVID(sb)->descTag))[i];
+				    ((uint8_t *) &
+				     (UDF_SB_LVID(sb)->descTag))[i];
 
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 	}
@@ -1448,32 +1556,41 @@
 static void udf_close_lvid(struct super_block *sb)
 {
 	if (UDF_SB_LVIDBH(sb) &&
-		UDF_SB_LVID(sb)->integrityType == LVID_INTEGRITY_TYPE_OPEN)
-	{
+	    UDF_SB_LVID(sb)->integrityType == LVID_INTEGRITY_TYPE_OPEN) {
 		int i;
 		kernel_timestamp cpu_time;
 
 		UDF_SB_LVIDIU(sb)->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
 		UDF_SB_LVIDIU(sb)->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
 		if (udf_time_to_stamp(&cpu_time, CURRENT_TIME))
-			UDF_SB_LVID(sb)->recordingDateAndTime = cpu_to_lets(cpu_time);
-		if (UDF_MAX_WRITE_VERSION > le16_to_cpu(UDF_SB_LVIDIU(sb)->maxUDFWriteRev))
-			UDF_SB_LVIDIU(sb)->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
-		if (UDF_SB_UDFREV(sb) > le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev))
-			UDF_SB_LVIDIU(sb)->minUDFReadRev = cpu_to_le16(UDF_SB_UDFREV(sb));
-		if (UDF_SB_UDFREV(sb) > le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev))
-			UDF_SB_LVIDIU(sb)->minUDFWriteRev = cpu_to_le16(UDF_SB_UDFREV(sb));
-		UDF_SB_LVID(sb)->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
+			UDF_SB_LVID(sb)->recordingDateAndTime =
+			    cpu_to_lets(cpu_time);
+		if (UDF_MAX_WRITE_VERSION >
+		    le16_to_cpu(UDF_SB_LVIDIU(sb)->maxUDFWriteRev))
+			UDF_SB_LVIDIU(sb)->maxUDFWriteRev =
+			    cpu_to_le16(UDF_MAX_WRITE_VERSION);
+		if (UDF_SB_UDFREV(sb) >
+		    le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev))
+			UDF_SB_LVIDIU(sb)->minUDFReadRev =
+			    cpu_to_le16(UDF_SB_UDFREV(sb));
+		if (UDF_SB_UDFREV(sb) >
+		    le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev))
+			UDF_SB_LVIDIU(sb)->minUDFWriteRev =
+			    cpu_to_le16(UDF_SB_UDFREV(sb));
+		UDF_SB_LVID(sb)->integrityType =
+		    cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
 
 		UDF_SB_LVID(sb)->descTag.descCRC =
-			cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag),
-			le16_to_cpu(UDF_SB_LVID(sb)->descTag.descCRCLength), 0));
+		    cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag),
+					le16_to_cpu(UDF_SB_LVID(sb)->descTag.
+						    descCRCLength), 0));
 
 		UDF_SB_LVID(sb)->descTag.tagChecksum = 0;
-		for (i=0; i<16; i++)
+		for (i = 0; i < 16; i++)
 			if (i != 4)
 				UDF_SB_LVID(sb)->descTag.tagChecksum +=
-					((uint8_t *)&(UDF_SB_LVID(sb)->descTag))[i];
+				    ((uint8_t *) &
+				     (UDF_SB_LVID(sb)->descTag))[i];
 
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 	}
@@ -1498,7 +1615,7 @@
 static int udf_fill_super(struct super_block *sb, void *options, int silent)
 {
 	int i;
-	struct inode *inode=NULL;
+	struct inode *inode = NULL;
 	struct udf_options uopt;
 	kernel_lb_addr rootdir, fileset;
 	struct udf_sb_info *sbi;
@@ -1520,15 +1637,13 @@
 		goto error_out;
 
 	if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
-	    uopt.flags & (1 << UDF_FLAG_NLS_MAP))
-	{
+	    uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
 		udf_error(sb, "udf_read_super",
-			"utf8 cannot be combined with iocharset\n");
+			  "utf8 cannot be combined with iocharset\n");
 		goto error_out;
 	}
 #ifdef CONFIG_UDF_NLS
-	if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map)
-	{
+	if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
 		uopt.nls_map = load_nls_default();
 		if (!uopt.nls_map)
 			uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
@@ -1552,7 +1667,7 @@
 	if (!udf_set_blocksize(sb, uopt.blocksize))
 		goto error_out;
 
-	if ( uopt.session == 0xFFFFFFFF )
+	if (uopt.session == 0xFFFFFFFF)
 		UDF_SB_SESSION(sb) = udf_get_last_session(sb);
 	else
 		UDF_SB_SESSION(sb) = uopt.session;
@@ -1564,10 +1679,9 @@
 	UDF_SB_ANCHOR(sb)[2] = uopt.anchor;
 	UDF_SB_ANCHOR(sb)[3] = 256;
 
-	if (udf_check_valid(sb, uopt.novrs, silent)) /* read volume recognition sequences */
-	{
+	if (udf_check_valid(sb, uopt.novrs, silent)) {	/* read volume recognition sequences */
 		printk("UDF-fs: No VRS found\n");
- 		goto error_out;
+		goto error_out;
 	}
 
 	udf_find_anchor(sb);
@@ -1579,29 +1693,26 @@
 	sb->s_magic = UDF_SUPER_MAGIC;
 	sb->s_time_gran = 1000;
 
-	if (udf_load_partition(sb, &fileset))
-	{
+	if (udf_load_partition(sb, &fileset)) {
 		printk("UDF-fs: No partition found (1)\n");
 		goto error_out;
 	}
 
 	udf_debug("Lastblock=%d\n", UDF_SB_LASTBLOCK(sb));
 
-	if ( UDF_SB_LVIDBH(sb) )
-	{
-		uint16_t minUDFReadRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev);
-		uint16_t minUDFWriteRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev);
+	if (UDF_SB_LVIDBH(sb)) {
+		uint16_t minUDFReadRev =
+		    le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev);
+		uint16_t minUDFWriteRev =
+		    le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev);
 		/* uint16_t maxUDFWriteRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->maxUDFWriteRev); */
 
-		if (minUDFReadRev > UDF_MAX_READ_VERSION)
-		{
+		if (minUDFReadRev > UDF_MAX_READ_VERSION) {
 			printk("UDF-fs: minUDFReadRev=%x (max is %x)\n",
-				le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev),
-				UDF_MAX_READ_VERSION);
+			       le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev),
+			       UDF_MAX_READ_VERSION);
 			goto error_out;
-		}
-		else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION)
-		{
+		} else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) {
 			sb->s_flags |= MS_RDONLY;
 		}
 
@@ -1613,31 +1724,30 @@
 			UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
 	}
 
-	if ( !UDF_SB_NUMPARTS(sb) )
-	{
+	if (!UDF_SB_NUMPARTS(sb)) {
 		printk("UDF-fs: No partition found (2)\n");
 		goto error_out;
 	}
 
-	if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_READ_ONLY) {
-		printk("UDF-fs: Partition marked readonly; forcing readonly mount\n");
+	if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
+	    UDF_PART_FLAG_READ_ONLY) {
+		printk
+		    ("UDF-fs: Partition marked readonly; forcing readonly mount\n");
 		sb->s_flags |= MS_RDONLY;
 	}
 
-	if ( udf_find_fileset(sb, &fileset, &rootdir) )
-	{
+	if (udf_find_fileset(sb, &fileset, &rootdir)) {
 		printk("UDF-fs: No fileset found\n");
 		goto error_out;
 	}
 
-	if (!silent)
-	{
+	if (!silent) {
 		kernel_timestamp ts;
 		udf_time_to_stamp(&ts, UDF_SB_RECORDTIME(sb));
-		udf_info("UDF %s (%s) Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
-			UDFFS_VERSION, UDFFS_DATE,
-			UDF_SB_VOLIDENT(sb), ts.year, ts.month, ts.day, ts.hour, ts.minute,
-			ts.typeAndTimezone);
+		udf_info
+		    ("UDF %s (%s) Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
+		     UDFFS_VERSION, UDFFS_DATE, UDF_SB_VOLIDENT(sb), ts.year,
+		     ts.month, ts.day, ts.hour, ts.minute, ts.typeAndTimezone);
 	}
 	if (!(sb->s_flags & MS_RDONLY))
 		udf_open_lvid(sb);
@@ -1645,18 +1755,16 @@
 	/* Assign the root inode */
 	/* assign inodes by physical block number */
 	/* perhaps it's not extensible enough, but for now ... */
-	inode = udf_iget(sb, rootdir); 
-	if (!inode)
-	{
+	inode = udf_iget(sb, rootdir);
+	if (!inode) {
 		printk("UDF-fs: Error in udf_iget, block=%d, partition=%d\n",
-			rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
+		       rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
 		goto error_out;
 	}
 
 	/* Allocate a dentry for the root inode */
 	sb->s_root = d_alloc_root(inode);
-	if (!sb->s_root)
-	{
+	if (!sb->s_root) {
 		printk("UDF-fs: Couldn't allocate root dentry\n");
 		iput(inode);
 		goto error_out;
@@ -1664,23 +1772,30 @@
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
 	return 0;
 
-error_out:
+      error_out:
 	if (UDF_SB_VAT(sb))
 		iput(UDF_SB_VAT(sb));
-	if (UDF_SB_NUMPARTS(sb))
-	{
-		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE)
-			iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table);
-		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE)
-			iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table);
-		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP)
-			UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb),s_uspace);
-		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP)
-			UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb),s_fspace);
-		if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) == UDF_SPARABLE_MAP15)
-		{
-			for (i=0; i<4; i++)
-				brelse(UDF_SB_TYPESPAR(sb, UDF_SB_PARTITION(sb)).s_spar_map[i]);
+	if (UDF_SB_NUMPARTS(sb)) {
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
+		    UDF_PART_FLAG_UNALLOC_TABLE)
+			iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.
+			     s_table);
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
+		    UDF_PART_FLAG_FREED_TABLE)
+			iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.
+			     s_table);
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
+		    UDF_PART_FLAG_UNALLOC_BITMAP)
+			UDF_SB_FREE_BITMAP(sb, UDF_SB_PARTITION(sb), s_uspace);
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
+		    UDF_PART_FLAG_FREED_BITMAP)
+			UDF_SB_FREE_BITMAP(sb, UDF_SB_PARTITION(sb), s_fspace);
+		if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) ==
+		    UDF_SPARABLE_MAP15) {
+			for (i = 0; i < 4; i++)
+				brelse(UDF_SB_TYPESPAR
+				       (sb,
+					UDF_SB_PARTITION(sb)).s_spar_map[i]);
 		}
 	}
 #ifdef CONFIG_UDF_NLS
@@ -1697,32 +1812,31 @@
 }
 
 void udf_error(struct super_block *sb, const char *function,
-	const char *fmt, ...)
+	       const char *fmt, ...)
 {
 	va_list args;
 
-	if (!(sb->s_flags & MS_RDONLY))
-	{
+	if (!(sb->s_flags & MS_RDONLY)) {
 		/* mark sb error */
 		sb->s_dirt = 1;
 	}
 	va_start(args, fmt);
 	vsnprintf(error_buf, sizeof(error_buf), fmt, args);
 	va_end(args);
-	printk (KERN_CRIT "UDF-fs error (device %s): %s: %s\n",
-		sb->s_id, function, error_buf);
+	printk(KERN_CRIT "UDF-fs error (device %s): %s: %s\n",
+	       sb->s_id, function, error_buf);
 }
 
 void udf_warning(struct super_block *sb, const char *function,
-	const char *fmt, ...)
+		 const char *fmt, ...)
 {
 	va_list args;
 
-	va_start (args, fmt);
+	va_start(args, fmt);
 	vsnprintf(error_buf, sizeof(error_buf), fmt, args);
 	va_end(args);
 	printk(KERN_WARNING "UDF-fs warning (device %s): %s: %s\n",
-		sb->s_id, function, error_buf);
+	       sb->s_id, function, error_buf);
 }
 
 /*
@@ -1738,27 +1852,33 @@
  *	July 1, 1997 - Andrew E. Mileski
  *	Written, tested, and released.
  */
-static void
-udf_put_super(struct super_block *sb)
+static void udf_put_super(struct super_block *sb)
 {
 	int i;
 
 	if (UDF_SB_VAT(sb))
 		iput(UDF_SB_VAT(sb));
-	if (UDF_SB_NUMPARTS(sb))
-	{
-		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE)
-			iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table);
-		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE)
-			iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table);
-		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP)
-			UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb),s_uspace);
-		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP)
-			UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb),s_fspace);
-		if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) == UDF_SPARABLE_MAP15)
-		{
-			for (i=0; i<4; i++)
-				brelse(UDF_SB_TYPESPAR(sb, UDF_SB_PARTITION(sb)).s_spar_map[i]);
+	if (UDF_SB_NUMPARTS(sb)) {
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
+		    UDF_PART_FLAG_UNALLOC_TABLE)
+			iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.
+			     s_table);
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
+		    UDF_PART_FLAG_FREED_TABLE)
+			iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.
+			     s_table);
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
+		    UDF_PART_FLAG_UNALLOC_BITMAP)
+			UDF_SB_FREE_BITMAP(sb, UDF_SB_PARTITION(sb), s_uspace);
+		if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
+		    UDF_PART_FLAG_FREED_BITMAP)
+			UDF_SB_FREE_BITMAP(sb, UDF_SB_PARTITION(sb), s_fspace);
+		if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) ==
+		    UDF_SPARABLE_MAP15) {
+			for (i = 0; i < 4; i++)
+				brelse(UDF_SB_TYPESPAR
+				       (sb,
+					UDF_SB_PARTITION(sb)).s_spar_map[i]);
 		}
 	}
 #ifdef CONFIG_UDF_NLS
@@ -1786,8 +1906,7 @@
  *	July 1, 1997 - Andrew E. Mileski
  *	Written, tested, and released.
  */
-static int
-udf_statfs(struct dentry *dentry, struct kstatfs *buf)
+static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *sb = dentry->d_sb;
 
@@ -1797,11 +1916,12 @@
 	buf->f_bfree = udf_count_free(sb);
 	buf->f_bavail = buf->f_bfree;
 	buf->f_files = (UDF_SB_LVIDBH(sb) ?
-		(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) +
-		le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs)) : 0) + buf->f_bfree;
+			(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) +
+			 le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs)) : 0) +
+	    buf->f_bfree;
 	buf->f_ffree = buf->f_bfree;
 	/* __kernel_fsid_t f_fsid */
-	buf->f_namelen = UDF_NAME_LEN-2;
+	buf->f_namelen = UDF_NAME_LEN - 2;
 
 	return 0;
 }
@@ -1830,13 +1950,10 @@
 	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);
 	bh = udf_read_ptagged(sb, loc, 0, &ident);
 
-	if (!bh)
-	{
+	if (!bh) {
 		printk(KERN_ERR "udf: udf_count_free failed\n");
 		goto out;
-	}
-	else if (ident != TAG_IDENT_SBD)
-	{
+	} else if (ident != TAG_IDENT_SBD) {
 		brelse(bh);
 		printk(KERN_ERR "udf: udf_count_free failed\n");
 		goto out;
@@ -1844,43 +1961,39 @@
 
 	bm = (struct spaceBitmapDesc *)bh->b_data;
 	bytes = le32_to_cpu(bm->numOfBytes);
-	index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
-	ptr = (uint8_t *)bh->b_data;
+	index = sizeof(struct spaceBitmapDesc);	/* offset in first block only */
+	ptr = (uint8_t *) bh->b_data;
 
-	while ( bytes > 0 )
-	{
-		while ((bytes > 0) && (index < sb->s_blocksize))
-		{
+	while (bytes > 0) {
+		while ((bytes > 0) && (index < sb->s_blocksize)) {
 			value = ptr[index];
-			accum += udf_bitmap_lookup[ value & 0x0f ];
-			accum += udf_bitmap_lookup[ value >> 4 ];
+			accum += udf_bitmap_lookup[value & 0x0f];
+			accum += udf_bitmap_lookup[value >> 4];
 			index++;
 			bytes--;
 		}
-		if ( bytes )
-		{
+		if (bytes) {
 			brelse(bh);
 			newblock = udf_get_lb_pblock(sb, loc, ++block);
 			bh = udf_tread(sb, newblock);
-			if (!bh)
-			{
+			if (!bh) {
 				udf_debug("read failed\n");
 				goto out;
 			}
 			index = 0;
-			ptr = (uint8_t *)bh->b_data;
+			ptr = (uint8_t *) bh->b_data;
 		}
 	}
 	brelse(bh);
 
-out:
+      out:
 	unlock_kernel();
 
 	return accum;
 }
 
 static unsigned int
-udf_count_free_table(struct super_block *sb, struct inode * table)
+udf_count_free_table(struct super_block *sb, struct inode *table)
 {
 	unsigned int accum = 0;
 	uint32_t elen;
@@ -1902,17 +2015,17 @@
 
 	return accum;
 }
-	
-static unsigned int
-udf_count_free(struct super_block *sb)
+
+static unsigned int udf_count_free(struct super_block *sb)
 {
 	unsigned int accum = 0;
 
-	if (UDF_SB_LVIDBH(sb))
-	{
-		if (le32_to_cpu(UDF_SB_LVID(sb)->numOfPartitions) > UDF_SB_PARTITION(sb))
-		{
-			accum = le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]);
+	if (UDF_SB_LVIDBH(sb)) {
+		if (le32_to_cpu(UDF_SB_LVID(sb)->numOfPartitions) >
+		    UDF_SB_PARTITION(sb)) {
+			accum =
+			    le32_to_cpu(UDF_SB_LVID(sb)->
+					freeSpaceTable[UDF_SB_PARTITION(sb)]);
 
 			if (accum == 0xFFFFFFFF)
 				accum = 0;
@@ -1922,28 +2035,40 @@
 	if (accum)
 		return accum;
 
-	if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP)
-	{
-		accum += udf_count_free_bitmap(sb,
-			UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_bitmap);
+	if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
+	    UDF_PART_FLAG_UNALLOC_BITMAP) {
+		accum +=
+		    udf_count_free_bitmap(sb,
+					  UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION
+							      (sb)].s_uspace.
+					  s_bitmap);
 	}
-	if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP)
-	{
-		accum += udf_count_free_bitmap(sb,
-			UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_bitmap);
+	if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
+	    UDF_PART_FLAG_FREED_BITMAP) {
+		accum +=
+		    udf_count_free_bitmap(sb,
+					  UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION
+							      (sb)].s_fspace.
+					  s_bitmap);
 	}
 	if (accum)
 		return accum;
 
-	if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE)
-	{
-		accum += udf_count_free_table(sb,
-			UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table);
+	if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
+	    UDF_PART_FLAG_UNALLOC_TABLE) {
+		accum +=
+		    udf_count_free_table(sb,
+					 UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION
+							     (sb)].s_uspace.
+					 s_table);
 	}
-	if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE)
-	{
-		accum += udf_count_free_table(sb,
-			UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table);
+	if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
+	    UDF_PART_FLAG_FREED_TABLE) {
+		accum +=
+		    udf_count_free_table(sb,
+					 UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION
+							     (sb)].s_fspace.
+					 s_table);
 	}
 
 	return accum;
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 12613b6..c4b82a9 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -33,41 +33,40 @@
 #include <linux/buffer_head.h>
 #include "udf_i.h"
 
-static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen, char *to)
+static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen,
+			   char *to)
 {
 	struct pathComponent *pc;
 	int elen = 0;
 	char *p = to;
 
-	while (elen < fromlen)
-	{
+	while (elen < fromlen) {
 		pc = (struct pathComponent *)(from + elen);
-		switch (pc->componentType)
-		{
-			case 1:
-				if (pc->lengthComponentIdent == 0)
-				{
-					p = to;
-					*p++ = '/';
-				}
-				break;
-			case 3:
-				memcpy(p, "../", 3);
-				p += 3;
-				break;
-			case 4:
-				memcpy(p, "./", 2);
-				p += 2;
-				/* that would be . - just ignore */
-				break;
-			case 5:
-				p += udf_get_filename(sb, pc->componentIdent, p, pc->lengthComponentIdent);
+		switch (pc->componentType) {
+		case 1:
+			if (pc->lengthComponentIdent == 0) {
+				p = to;
 				*p++ = '/';
-				break;
+			}
+			break;
+		case 3:
+			memcpy(p, "../", 3);
+			p += 3;
+			break;
+		case 4:
+			memcpy(p, "./", 2);
+			p += 2;
+			/* that would be . - just ignore */
+			break;
+		case 5:
+			p += udf_get_filename(sb, pc->componentIdent, p,
+					      pc->lengthComponentIdent);
+			*p++ = '/';
+			break;
 		}
 		elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
 	}
-	if (p > to+1)
+	if (p > to + 1)
 		p[-1] = '\0';
 	else
 		p[0] = '\0';
@@ -84,8 +83,7 @@
 	lock_kernel();
 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
 		symlink = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode);
-	else
-	{
+	else {
 		bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
 
 		if (!bh)
@@ -102,7 +100,7 @@
 	kunmap(page);
 	unlock_page(page);
 	return 0;
-out:
+      out:
 	unlock_kernel();
 	SetPageError(page);
 	kunmap(page);
@@ -114,5 +112,5 @@
  * symlinks can't do much...
  */
 const struct address_space_operations udf_symlink_aops = {
-	.readpage		= udf_symlink_filler,
+	.readpage = udf_symlink_filler,
 };
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 60d2776..b2002da 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -28,35 +28,38 @@
 #include "udf_i.h"
 #include "udf_sb.h"
 
-static void extent_trunc(struct inode * inode, struct extent_position *epos,
-	kernel_lb_addr eloc, int8_t etype, uint32_t elen, uint32_t nelen)
+static void extent_trunc(struct inode *inode, struct extent_position *epos,
+			 kernel_lb_addr eloc, int8_t etype, uint32_t elen,
+			 uint32_t nelen)
 {
 	kernel_lb_addr neloc = { 0, 0 };
-	int last_block = (elen + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
-	int first_block = (nelen + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
+	int last_block =
+	    (elen + inode->i_sb->s_blocksize -
+	     1) >> inode->i_sb->s_blocksize_bits;
+	int first_block =
+	    (nelen + inode->i_sb->s_blocksize -
+	     1) >> inode->i_sb->s_blocksize_bits;
 
-	if (nelen)
-	{
-		if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
-		{
-			udf_free_blocks(inode->i_sb, inode, eloc, 0, last_block);
+	if (nelen) {
+		if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
+			udf_free_blocks(inode->i_sb, inode, eloc, 0,
+					last_block);
 			etype = (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30);
-		}
-		else
+		} else
 			neloc = eloc;
 		nelen = (etype << 30) | nelen;
 	}
 
-	if (elen != nelen)
-	{
+	if (elen != nelen) {
 		udf_write_aext(inode, epos, neloc, nelen, 0);
-		if (last_block - first_block > 0)
-		{
+		if (last_block - first_block > 0) {
 			if (etype == (EXT_RECORDED_ALLOCATED >> 30))
 				mark_inode_dirty(inode);
 
 			if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
-				udf_free_blocks(inode->i_sb, inode, eloc, first_block, last_block - first_block);
+				udf_free_blocks(inode->i_sb, inode, eloc,
+						first_block,
+						last_block - first_block);
 		}
 	}
 }
@@ -67,7 +70,7 @@
  */
 void udf_truncate_tail_extent(struct inode *inode)
 {
-	struct extent_position epos = { NULL, 0, {0, 0}};
+	struct extent_position epos = { NULL, 0, {0, 0} };
 	kernel_lb_addr eloc;
 	uint32_t elen, nelen;
 	uint64_t lbcount = 0;
@@ -89,8 +92,7 @@
 		BUG();
 
 	/* Find the last extent in the file */
-	while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1)
-	{
+	while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
 		etype = netype;
 		lbcount += elen;
 		if (lbcount > inode->i_size) {
@@ -123,7 +125,7 @@
 
 void udf_discard_prealloc(struct inode *inode)
 {
-	struct extent_position epos = { NULL, 0, {0, 0}};
+	struct extent_position epos = { NULL, 0, {0, 0} };
 	kernel_lb_addr eloc;
 	uint32_t elen;
 	uint64_t lbcount = 0;
@@ -131,7 +133,7 @@
 	int adsize;
 
 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB ||
-		inode->i_size == UDF_I_LENEXTENTS(inode))
+	    inode->i_size == UDF_I_LENEXTENTS(inode))
 		return;
 
 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
@@ -153,15 +155,21 @@
 		lbcount -= elen;
 		extent_trunc(inode, &epos, eloc, etype, elen, 0);
 		if (!epos.bh) {
-			UDF_I_LENALLOC(inode) = epos.offset - udf_file_entry_alloc_offset(inode);
+			UDF_I_LENALLOC(inode) =
+			    epos.offset - udf_file_entry_alloc_offset(inode);
 			mark_inode_dirty(inode);
 		} else {
-			struct allocExtDesc *aed = (struct allocExtDesc *)(epos.bh->b_data);
-			aed->lengthAllocDescs = cpu_to_le32(epos.offset - sizeof(struct allocExtDesc));
-			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+			struct allocExtDesc *aed =
+			    (struct allocExtDesc *)(epos.bh->b_data);
+			aed->lengthAllocDescs =
+			    cpu_to_le32(epos.offset -
+					sizeof(struct allocExtDesc));
+			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)
+			    || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
 				udf_update_tag(epos.bh->b_data, epos.offset);
 			else
-				udf_update_tag(epos.bh->b_data, sizeof(struct allocExtDesc));
+				udf_update_tag(epos.bh->b_data,
+					       sizeof(struct allocExtDesc));
 			mark_buffer_dirty_inode(epos.bh, inode);
 		}
 	}
@@ -171,7 +179,7 @@
 	brelse(epos.bh);
 }
 
-void udf_truncate_extents(struct inode * inode)
+void udf_truncate_extents(struct inode *inode)
 {
 	struct extent_position epos;
 	kernel_lb_addr eloc, neloc = { 0, 0 };
@@ -190,9 +198,10 @@
 		BUG();
 
 	etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
-	byte_offset = (offset << sb->s_blocksize_bits) + (inode->i_size & (sb->s_blocksize-1));
-	if (etype != -1)
-	{
+	byte_offset =
+	    (offset << sb->s_blocksize_bits) +
+	    (inode->i_size & (sb->s_blocksize - 1));
+	if (etype != -1) {
 		epos.offset -= adsize;
 		extent_trunc(inode, &epos, eloc, etype, elen, byte_offset);
 		epos.offset += adsize;
@@ -206,86 +215,98 @@
 		else
 			lenalloc -= sizeof(struct allocExtDesc);
 
-		while ((etype = udf_current_aext(inode, &epos, &eloc, &elen, 0)) != -1)
-		{
-			if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
-			{
+		while ((etype =
+			udf_current_aext(inode, &epos, &eloc, &elen,
+					 0)) != -1) {
+			if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
 				udf_write_aext(inode, &epos, neloc, nelen, 0);
-				if (indirect_ext_len)
-				{
+				if (indirect_ext_len) {
 					/* We managed to free all extents in the
 					 * indirect extent - free it too */
 					if (!epos.bh)
 						BUG();
-					udf_free_blocks(sb, inode, epos.block, 0, indirect_ext_len);
-				}
-				else
-				{
-					if (!epos.bh)
-					{
-						UDF_I_LENALLOC(inode) = lenalloc;
+					udf_free_blocks(sb, inode, epos.block,
+							0, indirect_ext_len);
+				} else {
+					if (!epos.bh) {
+						UDF_I_LENALLOC(inode) =
+						    lenalloc;
 						mark_inode_dirty(inode);
-					}
-					else
-					{
-						struct allocExtDesc *aed = (struct allocExtDesc *)(epos.bh->b_data);
-						aed->lengthAllocDescs = cpu_to_le32(lenalloc);
-						if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(sb) >= 0x0201)
-							udf_update_tag(epos.bh->b_data, lenalloc +
-								sizeof(struct allocExtDesc));
+					} else {
+						struct allocExtDesc *aed =
+						    (struct allocExtDesc
+						     *)(epos.bh->b_data);
+						aed->lengthAllocDescs =
+						    cpu_to_le32(lenalloc);
+						if (!UDF_QUERY_FLAG
+						    (sb, UDF_FLAG_STRICT)
+						    || UDF_SB_UDFREV(sb) >=
+						    0x0201)
+							udf_update_tag(epos.bh->
+								       b_data,
+								       lenalloc
+								       +
+								       sizeof
+								       (struct
+									allocExtDesc));
 						else
-							udf_update_tag(epos.bh->b_data, sizeof(struct allocExtDesc));
-						mark_buffer_dirty_inode(epos.bh, inode);
+							udf_update_tag(epos.bh->
+								       b_data,
+								       sizeof
+								       (struct
+									allocExtDesc));
+						mark_buffer_dirty_inode(epos.bh,
+									inode);
 					}
 				}
 				brelse(epos.bh);
 				epos.offset = sizeof(struct allocExtDesc);
 				epos.block = eloc;
-				epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, eloc, 0));
+				epos.bh =
+				    udf_tread(sb,
+					      udf_get_lb_pblock(sb, eloc, 0));
 				if (elen)
 					indirect_ext_len = (elen +
-						sb->s_blocksize - 1) >>
-						sb->s_blocksize_bits;
+							    sb->s_blocksize -
+							    1) >> sb->
+					    s_blocksize_bits;
 				else
 					indirect_ext_len = 1;
-			}
-			else
-			{
-				extent_trunc(inode, &epos, eloc, etype, elen, 0);
+			} else {
+				extent_trunc(inode, &epos, eloc, etype, elen,
+					     0);
 				epos.offset += adsize;
 			}
 		}
 
-		if (indirect_ext_len)
-		{
+		if (indirect_ext_len) {
 			if (!epos.bh)
 				BUG();
-			udf_free_blocks(sb, inode, epos.block, 0, indirect_ext_len);
-		}
-		else
-		{
-			if (!epos.bh)
-			{
+			udf_free_blocks(sb, inode, epos.block, 0,
+					indirect_ext_len);
+		} else {
+			if (!epos.bh) {
 				UDF_I_LENALLOC(inode) = lenalloc;
 				mark_inode_dirty(inode);
-			}
-			else
-			{
-				struct allocExtDesc *aed = (struct allocExtDesc *)(epos.bh->b_data);
+			} else {
+				struct allocExtDesc *aed =
+				    (struct allocExtDesc *)(epos.bh->b_data);
 				aed->lengthAllocDescs = cpu_to_le32(lenalloc);
-				if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(sb) >= 0x0201)
-					udf_update_tag(epos.bh->b_data, lenalloc +
-						sizeof(struct allocExtDesc));
+				if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)
+				    || UDF_SB_UDFREV(sb) >= 0x0201)
+					udf_update_tag(epos.bh->b_data,
+						       lenalloc +
+						       sizeof(struct
+							      allocExtDesc));
 				else
-					udf_update_tag(epos.bh->b_data, sizeof(struct allocExtDesc));
+					udf_update_tag(epos.bh->b_data,
+						       sizeof(struct
+							      allocExtDesc));
 				mark_buffer_dirty_inode(epos.bh, inode);
 			}
 		}
-	}
-	else if (inode->i_size)
-	{
-		if (byte_offset)
-		{
+	} else if (inode->i_size) {
+		if (byte_offset) {
 			kernel_long_ad extent;
 
 			/*
@@ -293,21 +314,33 @@
 			 *  no extent above inode->i_size => truncate is
 			 *  extending the file by 'offset' blocks.
 			 */
-			if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
-			    (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
+			if ((!epos.bh
+			     && epos.offset ==
+			     udf_file_entry_alloc_offset(inode)) || (epos.bh
+								     && epos.
+								     offset ==
+								     sizeof
+								     (struct
+								      allocExtDesc)))
+			{
 				/* File has no extents at all or has empty last
 				 * indirect extent! Create a fake extent... */
 				extent.extLocation.logicalBlockNum = 0;
 				extent.extLocation.partitionReferenceNum = 0;
-				extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
-			}
-			else {
+				extent.extLength =
+				    EXT_NOT_RECORDED_NOT_ALLOCATED;
+			} else {
 				epos.offset -= adsize;
 				etype = udf_next_aext(inode, &epos,
-					&extent.extLocation, &extent.extLength, 0);
+						      &extent.extLocation,
+						      &extent.extLength, 0);
 				extent.extLength |= etype << 30;
 			}
-			udf_extend_file(inode, &epos, &extent, offset+((inode->i_size & (sb->s_blocksize-1)) != 0));
+			udf_extend_file(inode, &epos, &extent,
+					offset +
+					((inode->
+					  i_size & (sb->s_blocksize - 1)) !=
+					 0));
 		}
 	}
 	UDF_I_LENEXTENTS(inode) = inode->i_size;
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index d7dbe6f..bee4308 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -23,4 +23,4 @@
 #define UDF_I_LAD(X)		( UDF_I(X)->i_ext.i_lad )
 #define UDF_I_DATA(X)		( UDF_I(X)->i_ext.i_data )
 
-#endif /* !defined(_LINUX_UDF_I_H) */
+#endif				/* !defined(_LINUX_UDF_I_H) */
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 3b2e6c8..60f31d8 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -20,8 +20,8 @@
 #define UDF_FLAG_VARCONV		8
 #define UDF_FLAG_NLS_MAP		9
 #define UDF_FLAG_UTF8			10
-#define UDF_FLAG_UID_FORGET     11    /* save -1 for uid to disk */
-#define UDF_FLAG_UID_IGNORE     12    /* use sb uid instead of on disk uid */
+#define UDF_FLAG_UID_FORGET     11	/* save -1 for uid to disk */
+#define UDF_FLAG_UID_IGNORE     12	/* use sb uid instead of on disk uid */
 #define UDF_FLAG_GID_FORGET     13
 #define UDF_FLAG_GID_IGNORE     14
 
@@ -139,4 +139,4 @@
 #define UDF_SB_FLAGS(X)				( UDF_SB(X)->s_flags )
 #define UDF_SB_VAT(X)				( UDF_SB(X)->s_vat )
 
-#endif /* __LINUX_UDF_SB_H */
+#endif				/* __LINUX_UDF_SB_H */
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index f581f2f..76f2b82 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -50,30 +50,26 @@
 extern const struct address_space_operations udf_adinicb_aops;
 extern const struct address_space_operations udf_symlink_aops;
 
-struct udf_fileident_bh
-{
+struct udf_fileident_bh {
 	struct buffer_head *sbh;
 	struct buffer_head *ebh;
 	int soffset;
 	int eoffset;
 };
 
-struct udf_vds_record
-{
+struct udf_vds_record {
 	uint32_t block;
 	uint32_t volDescSeqNum;
 };
 
-struct generic_desc
-{
-	tag		descTag;
-	__le32		volDescSeqNum;
+struct generic_desc {
+	tag descTag;
+	__le32 volDescSeqNum;
 };
 
-struct ustr
-{
+struct ustr {
 	uint8_t u_cmpID;
-	uint8_t u_name[UDF_NAME_LEN-2];
+	uint8_t u_name[UDF_NAME_LEN - 2];
 	uint8_t u_len;
 };
 
@@ -83,44 +79,58 @@
 	kernel_lb_addr block;
 };
 
-
 /* super.c */
 extern void udf_error(struct super_block *, const char *, const char *, ...);
 extern void udf_warning(struct super_block *, const char *, const char *, ...);
 
 /* namei.c */
-extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *, struct fileIdentDesc *, struct udf_fileident_bh *, uint8_t *, uint8_t *);
+extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
+			struct fileIdentDesc *, struct udf_fileident_bh *,
+			uint8_t *, uint8_t *);
 
 /* file.c */
-extern int udf_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+extern int udf_ioctl(struct inode *, struct file *, unsigned int,
+		     unsigned long);
 
 /* inode.c */
 extern struct inode *udf_iget(struct super_block *, kernel_lb_addr);
 extern int udf_sync_inode(struct inode *);
 extern void udf_expand_file_adinicb(struct inode *, int, int *);
-extern struct buffer_head * udf_expand_dir_adinicb(struct inode *, int *, int *);
-extern struct buffer_head * udf_bread(struct inode *, int, int, int *);
+extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
+extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
 extern void udf_truncate(struct inode *);
 extern void udf_read_inode(struct inode *);
 extern void udf_delete_inode(struct inode *);
 extern void udf_clear_inode(struct inode *);
 extern int udf_write_inode(struct inode *, int);
 extern long udf_block_map(struct inode *, sector_t);
-extern int udf_extend_file(struct inode *, struct extent_position *, kernel_long_ad *, sector_t);
-extern int8_t inode_bmap(struct inode *, sector_t, struct extent_position *, kernel_lb_addr *, uint32_t *, sector_t *);
-extern int8_t udf_add_aext(struct inode *, struct extent_position *, kernel_lb_addr, uint32_t, int);
-extern int8_t udf_write_aext(struct inode *, struct extent_position *, kernel_lb_addr, uint32_t, int);
-extern int8_t udf_delete_aext(struct inode *, struct extent_position, kernel_lb_addr, uint32_t);
-extern int8_t udf_next_aext(struct inode *, struct extent_position *, kernel_lb_addr *, uint32_t *, int);
-extern int8_t udf_current_aext(struct inode *, struct extent_position *, kernel_lb_addr *, uint32_t *, int);
+extern int udf_extend_file(struct inode *, struct extent_position *,
+			   kernel_long_ad *, sector_t);
+extern int8_t inode_bmap(struct inode *, sector_t, struct extent_position *,
+			 kernel_lb_addr *, uint32_t *, sector_t *);
+extern int8_t udf_add_aext(struct inode *, struct extent_position *,
+			   kernel_lb_addr, uint32_t, int);
+extern int8_t udf_write_aext(struct inode *, struct extent_position *,
+			     kernel_lb_addr, uint32_t, int);
+extern int8_t udf_delete_aext(struct inode *, struct extent_position,
+			      kernel_lb_addr, uint32_t);
+extern int8_t udf_next_aext(struct inode *, struct extent_position *,
+			    kernel_lb_addr *, uint32_t *, int);
+extern int8_t udf_current_aext(struct inode *, struct extent_position *,
+			       kernel_lb_addr *, uint32_t *, int);
 
 /* misc.c */
 extern struct buffer_head *udf_tgetblk(struct super_block *, int);
 extern struct buffer_head *udf_tread(struct super_block *, int);
-extern struct genericFormat *udf_add_extendedattr(struct inode *, uint32_t, uint32_t, uint8_t);
-extern struct genericFormat *udf_get_extendedattr(struct inode *, uint32_t, uint8_t);
-extern struct buffer_head *udf_read_tagged(struct super_block *, uint32_t, uint32_t, uint16_t *);
-extern struct buffer_head *udf_read_ptagged(struct super_block *, kernel_lb_addr, uint32_t, uint16_t *);
+extern struct genericFormat *udf_add_extendedattr(struct inode *, uint32_t,
+						  uint32_t, uint8_t);
+extern struct genericFormat *udf_get_extendedattr(struct inode *, uint32_t,
+						  uint8_t);
+extern struct buffer_head *udf_read_tagged(struct super_block *, uint32_t,
+					   uint32_t, uint16_t *);
+extern struct buffer_head *udf_read_ptagged(struct super_block *,
+					    kernel_lb_addr, uint32_t,
+					    uint16_t *);
 extern void udf_update_tag(char *, int);
 extern void udf_new_tag(char *, uint16_t, uint16_t, uint16_t, uint32_t, int);
 
@@ -129,21 +139,26 @@
 extern unsigned long udf_get_last_block(struct super_block *);
 
 /* partition.c */
-extern uint32_t udf_get_pblock(struct super_block *, uint32_t, uint16_t, uint32_t);
-extern uint32_t udf_get_pblock_virt15(struct super_block *, uint32_t, uint16_t, uint32_t);
-extern uint32_t udf_get_pblock_virt20(struct super_block *, uint32_t, uint16_t, uint32_t);
-extern uint32_t udf_get_pblock_spar15(struct super_block *, uint32_t, uint16_t, uint32_t);
+extern uint32_t udf_get_pblock(struct super_block *, uint32_t, uint16_t,
+			       uint32_t);
+extern uint32_t udf_get_pblock_virt15(struct super_block *, uint32_t, uint16_t,
+				      uint32_t);
+extern uint32_t udf_get_pblock_virt20(struct super_block *, uint32_t, uint16_t,
+				      uint32_t);
+extern uint32_t udf_get_pblock_spar15(struct super_block *, uint32_t, uint16_t,
+				      uint32_t);
 extern int udf_relocate_blocks(struct super_block *, long, long *);
 
 /* unicode.c */
 extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int);
-extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *, int);
+extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
+			    int);
 extern int udf_build_ustr(struct ustr *, dstring *, int);
 extern int udf_CS0toUTF8(struct ustr *, struct ustr *);
 
 /* ialloc.c */
 extern void udf_free_inode(struct inode *);
-extern struct inode * udf_new_inode (struct inode *, int, int *);
+extern struct inode *udf_new_inode(struct inode *, int, int *);
 
 /* truncate.c */
 extern void udf_truncate_tail_extent(struct inode *);
@@ -151,18 +166,27 @@
 extern void udf_truncate_extents(struct inode *);
 
 /* balloc.c */
-extern void udf_free_blocks(struct super_block *, struct inode *, kernel_lb_addr, uint32_t, uint32_t);
-extern int udf_prealloc_blocks(struct super_block *, struct inode *, uint16_t, uint32_t, uint32_t);
-extern int udf_new_block(struct super_block *, struct inode *, uint16_t, uint32_t, int *);
+extern void udf_free_blocks(struct super_block *, struct inode *,
+			    kernel_lb_addr, uint32_t, uint32_t);
+extern int udf_prealloc_blocks(struct super_block *, struct inode *, uint16_t,
+			       uint32_t, uint32_t);
+extern int udf_new_block(struct super_block *, struct inode *, uint16_t,
+			 uint32_t, int *);
 
 /* fsync.c */
 extern int udf_fsync_file(struct file *, struct dentry *, int);
 
 /* directory.c */
-extern struct fileIdentDesc * udf_fileident_read(struct inode *, loff_t *, struct udf_fileident_bh *, struct fileIdentDesc *, struct extent_position *, kernel_lb_addr *, uint32_t *, sector_t *);
-extern struct fileIdentDesc * udf_get_fileident(void * buffer, int bufsize, int * offset);
-extern long_ad * udf_get_filelongad(uint8_t *, int, int *, int);
-extern short_ad * udf_get_fileshortad(uint8_t *, int, int *, int);
+extern struct fileIdentDesc *udf_fileident_read(struct inode *, loff_t *,
+						struct udf_fileident_bh *,
+						struct fileIdentDesc *,
+						struct extent_position *,
+						kernel_lb_addr *, uint32_t *,
+						sector_t *);
+extern struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize,
+					       int *offset);
+extern long_ad *udf_get_filelongad(uint8_t *, int, int *, int);
+extern short_ad *udf_get_fileshortad(uint8_t *, int, int *, int);
 
 /* crc.c */
 extern uint16_t udf_crc(uint8_t *, uint32_t, uint16_t);
@@ -171,4 +195,4 @@
 extern time_t *udf_stamp_to_time(time_t *, long *, kernel_timestamp);
 extern kernel_timestamp *udf_time_to_stamp(kernel_timestamp *, struct timespec);
 
-#endif /* __UDF_DECL_H */
+#endif				/* __UDF_DECL_H */
diff --git a/fs/udf/udfend.h b/fs/udf/udfend.h
index 17d3788..450daab 100644
--- a/fs/udf/udfend.h
+++ b/fs/udf/udfend.h
@@ -78,4 +78,4 @@
 	return out;
 }
 
-#endif /* __UDF_ENDIAN_H */
+#endif				/* __UDF_ENDIAN_H */
diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
index 85d8dbe..b9f3198 100644
--- a/fs/udf/udftime.c
+++ b/fs/udf/udftime.c
@@ -46,37 +46,36 @@
 #endif
 
 /* How many days come before each month (0-12).  */
-static const unsigned short int __mon_yday[2][13] =
-{
+static const unsigned short int __mon_yday[2][13] = {
 	/* Normal years.  */
-	{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
+	{0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365},
 	/* Leap years.  */
-	{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
+	{0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366}
 };
 
 #define MAX_YEAR_SECONDS	69
-#define SPD 0x15180 /*3600*24*/
+#define SPD 0x15180		/*3600*24 */
 #define SPY(y,l,s) (SPD * (365*y+l)+s)
 
-static time_t year_seconds[MAX_YEAR_SECONDS]= {
-/*1970*/ SPY( 0, 0,0), SPY( 1, 0,0), SPY( 2, 0,0), SPY( 3, 1,0), 
-/*1974*/ SPY( 4, 1,0), SPY( 5, 1,0), SPY( 6, 1,0), SPY( 7, 2,0), 
-/*1978*/ SPY( 8, 2,0), SPY( 9, 2,0), SPY(10, 2,0), SPY(11, 3,0), 
-/*1982*/ SPY(12, 3,0), SPY(13, 3,0), SPY(14, 3,0), SPY(15, 4,0), 
-/*1986*/ SPY(16, 4,0), SPY(17, 4,0), SPY(18, 4,0), SPY(19, 5,0), 
-/*1990*/ SPY(20, 5,0), SPY(21, 5,0), SPY(22, 5,0), SPY(23, 6,0), 
-/*1994*/ SPY(24, 6,0), SPY(25, 6,0), SPY(26, 6,0), SPY(27, 7,0), 
-/*1998*/ SPY(28, 7,0), SPY(29, 7,0), SPY(30, 7,0), SPY(31, 8,0), 
-/*2002*/ SPY(32, 8,0), SPY(33, 8,0), SPY(34, 8,0), SPY(35, 9,0), 
-/*2006*/ SPY(36, 9,0), SPY(37, 9,0), SPY(38, 9,0), SPY(39,10,0), 
-/*2010*/ SPY(40,10,0), SPY(41,10,0), SPY(42,10,0), SPY(43,11,0), 
-/*2014*/ SPY(44,11,0), SPY(45,11,0), SPY(46,11,0), SPY(47,12,0), 
-/*2018*/ SPY(48,12,0), SPY(49,12,0), SPY(50,12,0), SPY(51,13,0), 
-/*2022*/ SPY(52,13,0), SPY(53,13,0), SPY(54,13,0), SPY(55,14,0), 
-/*2026*/ SPY(56,14,0), SPY(57,14,0), SPY(58,14,0), SPY(59,15,0), 
-/*2030*/ SPY(60,15,0), SPY(61,15,0), SPY(62,15,0), SPY(63,16,0), 
-/*2034*/ SPY(64,16,0), SPY(65,16,0), SPY(66,16,0), SPY(67,17,0), 
-/*2038*/ SPY(68,17,0)
+static time_t year_seconds[MAX_YEAR_SECONDS] = {
+/*1970*/ SPY(0, 0, 0), SPY(1, 0, 0), SPY(2, 0, 0), SPY(3, 1, 0),
+/*1974*/ SPY(4, 1, 0), SPY(5, 1, 0), SPY(6, 1, 0), SPY(7, 2, 0),
+/*1978*/ SPY(8, 2, 0), SPY(9, 2, 0), SPY(10, 2, 0), SPY(11, 3, 0),
+/*1982*/ SPY(12, 3, 0), SPY(13, 3, 0), SPY(14, 3, 0), SPY(15, 4, 0),
+/*1986*/ SPY(16, 4, 0), SPY(17, 4, 0), SPY(18, 4, 0), SPY(19, 5, 0),
+/*1990*/ SPY(20, 5, 0), SPY(21, 5, 0), SPY(22, 5, 0), SPY(23, 6, 0),
+/*1994*/ SPY(24, 6, 0), SPY(25, 6, 0), SPY(26, 6, 0), SPY(27, 7, 0),
+/*1998*/ SPY(28, 7, 0), SPY(29, 7, 0), SPY(30, 7, 0), SPY(31, 8, 0),
+/*2002*/ SPY(32, 8, 0), SPY(33, 8, 0), SPY(34, 8, 0), SPY(35, 9, 0),
+/*2006*/ SPY(36, 9, 0), SPY(37, 9, 0), SPY(38, 9, 0), SPY(39, 10, 0),
+/*2010*/ SPY(40, 10, 0), SPY(41, 10, 0), SPY(42, 10, 0), SPY(43, 11, 0),
+/*2014*/ SPY(44, 11, 0), SPY(45, 11, 0), SPY(46, 11, 0), SPY(47, 12, 0),
+/*2018*/ SPY(48, 12, 0), SPY(49, 12, 0), SPY(50, 12, 0), SPY(51, 13, 0),
+/*2022*/ SPY(52, 13, 0), SPY(53, 13, 0), SPY(54, 13, 0), SPY(55, 14, 0),
+/*2026*/ SPY(56, 14, 0), SPY(57, 14, 0), SPY(58, 14, 0), SPY(59, 15, 0),
+/*2030*/ SPY(60, 15, 0), SPY(61, 15, 0), SPY(62, 15, 0), SPY(63, 16, 0),
+/*2034*/ SPY(64, 16, 0), SPY(65, 16, 0), SPY(66, 16, 0), SPY(67, 17, 0),
+/*2038*/ SPY(68, 17, 0)
 };
 
 extern struct timezone sys_tz;
@@ -84,27 +83,23 @@
 #define SECS_PER_HOUR	(60 * 60)
 #define SECS_PER_DAY	(SECS_PER_HOUR * 24)
 
-time_t *
-udf_stamp_to_time(time_t *dest, long *dest_usec, kernel_timestamp src)
+time_t *udf_stamp_to_time(time_t * dest, long *dest_usec, kernel_timestamp src)
 {
 	int yday;
 	uint8_t type = src.typeAndTimezone >> 12;
 	int16_t offset;
 
-	if (type == 1)
-	{
+	if (type == 1) {
 		offset = src.typeAndTimezone << 4;
 		/* sign extent offset */
 		offset = (offset >> 4);
-		if (offset == -2047) /* unspecified offset */
+		if (offset == -2047)	/* unspecified offset */
 			offset = 0;
-	}
-	else
+	} else
 		offset = 0;
 
 	if ((src.year < EPOCH_YEAR) ||
-		(src.year >= EPOCH_YEAR+MAX_YEAR_SECONDS))
-	{
+	    (src.year >= EPOCH_YEAR + MAX_YEAR_SECONDS)) {
 		*dest = -1;
 		*dest_usec = -1;
 		return NULL;
@@ -112,16 +107,16 @@
 	*dest = year_seconds[src.year - EPOCH_YEAR];
 	*dest -= offset * 60;
 
-	yday = ((__mon_yday[__isleap (src.year)]
-		[src.month-1]) + (src.day-1));
-	*dest += ( ( (yday* 24) + src.hour ) * 60 + src.minute ) * 60 + src.second;
-	*dest_usec = src.centiseconds * 10000 + src.hundredsOfMicroseconds * 100 + src.microseconds;
+	yday = ((__mon_yday[__isleap(src.year)]
+		 [src.month - 1]) + (src.day - 1));
+	*dest += (((yday * 24) + src.hour) * 60 + src.minute) * 60 + src.second;
+	*dest_usec =
+	    src.centiseconds * 10000 + src.hundredsOfMicroseconds * 100 +
+	    src.microseconds;
 	return dest;
 }
 
-
-kernel_timestamp *
-udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts)
+kernel_timestamp *udf_time_to_stamp(kernel_timestamp * dest, struct timespec ts)
 {
 	long int days, rem, y;
 	const unsigned short int *ip;
@@ -146,28 +141,28 @@
 #define DIV(a,b) ((a) / (b) - ((a) % (b) < 0))
 #define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
 
-	while (days < 0 || days >= (__isleap(y) ? 366 : 365))
-	{
+	while (days < 0 || days >= (__isleap(y) ? 366 : 365)) {
 		long int yg = y + days / 365 - (days % 365 < 0);
 
 		/* Adjust DAYS and Y to match the guessed year.  */
-		days -= ((yg - y) * 365
-			+ LEAPS_THRU_END_OF (yg - 1)
-			- LEAPS_THRU_END_OF (y - 1));
+		days -= ((yg - y) * 365 + LEAPS_THRU_END_OF(yg - 1)
+			 - LEAPS_THRU_END_OF(y - 1));
 		y = yg;
 	}
 	dest->year = y;
 	ip = __mon_yday[__isleap(y)];
-	for (y = 11; days < (long int) ip[y]; --y)
+	for (y = 11; days < (long int)ip[y]; --y)
 		continue;
 	days -= ip[y];
 	dest->month = y + 1;
 	dest->day = days + 1;
 
 	dest->centiseconds = ts.tv_nsec / 10000000;
-	dest->hundredsOfMicroseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000) / 100;
-	dest->microseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000 -
-		dest->hundredsOfMicroseconds * 100);
+	dest->hundredsOfMicroseconds =
+	    (ts.tv_nsec / 1000 - dest->centiseconds * 10000) / 100;
+	dest->microseconds =
+	    (ts.tv_nsec / 1000 - dest->centiseconds * 10000 -
+	     dest->hundredsOfMicroseconds * 100);
 	return dest;
 }
 
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 706c92e..4683524 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -29,9 +29,9 @@
 
 static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int);
 
-static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
+static int udf_char_to_ustr(struct ustr *dest, const uint8_t * src, int strlen)
 {
-	if ( (!dest) || (!src) || (!strlen) || (strlen > UDF_NAME_LEN-2) )
+	if ((!dest) || (!src) || (!strlen) || (strlen > UDF_NAME_LEN - 2))
 		return 0;
 	memset(dest, 0, sizeof(struct ustr));
 	memcpy(dest->u_name, src, strlen);
@@ -43,33 +43,33 @@
 /*
  * udf_build_ustr
  */
-int udf_build_ustr(struct ustr *dest, dstring *ptr, int size)
+int udf_build_ustr(struct ustr *dest, dstring * ptr, int size)
 {
 	int usesize;
 
-	if ( (!dest) || (!ptr) || (!size) )
+	if ((!dest) || (!ptr) || (!size))
 		return -1;
 
 	memset(dest, 0, sizeof(struct ustr));
-	usesize= (size > UDF_NAME_LEN) ? UDF_NAME_LEN : size;
-	dest->u_cmpID=ptr[0];
-	dest->u_len=ptr[size-1];
-	memcpy(dest->u_name, ptr+1, usesize-1);
+	usesize = (size > UDF_NAME_LEN) ? UDF_NAME_LEN : size;
+	dest->u_cmpID = ptr[0];
+	dest->u_len = ptr[size - 1];
+	memcpy(dest->u_name, ptr + 1, usesize - 1);
 	return 0;
 }
 
 /*
  * udf_build_ustr_exact
  */
-static int udf_build_ustr_exact(struct ustr *dest, dstring *ptr, int exactsize)
+static int udf_build_ustr_exact(struct ustr *dest, dstring * ptr, int exactsize)
 {
-	if ( (!dest) || (!ptr) || (!exactsize) )
+	if ((!dest) || (!ptr) || (!exactsize))
 		return -1;
 
 	memset(dest, 0, sizeof(struct ustr));
-	dest->u_cmpID=ptr[0];
-	dest->u_len=exactsize-1;
-	memcpy(dest->u_name, ptr+1, exactsize-1);
+	dest->u_cmpID = ptr[0];
+	dest->u_len = exactsize - 1;
+	memcpy(dest->u_name, ptr + 1, exactsize - 1);
 	return 0;
 }
 
@@ -108,22 +108,20 @@
 	cmp_id = ocu_i->u_cmpID;
 	utf_o->u_len = 0;
 
-	if (ocu_len == 0)
-	{
+	if (ocu_len == 0) {
 		memset(utf_o, 0, sizeof(struct ustr));
 		utf_o->u_cmpID = 0;
 		utf_o->u_len = 0;
 		return 0;
 	}
 
-	if ((cmp_id != 8) && (cmp_id != 16))
-	{
-		printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n", cmp_id, ocu_i->u_name);
+	if ((cmp_id != 8) && (cmp_id != 16)) {
+		printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n",
+		       cmp_id, ocu_i->u_name);
 		return 0;
 	}
 
-	for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN-3)) ;)
-	{
+	for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
 
 		/* Expand OSTA compressed Unicode to Unicode */
 		c = ocu[i++];
@@ -132,20 +130,22 @@
 
 		/* Compress Unicode to UTF-8 */
 		if (c < 0x80U)
-			utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
-		else if (c < 0x800U)
-		{
-			utf_o->u_name[utf_o->u_len++] = (uint8_t)(0xc0 | (c >> 6));
-			utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | (c & 0x3f));
-		}
-		else
-		{
-			utf_o->u_name[utf_o->u_len++] = (uint8_t)(0xe0 | (c >> 12));
-			utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | ((c >> 6) & 0x3f));
-			utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | (c & 0x3f));
+			utf_o->u_name[utf_o->u_len++] = (uint8_t) c;
+		else if (c < 0x800U) {
+			utf_o->u_name[utf_o->u_len++] =
+			    (uint8_t) (0xc0 | (c >> 6));
+			utf_o->u_name[utf_o->u_len++] =
+			    (uint8_t) (0x80 | (c & 0x3f));
+		} else {
+			utf_o->u_name[utf_o->u_len++] =
+			    (uint8_t) (0xe0 | (c >> 12));
+			utf_o->u_name[utf_o->u_len++] =
+			    (uint8_t) (0x80 | ((c >> 6) & 0x3f));
+			utf_o->u_name[utf_o->u_len++] =
+			    (uint8_t) (0x80 | (c & 0x3f));
 		}
 	}
-	utf_o->u_cmpID=8;
+	utf_o->u_cmpID = 8;
 
 	return utf_o->u_len;
 }
@@ -173,7 +173,7 @@
  *	November 12, 1997 - Andrew E. Mileski
  *	Written, tested, and released.
  */
-static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
+static int udf_UTF8toCS0(dstring * ocu, struct ustr *utf, int length)
 {
 	unsigned c, i, max_val, utf_char;
 	int utf_cnt, u_len;
@@ -182,53 +182,38 @@
 	ocu[0] = 8;
 	max_val = 0xffU;
 
-try_again:
+      try_again:
 	u_len = 0U;
 	utf_char = 0U;
 	utf_cnt = 0U;
-	for (i = 0U; i < utf->u_len; i++)
-	{
-		c = (uint8_t)utf->u_name[i];
+	for (i = 0U; i < utf->u_len; i++) {
+		c = (uint8_t) utf->u_name[i];
 
 		/* Complete a multi-byte UTF-8 character */
-		if (utf_cnt)
-		{
+		if (utf_cnt) {
 			utf_char = (utf_char << 6) | (c & 0x3fU);
 			if (--utf_cnt)
 				continue;
-		}
-		else
-		{
+		} else {
 			/* Check for a multi-byte UTF-8 character */
-			if (c & 0x80U)
-			{
+			if (c & 0x80U) {
 				/* Start a multi-byte UTF-8 character */
-				if ((c & 0xe0U) == 0xc0U)
-				{
+				if ((c & 0xe0U) == 0xc0U) {
 					utf_char = c & 0x1fU;
 					utf_cnt = 1;
-				}
-				else if ((c & 0xf0U) == 0xe0U)
-				{
+				} else if ((c & 0xf0U) == 0xe0U) {
 					utf_char = c & 0x0fU;
 					utf_cnt = 2;
-				}
-				else if ((c & 0xf8U) == 0xf0U)
-				{
+				} else if ((c & 0xf8U) == 0xf0U) {
 					utf_char = c & 0x07U;
 					utf_cnt = 3;
-				}
-				else if ((c & 0xfcU) == 0xf8U)
-				{
+				} else if ((c & 0xfcU) == 0xf8U) {
 					utf_char = c & 0x03U;
 					utf_cnt = 4;
-				}
-				else if ((c & 0xfeU) == 0xfcU)
-				{
+				} else if ((c & 0xfeU) == 0xfcU) {
 					utf_char = c & 0x01U;
 					utf_cnt = 5;
-				}
-				else
+				} else
 					goto error_out;
 				continue;
 			} else
@@ -237,37 +222,33 @@
 		}
 
 		/* Choose no compression if necessary */
-		if (utf_char > max_val)
-		{
-			if ( 0xffU == max_val )
-			{
+		if (utf_char > max_val) {
+			if (0xffU == max_val) {
 				max_val = 0xffffU;
-				ocu[0] = (uint8_t)0x10U;
+				ocu[0] = (uint8_t) 0x10U;
 				goto try_again;
 			}
 			goto error_out;
 		}
 
-		if (max_val == 0xffffU)
-		{
-			ocu[++u_len] = (uint8_t)(utf_char >> 8);
+		if (max_val == 0xffffU) {
+			ocu[++u_len] = (uint8_t) (utf_char >> 8);
 		}
-		ocu[++u_len] = (uint8_t)(utf_char & 0xffU);
+		ocu[++u_len] = (uint8_t) (utf_char & 0xffU);
 	}
 
-
-	if (utf_cnt)
-	{
-error_out:
+	if (utf_cnt) {
+	      error_out:
 		ocu[++u_len] = '?';
 		printk(KERN_DEBUG "udf: bad UTF-8 character\n");
 	}
 
-	ocu[length - 1] = (uint8_t)u_len + 1;
+	ocu[length - 1] = (uint8_t) u_len + 1;
 	return u_len + 1;
 }
 
-static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o, struct ustr *ocu_i)
+static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
+			struct ustr *ocu_i)
 {
 	uint8_t *ocu;
 	uint32_t c;
@@ -280,36 +261,35 @@
 	cmp_id = ocu_i->u_cmpID;
 	utf_o->u_len = 0;
 
-	if (ocu_len == 0)
-	{
+	if (ocu_len == 0) {
 		memset(utf_o, 0, sizeof(struct ustr));
 		utf_o->u_cmpID = 0;
 		utf_o->u_len = 0;
 		return 0;
 	}
 
-	if ((cmp_id != 8) && (cmp_id != 16))
-	{
-		printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n", cmp_id, ocu_i->u_name);
+	if ((cmp_id != 8) && (cmp_id != 16)) {
+		printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n",
+		       cmp_id, ocu_i->u_name);
 		return 0;
 	}
 
-	for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN-3)) ;)
-	{
+	for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
 		/* Expand OSTA compressed Unicode to Unicode */
 		c = ocu[i++];
 		if (cmp_id == 16)
 			c = (c << 8) | ocu[i++];
 
-		utf_o->u_len += nls->uni2char(c, &utf_o->u_name[utf_o->u_len], 
-			UDF_NAME_LEN - utf_o->u_len);
+		utf_o->u_len += nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
+					      UDF_NAME_LEN - utf_o->u_len);
 	}
-	utf_o->u_cmpID=8;
+	utf_o->u_cmpID = 8;
 
 	return utf_o->u_len;
 }
 
-static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni, int length)
+static int udf_NLStoCS0(struct nls_table *nls, dstring * ocu, struct ustr *uni,
+			int length)
 {
 	unsigned len, i, max_val;
 	uint16_t uni_char;
@@ -319,93 +299,87 @@
 	ocu[0] = 8;
 	max_val = 0xffU;
 
-try_again:
+      try_again:
 	u_len = 0U;
-	for (i = 0U; i < uni->u_len; i++)
-	{
-		len = nls->char2uni(&uni->u_name[i], uni->u_len-i, &uni_char);
+	for (i = 0U; i < uni->u_len; i++) {
+		len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
 		if (len <= 0)
 			continue;
 
-		if (uni_char > max_val)
-		{
+		if (uni_char > max_val) {
 			max_val = 0xffffU;
-			ocu[0] = (uint8_t)0x10U;
+			ocu[0] = (uint8_t) 0x10U;
 			goto try_again;
 		}
-		
+
 		if (max_val == 0xffffU)
-			ocu[++u_len] = (uint8_t)(uni_char >> 8);
-		ocu[++u_len] = (uint8_t)(uni_char & 0xffU);
+			ocu[++u_len] = (uint8_t) (uni_char >> 8);
+		ocu[++u_len] = (uint8_t) (uni_char & 0xffU);
 		i += len - 1;
 	}
 
-	ocu[length - 1] = (uint8_t)u_len + 1;
+	ocu[length - 1] = (uint8_t) u_len + 1;
 	return u_len + 1;
 }
 
-int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname, int flen)
+int udf_get_filename(struct super_block *sb, uint8_t * sname, uint8_t * dname,
+		     int flen)
 {
 	struct ustr filename, unifilename;
 	int len;
 
-	if (udf_build_ustr_exact(&unifilename, sname, flen))
-	{
+	if (udf_build_ustr_exact(&unifilename, sname, flen)) {
 		return 0;
 	}
 
-	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
-	{
-		if (!udf_CS0toUTF8(&filename, &unifilename) )
-		{
-			udf_debug("Failed in udf_get_filename: sname = %s\n", sname);
+	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
+		if (!udf_CS0toUTF8(&filename, &unifilename)) {
+			udf_debug("Failed in udf_get_filename: sname = %s\n",
+				  sname);
 			return 0;
 		}
-	}
-	else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
-	{
-		if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, &filename, &unifilename) )
-		{
-			udf_debug("Failed in udf_get_filename: sname = %s\n", sname);
+	} else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
+		if (!udf_CS0toNLS
+		    (UDF_SB(sb)->s_nls_map, &filename, &unifilename)) {
+			udf_debug("Failed in udf_get_filename: sname = %s\n",
+				  sname);
 			return 0;
 		}
-	}
-	else
+	} else
 		return 0;
 
-	if ((len = udf_translate_to_linux(dname, filename.u_name, filename.u_len,
-		unifilename.u_name, unifilename.u_len)))
-	{
+	if ((len =
+	     udf_translate_to_linux(dname, filename.u_name, filename.u_len,
+				    unifilename.u_name, unifilename.u_len))) {
 		return len;
 	}
 	return 0;
 }
 
-int udf_put_filename(struct super_block *sb, const uint8_t *sname, uint8_t *dname, int flen)
+int udf_put_filename(struct super_block *sb, const uint8_t * sname,
+		     uint8_t * dname, int flen)
 {
 	struct ustr unifilename;
 	int namelen;
 
-	if ( !(udf_char_to_ustr(&unifilename, sname, flen)) )
-	{
+	if (!(udf_char_to_ustr(&unifilename, sname, flen))) {
 		return 0;
 	}
 
-	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
-	{
-		if ( !(namelen = udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN)) )
-		{
+	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
+		if (!
+		    (namelen =
+		     udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN))) {
 			return 0;
 		}
-	}
-	else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
-	{
-		if ( !(namelen = udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname, &unifilename, UDF_NAME_LEN)) )
-		{
+	} else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
+		if (!
+		    (namelen =
+		     udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname, &unifilename,
+				  UDF_NAME_LEN))) {
 			return 0;
 		}
-	}
-	else
+	} else
 		return 0;
 
 	return namelen;
@@ -416,40 +390,36 @@
 #define CRC_MARK			'#'
 #define EXT_SIZE			5
 
-static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen, uint8_t *fidName, int fidNameLen)
+static int udf_translate_to_linux(uint8_t * newName, uint8_t * udfName,
+				  int udfLen, uint8_t * fidName, int fidNameLen)
 {
-	int index, newIndex = 0, needsCRC = 0;	
+	int index, newIndex = 0, needsCRC = 0;
 	int extIndex = 0, newExtIndex = 0, hasExt = 0;
 	unsigned short valueCRC;
 	uint8_t curr;
 	const uint8_t hexChar[] = "0123456789ABCDEF";
 
 	if (udfName[0] == '.' && (udfLen == 1 ||
-		(udfLen == 2 && udfName[1] == '.')))
-	{
+				  (udfLen == 2 && udfName[1] == '.'))) {
 		needsCRC = 1;
 		newIndex = udfLen;
 		memcpy(newName, udfName, udfLen);
-	}
-	else
-	{	
-		for (index = 0; index < udfLen; index++)
-		{
+	} else {
+		for (index = 0; index < udfLen; index++) {
 			curr = udfName[index];
-			if (curr == '/' || curr == 0)
-			{
+			if (curr == '/' || curr == 0) {
 				needsCRC = 1;
 				curr = ILLEGAL_CHAR_MARK;
-				while (index+1 < udfLen && (udfName[index+1] == '/' ||
-					udfName[index+1] == 0))
+				while (index + 1 < udfLen
+				       && (udfName[index + 1] == '/'
+					   || udfName[index + 1] == 0))
 					index++;
 			}
-			if (curr == EXT_MARK && (udfLen - index - 1) <= EXT_SIZE)
-			{
+			if (curr == EXT_MARK
+			    && (udfLen - index - 1) <= EXT_SIZE) {
 				if (udfLen == index + 1)
 					hasExt = 0;
-				else
-				{
+				else {
 					hasExt = 1;
 					extIndex = index;
 					newExtIndex = newIndex;
@@ -461,26 +431,29 @@
 				needsCRC = 1;
 		}
 	}
-	if (needsCRC)
-	{
+	if (needsCRC) {
 		uint8_t ext[EXT_SIZE];
 		int localExtIndex = 0;
 
-		if (hasExt)
-		{
+		if (hasExt) {
 			int maxFilenameLen;
-			for(index = 0; index<EXT_SIZE && extIndex + index +1 < udfLen;
-				index++ )
-			{
+			for (index = 0;
+			     index < EXT_SIZE && extIndex + index + 1 < udfLen;
+			     index++) {
 				curr = udfName[extIndex + index + 1];
 
-				if (curr == '/' || curr == 0)
-				{
+				if (curr == '/' || curr == 0) {
 					needsCRC = 1;
 					curr = ILLEGAL_CHAR_MARK;
-					while(extIndex + index + 2 < udfLen && (index + 1 < EXT_SIZE
-						&& (udfName[extIndex + index + 2] == '/' ||
-							udfName[extIndex + index + 2] == 0)))
+					while (extIndex + index + 2 < udfLen
+					       && (index + 1 < EXT_SIZE
+						   &&
+						   (udfName
+						    [extIndex + index + 2] ==
+						    '/'
+						    || udfName[extIndex +
+							       index + 2] ==
+						    0)))
 						index++;
 				}
 				ext[localExtIndex++] = curr;
@@ -490,8 +463,7 @@
 				newIndex = maxFilenameLen;
 			else
 				newIndex = newExtIndex;
-		}
-		else if (newIndex > 250)
+		} else if (newIndex > 250)
 			newIndex = 250;
 		newName[newIndex++] = CRC_MARK;
 		valueCRC = udf_crc(fidName, fidNameLen, 0);
@@ -500,10 +472,9 @@
 		newName[newIndex++] = hexChar[(valueCRC & 0x00f0) >> 4];
 		newName[newIndex++] = hexChar[(valueCRC & 0x000f)];
 
-		if (hasExt)
-		{
+		if (hasExt) {
 			newName[newIndex++] = EXT_MARK;
-			for (index = 0;index < localExtIndex ;index++ )
+			for (index = 0; index < localExtIndex; index++)
 				newName[newIndex++] = ext[index];
 		}
 	}
diff --git a/fs/utimes.c b/fs/utimes.c
index b3c8895..682eb63 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -106,7 +106,7 @@
                 if (IS_IMMUTABLE(inode))
                         goto dput_and_out;
 
-		if (current->fsuid != inode->i_uid) {
+		if (!is_owner_or_cap(inode)) {
 			if (f) {
 				if (!(f->f_mode & FMODE_WRITE))
 					goto dput_and_out;
diff --git a/fs/xattr.c b/fs/xattr.c
index 4523aca..a44fd92 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -60,8 +60,7 @@
 		if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
 			return -EPERM;
 		if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
-		    (mask & MAY_WRITE) && (current->fsuid != inode->i_uid) &&
-		    !capable(CAP_FOWNER))
+		    (mask & MAY_WRITE) && !is_owner_or_cap(inode))
 			return -EPERM;
 	}
 
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 2df6362..b0f0e58 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -35,10 +35,13 @@
 #include <linux/freezer.h>
 
 static kmem_zone_t *xfs_buf_zone;
-static struct shrinker *xfs_buf_shake;
 STATIC int xfsbufd(void *);
 STATIC int xfsbufd_wakeup(int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
+static struct shrinker xfs_buf_shake = {
+	.shrink = xfsbufd_wakeup,
+	.seeks = DEFAULT_SEEKS,
+};
 
 static struct workqueue_struct *xfslogd_workqueue;
 struct workqueue_struct *xfsdatad_workqueue;
@@ -1832,14 +1835,9 @@
 	if (!xfsdatad_workqueue)
 		goto out_destroy_xfslogd_workqueue;
 
-	xfs_buf_shake = set_shrinker(DEFAULT_SEEKS, xfsbufd_wakeup);
-	if (!xfs_buf_shake)
-		goto out_destroy_xfsdatad_workqueue;
-
+	register_shrinker(&xfs_buf_shake);
 	return 0;
 
- out_destroy_xfsdatad_workqueue:
-	destroy_workqueue(xfsdatad_workqueue);
  out_destroy_xfslogd_workqueue:
 	destroy_workqueue(xfslogd_workqueue);
  out_free_buf_zone:
@@ -1854,7 +1852,7 @@
 void
 xfs_buf_terminate(void)
 {
-	remove_shrinker(xfs_buf_shake);
+	unregister_shrinker(&xfs_buf_shake);
 	destroy_workqueue(xfsdatad_workqueue);
 	destroy_workqueue(xfslogd_workqueue);
 	kmem_zone_destroy(xfs_buf_zone);
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index cbcd40c..2d4be2f 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -212,19 +212,18 @@
 }
 
 #ifdef CONFIG_XFS_DMAPI
-STATIC struct page *
-xfs_vm_nopage(
-	struct vm_area_struct	*area,
-	unsigned long		address,
-	int			*type)
+STATIC int
+xfs_vm_fault(
+	struct vm_area_struct	*vma,
+	struct vm_fault	*vmf)
 {
-	struct inode	*inode = area->vm_file->f_path.dentry->d_inode;
+	struct inode	*inode = vma->vm_file->f_path.dentry->d_inode;
 	bhv_vnode_t	*vp = vn_from_inode(inode);
 
 	ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
-	if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), area, 0))
-		return NULL;
-	return filemap_nopage(area, address, type);
+	if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), vma, 0))
+		return VM_FAULT_SIGBUS;
+	return filemap_fault(vma, vmf);
 }
 #endif /* CONFIG_XFS_DMAPI */
 
@@ -310,6 +309,7 @@
 	struct vm_area_struct *vma)
 {
 	vma->vm_ops = &xfs_file_vm_ops;
+	vma->vm_flags |= VM_CAN_NONLINEAR;
 
 #ifdef CONFIG_XFS_DMAPI
 	if (vn_from_inode(filp->f_path.dentry->d_inode)->v_vfsp->vfs_flag & VFS_DMI)
@@ -464,14 +464,12 @@
 };
 
 static struct vm_operations_struct xfs_file_vm_ops = {
-	.nopage		= filemap_nopage,
-	.populate	= filemap_populate,
+	.fault		= filemap_fault,
 };
 
 #ifdef CONFIG_XFS_DMAPI
 static struct vm_operations_struct xfs_dmapi_file_vm_ops = {
-	.nopage		= xfs_vm_nopage,
-	.populate	= filemap_populate,
+	.fault		= xfs_vm_fault,
 #ifdef HAVE_VMOP_MPROTECT
 	.mprotect	= xfs_vm_mprotect,
 #endif
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 06894cf..4528f9a 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -562,6 +562,7 @@
 	bhv_vfs_sync_work_t	*work, *n;
 	LIST_HEAD		(tmp);
 
+	set_freezable();
 	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
 	for (;;) {
 		timeleft = schedule_timeout_interruptible(timeleft);
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 33dd1ca..201cc32 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -18,6 +18,8 @@
 #ifndef __XFS_SUPER_H__
 #define __XFS_SUPER_H__
 
+#include <linux/exportfs.h>
+
 #ifdef CONFIG_XFS_DMAPI
 # define vfs_insertdmapi(vfs)	vfs_insertops(vfsp, &xfs_dmops)
 # define vfs_initdmapi()	dmapi_init()
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 7def4c6..2d274b2 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -62,7 +62,6 @@
 
 kmem_zone_t	*qm_dqzone;
 kmem_zone_t	*qm_dqtrxzone;
-static struct shrinker *xfs_qm_shaker;
 
 static cred_t	xfs_zerocr;
 
@@ -78,6 +77,11 @@
 STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
 STATIC int	xfs_qm_shake(int, gfp_t);
 
+static struct shrinker xfs_qm_shaker = {
+	.shrink = xfs_qm_shake,
+	.seeks = DEFAULT_SEEKS,
+};
+
 #ifdef DEBUG
 extern mutex_t	qcheck_lock;
 #endif
@@ -149,7 +153,7 @@
 	} else
 		xqm->qm_dqzone = qm_dqzone;
 
-	xfs_qm_shaker = set_shrinker(DEFAULT_SEEKS, xfs_qm_shake);
+	register_shrinker(&xfs_qm_shaker);
 
 	/*
 	 * The t_dqinfo portion of transactions.
@@ -181,7 +185,7 @@
 
 	ASSERT(xqm != NULL);
 	ASSERT(xqm->qm_nrefs == 0);
-	remove_shrinker(xfs_qm_shaker);
+	unregister_shrinker(&xfs_qm_shaker);
 	hsize = xqm->qm_dqhashmask + 1;
 	for (i = 0; i < hsize; i++) {
 		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
diff --git a/include/asm-alpha/a.out.h b/include/asm-alpha/a.out.h
index d97daf4..e43cf61 100644
--- a/include/asm-alpha/a.out.h
+++ b/include/asm-alpha/a.out.h
@@ -101,6 +101,8 @@
 #define STACK_TOP \
   (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
 
+#define STACK_TOP_MAX	0x00120000000UL
+
 #endif
 
 #endif /* __A_OUT_GNU_H__ */
diff --git a/include/asm-alpha/fb.h b/include/asm-alpha/fb.h
new file mode 100644
index 0000000..fa9bbb9
--- /dev/null
+++ b/include/asm-alpha/fb.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/device.h>
+
+/* Caching is off in the I/O space quadrant by design.  */
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
index d2bed3c..bae7f05 100644
--- a/include/asm-alpha/page.h
+++ b/include/asm-alpha/page.h
@@ -17,7 +17,8 @@
 extern void clear_page(void *page);
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vmaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 extern void copy_page(void * _to, void * _from);
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index cf1021a..620c4d8 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -139,16 +139,6 @@
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #define imb() \
 __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
 
diff --git a/include/asm-alpha/termios.h b/include/asm-alpha/termios.h
index 39e492c..fa13716 100644
--- a/include/asm-alpha/termios.h
+++ b/include/asm-alpha/termios.h
@@ -81,7 +81,7 @@
 
 #define user_termio_to_kernel_termios(a_termios, u_termio)			\
 ({										\
-	struct termios *k_termios = (a_termios);				\
+	struct ktermios *k_termios = (a_termios);				\
 	struct termio k_termio;							\
 	int canon, ret;								\
 										\
@@ -113,7 +113,7 @@
  */
 #define kernel_termios_to_user_termio(u_termio, a_termios)		\
 ({									\
-	struct termios *k_termios = (a_termios);			\
+	struct ktermios *k_termios = (a_termios);			\
 	struct termio k_termio;						\
 	int canon;							\
 									\
diff --git a/include/asm-arm/a.out.h b/include/asm-arm/a.out.h
index 3e5fe64..d7165e8 100644
--- a/include/asm-arm/a.out.h
+++ b/include/asm-arm/a.out.h
@@ -30,6 +30,7 @@
 #ifdef __KERNEL__
 #define STACK_TOP	((current->personality == PER_LINUX_32BIT) ? \
 			 TASK_SIZE : TASK_SIZE_26)
+#define STACK_TOP_MAX	TASK_SIZE
 #endif
 
 #ifndef LIBRARY_START_TEXT
diff --git a/include/asm-arm/arch-at91/board.h b/include/asm-arm/arch-at91/board.h
index 0ce6ee9..d96b10f 100644
--- a/include/asm-arm/arch-at91/board.h
+++ b/include/asm-arm/arch-at91/board.h
@@ -64,6 +64,7 @@
 
  /* Ethernet (EMAC & MACB) */
 struct at91_eth_data {
+	u32		phy_mask;
 	u8		phy_irq_pin;	/* PHY IRQ */
 	u8		is_rmii;	/* using RMII interface? */
 };
diff --git a/include/asm-arm/fb.h b/include/asm-arm/fb.h
new file mode 100644
index 0000000..d92e99c
--- /dev/null
+++ b/include/asm-arm/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index cb4c2c9..d2e8171 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -83,14 +83,14 @@
  * means that a write to a clean page will cause a permission fault, and
  * the Linux MM layer will mark the page dirty via handle_pte_fault().
  * For the hardware to notice the permission change, the TLB entry must
- * be flushed, and ptep_establish() does that for us.
+ * be flushed, and ptep_set_access_flags() does that for us.
  *
  * The "accessed" or "young" bit is emulated by a similar method; we only
  * allow accesses to the page if the "young" bit is set.  Accesses to the
  * page will cause a fault, and handle_pte_fault() will set the young bit
  * for us as long as the page is marked present in the corresponding Linux
- * PTE entry.  Again, ptep_establish() will ensure that the TLB is up to
- * date.
+ * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
+ * up to date.
  *
  * However, when the "young" bit is cleared, we deny access to the page
  * by clearing the hardware PTE.  Currently Linux does not flush the TLB
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 6f8e6a6..94ea8c6 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -254,16 +254,6 @@
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
 } while (0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
 /*
  * On the StrongARM, "swp" is terminally broken since it bypasses the
diff --git a/include/asm-arm26/a.out.h b/include/asm-arm26/a.out.h
index 9b2702c..7167f54 100644
--- a/include/asm-arm26/a.out.h
+++ b/include/asm-arm26/a.out.h
@@ -29,6 +29,7 @@
 
 #ifdef __KERNEL__
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 #endif
 
 #ifndef LIBRARY_START_TEXT
diff --git a/include/asm-arm26/fb.h b/include/asm-arm26/fb.h
new file mode 100644
index 0000000..c7df380
--- /dev/null
+++ b/include/asm-arm26/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
index 4703593b..e09da5f 100644
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -110,16 +110,6 @@
 } while (0)
 
 /*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-/*
  * Save the current interrupt enable state & disable IRQs
  */
 #define local_irq_save(x)                               \
diff --git a/include/asm-avr32/a.out.h b/include/asm-avr32/a.out.h
index 50bf6e3..9f398ab 100644
--- a/include/asm-avr32/a.out.h
+++ b/include/asm-avr32/a.out.h
@@ -20,6 +20,7 @@
 #ifdef __KERNEL__
 
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 
 #endif
 
diff --git a/include/asm-avr32/arch-at32ap/board.h b/include/asm-avr32/arch-at32ap/board.h
index 9fd2e32..0215965 100644
--- a/include/asm-avr32/arch-at32ap/board.h
+++ b/include/asm-avr32/arch-at32ap/board.h
@@ -21,6 +21,7 @@
 struct platform_device *at32_add_device_usart(unsigned int id);
 
 struct eth_platform_data {
+	u32	phy_mask;
 	u8	is_rmii;
 };
 struct platform_device *
@@ -35,4 +36,18 @@
 at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data,
 		     unsigned long fbmem_start, unsigned long fbmem_len);
 
+/* depending on what's hooked up, not all SSC pins will be used */
+#define	ATMEL_SSC_TK		0x01
+#define	ATMEL_SSC_TF		0x02
+#define	ATMEL_SSC_TD		0x04
+#define	ATMEL_SSC_TX		(ATMEL_SSC_TK | ATMEL_SSC_TF | ATMEL_SSC_TD)
+
+#define	ATMEL_SSC_RK		0x10
+#define	ATMEL_SSC_RF		0x20
+#define	ATMEL_SSC_RD		0x40
+#define	ATMEL_SSC_RX		(ATMEL_SSC_RK | ATMEL_SSC_RF | ATMEL_SSC_RD)
+
+struct platform_device *
+at32_add_device_ssc(unsigned int id, unsigned int flags);
+
 #endif /* __ASM_ARCH_BOARD_H */
diff --git a/include/asm-avr32/arch-at32ap/sm.h b/include/asm-avr32/arch-at32ap/sm.h
deleted file mode 100644
index 265a9ea..0000000
--- a/include/asm-avr32/arch-at32ap/sm.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * AT32 System Manager interface.
- *
- * Copyright (C) 2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_AT32_SM_H__
-#define __ASM_AVR32_AT32_SM_H__
-
-struct irq_chip;
-struct platform_device;
-
-struct at32_sm {
-	spinlock_t lock;
-	void __iomem *regs;
-	struct irq_chip *eim_chip;
-	unsigned int eim_first_irq;
-	struct platform_device *pdev;
-};
-
-extern struct platform_device at32_sm_device;
-extern struct at32_sm system_manager;
-
-#endif /* __ASM_AVR32_AT32_SM_H__ */
diff --git a/include/asm-avr32/atomic.h b/include/asm-avr32/atomic.h
index b9c2548..7ef3862 100644
--- a/include/asm-avr32/atomic.h
+++ b/include/asm-avr32/atomic.h
@@ -101,7 +101,7 @@
 		"	mov	%1, 1\n"
 		"1:"
 		: "=&r"(tmp), "=&r"(result), "=o"(v->counter)
-		: "m"(v->counter), "rKs21"(a), "rKs21"(u)
+		: "m"(v->counter), "rKs21"(a), "rKs21"(u), "1"(result)
 		: "cc", "memory");
 
 	return result;
@@ -137,7 +137,7 @@
 			"	mov	%1, 1\n"
 			"1:"
 			: "=&r"(tmp), "=&r"(result), "=o"(v->counter)
-			: "m"(v->counter), "r"(a), "ir"(u)
+			: "m"(v->counter), "r"(a), "ir"(u), "1"(result)
 			: "cc", "memory");
 	}
 
diff --git a/include/asm-avr32/fb.h b/include/asm-avr32/fb.h
new file mode 100644
index 0000000..41baf84
--- /dev/null
+++ b/include/asm-avr32/fb.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
+				      & ~_PAGE_CACHABLE)
+				     | (_PAGE_BUFFER | _PAGE_DIRTY));
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-avr32/unaligned.h b/include/asm-avr32/unaligned.h
index 3042723..36f5fd4 100644
--- a/include/asm-avr32/unaligned.h
+++ b/include/asm-avr32/unaligned.h
@@ -7,19 +7,10 @@
  * words, but halfwords must be halfword-aligned, and doublewords must
  * be word-aligned.
  *
- * TODO: Make all this CPU-specific and optimize.
+ * However, swapped word loads must be word-aligned so we can't
+ * optimize word loads in general.
  */
 
-#include <linux/string.h>
-
-/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
-
-#define get_unaligned(ptr) \
-  ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
-
-#define put_unaligned(val, ptr)				\
-  ({ __typeof__(*(ptr)) __tmp = (val);			\
-     memmove((ptr), &__tmp, sizeof(*(ptr)));		\
-     (void)0; })
+#include <asm-generic/unaligned.h>
 
 #endif /* __ASM_AVR32_UNALIGNED_H */
diff --git a/include/asm-blackfin/fb.h b/include/asm-blackfin/fb.h
new file mode 100644
index 0000000..c7df380
--- /dev/null
+++ b/include/asm-blackfin/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-cris/a.out.h b/include/asm-cris/a.out.h
index 770734c..919b34a 100644
--- a/include/asm-cris/a.out.h
+++ b/include/asm-cris/a.out.h
@@ -8,6 +8,7 @@
 
 /* grabbed from the intel stuff  */   
 #define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 
 
 struct exec
diff --git a/include/asm-cris/fb.h b/include/asm-cris/fb.h
new file mode 100644
index 0000000..c7df380
--- /dev/null
+++ b/include/asm-cris/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-cris/page.h b/include/asm-cris/page.h
index 9f13c32..0648e31 100644
--- a/include/asm-cris/page.h
+++ b/include/asm-cris/page.h
@@ -20,7 +20,8 @@
 #define clear_user_page(page, vaddr, pg)    clear_page(page)
 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
 
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 /*
diff --git a/include/asm-frv/fb.h b/include/asm-frv/fb.h
new file mode 100644
index 0000000..c7df380
--- /dev/null
+++ b/include/asm-frv/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-frv/mem-layout.h b/include/asm-frv/mem-layout.h
index a025dd4..aaf2a77 100644
--- a/include/asm-frv/mem-layout.h
+++ b/include/asm-frv/mem-layout.h
@@ -60,6 +60,7 @@
  */
 #define BRK_BASE			__UL(2 * 1024 * 1024 + PAGE_SIZE)
 #define STACK_TOP			__UL(2 * 1024 * 1024)
+#define STACK_TOP_MAX	STACK_TOP
 
 /* userspace process size */
 #ifdef CONFIG_MMU
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index adde699..147e995 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -388,13 +388,6 @@
 static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte &= ~_PAGE_WP; return pte; }
 
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	int i = test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
-	asm volatile("dcf %M0" :: "U"(*ptep));
-	return i;
-}
-
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
@@ -504,7 +497,6 @@
 		remap_pfn_range(vma, vaddr, pfn, size, prot)
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7f30cce..344e309 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -28,7 +28,7 @@
 #endif
 
 #ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
 #endif
 
 #ifndef HAVE_ARCH_WARN_ON
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d984a90..d85172e 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -14,6 +14,11 @@
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+    __attribute__((__section__(".data.percpu.shared_aligned"))) \
+    __typeof__(type) per_cpu__##name				\
+    ____cacheline_aligned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({				\
 	extern int simple_identifier_##var(void);	\
@@ -34,6 +39,9 @@
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+    DEFINE_PER_CPU(type, name)
+
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
 #define __raw_get_cpu_var(var)			per_cpu__##var
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 7d7bcf9..f605e8d 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -3,25 +3,6 @@
 
 #ifndef __ASSEMBLY__
 
-#ifndef __HAVE_ARCH_PTEP_ESTABLISH
-/*
- * Establish a new mapping:
- *  - flush the old one
- *  - update the page tables
- *  - inform the TLB about the new one
- *
- * We hold the mm semaphore for reading, and the pte lock.
- *
- * Note: the old pte is known to not be writable, so we don't need to
- * worry about dirty bits etc getting lost.
- */
-#define ptep_establish(__vma, __address, __ptep, __entry)		\
-do {				  					\
-	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	\
-	flush_tlb_page(__vma, __address);				\
-} while (0)
-#endif
-
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 /*
  * Largely same as above, but only sets the access flags (dirty,
@@ -68,31 +49,6 @@
 })
 #endif
 
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(__vma, __address, __ptep)		\
-({									\
-	pte_t __pte = *__ptep;						\
-	int r = 1;							\
-	if (!pte_dirty(__pte))						\
-		r = 0;							\
-	else								\
-		set_pte_at((__vma)->vm_mm, (__address), (__ptep),	\
-			   pte_mkclean(__pte));				\
-	r;								\
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
-({									\
-	int __dirty;							\
-	__dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep);	\
-	if (__dirty)							\
-		flush_tlb_page(__vma, __address);			\
-	__dirty;							\
-})
-#endif
-
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define ptep_get_and_clear(__mm, __address, __ptep)			\
 ({									\
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 09ec447..16a466e 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -18,7 +18,8 @@
 #define get_unaligned(ptr) \
 	__get_unaligned((ptr), sizeof(*(ptr)))
 #define put_unaligned(x,ptr) \
-	__put_unaligned((__u64)(x), (ptr), sizeof(*(ptr)))
+	((void)sizeof(*(ptr)=(x)),\
+	__put_unaligned((__force __u64)(x), (ptr), sizeof(*(ptr))))
 
 /*
  * This function doesn't actually exist.  The idea is that when
@@ -95,21 +96,21 @@
 	default:				\
 		bad_unaligned_access_length();	\
 	};					\
-	(__typeof__(*(ptr)))val;		\
+	(__force __typeof__(*(ptr)))val;	\
 })
 
 #define __put_unaligned(val, ptr, size)		\
-do {						\
+({						\
 	void *__gu_p = ptr;			\
 	switch (size) {				\
 	case 1:					\
-		*(__u8 *)__gu_p = val;		\
+		*(__u8 *)__gu_p = (__force __u8)val;		\
 	        break;				\
 	case 2:					\
-		__ustw(val, __gu_p);		\
+		__ustw((__force __u16)val, __gu_p);		\
 		break;				\
 	case 4:					\
-		__ustl(val, __gu_p);		\
+		__ustl((__force __u32)val, __gu_p);		\
 		break;				\
 	case 8:					\
 		__ustq(val, __gu_p);		\
@@ -117,6 +118,7 @@
 	default:				\
 	    	bad_unaligned_access_length();	\
 	};					\
-} while(0)
+	(void)0;				\
+})
 
 #endif /* _ASM_GENERIC_UNALIGNED_H */
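The interface these helpers present is unchanged: get_unaligned(ptr) reads through a possibly misaligned pointer and put_unaligned(val, ptr) writes through one; the new (void)sizeof(*(ptr) = (x)) term only adds a compile-time check that the value is actually assignable to *ptr, and the __force casts keep sparse quiet. A minimal usage sketch, with a made-up buffer layout and helper names:

	#include <linux/types.h>
	#include <asm/unaligned.h>

	/* Sketch only: a length field at byte offset 1 of a packed buffer. */
	static u32 demo_read_length(u8 *buf)
	{
		/* buf + 1 need not be 4-byte aligned; get_unaligned() copes. */
		return get_unaligned((u32 *)(buf + 1));
	}

	static void demo_write_length(u8 *buf, u32 len)
	{
		put_unaligned(len, (u32 *)(buf + 1));
	}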
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 84155eb..0240e05 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -224,7 +224,11 @@
 	}
 
 #define NOTES								\
-	.notes : { *(.note.*) } :note
+	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
+		VMLINUX_SYMBOL(__start_notes) = .;			\
+		*(.note.*)						\
+		VMLINUX_SYMBOL(__stop_notes) = .;			\
+	}
 
 #define INITCALLS							\
   	*(.initcall0.init)						\
@@ -245,3 +249,11 @@
   	*(.initcall7.init)						\
   	*(.initcall7s.init)
 
+#define PERCPU(align)							\
+	. = ALIGN(align);						\
+	__per_cpu_start = .;						\
+	.data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
+		*(.data.percpu)						\
+		*(.data.percpu.shared_aligned)				\
+	}								\
+	__per_cpu_end = .;
diff --git a/include/asm-h8300/a.out.h b/include/asm-h8300/a.out.h
index 3c70939..aa5d227 100644
--- a/include/asm-h8300/a.out.h
+++ b/include/asm-h8300/a.out.h
@@ -20,6 +20,7 @@
 #ifdef __KERNEL__
 
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 
 #endif
 
diff --git a/include/asm-h8300/fb.h b/include/asm-h8300/fb.h
new file mode 100644
index 0000000..c7df380
--- /dev/null
+++ b/include/asm-h8300/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-h8300/page.h b/include/asm-h8300/page.h
index 3b4f290..c8cc81a 100644
--- a/include/asm-h8300/page.h
+++ b/include/asm-h8300/page.h
@@ -22,7 +22,8 @@
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 /*
diff --git a/include/asm-i386/a.out.h b/include/asm-i386/a.out.h
index ab17bb8..851a60f 100644
--- a/include/asm-i386/a.out.h
+++ b/include/asm-i386/a.out.h
@@ -20,6 +20,7 @@
 #ifdef __KERNEL__
 
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 
 #endif
 
diff --git a/include/asm-i386/fb.h b/include/asm-i386/fb.h
new file mode 100644
index 0000000..d1c6297
--- /dev/null
+++ b/include/asm-i386/fb.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+extern int fb_is_primary_device(struct fb_info *info);
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	if (boot_cpu_data.x86 > 3)
+		pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 9e15ce0..36f3106 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -41,6 +41,7 @@
 extern void fixup_irqs(cpumask_t map);
 #endif
 
+unsigned int do_IRQ(struct pt_regs *regs);
 void init_IRQ(void);
 void __init native_init_IRQ(void);
 
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index 8774d06..06f7303 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -42,7 +42,6 @@
 	? (MAX_STACK_SIZE) \
 	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
 
-#define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
 #define ARCH_SUPPORTS_KRETPROBES
 #define  ARCH_INACTIVE_KPROBE_COUNT 0
 #define flush_insn_slot(p)	do { } while (0)
diff --git a/include/asm-i386/mach-default/irq_vectors_limits.h b/include/asm-i386/mach-default/irq_vectors_limits.h
index 7f161e7..a90c7a6 100644
--- a/include/asm-i386/mach-default/irq_vectors_limits.h
+++ b/include/asm-i386/mach-default/irq_vectors_limits.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_IRQ_VECTORS_LIMITS_H
 #define _ASM_IRQ_VECTORS_LIMITS_H
 
-#ifdef CONFIG_X86_IO_APIC
+#if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT)
 #define NR_IRQS 224
 # if (224 >= 32 * NR_CPUS)
 # define NR_IRQ_VECTORS NR_IRQS
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 8198d1c..7eb0b0b 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -32,6 +32,8 @@
 #endif
 }
 
+void leave_mm(unsigned long cpu);
+
 static inline void switch_mm(struct mm_struct *prev,
 			     struct mm_struct *next,
 			     struct task_struct *tsk)
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index 818ac8b..99cf5d3 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -34,7 +34,8 @@
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 /*
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index 7f846a7..7df88be 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -52,6 +52,8 @@
 	/* Basic arch-specific setup */
 	void (*arch_setup)(void);
 	char *(*memory_setup)(void);
+	void (*post_allocator_init)(void);
+
 	void (*init_IRQ)(void);
 	void (*time_init)(void);
 
@@ -116,7 +118,7 @@
 
 	u64 (*read_tsc)(void);
 	u64 (*read_pmc)(void);
- 	u64 (*get_scheduled_cycles)(void);
+	unsigned long long (*sched_clock)(void);
 	unsigned long (*get_cpu_khz)(void);
 
 	/* Segment descriptor handling */
@@ -173,7 +175,7 @@
 				 unsigned long va);
 
 	/* Hooks for allocating/releasing pagetable pages */
-	void (*alloc_pt)(u32 pfn);
+	void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
 	void (*alloc_pd)(u32 pfn);
 	void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
 	void (*release_pt)(u32 pfn);
@@ -260,6 +262,7 @@
 unsigned paravirt_patch_insns(void *site, unsigned len,
 			      const char *start, const char *end);
 
+int paravirt_disable_iospace(void);
 
 /*
  * This generates an indirect call based on the operation type number.
@@ -563,7 +566,10 @@
 
 #define rdtscll(val) (val = paravirt_read_tsc())
 
-#define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles())
+static inline unsigned long long paravirt_sched_clock(void)
+{
+	return PVOP_CALL0(unsigned long long, sched_clock);
+}
 #define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())
 
 #define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
@@ -669,6 +675,12 @@
 }
 #endif
 
+static inline void paravirt_post_allocator_init(void)
+{
+	if (paravirt_ops.post_allocator_init)
+		(*paravirt_ops.post_allocator_init)();
+}
+
 static inline void paravirt_pagetable_setup_start(pgd_t *base)
 {
 	if (paravirt_ops.pagetable_setup_start)
@@ -725,9 +737,9 @@
 	PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va);
 }
 
-static inline void paravirt_alloc_pt(unsigned pfn)
+static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
 {
-	PVOP_VCALL1(alloc_pt, pfn);
+	PVOP_VCALL2(alloc_pt, mm, pfn);
 }
 static inline void paravirt_release_pt(unsigned pfn)
 {
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
index f54830b..a7ebd43 100644
--- a/include/asm-i386/percpu.h
+++ b/include/asm-i386/percpu.h
@@ -54,6 +54,11 @@
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+    __attribute__((__section__(".data.percpu.shared_aligned"))) \
+    __typeof__(type) per_cpu__##name				\
+    ____cacheline_aligned_in_smp
+
 /* We can use this directly for local CPU (faster). */
 DECLARE_PER_CPU(unsigned long, this_cpu_off);
 
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index d07b7af..f2fc33c 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -7,7 +7,7 @@
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define paravirt_alloc_pt(pfn) do { } while (0)
+#define paravirt_alloc_pt(mm, pfn) do { } while (0)
 #define paravirt_alloc_pd(pfn) do { } while (0)
 #define paravirt_alloc_pd(pfn) do { } while (0)
 #define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
@@ -17,13 +17,13 @@
 
 #define pmd_populate_kernel(mm, pmd, pte)			\
 do {								\
-	paravirt_alloc_pt(__pa(pte) >> PAGE_SHIFT);		\
+	paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);		\
 	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)));		\
 } while (0)
 
 #define pmd_populate(mm, pmd, pte) 				\
 do {								\
-	paravirt_alloc_pt(page_to_pfn(pte));			\
+	paravirt_alloc_pt(mm, page_to_pfn(pte));		\
 	set_pmd(pmd, __pmd(_PAGE_TABLE +			\
 		((unsigned long long)page_to_pfn(pte) <<	\
 			(unsigned long long) PAGE_SHIFT)));	\
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 01734e0..c7fefa6 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -289,17 +289,6 @@
 	__changed;							\
 })
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(vma, addr, ptep) ({			\
-	int __ret = 0;							\
-	if (pte_dirty(*(ptep)))						\
-		__ret = test_and_clear_bit(_PAGE_BIT_DIRTY,		\
-						&(ptep)->pte_low);	\
-	if (__ret)							\
-		pte_update((vma)->vm_mm, addr, ptep);			\
-	__ret;								\
-})
-
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define ptep_test_and_clear_young(vma, addr, ptep) ({			\
 	int __ret = 0;							\
@@ -311,27 +300,6 @@
 	__ret;								\
 })
 
-/*
- * Rules for using ptep_establish: the pte MUST be a user pte, and
- * must be a present->present transition.
- */
-#define __HAVE_ARCH_PTEP_ESTABLISH
-#define ptep_establish(vma, address, ptep, pteval)			\
-do {									\
-	set_pte_present((vma)->vm_mm, address, ptep, pteval);		\
-	flush_tlb_page(vma, address);					\
-} while (0)
-
-#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(vma, address, ptep)			\
-({									\
-	int __dirty;							\
-	__dirty = ptep_test_and_clear_dirty((vma), (address), (ptep));	\
-	if (__dirty)							\
-		flush_tlb_page(vma, address);				\
-	__dirty;							\
-})
-
 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
 #define ptep_clear_flush_young(vma, address, ptep)			\
 ({									\
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 0d5bff9..7862fe8 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -81,6 +81,10 @@
 
 extern unsigned long init_pg_tables_end;
 
+#ifndef CONFIG_PARAVIRT
+#define paravirt_post_allocator_init()	do {} while (0)
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif  /*  __KERNEL__  */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 0c71327..1f73bde 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -43,9 +43,12 @@
 
 #define cpu_physical_id(cpu)	x86_cpu_to_apicid[cpu]
 
+extern void set_cpu_sibling_map(int cpu);
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern void cpu_exit_clear(void);
 extern void cpu_uninit(void);
+extern void remove_siblinginfo(int cpu);
 #endif
 
 struct smp_ops
@@ -129,6 +132,8 @@
 extern void __cpu_die(unsigned int cpu);
 extern unsigned int num_processors;
 
+void __cpuinit smp_store_cpu_info(int id);
+
 #endif /* !__ASSEMBLY__ */
 
 #else /* CONFIG_SMP */
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 94ed3686..609756c 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -310,15 +310,6 @@
 extern int es7000_plat;
 void cpu_idle_wait(void);
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible:
- */
-static inline void sched_cacheflush(void)
-{
-	wbinvd();
-}
-
 extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index 153770e..51a713e 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -15,8 +15,38 @@
 extern int recalibrate_cpu_khz(void);
 
 #ifndef CONFIG_PARAVIRT
-#define get_scheduled_cycles(val) rdtscll(val)
 #define calculate_cpu_khz() native_calculate_cpu_khz()
 #endif
 
+/* Accelerators for sched_clock()
+ * convert from cycles(64bits) => nanoseconds (64bits)
+ *  basic equation:
+ *		ns = cycles / (freq / ns_per_sec)
+ *		ns = cycles * (ns_per_sec / freq)
+ *		ns = cycles * (10^9 / (cpu_khz * 10^3))
+ *		ns = cycles * (10^6 / cpu_khz)
+ *
+ *	Then we use scaling math (suggested by george@mvista.com) to get:
+ *		ns = cycles * (10^6 * SC / cpu_khz) / SC
+ *		ns = cycles * cyc2ns_scale / SC
+ *
+ *	And since SC is a constant power of two, we can convert the div
+ *  into a shift.
+ *
+ *  We can use a khz divisor instead of mhz to keep better precision, since
+ *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ *  (mathieu.desnoyers@polymtl.ca)
+ *
+ *			-johnstul@us.ibm.com "math is hard, let's go shopping!"
+ */
+extern unsigned long cyc2ns_scale __read_mostly;
+
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
+
+
 #endif
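To make the scaling above concrete: with CYC2NS_SCALE_FACTOR = 10, a hypothetical 2 GHz CPU (cpu_khz = 2000000) gets cyc2ns_scale = (1000000 << 10) / 2000000 = 512, so cycles_2_ns() is just one multiply and one shift. An illustrative sketch, not part of the patch (the real scale is computed in arch code):

	/* Sketch only: how the scale follows from cpu_khz. */
	static unsigned long demo_cyc2ns_scale(unsigned long cpu_khz)
	{
		return (1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;
	}

	/*
	 * For cpu_khz = 2000000: scale = 512, and
	 * cycles_2_ns(4000) = (4000 * 512) >> 10 = 2000 ns, i.e. 0.5 ns/cycle.
	 */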
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
index 62c091f..a4d8066 100644
--- a/include/asm-i386/tsc.h
+++ b/include/asm-i386/tsc.h
@@ -63,6 +63,7 @@
 extern void mark_tsc_unstable(char *reason);
 extern int unsynchronized_tsc(void);
 extern void init_tsc_clocksource(void);
+int check_tsc_unstable(void);
 
 /*
  * Boot-time check whether the TSCs are synchronized across
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index e84ace1..9b15545 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -329,10 +329,11 @@
 #define __NR_signalfd		321
 #define __NR_timerfd		322
 #define __NR_eventfd		323
+#define __NR_fallocate		324
 
 #ifdef __KERNEL__
 
-#define NR_syscalls 324
+#define NR_syscalls 325
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-i386/vmi_time.h b/include/asm-i386/vmi_time.h
index 213930b..4781881 100644
--- a/include/asm-i386/vmi_time.h
+++ b/include/asm-i386/vmi_time.h
@@ -49,7 +49,7 @@
 extern void __init vmi_time_init(void);
 extern unsigned long vmi_get_wallclock(void);
 extern int vmi_set_wallclock(unsigned long now);
-extern unsigned long long vmi_get_sched_cycles(void);
+extern unsigned long long vmi_sched_clock(void);
 extern unsigned long vmi_cpu_khz(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
diff --git a/include/asm-i386/xen/hypercall.h b/include/asm-i386/xen/hypercall.h
new file mode 100644
index 0000000..bc0ee7d
--- /dev/null
+++ b/include/asm-i386/xen/hypercall.h
@@ -0,0 +1,413 @@
+/******************************************************************************
+ * hypercall.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERCALL_H__
+#define __HYPERCALL_H__
+
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/physdev.h>
+
+extern struct { char _entry[32]; } hypercall_page[];
+
+#define _hypercall0(type, name)						\
+({									\
+	long __res;							\
+	asm volatile (							\
+		"call %[call]"						\
+		: "=a" (__res)						\
+		: [call] "m" (hypercall_page[__HYPERVISOR_##name])	\
+		: "memory" );						\
+	(type)__res;							\
+})
+
+#define _hypercall1(type, name, a1)					\
+({									\
+	long __res, __ign1;						\
+	asm volatile (							\
+		"call %[call]"						\
+		: "=a" (__res), "=b" (__ign1)				\
+		: "1" ((long)(a1)),					\
+		  [call] "m" (hypercall_page[__HYPERVISOR_##name])	\
+		: "memory" );						\
+	(type)__res;							\
+})
+
+#define _hypercall2(type, name, a1, a2)					\
+({									\
+	long __res, __ign1, __ign2;					\
+	asm volatile (							\
+		"call %[call]"						\
+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2)		\
+		: "1" ((long)(a1)), "2" ((long)(a2)),			\
+		  [call] "m" (hypercall_page[__HYPERVISOR_##name])	\
+		: "memory" );						\
+	(type)__res;							\
+})
+
+#define _hypercall3(type, name, a1, a2, a3)				\
+({									\
+	long __res, __ign1, __ign2, __ign3;				\
+	asm volatile (							\
+		"call %[call]"						\
+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),		\
+		"=d" (__ign3)						\
+		: "1" ((long)(a1)), "2" ((long)(a2)),			\
+		  "3" ((long)(a3)),					\
+		  [call] "m" (hypercall_page[__HYPERVISOR_##name])	\
+		: "memory" );						\
+	(type)__res;							\
+})
+
+#define _hypercall4(type, name, a1, a2, a3, a4)				\
+({									\
+	long __res, __ign1, __ign2, __ign3, __ign4;			\
+	asm volatile (							\
+		"call %[call]"						\
+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),		\
+		"=d" (__ign3), "=S" (__ign4)				\
+		: "1" ((long)(a1)), "2" ((long)(a2)),			\
+		  "3" ((long)(a3)), "4" ((long)(a4)),			\
+		  [call] "m" (hypercall_page[__HYPERVISOR_##name])	\
+		: "memory" );						\
+	(type)__res;							\
+})
+
+#define _hypercall5(type, name, a1, a2, a3, a4, a5)			\
+({									\
+	long __res, __ign1, __ign2, __ign3, __ign4, __ign5;		\
+	asm volatile (							\
+		"call %[call]"						\
+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),		\
+		"=d" (__ign3), "=S" (__ign4), "=D" (__ign5)		\
+		: "1" ((long)(a1)), "2" ((long)(a2)),			\
+		  "3" ((long)(a3)), "4" ((long)(a4)),			\
+		  "5" ((long)(a5)),					\
+		  [call] "m" (hypercall_page[__HYPERVISOR_##name])	\
+		: "memory" );						\
+	(type)__res;							\
+})
+
+static inline int
+HYPERVISOR_set_trap_table(struct trap_info *table)
+{
+	return _hypercall1(int, set_trap_table, table);
+}
+
+static inline int
+HYPERVISOR_mmu_update(struct mmu_update *req, int count,
+		      int *success_count, domid_t domid)
+{
+	return _hypercall4(int, mmu_update, req, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
+		     int *success_count, domid_t domid)
+{
+	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
+{
+	return _hypercall2(int, set_gdt, frame_list, entries);
+}
+
+static inline int
+HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
+{
+	return _hypercall2(int, stack_switch, ss, esp);
+}
+
+static inline int
+HYPERVISOR_set_callbacks(unsigned long event_selector,
+			 unsigned long event_address,
+			 unsigned long failsafe_selector,
+			 unsigned long failsafe_address)
+{
+	return _hypercall4(int, set_callbacks,
+			   event_selector, event_address,
+			   failsafe_selector, failsafe_address);
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(int set)
+{
+	return _hypercall1(int, fpu_taskswitch, set);
+}
+
+static inline int
+HYPERVISOR_sched_op(int cmd, unsigned long arg)
+{
+	return _hypercall2(int, sched_op, cmd, arg);
+}
+
+static inline long
+HYPERVISOR_set_timer_op(u64 timeout)
+{
+	unsigned long timeout_hi = (unsigned long)(timeout>>32);
+	unsigned long timeout_lo = (unsigned long)timeout;
+	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
+}
+
+static inline int
+HYPERVISOR_set_debugreg(int reg, unsigned long value)
+{
+	return _hypercall2(int, set_debugreg, reg, value);
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(int reg)
+{
+	return _hypercall1(unsigned long, get_debugreg, reg);
+}
+
+static inline int
+HYPERVISOR_update_descriptor(u64 ma, u64 desc)
+{
+	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
+}
+
+static inline int
+HYPERVISOR_memory_op(unsigned int cmd, void *arg)
+{
+	return _hypercall2(int, memory_op, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_multicall(void *call_list, int nr_calls)
+{
+	return _hypercall2(int, multicall, call_list, nr_calls);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
+			     unsigned long flags)
+{
+	unsigned long pte_hi = 0;
+#ifdef CONFIG_X86_PAE
+	pte_hi = new_val.pte_high;
+#endif
+	return _hypercall4(int, update_va_mapping, va,
+			   new_val.pte_low, pte_hi, flags);
+}
+
+static inline int
+HYPERVISOR_event_channel_op(int cmd, void *arg)
+{
+	int rc = _hypercall2(int, event_channel_op, cmd, arg);
+	if (unlikely(rc == -ENOSYS)) {
+		struct evtchn_op op;
+		op.cmd = cmd;
+		memcpy(&op.u, arg, sizeof(op.u));
+		rc = _hypercall1(int, event_channel_op_compat, &op);
+		memcpy(arg, &op.u, sizeof(op.u));
+	}
+	return rc;
+}
+
+static inline int
+HYPERVISOR_xen_version(int cmd, void *arg)
+{
+	return _hypercall2(int, xen_version, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_console_io(int cmd, int count, char *str)
+{
+	return _hypercall3(int, console_io, cmd, count, str);
+}
+
+static inline int
+HYPERVISOR_physdev_op(int cmd, void *arg)
+{
+	int rc = _hypercall2(int, physdev_op, cmd, arg);
+	if (unlikely(rc == -ENOSYS)) {
+		struct physdev_op op;
+		op.cmd = cmd;
+		memcpy(&op.u, arg, sizeof(op.u));
+		rc = _hypercall1(int, physdev_op_compat, &op);
+		memcpy(arg, &op.u, sizeof(op.u));
+	}
+	return rc;
+}
+
+static inline int
+HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
+{
+	return _hypercall3(int, grant_table_op, cmd, uop, count);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
+					 unsigned long flags, domid_t domid)
+{
+	unsigned long pte_hi = 0;
+#ifdef CONFIG_X86_PAE
+	pte_hi = new_val.pte_high;
+#endif
+	return _hypercall5(int, update_va_mapping_otherdomain, va,
+			   new_val.pte_low, pte_hi, flags, domid);
+}
+
+static inline int
+HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
+{
+	return _hypercall2(int, vm_assist, cmd, type);
+}
+
+static inline int
+HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
+{
+	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
+}
+
+static inline int
+HYPERVISOR_suspend(unsigned long srec)
+{
+	return _hypercall3(int, sched_op, SCHEDOP_shutdown,
+			   SHUTDOWN_suspend, srec);
+}
+
+static inline int
+HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
+{
+	return _hypercall2(int, nmi_op, op, arg);
+}
+
+static inline void
+MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
+			pte_t new_val, unsigned long flags)
+{
+	mcl->op = __HYPERVISOR_update_va_mapping;
+	mcl->args[0] = va;
+#ifdef CONFIG_X86_PAE
+	mcl->args[1] = new_val.pte_low;
+	mcl->args[2] = new_val.pte_high;
+#else
+	mcl->args[1] = new_val.pte_low;
+	mcl->args[2] = 0;
+#endif
+	mcl->args[3] = flags;
+}
+
+static inline void
+MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
+		     void *uop, unsigned int count)
+{
+	mcl->op = __HYPERVISOR_grant_table_op;
+	mcl->args[0] = cmd;
+	mcl->args[1] = (unsigned long)uop;
+	mcl->args[2] = count;
+}
+
+static inline void
+MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va,
+				    pte_t new_val, unsigned long flags,
+				    domid_t domid)
+{
+	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
+	mcl->args[0] = va;
+#ifdef CONFIG_X86_PAE
+	mcl->args[1] = new_val.pte_low;
+	mcl->args[2] = new_val.pte_high;
+#else
+	mcl->args[1] = new_val.pte_low;
+	mcl->args[2] = 0;
+#endif
+	mcl->args[3] = flags;
+	mcl->args[4] = domid;
+}
+
+static inline void
+MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
+			struct desc_struct desc)
+{
+	mcl->op = __HYPERVISOR_update_descriptor;
+	mcl->args[0] = maddr;
+	mcl->args[1] = maddr >> 32;
+	mcl->args[2] = desc.a;
+	mcl->args[3] = desc.b;
+}
+
+static inline void
+MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
+{
+	mcl->op = __HYPERVISOR_memory_op;
+	mcl->args[0] = cmd;
+	mcl->args[1] = (unsigned long)arg;
+}
+
+static inline void
+MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
+		 int count, int *success_count, domid_t domid)
+{
+	mcl->op = __HYPERVISOR_mmu_update;
+	mcl->args[0] = (unsigned long)req;
+	mcl->args[1] = count;
+	mcl->args[2] = (unsigned long)success_count;
+	mcl->args[3] = domid;
+}
+
+static inline void
+MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
+		int *success_count, domid_t domid)
+{
+	mcl->op = __HYPERVISOR_mmuext_op;
+	mcl->args[0] = (unsigned long)op;
+	mcl->args[1] = count;
+	mcl->args[2] = (unsigned long)success_count;
+	mcl->args[3] = domid;
+}
+
+static inline void
+MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
+{
+	mcl->op = __HYPERVISOR_set_gdt;
+	mcl->args[0] = (unsigned long)frames;
+	mcl->args[1] = entries;
+}
+
+static inline void
+MULTI_stack_switch(struct multicall_entry *mcl,
+		   unsigned long ss, unsigned long esp)
+{
+	mcl->op = __HYPERVISOR_stack_switch;
+	mcl->args[0] = ss;
+	mcl->args[1] = esp;
+}
+
+#endif /* __HYPERCALL_H__ */
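For orientation: each _hypercallN() variant above passes its arguments in the registers the 32-bit Xen ABI expects (EBX, ECX, EDX, ESI, EDI, in that order), calls through the fixed slot for that hypercall in hypercall_page[], and gets the result back in EAX. A sketch of using one of the resulting wrappers (SCHEDOP_yield comes from xen/interface/sched.h):

	/* Sketch only: voluntarily give the CPU back to the hypervisor. */
	static void demo_yield_to_xen(void)
	{
		/*
		 * Expands to _hypercall2(int, sched_op, SCHEDOP_yield, 0):
		 * EBX = SCHEDOP_yield, ECX = 0, call via
		 * hypercall_page[__HYPERVISOR_sched_op], result in EAX.
		 */
		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
	}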
diff --git a/include/asm-i386/xen/hypervisor.h b/include/asm-i386/xen/hypervisor.h
new file mode 100644
index 0000000..8e15dd2
--- /dev/null
+++ b/include/asm-i386/xen/hypervisor.h
@@ -0,0 +1,73 @@
+/******************************************************************************
+ * hypervisor.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERVISOR_H__
+#define __HYPERVISOR_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/version.h>
+
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/desc.h>
+#if defined(__i386__)
+#  ifdef CONFIG_X86_PAE
+#   include <asm-generic/pgtable-nopud.h>
+#  else
+#   include <asm-generic/pgtable-nopmd.h>
+#  endif
+#endif
+#include <asm/xen/hypercall.h>
+
+/* arch/i386/kernel/setup.c */
+extern struct shared_info *HYPERVISOR_shared_info;
+extern struct start_info *xen_start_info;
+#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
+
+/* arch/i386/mach-xen/evtchn.c */
+/* Force a proper event-channel callback from Xen. */
+extern void force_evtchn_callback(void);
+
+/* Turn jiffies into Xen system time. */
+u64 jiffies_to_st(unsigned long jiffies);
+
+
+#define MULTI_UVMFLAGS_INDEX 3
+#define MULTI_UVMDOMID_INDEX 4
+
+#define is_running_on_xen()	(xen_start_info ? 1 : 0)
+
+#endif /* __HYPERVISOR_H__ */
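is_running_on_xen() and is_initial_xendomain() are the usual guards for code that must only run as a Xen guest, or only in the initial domain. A hedged sketch of the intended use (function name is made up):

	/* Sketch only: skip Xen-specific setup on bare metal. */
	static int demo_xen_setup(void)
	{
		if (!is_running_on_xen())
			return 0;
		if (is_initial_xendomain())
			printk(KERN_INFO "demo: running in the initial domain\n");
		return 0;
	}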
diff --git a/include/asm-i386/xen/interface.h b/include/asm-i386/xen/interface.h
new file mode 100644
index 0000000..165c396
--- /dev/null
+++ b/include/asm-i386/xen/interface.h
@@ -0,0 +1,188 @@
+/******************************************************************************
+ * arch-x86_32.h
+ *
+ * Guest OS interface to x86 32-bit Xen.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_32_H__
+#define __XEN_PUBLIC_ARCH_X86_32_H__
+
+#ifdef __XEN__
+#define __DEFINE_GUEST_HANDLE(name, type) \
+    typedef struct { type *p; } __guest_handle_ ## name
+#else
+#define __DEFINE_GUEST_HANDLE(name, type) \
+    typedef type * __guest_handle_ ## name
+#endif
+
+#define DEFINE_GUEST_HANDLE_STRUCT(name) \
+	__DEFINE_GUEST_HANDLE(name, struct name)
+#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
+#define GUEST_HANDLE(name)        __guest_handle_ ## name
+
+#ifndef __ASSEMBLY__
+/* Guest handles for primitive C types. */
+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_GUEST_HANDLE(uint,  unsigned int);
+__DEFINE_GUEST_HANDLE(ulong, unsigned long);
+DEFINE_GUEST_HANDLE(char);
+DEFINE_GUEST_HANDLE(int);
+DEFINE_GUEST_HANDLE(long);
+DEFINE_GUEST_HANDLE(void);
+#endif
+
+/*
+ * SEGMENT DESCRIPTOR TABLES
+ */
+/*
+ * A number of GDT entries are reserved by Xen. These are not situated at the
+ * start of the GDT because some stupid OSes export hard-coded selector values
+ * in their ABI. These hard-coded values are always near the start of the GDT,
+ * so Xen places itself out of the way, at the far end of the GDT.
+ */
+#define FIRST_RESERVED_GDT_PAGE  14
+#define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
+
+/*
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+#define FLAT_RING1_CS 0xe019    /* GDT index 259 */
+#define FLAT_RING1_DS 0xe021    /* GDT index 260 */
+#define FLAT_RING1_SS 0xe021    /* GDT index 260 */
+#define FLAT_RING3_CS 0xe02b    /* GDT index 261 */
+#define FLAT_RING3_DS 0xe033    /* GDT index 262 */
+#define FLAT_RING3_SS 0xe033    /* GDT index 262 */
+
+#define FLAT_KERNEL_CS FLAT_RING1_CS
+#define FLAT_KERNEL_DS FLAT_RING1_DS
+#define FLAT_KERNEL_SS FLAT_RING1_SS
+#define FLAT_USER_CS    FLAT_RING3_CS
+#define FLAT_USER_DS    FLAT_RING3_DS
+#define FLAT_USER_SS    FLAT_RING3_SS
+
+/* And the trap vector is... */
+#define TRAP_INSTR "int $0x82"
+
+/*
+ * Virtual addresses beyond this are not modifiable by guest OSes. The
+ * machine->physical mapping table starts at this address, read-only.
+ */
+#ifdef CONFIG_X86_PAE
+#define __HYPERVISOR_VIRT_START 0xF5800000
+#else
+#define __HYPERVISOR_VIRT_START 0xFC000000
+#endif
+
+#ifndef HYPERVISOR_VIRT_START
+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
+#endif
+
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
+#endif
+
+/* Maximum number of virtual CPUs in multi-processor guests. */
+#define MAX_VIRT_CPUS 32
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Send an array of these to HYPERVISOR_set_trap_table()
+ */
+#define TI_GET_DPL(_ti)		((_ti)->flags & 3)
+#define TI_GET_IF(_ti)		((_ti)->flags & 4)
+#define TI_SET_DPL(_ti, _dpl)	((_ti)->flags |= (_dpl))
+#define TI_SET_IF(_ti, _if)	((_ti)->flags |= ((!!(_if))<<2))
+
+struct trap_info {
+    uint8_t       vector;  /* exception vector                              */
+    uint8_t       flags;   /* 0-3: privilege level; 4: clear event enable?  */
+    uint16_t      cs;      /* code selector                                 */
+    unsigned long address; /* code offset                                   */
+};
+DEFINE_GUEST_HANDLE_STRUCT(trap_info);
+
+struct cpu_user_regs {
+    uint32_t ebx;
+    uint32_t ecx;
+    uint32_t edx;
+    uint32_t esi;
+    uint32_t edi;
+    uint32_t ebp;
+    uint32_t eax;
+    uint16_t error_code;    /* private */
+    uint16_t entry_vector;  /* private */
+    uint32_t eip;
+    uint16_t cs;
+    uint8_t  saved_upcall_mask;
+    uint8_t  _pad0;
+    uint32_t eflags;        /* eflags.IF == !saved_upcall_mask */
+    uint32_t esp;
+    uint16_t ss, _pad1;
+    uint16_t es, _pad2;
+    uint16_t ds, _pad3;
+    uint16_t fs, _pad4;
+    uint16_t gs, _pad5;
+};
+DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
+
+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
+
+/*
+ * The following is all CPU context. Note that the fpu_ctxt block is filled
+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ */
+struct vcpu_guest_context {
+    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
+    struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers     */
+#define VGCF_I387_VALID (1<<0)
+#define VGCF_HVM_GUEST  (1<<1)
+#define VGCF_IN_KERNEL  (1<<2)
+    unsigned long flags;                    /* VGCF_* flags                 */
+    struct cpu_user_regs user_regs;         /* User-level CPU registers     */
+    struct trap_info trap_ctxt[256];        /* Virtual IDT                  */
+    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
+    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
+    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
+    unsigned long ctrlreg[8];               /* CR0-CR7 (control registers)  */
+    unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
+    unsigned long event_callback_cs;        /* CS:EIP of event callback     */
+    unsigned long event_callback_eip;
+    unsigned long failsafe_callback_cs;     /* CS:EIP of failsafe callback  */
+    unsigned long failsafe_callback_eip;
+    unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
+};
+DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
+
+struct arch_shared_info {
+    unsigned long max_pfn;                  /* max pfn that appears in table */
+    /* Frame containing list of mfns containing list of mfns containing p2m. */
+    unsigned long pfn_to_mfn_frame_list_list;
+    unsigned long nmi_reason;
+};
+
+struct arch_vcpu_info {
+    unsigned long cr2;
+    unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
+};
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Prefix forces emulation of some non-trapping instructions.
+ * Currently only CPUID.
+ */
+#ifdef __ASSEMBLY__
+#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
+#define XEN_CPUID          XEN_EMULATE_PREFIX cpuid
+#else
+#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
+#define XEN_CPUID          XEN_EMULATE_PREFIX "cpuid"
+#endif
+
+#endif
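The guest-handle macros at the top of this header just wrap a pointer type so that structures shared with the hypervisor have one well-defined handle type on both sides of the interface. On the Linux (non-__XEN__) side the expansion is a plain pointer typedef; illustratively:

	/* DEFINE_GUEST_HANDLE_STRUCT(trap_info) becomes, on the Linux side: */
	typedef struct trap_info *__guest_handle_trap_info;

	/* ...and GUEST_HANDLE(trap_info) then names that typedef, so a handle
	 * is passed around exactly like a struct trap_info pointer. */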
diff --git a/include/asm-ia64/fb.h b/include/asm-ia64/fb.h
new file mode 100644
index 0000000..89a397c
--- /dev/null
+++ b/include/asm-ia64/fb.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <linux/efi.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-ia64/ioctls.h b/include/asm-ia64/ioctls.h
index 31ee521..f41b636 100644
--- a/include/asm-ia64/ioctls.h
+++ b/include/asm-ia64/ioctls.h
@@ -53,6 +53,10 @@
 #define TIOCSBRK	0x5427  /* BSD compatibility */
 #define TIOCCBRK	0x5428  /* BSD compatibility */
 #define TIOCGSID	0x5429  /* Return the session ID of FD */
+#define TCGETS2		_IOR('T',0x2A, struct termios2)
+#define TCSETS2		_IOW('T',0x2B, struct termios2)
+#define TCSETSW2	_IOW('T',0x2C, struct termios2)
+#define TCSETSF2	_IOW('T',0x2D, struct termios2)
 #define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
 #define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
 
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 6382e52..067d9dea 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -82,8 +82,6 @@
 	struct prev_kprobe prev_kprobe[ARCH_PREV_KPROBE_SZ];
 };
 
-#define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
-
 #define ARCH_SUPPORTS_KRETPROBES
 #define  ARCH_INACTIVE_KPROBE_COUNT 1
 
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 485759b..d634546 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -87,12 +87,13 @@
 } while (0)
 
 
-#define alloc_zeroed_user_highpage(vma, vaddr) \
-({						\
-	struct page *page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr); \
-	if (page)				\
- 		flush_dcache_page(page);	\
-	page;					\
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr)		\
+({									\
+	struct page *page = alloc_page_vma(				\
+		GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr);	\
+	if (page)							\
+ 		flush_dcache_page(page);				\
+	page;								\
 })
 
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h
index fbe5cf3..43a7aac 100644
--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -29,6 +29,16 @@
 	__attribute__((__section__(".data.percpu")))		\
 	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
 
+#ifdef CONFIG_SMP
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
+	__attribute__((__section__(".data.percpu.shared_aligned")))	\
+	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name		\
+	____cacheline_aligned_in_smp
+#else
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+	DEFINE_PER_CPU(type, name)
+#endif
+
 /*
  * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
  * external routine, to avoid include-hell.
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index f923d81..de6d01e 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -395,22 +395,6 @@
 #endif
 }
 
-static inline int
-ptep_test_and_clear_dirty (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-#ifdef CONFIG_SMP
-	if (!pte_dirty(*ptep))
-		return 0;
-	return test_and_clear_bit(_PAGE_D_BIT, ptep);
-#else
-	pte_t pte = *ptep;
-	if (!pte_dirty(pte))
-		return 0;
-	set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
-	return 1;
-#endif
-}
-
 static inline pte_t
 ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -543,8 +527,10 @@
 # define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
 ({									\
 	int __changed = !pte_same(*(__ptep), __entry);			\
-	if (__changed)							\
-		ptep_establish(__vma, __addr, __ptep, __entry);		\
+	if (__changed) {						\
+		set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry);	\
+		flush_tlb_page(__vma, __addr);				\
+	}								\
 	__changed;							\
 })
 #endif
@@ -588,7 +574,6 @@
 #endif
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index db81ba4..6251c764 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -295,9 +295,9 @@
 	regs->ar_bspstore = current->thread.rbs_bot;						\
 	regs->ar_fpsr = FPSR_DEFAULT;								\
 	regs->loadrs = 0;									\
-	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */		\
+	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */		\
 	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
-	if (unlikely(!current->mm->dumpable)) {							\
+	if (unlikely(!get_dumpable(current->mm))) {							\
 		/*										\
 		 * Zap scratch regs to avoid leaking bits between processes with different	\
 		 * uid/privileges.								\
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 384fbf7..91bb8e0 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -259,7 +259,6 @@
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 
 void cpu_idle_wait(void);
-void sched_cacheflush(void);
 
 #define arch_align_stack(x) (x)
 
diff --git a/include/asm-ia64/termbits.h b/include/asm-ia64/termbits.h
index 7fae310..9f162e0 100644
--- a/include/asm-ia64/termbits.h
+++ b/include/asm-ia64/termbits.h
@@ -149,6 +149,7 @@
 #define HUPCL	0002000
 #define CLOCAL	0004000
 #define CBAUDEX 0010000
+#define    BOTHER 0010000
 #define    B57600 0010001
 #define   B115200 0010002
 #define   B230400 0010003
@@ -164,10 +165,12 @@
 #define  B3000000 0010015
 #define  B3500000 0010016
 #define  B4000000 0010017
-#define CIBAUD	  002003600000	/* input baud rate (not used) */
+#define CIBAUD	  002003600000		/* input baud rate */
 #define CMSPAR	  010000000000		/* mark or space (stick) parity */
 #define CRTSCTS	  020000000000		/* flow control */
 
+#define IBSHIFT	16		/* Shift from CBAUD to CIBAUD */
+
 /* c_lflag bits */
 #define ISIG	0000001
 #define ICANON	0000002
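The BOTHER bit above, together with the TCGETS2/TCSETS2 ioctls added to asm-ia64/ioctls.h earlier in this series, is what lets user space ask for an arbitrary baud rate instead of one of the fixed Bxxxx constants. A user-space sketch, assuming struct termios2 is exported by <asm/termbits.h> as part of the same series:

	/* Sketch only: program a non-standard baud rate via termios2. */
	#include <sys/ioctl.h>
	#include <asm/termbits.h>

	static int demo_set_custom_baud(int fd, unsigned int speed)
	{
		struct termios2 tio;

		if (ioctl(fd, TCGETS2, &tio) < 0)
			return -1;
		tio.c_cflag &= ~CBAUD;
		tio.c_cflag |= BOTHER;
		tio.c_ispeed = speed;
		tio.c_ospeed = speed;
		return ioctl(fd, TCSETS2, &tio);
	}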
diff --git a/include/asm-ia64/termios.h b/include/asm-ia64/termios.h
index 08750c2..689d218 100644
--- a/include/asm-ia64/termios.h
+++ b/include/asm-ia64/termios.h
@@ -87,8 +87,10 @@
 	copy_to_user((termio)->c_cc, (termios)->c_cc, NCC);	\
 })
 
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
 
 # endif /* __KERNEL__ */
 
diff --git a/include/asm-ia64/ustack.h b/include/asm-ia64/ustack.h
index a349467..504167c 100644
--- a/include/asm-ia64/ustack.h
+++ b/include/asm-ia64/ustack.h
@@ -11,6 +11,7 @@
 /* The absolute hard limit for stack size is 1/2 of the mappable space in the region */
 #define MAX_USER_STACK_SIZE	(RGN_MAP_LIMIT/2)
 #define STACK_TOP		(0x6000000000000000UL + RGN_MAP_LIMIT)
+#define STACK_TOP_MAX		STACK_TOP
 #endif
 
 /* Make a default stack size of 2GiB */
diff --git a/include/asm-m32r/a.out.h b/include/asm-m32r/a.out.h
index 9a4a5d2..6a1b5d4 100644
--- a/include/asm-m32r/a.out.h
+++ b/include/asm-m32r/a.out.h
@@ -20,6 +20,7 @@
 #ifdef __KERNEL__
 
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 
 #endif
 
diff --git a/include/asm-m32r/fb.h b/include/asm-m32r/fb.h
new file mode 100644
index 0000000..d92e99c
--- /dev/null
+++ b/include/asm-m32r/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-m32r/page.h b/include/asm-m32r/page.h
index 6f6ecf7..04fd183 100644
--- a/include/asm-m32r/page.h
+++ b/include/asm-m32r/page.h
@@ -15,7 +15,8 @@
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 /*
diff --git a/include/asm-m32r/pgtable.h b/include/asm-m32r/pgtable.h
index 35af58c..92d7266 100644
--- a/include/asm-m32r/pgtable.h
+++ b/include/asm-m32r/pgtable.h
@@ -250,11 +250,6 @@
 	return pte;
 }
 
-static inline  int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
-}
-
 static inline  int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
@@ -348,7 +343,6 @@
 		remap_pfn_range(vma, vaddr, pfn, size, prot)
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 8ee73d3..2365de5 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -54,16 +54,6 @@
 	); \
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 /* Interrupt Control */
 #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
 #define local_irq_enable() \
diff --git a/include/asm-m68k/a.out.h b/include/asm-m68k/a.out.h
index eda16627..6fc86a2 100644
--- a/include/asm-m68k/a.out.h
+++ b/include/asm-m68k/a.out.h
@@ -20,6 +20,7 @@
 #ifdef __KERNEL__
 
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 
 #endif
 
diff --git a/include/asm-m68k/fb.h b/include/asm-m68k/fb.h
new file mode 100644
index 0000000..380b97a
--- /dev/null
+++ b/include/asm-m68k/fb.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+
+#ifdef CONFIG_SUN3
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
+}
+#else
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	if (CPU_IS_020_OR_030)
+		pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
+	if (CPU_IS_040_OR_060) {
+		pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
+		/* Use no-cache mode, serialized */
+		pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S;
+	}
+}
+#endif /* CONFIG_SUN3 */
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-m68knommu/fb.h b/include/asm-m68knommu/fb.h
new file mode 100644
index 0000000..c7df380
--- /dev/null
+++ b/include/asm-m68knommu/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-m68knommu/irq.h b/include/asm-m68knommu/irq.h
index 7b8f874..9373c31 100644
--- a/include/asm-m68knommu/irq.h
+++ b/include/asm-m68knommu/irq.h
@@ -1,7 +1,5 @@
-#ifndef _M68K_IRQ_H_
-#define _M68K_IRQ_H_
-
-#include <asm/ptrace.h>
+#ifndef _M68KNOMMU_IRQ_H_
+#define _M68KNOMMU_IRQ_H_
 
 #ifdef CONFIG_COLDFIRE
 /*
@@ -17,75 +15,12 @@
 /*
  * # of m68k interrupts
  */
-#define SYS_IRQS 8
-#define NR_IRQS (24+SYS_IRQS)
+#define SYS_IRQS	8
+#define NR_IRQS		(24 + SYS_IRQS)
 
 #endif /* CONFIG_COLDFIRE */
 
-/*
- * Interrupt source definitions
- * General interrupt sources are the level 1-7.
- * Adding an interrupt service routine for one of these sources
- * results in the addition of that routine to a chain of routines.
- * Each one is called in succession.  Each individual interrupt
- * service routine should determine if the device associated with
- * that routine requires service.
- */
 
-#define IRQ1		(1)	/* level 1 interrupt */
-#define IRQ2		(2)	/* level 2 interrupt */
-#define IRQ3		(3)	/* level 3 interrupt */
-#define IRQ4		(4)	/* level 4 interrupt */
-#define IRQ5		(5)	/* level 5 interrupt */
-#define IRQ6		(6)	/* level 6 interrupt */
-#define IRQ7		(7)	/* level 7 interrupt (non-maskable) */
-
-/*
- * Machine specific interrupt sources.
- *
- * Adding an interrupt service routine for a source with this bit
- * set indicates a special machine specific interrupt source.
- * The machine specific files define these sources.
- *
- * The IRQ_MACHSPEC bit is now gone - the only thing it did was to
- * introduce unnecessary overhead.
- *
- * All interrupt handling is actually machine specific so it is better
- * to use function pointers, as used by the Sparc port, and select the
- * interrupt handling functions when initializing the kernel. This way
- * we save some unnecessary overhead at run-time. 
- *                                                      01/11/97 - Jes
- */
-
-extern void (*mach_enable_irq)(unsigned int);
-extern void (*mach_disable_irq)(unsigned int);
-
-/*
- * various flags for request_irq() - the Amiga now uses the standard
- * mechanism like all other architectures - IRQF_DISABLED and
- * IRQF_SHARED are your friends.
- */
-#define IRQ_FLG_LOCK	(0x0001)	/* handler is not replaceable	*/
-#define IRQ_FLG_REPLACE	(0x0002)	/* replace existing handler	*/
-#define IRQ_FLG_FAST	(0x0004)
-#define IRQ_FLG_SLOW	(0x0008)
-#define IRQ_FLG_STD	(0x8000)	/* internally used		*/
-
-#ifdef CONFIG_M68360
-
-#define CPM_INTERRUPT    IRQ4
-
-/* see MC68360 User's Manual, p. 7-377  */
-#define CPM_VECTOR_BASE  0x04           /* 3 MSbits of CPM vector */
-
-#endif /* CONFIG_M68360 */
-
-/*
- * Some drivers want these entry points
- */
-#define enable_irq(x)	do { } while (0)
-#define disable_irq(x)	do { } while (0)
-#define disable_irq_nosync(x)	disable_irq(x)
 #define irq_canonicalize(irq)	(irq)
 
-#endif /* _M68K_IRQ_H_ */
+#endif /* _M68KNOMMU_IRQ_H_ */
diff --git a/include/asm-m68knommu/irqnode.h b/include/asm-m68knommu/irqnode.h
deleted file mode 100644
index 6132a98..0000000
--- a/include/asm-m68knommu/irqnode.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef _M68K_IRQNODE_H_
-#define _M68K_IRQNODE_H_
-
-#include <linux/interrupt.h>
-
-/*
- * This structure is used to chain together the ISRs for a particular
- * interrupt source (if it supports chaining).
- */
-typedef struct irq_node {
-	irq_handler_t	handler;
-	unsigned long	flags;
-	void		*dev_id;
-	const char	*devname;
-	struct irq_node *next;
-} irq_node_t;
-
-/*
- * This structure has only 4 elements for speed reasons
- */
-struct irq_entry {
-	irq_handler_t	handler;
-	unsigned long	flags;
-	void		*dev_id;
-	const char	*devname;
-};
-
-/* count of spurious interrupts */
-extern volatile unsigned int num_spurious;
-
-/*
- * This function returns a new irq_node_t
- */
-extern irq_node_t *new_irq_node(void);
-
-#endif /* _M68K_IRQNODE_H_ */
diff --git a/include/asm-m68knommu/m68360.h b/include/asm-m68knommu/m68360.h
index dd11b07..eb7d39e 100644
--- a/include/asm-m68knommu/m68360.h
+++ b/include/asm-m68knommu/m68360.h
@@ -3,3 +3,11 @@
 #include "m68360_quicc.h"
 #include "m68360_enet.h"
 
+#ifdef CONFIG_M68360
+
+#define CPM_INTERRUPT    4
+
+/* see MC68360 User's Manual, p. 7-377  */
+#define CPM_VECTOR_BASE  0x04           /* 3 MSbits of CPM vector */
+
+#endif /* CONFIG_M68360 */
diff --git a/include/asm-m68knommu/page.h b/include/asm-m68knommu/page.h
index 2a1b8bd..9efa0a9 100644
--- a/include/asm-m68knommu/page.h
+++ b/include/asm-m68knommu/page.h
@@ -22,7 +22,8 @@
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 /*
diff --git a/include/asm-m68knommu/pgtable.h b/include/asm-m68knommu/pgtable.h
index 9dfbbc2..e1e6a1d 100644
--- a/include/asm-m68knommu/pgtable.h
+++ b/include/asm-m68knommu/pgtable.h
@@ -49,7 +49,6 @@
  * These would be in other places but having them here reduces the diffs.
  */
 extern unsigned int kobjsize(const void *objp);
-extern int is_in_rom(unsigned long);
 
 /*
  * No page table caches to initialise.
diff --git a/include/asm-m68knommu/traps.h b/include/asm-m68knommu/traps.h
index f2a8131..d0671e5 100644
--- a/include/asm-m68knommu/traps.h
+++ b/include/asm-m68knommu/traps.h
@@ -16,6 +16,10 @@
 typedef void (*e_vector)(void);
 
 extern e_vector vectors[];
+extern void init_vectors(void);
+extern void enable_vector(unsigned int irq);
+extern void disable_vector(unsigned int irq);
+extern void ack_vector(unsigned int irq);
 
 #endif
 
diff --git a/include/asm-m68knommu/uaccess.h b/include/asm-m68knommu/uaccess.h
index 62b29b1..9ed9169 100644
--- a/include/asm-m68knommu/uaccess.h
+++ b/include/asm-m68knommu/uaccess.h
@@ -15,12 +15,15 @@
 
 #define access_ok(type,addr,size)	_access_ok((unsigned long)(addr),(size))
 
+/*
+ * It is not enough to just have access_ok check for a real RAM address.
+ * This would disallow the case of code/ro-data running XIP in flash/rom.
+ * Ideally we would check the possible flash ranges too, but that is
+ * currently not so easy.
+ */
 static inline int _access_ok(unsigned long addr, unsigned long size)
 {
-	extern unsigned long memory_start, memory_end;
-
-	return (((addr >= memory_start) && (addr+size < memory_end)) ||
-		(is_in_rom(addr) && is_in_rom(addr+size)));
+	return 1;
 }
 
 /*
diff --git a/include/asm-mips/a.out.h b/include/asm-mips/a.out.h
index ef33c3f..1ad60ba 100644
--- a/include/asm-mips/a.out.h
+++ b/include/asm-mips/a.out.h
@@ -40,6 +40,7 @@
 #ifdef CONFIG_64BIT
 #define STACK_TOP	(current->thread.mflags & MF_32BIT_ADDR ? TASK_SIZE32 : TASK_SIZE)
 #endif
+#define STACK_TOP_MAX	TASK_SIZE
 
 #endif
 
diff --git a/include/asm-mips/dec/serial.h b/include/asm-mips/dec/serial.h
deleted file mode 100644
index acad758..0000000
--- a/include/asm-mips/dec/serial.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- *	include/asm-mips/dec/serial.h
- *
- *	Definitions common to all DECstation serial devices.
- *
- *	Copyright (C) 2004  Maciej W. Rozycki
- *
- *	Based on bits extracted from drivers/tc/zs.h for which
- *	the following copyrights apply:
- *
- *	Copyright (C) 1995  David S. Miller (davem@caip.rutgers.edu)
- *	Copyright (C) 1996  Paul Mackerras (Paul.Mackerras@cs.anu.edu.au)
- *	Copyright (C)       Harald Koerfgen
- *
- *	This program is free software; you can redistribute it and/or
- *	modify it under the terms of the GNU General Public License
- *	as published by the Free Software Foundation; either version
- *	2 of the License, or (at your option) any later version.
- */
-#ifndef __ASM_MIPS_DEC_SERIAL_H
-#define __ASM_MIPS_DEC_SERIAL_H
-
-struct dec_serial_hook {
-	int (*init_channel)(void *handle);
-	void (*init_info)(void *handle);
-	void (*rx_char)(unsigned char ch, unsigned char fl);
-	int (*poll_rx_char)(void *handle);
-	int (*poll_tx_char)(void *handle, unsigned char ch);
-	unsigned int cflags;
-};
-
-extern int register_dec_serial_hook(unsigned int channel,
-				    struct dec_serial_hook *hook);
-extern int unregister_dec_serial_hook(unsigned int channel);
-
-#endif /* __ASM_MIPS_DEC_SERIAL_H */
diff --git a/include/asm-mips/fb.h b/include/asm-mips/fb.h
new file mode 100644
index 0000000..bd3f68c
--- /dev/null
+++ b/include/asm-mips/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-mips/sibyte/bcm1480_regs.h b/include/asm-mips/sibyte/bcm1480_regs.h
index bda391d..2738c13 100644
--- a/include/asm-mips/sibyte/bcm1480_regs.h
+++ b/include/asm-mips/sibyte/bcm1480_regs.h
@@ -220,17 +220,25 @@
 #define A_BCM1480_DUART(chan)               ((((chan)&2) == 0)? A_BCM1480_DUART0 : A_BCM1480_DUART1)
 
 #define BCM1480_DUART_CHANREG_SPACING       0x100
-#define A_BCM1480_DUART_CHANREG(chan,reg)   (A_BCM1480_DUART(chan) \
-                                     + BCM1480_DUART_CHANREG_SPACING*((chan)&1) \
-                                     + (reg))
-#define R_BCM1480_DUART_CHANREG(chan,reg)   (BCM1480_DUART_CHANREG_SPACING*((chan)&1) + (reg))
+#define A_BCM1480_DUART_CHANREG(chan, reg)				\
+	(A_BCM1480_DUART(chan) +					\
+	 BCM1480_DUART_CHANREG_SPACING * (((chan) & 1) + 1) + (reg))
+#define A_BCM1480_DUART_CTRLREG(chan, reg)				\
+	(A_BCM1480_DUART(chan) +					\
+	 BCM1480_DUART_CHANREG_SPACING * 3 + (reg))
 
-#define R_BCM1480_DUART_IMRREG(chan)	    (R_DUART_IMR_A + ((chan)&1)*DUART_IMRISR_SPACING)
-#define R_BCM1480_DUART_ISRREG(chan)	    (R_DUART_ISR_A + ((chan)&1)*DUART_IMRISR_SPACING)
+#define R_BCM1480_DUART_IMRREG(chan)					\
+	(R_DUART_IMR_A + ((chan) & 1) * DUART_IMRISR_SPACING)
+#define R_BCM1480_DUART_ISRREG(chan)					\
+	(R_DUART_ISR_A + ((chan) & 1) * DUART_IMRISR_SPACING)
 
-#define A_BCM1480_DUART_IMRREG(chan)	    (A_BCM1480_DUART(chan) + R_BCM1480_DUART_IMRREG(chan))
-#define A_BCM1480_DUART_ISRREG(chan)	    (A_BCM1480_DUART(chan) + R_BCM1480_DUART_ISRREG(chan))
-#define A_BCM1480_DUART_IN_PORT(chan)       (A_BCM1480_DUART(chan) + R_DUART_INP_ORT)
+#define A_BCM1480_DUART_IMRREG(chan)					\
+	(A_BCM1480_DUART_CTRLREG((chan), R_BCM1480_DUART_IMRREG(chan)))
+#define A_BCM1480_DUART_ISRREG(chan)					\
+	(A_BCM1480_DUART_CTRLREG((chan), R_BCM1480_DUART_ISRREG(chan)))
+
+#define A_BCM1480_DUART_IN_PORT(chan)					\
+	(A_BCM1480_DUART_CTRLREG((chan), R_DUART_IN_PORT))
 
 /*
  * These constants are the absolute addresses.
diff --git a/include/asm-mips/sibyte/sb1250_regs.h b/include/asm-mips/sibyte/sb1250_regs.h
index da7c188..220b7e9 100644
--- a/include/asm-mips/sibyte/sb1250_regs.h
+++ b/include/asm-mips/sibyte/sb1250_regs.h
@@ -272,59 +272,69 @@
     ********************************************************************* */
 
 
-#if SIBYTE_HDR_FEATURE_1250_112x		/* This MC only on 1250 & 112x */
+#if SIBYTE_HDR_FEATURE_1250_112x    /* This MC only on 1250 & 112x */
 #define R_DUART_NUM_PORTS           2
 
 #define A_DUART                     0x0010060000
 
 #define DUART_CHANREG_SPACING       0x100
-#define A_DUART_CHANREG(chan,reg)   (A_DUART + DUART_CHANREG_SPACING*(chan) + (reg))
-#define R_DUART_CHANREG(chan,reg)   (DUART_CHANREG_SPACING*(chan) + (reg))
+
+#define A_DUART_CHANREG(chan, reg)					\
+	(A_DUART + DUART_CHANREG_SPACING * ((chan) + 1) + (reg))
 #endif	/* 1250 & 112x */
 
-#define R_DUART_MODE_REG_1	    0x100
-#define R_DUART_MODE_REG_2	    0x110
-#define R_DUART_STATUS              0x120
-#define R_DUART_CLK_SEL             0x130
-#define R_DUART_CMD                 0x150
-#define R_DUART_RX_HOLD             0x160
-#define R_DUART_TX_HOLD             0x170
+#define R_DUART_MODE_REG_1	    0x000
+#define R_DUART_MODE_REG_2	    0x010
+#define R_DUART_STATUS		    0x020
+#define R_DUART_CLK_SEL		    0x030
+#define R_DUART_CMD		    0x050
+#define R_DUART_RX_HOLD		    0x060
+#define R_DUART_TX_HOLD		    0x070
 
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define R_DUART_FULL_CTL	    0x140
-#define R_DUART_OPCR_X		    0x180
-#define R_DUART_AUXCTL_X	    0x190
-#endif /* 1250 PASS2 || 112x PASS1 || 1480*/
+#define R_DUART_FULL_CTL	    0x040
+#define R_DUART_OPCR_X		    0x080
+#define R_DUART_AUXCTL_X	    0x090
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
 
 /*
  * The IMR and ISR can't be addressed with A_DUART_CHANREG,
- * so use this macro instead.
+ * so use these macros instead.
  */
 
-#define R_DUART_AUX_CTRL            0x310
-#define R_DUART_ISR_A               0x320
-#define R_DUART_IMR_A               0x330
-#define R_DUART_ISR_B               0x340
-#define R_DUART_IMR_B               0x350
-#define R_DUART_OUT_PORT            0x360
-#define R_DUART_OPCR                0x370
-#define R_DUART_IN_PORT             0x380
+#if SIBYTE_HDR_FEATURE_1250_112x    /* This MC only on 1250 & 112x */
+#define DUART_IMRISR_SPACING	    0x20
+#define DUART_INCHNG_SPACING	    0x10
 
-#define R_DUART_SET_OPR		    0x3B0
-#define R_DUART_CLEAR_OPR	    0x3C0
+#define A_DUART_CTRLREG(reg)						\
+	(A_DUART + DUART_CHANREG_SPACING * 3 + (reg))
 
-#define DUART_IMRISR_SPACING        0x20
+#define R_DUART_IMRREG(chan)						\
+	(R_DUART_IMR_A + (chan) * DUART_IMRISR_SPACING)
+#define R_DUART_ISRREG(chan)						\
+	(R_DUART_ISR_A + (chan) * DUART_IMRISR_SPACING)
+#define R_DUART_INCHREG(chan)						\
+	(R_DUART_IN_CHNG_A + (chan) * DUART_INCHNG_SPACING)
 
-#if SIBYTE_HDR_FEATURE_1250_112x		/* This MC only on 1250 & 112x */
-#define R_DUART_IMRREG(chan)	    (R_DUART_IMR_A + (chan)*DUART_IMRISR_SPACING)
-#define R_DUART_ISRREG(chan)	    (R_DUART_ISR_A + (chan)*DUART_IMRISR_SPACING)
-
-#define A_DUART_IMRREG(chan)	    (A_DUART + R_DUART_IMRREG(chan))
-#define A_DUART_ISRREG(chan)	    (A_DUART + R_DUART_ISRREG(chan))
+#define A_DUART_IMRREG(chan)	    A_DUART_CTRLREG(R_DUART_IMRREG(chan))
+#define A_DUART_ISRREG(chan)	    A_DUART_CTRLREG(R_DUART_ISRREG(chan))
+#define A_DUART_INCHREG(chan)	    A_DUART_CTRLREG(R_DUART_INCHREG(chan))
 #endif	/* 1250 & 112x */
 
+#define R_DUART_AUX_CTRL	    0x010
+#define R_DUART_ISR_A		    0x020
+#define R_DUART_IMR_A		    0x030
+#define R_DUART_ISR_B		    0x040
+#define R_DUART_IMR_B		    0x050
+#define R_DUART_OUT_PORT	    0x060
+#define R_DUART_OPCR		    0x070
+#define R_DUART_IN_PORT		    0x080
 
+#define R_DUART_SET_OPR		    0x0B0
+#define R_DUART_CLEAR_OPR	    0x0C0
+#define R_DUART_IN_CHNG_A	    0x0D0
+#define R_DUART_IN_CHNG_B	    0x0E0
 
 
 /*
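
With the reshuffled definitions above, the per-channel register offsets are now
channel-relative (0x000-0x090) and A_DUART_CHANREG() adds the channel bank
explicitly.  A quick worked example using only the values defined above:

	/* Channel 0 status register, absolute address on a 1250/112x:
	 *   A_DUART_CHANREG(0, R_DUART_STATUS)
	 *     = A_DUART + DUART_CHANREG_SPACING * (0 + 1) + 0x020
	 *     = 0x0010060000 + 0x100 + 0x020
	 *     = 0x0010060120
	 * the same address the old encoding produced
	 * (A_DUART + the old R_DUART_STATUS of 0x120). */
	unsigned long duart0_status = A_DUART_CHANREG(0, R_DUART_STATUS);
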
diff --git a/include/asm-mips/sibyte/sb1250_uart.h b/include/asm-mips/sibyte/sb1250_uart.h
index e87045e..cf74fed 100644
--- a/include/asm-mips/sibyte/sb1250_uart.h
+++ b/include/asm-mips/sibyte/sb1250_uart.h
@@ -75,7 +75,8 @@
 #define V_DUART_PARITY_MODE_ADD_FIXED V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD_FIXED)
 #define V_DUART_PARITY_MODE_NONE      V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_NONE)
 
-#define M_DUART_ERR_MODE            _SB_MAKEMASK1(5)    /* must be zero */
+#define M_DUART_TX_IRQ_SEL_TXRDY    0
+#define M_DUART_TX_IRQ_SEL_TXEMPT   _SB_MAKEMASK1(5)
 
 #define M_DUART_RX_IRQ_SEL_RXRDY    0
 #define M_DUART_RX_IRQ_SEL_RXFULL   _SB_MAKEMASK1(6)
@@ -246,10 +247,13 @@
 
 #define M_DUART_ISR_BRK_A           _SB_MAKEMASK1(2)
 #define M_DUART_ISR_IN_A            _SB_MAKEMASK1(3)
+#define M_DUART_ISR_ALL_A	    _SB_MAKEMASK(4,0)
+
 #define M_DUART_ISR_TX_B            _SB_MAKEMASK1(4)
 #define M_DUART_ISR_RX_B            _SB_MAKEMASK1(5)
 #define M_DUART_ISR_BRK_B           _SB_MAKEMASK1(6)
 #define M_DUART_ISR_IN_B            _SB_MAKEMASK1(7)
+#define M_DUART_ISR_ALL_B	    _SB_MAKEMASK(4,4)
 
 /*
  * DUART Channel A Interrupt Status Register (Table 10-17)
@@ -262,6 +266,7 @@
 #define M_DUART_ISR_RX              _SB_MAKEMASK1(1)
 #define M_DUART_ISR_BRK             _SB_MAKEMASK1(2)
 #define M_DUART_ISR_IN              _SB_MAKEMASK1(3)
+#define M_DUART_ISR_ALL		    _SB_MAKEMASK(4,0)
 #define M_DUART_ISR_RESERVED        _SB_MAKEMASK(4,4)
 
 /*
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 46bdb3f..7633916 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -71,16 +71,6 @@
 		write_c0_userlocal(task_thread_info(current)->tp_value);\
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
 	__u32 retval;
diff --git a/include/asm-parisc/a.out.h b/include/asm-parisc/a.out.h
index 2a490cc..23e2c90 100644
--- a/include/asm-parisc/a.out.h
+++ b/include/asm-parisc/a.out.h
@@ -23,6 +23,7 @@
  * prumpf */
 
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	DEFAULT_TASK_SIZE
 
 #endif
 
diff --git a/include/asm-parisc/fb.h b/include/asm-parisc/fb.h
new file mode 100644
index 0000000..4d503a0
--- /dev/null
+++ b/include/asm-parisc/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index 7e222c8..e88cacd 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -447,21 +447,6 @@
 #endif
 }
 
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-#ifdef CONFIG_SMP
-	if (!pte_dirty(*ptep))
-		return 0;
-	return test_and_clear_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep));
-#else
-	pte_t pte = *ptep;
-	if (!pte_dirty(pte))
-		return 0;
-	set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
-	return 1;
-#endif
-}
-
 extern spinlock_t pa_dbit_lock;
 
 struct mm_struct;
@@ -529,7 +514,6 @@
 #define HAVE_ARCH_UNMAPPED_AREA
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index 21fbfc5..ee80c92 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -48,17 +48,6 @@
 	(last) = _switch_to(prev, next);			\
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-
 /* interrupt control */
 #define local_save_flags(x)	__asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory")
 #define local_irq_disable()	__asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
diff --git a/include/asm-powerpc/a.out.h b/include/asm-powerpc/a.out.h
index c7393a9..5c5ea83 100644
--- a/include/asm-powerpc/a.out.h
+++ b/include/asm-powerpc/a.out.h
@@ -26,9 +26,12 @@
 #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
 		   STACK_TOP_USER32 : STACK_TOP_USER64)
 
+#define STACK_TOP_MAX STACK_TOP_USER64
+
 #else /* __powerpc64__ */
 
 #define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 
 #endif /* __powerpc64__ */
 #endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/cache.h b/include/asm-powerpc/cache.h
index 642be62..5350704 100644
--- a/include/asm-powerpc/cache.h
+++ b/include/asm-powerpc/cache.h
@@ -34,5 +34,9 @@
 extern struct ppc64_caches ppc64_caches;
 #endif /* __powerpc64__ && ! __ASSEMBLY__ */
 
+#if !defined(__ASSEMBLY__)
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_CACHE_H */
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 82d595a..3dc8e2d 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -111,7 +111,7 @@
 /* CPU kernel features */
 
 /* Retain the 32b definitions all use bottom half of word */
-#define CPU_FTR_SPLIT_ID_CACHE		ASM_CONST(0x0000000000000001)
+#define CPU_FTR_COHERENT_ICACHE		ASM_CONST(0x0000000000000001)
 #define CPU_FTR_L2CR			ASM_CONST(0x0000000000000002)
 #define CPU_FTR_SPEC7450		ASM_CONST(0x0000000000000004)
 #define CPU_FTR_ALTIVEC			ASM_CONST(0x0000000000000008)
@@ -135,6 +135,7 @@
 #define CPU_FTR_PPC_LE			ASM_CONST(0x0000000000200000)
 #define CPU_FTR_REAL_LE			ASM_CONST(0x0000000000400000)
 #define CPU_FTR_FPU_UNAVAILABLE		ASM_CONST(0x0000000000800000)
+#define CPU_FTR_UNIFIED_ID_CACHE	ASM_CONST(0x0000000001000000)
 
 /*
  * Add the 64-bit processor unique features in the top half of the word;
@@ -154,7 +155,6 @@
 #define CPU_FTR_MMCRA			LONG_ASM_CONST(0x0000004000000000)
 #define CPU_FTR_CTRL			LONG_ASM_CONST(0x0000008000000000)
 #define CPU_FTR_SMT			LONG_ASM_CONST(0x0000010000000000)
-#define CPU_FTR_COHERENT_ICACHE		LONG_ASM_CONST(0x0000020000000000)
 #define CPU_FTR_LOCKLESS_TLBIE		LONG_ASM_CONST(0x0000040000000000)
 #define CPU_FTR_CI_LARGE_PAGE		LONG_ASM_CONST(0x0000100000000000)
 #define CPU_FTR_PAUSE_ZERO		LONG_ASM_CONST(0x0000200000000000)
@@ -206,164 +206,149 @@
 		     !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
 		     !defined(CONFIG_BOOKE))
 
-#define CPU_FTRS_PPC601	(CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE)
-#define CPU_FTRS_603	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_PPC601	(CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE | \
+	CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE)
+#define CPU_FTRS_603	(CPU_FTR_COMMON | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
-#define CPU_FTRS_604	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_604	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE | \
 	    CPU_FTR_PPC_LE)
-#define CPU_FTRS_740_NOTAU	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_740_NOTAU	(CPU_FTR_COMMON | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
-#define CPU_FTRS_740	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_740	(CPU_FTR_COMMON | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
 	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
 	    CPU_FTR_PPC_LE)
-#define CPU_FTRS_750	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_750	(CPU_FTR_COMMON | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
 	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
 	    CPU_FTR_PPC_LE)
-#define CPU_FTRS_750CL	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
-	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
-	    CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
-#define CPU_FTRS_750FX1	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
-	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
-	    CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM | CPU_FTR_PPC_LE)
-#define CPU_FTRS_750FX2	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
-	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
-	    CPU_FTR_NO_DPM | CPU_FTR_PPC_LE)
-#define CPU_FTRS_750FX	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
-	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
-	    CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
-#define CPU_FTRS_750GX	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
-	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
-	    CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
-#define CPU_FTRS_7400_NOTAU	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_750CL	(CPU_FTRS_750 | CPU_FTR_HAS_HIGH_BATS)
+#define CPU_FTRS_750FX1	(CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM)
+#define CPU_FTRS_750FX2	(CPU_FTRS_750 | CPU_FTR_NO_DPM)
+#define CPU_FTRS_750FX	(CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | \
+		CPU_FTR_HAS_HIGH_BATS)
+#define CPU_FTRS_750GX	(CPU_FTRS_750FX)
+#define CPU_FTRS_7400_NOTAU	(CPU_FTR_COMMON | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
-#define CPU_FTRS_7400	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_7400	(CPU_FTR_COMMON | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
 	    CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
-#define CPU_FTRS_7450_20	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_7450_20	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
-#define CPU_FTRS_7450_21	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_7450_21	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
 	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
-#define CPU_FTRS_7450_23	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_7450_23	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
-#define CPU_FTRS_7455_1	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_7455_1	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS | \
 	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
-#define CPU_FTRS_7455_20	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_7455_20	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
 	    CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
-#define CPU_FTRS_7455	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_7455	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
 	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
-#define CPU_FTRS_7447_10	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_7447_10	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
 	    CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE)
-#define CPU_FTRS_7447	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_7447	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
 	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
-#define CPU_FTRS_7447A	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_7447A	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
 	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
-#define CPU_FTRS_7448	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_7448	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
 	    CPU_FTR_PPC_LE)
-#define CPU_FTRS_82XX	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_82XX	(CPU_FTR_COMMON | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB)
-#define CPU_FTRS_G2_LE	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \
+#define CPU_FTRS_G2_LE	(CPU_FTR_MAYBE_CAN_DOZE | \
 	    CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS)
-#define CPU_FTRS_E300	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \
+#define CPU_FTRS_E300	(CPU_FTR_MAYBE_CAN_DOZE | \
 	    CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
 	    CPU_FTR_COMMON)
-#define CPU_FTRS_E300C2	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \
+#define CPU_FTRS_E300C2	(CPU_FTR_MAYBE_CAN_DOZE | \
 	    CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
 	    CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
-#define CPU_FTRS_CLASSIC32	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+#define CPU_FTRS_CLASSIC32	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE)
-#define CPU_FTRS_8XX	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB)
-#define CPU_FTRS_40X	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
-	    CPU_FTR_NODSISRALIGN)
-#define CPU_FTRS_44X	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
-	    CPU_FTR_NODSISRALIGN)
-#define CPU_FTRS_E200	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN)
-#define CPU_FTRS_E500	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
-	    CPU_FTR_NODSISRALIGN)
-#define CPU_FTRS_E500_2	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
+#define CPU_FTRS_8XX	(CPU_FTR_USE_TB)
+#define CPU_FTRS_40X	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN)
+#define CPU_FTRS_44X	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN)
+#define CPU_FTRS_E200	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
+	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE)
+#define CPU_FTRS_E500	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN)
+#define CPU_FTRS_E500_2	(CPU_FTR_USE_TB | \
 	    CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN)
 #define CPU_FTRS_GENERIC_32	(CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
-#define CPU_FTRS_POWER3	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER3	(CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE)
-#define CPU_FTRS_RS64	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
+#define CPU_FTRS_RS64	(CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \
 	    CPU_FTR_MMCRA | CPU_FTR_CTRL)
-#define CPU_FTRS_POWER4	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER4	(CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA)
-#define CPU_FTRS_PPC970	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
+#define CPU_FTRS_PPC970	(CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA)
-#define CPU_FTRS_POWER5	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER5	(CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR)
-#define CPU_FTRS_POWER6 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR)
-#define CPU_FTRS_CELL	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
+#define CPU_FTRS_CELL	(CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG)
-#define CPU_FTRS_PA6T (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
+#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
 	    CPU_FTR_PURR | CPU_FTR_REAL_LE)
-#define CPU_FTRS_COMPATIBLE	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
+#define CPU_FTRS_COMPATIBLE	(CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
 
 #ifdef __powerpc64__
diff --git a/include/asm-powerpc/fb.h b/include/asm-powerpc/fb.h
new file mode 100644
index 0000000..411af8d
--- /dev/null
+++ b/include/asm-powerpc/fb.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
+						 vma->vm_end - vma->vm_start,
+						 vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-powerpc/floppy.h b/include/asm-powerpc/floppy.h
index afa700d..34146f0 100644
--- a/include/asm-powerpc/floppy.h
+++ b/include/asm-powerpc/floppy.h
@@ -29,7 +29,7 @@
 #define fd_free_irq()           free_irq(FLOPPY_IRQ, NULL);
 
 #include <linux/pci.h>
-#include <asm/ppc-pci.h>	/* for ppc64_isabridge_dev */
+#include <asm/ppc-pci.h>	/* for isa_bridge_pcidev */
 
 #define fd_dma_setup(addr,size,mode,io) fd_ops->_dma_setup(addr,size,mode,io)
 
@@ -139,12 +139,12 @@
 	if (bus_addr 
 	    && (addr != prev_addr || size != prev_size || dir != prev_dir)) {
 		/* different from last time -- unmap prev */
-		pci_unmap_single(ppc64_isabridge_dev, bus_addr, prev_size, prev_dir);
+		pci_unmap_single(isa_bridge_pcidev, bus_addr, prev_size, prev_dir);
 		bus_addr = 0;
 	}
 
 	if (!bus_addr)	/* need to map it */
-		bus_addr = pci_map_single(ppc64_isabridge_dev, addr, size, dir);
+		bus_addr = pci_map_single(isa_bridge_pcidev, addr, size, dir);
 
 	/* remember this one as prev */
 	prev_addr = addr;
diff --git a/include/asm-powerpc/hvcall.h b/include/asm-powerpc/hvcall.h
index 62efd9d..bf6cd7c 100644
--- a/include/asm-powerpc/hvcall.h
+++ b/include/asm-powerpc/hvcall.h
@@ -206,6 +206,7 @@
 #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
 #define H_QUERY_INT_STATE       0x1E4
 #define H_POLL_PENDING		0x1D8
+#define H_ILLAN_ATTRIBUTES	0x244
 #define H_JOIN			0x298
 #define H_VASI_STATE            0x2A4
 #define H_ENABLE_CRQ		0x2B0
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 350c9bd..bb8d965 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -607,9 +607,9 @@
  *
  * * iounmap undoes such a mapping and can be hooked
  *
- * * __ioremap_explicit (and the pending __iounmap_explicit) are low level
- *   functions to create hand-made mappings for use only by the PCI code
- *   and cannot currently be hooked.
+ * * __ioremap_at (and the pending __iounmap_at) are low level functions to
+ *   create hand-made mappings for use only by the PCI code and cannot
+ *   currently be hooked. Must be page aligned.
  *
  * * __ioremap is the low level implementation used by ioremap and
  *   ioremap_flags and cannot be hooked (but can be used by a hook on one
@@ -629,19 +629,9 @@
 			       unsigned long flags);
 extern void __iounmap(volatile void __iomem *addr);
 
-extern int __ioremap_explicit(phys_addr_t p_addr, unsigned long v_addr,
-		     	      unsigned long size, unsigned long flags);
-extern int __iounmap_explicit(volatile void __iomem *start,
-			      unsigned long size);
-
-extern void __iomem * reserve_phb_iospace(unsigned long size);
-
-/* Those are more 32 bits only functions */
-extern unsigned long iopa(unsigned long addr);
-extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;
-extern void io_block_mapping(unsigned long virt, phys_addr_t phys,
-			     unsigned int size, int flags);
-
+extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
+				   unsigned long size, unsigned long flags);
+extern void __iounmap_at(void *ea, unsigned long size);
 
 /*
  * When CONFIG_PPC_INDIRECT_IO is set, we use the generic iomap implementation
@@ -651,8 +641,8 @@
  */
 #define HAVE_ARCH_PIO_SIZE		1
 #define PIO_OFFSET			0x00000000UL
-#define PIO_MASK			0x3fffffffUL
-#define PIO_RESERVED			0x40000000UL
+#define PIO_MASK			(FULL_IO_SIZE - 1)
+#define PIO_RESERVED			(FULL_IO_SIZE)
 
 #define mmio_read16be(addr)		readw_be(addr)
 #define mmio_read32be(addr)		readl_be(addr)
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index 05dd5a3..0485c53 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -223,6 +223,15 @@
 extern unsigned int irq_find_mapping(struct irq_host *host,
 				     irq_hw_number_t hwirq);
 
+/**
+ * irq_create_direct_mapping - Allocate a virq for direct mapping
+ * @host: host to allocate the virq for or NULL for default host
+ *
+ * This routine is used for irq controllers which can choose the hardware
+ * interrupt numbers they generate. In such a case it's simplest to use
+ * the linux virq as the hardware interrupt number.
+ */
+extern unsigned int irq_create_direct_mapping(struct irq_host *host);
 
 /**
  * irq_radix_revmap - Find a linux virq from a hw irq number.
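
irq_create_direct_mapping() above serves controllers that can program the
hardware interrupt numbers they raise.  A hypothetical use is sketched below;
only irq_create_direct_mapping() and NO_IRQ come from the kernel APIs, while
the controller and its my_ctrl_program_outlet() helper are invented for the
example.

	#include <asm/irq.h>

	extern void my_ctrl_program_outlet(unsigned int virq);	/* hypothetical */

	static unsigned int my_ctrl_alloc_outlet(struct irq_host *host)
	{
		/* ask the core for a free virq; the hw number will equal it */
		unsigned int virq = irq_create_direct_mapping(host);

		if (virq == NO_IRQ)
			return NO_IRQ;

		/* program the (hypothetical) controller to raise this number */
		my_ctrl_program_outlet(virq);
		return virq;
	}
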
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
index b0e40ff..8b08b44 100644
--- a/include/asm-powerpc/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -65,20 +65,18 @@
 		} else if (name[0] != '.')				\
 			addr = *(kprobe_opcode_t **)addr;		\
 	} else {							\
-		char dot_name[KSYM_NAME_LEN+1];				\
+		char dot_name[KSYM_NAME_LEN];				\
 		dot_name[0] = '.';					\
 		dot_name[1] = '\0';					\
-		strncat(dot_name, name, KSYM_NAME_LEN);			\
+		strncat(dot_name, name, KSYM_NAME_LEN - 2);		\
 		addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \
 	}								\
 }
 
-#define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)((func_descr_t *)pentry)
 #define is_trap(instr)	(IS_TW(instr) || IS_TD(instr) || \
 			IS_TWI(instr) || IS_TDI(instr))
 #else
 /* Use stock kprobe_lookup_name since ppc32 doesn't use function descriptors */
-#define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)(pentry)
 #define is_trap(instr)	(IS_TW(instr) || IS_TWI(instr))
 #endif
 
diff --git a/include/asm-powerpc/lppaca.h b/include/asm-powerpc/lppaca.h
index 821ea0c..567ed92 100644
--- a/include/asm-powerpc/lppaca.h
+++ b/include/asm-powerpc/lppaca.h
@@ -98,7 +98,7 @@
 	u64	saved_gpr5;		// Saved GPR5                   x30-x37
 
 	u8	reserved4;		// Reserved			x38-x38
-	u8	cpuctls_task_attrs;	// Task attributes for cpuctls  x39-x39
+	u8	donate_dedicated_cpu;	// Donate dedicated CPU cycles  x39-x39
 	u8	fpregs_in_use;		// FP regs in use               x3A-x3A
 	u8	pmcregs_in_use;		// PMC regs in use              x3B-x3B
 	volatile u32 saved_decr;	// Saved Decr Value             x3C-x3F
diff --git a/include/asm-powerpc/lv1call.h b/include/asm-powerpc/lv1call.h
index f733bee..81713ac 100644
--- a/include/asm-powerpc/lv1call.h
+++ b/include/asm-powerpc/lv1call.h
@@ -238,6 +238,7 @@
 LV1_CALL(configure_irq_state_bitmap,                    3, 0,  11 )
 LV1_CALL(connect_irq_plug_ext,                          5, 0,  12 )
 LV1_CALL(release_memory,                                1, 0,  13 )
+LV1_CALL(put_iopte,                                     5, 0,  15 )
 LV1_CALL(disconnect_irq_plug_ext,                       3, 0,  17 )
 LV1_CALL(construct_event_receive_port,                  0, 1,  18 )
 LV1_CALL(destruct_event_receive_port,                   1, 0,  19 )
@@ -268,6 +269,8 @@
 LV1_CALL(read_htab_entries,                             2, 5,  95 )
 LV1_CALL(set_dabr,                                      2, 0,  96 )
 LV1_CALL(get_total_execution_time,                      2, 1, 103 )
+LV1_CALL(allocate_io_segment,                           3, 1, 116 )
+LV1_CALL(release_io_segment,                            2, 0, 117 )
 LV1_CALL(construct_io_irq_outlet,                       1, 1, 120 )
 LV1_CALL(destruct_io_irq_outlet,                        1, 0, 121 )
 LV1_CALL(map_htab,                                      1, 1, 122 )
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index 6cf1a83..71c6e7e 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -218,7 +218,7 @@
 	int  (*pcibios_enable_device_hook)(struct pci_dev *, int initial);
 
 	/* Called in indirect_* to avoid touching devices */
-	int (*pci_exclude_device)(unsigned char, unsigned char);
+	int (*pci_exclude_device)(struct pci_controller *, unsigned char, unsigned char);
 
 	/* Called at then very end of pcibios_init() */
 	void (*pcibios_after_init)(void);
diff --git a/include/asm-powerpc/mmu-8xx.h b/include/asm-powerpc/mmu-8xx.h
new file mode 100644
index 0000000..952bd88
--- /dev/null
+++ b/include/asm-powerpc/mmu-8xx.h
@@ -0,0 +1,147 @@
+#ifndef _ASM_POWERPC_MMU_8XX_H_
+#define _ASM_POWERPC_MMU_8XX_H_
+/*
+ * PPC8xx support
+ */
+
+/* Control/status registers for the MPC8xx.
+ * A write operation to these registers causes serialized access.
+ * During software tablewalk, the registers used perform mask/shift-add
+ * operations when written/read.  A TLB entry is created when the Mx_RPN
+ * is written, and the contents of several registers are used to
+ * create the entry.
+ */
+#define SPRN_MI_CTR	784	/* Instruction TLB control register */
+#define MI_GPM		0x80000000	/* Set domain manager mode */
+#define MI_PPM		0x40000000	/* Set subpage protection */
+#define MI_CIDEF	0x20000000	/* Set cache inhibit when MMU dis */
+#define MI_RSV4I	0x08000000	/* Reserve 4 TLB entries */
+#define MI_PPCS		0x02000000	/* Use MI_RPN prob/priv state */
+#define MI_IDXMASK	0x00001f00	/* TLB index to be loaded */
+#define MI_RESETVAL	0x00000000	/* Value of register at reset */
+
+/* These are the Ks and Kp from the PowerPC books.  For proper operation,
+ * Ks = 0, Kp = 1.
+ */
+#define SPRN_MI_AP	786
+#define MI_Ks		0x80000000	/* Should not be set */
+#define MI_Kp		0x40000000	/* Should always be set */
+
+/* The effective page number register.  When read, contains the information
+ * about the last instruction TLB miss.  When MI_RPN is written, bits in
+ * this register are used to create the TLB entry.
+ */
+#define SPRN_MI_EPN	787
+#define MI_EPNMASK	0xfffff000	/* Effective page number for entry */
+#define MI_EVALID	0x00000200	/* Entry is valid */
+#define MI_ASIDMASK	0x0000000f	/* ASID match value */
+					/* Reset value is undefined */
+
+/* A "level 1" or "segment" or whatever you want to call it register.
+ * For the instruction TLB, it contains bits that get loaded into the
+ * TLB entry when the MI_RPN is written.
+ */
+#define SPRN_MI_TWC	789
+#define MI_APG		0x000001e0	/* Access protection group (0) */
+#define MI_GUARDED	0x00000010	/* Guarded storage */
+#define MI_PSMASK	0x0000000c	/* Mask of page size bits */
+#define MI_PS8MEG	0x0000000c	/* 8M page size */
+#define MI_PS512K	0x00000004	/* 512K page size */
+#define MI_PS4K_16K	0x00000000	/* 4K or 16K page size */
+#define MI_SVALID	0x00000001	/* Segment entry is valid */
+					/* Reset value is undefined */
+
+/* Real page number.  Defined by the pte.  Writing this register
+ * causes a TLB entry to be created for the instruction TLB, using
+ * additional information from the MI_EPN, and MI_TWC registers.
+ */
+#define SPRN_MI_RPN	790
+
+/* Define an RPN value for mapping kernel memory to large virtual
+ * pages for boot initialization.  This has real page number of 0,
+ * large page size, shared page, cache enabled, and valid.
+ * Also mark all subpages valid and write access.
+ */
+#define MI_BOOTINIT	0x000001fd
+
+#define SPRN_MD_CTR	792	/* Data TLB control register */
+#define MD_GPM		0x80000000	/* Set domain manager mode */
+#define MD_PPM		0x40000000	/* Set subpage protection */
+#define MD_CIDEF	0x20000000	/* Set cache inhibit when MMU dis */
+#define MD_WTDEF	0x10000000	/* Set writethrough when MMU dis */
+#define MD_RSV4I	0x08000000	/* Reserve 4 TLB entries */
+#define MD_TWAM		0x04000000	/* Use 4K page hardware assist */
+#define MD_PPCS		0x02000000	/* Use MI_RPN prob/priv state */
+#define MD_IDXMASK	0x00001f00	/* TLB index to be loaded */
+#define MD_RESETVAL	0x04000000	/* Value of register at reset */
+
+#define SPRN_M_CASID	793	/* Address space ID (context) to match */
+#define MC_ASIDMASK	0x0000000f	/* Bits used for ASID value */
+
+
+/* These are the Ks and Kp from the PowerPC books.  For proper operation,
+ * Ks = 0, Kp = 1.
+ */
+#define SPRN_MD_AP	794
+#define MD_Ks		0x80000000	/* Should not be set */
+#define MD_Kp		0x40000000	/* Should always be set */
+
+/* The effective page number register.  When read, contains the information
+ * about the last instruction TLB miss.  When MD_RPN is written, bits in
+ * this register are used to create the TLB entry.
+ */
+#define SPRN_MD_EPN	795
+#define MD_EPNMASK	0xfffff000	/* Effective page number for entry */
+#define MD_EVALID	0x00000200	/* Entry is valid */
+#define MD_ASIDMASK	0x0000000f	/* ASID match value */
+					/* Reset value is undefined */
+
+/* The pointer to the base address of the first level page table.
+ * During a software tablewalk, reading this register provides the address
+ * of the entry associated with MD_EPN.
+ */
+#define SPRN_M_TWB	796
+#define	M_L1TB		0xfffff000	/* Level 1 table base address */
+#define M_L1INDX	0x00000ffc	/* Level 1 index, when read */
+					/* Reset value is undefined */
+
+/* A "level 1" or "segment" or whatever you want to call it register.
+ * For the data TLB, it contains bits that get loaded into the TLB entry
+ * when the MD_RPN is written.  It also provides the hardware assist
+ * for finding the PTE address during software tablewalk.
+ */
+#define SPRN_MD_TWC	797
+#define MD_L2TB		0xfffff000	/* Level 2 table base address */
+#define MD_L2INDX	0xfffffe00	/* Level 2 index (*pte), when read */
+#define MD_APG		0x000001e0	/* Access protection group (0) */
+#define MD_GUARDED	0x00000010	/* Guarded storage */
+#define MD_PSMASK	0x0000000c	/* Mask of page size bits */
+#define MD_PS8MEG	0x0000000c	/* 8M page size */
+#define MD_PS512K	0x00000004	/* 512K page size */
+#define MD_PS4K_16K	0x00000000	/* 4K or 16K page size */
+#define MD_WT		0x00000002	/* Use writethrough page attribute */
+#define MD_SVALID	0x00000001	/* Segment entry is valid */
+					/* Reset value is undefined */
+
+
+/* Real page number.  Defined by the pte.  Writing this register
+ * causes a TLB entry to be created for the data TLB, using
+ * additional information from the MD_EPN, and MD_TWC registers.
+ */
+#define SPRN_MD_RPN	798
+
+/* This is a temporary storage register that could be used to save
+ * a processor working register during a tablewalk.
+ */
+#define SPRN_M_TW	799
+
+#ifndef __ASSEMBLY__
+typedef unsigned long phys_addr_t;
+
+typedef struct {
+	unsigned long id;
+	unsigned long vdso_base;
+} mm_context_t;
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_MMU_8XX_H_ */
diff --git a/include/asm-powerpc/mmu-fsl-booke.h b/include/asm-powerpc/mmu-fsl-booke.h
new file mode 100644
index 0000000..3758000
--- /dev/null
+++ b/include/asm-powerpc/mmu-fsl-booke.h
@@ -0,0 +1,88 @@
+#ifndef _ASM_POWERPC_MMU_FSL_BOOKE_H_
+#define _ASM_POWERPC_MMU_FSL_BOOKE_H_
+/*
+ * Freescale Book-E MMU support
+ */
+
+/* Book-E defined page sizes */
+#define BOOKE_PAGESZ_1K		0
+#define BOOKE_PAGESZ_4K		1
+#define BOOKE_PAGESZ_16K	2
+#define BOOKE_PAGESZ_64K	3
+#define BOOKE_PAGESZ_256K	4
+#define BOOKE_PAGESZ_1M		5
+#define BOOKE_PAGESZ_4M		6
+#define BOOKE_PAGESZ_16M	7
+#define BOOKE_PAGESZ_64M	8
+#define BOOKE_PAGESZ_256M	9
+#define BOOKE_PAGESZ_1GB	10
+#define BOOKE_PAGESZ_4GB	11
+#define BOOKE_PAGESZ_16GB	12
+#define BOOKE_PAGESZ_64GB	13
+#define BOOKE_PAGESZ_256GB	14
+#define BOOKE_PAGESZ_1TB	15
+
+#define MAS0_TLBSEL(x)	((x << 28) & 0x30000000)
+#define MAS0_ESEL(x)	((x << 16) & 0x0FFF0000)
+#define MAS0_NV(x)	((x) & 0x00000FFF)
+
+#define MAS1_VALID 	0x80000000
+#define MAS1_IPROT	0x40000000
+#define MAS1_TID(x)	((x << 16) & 0x3FFF0000)
+#define MAS1_TS		0x00001000
+#define MAS1_TSIZE(x)	((x << 8) & 0x00000F00)
+
+#define MAS2_EPN	0xFFFFF000
+#define MAS2_X0		0x00000040
+#define MAS2_X1		0x00000020
+#define MAS2_W		0x00000010
+#define MAS2_I		0x00000008
+#define MAS2_M		0x00000004
+#define MAS2_G		0x00000002
+#define MAS2_E		0x00000001
+
+#define MAS3_RPN	0xFFFFF000
+#define MAS3_U0		0x00000200
+#define MAS3_U1		0x00000100
+#define MAS3_U2		0x00000080
+#define MAS3_U3		0x00000040
+#define MAS3_UX		0x00000020
+#define MAS3_SX		0x00000010
+#define MAS3_UW		0x00000008
+#define MAS3_SW		0x00000004
+#define MAS3_UR		0x00000002
+#define MAS3_SR		0x00000001
+
+#define MAS4_TLBSELD(x) MAS0_TLBSEL(x)
+#define MAS4_TIDDSEL	0x000F0000
+#define MAS4_TSIZED(x)	MAS1_TSIZE(x)
+#define MAS4_X0D	0x00000040
+#define MAS4_X1D	0x00000020
+#define MAS4_WD		0x00000010
+#define MAS4_ID		0x00000008
+#define MAS4_MD		0x00000004
+#define MAS4_GD		0x00000002
+#define MAS4_ED		0x00000001
+
+#define MAS6_SPID0	0x3FFF0000
+#define MAS6_SPID1	0x00007FFE
+#define MAS6_SAS	0x00000001
+#define MAS6_SPID	MAS6_SPID0
+
+#define MAS7_RPN	0xFFFFFFFF
+
+#ifndef __ASSEMBLY__
+
+#ifndef CONFIG_PHYS_64BIT
+typedef unsigned long phys_addr_t;
+#else
+typedef unsigned long long phys_addr_t;
+#endif
+
+typedef struct {
+	unsigned long id;
+	unsigned long vdso_base;
+} mm_context_t;
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_MMU_FSL_BOOKE_H_ */
diff --git a/include/asm-powerpc/mmu-hash32.h b/include/asm-powerpc/mmu-hash32.h
new file mode 100644
index 0000000..4bd735b
--- /dev/null
+++ b/include/asm-powerpc/mmu-hash32.h
@@ -0,0 +1,91 @@
+#ifndef _ASM_POWERPC_MMU_HASH32_H_
+#define _ASM_POWERPC_MMU_HASH32_H_
+/*
+ * 32-bit hash table MMU support
+ */
+
+/*
+ * BATs
+ */
+
+/* Block size masks */
+#define BL_128K	0x000
+#define BL_256K 0x001
+#define BL_512K 0x003
+#define BL_1M   0x007
+#define BL_2M   0x00F
+#define BL_4M   0x01F
+#define BL_8M   0x03F
+#define BL_16M  0x07F
+#define BL_32M  0x0FF
+#define BL_64M  0x1FF
+#define BL_128M 0x3FF
+#define BL_256M 0x7FF
+
+/* BAT Access Protection */
+#define BPP_XX	0x00		/* No access */
+#define BPP_RX	0x01		/* Read only */
+#define BPP_RW	0x02		/* Read/write */
+
+#ifndef __ASSEMBLY__
+struct ppc_bat {
+	struct {
+		unsigned long bepi:15;	/* Effective page index (virtual address) */
+		unsigned long :4;	/* Unused */
+		unsigned long bl:11;	/* Block size mask */
+		unsigned long vs:1;	/* Supervisor valid */
+		unsigned long vp:1;	/* User valid */
+	} batu; 		/* Upper register */
+	struct {
+		unsigned long brpn:15;	/* Real page index (physical address) */
+		unsigned long :10;	/* Unused */
+		unsigned long w:1;	/* Write-thru cache */
+		unsigned long i:1;	/* Cache inhibit */
+		unsigned long m:1;	/* Memory coherence */
+		unsigned long g:1;	/* Guarded (MBZ in IBAT) */
+		unsigned long :1;	/* Unused */
+		unsigned long pp:2;	/* Page access protections */
+	} batl;			/* Lower register */
+};
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Hash table
+ */
+
+/* Values for PP (assumes Ks=0, Kp=1) */
+#define PP_RWXX	0	/* Supervisor read/write, User none */
+#define PP_RWRX 1	/* Supervisor read/write, User read */
+#define PP_RWRW 2	/* Supervisor read/write, User read/write */
+#define PP_RXRX 3	/* Supervisor read,       User read */
+
+#ifndef __ASSEMBLY__
+
+/* Hardware Page Table Entry */
+struct hash_pte {
+	unsigned long v:1;	/* Entry is valid */
+	unsigned long vsid:24;	/* Virtual segment identifier */
+	unsigned long h:1;	/* Hash algorithm indicator */
+	unsigned long api:6;	/* Abbreviated page index */
+	unsigned long rpn:20;	/* Real (physical) page number */
+	unsigned long    :3;	/* Unused */
+	unsigned long r:1;	/* Referenced */
+	unsigned long c:1;	/* Changed */
+	unsigned long w:1;	/* Write-thru cache mode */
+	unsigned long i:1;	/* Cache inhibited */
+	unsigned long m:1;	/* Memory coherence */
+	unsigned long g:1;	/* Guarded */
+	unsigned long  :1;	/* Unused */
+	unsigned long pp:2;	/* Page protection */
+};
+
+typedef struct {
+	unsigned long id;
+	unsigned long vdso_base;
+} mm_context_t;
+
+typedef unsigned long phys_addr_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_MMU_HASH32_H_ */
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index b8dca30..695962f 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -94,6 +94,9 @@
 #define HPTE_R_C		ASM_CONST(0x0000000000000080)
 #define HPTE_R_R		ASM_CONST(0x0000000000000100)
 
+#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
+#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)
+
 /* Values for PP (assumes Ks=0, Kp=1) */
 /* pp0 will always be 0 for linux     */
 #define PP_RWXX	0	/* Supervisor read/write, User none */
@@ -103,12 +106,12 @@
 
 #ifndef __ASSEMBLY__
 
-typedef struct {
+struct hash_pte {
 	unsigned long v;
 	unsigned long r;
-} hpte_t;
+};
 
-extern hpte_t *htab_address;
+extern struct hash_pte *htab_address;
 extern unsigned long htab_size_bytes;
 extern unsigned long htab_hash_mask;
 
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index fe510ff..d44d211 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -5,13 +5,18 @@
 #ifdef CONFIG_PPC64
 /* 64-bit classic hash table MMU */
 #  include <asm/mmu-hash64.h>
+#elif defined(CONFIG_PPC_STD_MMU)
+/* 32-bit classic hash table MMU */
+#  include <asm/mmu-hash32.h>
 #elif defined(CONFIG_44x)
 /* 44x-style software loaded TLB */
 #  include <asm/mmu-44x.h>
-#else
-/* Other 32-bit.  FIXME: split up the other 32-bit MMU types, and
- * revise for arch/powerpc */
-#  include <asm-ppc/mmu.h>
+#elif defined(CONFIG_FSL_BOOKE)
+/* Freescale Book-E software loaded TLB */
+#  include <asm/mmu-fsl-booke.h>
+#elif defined (CONFIG_PPC_8xx)
+/* Motorola/Freescale 8xx software loaded TLB */
+#  include <asm/mmu-8xx.h>
 #endif
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/mmu_context.h b/include/asm-powerpc/mmu_context.h
index 40c9e5a..f863ac2 100644
--- a/include/asm-powerpc/mmu_context.h
+++ b/include/asm-powerpc/mmu_context.h
@@ -2,16 +2,210 @@
 #define __ASM_POWERPC_MMU_CONTEXT_H
 #ifdef __KERNEL__
 
+#include <asm/mmu.h>	
+#include <asm/cputable.h>
+#include <asm-generic/mm_hooks.h>
+
 #ifndef CONFIG_PPC64
-#include <asm-ppc/mmu_context.h>
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+
+/*
+ * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
+ * (virtual segment identifiers) for each context.  Although the
+ * hardware supports 24-bit VSIDs, and thus >1 million contexts,
+ * we only use 32,768 of them.  That is ample, since there can be
+ * at most around 30,000 tasks in the system anyway, and it means
+ * that we can use a bitmap to indicate which contexts are in use.
+ * Using a bitmap means that we entirely avoid all of the problems
+ * that we used to have when the context number overflowed,
+ * particularly on SMP systems.
+ *  -- paulus.
+ */
+
+/*
+ * This function defines the mapping from contexts to VSIDs (virtual
+ * segment IDs).  We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table.  Note, if this
+ * function is changed then arch/ppc/mm/hashtable.S will have to be
+ * changed to correspond.
+ */
+#define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
+				 & 0xffffff)
+
+/*
+   The MPC8xx has only 16 contexts.  We rotate through them on each
+   task switch.  A better way would be to keep track of tasks that
+   own contexts, and implement an LRU usage.  That way very active
+   tasks don't always have to pay the TLB reload overhead.  The
+   kernel pages are mapped shared, so the kernel can run on behalf
+   of any task that makes a kernel entry.  Shared does not mean they
+   are not protected, just that the ASID comparison is not performed.
+        -- Dan
+
+   The IBM4xx has 256 contexts, so we can just rotate through these
+   as a way of "switching" contexts.  If the TID of the TLB is zero,
+   the PID/TID comparison is disabled, so we can use a TID of zero
+   to represent all kernel pages as shared among all contexts.
+   	-- Dan
+ */
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+#ifdef CONFIG_8xx
+#define NO_CONTEXT      	16
+#define LAST_CONTEXT    	15
+#define FIRST_CONTEXT    	0
+
+#elif defined(CONFIG_4xx)
+#define NO_CONTEXT      	256
+#define LAST_CONTEXT    	255
+#define FIRST_CONTEXT    	1
+
+#elif defined(CONFIG_E200) || defined(CONFIG_E500)
+#define NO_CONTEXT      	256
+#define LAST_CONTEXT    	255
+#define FIRST_CONTEXT    	1
+
+#else
+
+/* PPC 6xx, 7xx CPUs */
+#define NO_CONTEXT      	((unsigned long) -1)
+#define LAST_CONTEXT    	32767
+#define FIRST_CONTEXT    	1
+#endif
+
+/*
+ * Set the current MMU context.
+ * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
+ * loading up the segment registers for the user part of the address space.
+ *
+ * Since the PGD is immediately available, it is much faster to simply
+ * pass this along as a second parameter, which is required for 8xx and
+ * can be used for debugging on all processors (if you happen to have
+ * an Abatron).
+ */
+extern void set_context(unsigned long contextid, pgd_t *pgd);
+
+/*
+ * Bitmap of contexts in use.
+ * The size of this bitmap is LAST_CONTEXT + 1 bits.
+ */
+extern unsigned long context_map[];
+
+/*
+ * This caches the next context number that we expect to be free.
+ * Its use is an optimization only, we can't rely on this context
+ * number to be free, but it usually will be.
+ */
+extern unsigned long next_mmu_context;
+
+/*
+ * If we don't have sufficient contexts to give one to every task
+ * that could be in the system, we need to be able to steal contexts.
+ * These variables support that.
+ */
+#if LAST_CONTEXT < 30000
+#define FEW_CONTEXTS	1
+extern atomic_t nr_free_contexts;
+extern struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern void steal_context(void);
+#endif
+
+/*
+ * Get a new mmu context for the address space described by `mm'.
+ */
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+	unsigned long ctx;
+
+	if (mm->context.id != NO_CONTEXT)
+		return;
+#ifdef FEW_CONTEXTS
+	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
+		steal_context();
+#endif
+	ctx = next_mmu_context;
+	while (test_and_set_bit(ctx, context_map)) {
+		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+		if (ctx > LAST_CONTEXT)
+			ctx = 0;
+	}
+	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+	mm->context.id = ctx;
+#ifdef FEW_CONTEXTS
+	context_mm[ctx] = mm;
+#endif
+}
+
+/*
+ * Set up the context for a new address space.
+ */
+static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+	mm->context.id = NO_CONTEXT;
+	mm->context.vdso_base = 0;
+	return 0;
+}
+
+/*
+ * We're finished using the context for an address space.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+	preempt_disable();
+	if (mm->context.id != NO_CONTEXT) {
+		clear_bit(mm->context.id, context_map);
+		mm->context.id = NO_CONTEXT;
+#ifdef FEW_CONTEXTS
+		atomic_inc(&nr_free_contexts);
+#endif
+	}
+	preempt_enable();
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+	asm volatile ("dssall;\n"
+#ifndef CONFIG_POWER4
+	 "sync;\n" /* G4 needs a sync here, G5 apparently not */
+#endif
+	 : : );
+#endif /* CONFIG_ALTIVEC */
+
+	tsk->thread.pgdir = next->pgd;
+
+	/* No need to flush userspace segments if the mm doesn't change */
+	if (prev == next)
+		return;
+
+	/* Setup new userspace context */
+	get_mmu_context(next);
+	set_context(next->context.id, next->pgd);
+}
+
+#define deactivate_mm(tsk,mm)	do { } while (0)
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+#define activate_mm(active_mm, mm)   switch_mm(active_mm, mm, current)
+
+extern void mmu_context_init(void);
+
+
 #else
 
 #include <linux/kernel.h>	
 #include <linux/mm.h>	
 #include <linux/sched.h>
-#include <asm/mmu.h>	
-#include <asm/cputable.h>
-#include <asm-generic/mm_hooks.h>
 
 /*
  * Copyright (C) 2001 PPC 64 Team, IBM Corp
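To see how the CTX_TO_VSID() skew spreads hash-table entries, here is a small
stand-alone sketch (not part of the patch; the context number and addresses
are arbitrary examples):

/* Illustrative sketch only: the constants mirror the CTX_TO_VSID()
 * definition above; the context and addresses are made-up examples. */
#include <assert.h>
#include <stdint.h>

#define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
				 & 0xffffff)

int main(void)
{
	/* Two adjacent 256MB segments of one context get VSIDs 0x111
	 * apart, so their entries spread across different hash buckets. */
	uint32_t seg0 = CTX_TO_VSID(1, 0x10000000u);
	uint32_t seg1 = CTX_TO_VSID(1, 0x20000000u);
	assert(seg1 - seg0 == 0x111);

	/* The same segment in the next context moves by 897 * 16. */
	assert(CTX_TO_VSID(2, 0x10000000u) - seg0 == 897 * 16);
	return 0;
}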
diff --git a/include/asm-powerpc/mpc86xx.h b/include/asm-powerpc/mpc86xx.h
index b85df45..15f650f 100644
--- a/include/asm-powerpc/mpc86xx.h
+++ b/include/asm-powerpc/mpc86xx.h
@@ -19,12 +19,6 @@
 
 #ifdef CONFIG_PPC_86xx
 
-#define _IO_BASE        isa_io_base
-#define _ISA_MEM_BASE   isa_mem_base
-#ifdef CONFIG_PCI
-#define PCI_DRAM_OFFSET pci_dram_offset
-#endif
-
 #define CPU0_BOOT_RELEASE 0x01000000
 #define CPU1_BOOT_RELEASE 0x02000000
 #define CPU_ALL_RELEASED (CPU0_BOOT_RELEASE | CPU1_BOOT_RELEASE)
diff --git a/include/asm-powerpc/mpc8xx.h b/include/asm-powerpc/mpc8xx.h
index 5803711..2be014b 100644
--- a/include/asm-powerpc/mpc8xx.h
+++ b/include/asm-powerpc/mpc8xx.h
@@ -23,6 +23,10 @@
 #include <platforms/8xx/mpc885ads.h>
 #endif
 
+#ifdef CONFIG_PCMCIA_M8XX
+extern struct mpc8xx_pcmcia_ops m8xx_pcmcia_ops;
+#endif
+
 #endif /* CONFIG_8xx */
 #endif /* __CONFIG_8xx_DEFS */
 #endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/pci-bridge.h b/include/asm-powerpc/pci-bridge.h
index d9bf5ab..e72c2a6 100644
--- a/include/asm-powerpc/pci-bridge.h
+++ b/include/asm-powerpc/pci-bridge.h
@@ -2,12 +2,91 @@
 #define _ASM_POWERPC_PCI_BRIDGE_H
 #ifdef __KERNEL__
 
-#ifndef CONFIG_PPC64
-#include <asm-ppc/pci-bridge.h>
-#else
-
 #include <linux/pci.h>
 #include <linux/list.h>
+#include <linux/ioport.h>
+
+#ifndef CONFIG_PPC64
+
+struct device_node;
+struct pci_controller;
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+struct pci_controller {
+	struct pci_bus *bus;
+	char is_dynamic;
+	void *arch_data;
+	struct list_head list_node;
+	struct device *parent;
+
+	int first_busno;
+	int last_busno;
+	int self_busno;
+
+	void __iomem *io_base_virt;
+	resource_size_t io_base_phys;
+
+	/* Some machines (PReP) have a non 1:1 mapping of
+	 * the PCI memory space in the CPU bus space
+	 */
+	resource_size_t pci_mem_offset;
+
+	struct pci_ops *ops;
+	volatile unsigned int __iomem *cfg_addr;
+	volatile void __iomem *cfg_data;
+
+	/*
+	 * Used for variants of PCI indirect handling and possible quirks:
+	 *  SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1
+	 *  EXT_REG - provides access to PCI-e extended registers
+	 *  SURPRESS_PRIMARY_BUS - we suppress the setting of PCI_PRIMARY_BUS
+	 *   on Freescale PCI-e controllers since they use the PCI_PRIMARY_BUS
+	 *   to determine which bus number to match on when generating type0
+	 *   config cycles
+	 */
+#define PPC_INDIRECT_TYPE_SET_CFG_TYPE		(0x00000001)
+#define PPC_INDIRECT_TYPE_EXT_REG		(0x00000002)
+#define PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS	(0x00000004)
+	u32 indirect_type;
+
+	/* Currently, we limit ourselves to 1 IO range and 3 mem
+	 * ranges since the common pci_bus structure can't handle more
+	 */
+	struct resource	io_resource;
+	struct resource mem_resources[3];
+	int global_number;		/* PCI domain number */
+};
+
+static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
+{
+	return bus->sysdata;
+}
+
+/* These are used for config access before all the PCI probing
+   has been done. */
+int early_read_config_byte(struct pci_controller *hose, int bus, int dev_fn,
+			   int where, u8 *val);
+int early_read_config_word(struct pci_controller *hose, int bus, int dev_fn,
+			   int where, u16 *val);
+int early_read_config_dword(struct pci_controller *hose, int bus, int dev_fn,
+			    int where, u32 *val);
+int early_write_config_byte(struct pci_controller *hose, int bus, int dev_fn,
+			    int where, u8 val);
+int early_write_config_word(struct pci_controller *hose, int bus, int dev_fn,
+			    int where, u16 val);
+int early_write_config_dword(struct pci_controller *hose, int bus, int dev_fn,
+			     int where, u32 val);
+
+extern void setup_indirect_pci_nomap(struct pci_controller* hose,
+			       void __iomem *cfg_addr, void __iomem *cfg_data);
+extern void setup_indirect_pci(struct pci_controller* hose,
+			       u32 cfg_addr, u32 cfg_data);
+extern void setup_grackle(struct pci_controller *hose);
+
+#else
+
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -31,6 +110,7 @@
 	int last_busno;
 
 	void __iomem *io_base_virt;
+	void *io_base_alloc;
 	resource_size_t io_base_phys;
 
 	/* Some machines have a non 1:1 mapping of
@@ -48,8 +128,7 @@
 	 */
 	struct resource io_resource;
 	struct resource mem_resources[3];
-	int global_number;		
-	int local_number;		
+	int global_number;
 	unsigned long buid;
 	unsigned long dma_window_base_cur;
 	unsigned long dma_window_size;
@@ -70,19 +149,22 @@
 	int	devfn;			/* pci device and function number */
 	int	class_code;		/* pci device class */
 
-#ifdef CONFIG_PPC_PSERIES
+	struct  pci_controller *phb;	/* for pci devices */
+	struct	iommu_table *iommu_table;	/* for phb's or bridges */
+	struct	pci_dev *pcidev;	/* back-pointer to the pci device */
+	struct	device_node *node;	/* back-pointer to the device_node */
+
+	int	pci_ext_config_space;	/* for pci devices */
+
+#ifdef CONFIG_EEH
 	int	eeh_mode;		/* See eeh.h for possible EEH_MODEs */
 	int	eeh_config_addr;
 	int	eeh_pe_config_addr; /* new-style partition endpoint address */
 	int 	eeh_check_count;	/* # times driver ignored error */
 	int 	eeh_freeze_count;	/* # times this device froze up. */
-#endif
-	int	pci_ext_config_space;	/* for pci devices */
-	struct  pci_controller *phb;	/* for pci devices */
-	struct	iommu_table *iommu_table;	/* for phb's or bridges */
-	struct	pci_dev *pcidev;	/* back-pointer to the pci device */
-	struct	device_node *node;	/* back-pointer to the device_node */
+	int 	eeh_false_positives;	/* # times this device reported #ff's */
 	u32	config_space[16];	/* saved PCI config space */
+#endif
 };
 
 /* Get the pointer to a device_node's pci_dn */
@@ -128,9 +210,6 @@
 /** Find the bus corresponding to the indicated device node */
 struct pci_bus * pcibios_find_pci_bus(struct device_node *dn);
 
-extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
-					 struct device_node *dev, int primary);
-
 /** Remove all of the PCI devices under this bus */
 void pcibios_remove_pci_devices(struct pci_bus *bus);
 
@@ -148,21 +227,12 @@
 	return PCI_DN(busdn)->phb;
 }
 
-extern struct pci_controller*
-pci_find_hose_for_OF_device(struct device_node* node);
-
-extern struct pci_controller *
-pcibios_alloc_controller(struct device_node *dev);
 extern void pcibios_free_controller(struct pci_controller *phb);
 
-#ifdef CONFIG_PCI
-extern unsigned long pci_address_to_pio(phys_addr_t address);
-#else
-static inline unsigned long pci_address_to_pio(phys_addr_t address)
-{
-	return (unsigned long)-1;
-}
-#endif
+extern void isa_bridge_find_early(struct pci_controller *hose);
+
+extern int pcibios_unmap_io_space(struct pci_bus *bus);
+extern int pcibios_map_io_space(struct pci_bus *bus);
 
 /* Return values for ppc_md.pci_probe_mode function */
 #define PCI_PROBE_NONE		-1	/* Don't look at this bus at all */
@@ -176,5 +246,29 @@
 #endif
 
 #endif /* CONFIG_PPC64 */
+
+/* Get the PCI host controller for an OF device */
+extern struct pci_controller*
+pci_find_hose_for_OF_device(struct device_node* node);
+
+/* Fill in host controller resources from the OF node */
+extern void
+pci_process_bridge_OF_ranges(struct pci_controller *hose,
+			   struct device_node *dev, int primary);
+
+/* Allocate a new PCI host bridge structure */
+extern struct pci_controller *
+pcibios_alloc_controller(struct device_node *dev);
+#ifdef CONFIG_PCI
+extern unsigned long pci_address_to_pio(phys_addr_t address);
+#else
+static inline unsigned long pci_address_to_pio(phys_addr_t address)
+{
+	return (unsigned long)-1;
+}
+#endif
+
+
+
 #endif /* __KERNEL__ */
 #endif
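As a hedged usage sketch (not part of the patch), the early config accessors
declared above can probe a bus before the PCI core has scanned it; the
controller pointer and the devfn of 0 below are assumptions:

/* Illustrative sketch only: check whether anything answers at devfn 0 on
 * the controller's first bus, using the early config accessors. */
#include <linux/pci.h>
#include <asm/pci-bridge.h>

static int hypothetical_device_present(struct pci_controller *hose)
{
	u32 id = 0xffffffff;

	early_read_config_dword(hose, hose->first_busno, 0, PCI_VENDOR_ID, &id);
	return (id & 0xffff) != 0xffff;	/* all-ones means nothing answered */
}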
diff --git a/include/asm-powerpc/pci.h b/include/asm-powerpc/pci.h
index e16e7bc..7b11765 100644
--- a/include/asm-powerpc/pci.h
+++ b/include/asm-powerpc/pci.h
@@ -95,8 +95,6 @@
 #define get_pci_dma_ops()	NULL
 #endif
 
-extern int pci_domain_nr(struct pci_bus *bus);
-
 /* Decide whether to display the domain number in /proc */
 extern int pci_proc_domain(struct pci_bus *bus);
 
@@ -112,9 +110,6 @@
 }
 #endif
 
-/* Return the index of the PCI controller for device PDEV. */
-#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
-
 /* Set the name of the bus as it appears in /proc/bus/pci */
 static inline int pci_proc_domain(struct pci_bus *bus)
 {
@@ -123,6 +118,8 @@
 
 #endif /* CONFIG_PPC64 */
 
+extern int pci_domain_nr(struct pci_bus *bus);
+
 struct vm_area_struct;
 /* Map a range of PCI memory or I/O space for a device into user space */
 int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
@@ -202,10 +199,6 @@
 	return root;
 }
 
-extern int unmap_bus_range(struct pci_bus *bus);
-
-extern int remap_bus_range(struct pci_bus *bus);
-
 extern void pcibios_fixup_device_resources(struct pci_dev *dev,
 			struct pci_bus *bus);
 
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index 2f2e302..73dc8ba 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -20,6 +20,11 @@
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+    __attribute__((__section__(".data.percpu.shared_aligned"))) \
+    __typeof__(type) per_cpu__##name				\
+    ____cacheline_aligned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
@@ -40,6 +45,8 @@
 
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+    DEFINE_PER_CPU(type, name)
 
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
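A minimal usage sketch for the new DEFINE_PER_CPU_SHARED_ALIGNED() macro
(not part of the patch; the variable and function names are made up):

/* Illustrative sketch only: a per-cpu counter defined with the new macro
 * so each CPU's copy sits in its own cache line and frequent updates do
 * not cause false sharing between CPUs. */
#include <linux/percpu.h>

DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, hypothetical_event_count);

static void hypothetical_count_event(void)
{
	__get_cpu_var(hypothetical_event_count)++;
}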
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index 973c1c1..86a54a4a 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -6,11 +6,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
 #include <linux/threads.h>
-#include <asm/processor.h>		/* For TASK_SIZE */
-#include <asm/mmu.h>
-#include <asm/page.h>
 #include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */
-struct mm_struct;
 
 extern unsigned long va_to_phys(unsigned long address);
 extern pte_t *va_to_pte(unsigned long address);
@@ -488,14 +484,6 @@
 #define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
 					pgprot_val(prot))
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
 #endif /* __ASSEMBLY__ */
 
 #define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
@@ -633,13 +621,6 @@
 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
 	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
-					    unsigned long addr, pte_t *ptep)
-{
-	return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
-}
-
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pte_t *ptep)
@@ -724,10 +705,6 @@
 #define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
 #define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
 
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
-extern void paging_init(void);
-
 /*
  * Encode and decode a swap entry.
  * Note that the bits we use in a PTE for representing a swap entry
@@ -745,40 +722,6 @@
 #define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
 #define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })
 
-/* CONFIG_APUS */
-/* For virtual address to physical address conversion */
-extern void cache_clear(__u32 addr, int length);
-extern void cache_push(__u32 addr, int length);
-extern int mm_end_of_chunk (unsigned long addr, int len);
-extern unsigned long iopa(unsigned long addr);
-extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;
-
-/* Values for nocacheflag and cmode */
-/* These are not used by the APUS kernel_map, but prevents
-   compilation errors. */
-#define	KERNELMAP_FULL_CACHING		0
-#define	KERNELMAP_NOCACHE_SER		1
-#define	KERNELMAP_NOCACHE_NONSER	2
-#define	KERNELMAP_NO_COPYBACK		3
-
-/*
- * Map some physical address range into the kernel address space.
- */
-extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
-				int nocacheflag, unsigned long *memavailp );
-
-/*
- * Set cache mode of (kernel space) address range.
- */
-extern void kernel_set_cachemode (unsigned long address, unsigned long size,
-                                 unsigned int cmode);
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define kern_addr_valid(addr)	(1)
-
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
-		remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*
  * No page table caches to initialise
  */
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index 0c87912..300f9a1 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -7,11 +7,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/stddef.h>
-#include <asm/processor.h>		/* For TASK_SIZE */
-#include <asm/mmu.h>
-#include <asm/page.h>
 #include <asm/tlbflush.h>
-struct mm_struct;
 #endif /* __ASSEMBLY__ */
 
 #ifdef CONFIG_PPC_64K_PAGES
@@ -27,7 +23,7 @@
  */
 #define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
                 	    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
-#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
+#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
 
 #if TASK_SIZE_USER64 > PGTABLE_RANGE
 #error TASK_SIZE_USER64 exceeds pagetable range
@@ -37,19 +33,28 @@
 #error TASK_SIZE_USER64 exceeds user VSID range
 #endif
 
+
 /*
  * Define the address range of the vmalloc VM area.
  */
 #define VMALLOC_START ASM_CONST(0xD000000000000000)
-#define VMALLOC_SIZE  ASM_CONST(0x80000000000)
+#define VMALLOC_SIZE  (PGTABLE_RANGE >> 1)
 #define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)
 
 /*
- * Define the address range of the imalloc VM area.
+ * Define the address ranges for MMIO and IO space :
+ *
+ *  ISA_IO_BASE = VMALLOC_END, 64K reserved area
+ *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
+ * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
  */
-#define PHBS_IO_BASE	VMALLOC_END
-#define IMALLOC_BASE	(PHBS_IO_BASE + 0x80000000ul)	/* Reserve 2 gigs for PHBs */
-#define IMALLOC_END	(VMALLOC_START + PGTABLE_RANGE)
+#define FULL_IO_SIZE	0x80000000ul
+#define  ISA_IO_BASE	(VMALLOC_END)
+#define  ISA_IO_END	(VMALLOC_END + 0x10000ul)
+#define  PHB_IO_BASE	(ISA_IO_END)
+#define  PHB_IO_END	(VMALLOC_END + FULL_IO_SIZE)
+#define IOREMAP_BASE	(PHB_IO_END)
+#define IOREMAP_END	(VMALLOC_START + PGTABLE_RANGE)
 
 /*
  * Region IDs
@@ -134,16 +139,6 @@
 #define __S110	PAGE_SHARED_X
 #define __S111	PAGE_SHARED_X
 
-#ifndef __ASSEMBLY__
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-#endif /* __ASSEMBLY__ */
-
 #ifdef CONFIG_HUGETLB_PAGE
 
 #define HAVE_ARCH_UNMAPPED_AREA
@@ -297,29 +292,6 @@
 	__r;								   \
 })
 
-/*
- * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the
- * moment we always flush but we need to fix hpte_update and test if the
- * optimisation is worth it.
- */
-static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
-					      unsigned long addr, pte_t *ptep)
-{
-	unsigned long old;
-
-       	if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
-		return 0;
-	old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0);
-	return (old & _PAGE_DIRTY) != 0;
-}
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(__vma, __addr, __ptep)		   \
-({									   \
-	int __r;							   \
-	__r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
-	__r;								   \
-})
-
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 				      pte_t *ptep)
@@ -347,14 +319,6 @@
 	__young;							\
 })
 
-#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
-({									\
-	int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
-						  __ptep); 		\
-	__dirty;							\
-})
-
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 				       unsigned long addr, pte_t *ptep)
@@ -432,10 +396,6 @@
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
-extern pgd_t swapper_pg_dir[];
-
-extern void paging_init(void);
-
 /* Encode and de-code a swap entry */
 #define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
 #define __swp_offset(entry)	((entry).val >> 8)
@@ -446,17 +406,6 @@
 #define pgoff_to_pte(off)	((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
 #define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_RPN_SHIFT)
 
-/*
- * kern_addr_valid is intended to indicate whether an address is a valid
- * kernel address.  Most 32-bit archs define it as always true (like this)
- * but most 64-bit archs actually perform a test.  What should we do here?
- * The only use is in fs/ncpfs/dir.c
- */
-#define kern_addr_valid(addr)	(1)
-
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
-		remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 void pgtable_cache_init(void);
 
 /*
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index 78bf4ae..d18ffe7 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -2,6 +2,13 @@
 #define _ASM_POWERPC_PGTABLE_H
 #ifdef __KERNEL__
 
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>		/* For TASK_SIZE */
+#include <asm/mmu.h>
+#include <asm/page.h>
+struct mm_struct;
+#endif /* !__ASSEMBLY__ */
+
 #if defined(CONFIG_PPC64)
 #  include <asm/pgtable-ppc64.h>
 #else
@@ -9,6 +16,27 @@
 #endif
 
 #ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern pgd_t swapper_pg_dir[];
+
+extern void paging_init(void);
+
+/*
+ * kern_addr_valid is intended to indicate whether an address is a valid
+ * kernel address.  Most 32-bit archs define it as always true (like this)
+ * but most 64-bit archs actually perform a test.  What should we do here?
+ */
+#define kern_addr_valid(addr)	(1)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
+		remap_pfn_range(vma, vaddr, pfn, size, prot)
+
 #include <asm-generic/pgtable.h>
 #endif /* __ASSEMBLY__ */
 
diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h
index 8e20051..b847aa1 100644
--- a/include/asm-powerpc/ppc-pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -26,7 +26,7 @@
 
 extern void find_and_init_phbs(void);
 
-extern struct pci_dev *ppc64_isabridge_dev;	/* may be NULL if no ISA bus */
+extern struct pci_dev *isa_bridge_pcidev;	/* may be NULL if no ISA bus */
 
 /** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */
 #define BUID_HI(buid) ((buid) >> 32)
@@ -47,8 +47,8 @@
 extern unsigned long get_phb_buid (struct device_node *);
 extern int rtas_setup_phb(struct pci_controller *phb);
 
-/* From pSeries_pci.h */
-extern void pSeries_final_fixup(void);
+/* From iSeries PCI */
+extern void iSeries_pcibios_init(void);
 
 extern unsigned long pci_probe_only;
 
@@ -139,6 +139,9 @@
  */
 struct device_node * find_device_pe(struct device_node *dn);
 
+void eeh_sysfs_add_device(struct pci_dev *pdev);
+void eeh_sysfs_remove_device(struct pci_dev *pdev);
+
 #endif /* CONFIG_EEH */
 
 #else /* CONFIG_PCI */
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index d947b16..e28b108 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -43,14 +43,6 @@
 /* what kind of prep workstation we are */
 extern int _prep_type;
 
-/*
- * This is used to identify the board type from a given PReP board
- * vendor. Board revision is also made available. This will be moved
- * elsewhere soon
- */
-extern unsigned char ucBoardRev;
-extern unsigned char ucBoardRevMaj, ucBoardRevMin;
-
 #endif /* CONFIG_PPC_PREP */
 
 #endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index 6845af9..1632baa 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -98,10 +98,19 @@
 extern struct device_node *of_chosen;
 
 /* flag descriptions */
-#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
+#define OF_DYNAMIC	1 /* node and properties were allocated via kmalloc */
+#define OF_DETACHED	2 /* node has been detached from the device tree */
 
-#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
-#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
+static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
+{
+	return test_bit(flag, &n->_flags);
+}
+
+static inline void of_node_set_flag(struct device_node *n, unsigned long flag)
+{
+	set_bit(flag, &n->_flags);
+}
+
 
 #define HAVE_ARCH_DEVTREE_FIXUPS
 
@@ -124,6 +133,9 @@
 	     dn = of_find_node_by_type(dn, type))
 extern struct device_node *of_find_compatible_node(struct device_node *from,
 	const char *type, const char *compat);
+#define for_each_compatible_node(dn, type, compatible) \
+	for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
+	     dn = of_find_compatible_node(dn, type, compatible))
 extern struct device_node *of_find_node_by_path(const char *path);
 extern struct device_node *of_find_node_by_phandle(phandle handle);
 extern struct device_node *of_find_all_nodes(struct device_node *prev);
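A short sketch of the new for_each_compatible_node() iterator (not part of
the patch; the "acme,widget" compatible string is invented):

/* Illustrative sketch only: walk every device-tree node matching a made-up
 * compatible string.  of_find_compatible_node() drops the reference on the
 * previous node as it advances, so no of_node_put() is needed in the loop. */
#include <linux/kernel.h>
#include <asm/prom.h>

static void hypothetical_scan_widgets(void)
{
	struct device_node *np;

	for_each_compatible_node(np, NULL, "acme,widget")
		printk(KERN_INFO "found widget node %s\n", np->full_name);
}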
diff --git a/include/asm-powerpc/ps3.h b/include/asm-powerpc/ps3.h
index 1e04651..a6f3f5e 100644
--- a/include/asm-powerpc/ps3.h
+++ b/include/asm-powerpc/ps3.h
@@ -35,7 +35,8 @@
 	};
 };
 
-int ps3_get_firmware_version(union ps3_firmware_version *v);
+void ps3_get_firmware_version(union ps3_firmware_version *v);
+int ps3_compare_firmware_version(u16 major, u16 minor, u16 rev);
 
 /* 'Other OS' area */
 
@@ -48,18 +49,6 @@
 
 enum ps3_param_av_multi_out ps3_os_area_get_av_multi_out(void);
 
-/**
- * struct ps3_device_id - HV bus device identifier from the system repository
- * @bus_id: HV bus id, {1..} (zero invalid)
- * @dev_id: HV device id, {0..}
- */
-
-struct ps3_device_id {
-	unsigned int bus_id;
-	unsigned int dev_id;
-};
-
-
 /* dma routines */
 
 enum ps3_dma_page_size {
@@ -74,6 +63,8 @@
 	PS3_DMA_INTERNAL = 2,
 };
 
+struct ps3_dma_region_ops;
+
 /**
  * struct ps3_dma_region - A per device dma state variables structure
  * @did: The HV device id.
@@ -81,21 +72,42 @@
  * @region_type: The HV region type.
  * @bus_addr: The 'translated' bus address of the region.
  * @len: The length in bytes of the region.
+ * @offset: The offset from the start of memory of the region.
+ * @ioid: The IOID of the device that owns this region
  * @chunk_list: Opaque variable used by the ioc page manager.
+ * @region_ops: struct ps3_dma_region_ops - dma region operations
  */
 
 struct ps3_dma_region {
-	struct ps3_device_id did;
+	struct ps3_system_bus_device *dev;
+	/* device variables */
+	const struct ps3_dma_region_ops *region_ops;
+	unsigned char ioid;
 	enum ps3_dma_page_size page_size;
 	enum ps3_dma_region_type region_type;
-	unsigned long bus_addr;
 	unsigned long len;
+	unsigned long offset;
+
+	/* driver variables  (set by ps3_dma_region_create) */
+	unsigned long bus_addr;
 	struct {
 		spinlock_t lock;
 		struct list_head head;
 	} chunk_list;
 };
 
+struct ps3_dma_region_ops {
+	int (*create)(struct ps3_dma_region *);
+	int (*free)(struct ps3_dma_region *);
+	int (*map)(struct ps3_dma_region *,
+		   unsigned long virt_addr,
+		   unsigned long len,
+		   unsigned long *bus_addr,
+		   u64 iopte_pp);
+	int (*unmap)(struct ps3_dma_region *,
+		     unsigned long bus_addr,
+		     unsigned long len);
+};
 /**
  * struct ps3_dma_region_init - Helper to initialize structure variables
  *
@@ -103,18 +115,16 @@
  * ps3_system_bus_device_register.
  */
 
-static inline void ps3_dma_region_init(struct ps3_dma_region *r,
-	const struct ps3_device_id* did, enum ps3_dma_page_size page_size,
-	enum ps3_dma_region_type region_type)
-{
-	r->did = *did;
-	r->page_size = page_size;
-	r->region_type = region_type;
-}
+struct ps3_system_bus_device;
+
+int ps3_dma_region_init(struct ps3_system_bus_device *dev,
+	struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
+	enum ps3_dma_region_type region_type, void *addr, unsigned long len);
 int ps3_dma_region_create(struct ps3_dma_region *r);
 int ps3_dma_region_free(struct ps3_dma_region *r);
 int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
-	unsigned long len, unsigned long *bus_addr);
+	unsigned long len, unsigned long *bus_addr,
+	u64 iopte_pp);
 int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
 	unsigned long len);
 
@@ -125,6 +135,7 @@
 	PS3_MMIO_64K = 16U
 };
 
+struct ps3_mmio_region_ops;
 /**
  * struct ps3_mmio_region - a per device mmio state variables structure
  *
@@ -132,13 +143,18 @@
  */
 
 struct ps3_mmio_region {
-	struct ps3_device_id did;
+	struct ps3_system_bus_device *dev;
+	const struct ps3_mmio_region_ops *mmio_ops;
 	unsigned long bus_addr;
 	unsigned long len;
 	enum ps3_mmio_page_size page_size;
 	unsigned long lpar_addr;
 };
 
+struct ps3_mmio_region_ops {
+	int (*create)(struct ps3_mmio_region *);
+	int (*free)(struct ps3_mmio_region *);
+};
 /**
  * struct ps3_mmio_region_init - Helper to initialize structure variables
  *
@@ -146,15 +162,9 @@
  * ps3_system_bus_device_register.
  */
 
-static inline void ps3_mmio_region_init(struct ps3_mmio_region *r,
-	const struct ps3_device_id* did, unsigned long bus_addr,
-	unsigned long len, enum ps3_mmio_page_size page_size)
-{
-	r->did = *did;
-	r->bus_addr = bus_addr;
-	r->len = len;
-	r->page_size = page_size;
-}
+int ps3_mmio_region_init(struct ps3_system_bus_device *dev,
+	struct ps3_mmio_region *r, unsigned long bus_addr, unsigned long len,
+	enum ps3_mmio_page_size page_size);
 int ps3_mmio_region_create(struct ps3_mmio_region *r);
 int ps3_free_mmio_region(struct ps3_mmio_region *r);
 unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr);
@@ -187,11 +197,10 @@
 	unsigned int class, unsigned int *virq);
 int ps3_spe_irq_destroy(unsigned int virq);
 
-int ps3_sb_event_receive_port_setup(enum ps3_cpu_binding cpu,
-	const struct ps3_device_id *did, unsigned int interrupt_id,
-	unsigned int *virq);
-int ps3_sb_event_receive_port_destroy(const struct ps3_device_id *did,
-	unsigned int interrupt_id, unsigned int virq);
+int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev,
+	enum ps3_cpu_binding cpu, unsigned int *virq);
+int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev,
+	unsigned int virq);
 
 /* lv1 result codes */
 
@@ -289,11 +298,33 @@
 /* system bus routines */
 
 enum ps3_match_id {
-	PS3_MATCH_ID_EHCI = 1,
-	PS3_MATCH_ID_OHCI,
-	PS3_MATCH_ID_GELIC,
-	PS3_MATCH_ID_AV_SETTINGS,
-	PS3_MATCH_ID_SYSTEM_MANAGER,
+	PS3_MATCH_ID_EHCI           = 1,
+	PS3_MATCH_ID_OHCI           = 2,
+	PS3_MATCH_ID_GELIC          = 3,
+	PS3_MATCH_ID_AV_SETTINGS    = 4,
+	PS3_MATCH_ID_SYSTEM_MANAGER = 5,
+	PS3_MATCH_ID_STOR_DISK      = 6,
+	PS3_MATCH_ID_STOR_ROM       = 7,
+	PS3_MATCH_ID_STOR_FLASH     = 8,
+	PS3_MATCH_ID_SOUND          = 9,
+	PS3_MATCH_ID_GRAPHICS       = 10,
+};
+
+#define PS3_MODULE_ALIAS_EHCI           "ps3:1"
+#define PS3_MODULE_ALIAS_OHCI           "ps3:2"
+#define PS3_MODULE_ALIAS_GELIC          "ps3:3"
+#define PS3_MODULE_ALIAS_AV_SETTINGS    "ps3:4"
+#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5"
+#define PS3_MODULE_ALIAS_STOR_DISK      "ps3:6"
+#define PS3_MODULE_ALIAS_STOR_ROM       "ps3:7"
+#define PS3_MODULE_ALIAS_STOR_FLASH     "ps3:8"
+#define PS3_MODULE_ALIAS_SOUND          "ps3:9"
+#define PS3_MODULE_ALIAS_GRAPHICS       "ps3:10"
+
+enum ps3_system_bus_device_type {
+	PS3_DEVICE_TYPE_IOC0 = 1,
+	PS3_DEVICE_TYPE_SB,
+	PS3_DEVICE_TYPE_VUART,
 };
 
 /**
@@ -302,14 +333,23 @@
 
 struct ps3_system_bus_device {
 	enum ps3_match_id match_id;
-	struct ps3_device_id did;
-	unsigned int interrupt_id;
-/*	struct iommu_table *iommu_table; -- waiting for Ben's cleanups */
-	struct ps3_dma_region *d_region;
-	struct ps3_mmio_region *m_region;
+	enum ps3_system_bus_device_type dev_type;
+
+	unsigned int bus_id;              /* SB */
+	unsigned int dev_id;              /* SB */
+	unsigned int interrupt_id;        /* SB */
+	struct ps3_dma_region *d_region;  /* SB, IOC0 */
+	struct ps3_mmio_region *m_region; /* SB, IOC0 */
+	unsigned int port_number;         /* VUART */
+
+/*	struct iommu_table *iommu_table; -- waiting for BenH's cleanups */
 	struct device core;
+	void *driver_priv; /* private driver variables */
 };
 
+int ps3_open_hv_device(struct ps3_system_bus_device *dev);
+int ps3_close_hv_device(struct ps3_system_bus_device *dev);
+
 /**
  * struct ps3_system_bus_driver - a driver for a device on the system bus
  */
@@ -319,6 +359,7 @@
 	struct device_driver core;
 	int (*probe)(struct ps3_system_bus_device *);
 	int (*remove)(struct ps3_system_bus_device *);
+	int (*shutdown)(struct ps3_system_bus_device *);
 /*	int (*suspend)(struct ps3_system_bus_device *, pm_message_t); */
 /*	int (*resume)(struct ps3_system_bus_device *); */
 };
@@ -326,16 +367,24 @@
 int ps3_system_bus_device_register(struct ps3_system_bus_device *dev);
 int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv);
 void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv);
-static inline struct ps3_system_bus_driver *to_ps3_system_bus_driver(
+
+static inline struct ps3_system_bus_driver *ps3_drv_to_system_bus_drv(
 	struct device_driver *_drv)
 {
 	return container_of(_drv, struct ps3_system_bus_driver, core);
 }
-static inline struct ps3_system_bus_device *to_ps3_system_bus_device(
+static inline struct ps3_system_bus_device *ps3_dev_to_system_bus_dev(
 	struct device *_dev)
 {
 	return container_of(_dev, struct ps3_system_bus_device, core);
 }
+static inline struct ps3_system_bus_driver *
+	ps3_system_bus_dev_to_system_bus_drv(struct ps3_system_bus_device *_dev)
+{
+	BUG_ON(!_dev);
+	BUG_ON(!_dev->core.driver);
+	return ps3_drv_to_system_bus_drv(_dev->core.driver);
+}
 
 /**
  * ps3_system_bus_set_drvdata -
@@ -358,32 +407,17 @@
 
 extern struct bus_type ps3_system_bus_type;
 
-/* vuart routines */
-
-struct ps3_vuart_port_priv;
-
-/**
- * struct ps3_vuart_port_device - a device on a vuart port
- */
-
-struct ps3_vuart_port_device {
-	enum ps3_match_id match_id;
-	struct device core;
-	struct ps3_vuart_port_priv* priv; /* private driver variables */
-
-};
-
-int ps3_vuart_port_device_register(struct ps3_vuart_port_device *dev);
-
 /* system manager */
 
-#ifdef CONFIG_PS3_SYS_MANAGER
-void ps3_sys_manager_restart(void);
+struct ps3_sys_manager_ops {
+	struct ps3_system_bus_device *dev;
+	void (*power_off)(struct ps3_system_bus_device *dev);
+	void (*restart)(struct ps3_system_bus_device *dev);
+};
+
+void ps3_sys_manager_register_ops(const struct ps3_sys_manager_ops *ops);
 void ps3_sys_manager_power_off(void);
-#else
-static inline void ps3_sys_manager_restart(void) {}
-static inline void ps3_sys_manager_power_off(void) {}
-#endif
+void ps3_sys_manager_restart(void);
 
 struct ps3_prealloc {
     const char *name;
@@ -393,5 +427,7 @@
 };
 
 extern struct ps3_prealloc ps3fb_videomemory;
+extern struct ps3_prealloc ps3flash_bounce_buffer;
+
 
 #endif
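A hypothetical driver skeleton showing how the renumbered match ids and the
new PS3_MODULE_ALIAS_* strings fit together (not part of the patch; all
names below are made up):

/* Illustrative sketch only: a system-bus driver binding to the storage-disk
 * match id and exporting the matching "ps3:<id>" modalias so udev can
 * autoload it. */
#include <linux/module.h>
#include <asm/ps3.h>

static int hypothetical_probe(struct ps3_system_bus_device *dev)
{
	return ps3_open_hv_device(dev);
}

static int hypothetical_remove(struct ps3_system_bus_device *dev)
{
	return ps3_close_hv_device(dev);
}

static struct ps3_system_bus_driver hypothetical_driver = {
	.match_id	= PS3_MATCH_ID_STOR_DISK,
	.core.name	= "hypothetical_stor",
	.probe		= hypothetical_probe,
	.remove		= hypothetical_remove,
};

static int __init hypothetical_init(void)
{
	return ps3_system_bus_driver_register(&hypothetical_driver);
}

module_init(hypothetical_init);
MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_DISK);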
diff --git a/include/asm-powerpc/ps3av.h b/include/asm-powerpc/ps3av.h
index 9efc40f..7df4250 100644
--- a/include/asm-powerpc/ps3av.h
+++ b/include/asm-powerpc/ps3av.h
@@ -1,20 +1,23 @@
 /*
- * Copyright (C) 2006 Sony Computer Entertainment Inc.
- * Copyright 2006, 2007 Sony Corporation
+ *  PS3 AV backend support.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published
- * by the Free Software Foundation; version 2 of the License.
+ *  Copyright (C) 2007 Sony Computer Entertainment Inc.
+ *  Copyright 2007 Sony Corp.
  *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
+
 #ifndef _ASM_POWERPC_PS3AV_H_
 #define _ASM_POWERPC_PS3AV_H_
 
@@ -159,6 +162,9 @@
 #define PS3AV_CMD_VIDEO_FMT_X8R8G8B8			0x0000
 /* video_out_format */
 #define PS3AV_CMD_VIDEO_OUT_FORMAT_RGB_12BIT		0x0000
+/* video_cl_cnv */
+#define PS3AV_CMD_VIDEO_CL_CNV_ENABLE_LUT		0x0000
+#define PS3AV_CMD_VIDEO_CL_CNV_DISABLE_LUT		0x0010
 /* video_sync */
 #define PS3AV_CMD_VIDEO_SYNC_VSYNC			0x0001
 #define PS3AV_CMD_VIDEO_SYNC_CSYNC			0x0004
@@ -311,6 +317,8 @@
 #define PS3AV_MODE_MASK				0x000F
 #define PS3AV_MODE_HDCP_OFF			0x1000	/* Retail PS3 product doesn't support this */
 #define PS3AV_MODE_DITHER			0x0800
+#define PS3AV_MODE_COLOR			0x0400
+#define PS3AV_MODE_WHITE			0x0200
 #define PS3AV_MODE_FULL				0x0080
 #define PS3AV_MODE_DVI				0x0040
 #define PS3AV_MODE_RGB				0x0020
@@ -529,9 +537,9 @@
 	u32 video_out_format;	/* in: out format */
 	u32 video_format;	/* in: input frame buffer format */
 	u8 reserved3;
-	u8 reserved4;
+	u8 video_cl_cnv;	/* in: color conversion */
 	u16 video_order;	/* in: input RGB order */
-	u32 reserved5;
+	u32 reserved4;
 };
 
 /* video: format */
@@ -539,7 +547,8 @@
 	struct ps3av_send_hdr send_hdr;
 	u32 video_head;		/* in: head */
 	u32 video_format;	/* in: frame buffer format */
-	u16 reserved;
+	u8 reserved;
+	u8 video_cl_cnv;	/* in: color conversion */
 	u16 video_order;	/* in: input RGB order */
 };
 
@@ -698,12 +707,6 @@
 extern int ps3av_cmd_video_get_monitor_info(struct ps3av_pkt_av_get_monitor_info *,
 					    u32);
 
-struct ps3_vuart_port_device;
-extern int ps3av_vuart_write(struct ps3_vuart_port_device *dev,
-			     const void *buf, unsigned long size);
-extern int ps3av_vuart_read(struct ps3_vuart_port_device *dev, void *buf,
-			    unsigned long size, int timeout);
-
 extern int ps3av_set_video_mode(u32, int);
 extern int ps3av_set_audio_mode(u32, u32, u32, u32, u32);
 extern int ps3av_get_auto_mode(int);
@@ -716,5 +719,8 @@
 extern int ps3av_audio_mute(int);
 extern int ps3av_dev_open(void);
 extern int ps3av_dev_close(void);
+extern void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data),
+				    void *flip_data);
+extern void ps3av_flip_ctl(int on);
 
 #endif	/* _ASM_POWERPC_PS3AV_H_ */
diff --git a/include/asm-powerpc/ps3fb.h b/include/asm-powerpc/ps3fb.h
index ad81cf4..3f121fe 100644
--- a/include/asm-powerpc/ps3fb.h
+++ b/include/asm-powerpc/ps3fb.h
@@ -41,16 +41,4 @@
 	__u32 num_frames; /* num of frame buffers */
 };
 
-#ifdef __KERNEL__
-
-#ifdef CONFIG_FB_PS3
-extern void ps3fb_flip_ctl(int on);
-extern void ps3fb_cleanup(void);
-#else
-static inline void ps3fb_flip_ctl(int on) {}
-static inline void ps3fb_cleanup(void) {}
-#endif
-
-#endif /* __KERNEL__ */
-
 #endif /* _ASM_POWERPC_PS3FB_H_ */
diff --git a/include/asm-powerpc/ps3stor.h b/include/asm-powerpc/ps3stor.h
new file mode 100644
index 0000000..6fcaf71
--- /dev/null
+++ b/include/asm-powerpc/ps3stor.h
@@ -0,0 +1,71 @@
+/*
+ * PS3 Storage Devices
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _ASM_POWERPC_PS3STOR_H_
+#define _ASM_POWERPC_PS3STOR_H_
+
+#include <linux/interrupt.h>
+
+#include <asm/ps3.h>
+
+
+struct ps3_storage_region {
+	unsigned int id;
+	u64 start;
+	u64 size;
+};
+
+struct ps3_storage_device {
+	struct ps3_system_bus_device sbd;
+
+	struct ps3_dma_region dma_region;
+	unsigned int irq;
+	u64 blk_size;
+
+	u64 tag;
+	u64 lv1_status;
+	struct completion done;
+
+	unsigned long bounce_size;
+	void *bounce_buf;
+	u64 bounce_lpar;
+	dma_addr_t bounce_dma;
+
+	unsigned int num_regions;
+	unsigned long accessible_regions;
+	unsigned int region_idx;		/* first accessible region */
+	struct ps3_storage_region regions[0];	/* Must be last */
+};
+
+static inline struct ps3_storage_device *to_ps3_storage_device(struct device *dev)
+{
+	return container_of(dev, struct ps3_storage_device, sbd.core);
+}
+
+extern int ps3stor_setup(struct ps3_storage_device *dev,
+			 irq_handler_t handler);
+extern void ps3stor_teardown(struct ps3_storage_device *dev);
+extern u64 ps3stor_read_write_sectors(struct ps3_storage_device *dev, u64 lpar,
+				      u64 start_sector, u64 sectors,
+				      int write);
+extern u64 ps3stor_send_command(struct ps3_storage_device *dev, u64 cmd,
+				u64 arg1, u64 arg2, u64 arg3, u64 arg4);
+
+#endif /* _ASM_POWERPC_PS3STOR_H_ */
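A hedged sketch of the ps3stor_read_write_sectors() helper (not part of the
patch; reading into the device's bounce buffer mirrors how a storage driver
would use it, and the sector numbers are assumptions):

/* Illustrative sketch only: read one sector from a PS3 storage device into
 * its bounce buffer; a nonzero status from the helper indicates failure. */
#include <linux/errno.h>
#include <asm/ps3stor.h>

static int hypothetical_read_first_sector(struct ps3_storage_device *dev)
{
	u64 status;

	/* last argument 0 = read, 1 = write */
	status = ps3stor_read_write_sectors(dev, dev->bounce_lpar, 0, 1, 0);
	return status ? -EIO : 0;
}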
diff --git a/include/asm-powerpc/ptrace.h b/include/asm-powerpc/ptrace.h
index 4ad77a1..13fccc5 100644
--- a/include/asm-powerpc/ptrace.h
+++ b/include/asm-powerpc/ptrace.h
@@ -92,6 +92,11 @@
 		set_thread_flag(TIF_NOERROR); \
 	} while(0)
 
+struct task_struct;
+extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
+extern int ptrace_put_reg(struct task_struct *task, int regno,
+			  unsigned long data);
+
 /*
  * We use the least-significant bit of the trap field to indicate
  * whether we have saved the full set of registers, or only a
@@ -158,9 +163,7 @@
 
 #define PT_NIP	32
 #define PT_MSR	33
-#ifdef __KERNEL__
 #define PT_ORIG_R3 34
-#endif
 #define PT_CTR	35
 #define PT_LNK	36
 #define PT_XER	37
@@ -169,11 +172,12 @@
 #define PT_MQ	39
 #else
 #define PT_SOFTE 39
+#endif
 #define PT_TRAP	40
 #define PT_DAR	41
 #define PT_DSISR 42
 #define PT_RESULT 43
-#endif
+#define PT_REGS_COUNT 44
 
 #define PT_FPR0	48	/* each FP reg occupies 2 slots in this space */
 
@@ -229,7 +233,17 @@
 #define PTRACE_GET_DEBUGREG	25
 #define PTRACE_SET_DEBUGREG	26
 
-/* Additional PTRACE requests implemented on PowerPC. */
+/* (new) PTRACE requests using the same numbers as x86 and the same
+ * argument ordering.  Additionally, they support more registers.
+ */
+#define PTRACE_GETREGS            12
+#define PTRACE_SETREGS            13
+#define PTRACE_GETFPREGS          14
+#define PTRACE_SETFPREGS          15
+#define PTRACE_GETREGS64	  22
+#define PTRACE_SETREGS64	  23
+
+/* (old) PTRACE requests with inverted arguments */
 #define PPC_PTRACE_GETREGS	0x99	/* Get GPRs 0 - 31 */
 #define PPC_PTRACE_SETREGS	0x98	/* Set GPRs 0 - 31 */
 #define PPC_PTRACE_GETFPREGS	0x97	/* Get FPRs 0 - 31 */
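A user-space sketch of the new x86-style PTRACE_GETREGS request (not part of
the patch; setting up and stopping the traced child and all error handling
are omitted):

/* Illustrative sketch only: fetch a stopped tracee's registers with the new
 * request and pull out the NIP, using the PT_* indices defined above. */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>

static unsigned long hypothetical_read_nip(pid_t child)
{
	unsigned long regs[PT_REGS_COUNT];

	/* Same argument order as x86: the register block is returned
	 * through the "data" argument. */
	ptrace(PTRACE_GETREGS, child, NULL, regs);
	return regs[PT_NIP];
}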
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index 749c7f9..281011e 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -453,6 +453,8 @@
 #define SPRN_MMCRA	0x312
 #define   MMCRA_SIHV	0x10000000UL /* state of MSR HV when SIAR set */
 #define   MMCRA_SIPR	0x08000000UL /* state of MSR PR when SIAR set */
+#define   MMCRA_SLOT	0x07000000UL /* SLOT bits (37-39) */
+#define   MMCRA_SLOT_SHIFT	24
 #define   MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
 #define   POWER6_MMCRA_SIHV   0x0000040000000000ULL
 #define   POWER6_MMCRA_SIPR   0x0000020000000000ULL
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index 31d5054..eedc828 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -106,6 +106,14 @@
 struct spu_runqueue;
 struct device_node;
 
+enum spu_utilization_state {
+	SPU_UTIL_SYSTEM,
+	SPU_UTIL_USER,
+	SPU_UTIL_IOWAIT,
+	SPU_UTIL_IDLE,
+	SPU_UTIL_MAX
+};
+
 struct spu {
 	const char *name;
 	unsigned long local_store_phys;
@@ -156,6 +164,21 @@
 	u64 shadow_int_mask_RW[3];
 
 	struct sys_device sysdev;
+
+	struct {
+		/* protected by interrupt reentrancy */
+		enum spu_utilization_state utilization_state;
+		unsigned long tstamp;		/* time of last ctx switch */
+		unsigned long times[SPU_UTIL_MAX];
+		unsigned long long vol_ctx_switch;
+		unsigned long long invol_ctx_switch;
+		unsigned long long min_flt;
+		unsigned long long maj_flt;
+		unsigned long long hash_flt;
+		unsigned long long slb_flt;
+		unsigned long long class2_intr;
+		unsigned long long libassist;
+	} stats;
 };
 
 struct spu *spu_alloc(void);
@@ -448,6 +471,7 @@
 #define MFC_STATE1_PROBLEM_STATE_MASK		0x08ull
 #define MFC_STATE1_RELOCATE_MASK		0x10ull
 #define MFC_STATE1_MASTER_RUN_CONTROL_MASK	0x20ull
+#define MFC_STATE1_TABLE_SEARCH_MASK		0x40ull
 	u64 mfc_lpid_RW;					/* 0x008 */
 	u64 spu_idr_RW;						/* 0x010 */
 	u64 mfc_vr_RO;						/* 0x018 */
diff --git a/include/asm-powerpc/syscalls.h b/include/asm-powerpc/syscalls.h
index c2fe79d..b3ca41f 100644
--- a/include/asm-powerpc/syscalls.h
+++ b/include/asm-powerpc/syscalls.h
@@ -43,16 +43,9 @@
 
 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset,
 		size_t sigsetsize);
-
-#ifndef __powerpc64__
-asmlinkage long sys_sigaltstack(const stack_t __user *uss,
-		stack_t __user *uoss, int r5, int r6, int r7, int r8,
-		struct pt_regs *regs);
-#else /* __powerpc64__ */
 asmlinkage long sys_sigaltstack(const stack_t __user *uss,
 		stack_t __user *uoss, unsigned long r5, unsigned long r6,
 		unsigned long r7, unsigned long r8, struct pt_regs *regs);
-#endif /* __powerpc64__ */
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_SYSCALLS_H */
diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h
index 1cc3f9c..cc6d872 100644
--- a/include/asm-powerpc/systbl.h
+++ b/include/asm-powerpc/systbl.h
@@ -308,6 +308,7 @@
 SYSCALL_SPU(getcpu)
 COMPAT_SYS(epoll_pwait)
 COMPAT_SYS_SPU(utimensat)
+COMPAT_SYS(fallocate)
 COMPAT_SYS_SPU(signalfd)
 COMPAT_SYS_SPU(timerfd)
 SYSCALL_SPU(eventfd)
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 09621f6..41520b7 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -43,7 +43,7 @@
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
-#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_wmb()	eieio()
 #define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	barrier()
@@ -184,16 +184,6 @@
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
@@ -559,5 +549,7 @@
 extern void account_system_vtime(struct task_struct *);
 #endif
 
+extern struct dentry *powerpc_debugfs_root;
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SYSTEM_H */
diff --git a/include/asm-powerpc/termbits.h b/include/asm-powerpc/termbits.h
index 5e79198..6698188 100644
--- a/include/asm-powerpc/termbits.h
+++ b/include/asm-powerpc/termbits.h
@@ -152,6 +152,10 @@
 #define B3000000  00034
 #define B3500000  00035
 #define B4000000  00036
+#define   BOTHER  00037
+
+#define CIBAUD	077600000
+#define IBSHIFT	16		/* Shift from CBAUD to CIBAUD */
 
 #define CSIZE	00001400
 #define   CS5	00000000
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h
index 3f32ca8..9d9aeca 100644
--- a/include/asm-powerpc/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -113,8 +113,8 @@
 #define TIF_POLLING_NRFLAG	4	/* true if poll_idle() is polling
 					   TIF_NEED_RESCHED */
 #define TIF_32BIT		5	/* 32 bit binary */
-#define TIF_RUNLATCH		6	/* Is the runlatch enabled? */
-#define TIF_ABI_PENDING		7	/* 32/64 bit switch needed */
+#define TIF_PERFMON_WORK	6	/* work for pfm_handle_work() */
+#define TIF_PERFMON_CTXSW	7	/* perfmon needs ctxsw calls */
 #define TIF_SYSCALL_AUDIT	8	/* syscall auditing active */
 #define TIF_SINGLESTEP		9	/* singlestepping active */
 #define TIF_MEMDIE		10
@@ -123,6 +123,8 @@
 #define TIF_NOERROR		14	/* Force successful syscall return */
 #define TIF_RESTORE_SIGMASK	15	/* Restore signal mask in do_signal */
 #define TIF_FREEZE		16	/* Freezing for suspend */
+#define TIF_RUNLATCH		17	/* Is the runlatch enabled? */
+#define TIF_ABI_PENDING		18	/* 32/64 bit switch needed */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -131,8 +133,8 @@
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_32BIT		(1<<TIF_32BIT)
-#define _TIF_RUNLATCH		(1<<TIF_RUNLATCH)
-#define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
+#define _TIF_PERFMON_WORK	(1<<TIF_PERFMON_WORK)
+#define _TIF_PERFMON_CTXSW	(1<<TIF_PERFMON_CTXSW)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
@@ -140,6 +142,8 @@
 #define _TIF_NOERROR		(1<<TIF_NOERROR)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_FREEZE		(1<<TIF_FREEZE)
+#define _TIF_RUNLATCH		(1<<TIF_RUNLATCH)
+#define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
 #define _TIF_SYSCALL_T_OR_A	(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
 
 #define _TIF_USER_WORK_MASK	(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index 3fd57c0..d7f5ddf 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -232,7 +232,7 @@
 #define account_process_vtime(tsk)		do { } while (0)
 #endif
 
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING)
 extern void calculate_steal_time(void);
 extern void snapshot_timebases(void);
 #else
@@ -240,5 +240,7 @@
 #define snapshot_timebases()			do { } while (0)
 #endif
 
+extern void iSeries_time_init_early(void);
+
 #endif /* __KERNEL__ */
 #endif /* __POWERPC_TIME_H */
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index 86e6266..99a0439 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -155,6 +155,11 @@
 {
 }
 
+/* Private function for use by PCI IO mapping code */
+extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
+				     unsigned long end);
+
+
 #endif
 
 /*
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
index f71c606..97d82b6 100644
--- a/include/asm-powerpc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -331,10 +331,11 @@
 #define __NR_timerfd		306
 #define __NR_eventfd		307
 #define __NR_sync_file_range2	308
+#define __NR_fallocate		309
 
 #ifdef __KERNEL__
 
-#define __NR_syscalls		309
+#define __NR_syscalls		310
 
 #define __NR__exit __NR_exit
 #define NR_syscalls	__NR_syscalls
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index 18aa776..c159315 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -654,13 +654,6 @@
 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
 	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
-					    unsigned long addr, pte_t *ptep)
-{
-	return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
-}
-
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pte_t *ptep)
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index d84a3cf..f1311a8 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -129,16 +129,6 @@
 	struct task_struct *);
 #define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
diff --git a/include/asm-s390/a.out.h b/include/asm-s390/a.out.h
index 72adee6..46158dc 100644
--- a/include/asm-s390/a.out.h
+++ b/include/asm-s390/a.out.h
@@ -32,6 +32,7 @@
 #ifdef __KERNEL__
 
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	DEFAULT_TASK_SIZE
 
 #endif
 
diff --git a/include/asm-s390/fb.h b/include/asm-s390/fb.h
new file mode 100644
index 0000000..c7df380
--- /dev/null
+++ b/include/asm-s390/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-s390/kprobes.h b/include/asm-s390/kprobes.h
index 830fe4c..340ba10 100644
--- a/include/asm-s390/kprobes.h
+++ b/include/asm-s390/kprobes.h
@@ -46,8 +46,6 @@
 	? (MAX_STACK_SIZE) \
 	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
 
-#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)(pentry)
-
 #define ARCH_SUPPORTS_KRETPROBES
 #define ARCH_INACTIVE_KPROBE_COUNT 0
 
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index 05ea6f1..f326451 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -64,7 +64,8 @@
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 /*
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index 9ea7f10..545857e 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -41,6 +41,11 @@
     __attribute__((__section__(".data.percpu"))) \
     __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+    __attribute__((__section__(".data.percpu.shared_aligned"))) \
+    __typeof__(type) per_cpu__##name				\
+    ____cacheline_aligned_in_smp
+
 #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
 #define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
 #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])
@@ -59,6 +64,8 @@
 
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+    DEFINE_PER_CPU(type, name)
 
 #define __get_cpu_var(var) __reloc_hide(var,0)
 #define __raw_get_cpu_var(var) __reloc_hide(var,0)
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 26215a9..3208dc6 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -669,19 +669,6 @@
 	return ptep_test_and_clear_young(vma, address, ptep);
 }
 
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	return 0;
-}
-
-static inline int
-ptep_clear_flush_dirty(struct vm_area_struct *vma,
-			unsigned long address, pte_t *ptep)
-{
-	/* No need to flush TLB; bits are in storage key */
-	return ptep_test_and_clear_dirty(vma, address, ptep);
-}
-
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_t pte = *ptep;
@@ -707,16 +694,19 @@
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
 }
 
-static inline pte_t
-ptep_clear_flush(struct vm_area_struct *vma,
-		 unsigned long address, pte_t *ptep)
+static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
+{
+	__ptep_ipte(address, ptep);
+	ptep = get_shadow_pte(ptep);
+	if (ptep)
+		__ptep_ipte(address, ptep);
+}
+
+static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
+				     unsigned long address, pte_t *ptep)
 {
 	pte_t pte = *ptep;
-	pte_t *shadow_pte = get_shadow_pte(ptep);
-
-	__ptep_ipte(address, ptep);
-	if (shadow_pte)
-		__ptep_ipte(address, shadow_pte);
+	ptep_invalidate(address, ptep);
 	return pte;
 }
 
@@ -726,21 +716,14 @@
 	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
 }
 
-static inline void
-ptep_establish(struct vm_area_struct *vma, 
-	       unsigned long address, pte_t *ptep,
-	       pte_t entry)
-{
-	ptep_clear_flush(vma, address, ptep);
-	set_pte(ptep, entry);
-}
-
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({									  \
-	int __changed = !pte_same(*(__ptep), __entry);			  \
-	if (__changed)							  \
-		ptep_establish(__vma, __address, __ptep, __entry);	  \
-	__changed;							  \
+#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
+({									\
+	int __changed = !pte_same(*(__ptep), __entry);			\
+	if (__changed) {						\
+		ptep_invalidate(__addr, __ptep);			\
+		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
+	}								\
+	__changed;							\
 })
 
 /*
@@ -940,12 +923,9 @@
 #define __HAVE_ARCH_MEMMAP_INIT
 extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
 
-#define __HAVE_ARCH_PTEP_ESTABLISH
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index bbe137c..64a3cd0 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -97,16 +97,6 @@
 	prev = __switch_to(prev,next);					     \
 } while (0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_vtime(struct task_struct *);
 extern void account_tick_vtime(struct task_struct *);
diff --git a/include/asm-sh/a.out.h b/include/asm-sh/a.out.h
index 6e9fca9..685d0f6 100644
--- a/include/asm-sh/a.out.h
+++ b/include/asm-sh/a.out.h
@@ -20,6 +20,7 @@
 #ifdef __KERNEL__
 
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 
 #endif
 
diff --git a/include/asm-sh/fb.h b/include/asm-sh/fb.h
new file mode 100644
index 0000000..d92e99c
--- /dev/null
+++ b/include/asm-sh/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
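Roughly how these hooks are expected to be used by a framebuffer driver's mmap path (an illustrative sketch with made-up names, not code from this merge): the arch header picks the caching attributes, then the driver remaps its framebuffer with the adjusted protection.

static int examplefb_mmap(struct fb_info *info, struct file *file,
			  struct vm_area_struct *vma)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

	/* let the architecture choose write-combining, uncached, etc. */
	fb_pgprotect(file, vma, off);

	return io_remap_pfn_range(vma, vma->vm_start,
				  (info->fix.smem_start + off) >> PAGE_SHIFT,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}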
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 7c75045..2450425 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -64,16 +64,6 @@
 	last = __last;							\
 } while (0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #ifdef CONFIG_CPU_SH4A
 #define __icbi()			\
 {					\
diff --git a/include/asm-sh64/a.out.h b/include/asm-sh64/a.out.h
index e1995e8..237ee4e 100644
--- a/include/asm-sh64/a.out.h
+++ b/include/asm-sh64/a.out.h
@@ -31,6 +31,7 @@
 #ifdef __KERNEL__
 
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 
 #endif
 
diff --git a/include/asm-sh64/fb.h b/include/asm-sh64/fb.h
new file mode 100644
index 0000000..d92e99c
--- /dev/null
+++ b/include/asm-sh64/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-sparc/a.out.h b/include/asm-sparc/a.out.h
index 9090060..917e042 100644
--- a/include/asm-sparc/a.out.h
+++ b/include/asm-sparc/a.out.h
@@ -92,6 +92,7 @@
 #include <asm/page.h>
 
 #define STACK_TOP	(PAGE_OFFSET - PAGE_SIZE)
+#define STACK_TOP_MAX	STACK_TOP
 
 #endif /* __KERNEL__ */
 
diff --git a/include/asm-sparc/fb.h b/include/asm-sparc/fb.h
new file mode 100644
index 0000000..c7df380
--- /dev/null
+++ b/include/asm-sparc/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 8b4e23b..d1a2572 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -165,16 +165,6 @@
 	} while(0)
 
 /*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-/*
  * Changing the IRQ level on the Sparc.
  */
 extern void local_irq_restore(unsigned long);
diff --git a/include/asm-sparc64/a.out.h b/include/asm-sparc64/a.out.h
index eb3b8e9..902e07f 100644
--- a/include/asm-sparc64/a.out.h
+++ b/include/asm-sparc64/a.out.h
@@ -101,6 +101,8 @@
 #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
 		   STACK_TOP32 : STACK_TOP64)
 
+#define STACK_TOP_MAX STACK_TOP64
+
 #endif
 
 #endif /* !(__ASSEMBLY__) */
diff --git a/include/asm-sparc64/fb.h b/include/asm-sparc64/fb.h
new file mode 100644
index 0000000..d6cd3a1
--- /dev/null
+++ b/include/asm-sparc64/fb.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
index ad595b6..9565a89 100644
--- a/include/asm-sparc64/io.h
+++ b/include/asm-sparc64/io.h
@@ -14,11 +14,6 @@
 #define __SLOW_DOWN_IO	do { } while (0)
 #define SLOW_DOWN_IO	do { } while (0)
 
-extern unsigned long virt_to_bus_not_defined_use_pci_map(volatile void *addr);
-#define virt_to_bus virt_to_bus_not_defined_use_pci_map
-extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr);
-#define bus_to_virt bus_to_virt_not_defined_use_pci_map
-
 /* BIO layer definitions. */
 extern unsigned long kern_base, kern_size;
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h
index a331b7b..7f6774d 100644
--- a/include/asm-sparc64/kprobes.h
+++ b/include/asm-sparc64/kprobes.h
@@ -10,7 +10,6 @@
 #define BREAKPOINT_INSTRUCTION_2 0x91d02071 /* ta 0x71 */
 #define MAX_INSN_SIZE 2
 
-#define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
 #define arch_remove_kprobe(p)	do {} while (0)
 #define  ARCH_INACTIVE_KPROBE_COUNT 0
 
diff --git a/include/asm-sparc64/mdesc.h b/include/asm-sparc64/mdesc.h
index e97c431..1acc727 100644
--- a/include/asm-sparc64/mdesc.h
+++ b/include/asm-sparc64/mdesc.h
@@ -61,6 +61,16 @@
 
 extern void mdesc_update(void);
 
+struct mdesc_notifier_client {
+	void (*add)(struct mdesc_handle *handle, u64 node);
+	void (*remove)(struct mdesc_handle *handle, u64 node);
+
+	const char			*node_name;
+	struct mdesc_notifier_client	*next;
+};
+
+extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
+
 extern void mdesc_fill_in_cpu_data(cpumask_t mask);
 
 extern void sun4v_mdesc_init(void);
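A hypothetical client of the new notifier interface (illustrative only; the node name and callback bodies here are assumptions, not part of this merge): a driver interested in a particular machine-description node type registers add/remove callbacks once at init time.

static void example_node_add(struct mdesc_handle *hp, u64 node)
{
	/* a real driver would parse the node's properties here */
}

static void example_node_remove(struct mdesc_handle *hp, u64 node)
{
}

static struct mdesc_notifier_client example_notifier = {
	.add		= example_node_add,
	.remove		= example_node_remove,
	.node_name	= "virtual-device-port",	/* assumed node name */
};

static int __init example_init(void)
{
	mdesc_register_notifier(&example_notifier);
	return 0;
}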
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index 88db872..caf8750 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -18,6 +18,11 @@
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+    __attribute__((__section__(".data.percpu.shared_aligned"))) \
+    __typeof__(type) per_cpu__##name				\
+    ____cacheline_aligned_in_smp
+
 register unsigned long __local_per_cpu_offset asm("g5");
 
 /* var is in discarded region: offset to particular copy we want */
@@ -38,6 +43,8 @@
 #define real_setup_per_cpu_areas()		do { } while (0)
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+    DEFINE_PER_CPU(type, name)
 
 #define per_cpu(var, cpu)			(*((void)cpu, &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 8ba380e..4090674 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -204,16 +204,6 @@
 	}								\
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
diff --git a/include/asm-sparc64/vio.h b/include/asm-sparc64/vio.h
index 83c9642..c0a8d4e 100644
--- a/include/asm-sparc64/vio.h
+++ b/include/asm-sparc64/vio.h
@@ -264,7 +264,7 @@
 		((dr->prod - dr->cons) & (ring_size - 1)));
 }
 
-#define VIO_MAX_TYPE_LEN	64
+#define VIO_MAX_TYPE_LEN	32
 #define VIO_MAX_COMPAT_LEN	64
 
 struct vio_dev {
diff --git a/include/asm-um/a.out.h b/include/asm-um/a.out.h
index 7016b89..78bc9ee 100644
--- a/include/asm-um/a.out.h
+++ b/include/asm-um/a.out.h
@@ -17,4 +17,6 @@
 #define STACK_TOP \
 	CHOOSE_MODE((honeypot ? host_task_size : task_size), task_size)
 
+#define STACK_TOP_MAX	STACK_TOP
+
 #endif
diff --git a/include/asm-v850/fb.h b/include/asm-v850/fb.h
new file mode 100644
index 0000000..c7df380
--- /dev/null
+++ b/include/asm-v850/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-x86_64/a.out.h b/include/asm-x86_64/a.out.h
index 7255cde..e789300 100644
--- a/include/asm-x86_64/a.out.h
+++ b/include/asm-x86_64/a.out.h
@@ -21,7 +21,8 @@
 
 #ifdef __KERNEL__
 #include <linux/thread_info.h>
-#define STACK_TOP TASK_SIZE
+#define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	TASK_SIZE64
 #endif
 
 #endif /* __A_OUT_GNU_H__ */
diff --git a/include/asm-x86_64/fb.h b/include/asm-x86_64/fb.h
new file mode 100644
index 0000000..60548e6
--- /dev/null
+++ b/include/asm-x86_64/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	if (boot_cpu_data.x86 > 3)
+		pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
index cf53178..7db8254 100644
--- a/include/asm-x86_64/kprobes.h
+++ b/include/asm-x86_64/kprobes.h
@@ -41,7 +41,6 @@
 	? (MAX_STACK_SIZE) \
 	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
 
-#define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
 #define ARCH_SUPPORTS_KRETPROBES
 #define  ARCH_INACTIVE_KPROBE_COUNT 1
 
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index e327c83..88adf1a 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -48,7 +48,8 @@
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 /*
  * These are used to make use of C type-checking..
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index c6fbb67..5abd482 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -20,6 +20,11 @@
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+    __attribute__((__section__(".data.percpu.shared_aligned"))) \
+    __typeof__(type) per_cpu__##name				\
+    ____cacheline_internodealigned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({				\
 	extern int simple_identifier_##var(void);	\
@@ -46,6 +51,8 @@
 
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+    DEFINE_PER_CPU(type, name)
 
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 4f169ac..3ba5309 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -284,13 +284,6 @@
 
 struct vm_area_struct;
 
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	if (!pte_dirty(*ptep))
-		return 0;
-	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
-}
-
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	if (!pte_young(*ptep))
@@ -427,7 +420,6 @@
    (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index ead9f9a..e4f246d 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -111,15 +111,6 @@
 #define wbinvd() \
 	__asm__ __volatile__ ("wbinvd": : :"memory");
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- */
-static inline void sched_cacheflush(void)
-{
-	wbinvd();
-}
-
 #endif	/* __KERNEL__ */
 
 #define nop() __asm__ __volatile__ ("nop")
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 8696f8a..fc4e73f 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -630,6 +630,8 @@
 __SYSCALL(__NR_timerfd, sys_timerfd)
 #define __NR_eventfd		284
 __SYSCALL(__NR_eventfd, sys_eventfd)
+#define __NR_fallocate		285
+__SYSCALL(__NR_fallocate, sys_fallocate)
 
 #ifndef __NO_STUBS
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-xtensa/a.out.h b/include/asm-xtensa/a.out.h
index ffc4dcf..05a2f67 100644
--- a/include/asm-xtensa/a.out.h
+++ b/include/asm-xtensa/a.out.h
@@ -17,6 +17,7 @@
 /* Note: the kernel needs the a.out definitions, even if only ELF is used. */
 
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 
 struct exec
 {
diff --git a/include/asm-xtensa/fb.h b/include/asm-xtensa/fb.h
new file mode 100644
index 0000000..c7df380
--- /dev/null
+++ b/include/asm-xtensa/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index e9fc512..06850f3 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -267,17 +267,6 @@
 	return 1;
 }
 
-static inline int
-ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr,
-   			  pte_t *ptep)
-{
-	pte_t pte = *ptep;
-	if (!pte_dirty(pte))
-		return 0;
-	update_pte(ptep, pte_mkclean(pte));
-	return 1;
-}
-
 static inline pte_t
 ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -418,7 +407,6 @@
 #endif /* !defined (__ASSEMBLY__) */
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTEP_MKDIRTY
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fccd8b5..dc234c5 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -122,7 +122,7 @@
 extern int pci_mmcfg_config_num;
 
 extern int sbf_port;
-extern unsigned long acpi_video_flags;
+extern unsigned long acpi_realmode_flags;
 
 #else	/* !CONFIG_ACPI */
 
diff --git a/include/linux/aio.h b/include/linux/aio.h
index b903fc0..d10e608 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -86,7 +86,7 @@
  */
 struct kiocb {
 	struct list_head	ki_run_list;
-	long			ki_flags;
+	unsigned long		ki_flags;
 	int			ki_users;
 	unsigned		ki_key;		/* id of this request */
 
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index e1a7083..91c8c07 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -6,11 +6,13 @@
 struct pt_regs;
 
 /*
- * MAX_ARG_PAGES defines the number of pages allocated for arguments
- * and envelope for the new program. 32 should suffice, this gives
- * a maximum env+arg of 128kB w/4KB pages!
+ * These are the maximum length and maximum number of strings passed to the
+ * execve() system call.  MAX_ARG_STRLEN is essentially random but serves to
+ * prevent the kernel from being unduly impacted by misaddressed pointers.
+ * MAX_ARG_STRINGS is chosen to fit in a signed 32-bit integer.
  */
-#define MAX_ARG_PAGES 32
+#define MAX_ARG_STRLEN (PAGE_SIZE * 32)
+#define MAX_ARG_STRINGS 0x7FFFFFFF
 
 /* sizeof(linux_binprm->buf) */
 #define BINPRM_BUF_SIZE 128
@@ -24,7 +26,12 @@
  */
 struct linux_binprm{
 	char buf[BINPRM_BUF_SIZE];
+#ifdef CONFIG_MMU
+	struct vm_area_struct *vma;
+#else
+# define MAX_ARG_PAGES	32
 	struct page *page[MAX_ARG_PAGES];
+#endif
 	struct mm_struct *mm;
 	unsigned long p; /* current top of mem */
 	int sh_bang;
@@ -40,6 +47,7 @@
 	unsigned interp_flags;
 	unsigned interp_data;
 	unsigned long loader, exec;
+	unsigned long argv_len;
 };
 
 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
@@ -68,7 +76,7 @@
 extern int unregister_binfmt(struct linux_binfmt *);
 
 extern int prepare_binprm(struct linux_binprm *);
-extern void remove_arg_zero(struct linux_binprm *);
+extern int __must_check remove_arg_zero(struct linux_binprm *);
 extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
 extern int flush_old_exec(struct linux_binprm * bprm);
 
@@ -85,6 +93,7 @@
 extern int setup_arg_pages(struct linux_binprm * bprm,
 			   unsigned long stack_top,
 			   int executable_stack);
+extern int bprm_mm_init(struct linux_binprm *bprm);
 extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
 extern void compute_creds(struct linux_binprm *binprm);
 extern int do_coredump(long signr, int exit_code, struct pt_regs * regs);
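The practical effect of the new limits, as a sketch of the kind of check the execve() argument-copying path is expected to perform (the helper shown is illustrative, not the exact in-tree code): with 4 KiB pages a single argument or environment string may now be up to 128 KiB, while the total is bounded only by MAX_ARG_STRINGS and available memory rather than by a fixed page array.

/* Illustrative only: validate one user-supplied argv/envp string. */
static int example_check_arg(const char __user *str)
{
	int len = strnlen_user(str, MAX_ARG_STRLEN);

	if (!len)			/* fault while reading the string */
		return -EFAULT;
	if (len > MAX_ARG_STRLEN)	/* longer than PAGE_SIZE * 32 */
		return -E2BIG;
	return len;
}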
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b32564a..f78965f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -624,7 +624,7 @@
  */
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 
-#ifdef CONFIG_MMU
+#ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
 #else
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index bd998ca..8547b10 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -60,7 +60,6 @@
 extern int bsg_register_queue(struct request_queue *, const char *);
 extern void bsg_unregister_queue(struct request_queue *);
 #else
-struct bsg_class_device { };
 #define bsg_register_queue(disk, name)		(0)
 #define bsg_unregister_queue(disk)	do { } while (0)
 #endif
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h
index e4ac016..c4079b4 100644
--- a/include/linux/coda_linux.h
+++ b/include/linux/coda_linux.h
@@ -43,9 +43,6 @@
 int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 int coda_setattr(struct dentry *, struct iattr *);
 
-/* global variables */
-extern int coda_fake_statfs;
-
 /* this file:  heloers */
 static __inline__ struct CodaFid *coda_i2f(struct inode *);
 static __inline__ char *coda_i2s(struct inode *);
diff --git a/include/linux/coda_proc.h b/include/linux/coda_proc.h
deleted file mode 100644
index 0dc1b04..0000000
--- a/include/linux/coda_proc.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * coda_statis.h
- * 
- * CODA operation statistics
- *
- * (c) March, 1998
- * by Michihiro Kuramochi, Zhenyu Xia and Zhanyong Wan
- * zhanyong.wan@yale.edu
- *
- */
-
-#ifndef _CODA_PROC_H
-#define _CODA_PROC_H
-
-void coda_sysctl_init(void);
-void coda_sysctl_clean(void);
-
-#include <linux/sysctl.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda.h>
-
-/* these four files are presented to show the result of the statistics:
- *
- *	/proc/fs/coda/vfs_stats
- *		      cache_inv_stats
- *
- * these four files are presented to reset the statistics to 0:
- *
- *	/proc/sys/coda/vfs_stats
- *		       cache_inv_stats
- */
-
-/* VFS operation statistics */
-struct coda_vfs_stats 
-{
-	/* file operations */
-	int open;
-	int flush;
-	int release;
-	int fsync;
-
-	/* dir operations */
-	int readdir;
-  
-	/* inode operations */
-	int create;
-	int lookup;
-	int link;
-	int unlink;
-	int symlink;
-	int mkdir;
-	int rmdir;
-	int rename;
-	int permission;
-
-	/* symlink operatoins*/
-	int follow_link;
-	int readlink;
-};
-
-/* cache invalidation statistics */
-struct coda_cache_inv_stats
-{
-	int flush;
-	int purge_user;
-	int zap_dir;
-	int zap_file;
-	int zap_vnode;
-	int purge_fid;
-	int replace;
-};
-
-/* these global variables hold the actual statistics data */
-extern struct coda_vfs_stats		coda_vfs_stat;
-
-#endif /* _CODA_PROC_H */
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index b541bb3..aa8f454 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -8,11 +8,6 @@
 
 struct kstatfs;
 
-struct coda_sb_info
-{
-	struct venus_comm *sbi_vcomm;
-};
-
 /* communication pending/processing queues */
 struct venus_comm {
 	u_long		    vc_seq;
@@ -24,9 +19,9 @@
 };
 
 
-static inline struct coda_sb_info *coda_sbp(struct super_block *sb)
+static inline struct venus_comm *coda_vcp(struct super_block *sb)
 {
-    return ((struct coda_sb_info *)((sb)->s_fs_info));
+	return (struct venus_comm *)((sb)->s_fs_info);
 }
 
 
@@ -74,8 +69,6 @@
 
 
 /* messages between coda filesystem in kernel and Venus */
-extern int coda_hard;
-extern unsigned long coda_timeout;
 struct upc_req {
 	struct list_head    uc_chain;
 	caddr_t	            uc_data;
@@ -85,7 +78,6 @@
 	u_short	            uc_opcode;  /* copied from data to save lookup */
 	int		    uc_unique;
 	wait_queue_head_t   uc_sleep;   /* process' wait queue */
-	unsigned long       uc_posttime;
 };
 
 #define REQ_ASYNC  0x1
diff --git a/include/linux/crc7.h b/include/linux/crc7.h
new file mode 100644
index 0000000..1786e77
--- /dev/null
+++ b/include/linux/crc7.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_CRC7_H
+#define _LINUX_CRC7_H
+#include <linux/types.h>
+
+extern const u8 crc7_syndrome_table[256];
+
+static inline u8 crc7_byte(u8 crc, u8 data)
+{
+	return crc7_syndrome_table[(crc << 1) ^ data];
+}
+
+extern u8 crc7(u8 crc, const u8 *buffer, size_t len);
+
+#endif
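For reference, a self-contained user-space sketch of the same byte-at-a-time scheme (polynomial x^7 + x^3 + 1, as used for MMC/SD command CRCs). The table is generated locally and is only assumed to correspond to lib/crc7's crc7_syndrome_table; the update step mirrors the crc7_byte() formula above.

#include <stdio.h>
#include <stdint.h>

static uint8_t table[256];

/* CRC-7 of a single byte, MSB first, polynomial 0x09, zero initial value. */
static uint8_t crc7_one_byte(uint8_t byte)
{
	uint8_t crc = 0;
	int bit;

	for (bit = 0; bit < 8; bit++) {
		crc <<= 1;
		if ((byte ^ crc) & 0x80)
			crc ^= 0x09;
		byte <<= 1;
	}
	return crc & 0x7f;
}

static void build_table(void)
{
	int i;

	for (i = 0; i < 256; i++)
		table[i] = crc7_one_byte(i);
}

/* Same update rule as crc7_byte()/crc7() in the header above. */
static uint8_t crc7_buf(uint8_t crc, const uint8_t *buf, size_t len)
{
	while (len--)
		crc = table[(crc << 1) ^ *buf++];
	return crc;
}

int main(void)
{
	const uint8_t msg[] = "123456789";

	build_table();
	/* the commonly quoted CRC-7/MMC check value for this string is 0x75 */
	printf("crc7(\"123456789\") = 0x%02x\n",
	       crc7_buf(0, msg, sizeof(msg) - 1));
	return 0;
}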
diff --git a/include/linux/device.h b/include/linux/device.h
index be2debe..d9f0a57 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -572,6 +572,16 @@
 }
 #endif
 
+#ifdef VERBOSE_DEBUG
+#define dev_vdbg	dev_dbg
+#else
+static inline int __attribute__ ((format (printf, 2, 3)))
+dev_vdbg(struct device * dev, const char * fmt, ...)
+{
+	return 0;
+}
+#endif
+
 #define dev_err(dev, format, arg...)		\
 	dev_printk(KERN_ERR , dev , format , ## arg)
 #define dev_info(dev, format, arg...)		\
diff --git a/include/linux/edac.h b/include/linux/edac.h
new file mode 100644
index 0000000..eab451e
--- /dev/null
+++ b/include/linux/edac.h
@@ -0,0 +1,29 @@
+/*
+ * Generic EDAC defs
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#ifndef _LINUX_EDAC_H_
+#define _LINUX_EDAC_H_
+
+#include <asm/atomic.h>
+
+#define EDAC_OPSTATE_INVAL	-1
+#define EDAC_OPSTATE_POLL	0
+#define EDAC_OPSTATE_NMI	1
+#define EDAC_OPSTATE_INT	2
+
+extern int edac_op_state;
+extern int edac_err_assert;
+extern atomic_t edac_handlers;
+
+extern int edac_handler_set(void);
+extern void edac_atomic_assert_error(void);
+
+#endif
diff --git a/include/linux/efs_fs.h b/include/linux/efs_fs.h
index dfed800..16cb25c 100644
--- a/include/linux/efs_fs.h
+++ b/include/linux/efs_fs.h
@@ -45,6 +45,7 @@
 extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int);
 
 extern struct dentry *efs_lookup(struct inode *, struct dentry *, struct nameidata *);
+extern struct dentry *efs_get_dentry(struct super_block *sb, void *vobjp);
 extern struct dentry *efs_get_parent(struct dentry *);
 extern int efs_bmap(struct inode *, int);
 
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
index 9a1e067..e831759 100644
--- a/include/linux/elfnote.h
+++ b/include/linux/elfnote.h
@@ -38,17 +38,25 @@
  * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
  *      ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
  */
-#define ELFNOTE(name, type, desctype, descdata)	\
-.pushsection .note.name, "",@note	;	\
-  .align 4				;	\
+#define ELFNOTE_START(name, type, flags)	\
+.pushsection .note.name, flags,@note	;	\
+  .balign 4				;	\
   .long 2f - 1f		/* namesz */	;	\
-  .long 4f - 3f		/* descsz */	;	\
+  .long 4484f - 3f	/* descsz */	;	\
   .long type				;	\
 1:.asciz #name				;	\
-2:.align 4				;	\
-3:desctype descdata			;	\
-4:.align 4				;	\
+2:.balign 4				;	\
+3:
+
+#define ELFNOTE_END				\
+4484:.balign 4				;	\
 .popsection				;
+
+#define ELFNOTE(name, type, desc)		\
+	ELFNOTE_START(name, type, "")		\
+		desc			;	\
+	ELFNOTE_END
+
 #else	/* !__ASSEMBLER__ */
 #include <linux/elf.h>
 /*
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
new file mode 100644
index 0000000..8872fe8
--- /dev/null
+++ b/include/linux/exportfs.h
@@ -0,0 +1,126 @@
+#ifndef LINUX_EXPORTFS_H
+#define LINUX_EXPORTFS_H 1
+
+#include <linux/types.h>
+
+struct dentry;
+struct super_block;
+struct vfsmount;
+
+
+/**
+ * struct export_operations - for nfsd to communicate with file systems
+ * @decode_fh:      decode a file handle fragment and return a &struct dentry
+ * @encode_fh:      encode a file handle fragment from a dentry
+ * @get_name:       find the name for a given inode in a given directory
+ * @get_parent:     find the parent of a given directory
+ * @get_dentry:     find a dentry for the inode given a file handle sub-fragment
+ * @find_exported_dentry:
+ *	set by the exporting module to a standard helper function.
+ *
+ * Description:
+ *    The export_operations structure provides a means for nfsd to communicate
+ *    with a particular exported file system  - particularly enabling nfsd and
+ *    the filesystem to co-operate when dealing with file handles.
+ *
+ *    export_operations contains two basic operations for dealing with file
+ *    handles, decode_fh() and encode_fh(), and allows for some other
+ *    operations to be defined which standard helper routines use to get
+ *    specific information from the filesystem.
+ *
+ *    nfsd encodes information used to determine which filesystem a filehandle
+ *    applies to in the initial part of the file handle.  The remainder, termed
+ *    a file handle fragment, is controlled completely by the filesystem.  The
+ *    standard helper routines assume that this fragment will contain one or
+ *    two sub-fragments, one which identifies the file, and one which may be
+ *    used to identify the (a) directory containing the file.
+ *
+ *    In some situations, nfsd needs to get a dentry which is connected into a
+ *    specific part of the file tree.  To allow for this, it passes the
+ *    function acceptable() together with a @context which can be used to see
+ *    if the dentry is acceptable.  As there can be multiple dentries for a
+ *    given file, the filesystem should check each one for acceptability before
+ *    looking for the next.  As soon as an acceptable one is found, it should
+ *    be returned.
+ *
+ * decode_fh:
+ *    @decode_fh is given a &struct super_block (@sb), a file handle fragment
+ *    (@fh, @fh_len) and an acceptability testing function (@acceptable,
+ *    @context).  It should return a &struct dentry which refers to the same
+ *    file that the file handle fragment refers to,  and which passes the
+ *    acceptability test.  If it cannot, it should return a %NULL pointer if
+ *    the file was found but no acceptable &dentries were available, or a
+ *    %ERR_PTR error code indicating why it couldn't be found (e.g. %ENOENT or
+ *    %ENOMEM).
+ *
+ * encode_fh:
+ *    @encode_fh should store in the file handle fragment @fh (using at most
+ *    @max_len bytes) information that can be used by @decode_fh to recover the
+ *    file referred to by the &struct dentry @de.  If the @connectable flag is
+ *    set, the encode_fh() should store sufficient information so that a good
+ *    attempt can be made to find not only the file but also its place in the
+ *    filesystem.   This typically means storing a reference to de->d_parent in
+ *    the filehandle fragment.  encode_fh() should return the number of bytes
+ *    stored or a negative error code such as %-ENOSPC
+ *
+ * get_name:
+ *    @get_name should find a name for the given @child in the given @parent
+ *    directory.  The name should be stored in the @name (with the
+ *    understanding that it is already pointing to a %NAME_MAX+1 sized
+ *    buffer).   get_name() should return %0 on success or a negative error
+ *    code on failure.  @get_name will be called without @parent->i_mutex held.
+ *
+ * get_parent:
+ *    @get_parent should find the parent directory for the given @child which
+ *    is also a directory.  In the event that it cannot be found, or storage
+ *    space cannot be allocated, a %ERR_PTR should be returned.
+ *
+ * get_dentry:
+ *    Given a &super_block (@sb) and a pointer to a file-system specific inode
+ *    identifier, possibly an inode number, (@inump) get_dentry() should find
+ *    the identified inode and return a dentry for that inode.  Any suitable
+ *    dentry can be returned including, if necessary, a new dentry created with
+ *    d_alloc_root.  The caller can then find any other extant dentries by
+ *    following the d_alias links.  If a new dentry was created using
+ *    d_alloc_root, DCACHE_NFSD_DISCONNECTED should be set, and the dentry
+ *    should be d_rehash()ed.
+ *
+ *    If the inode cannot be found, either a %NULL pointer or an %ERR_PTR code
+ *    can be returned.  The @inump will be whatever was passed to
+ *    nfsd_find_fh_dentry() in either the @obj or @parent parameters.
+ *
+ * Locking rules:
+ *    get_parent is called with child->d_inode->i_mutex down
+ *    get_name is not (which is possibly inconsistent)
+ */
+
+struct export_operations {
+	struct dentry *(*decode_fh)(struct super_block *sb, __u32 *fh,
+			int fh_len, int fh_type,
+			int (*acceptable)(void *context, struct dentry *de),
+			void *context);
+	int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
+			int connectable);
+	int (*get_name)(struct dentry *parent, char *name,
+			struct dentry *child);
+	struct dentry * (*get_parent)(struct dentry *child);
+	struct dentry * (*get_dentry)(struct super_block *sb, void *inump);
+
+	/* This is set by the exporting module to a standard helper */
+	struct dentry * (*find_exported_dentry)(
+			struct super_block *sb, void *obj, void *parent,
+			int (*acceptable)(void *context, struct dentry *de),
+			void *context);
+};
+
+extern struct dentry *find_exported_dentry(struct super_block *sb, void *obj,
+	void *parent, int (*acceptable)(void *context, struct dentry *de),
+	void *context);
+
+extern int exportfs_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len,
+	int connectable);
+extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, __u32 *fh,
+	int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
+	void *context);
+
+#endif /* LINUX_EXPORTFS_H */
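To make the split-out interface concrete, a skeletal filesystem hookup (illustrative only; examplefs_iget() and the two-word file handle layout are assumptions, not part of this merge): most filesystems supply get_dentry()/get_parent() and point find_exported_dentry at the generic helper, letting the default decode_fh/encode_fh logic do the rest.

static struct dentry *examplefs_get_parent(struct dentry *child)
{
	/* a real filesystem would look up ".." on disk here */
	return ERR_PTR(-ESTALE);
}

static struct dentry *examplefs_get_dentry(struct super_block *sb, void *vobjp)
{
	__u32 *objp = vobjp;		/* assumed layout: { ino, generation } */
	struct inode *inode = examplefs_iget(sb, objp[0], objp[1]);
	struct dentry *dentry;

	if (!inode)
		return ERR_PTR(-ESTALE);
	dentry = d_alloc_anon(inode);
	if (!dentry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}
	return dentry;
}

static struct export_operations examplefs_export_ops = {
	.get_parent		= examplefs_get_parent,
	.get_dentry		= examplefs_get_dentry,
	.find_exported_dentry	= find_exported_dentry,
};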
diff --git a/include/linux/ext4_fs.h b/include/linux/ext4_fs.h
index de1f9f7..cdee7aa 100644
--- a/include/linux/ext4_fs.h
+++ b/include/linux/ext4_fs.h
@@ -71,7 +71,7 @@
 /*
  * Maximal count of links to a file
  */
-#define EXT4_LINK_MAX		32000
+#define EXT4_LINK_MAX		65000
 
 /*
  * Macro-instructions used to manage several block sizes
@@ -102,6 +102,7 @@
 				 EXT4_GOOD_OLD_FIRST_INO : \
 				 (s)->s_first_ino)
 #endif
+#define EXT4_BLOCK_ALIGN(size, blkbits)		ALIGN((size), (1 << (blkbits)))
 
 /*
  * Macro-instructions used to manage fragments
@@ -201,6 +202,7 @@
 #define EXT4_STATE_JDATA		0x00000001 /* journaled data exists */
 #define EXT4_STATE_NEW			0x00000002 /* inode is newly created */
 #define EXT4_STATE_XATTR		0x00000004 /* has in-inode xattrs */
+#define EXT4_STATE_NO_EXPAND		0x00000008 /* No space for expansion */
 
 /* Used to pass group descriptor data when online resize is done */
 struct ext4_new_group_input {
@@ -225,6 +227,11 @@
 	__u32 free_blocks_count;
 };
 
+/*
+ * Following is used by preallocation code to tell get_blocks() that we
+ * want uninitialzed extents.
+ */
+#define EXT4_CREATE_UNINITIALIZED_EXT		2
 
 /*
  * ioctl commands
@@ -237,7 +244,7 @@
 #define EXT4_IOC_GROUP_ADD		_IOW('f', 8,struct ext4_new_group_input)
 #define	EXT4_IOC_GETVERSION_OLD		FS_IOC_GETVERSION
 #define	EXT4_IOC_SETVERSION_OLD		FS_IOC_SETVERSION
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
 #define EXT4_IOC_WAIT_FOR_READONLY	_IOR('f', 99, long)
 #endif
 #define EXT4_IOC_GETRSVSZ		_IOR('f', 5, long)
@@ -253,7 +260,7 @@
 #define EXT4_IOC32_GETRSVSZ		_IOR('f', 5, int)
 #define EXT4_IOC32_SETRSVSZ		_IOW('f', 6, int)
 #define EXT4_IOC32_GROUP_EXTEND		_IOW('f', 7, unsigned int)
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
 #define EXT4_IOC32_WAIT_FOR_READONLY	_IOR('f', 99, int)
 #endif
 #define EXT4_IOC32_GETVERSION_OLD	FS_IOC32_GETVERSION
@@ -282,7 +289,7 @@
 	__le16	i_uid;		/* Low 16 bits of Owner Uid */
 	__le32	i_size;		/* Size in bytes */
 	__le32	i_atime;	/* Access time */
-	__le32	i_ctime;	/* Creation time */
+	__le32	i_ctime;	/* Inode Change time */
 	__le32	i_mtime;	/* Modification time */
 	__le32	i_dtime;	/* Deletion Time */
 	__le16	i_gid;		/* Low 16 bits of Group Id */
@@ -331,10 +338,85 @@
 	} osd2;				/* OS dependent 2 */
 	__le16	i_extra_isize;
 	__le16	i_pad1;
+	__le32  i_ctime_extra;  /* extra Change time      (nsec << 2 | epoch) */
+	__le32  i_mtime_extra;  /* extra Modification time(nsec << 2 | epoch) */
+	__le32  i_atime_extra;  /* extra Access time      (nsec << 2 | epoch) */
+	__le32  i_crtime;       /* File Creation time */
+	__le32  i_crtime_extra; /* extra FileCreationtime (nsec << 2 | epoch) */
 };
 
 #define i_size_high	i_dir_acl
 
+#define EXT4_EPOCH_BITS 2
+#define EXT4_EPOCH_MASK ((1 << EXT4_EPOCH_BITS) - 1)
+#define EXT4_NSEC_MASK  (~0UL << EXT4_EPOCH_BITS)
+
+/*
+ * Extended fields will fit into an inode if the filesystem was formatted
+ * with large inodes (-I 256 or larger) and there are not currently any EAs
+ * consuming all of the available space. For new inodes we always reserve
+ * enough space for the kernel's known extended fields, but for inodes
+ * created with an old kernel this might not have been the case. None of
+ * the extended inode fields is critical for correct filesystem operation.
+ * This macro checks if a certain field fits in the inode. Note that
+ * inode-size = GOOD_OLD_INODE_SIZE + i_extra_isize
+ */
+#define EXT4_FITS_IN_INODE(ext4_inode, einode, field)	\
+	((offsetof(typeof(*ext4_inode), field) +	\
+	  sizeof((ext4_inode)->field))			\
+	<= (EXT4_GOOD_OLD_INODE_SIZE +			\
+	    (einode)->i_extra_isize))			\
+
+static inline __le32 ext4_encode_extra_time(struct timespec *time)
+{
+       return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
+			   time->tv_sec >> 32 : 0) |
+			   ((time->tv_nsec << 2) & EXT4_NSEC_MASK));
+}
+
+static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
+{
+       if (sizeof(time->tv_sec) > 4)
+	       time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK)
+			       << 32;
+       time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> 2;
+}
+
+#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode)			       \
+do {									       \
+	(raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec);	       \
+	if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra))     \
+		(raw_inode)->xtime ## _extra =				       \
+				ext4_encode_extra_time(&(inode)->xtime);       \
+} while (0)
+
+#define EXT4_EINODE_SET_XTIME(xtime, einode, raw_inode)			       \
+do {									       \
+	if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime))		       \
+		(raw_inode)->xtime = cpu_to_le32((einode)->xtime.tv_sec);      \
+	if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra))	       \
+		(raw_inode)->xtime ## _extra =				       \
+				ext4_encode_extra_time(&(einode)->xtime);      \
+} while (0)
+
+#define EXT4_INODE_GET_XTIME(xtime, inode, raw_inode)			       \
+do {									       \
+	(inode)->xtime.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime);       \
+	if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra))     \
+		ext4_decode_extra_time(&(inode)->xtime,			       \
+				       raw_inode->xtime ## _extra);	       \
+} while (0)
+
+#define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode)			       \
+do {									       \
+	if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime))		       \
+		(einode)->xtime.tv_sec = 				       \
+			(signed)le32_to_cpu((raw_inode)->xtime);	       \
+	if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra))	       \
+		ext4_decode_extra_time(&(einode)->xtime,		       \
+				       raw_inode->xtime ## _extra);	       \
+} while (0)
+
 #if defined(__KERNEL__) || defined(__linux__)
 #define i_reserved1	osd1.linux1.l_i_reserved1
 #define i_frag		osd2.linux2.l_i_frag
@@ -533,6 +615,13 @@
 	return container_of(inode, struct ext4_inode_info, vfs_inode);
 }
 
+static inline struct timespec ext4_current_time(struct inode *inode)
+{
+	return (inode->i_sb->s_time_gran < NSEC_PER_SEC) ?
+		current_fs_time(inode->i_sb) : CURRENT_TIME_SEC;
+}
+
+
 static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 {
 	return ino == EXT4_ROOT_INO ||
@@ -603,6 +692,8 @@
 #define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER	0x0001
 #define EXT4_FEATURE_RO_COMPAT_LARGE_FILE	0x0002
 #define EXT4_FEATURE_RO_COMPAT_BTREE_DIR	0x0004
+#define EXT4_FEATURE_RO_COMPAT_DIR_NLINK	0x0020
+#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE	0x0040
 
 #define EXT4_FEATURE_INCOMPAT_COMPRESSION	0x0001
 #define EXT4_FEATURE_INCOMPAT_FILETYPE		0x0002
@@ -620,6 +711,8 @@
 					 EXT4_FEATURE_INCOMPAT_64BIT)
 #define EXT4_FEATURE_RO_COMPAT_SUPP	(EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
 					 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
+					 EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \
+					 EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
 					 EXT4_FEATURE_RO_COMPAT_BTREE_DIR)
 
 /*
@@ -862,6 +955,7 @@
 extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
 extern void ext4_truncate (struct inode *);
 extern void ext4_set_inode_flags(struct inode *);
+extern void ext4_get_inode_flags(struct ext4_inode_info *);
 extern void ext4_set_aops(struct inode *inode);
 extern int ext4_writepage_trans_blocks(struct inode *);
 extern int ext4_block_truncate_page(handle_t *handle, struct page *page,
@@ -983,6 +1077,8 @@
 extern void ext4_ext_truncate(struct inode *, struct page *);
 extern void ext4_ext_init(struct super_block *);
 extern void ext4_ext_release(struct super_block *);
+extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
+			  loff_t len);
 static inline int
 ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
 			unsigned long max_blocks, struct buffer_head *bh,
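The encoding above packs the nanoseconds and the extended (post-2038) epoch bits of a timestamp into one extra 32-bit on-disk field. A stand-alone user-space illustration of the same packing (plain C, host-endian, without the le32 conversions):

#include <stdio.h>
#include <stdint.h>

/* extra = epoch bits (sec >> 32) in the low 2 bits, nanoseconds above them */
static uint32_t encode_extra(int64_t sec, uint32_t nsec)
{
	return (((uint64_t)sec >> 32) & 0x3) | (nsec << 2);
}

static void decode_extra(uint32_t sec_lo, uint32_t extra,
			 int64_t *sec, uint32_t *nsec)
{
	*sec = (int64_t)(int32_t)sec_lo | ((int64_t)(extra & 0x3) << 32);
	*nsec = extra >> 2;
}

int main(void)
{
	int64_t sec = (1LL << 32) + 12345;	/* a post-2038 second count */
	uint32_t extra = encode_extra(sec, 999999999);
	int64_t sec_out;
	uint32_t nsec_out;

	decode_extra((uint32_t)sec, extra, &sec_out, &nsec_out);
	printf("sec=%lld nsec=%u extra=%#x\n",
	       (long long)sec_out, nsec_out, extra);
	return 0;
}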
diff --git a/include/linux/ext4_fs_extents.h b/include/linux/ext4_fs_extents.h
index acfe5974..81406f3 100644
--- a/include/linux/ext4_fs_extents.h
+++ b/include/linux/ext4_fs_extents.h
@@ -141,7 +141,25 @@
 
 #define EXT_MAX_BLOCK	0xffffffff
 
-#define EXT_MAX_LEN	((1UL << 15) - 1)
+/*
+ * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
+ * initialized extent. This is 2^15 and not (2^16 - 1), since we use the
+ * MSB of the ee_len field in the extent data structure to signify whether
+ * this particular extent is an initialized extent or an uninitialized
+ * (i.e. preallocated) one.
+ * EXT_UNINIT_MAX_LEN is the maximum number of blocks we can have in an
+ * uninitialized extent.
+ * If ee_len is <= 0x8000, it is an initialized extent. Otherwise, it is an
+ * uninitialized one. In other words, if the MSB of ee_len is set, it is an
+ * uninitialized extent, with one special case: ee_len = 0x8000.  Since we
+ * cannot have an uninitialized extent of zero length, we treat that value
+ * as a special case of an initialized extent with length 0x8000.
+ * This way we get better extent-to-group alignment for initialized extents.
+ * Hence, the maximum number of blocks we can have in an *initialized*
+ * extent is 2^15 (32768) and in an *uninitialized* extent is 2^15-1 (32767).
+ */
+#define EXT_INIT_MAX_LEN	(1UL << 15)
+#define EXT_UNINIT_MAX_LEN	(EXT_INIT_MAX_LEN - 1)
 
 
 #define EXT_FIRST_EXTENT(__hdr__) \
@@ -188,8 +206,31 @@
 	EXT4_I(inode)->i_cached_extent.ec_type = EXT4_EXT_CACHE_NO;
 }
 
+static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
+{
+	/* We can not have an uninitialized extent of zero length! */
+	BUG_ON((le16_to_cpu(ext->ee_len) & ~EXT_INIT_MAX_LEN) == 0);
+	ext->ee_len |= cpu_to_le16(EXT_INIT_MAX_LEN);
+}
+
+static inline int ext4_ext_is_uninitialized(struct ext4_extent *ext)
+{
+	/* Extent with ee_len of 0x8000 is treated as an initialized extent */
+	return (le16_to_cpu(ext->ee_len) > EXT_INIT_MAX_LEN);
+}
+
+static inline int ext4_ext_get_actual_len(struct ext4_extent *ext)
+{
+	return (le16_to_cpu(ext->ee_len) <= EXT_INIT_MAX_LEN ?
+		le16_to_cpu(ext->ee_len) :
+		(le16_to_cpu(ext->ee_len) - EXT_INIT_MAX_LEN));
+}
+
 extern int ext4_extent_tree_init(handle_t *, struct inode *);
 extern int ext4_ext_calc_credits_for_insert(struct inode *, struct ext4_ext_path *);
+extern int ext4_ext_try_to_merge(struct inode *inode,
+				 struct ext4_ext_path *path,
+				 struct ext4_extent *);
 extern unsigned int ext4_ext_check_overlap(struct inode *, struct ext4_extent *, struct ext4_ext_path *);
 extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *);
 extern int ext4_ext_walk_space(struct inode *, unsigned long, unsigned long, ext_prepare_callback, void *);
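The ee_len convention described above, in a tiny stand-alone form that mirrors ext4_ext_is_uninitialized()/ext4_ext_get_actual_len() (host-endian only, for illustration):

#include <stdio.h>
#include <stdint.h>

#define INIT_MAX_LEN	(1u << 15)	/* mirrors EXT_INIT_MAX_LEN */

static int is_uninitialized(uint16_t ee_len)
{
	return ee_len > INIT_MAX_LEN;
}

static unsigned int actual_len(uint16_t ee_len)
{
	return ee_len <= INIT_MAX_LEN ? ee_len : ee_len - INIT_MAX_LEN;
}

int main(void)
{
	uint16_t a = 0x8000;		/* initialized, 32768 blocks */
	uint16_t b = 0x8000 | 100;	/* uninitialized, 100 blocks  */

	printf("a: uninitialized=%d len=%u\n", is_uninitialized(a), actual_len(a));
	printf("b: uninitialized=%d len=%u\n", is_uninitialized(b), actual_len(b));
	return 0;
}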
diff --git a/include/linux/ext4_fs_i.h b/include/linux/ext4_fs_i.h
index 9de4944..1a511e99 100644
--- a/include/linux/ext4_fs_i.h
+++ b/include/linux/ext4_fs_i.h
@@ -153,6 +153,11 @@
 
 	unsigned long i_ext_generation;
 	struct ext4_ext_cache i_cached_extent;
+	/*
+	 * File creation time. Its function is the same as that of
+	 * struct timespec i_{a,c,m}time in the generic inode.
+	 */
+	struct timespec i_crtime;
 };
 
 #endif	/* _LINUX_EXT4_FS_I */
diff --git a/include/linux/ext4_fs_sb.h b/include/linux/ext4_fs_sb.h
index 2347557..1b2ffee 100644
--- a/include/linux/ext4_fs_sb.h
+++ b/include/linux/ext4_fs_sb.h
@@ -73,7 +73,7 @@
 	struct list_head s_orphan;
 	unsigned long s_commit_interval;
 	struct block_device *journal_bdev;
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
 	struct timer_list turn_ro_timer;	/* For turning read-only (crash simulation) */
 	wait_queue_head_t ro_wait_queue;	/* For people waiting for the fs to go read-only */
 #endif
@@ -81,6 +81,7 @@
 	char *s_qf_names[MAXQUOTAS];		/* Names of quota files with journalled quota */
 	int s_jquota_fmt;			/* Format of quota to use */
 #endif
+	unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
 
 #ifdef EXTENTS_STATS
 	/* ext4 extents stats */
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
new file mode 100644
index 0000000..8e912ab
--- /dev/null
+++ b/include/linux/falloc.h
@@ -0,0 +1,6 @@
+#ifndef _FALLOC_H_
+#define _FALLOC_H_
+
+#define FALLOC_FL_KEEP_SIZE	0x01 /* default is extend size */
+
+#endif /* _FALLOC_H_ */
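Until the C library grows a wrapper, the new system call can be exercised directly; a small user-space sketch for x86-64 (the syscall number 285 and the flag value match the definitions added elsewhere in this merge; other architectures use different numbers):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_fallocate
#define __NR_fallocate		285	/* x86-64 */
#endif
#define FALLOC_FL_KEEP_SIZE	0x01

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* preallocate 1 MiB at offset 0 without changing the file size */
	if (syscall(__NR_fallocate, fd, FALLOC_FL_KEEP_SIZE,
		    (long)0, (long)1 << 20) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}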
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 6622682..cec5410 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -119,6 +119,7 @@
 #define FB_ACCEL_NV_40          46      /* nVidia Arch 40               */
 #define FB_ACCEL_XGI_VOLARI_V	47	/* XGI Volari V3XT, V5, V8      */
 #define FB_ACCEL_XGI_VOLARI_Z	48	/* XGI Volari Z7                */
+#define FB_ACCEL_OMAP1610	49	/* TI OMAP16xx                  */
 #define FB_ACCEL_NEOMAGIC_NM2070 90	/* NeoMagic NM2070              */
 #define FB_ACCEL_NEOMAGIC_NM2090 91	/* NeoMagic NM2090              */
 #define FB_ACCEL_NEOMAGIC_NM2093 92	/* NeoMagic NM2093              */
@@ -529,6 +530,8 @@
 #define FB_EVENT_CONBLANK               0x0C
 /*      Get drawing requirements        */
 #define FB_EVENT_GET_REQ                0x0D
+/*      Unbind from the console if possible */
+#define FB_EVENT_FB_UNBIND              0x0E
 
 struct fb_event {
 	struct fb_info *info;
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 4631086..c8e02de 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -1,5 +1,8 @@
 /* Freezer declarations */
 
+#ifndef FREEZER_H_INCLUDED
+#define FREEZER_H_INCLUDED
+
 #include <linux/sched.h>
 
 #ifdef CONFIG_PM
@@ -22,7 +25,7 @@
 /*
  * Request that a process be frozen
  */
-static inline void freeze(struct task_struct *p)
+static inline void set_freeze_flag(struct task_struct *p)
 {
 	set_tsk_thread_flag(p, TIF_FREEZE);
 }
@@ -30,7 +33,7 @@
 /*
  * Sometimes we may need to cancel the previous 'freeze' request
  */
-static inline void do_not_freeze(struct task_struct *p)
+static inline void clear_freeze_flag(struct task_struct *p)
 {
 	clear_tsk_thread_flag(p, TIF_FREEZE);
 }
@@ -53,7 +56,7 @@
 		wake_up_process(p);
 		return 1;
 	}
-	clear_tsk_thread_flag(p, TIF_FREEZE);
+	clear_freeze_flag(p);
 	task_unlock(p);
 	return 0;
 }
@@ -115,10 +118,19 @@
 	return !!(p->flags & PF_FREEZER_SKIP);
 }
 
+/*
+ * Tell the freezer that the current task should be frozen by it
+ */
+static inline void set_freezable(void)
+{
+	current->flags &= ~PF_NOFREEZE;
+}
+
 #else
 static inline int frozen(struct task_struct *p) { return 0; }
 static inline int freezing(struct task_struct *p) { return 0; }
-static inline void freeze(struct task_struct *p) { BUG(); }
+static inline void set_freeze_flag(struct task_struct *p) {}
+static inline void clear_freeze_flag(struct task_struct *p) {}
 static inline int thaw_process(struct task_struct *p) { return 1; }
 
 static inline void refrigerator(void) {}
@@ -130,4 +142,7 @@
 static inline void freezer_do_not_count(void) {}
 static inline void freezer_count(void) {}
 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
+static inline void set_freezable(void) {}
 #endif
+
+#endif	/* FREEZER_H_INCLUDED */
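A typical consumer of the new helper, sketched as a hypothetical kernel thread (not code from this merge): a kernel thread that wants to be frozen during suspend calls set_freezable() once and then polls try_to_freeze() in its main loop.

static int example_thread(void *unused)
{
	set_freezable();

	while (!kthread_should_stop()) {
		try_to_freeze();
		/* do one unit of work, then nap */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}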
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e687808..d33bead 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -283,11 +283,14 @@
 #include <linux/init.h>
 #include <linux/pid.h>
 #include <linux/mutex.h>
+#include <linux/sysctl.h>
+#include <linux/capability.h>
 
 #include <asm/atomic.h>
 #include <asm/semaphore.h>
 #include <asm/byteorder.h>
 
+struct export_operations;
 struct hd_geometry;
 struct iovec;
 struct nameidata;
@@ -694,20 +697,26 @@
  * Track a single file's readahead state
  */
 struct file_ra_state {
-	unsigned long start;		/* Current window */
-	unsigned long size;
-	unsigned long flags;		/* ra flags RA_FLAG_xxx*/
-	unsigned long cache_hit;	/* cache hit count*/
-	unsigned long prev_index;	/* Cache last read() position */
-	unsigned long ahead_start;	/* Ahead window */
-	unsigned long ahead_size;
+	pgoff_t start;                  /* where readahead started */
+	unsigned long size;             /* # of readahead pages */
+	unsigned long async_size;       /* do asynchronous readahead when
+					   there are only # of pages ahead */
+
 	unsigned long ra_pages;		/* Maximum readahead window */
 	unsigned long mmap_hit;		/* Cache hit stat for mmap accesses */
 	unsigned long mmap_miss;	/* Cache miss stat for mmap accesses */
+	unsigned long prev_index;	/* Cache last read() position */
 	unsigned int prev_offset;	/* Offset where last read() ended in a page */
 };
-#define RA_FLAG_MISS 0x01	/* a cache miss occured against this file */
-#define RA_FLAG_INCACHE 0x02	/* file is already in cache */
+
+/*
+ * Check if @index falls in the readahead windows.
+ */
+static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
+{
+	return (index >= ra->start &&
+		index <  ra->start + ra->size);
+}
 
 struct file {
 	/*
@@ -859,7 +868,7 @@
 extern void locks_copy_lock(struct file_lock *, struct file_lock *);
 extern void locks_remove_posix(struct file *, fl_owner_t);
 extern void locks_remove_flock(struct file *);
-extern int posix_test_lock(struct file *, struct file_lock *);
+extern void posix_test_lock(struct file *, struct file_lock *);
 extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
 extern int posix_lock_file_wait(struct file *, struct file_lock *);
 extern int posix_unblock_lock(struct file *, struct file_lock *);
@@ -870,6 +879,7 @@
 extern int __break_lease(struct inode *inode, unsigned int flags);
 extern void lease_get_mtime(struct inode *, struct timespec *time);
 extern int setlease(struct file *, long, struct file_lock **);
+extern int vfs_setlease(struct file *, long, struct file_lock **);
 extern int lease_modify(struct file_lock **, int);
 extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
 extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
@@ -988,6 +998,9 @@
 #define put_fs_excl() atomic_dec(&current->fs_excl)
 #define has_fs_excl() atomic_read(&current->fs_excl)
 
+#define is_owner_or_cap(inode)	\
+	((current->fsuid == (inode)->i_uid) || capable(CAP_FOWNER))
+
 /* not quite ready to be deprecated, but... */
 extern void lock_super(struct super_block *);
 extern void unlock_super(struct super_block *);
@@ -1116,6 +1129,7 @@
 	int (*flock) (struct file *, int, struct file_lock *);
 	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
 	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
+	int (*setlease)(struct file *, long, struct file_lock **);
 };
 
 struct inode_operations {
@@ -1141,6 +1155,8 @@
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
 	void (*truncate_range)(struct inode *, loff_t, loff_t);
+	long (*fallocate)(struct inode *inode, int mode, loff_t offset,
+			  loff_t len);
 };
 
 struct seq_file;
@@ -1277,119 +1293,6 @@
 
 int sync_inode(struct inode *inode, struct writeback_control *wbc);
 
-/**
- * struct export_operations - for nfsd to communicate with file systems
- * @decode_fh:      decode a file handle fragment and return a &struct dentry
- * @encode_fh:      encode a file handle fragment from a dentry
- * @get_name:       find the name for a given inode in a given directory
- * @get_parent:     find the parent of a given directory
- * @get_dentry:     find a dentry for the inode given a file handle sub-fragment
- * @find_exported_dentry:
- *	set by the exporting module to a standard helper function.
- *
- * Description:
- *    The export_operations structure provides a means for nfsd to communicate
- *    with a particular exported file system  - particularly enabling nfsd and
- *    the filesystem to co-operate when dealing with file handles.
- *
- *    export_operations contains two basic operation for dealing with file
- *    handles, decode_fh() and encode_fh(), and allows for some other
- *    operations to be defined which standard helper routines use to get
- *    specific information from the filesystem.
- *
- *    nfsd encodes information use to determine which filesystem a filehandle
- *    applies to in the initial part of the file handle.  The remainder, termed
- *    a file handle fragment, is controlled completely by the filesystem.  The
- *    standard helper routines assume that this fragment will contain one or
- *    two sub-fragments, one which identifies the file, and one which may be
- *    used to identify the (a) directory containing the file.
- *
- *    In some situations, nfsd needs to get a dentry which is connected into a
- *    specific part of the file tree.  To allow for this, it passes the
- *    function acceptable() together with a @context which can be used to see
- *    if the dentry is acceptable.  As there can be multiple dentrys for a
- *    given file, the filesystem should check each one for acceptability before
- *    looking for the next.  As soon as an acceptable one is found, it should
- *    be returned.
- *
- * decode_fh:
- *    @decode_fh is given a &struct super_block (@sb), a file handle fragment
- *    (@fh, @fh_len) and an acceptability testing function (@acceptable,
- *    @context).  It should return a &struct dentry which refers to the same
- *    file that the file handle fragment refers to,  and which passes the
- *    acceptability test.  If it cannot, it should return a %NULL pointer if
- *    the file was found but no acceptable &dentries were available, or a
- *    %ERR_PTR error code indicating why it couldn't be found (e.g. %ENOENT or
- *    %ENOMEM).
- *
- * encode_fh:
- *    @encode_fh should store in the file handle fragment @fh (using at most
- *    @max_len bytes) information that can be used by @decode_fh to recover the
- *    file refered to by the &struct dentry @de.  If the @connectable flag is
- *    set, the encode_fh() should store sufficient information so that a good
- *    attempt can be made to find not only the file but also it's place in the
- *    filesystem.   This typically means storing a reference to de->d_parent in
- *    the filehandle fragment.  encode_fh() should return the number of bytes
- *    stored or a negative error code such as %-ENOSPC
- *
- * get_name:
- *    @get_name should find a name for the given @child in the given @parent
- *    directory.  The name should be stored in the @name (with the
- *    understanding that it is already pointing to a a %NAME_MAX+1 sized
- *    buffer.   get_name() should return %0 on success, a negative error code
- *    or error.  @get_name will be called without @parent->i_mutex held.
- *
- * get_parent:
- *    @get_parent should find the parent directory for the given @child which
- *    is also a directory.  In the event that it cannot be found, or storage
- *    space cannot be allocated, a %ERR_PTR should be returned.
- *
- * get_dentry:
- *    Given a &super_block (@sb) and a pointer to a file-system specific inode
- *    identifier, possibly an inode number, (@inump) get_dentry() should find
- *    the identified inode and return a dentry for that inode.  Any suitable
- *    dentry can be returned including, if necessary, a new dentry created with
- *    d_alloc_root.  The caller can then find any other extant dentrys by
- *    following the d_alias links.  If a new dentry was created using
- *    d_alloc_root, DCACHE_NFSD_DISCONNECTED should be set, and the dentry
- *    should be d_rehash()ed.
- *
- *    If the inode cannot be found, either a %NULL pointer or an %ERR_PTR code
- *    can be returned.  The @inump will be whatever was passed to
- *    nfsd_find_fh_dentry() in either the @obj or @parent parameters.
- *
- * Locking rules:
- *    get_parent is called with child->d_inode->i_mutex down
- *    get_name is not (which is possibly inconsistent)
- */
-
-struct export_operations {
-	struct dentry *(*decode_fh)(struct super_block *sb, __u32 *fh, int fh_len, int fh_type,
-			 int (*acceptable)(void *context, struct dentry *de),
-			 void *context);
-	int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
-			 int connectable);
-
-	/* the following are only called from the filesystem itself */
-	int (*get_name)(struct dentry *parent, char *name,
-			struct dentry *child);
-	struct dentry * (*get_parent)(struct dentry *child);
-	struct dentry * (*get_dentry)(struct super_block *sb, void *inump);
-
-	/* This is set by the exporting module to a standard helper */
-	struct dentry * (*find_exported_dentry)(
-		struct super_block *sb, void *obj, void *parent,
-		int (*acceptable)(void *context, struct dentry *de),
-		void *context);
-
-
-};
-
-extern struct dentry *
-find_exported_dentry(struct super_block *sb, void *obj, void *parent,
-		     int (*acceptable)(void *context, struct dentry *de),
-		     void *context);
-
 struct file_system_type {
 	const char *name;
 	int fs_flags;
@@ -1526,7 +1429,7 @@
 
 #ifdef CONFIG_BLOCK
 extern int register_blkdev(unsigned int, const char *);
-extern int unregister_blkdev(unsigned int, const char *);
+extern void unregister_blkdev(unsigned int, const char *);
 extern struct block_device *bdget(dev_t);
 extern void bd_set_size(struct block_device *, loff_t size);
 extern void bd_forget(struct inode *inode);
@@ -1566,7 +1469,7 @@
 extern int register_chrdev_region(dev_t, unsigned, const char *);
 extern int register_chrdev(unsigned int, const char *,
 			   const struct file_operations *);
-extern int unregister_chrdev(unsigned int, const char *);
+extern void unregister_chrdev(unsigned int, const char *);
 extern void unregister_chrdev_region(dev_t, unsigned);
 extern int chrdev_open(struct inode *, struct file *);
 extern void chrdev_show(struct seq_file *,off_t);
@@ -2050,5 +1953,9 @@
 { }
 #endif	/* CONFIG_SECURITY */
 
+int proc_nr_files(ctl_table *table, int write, struct file *filp,
+		  void __user *buffer, size_t *lenp, loff_t *ppos);
+
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_FS_H */
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 73710d6..1831b19 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -53,6 +53,7 @@
 	u32	bus_id;
 	u32	phy_id;
 	u8	mac_addr[6];
+	phy_interface_t interface;
 };
 
 struct gianfar_mdio_data {
@@ -112,7 +113,7 @@
 struct fsl_spi_platform_data {
 	u32 	initial_spmode;	/* initial SPMODE value */
 	u16	bus_num;
-
+	bool	qe_mode;
 	/* board specific information */
 	u16	max_chipselect;
 	void	(*activate_cs)(u8 cs, u8 polarity);
@@ -120,5 +121,10 @@
 	u32	sysclk;
 };
 
+struct mpc8xx_pcmcia_ops {
+	void(*hw_ctrl)(int slot, int enable);
+	int(*voltage_set)(int slot, int vcc, int vpp);
+};
+
 #endif /* _FSL_DEVICE_H_ */
 #endif /* __KERNEL__ */
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index f7a9377..7da02c9 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -39,6 +39,9 @@
 	CTRL_CMD_NEWOPS,
 	CTRL_CMD_DELOPS,
 	CTRL_CMD_GETOPS,
+	CTRL_CMD_NEWMCAST_GRP,
+	CTRL_CMD_DELMCAST_GRP,
+	CTRL_CMD_GETMCAST_GRP, /* unused */
 	__CTRL_CMD_MAX,
 };
 
@@ -52,6 +55,7 @@
 	CTRL_ATTR_HDRSIZE,
 	CTRL_ATTR_MAXATTR,
 	CTRL_ATTR_OPS,
+	CTRL_ATTR_MCAST_GROUPS,
 	__CTRL_ATTR_MAX,
 };
 
@@ -66,4 +70,13 @@
 
 #define CTRL_ATTR_OP_MAX (__CTRL_ATTR_OP_MAX - 1)
 
+enum {
+	CTRL_ATTR_MCAST_GRP_UNSPEC,
+	CTRL_ATTR_MCAST_GRP_NAME,
+	CTRL_ATTR_MCAST_GRP_ID,
+	__CTRL_ATTR_MCAST_GRP_MAX,
+};
+
+#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1)
+
 #endif	/* __LINUX_GENERIC_NETLINK_H */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0d2ef0b..bc68dd9 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -30,6 +30,9 @@
  * cannot handle allocation failures.
  *
  * __GFP_NORETRY: The VM implementation must not retry indefinitely.
+ *
+ * __GFP_MOVABLE: Flag that this page will be movable by the page migration
+ * mechanism or reclaimed
  */
 #define __GFP_WAIT	((__force gfp_t)0x10u)	/* Can wait and reschedule? */
 #define __GFP_HIGH	((__force gfp_t)0x20u)	/* Should access emergency pools? */
@@ -45,6 +48,7 @@
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
 #define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
+#define __GFP_MOVABLE	((__force gfp_t)0x80000u) /* Page is movable */
 
 #define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
@@ -53,7 +57,8 @@
 #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
 			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
 			__GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \
-			__GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE)
+			__GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE| \
+			__GFP_MOVABLE)
 
 /* This equals 0, but use constants in case they ever change */
 #define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
@@ -65,6 +70,15 @@
 #define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
 #define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
 			 __GFP_HIGHMEM)
+#define GFP_HIGHUSER_MOVABLE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
+				 __GFP_HARDWALL | __GFP_HIGHMEM | \
+				 __GFP_MOVABLE)
+#define GFP_NOFS_PAGECACHE	(__GFP_WAIT | __GFP_IO | __GFP_MOVABLE)
+#define GFP_USER_PAGECACHE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
+				 __GFP_HARDWALL | __GFP_MOVABLE)
+#define GFP_HIGHUSER_PAGECACHE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
+				 __GFP_HARDWALL | __GFP_HIGHMEM | \
+				 __GFP_MOVABLE)
 
 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
@@ -92,6 +106,9 @@
 	if (flags & __GFP_DMA32)
 		return ZONE_DMA32;
 #endif
+	if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
+			(__GFP_HIGHMEM | __GFP_MOVABLE))
+		return ZONE_MOVABLE;
 #ifdef CONFIG_HIGHMEM
 	if (flags & __GFP_HIGHMEM)
 		return ZONE_HIGHMEM;
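
The gfp_zone() hunk above only routes an allocation to ZONE_MOVABLE when both __GFP_HIGHMEM and __GFP_MOVABLE are set, which is exactly the combination the new GFP_HIGHUSER_MOVABLE provides. A minimal sketch of a caller (the helper name is made up):

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Sketch: request a page the VM may later migrate or reclaim.
	 * GFP_HIGHUSER_MOVABLE sets both __GFP_HIGHMEM and __GFP_MOVABLE,
	 * so gfp_zone() resolves it to ZONE_MOVABLE. */
	static struct page *grab_movable_page(void)
	{
		return alloc_page(GFP_HIGHUSER_MOVABLE);
	}
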
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 98e2cce..1fcb003 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -73,10 +73,27 @@
 }
 
 #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+/**
+ * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
+ * @movableflags: The GFP flags related to the page's future ability to move, e.g. __GFP_MOVABLE
+ * @vma: The VMA the page is to be allocated for
+ * @vaddr: The virtual address the page will be inserted into
+ *
+ * This function will allocate a page for a VMA but the caller is expected
+ * to specify via movableflags whether the page will be movable in the
+ * future or not
+ *
+ * An architecture may override this function by defining
+ * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
+ * implementation.
+ */
 static inline struct page *
-alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
+__alloc_zeroed_user_highpage(gfp_t movableflags,
+			struct vm_area_struct *vma,
+			unsigned long vaddr)
 {
-	struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, vaddr);
+	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
+			vma, vaddr);
 
 	if (page)
 		clear_user_highpage(page, vaddr);
@@ -85,6 +102,21 @@
 }
 #endif
 
+/**
+ * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
+ * @vma: The VMA the page is to be allocated for
+ * @vaddr: The virtual address the page will be inserted into
+ *
+ * This function will allocate a page for a VMA that the caller knows will
+ * be able to migrate in the future using move_pages(), or be reclaimed
+ */
+static inline struct page *
+alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+					unsigned long vaddr)
+{
+	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+}
+
 static inline void clear_highpage(struct page *page)
 {
 	void *kaddr = kmap_atomic(page, KM_USER0);
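
alloc_zeroed_user_highpage_movable() is intended for anonymous-page fault paths, where the newly allocated page can always be migrated later; a hedged sketch of such a caller (the function name is illustrative):

	#include <linux/highmem.h>
	#include <linux/mm.h>

	/* Sketch: hand an anonymous fault a pre-zeroed page the VM is free
	 * to move; the helper simply ORs __GFP_MOVABLE into GFP_HIGHUSER. */
	static struct page *my_anon_page(struct vm_area_struct *vma,
					 unsigned long address)
	{
		return alloc_zeroed_user_highpage_movable(vma, address);
	}
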
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 2c13715..49b7053 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -15,6 +15,7 @@
 }
 
 int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
+int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
 void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
@@ -29,6 +30,7 @@
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
 
 extern unsigned long max_huge_pages;
+extern unsigned long hugepages_treat_as_movable;
 extern const unsigned long hugetlb_zero, hugetlb_infinity;
 extern int sysctl_hugetlb_shm_group;
 
diff --git a/include/linux/i2c-isa.h b/include/linux/i2c-isa.h
deleted file mode 100644
index 67e3598..0000000
--- a/include/linux/i2c-isa.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * i2c-isa.h - definitions for the i2c-isa pseudo-i2c-adapter interface
- *
- * Copyright (C) 2005 Jean Delvare <khali@linux-fr.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef _LINUX_I2C_ISA_H
-#define _LINUX_I2C_ISA_H
-
-#include <linux/i2c.h>
-
-extern int i2c_isa_add_driver(struct i2c_driver *driver);
-extern int i2c_isa_del_driver(struct i2c_driver *driver);
-
-/* Detect whether we are on the isa bus. This is only useful to hybrid
-   (i2c+isa) drivers. */
-#define i2c_is_isa_adapter(adapptr) \
-        ((adapptr)->id == I2C_HW_ISA)
-#define i2c_is_isa_client(clientptr) \
-        i2c_is_isa_adapter((clientptr)->adapter)
-
-#endif /* _LINUX_I2C_ISA_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 2eaba21..0c37a73 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -368,7 +368,6 @@
 
 /* The numbers to use to set I2C bus address */
 #define ANY_I2C_BUS		0xffff
-#define ANY_I2C_ISA_BUS		9191
 
 
 /* ----- functions exported by i2c.o */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 0e0fedd..260d6d7 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -50,14 +50,14 @@
  */
 #define JBD_DEFAULT_MAX_COMMIT_AGE 5
 
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
 /*
  * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
  * consistency checks.  By default we don't do this unless
- * CONFIG_JBD_DEBUG is on.
+ * CONFIG_JBD2_DEBUG is on.
  */
 #define JBD_EXPENSIVE_CHECKING
-extern int jbd2_journal_enable_debug;
+extern u8 jbd2_journal_enable_debug;
 
 #define jbd_debug(n, f, a...)						\
 	do {								\
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 5f06527..f73de6f 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -7,9 +7,9 @@
 
 #include <linux/errno.h>
 
-#define KSYM_NAME_LEN 127
-#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + KSYM_NAME_LEN +	\
-			 2*(BITS_PER_LONG*3/10) + MODULE_NAME_LEN + 1)
+#define KSYM_NAME_LEN 128
+#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
+			 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
 
 #ifdef CONFIG_KALLSYMS
 /* Lookup the address for a symbol. Returns 0 if not found. */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 7a48525..1eb9cde 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -210,6 +210,7 @@
 #define TAINT_MACHINE_CHECK		(1<<4)
 #define TAINT_BAD_PAGE			(1<<5)
 #define TAINT_USER			(1<<6)
+#define TAINT_DIE			(1<<7)
 
 extern void dump_stack(void);
 
diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h
index aea34e74..8c4350a 100644
--- a/include/linux/kernelcapi.h
+++ b/include/linux/kernelcapi.h
@@ -64,7 +64,7 @@
 	unsigned long nrecvdatapkt;
 	unsigned long nsentctlpkt;
 	unsigned long nsentdatapkt;
-	struct semaphore recv_sem;
+	struct mutex recv_mtx;
 	struct sk_buff_head recv_queue;
 	struct work_struct recv_work;
 	int release_in_progress;
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 10f505c..5dc1384 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -36,13 +36,57 @@
 #define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x)))
 
 struct key;
-extern int call_usermodehelper_keys(char *path, char *argv[], char *envp[],
-				    struct key *session_keyring, int wait);
+struct file;
+struct subprocess_info;
+
+/* Allocate a subprocess_info structure */
+struct subprocess_info *call_usermodehelper_setup(char *path,
+						  char **argv, char **envp);
+
+/* Set various pieces of state into the subprocess_info structure */
+void call_usermodehelper_setkeys(struct subprocess_info *info,
+				 struct key *session_keyring);
+int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
+				  struct file **filp);
+void call_usermodehelper_setcleanup(struct subprocess_info *info,
+				    void (*cleanup)(char **argv, char **envp));
+
+enum umh_wait {
+	UMH_NO_WAIT = -1,	/* don't wait at all */
+	UMH_WAIT_EXEC = 0,	/* wait for the exec, but not the process */
+	UMH_WAIT_PROC = 1,	/* wait for the process to complete */
+};
+
+/* Actually execute the sub-process */
+int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
+
+/* Free the subprocess_info. This is only needed if you're not going
+   to call call_usermodehelper_exec */
+void call_usermodehelper_freeinfo(struct subprocess_info *info);
 
 static inline int
-call_usermodehelper(char *path, char **argv, char **envp, int wait)
+call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
 {
-	return call_usermodehelper_keys(path, argv, envp, NULL, wait);
+	struct subprocess_info *info;
+
+	info = call_usermodehelper_setup(path, argv, envp);
+	if (info == NULL)
+		return -ENOMEM;
+	return call_usermodehelper_exec(info, wait);
+}
+
+static inline int
+call_usermodehelper_keys(char *path, char **argv, char **envp,
+			 struct key *session_keyring, enum umh_wait wait)
+{
+	struct subprocess_info *info;
+
+	info = call_usermodehelper_setup(path, argv, envp);
+	if (info == NULL)
+		return -ENOMEM;
+
+	call_usermodehelper_setkeys(info, session_keyring);
+	return call_usermodehelper_exec(info, wait);
 }
 
 extern void usermodehelper_init(void);
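
With the helper split apart as above, call_usermodehelper() and call_usermodehelper_keys() become thin wrappers, and callers that need session keys, a stdin pipe, or a cleanup callback can set those up between setup and exec. A sketch under those assumptions (the helper path and arguments are made up):

	#include <linux/kmod.h>

	static int run_my_helper(void)
	{
		char *argv[] = { "/sbin/my-helper", "start", NULL };	/* hypothetical */
		char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
		struct subprocess_info *info;

		info = call_usermodehelper_setup(argv[0], argv, envp);
		if (info == NULL)
			return -ENOMEM;

		/* Optionally attach keys, a stdin pipe, or a cleanup callback
		 * here before executing. */
		return call_usermodehelper_exec(info, UMH_WAIT_PROC);
	}
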
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 06cbf41..aa2fe22 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -36,15 +36,24 @@
 /* counter to tag the uevent, read only except for the kobject core */
 extern u64 uevent_seqnum;
 
-/* the actions here must match the proper string in lib/kobject_uevent.c */
-typedef int __bitwise kobject_action_t;
+/*
+ * The actions here must match the indices into the string array
+ * in lib/kobject_uevent.c
+ *
+ * Do not add new actions here without checking with the driver-core
+ * maintainers. Action strings are not meant to express subsystem
+ * or device specific properties. In most cases you want to send a
+ * kobject_uevent_env(kobj, KOBJ_CHANGE, env) with additional event
+ * specific variables added to the event environment.
+ */
 enum kobject_action {
-	KOBJ_ADD	= (__force kobject_action_t) 0x01,	/* exclusive to core */
-	KOBJ_REMOVE	= (__force kobject_action_t) 0x02,	/* exclusive to core */
-	KOBJ_CHANGE	= (__force kobject_action_t) 0x03,	/* device state change */
-	KOBJ_OFFLINE	= (__force kobject_action_t) 0x04,	/* device offline */
-	KOBJ_ONLINE	= (__force kobject_action_t) 0x05,	/* device online */
-	KOBJ_MOVE	= (__force kobject_action_t) 0x06,	/* device move */
+	KOBJ_ADD,
+	KOBJ_REMOVE,
+	KOBJ_CHANGE,
+	KOBJ_MOVE,
+	KOBJ_ONLINE,
+	KOBJ_OFFLINE,
+	KOBJ_MAX
 };
 
 struct kobject {
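
As the comment above says, event-specific detail belongs in the environment of a KOBJ_CHANGE event rather than in a new action. A small sketch, assuming the kobject_uevent_env() helper from lib/kobject_uevent.c (the variable names are made up):

	#include <linux/kobject.h>

	/* Sketch: report a device-specific condition without inventing a
	 * new kobject_action; extra variables ride in the event environment. */
	static void report_device_error(struct kobject *kobj)
	{
		char *envp[] = { "ERROR=1", "REASON=overheat", NULL };

		kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
	}
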
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 23adf60..51464d1 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -116,9 +116,12 @@
  */
 struct jprobe {
 	struct kprobe kp;
-	kprobe_opcode_t *entry;	/* probe handling code to jump to */
+	void *entry;	/* probe handling code to jump to */
 };
 
+/* For backward compatibility with old code using JPROBE_ENTRY() */
+#define JPROBE_ENTRY(handler)	(handler)
+
 DECLARE_PER_CPU(struct kprobe *, current_kprobe);
 DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
@@ -211,6 +214,7 @@
 int register_jprobe(struct jprobe *p);
 void unregister_jprobe(struct jprobe *p);
 void jprobe_return(void);
+unsigned long arch_deref_entry_point(void *);
 
 int register_kretprobe(struct kretprobe *rp);
 void unregister_kretprobe(struct kretprobe *rp);
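
With jprobe.entry now a plain void *, handlers can be assigned directly and JPROBE_ENTRY() remains only for existing code. A hedged sketch of a jprobe (the probed symbol and handler body are illustrative):

	#include <linux/kprobes.h>

	/* The handler must mirror the probed function's signature and must
	 * end with jprobe_return(). */
	static long my_do_sys_open(int dfd, const char __user *filename,
				   int flags, int mode)
	{
		printk(KERN_DEBUG "open: flags=%x mode=%x\n", flags, mode);
		jprobe_return();
		return 0;	/* never reached */
	}

	static struct jprobe my_jprobe = {
		.entry	= my_do_sys_open,	/* no JPROBE_ENTRY() cast needed */
		.kp	= { .symbol_name = "do_sys_open" },
	};

	/* register_jprobe(&my_jprobe) at module init,
	 * unregister_jprobe(&my_jprobe) at exit. */
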
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
new file mode 100644
index 0000000..500aace
--- /dev/null
+++ b/include/linux/lguest.h
@@ -0,0 +1,85 @@
+/* Things the lguest guest needs to know.  Note: like all lguest interfaces,
+ * this is subject to wild and random change between versions. */
+#ifndef _ASM_LGUEST_H
+#define _ASM_LGUEST_H
+
+#ifndef __ASSEMBLY__
+#include <asm/irq.h>
+
+#define LHCALL_FLUSH_ASYNC	0
+#define LHCALL_LGUEST_INIT	1
+#define LHCALL_CRASH		2
+#define LHCALL_LOAD_GDT		3
+#define LHCALL_NEW_PGTABLE	4
+#define LHCALL_FLUSH_TLB	5
+#define LHCALL_LOAD_IDT_ENTRY	6
+#define LHCALL_SET_STACK	7
+#define LHCALL_TS		8
+#define LHCALL_SET_CLOCKEVENT	9
+#define LHCALL_HALT		10
+#define LHCALL_GET_WALLCLOCK	11
+#define LHCALL_BIND_DMA		12
+#define LHCALL_SEND_DMA		13
+#define LHCALL_SET_PTE		14
+#define LHCALL_SET_PMD		15
+#define LHCALL_LOAD_TLS		16
+
+#define LG_CLOCK_MIN_DELTA	100UL
+#define LG_CLOCK_MAX_DELTA	ULONG_MAX
+
+#define LGUEST_TRAP_ENTRY 0x1F
+
+static inline unsigned long
+hcall(unsigned long call,
+      unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+	asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
+		     : "=a"(call)
+		     : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3)
+		     : "memory");
+	return call;
+}
+
+void async_hcall(unsigned long call,
+		 unsigned long arg1, unsigned long arg2, unsigned long arg3);
+
+/* Can't use our min() macro here: needs to be a constant */
+#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
+
+#define LHCALL_RING_SIZE 64
+struct hcall_ring
+{
+	u32 eax, edx, ebx, ecx;
+};
+
+/* All the good stuff happens here: guest registers it with LGUEST_INIT */
+struct lguest_data
+{
+/* Fields which change during running: */
+	/* 512 == enabled (same as eflags) */
+	unsigned int irq_enabled;
+	/* Interrupts blocked by guest. */
+	DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS);
+
+	/* Virtual address of page fault. */
+	unsigned long cr2;
+
+	/* Async hypercall ring.  0xFF == done, 0 == pending. */
+	u8 hcall_status[LHCALL_RING_SIZE];
+	struct hcall_ring hcalls[LHCALL_RING_SIZE];
+
+/* Fields initialized by the hypervisor at boot: */
+	/* Memory not to try to access */
+	unsigned long reserve_mem;
+	/* ID of this guest (used by network driver to set ethernet address) */
+	u16 guestid;
+	/* KHz for the TSC clock. */
+	u32 tsc_khz;
+
+/* Fields initialized by the guest at boot: */
+	/* Instruction range to suppress interrupts even if enabled */
+	unsigned long noirq_start, noirq_end;
+};
+extern struct lguest_data lguest_data;
+#endif /* __ASSEMBLY__ */
+#endif	/* _ASM_LGUEST_H */
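
hcall() traps to the host through LGUEST_TRAP_ENTRY with the call number in %eax and up to three arguments; unused arguments are simply passed as zero. A tiny guest-side sketch (purely illustrative):

	#include <linux/lguest.h>

	static void guest_flush_tlb(void)
	{
		hcall(LHCALL_FLUSH_TLB, 0, 0, 0);
	}

	static void guest_halt(void)
	{
		hcall(LHCALL_HALT, 0, 0, 0);
	}
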
diff --git a/include/linux/lguest_bus.h b/include/linux/lguest_bus.h
new file mode 100644
index 0000000..c9b4e05f
--- /dev/null
+++ b/include/linux/lguest_bus.h
@@ -0,0 +1,48 @@
+#ifndef _ASM_LGUEST_DEVICE_H
+#define _ASM_LGUEST_DEVICE_H
+/* Everything you need to know about lguest devices. */
+#include <linux/device.h>
+#include <linux/lguest.h>
+#include <linux/lguest_launcher.h>
+
+struct lguest_device {
+	/* Unique busid, and index into lguest_page->devices[] */
+	unsigned int index;
+
+	struct device dev;
+
+	/* Driver can hang data off here. */
+	void *private;
+};
+
+/* By convention, each device can use irq index+1 if it wants to. */
+static inline int lgdev_irq(const struct lguest_device *dev)
+{
+	return dev->index + 1;
+}
+
+/* dma args must not be vmalloced! */
+void lguest_send_dma(unsigned long key, struct lguest_dma *dma);
+int lguest_bind_dma(unsigned long key, struct lguest_dma *dmas,
+		    unsigned int num, u8 irq);
+void lguest_unbind_dma(unsigned long key, struct lguest_dma *dmas);
+
+/* Map the virtual device space */
+void *lguest_map(unsigned long phys_addr, unsigned long pages);
+void lguest_unmap(void *);
+
+struct lguest_driver {
+	const char *name;
+	struct module *owner;
+	u16 device_type;
+	int (*probe)(struct lguest_device *dev);
+	void (*remove)(struct lguest_device *dev);
+
+	struct device_driver drv;
+};
+
+extern int register_lguest_driver(struct lguest_driver *drv);
+extern void unregister_lguest_driver(struct lguest_driver *drv);
+
+extern struct lguest_device_desc *lguest_devices; /* Just past max_pfn */
+#endif /* _ASM_LGUEST_DEVICE_H */
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
new file mode 100644
index 0000000..0ba414a
--- /dev/null
+++ b/include/linux/lguest_launcher.h
@@ -0,0 +1,73 @@
+#ifndef _ASM_LGUEST_USER
+#define _ASM_LGUEST_USER
+/* Everything the "lguest" userspace program needs to know. */
+/* They can register up to 32 arrays of lguest_dma. */
+#define LGUEST_MAX_DMA		32
+/* At most we can dma 16 lguest_dma in one op. */
+#define LGUEST_MAX_DMA_SECTIONS	16
+
+/* How many devices?  Assume each one wants up to two dma arrays per device. */
+#define LGUEST_MAX_DEVICES (LGUEST_MAX_DMA/2)
+
+struct lguest_dma
+{
+	/* 0 if free to be used, filled by hypervisor. */
+ 	u32 used_len;
+	unsigned long addr[LGUEST_MAX_DMA_SECTIONS];
+	u16 len[LGUEST_MAX_DMA_SECTIONS];
+};
+
+struct lguest_block_page
+{
+	/* 0 is a read, 1 is a write. */
+	int type;
+	u32 sector; 	/* Offset in device = sector * 512. */
+	u32 bytes;	/* Length expected to be read/written in bytes */
+	/* 0 = pending, 1 = done, 2 = done, error */
+	int result;
+	u32 num_sectors; /* Disk length = num_sectors * 512 */
+};
+
+/* There is a shared page of these. */
+struct lguest_net
+{
+	/* Simply the mac address (with multicast bit meaning promisc). */
+	unsigned char mac[6];
+};
+
+/* Where the Host expects the Guest to SEND_DMA console output to. */
+#define LGUEST_CONSOLE_DMA_KEY 0
+
+/* We have a page of these descriptors in the lguest_device page. */
+struct lguest_device_desc {
+	u16 type;
+#define LGUEST_DEVICE_T_CONSOLE	1
+#define LGUEST_DEVICE_T_NET	2
+#define LGUEST_DEVICE_T_BLOCK	3
+
+	u16 features;
+#define LGUEST_NET_F_NOCSUM		0x4000 /* Don't bother checksumming */
+#define LGUEST_DEVICE_F_RANDOMNESS	0x8000 /* IRQ is fairly random */
+
+	u16 status;
+/* 256 and above are device specific. */
+#define LGUEST_DEVICE_S_ACKNOWLEDGE	1 /* We have seen device. */
+#define LGUEST_DEVICE_S_DRIVER		2 /* We have found a driver */
+#define LGUEST_DEVICE_S_DRIVER_OK	4 /* Driver says OK! */
+#define LGUEST_DEVICE_S_REMOVED		8 /* Device has gone away. */
+#define LGUEST_DEVICE_S_REMOVED_ACK	16 /* Driver has been told. */
+#define LGUEST_DEVICE_S_FAILED		128 /* Something actually failed */
+
+	u16 num_pages;
+	u32 pfn;
+};
+
+/* Write command first word is a request. */
+enum lguest_req
+{
+	LHREQ_INITIALIZE, /* + pfnlimit, pgdir, start, pageoffset */
+	LHREQ_GETDMA, /* + addr (returns &lguest_dma, irq in ->used_len) */
+	LHREQ_IRQ, /* + irq */
+	LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */
+};
+#endif /* _ASM_LGUEST_USER */
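
The status field is a small handshake the guest walks through while binding a driver to a device descriptor; a sketch of that progression (error handling omitted, and a real driver would set LGUEST_DEVICE_S_FAILED on a probe error instead):

	#include <linux/lguest_launcher.h>

	static void mark_device_ready(struct lguest_device_desc *desc)
	{
		desc->status |= LGUEST_DEVICE_S_ACKNOWLEDGE;	/* we saw it */
		desc->status |= LGUEST_DEVICE_S_DRIVER;		/* driver found */
		desc->status |= LGUEST_DEVICE_S_DRIVER_OK;	/* driver is up */
	}
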
diff --git a/include/linux/limits.h b/include/linux/limits.h
index eaf2e09..2d0f941 100644
--- a/include/linux/limits.h
+++ b/include/linux/limits.h
@@ -5,8 +5,6 @@
 
 #define NGROUPS_MAX    65536	/* supplemental group IDs are available */
 #define ARG_MAX       131072	/* # bytes of args + environ for exec() */
-#define CHILD_MAX        999    /* no limit :-) */
-#define OPEN_MAX         256	/* # open files a process may have */
 #define LINK_MAX         127	/* # links a file may have */
 #define MAX_CANON        255	/* size of the canonical input queue */
 #define MAX_INPUT        255	/* size of the type-ahead buffer */
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h
index 9c01bde..08a9296 100644
--- a/include/linux/linux_logo.h
+++ b/include/linux/linux_logo.h
@@ -33,5 +33,13 @@
 };
 
 extern const struct linux_logo *fb_find_logo(int depth);
+#ifdef CONFIG_FB_LOGO_EXTRA
+extern void fb_append_extra_logo(const struct linux_logo *logo,
+				 unsigned int n);
+#else
+static inline void fb_append_extra_logo(const struct linux_logo *logo,
+					unsigned int n)
+{}
+#endif
 
 #endif /* _LINUX_LINUX_LOGO_H */
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 246de1d..6f1637c 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -27,6 +27,7 @@
 						struct nfs_fh *,
 						struct file **);
 	void			(*fclose)(struct file *);
+	unsigned long		(*get_grace_period)(void);
 };
 
 extern struct nlmsvc_binding *	nlmsvc_ops;
@@ -38,4 +39,12 @@
 extern int	lockd_up(int proto);
 extern void	lockd_down(void);
 
+unsigned long get_nfs_grace_period(void);
+
+#ifdef CONFIG_NFSD_V4
+unsigned long get_nfs4_grace_period(void);
+#else
+static inline unsigned long get_nfs4_grace_period(void) {return 0;}
+#endif
+
 #endif /* LINUX_LOCKD_BIND_H */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 14c937d..0e843bf 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -1,7 +1,8 @@
 /*
  * Runtime locking correctness validator
  *
- *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  *
  * see Documentation/lockdep-design.txt for more details.
  */
@@ -9,6 +10,7 @@
 #define __LINUX_LOCKDEP_H
 
 struct task_struct;
+struct lockdep_map;
 
 #ifdef CONFIG_LOCKDEP
 
@@ -114,8 +116,44 @@
 
 	const char			*name;
 	int				name_version;
+
+#ifdef CONFIG_LOCK_STAT
+	unsigned long			contention_point[4];
+#endif
 };
 
+#ifdef CONFIG_LOCK_STAT
+struct lock_time {
+	s64				min;
+	s64				max;
+	s64				total;
+	unsigned long			nr;
+};
+
+enum bounce_type {
+	bounce_acquired_write,
+	bounce_acquired_read,
+	bounce_contended_write,
+	bounce_contended_read,
+	nr_bounce_types,
+
+	bounce_acquired = bounce_acquired_write,
+	bounce_contended = bounce_contended_write,
+};
+
+struct lock_class_stats {
+	unsigned long			contention_point[4];
+	struct lock_time		read_waittime;
+	struct lock_time		write_waittime;
+	struct lock_time		read_holdtime;
+	struct lock_time		write_holdtime;
+	unsigned long			bounces[nr_bounce_types];
+};
+
+struct lock_class_stats lock_stats(struct lock_class *class);
+void clear_lock_stats(struct lock_class *class);
+#endif
+
 /*
  * Map the lock object (the lock instance) to the lock-class object.
  * This is embedded into specific lock instances:
@@ -124,6 +162,9 @@
 	struct lock_class_key		*key;
 	struct lock_class		*class_cache;
 	const char			*name;
+#ifdef CONFIG_LOCK_STAT
+	int				cpu;
+#endif
 };
 
 /*
@@ -165,6 +206,10 @@
 	unsigned long			acquire_ip;
 	struct lockdep_map		*instance;
 
+#ifdef CONFIG_LOCK_STAT
+	u64 				waittime_stamp;
+	u64				holdtime_stamp;
+#endif
 	/*
 	 * The lock-stack is unified in that the lock chains of interrupt
 	 * contexts nest ontop of process context chains, but we 'separate'
@@ -281,6 +326,30 @@
 
 #endif /* !LOCKDEP */
 
+#ifdef CONFIG_LOCK_STAT
+
+extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
+extern void lock_acquired(struct lockdep_map *lock);
+
+#define LOCK_CONTENDED(_lock, try, lock)			\
+do {								\
+	if (!try(_lock)) {					\
+		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
+		lock(_lock);					\
+	}							\
+	lock_acquired(&(_lock)->dep_map);			\
+} while (0)
+
+#else /* CONFIG_LOCK_STAT */
+
+#define lock_contended(lockdep_map, ip) do {} while (0)
+#define lock_acquired(lockdep_map) do {} while (0)
+
+#define LOCK_CONTENDED(_lock, try, lock) \
+	lock(_lock)
+
+#endif /* CONFIG_LOCK_STAT */
+
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
 extern void early_init_irq_lock_class(void);
 #else
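
LOCK_CONTENDED() lets a locking primitive's slow path feed the new lock statistics: a failed trylock records a contention event, and lock_acquired() stamps the eventual acquisition; with CONFIG_LOCK_STAT off it collapses to a plain lock call. A sketch with a hypothetical primitive that embeds a dep_map:

	#include <linux/lockdep.h>

	/* Hypothetical lock type; LOCK_CONTENDED() only requires a
	 * struct lockdep_map member named dep_map. */
	struct my_lock {
		int			raw;
		struct lockdep_map	dep_map;
	};

	int my_trylock(struct my_lock *l);	/* fast path, defined elsewhere */
	void my_lock_slow(struct my_lock *l);	/* may block, defined elsewhere */

	static inline void my_lock(struct my_lock *l)
	{
		LOCK_CONTENDED(l, my_trylock, my_lock_slow);
	}
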
diff --git a/include/linux/magic.h b/include/linux/magic.h
index 9d713c0..36cc20d 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -13,7 +13,6 @@
 #define HPFS_SUPER_MAGIC	0xf995e849
 #define ISOFS_SUPER_MAGIC	0x9660
 #define JFFS2_SUPER_MAGIC	0x72b6
-#define KVMFS_SUPER_MAGIC	0x19700426
 #define ANON_INODE_FS_MAGIC	0x09041934
 
 #define MINIX_SUPER_MAGIC	0x137F		/* original minix fs */
diff --git a/include/linux/major.h b/include/linux/major.h
index 7e7c909..0cb9805 100644
--- a/include/linux/major.h
+++ b/include/linux/major.h
@@ -158,6 +158,8 @@
 #define VXSPEC_MAJOR		200	/* VERITAS volume config driver */
 #define VXDMP_MAJOR		201	/* VERITAS volume multipath driver */
 
+#define XENVBD_MAJOR		202	/* Xen virtual block device */
+
 #define MSR_MAJOR		202
 #define CPUID_MAJOR		203
 
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index daabb3a..e147cf5 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -159,7 +159,7 @@
 
 extern struct mempolicy default_policy;
 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
-		unsigned long addr);
+		unsigned long addr, gfp_t gfp_flags);
 extern unsigned slab_node(struct mempolicy *policy);
 
 extern enum zone_type policy_zone;
@@ -256,9 +256,9 @@
 #define set_cpuset_being_rebound(x) do {} while (0)
 
 static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
-		unsigned long addr)
+		unsigned long addr, gfp_t gfp_flags)
 {
-	return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
+	return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
 }
 
 static inline int do_migrate_pages(struct mm_struct *mm,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 97d0cdd..c456c3a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -168,6 +168,8 @@
 #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
 #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
 
+#define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
+
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 #endif
@@ -190,6 +192,30 @@
  */
 extern pgprot_t protection_map[16];
 
+#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
+#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
+
+
+/*
+ * vm_fault is filled by the pagefault handler and passed to the vma's
+ * ->fault function. The vma's ->fault is responsible for returning a bitmask
+ * of VM_FAULT_xxx flags that give details about how the fault was handled.
+ *
+ * pgoff should be used in favour of virtual_address, if possible. If pgoff
+ * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
+ * mapping support.
+ */
+struct vm_fault {
+	unsigned int flags;		/* FAULT_FLAG_xxx flags */
+	pgoff_t pgoff;			/* Logical page offset based on vma */
+	void __user *virtual_address;	/* Faulting virtual address */
+
+	struct page *page;		/* ->fault handlers should return a
+					 * page here, unless VM_FAULT_NOPAGE
+					 * is set (which is also implied by
+					 * VM_FAULT_ERROR).
+					 */
+};
 
 /*
  * These are the virtual MM functions - opening of an area, closing and
@@ -199,9 +225,11 @@
 struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
-	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
-	unsigned long (*nopfn)(struct vm_area_struct * area, unsigned long address);
-	int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
+	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+	struct page *(*nopage)(struct vm_area_struct *area,
+			unsigned long address, int *type);
+	unsigned long (*nopfn)(struct vm_area_struct *area,
+			unsigned long address);
 
 	/* notification that a previously read-only page is about to become
 	 * writable, if an error is returned it will cause a SIGBUS */
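
A ->fault handler reads vmf->pgoff (or vmf->virtual_address), stores the resolved page in vmf->page, and reports what happened through the VM_FAULT_* bits defined later in this header. A hedged sketch (my_lookup_page() is hypothetical):

	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct page *page;

		page = my_lookup_page(vma->vm_file, vmf->pgoff);  /* hypothetical */
		if (!page)
			return VM_FAULT_SIGBUS;

		get_page(page);
		vmf->page = page;	/* the caller maps it, unless VM_FAULT_NOPAGE */
		return 0;		/* or VM_FAULT_MAJOR if I/O was needed */
	}

	static struct vm_operations_struct my_vm_ops = {
		.fault	= my_fault,
	};
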
@@ -599,6 +627,7 @@
 {
 	struct address_space *mapping = page->mapping;
 
+	VM_BUG_ON(PageSlab(page));
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
 #ifdef CONFIG_SLUB
@@ -654,7 +683,6 @@
  */
 #define NOPAGE_SIGBUS	(NULL)
 #define NOPAGE_OOM	((struct page *) (-1))
-#define NOPAGE_REFAULT	((struct page *) (-2))	/* Return to userspace, rerun */
 
 /*
  * Error return values for the *_nopfn functions
@@ -668,16 +696,18 @@
  * Used to decide whether a process gets delivered SIGBUS or
  * just gets major/minor fault counters bumped up.
  */
-#define VM_FAULT_OOM	0x00
-#define VM_FAULT_SIGBUS	0x01
-#define VM_FAULT_MINOR	0x02
-#define VM_FAULT_MAJOR	0x03
 
-/* 
- * Special case for get_user_pages.
- * Must be in a distinct bit from the above VM_FAULT_ flags.
- */
-#define VM_FAULT_WRITE	0x10
+#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */
+
+#define VM_FAULT_OOM	0x0001
+#define VM_FAULT_SIGBUS	0x0002
+#define VM_FAULT_MAJOR	0x0004
+#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
+
+#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, did not return a page */
+#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
+
+#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)
 
 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
 
@@ -761,20 +791,10 @@
 
 extern int vmtruncate(struct inode * inode, loff_t offset);
 extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
-extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
-extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
 
 #ifdef CONFIG_MMU
-extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
+extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, int write_access);
-
-static inline int handle_mm_fault(struct mm_struct *mm,
-			struct vm_area_struct *vma, unsigned long address,
-			int write_access)
-{
-	return __handle_mm_fault(mm, vma, address, write_access) &
-				(~VM_FAULT_WRITE);
-}
 #else
 static inline int handle_mm_fault(struct mm_struct *mm,
 			struct vm_area_struct *vma, unsigned long address,
@@ -788,7 +808,6 @@
 
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
-void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
 		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
@@ -805,32 +824,42 @@
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
+extern unsigned long move_page_tables(struct vm_area_struct *vma,
+		unsigned long old_addr, struct vm_area_struct *new_vma,
+		unsigned long new_addr, unsigned long len);
 extern unsigned long do_mremap(unsigned long addr,
 			       unsigned long old_len, unsigned long new_len,
 			       unsigned long flags, unsigned long new_addr);
+extern int mprotect_fixup(struct vm_area_struct *vma,
+			  struct vm_area_struct **pprev, unsigned long start,
+			  unsigned long end, unsigned long newflags);
 
 /*
- * Prototype to add a shrinker callback for ageable caches.
- * 
- * These functions are passed a count `nr_to_scan' and a gfpmask.  They should
- * scan `nr_to_scan' objects, attempting to free them.
+ * A callback you can register to apply pressure to ageable caches.
  *
- * The callback must return the number of objects which remain in the cache.
+ * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'.  It should
+ * look through the least-recently-used 'nr_to_scan' entries and
+ * attempt to free them up.  It should return the number of objects
+ * which remain in the cache.  If it returns -1, it means it cannot do
+ * any scanning at this time (eg. there is a risk of deadlock).
  *
- * The callback will be passed nr_to_scan == 0 when the VM is querying the
- * cache size, so a fastpath for that case is appropriate.
+ * The 'gfpmask' refers to the allocation we are currently trying to
+ * fulfil.
+ *
+ * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
+ * querying the cache size, so a fastpath for that case is appropriate.
  */
-typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
+struct shrinker {
+	int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+	int seeks;	/* seeks to recreate an obj */
 
-/*
- * Add an aging callback.  The int is the number of 'seeks' it takes
- * to recreate one of the objects that these functions age.
- */
-
-#define DEFAULT_SEEKS 2
-struct shrinker;
-extern struct shrinker *set_shrinker(int, shrinker_t);
-extern void remove_shrinker(struct shrinker *shrinker);
+	/* These are for internal use */
+	struct list_head list;
+	long nr;	/* objs pending delete */
+};
+#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
+extern void register_shrinker(struct shrinker *);
+extern void unregister_shrinker(struct shrinker *);
 
 /*
 * Some shared mappings will want the pages marked read-only
@@ -1099,9 +1128,7 @@
 				       loff_t lstart, loff_t lend);
 
 /* generic vm_area_ops exported for stackable file systems */
-extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
-extern int filemap_populate(struct vm_area_struct *, unsigned long,
-		unsigned long, pgprot_t, unsigned long, int);
+extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
 
 /* mm/page-writeback.c */
 int write_one_page(struct page *page, int wait);
@@ -1116,13 +1143,20 @@
 			pgoff_t offset, unsigned long nr_to_read);
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read);
-unsigned long page_cache_readahead(struct address_space *mapping,
-			  struct file_ra_state *ra,
-			  struct file *filp,
-			  pgoff_t offset,
-			  unsigned long size);
-void handle_ra_miss(struct address_space *mapping, 
-		    struct file_ra_state *ra, pgoff_t offset);
+
+void page_cache_sync_readahead(struct address_space *mapping,
+			       struct file_ra_state *ra,
+			       struct file *filp,
+			       pgoff_t offset,
+			       unsigned long size);
+
+void page_cache_async_readahead(struct address_space *mapping,
+				struct file_ra_state *ra,
+				struct file *filp,
+				struct page *pg,
+				pgoff_t offset,
+				unsigned long size);
+
 unsigned long max_sane_readahead(unsigned long nr);
 
 /* Do stack extension */
@@ -1130,6 +1164,8 @@
 #ifdef CONFIG_IA64
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #endif
+extern int expand_stack_downwards(struct vm_area_struct *vma,
+				  unsigned long address);
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
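
register_shrinker() replaces set_shrinker(): the caller now embeds the struct shrinker itself instead of having one allocated on its behalf. A sketch of the new registration pattern (the my_cache_* helpers are hypothetical):

	#include <linux/mm.h>

	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
	{
		if (nr_to_scan)
			my_cache_free_some(nr_to_scan, gfp_mask); /* hypothetical */
		return my_cache_count();	/* objects still cached */
	}

	static struct shrinker my_shrinker = {
		.shrink	= my_cache_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

	/* register_shrinker(&my_shrinker) at init,
	 * unregister_shrinker(&my_shrinker) at teardown. */
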
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 04b1636..da8eb8ad9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -24,6 +24,14 @@
 #endif
 #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
 
+/*
+ * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
+ * costly to service.  That is, it separates allocation orders which should
+ * coalesce naturally under reasonable reclaim pressure from those which
+ * will not.
+ */
+#define PAGE_ALLOC_COSTLY_ORDER 3
+
 struct free_area {
 	struct list_head	free_list;
 	unsigned long		nr_free;
@@ -146,6 +154,7 @@
 	 */
 	ZONE_HIGHMEM,
 #endif
+	ZONE_MOVABLE,
 	MAX_NR_ZONES
 };
 
@@ -167,6 +176,7 @@
 	+ defined(CONFIG_ZONE_DMA32)	\
 	+ 1				\
 	+ defined(CONFIG_HIGHMEM)	\
+	+ 1				\
 )
 #if __ZONE_COUNT < 2
 #define ZONES_SHIFT 0
@@ -499,10 +509,22 @@
 	return (!!zone->present_pages);
 }
 
+extern int movable_zone;
+
+static inline int zone_movable_is_highmem(void)
+{
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+	return movable_zone == ZONE_HIGHMEM;
+#else
+	return 0;
+#endif
+}
+
 static inline int is_highmem_idx(enum zone_type idx)
 {
 #ifdef CONFIG_HIGHMEM
-	return (idx == ZONE_HIGHMEM);
+	return (idx == ZONE_HIGHMEM ||
+		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
 #else
 	return 0;
 #endif
@@ -522,7 +544,9 @@
 static inline int is_highmem(struct zone *zone)
 {
 #ifdef CONFIG_HIGHMEM
-	return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
+	int zone_idx = zone - zone->zone_pgdat->node_zones;
+	return zone_idx == ZONE_HIGHMEM ||
+		(zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
 #else
 	return 0;
 #endif
diff --git a/include/linux/namei.h b/include/linux/namei.h
index b7dd249..6c38efb 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -69,8 +69,8 @@
 #define user_path_walk_link(name,nd) \
 	__user_walk_fd(AT_FDCWD, name, 0, nd)
 extern int FASTCALL(path_lookup(const char *, unsigned, struct nameidata *));
-extern int FASTCALL(path_walk(const char *, struct nameidata *));
-extern int FASTCALL(link_path_walk(const char *, struct nameidata *));
+extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
+			   const char *, unsigned int, struct nameidata *);
 extern void path_release(struct nameidata *);
 extern void path_release_on_umount(struct nameidata *);
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 322b5ea..9820ca1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -262,8 +262,6 @@
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
 	__LINK_STATE_QDISC_RUNNING,
-	/* Set by the netpoll NAPI code */
-	__LINK_STATE_POLL_LIST_FROZEN,
 };
 
 
@@ -1022,14 +1020,6 @@
 {
 	unsigned long flags;
 
-#ifdef CONFIG_NETPOLL
-	/* Prevent race with netpoll - yes, this is a kludge.
-	 * But at least it doesn't penalize the non-netpoll
-	 * code path. */
-	if (test_bit(__LINK_STATE_POLL_LIST_FROZEN, &dev->state))
-		return;
-#endif
-
 	local_irq_save(flags);
 	__netif_rx_complete(dev);
 	local_irq_restore(flags);
@@ -1108,10 +1098,8 @@
 extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
 extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
 extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern void		dev_mc_discard(struct net_device *dev);
 extern int 		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
 extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
-extern void		__dev_addr_discard(struct dev_addr_list **list);
 extern void		dev_set_promiscuity(struct net_device *dev, int inc);
 extern void		dev_set_allmulti(struct net_device *dev, int inc);
 extern void		netdev_state_change(struct net_device *dev);
diff --git a/include/linux/netfilter_ipv4/ipt_iprange.h b/include/linux/netfilter_ipv4/ipt_iprange.h
index 34ab0fb..a92fefc 100644
--- a/include/linux/netfilter_ipv4/ipt_iprange.h
+++ b/include/linux/netfilter_ipv4/ipt_iprange.h
@@ -1,6 +1,8 @@
 #ifndef _IPT_IPRANGE_H
 #define _IPT_IPRANGE_H
 
+#include <linux/types.h>
+
 #define IPRANGE_SRC		0x01	/* Match source IP address */
 #define IPRANGE_DST		0x02	/* Match destination IP address */
 #define IPRANGE_SRC_INV		0x10	/* Negate the condition */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 2e23353..83d8239 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -161,6 +161,8 @@
 					  void (*input)(struct sock *sk, int len),
 					  struct mutex *cb_mutex,
 					  struct module *module);
+extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
+extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group);
 extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
 extern int netlink_has_listeners(struct sock *sk, unsigned int group);
 extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 9f62d61..5cd1924 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -42,6 +42,9 @@
 #define	NFSEXP_NOACL		0x8000	/* reserved for possible ACL related use */
 #define NFSEXP_ALLFLAGS		0xFE3F
 
+/* The flags that may vary depending on security flavor: */
+#define NFSEXP_SECINFO_FLAGS	(NFSEXP_READONLY | NFSEXP_ROOTSQUASH \
+					| NFSEXP_ALLSQUASH)
 
 #ifdef __KERNEL__
 
@@ -64,6 +67,19 @@
 	int migrated;
 };
 
+/*
+ * We keep an array of pseudoflavors with the export, in order from most
+ * to least preferred.  For the forseeable future, we don't expect more
+ * than the eight pseudoflavors null, unix, krb5, krb5i, krb5p, skpm3,
+ * spkm3i, and spkm3p (and using all 8 at once should be rare).
+ */
+#define MAX_SECINFO_LIST	8
+
+struct exp_flavor_info {
+	u32	pseudoflavor;
+	u32	flags;
+};
+
 struct svc_export {
 	struct cache_head	h;
 	struct auth_domain *	ex_client;
@@ -76,6 +92,8 @@
 	int			ex_fsid;
 	unsigned char *		ex_uuid; /* 16 byte fsid */
 	struct nfsd4_fs_locations ex_fslocs;
+	int			ex_nflavors;
+	struct exp_flavor_info	ex_flavors[MAX_SECINFO_LIST];
 };
 
 /* an "export key" (expkey) maps a filehandlefragement to an
@@ -95,10 +113,11 @@
 
 #define EX_SECURE(exp)		(!((exp)->ex_flags & NFSEXP_INSECURE_PORT))
 #define EX_ISSYNC(exp)		(!((exp)->ex_flags & NFSEXP_ASYNC))
-#define EX_RDONLY(exp)		((exp)->ex_flags & NFSEXP_READONLY)
 #define EX_NOHIDE(exp)		((exp)->ex_flags & NFSEXP_NOHIDE)
 #define EX_WGATHER(exp)		((exp)->ex_flags & NFSEXP_GATHERED_WRITES)
 
+int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp);
+__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp);
 
 /*
  * Function declarations
@@ -112,13 +131,19 @@
 					struct vfsmount *mnt,
 					struct dentry *dentry,
 					struct cache_req *reqp);
+struct svc_export *	rqst_exp_get_by_name(struct svc_rqst *,
+					     struct vfsmount *,
+					     struct dentry *);
 struct svc_export *	exp_parent(struct auth_domain *clp,
 				   struct vfsmount *mnt,
 				   struct dentry *dentry,
 				   struct cache_req *reqp);
+struct svc_export *	rqst_exp_parent(struct svc_rqst *,
+					struct vfsmount *mnt,
+					struct dentry *dentry);
 int			exp_rootfh(struct auth_domain *, 
 					char *path, struct knfsd_fh *, int maxsize);
-__be32			exp_pseudoroot(struct auth_domain *, struct svc_fh *fhp, struct cache_req *creq);
+__be32			exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
 __be32			nfserrno(int errno);
 
 extern struct cache_detail svc_export_cache;
@@ -135,6 +160,7 @@
 extern struct svc_export *
 exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv,
 	 struct cache_req *reqp);
+struct svc_export * rqst_exp_find(struct svc_rqst *, int, u32 *);
 
 #endif /* __KERNEL__ */
 
diff --git a/include/linux/nfsd/interface.h b/include/linux/nfsd/interface.h
deleted file mode 100644
index af09797..0000000
--- a/include/linux/nfsd/interface.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * include/linux/nfsd/interface.h
- *
- * defines interface between nfsd and other bits of
- * the kernel.  Particularly filesystems (eventually).
- *
- * Copyright (C) 2000 Neil Brown <neilb@cse.unsw.edu.au>
- */
-
-#ifndef LINUX_NFSD_INTERFACE_H
-#define LINUX_NFSD_INTERFACE_H
-
-#endif /* LINUX_NFSD_INTERFACE_H */
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index 72feac5..e452256 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -22,7 +22,6 @@
 #include <linux/nfsd/export.h>
 #include <linux/nfsd/auth.h>
 #include <linux/nfsd/stats.h>
-#include <linux/nfsd/interface.h>
 /*
  * nfsd version
  */
@@ -72,6 +71,9 @@
 		                struct svc_export **expp);
 __be32		nfsd_lookup(struct svc_rqst *, struct svc_fh *,
 				const char *, int, struct svc_fh *);
+__be32		 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *,
+				const char *, int,
+				struct svc_export **, struct dentry **);
 __be32		nfsd_setattr(struct svc_rqst *, struct svc_fh *,
 				struct iattr *, int, time_t);
 #ifdef CONFIG_NFSD_V4
@@ -120,7 +122,8 @@
 				struct kstatfs *);
 
 int		nfsd_notify_change(struct inode *, struct iattr *);
-__be32		nfsd_permission(struct svc_export *, struct dentry *, int);
+__be32		nfsd_permission(struct svc_rqst *, struct svc_export *,
+				struct dentry *, int);
 int		nfsd_sync_dir(struct dentry *dp);
 
 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
@@ -149,6 +152,7 @@
  * NFSv4 State
  */
 #ifdef CONFIG_NFSD_V4
+extern unsigned int max_delegations;
 void nfs4_state_init(void);
 int nfs4_state_start(void);
 void nfs4_state_shutdown(void);
@@ -236,6 +240,7 @@
 #define	nfserr_badname		__constant_htonl(NFSERR_BADNAME)
 #define	nfserr_cb_path_down	__constant_htonl(NFSERR_CB_PATH_DOWN)
 #define	nfserr_locked		__constant_htonl(NFSERR_LOCKED)
+#define	nfserr_wrongsec		__constant_htonl(NFSERR_WRONGSEC)
 #define	nfserr_replay_me	__constant_htonl(NFSERR_REPLAY_ME)
 
 /* error codes for internal use */
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index ab5c236..db348f7 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -67,7 +67,7 @@
 	int			cbr_trunc;
 	stateid_t		cbr_stateid;
 	u32			cbr_fhlen;
-	u32			cbr_fhval[NFS4_FHSIZE];
+	char			cbr_fhval[NFS4_FHSIZE];
 	struct nfs4_delegation	*cbr_dp;
 };
 
@@ -224,6 +224,7 @@
 	struct inode		*fi_inode;
 	u32                     fi_id;      /* used with stateowner->so_id 
 					     * for stateid_hashtbl hash */
+	bool			fi_had_conflict;
 };
 
 /*
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index 09799bc..1b65326 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -293,6 +293,12 @@
 	struct nfsd4_change_info  rn_tinfo; /* response */
 };
 
+struct nfsd4_secinfo {
+	u32 si_namelen;					/* request */
+	char *si_name;					/* request */
+	struct svc_export *si_exp;			/* response */
+};
+
 struct nfsd4_setattr {
 	stateid_t	sa_stateid;         /* request */
 	u32		sa_bmval[2];        /* request */
@@ -365,6 +371,7 @@
 		struct nfsd4_remove		remove;
 		struct nfsd4_rename		rename;
 		clientid_t			renew;
+		struct nfsd4_secinfo		secinfo;
 		struct nfsd4_setattr		setattr;
 		struct nfsd4_setclientid	setclientid;
 		struct nfsd4_setclientid_confirm setclientid_confirm;
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 9431101..be3f2bb 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -196,6 +196,8 @@
 #define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
 #define CPU_LOCK_ACQUIRE	0x0008 /* Acquire all hotcpu locks */
 #define CPU_LOCK_RELEASE	0x0009 /* Release all hotcpu locks */
+#define CPU_DYING		0x000A /* CPU (unsigned)v not running any task,
+				        * not handling interrupts, soon dead */
 
 /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
  * operation in progress
@@ -208,6 +210,13 @@
 #define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
 #define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
 #define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
+#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
+
+/* Hibernation and suspend events */
+#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
+#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
+#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
+#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_NOTIFIER_H */
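
The PM_* constants are delivered through a notifier chain around suspend and hibernation; a sketch of a consumer, assuming the companion register_pm_notifier() helper introduced alongside them:

	#include <linux/notifier.h>
	#include <linux/suspend.h>

	static int my_pm_event(struct notifier_block *nb, unsigned long event,
			       void *unused)
	{
		switch (event) {
		case PM_HIBERNATION_PREPARE:
		case PM_SUSPEND_PREPARE:
			/* quiesce the device, stop background work */
			return NOTIFY_OK;
		case PM_POST_HIBERNATION:
		case PM_POST_SUSPEND:
			/* resume normal operation */
			return NOTIFY_OK;
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block my_pm_nb = {
		.notifier_call = my_pm_event,
	};

	/* register_pm_notifier(&my_pm_nb);  -- assumed helper in <linux/suspend.h> */
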
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index ae2d79f..209d3a4 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -90,8 +90,12 @@
 #define PG_reclaim		17	/* To be reclaimed asap */
 #define PG_buddy		19	/* Page is free, on buddy lists */
 
+/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
+#define PG_readahead		PG_reclaim /* Reminder to do async read-ahead */
+
 /* PG_owner_priv_1 users should have descriptive aliases */
 #define PG_checked		PG_owner_priv_1 /* Used by some filesystems */
+#define PG_pinned		PG_owner_priv_1	/* Xen pinned pagetable */
 
 #if (BITS_PER_LONG > 32)
 /*
@@ -170,6 +174,10 @@
 #define SetPageChecked(page)	set_bit(PG_checked, &(page)->flags)
 #define ClearPageChecked(page)	clear_bit(PG_checked, &(page)->flags)
 
+#define PagePinned(page)	test_bit(PG_pinned, &(page)->flags)
+#define SetPagePinned(page)	set_bit(PG_pinned, &(page)->flags)
+#define ClearPagePinned(page)	clear_bit(PG_pinned, &(page)->flags)
+
 #define PageReserved(page)	test_bit(PG_reserved, &(page)->flags)
 #define SetPageReserved(page)	set_bit(PG_reserved, &(page)->flags)
 #define ClearPageReserved(page)	clear_bit(PG_reserved, &(page)->flags)
@@ -181,37 +189,15 @@
 #define __SetPagePrivate(page)  __set_bit(PG_private, &(page)->flags)
 #define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags)
 
+/*
+ * Only the test-and-set variants exist for PG_writeback.  The unconditional
+ * operators are risky: they bypass page accounting.
+ */
 #define PageWriteback(page)	test_bit(PG_writeback, &(page)->flags)
-#define SetPageWriteback(page)						\
-	do {								\
-		if (!test_and_set_bit(PG_writeback,			\
-				&(page)->flags))			\
-			inc_zone_page_state(page, NR_WRITEBACK);	\
-	} while (0)
-#define TestSetPageWriteback(page)					\
-	({								\
-		int ret;						\
-		ret = test_and_set_bit(PG_writeback,			\
-					&(page)->flags);		\
-		if (!ret)						\
-			inc_zone_page_state(page, NR_WRITEBACK);	\
-		ret;							\
-	})
-#define ClearPageWriteback(page)					\
-	do {								\
-		if (test_and_clear_bit(PG_writeback,			\
-				&(page)->flags))			\
-			dec_zone_page_state(page, NR_WRITEBACK);	\
-	} while (0)
-#define TestClearPageWriteback(page)					\
-	({								\
-		int ret;						\
-		ret = test_and_clear_bit(PG_writeback,			\
-				&(page)->flags);			\
-		if (ret)						\
-			dec_zone_page_state(page, NR_WRITEBACK);	\
-		ret;							\
-	})
+#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback,	\
+							&(page)->flags)
+#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback,	\
+							&(page)->flags)
 
 #define PageBuddy(page)		test_bit(PG_buddy, &(page)->flags)
 #define __SetPageBuddy(page)	__set_bit(PG_buddy, &(page)->flags)
@@ -221,6 +207,10 @@
 #define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
 #define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
 
+#define PageReadahead(page)	test_bit(PG_readahead, &(page)->flags)
+#define SetPageReadahead(page)	set_bit(PG_readahead, &(page)->flags)
+#define ClearPageReadahead(page) clear_bit(PG_readahead, &(page)->flags)
+
 #define PageReclaim(page)	test_bit(PG_reclaim, &(page)->flags)
 #define SetPageReclaim(page)	set_bit(PG_reclaim, &(page)->flags)
 #define ClearPageReclaim(page)	clear_bit(PG_reclaim, &(page)->flags)
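
With the unconditional Set/ClearPageWriteback macros gone, the NR_WRITEBACK accounting has to live with the callers of the remaining test-and-set variants, roughly like this (a sketch, not the exact helpers the kernel provides):

	#include <linux/page-flags.h>
	#include <linux/vmstat.h>

	static void my_set_writeback(struct page *page)
	{
		if (!TestSetPageWriteback(page))
			inc_zone_page_state(page, NR_WRITEBACK);
	}

	static void my_clear_writeback(struct page *page)
	{
		if (TestClearPageWriteback(page))
			dec_zone_page_state(page, NR_WRITEBACK);
	}
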
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2c7add1..b15c649 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -495,6 +495,8 @@
 
 #define PCI_VENDOR_ID_AMD		0x1022
 #define PCI_DEVICE_ID_AMD_K8_NB		0x1100
+#define PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP	0x1101
+#define PCI_DEVICE_ID_AMD_K8_NB_MEMCTL	0x1102
 #define PCI_DEVICE_ID_AMD_K8_NB_MISC	0x1103
 #define PCI_DEVICE_ID_AMD_LANCE		0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME	0x2001
@@ -2209,6 +2211,7 @@
 #define PCI_DEVICE_ID_INTEL_82915GM_IG	0x2592
 #define PCI_DEVICE_ID_INTEL_82945G_HB	0x2770
 #define PCI_DEVICE_ID_INTEL_82945G_IG	0x2772
+#define PCI_DEVICE_ID_INTEL_3000_HB	0x2778
 #define PCI_DEVICE_ID_INTEL_82945GM_HB	0x27A0
 #define PCI_DEVICE_ID_INTEL_82945GM_IG	0x27A2
 #define PCI_DEVICE_ID_INTEL_ICH6_0	0x2640
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 273781c..ad3cc2e 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -101,6 +101,7 @@
  */
 extern void (*pm_idle)(void);
 extern void (*pm_power_off)(void);
+extern void (*pm_power_off_prepare)(void);
 
 typedef int __bitwise suspend_state_t;
 
@@ -284,8 +285,6 @@
 #define device_may_wakeup(dev) \
 	(device_can_wakeup(dev) && (dev)->power.should_wakeup)
 
-extern int dpm_runtime_suspend(struct device *, pm_message_t);
-extern void dpm_runtime_resume(struct device *);
 extern void __suspend_report_result(const char *function, void *fn, int ret);
 
 #define suspend_report_result(fn, ret)					\
@@ -317,15 +316,6 @@
 #define device_set_wakeup_enable(dev,val)	do{}while(0)
 #define device_may_wakeup(dev)			(0)
 
-static inline int dpm_runtime_suspend(struct device * dev, pm_message_t state)
-{
-	return 0;
-}
-
-static inline void dpm_runtime_resume(struct device * dev)
-{
-}
-
 #define suspend_report_result(fn, ret) do { } while (0)
 
 static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index eeb1976..ae8146a 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -110,6 +110,8 @@
 		__ptrace_unlink(child);
 }
 
+int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data);
+int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data);
 
 #ifndef force_successful_syscall_return
 /*
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h
index dd5a05d..75e17a0 100644
--- a/include/linux/raid/bitmap.h
+++ b/include/linux/raid/bitmap.h
@@ -262,7 +262,7 @@
 
 char *file_path(struct file *file, char *buf, int count);
 void bitmap_print_sb(struct bitmap *bitmap);
-int bitmap_update_sb(struct bitmap *bitmap);
+void bitmap_update_sb(struct bitmap *bitmap);
 
 int  bitmap_setallbits(struct bitmap *bitmap);
 void bitmap_write_all(struct bitmap *bitmap);
@@ -278,8 +278,8 @@
 void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted);
 void bitmap_close_sync(struct bitmap *bitmap);
 
-int bitmap_unplug(struct bitmap *bitmap);
-int bitmap_daemon_work(struct bitmap *bitmap);
+void bitmap_unplug(struct bitmap *bitmap);
+void bitmap_daemon_work(struct bitmap *bitmap);
 #endif
 
 #endif
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index de72c49..28ac632 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -51,7 +51,7 @@
 
 	sector_t size;			/* Device size (in blocks) */
 	mddev_t *mddev;			/* RAID array if running */
-	unsigned long last_events;	/* IO event timestamp */
+	long last_events;		/* IO event timestamp */
 
 	struct block_device *bdev;	/* block device handle */
 
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 1dd1c70..85ea63f 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -67,6 +67,11 @@
 
 void ctrl_alt_del(void);
 
+#define POWEROFF_CMD_PATH_LEN	256
+extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN];
+
+extern int orderly_poweroff(bool force);
+
 /*
  * Emergency restart, callable from an interrupt handler.
  */
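
The new orderly_poweroff() interface runs the user-mode helper named in poweroff_cmd and, when @force is set, falls back to a kernel-initiated power-off if that helper cannot be run. A minimal sketch of a caller, assuming a hypothetical over-temperature check and threshold:

#include <linux/kernel.h>
#include <linux/reboot.h>

/* Hypothetical thermal-emergency path: try a clean user-space shutdown,
 * forcing a kernel power-off if the helper fails. */
static void example_critical_temp(int temp_millicelsius)
{
	if (temp_millicelsius > 100000) {	/* illustrative 100 C limit */
		printk(KERN_EMERG "temperature critical, powering off\n");
		orderly_poweroff(true);
	}
}
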
diff --git a/include/linux/rtc/m48t59.h b/include/linux/rtc/m48t59.h
new file mode 100644
index 0000000..e8c7c21
--- /dev/null
+++ b/include/linux/rtc/m48t59.h
@@ -0,0 +1,57 @@
+/*
+ * include/linux/rtc/m48t59.h
+ *
+ * Definitions for the platform data of m48t59 RTC chip driver.
+ *
+ * Copyright (c) 2007 Wind River Systems, Inc.
+ *
+ * Mark Zhan <rongkai.zhan@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_RTC_M48T59_H_
+#define _LINUX_RTC_M48T59_H_
+
+/*
+ * M48T59 Register Offset
+ */
+#define M48T59_YEAR		0x1fff
+#define M48T59_MONTH		0x1ffe
+#define M48T59_MDAY		0x1ffd	/* Day of Month */
+#define M48T59_WDAY		0x1ffc	/* Day of Week */
+#define M48T59_WDAY_CB			0x20	/* Century Bit */
+#define M48T59_WDAY_CEB			0x10	/* Century Enable Bit */
+#define M48T59_HOUR		0x1ffb
+#define M48T59_MIN		0x1ffa
+#define M48T59_SEC		0x1ff9
+#define M48T59_CNTL		0x1ff8
+#define M48T59_CNTL_READ		0x40
+#define M48T59_CNTL_WRITE		0x80
+#define M48T59_WATCHDOG		0x1ff7
+#define M48T59_INTR		0x1ff6
+#define M48T59_INTR_AFE			0x80	/* Alarm Interrupt Enable */
+#define M48T59_INTR_ABE			0x20
+#define M48T59_ALARM_DATE	0x1ff5
+#define M48T59_ALARM_HOUR	0x1ff4
+#define M48T59_ALARM_MIN	0x1ff3
+#define M48T59_ALARM_SEC	0x1ff2
+#define M48T59_UNUSED		0x1ff1
+#define M48T59_FLAGS		0x1ff0
+#define M48T59_FLAGS_WDT		0x80	/* watchdog timer expired */
+#define M48T59_FLAGS_AF			0x40	/* alarm */
+#define M48T59_FLAGS_BF			0x10	/* low battery */
+
+#define M48T59_NVRAM_SIZE	0x1ff0
+
+struct m48t59_plat_data {
+	/* Methods to access the M48T59 registers.
+	 * NOTE: 'ofs' must be in the range 0x0000-0x1fff.
+	 */
+	void (*write_byte)(struct device *dev, u32 ofs, u8 val);
+	unsigned char (*read_byte)(struct device *dev, u32 ofs);
+};
+
+#endif /* _LINUX_RTC_M48T59_H_ */
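
The platform data deliberately carries only the two register accessors; how the chip is reached is the board's business. A hedged sketch of what a memory-mapped board might supply (the iomem base and its setup are assumptions, not part of this header):

#include <linux/device.h>
#include <linux/io.h>
#include <linux/rtc/m48t59.h>

/* Hypothetical: base address ioremap()ed by the board setup code. */
static void __iomem *board_m48t59_base;

static void board_m48t59_write(struct device *dev, u32 ofs, u8 val)
{
	writeb(val, board_m48t59_base + ofs);
}

static unsigned char board_m48t59_read(struct device *dev, u32 ofs)
{
	return readb(board_m48t59_base + ofs);
}

static struct m48t59_plat_data board_m48t59_pdata = {
	.write_byte	= board_m48t59_write,
	.read_byte	= board_m48t59_read,
};
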
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 731edaca..33b9b48 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -345,6 +345,27 @@
 		(mm)->hiwater_vm = (mm)->total_vm;	\
 } while (0)
 
+extern void set_dumpable(struct mm_struct *mm, int value);
+extern int get_dumpable(struct mm_struct *mm);
+
+/* mm flags */
+/* dumpable bits */
+#define MMF_DUMPABLE      0  /* core dump is permitted */
+#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
+#define MMF_DUMPABLE_BITS 2
+
+/* coredump filter bits */
+#define MMF_DUMP_ANON_PRIVATE	2
+#define MMF_DUMP_ANON_SHARED	3
+#define MMF_DUMP_MAPPED_PRIVATE	4
+#define MMF_DUMP_MAPPED_SHARED	5
+#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
+#define MMF_DUMP_FILTER_BITS	4
+#define MMF_DUMP_FILTER_MASK \
+	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
+#define MMF_DUMP_FILTER_DEFAULT \
+	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED))
+
 struct mm_struct {
 	struct vm_area_struct * mmap;		/* list of VMAs */
 	struct rb_root mm_rb;
@@ -402,7 +423,7 @@
 	unsigned int token_priority;
 	unsigned int last_interval;
 
-	unsigned char dumpable:2;
+	unsigned long flags; /* Must use atomic bitops to access the bits */
 
 	/* coredumping support */
 	int core_waiters;
@@ -1327,6 +1348,13 @@
 #endif
 
 extern unsigned long long sched_clock(void);
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+extern unsigned long long cpu_clock(int cpu);
+
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
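
With the old two-bit dumpable field folded into mm->flags, the low MMF_DUMPABLE_BITS are reached through set_dumpable()/get_dumpable(), while the coredump filter bits above them are plain atomic bit operations. A sketch of how a filter bit can be inspected (the helper name is made up for illustration):

#include <linux/bitops.h>
#include <linux/sched.h>

/* Hypothetical helper: should anonymous shared mappings of this mm be
 * written to a core dump?  The default filter enables exactly the
 * anonymous private and anonymous shared bits. */
static inline int example_dump_anon_shared(struct mm_struct *mm)
{
	return test_bit(MMF_DUMP_ANON_SHARED, &mm->flags);
}
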
 
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 706ee9a..8518fa2 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -60,6 +60,8 @@
 void serial8250_suspend_port(int line);
 void serial8250_resume_port(int line);
 
+extern int early_serial_setup(struct uart_port *port);
+
 extern int serial8250_find_port(struct uart_port *p);
 extern int serial8250_find_port_for_earlycon(void);
 extern int setup_early_serial8250_console(char *cmdline);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 7f2c99d..773d8d8 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -62,8 +62,9 @@
 /* NEC v850.  */
 #define PORT_V850E_UART	40
 
-/* DZ */
-#define PORT_DZ		47
+/* DEC */
+#define PORT_DZ		46
+#define PORT_ZS		47
 
 /* Parisc type numbers. */
 #define PORT_MUX	48
@@ -142,6 +143,9 @@
 /* Micrel KS8695 */
 #define PORT_KS8695	76
 
+/* Broadcom SB1250, etc. SOC */
+#define PORT_SB1250_DUART	77
+
 
 #ifdef __KERNEL__
 
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 27402fe..0e1d0da 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -31,6 +31,19 @@
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
 
 /*
+ * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
+ *
+ * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
+ *
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+#define ZERO_SIZE_PTR ((void *)16)
+
+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) < \
+				(unsigned long)ZERO_SIZE_PTR)
+
+/*
  * struct kmem_cache related prototypes
  */
 void __init kmem_cache_init(void);
@@ -42,7 +55,6 @@
 			void (*)(void *, struct kmem_cache *, unsigned long));
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
-void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 const char *kmem_cache_name(struct kmem_cache *);
@@ -78,11 +90,37 @@
 /*
  * Common kmalloc functions provided by all allocators
  */
-void *__kzalloc(size_t, gfp_t);
 void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
 size_t ksize(const void *);
 
+/*
+ * Allocator specific definitions. These are mainly used to establish optimized
+ * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
+ * selecting the appropriate general cache at compile time.
+ *
+ * Allocators must define at least:
+ *
+ *	kmem_cache_alloc()
+ *	__kmalloc()
+ *	kmalloc()
+ *
+ * Those wishing to support NUMA must also define:
+ *
+ *	kmem_cache_alloc_node()
+ *	kmalloc_node()
+ *
+ * See each allocator definition file for additional comments and
+ * implementation notes.
+ */
+#ifdef CONFIG_SLUB
+#include <linux/slub_def.h>
+#elif defined(CONFIG_SLOB)
+#include <linux/slob_def.h>
+#else
+#include <linux/slab_def.h>
+#endif
+
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
  * @n: number of elements.
@@ -138,37 +176,9 @@
 {
 	if (n != 0 && size > ULONG_MAX / n)
 		return NULL;
-	return __kzalloc(n * size, flags);
+	return __kmalloc(n * size, flags | __GFP_ZERO);
 }
 
-/*
- * Allocator specific definitions. These are mainly used to establish optimized
- * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
- * selecting the appropriate general cache at compile time.
- *
- * Allocators must define at least:
- *
- *	kmem_cache_alloc()
- *	__kmalloc()
- *	kmalloc()
- *	kzalloc()
- *
- * Those wishing to support NUMA must also define:
- *
- *	kmem_cache_alloc_node()
- *	kmalloc_node()
- *
- * See each allocator definition file for additional comments and
- * implementation notes.
- */
-#ifdef CONFIG_SLUB
-#include <linux/slub_def.h>
-#elif defined(CONFIG_SLOB)
-#include <linux/slob_def.h>
-#else
-#include <linux/slab_def.h>
-#endif
-
 #if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
 /**
  * kmalloc_node - allocate memory from a specific node
@@ -242,5 +252,23 @@
 
 #endif /* DEBUG_SLAB */
 
+/*
+ * Shortcuts
+ */
+static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
+{
+	return kmem_cache_alloc(k, flags | __GFP_ZERO);
+}
+
+/**
+ * kzalloc - allocate memory. The memory is set to zero.
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
+static inline void *kzalloc(size_t size, gfp_t flags)
+{
+	return kmalloc(size, flags | __GFP_ZERO);
+}
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SLAB_H */
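
Since the allocators now honour __GFP_ZERO directly, kzalloc(), kcalloc() and kmem_cache_zalloc() become thin wrappers around kmalloc()/kmem_cache_alloc(), and callers see no change. A short usage sketch; the structure and cache names are illustrative only:

#include <linux/slab.h>

struct example_item {			/* hypothetical structure */
	int id;
	char name[16];
};

static struct kmem_cache *example_cache;	/* created elsewhere */

static struct example_item *example_new(int id, gfp_t gfp)
{
	/* Zeroed allocation from the generic kmalloc caches... */
	struct example_item *it = kzalloc(sizeof(*it), gfp);

	if (it)
		it->id = id;
	return it;
}

static struct example_item *example_new_from_cache(gfp_t gfp)
{
	/* ...or zeroed allocation from a dedicated cache. */
	return kmem_cache_zalloc(example_cache, gfp);
}
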
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 365d036..32bdc2f 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -32,6 +32,10 @@
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
@@ -54,32 +58,6 @@
 	return __kmalloc(size, flags);
 }
 
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size)) {
-		int i = 0;
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-		{
-			extern void __you_cannot_kzalloc_that_much(void);
-			__you_cannot_kzalloc_that_much();
-		}
-found:
-#ifdef CONFIG_ZONE_DMA
-		if (flags & GFP_DMA)
-			return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
-						flags);
-#endif
-		return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
-	}
-	return __kzalloc(size, flags);
-}
-
 #ifdef CONFIG_NUMA
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
@@ -88,6 +66,10 @@
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index a2daf2d..59a3fa4 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -33,14 +33,4 @@
 	return kmalloc(size, flags);
 }
 
-/**
- * kzalloc - allocate memory. The memory is set to zero.
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
- */
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
-	return __kzalloc(size, flags);
-}
-
 #endif /* __LINUX_SLOB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a582f67..07f7e4c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -16,7 +16,9 @@
 	unsigned long nr_partial;
 	atomic_long_t nr_slabs;
 	struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
 	struct list_head full;
+#endif
 };
 
 /*
@@ -44,7 +46,9 @@
 	int align;		/* Alignment */
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
+#ifdef CONFIG_SLUB_DEBUG
 	struct kobject kobj;	/* For sysfs */
+#endif
 
 #ifdef CONFIG_NUMA
 	int defrag_ratio;
@@ -159,18 +163,6 @@
 #define SLUB_DMA 0
 #endif
 
-
-/*
- * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
- *
- * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
- *
- * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
- * Both make kfree a no-op.
- */
-#define ZERO_SIZE_PTR ((void *)16)
-
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
@@ -187,19 +179,6 @@
 		return __kmalloc(size, flags);
 }
 
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
-
-		if (!s)
-			return ZERO_SIZE_PTR;
-
-		return kmem_cache_zalloc(s, flags);
-	} else
-		return __kzalloc(size, flags);
-}
-
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 96ac21f..259a13c 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -99,11 +99,14 @@
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()			1
 #define smp_prepare_boot_cpu()			do {} while (0)
-static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
-					   void *info, int retry, int wait)
-{
-	return -EBUSY;
-}
+#define smp_call_function_single(cpuid, func, info, retry, wait) \
+({ \
+	WARN_ON(cpuid != 0);	\
+	local_irq_disable();	\
+	(func)(info);		\
+	local_irq_enable();	\
+	0;			\
+})
 
 #endif /* !SMP */
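
On uniprocessor builds smp_call_function_single() now runs the function locally with interrupts disabled instead of returning -EBUSY, so callers no longer need a UP special case. A hedged sketch with a made-up function and argument structure:

#include <linux/smp.h>

struct example_args {
	int value;			/* hypothetical per-call data */
};

static void example_on_cpu(void *info)
{
	struct example_args *args = info;

	args->value++;
}

static int example_poke_cpu0(struct example_args *args)
{
	/* retry == 0, wait == 1; on UP this becomes a direct local call. */
	return smp_call_function_single(0, example_on_cpu, args, 0, 1);
}
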
 
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 1be5ea0..302b81d 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -76,6 +76,7 @@
 #define	SPI_MODE_3	(SPI_CPOL|SPI_CPHA)
 #define	SPI_CS_HIGH	0x04			/* chipselect active high? */
 #define	SPI_LSB_FIRST	0x08			/* per-word bits-on-wire */
+#define	SPI_3WIRE	0x10			/* SI/SO signals shared */
 	u8			bits_per_word;
 	int			irq;
 	void			*controller_state;
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index 9dbca62..b8db32c 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -26,6 +26,7 @@
 	struct list_head	queue;
 	u8			busy;
 	u8			use_dma;
+	u8			flags;		/* extra spi->mode support */
 
 	struct spi_master	*master;
 
diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h
new file mode 100644
index 0000000..60b5918
--- /dev/null
+++ b/include/linux/spi/tle62x0.h
@@ -0,0 +1,24 @@
+/*
+ * tle62x0.h - platform glue to Infineon TLE62x0 driver chips
+ *
+ * Copyright 2007 Simtec Electronics
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+
+struct tle62x0_pdata {
+	unsigned int		init_state;
+	unsigned int		gpio_count;
+};
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 210549b..f6a3a95 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,14 +9,14 @@
  * Released under the General Public License (GPL).
  */
 
-#include <linux/lockdep.h>
-
 #if defined(CONFIG_SMP)
 # include <asm/spinlock_types.h>
 #else
 # include <linux/spinlock_types_up.h>
 #endif
 
+#include <linux/lockdep.h>
+
 typedef struct {
 	raw_spinlock_t raw_lock;
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 27644af..04135b0 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -12,14 +12,10 @@
  * Released under the General Public License (GPL).
  */
 
-#if defined(CONFIG_DEBUG_SPINLOCK) || \
-	defined(CONFIG_DEBUG_LOCK_ALLOC)
+#ifdef CONFIG_DEBUG_SPINLOCK
 
 typedef struct {
 	volatile unsigned int slock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 1 }
@@ -34,9 +30,6 @@
 
 typedef struct {
 	/* no debug version on UP */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
 } raw_rwlock_t;
 
 #define __RAW_RW_LOCK_UNLOCKED { }
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 1d2b084..e7fa657 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -13,7 +13,7 @@
 extern void print_stack_trace(struct stack_trace *trace, int spaces);
 #else
 # define save_stack_trace(trace)			do { } while (0)
-# define print_stack_trace(trace)			do { } while (0)
+# define print_stack_trace(trace, spaces)		do { } while (0)
 #endif
 
 #endif
diff --git a/include/linux/string.h b/include/linux/string.h
index 7f2eb6a..836062b 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -105,8 +105,12 @@
 #endif
 
 extern char *kstrdup(const char *s, gfp_t gfp);
+extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
 extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
 
+extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
+extern void argv_free(char **argv);
+
 #ifdef __cplusplus
 }
 #endif
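
argv_split() chops a whitespace-separated string into a kmalloc'ed, NULL-terminated argv-style array and argv_free() releases it again, which is what makes helpers like the configurable poweroff command practical. A small sketch (the function name and the pr_debug() output are illustrative):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/string.h>

static int example_show_args(const char *cmdline)
{
	int argc, i;
	char **argv = argv_split(GFP_KERNEL, cmdline, &argc);

	if (!argv)
		return -ENOMEM;

	for (i = 0; i < argc; i++)
		pr_debug("arg %d: %s\n", i, argv[i]);

	argv_free(argv);
	return 0;
}
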
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index bbac101..459c5fc 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -58,6 +58,7 @@
 u32 gss_delete_sec_context(
 		struct gss_ctx		**ctx_id);
 
+u32 gss_svc_to_pseudoflavor(struct gss_api_mech *, u32 service);
 u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor);
 char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service);
 
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 129d50f..8531a70 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -212,6 +212,7 @@
 	struct svc_pool *	rq_pool;	/* thread pool */
 	struct svc_procedure *	rq_procinfo;	/* procedure info */
 	struct auth_ops *	rq_authop;	/* authentication flavour */
+	u32			rq_flavor;	/* pseudoflavor */
 	struct svc_cred		rq_cred;	/* auth info */
 	struct sk_buff *	rq_skbuff;	/* fast recv inet buffer */
 	struct svc_deferred_req*rq_deferred;	/* deferred request we are replaying */
@@ -248,6 +249,7 @@
 						 */
 	/* Catering to nfsd */
 	struct auth_domain *	rq_client;	/* RPC peer info */
+	struct auth_domain *	rq_gssclient;	/* "gss/"-style peer info */
 	struct svc_cacherep *	rq_cacherep;	/* cache info */
 	struct knfsd_fh *	rq_reffh;	/* Referrence filehandle, used to
 						 * determine what device number
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index de92619..22e1ef8 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -127,6 +127,7 @@
 extern int auth_unix_forget_old(struct auth_domain *dom);
 extern void svcauth_unix_purge(void);
 extern void svcauth_unix_info_release(void *);
+extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
 
 static inline unsigned long hash_str(char *name, int bits)
 {
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index 5a5db16..417a1de 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -22,6 +22,7 @@
 int gss_svc_init(void);
 void gss_svc_shutdown(void);
 int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
+u32 svcauth_gss_flavor(struct auth_domain *dom);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 9c7cb64..e8e6da3 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -43,14 +43,19 @@
  * @prepare: prepare system for hibernation
  * @enter: shut down system after state has been saved to disk
  * @finish: finish/clean up after state has been reloaded
+ * @pre_restore: prepare system for the restoration from a hibernation image
+ * @restore_cleanup: clean up after a failing image restoration
  */
 struct hibernation_ops {
 	int (*prepare)(void);
 	int (*enter)(void);
 	void (*finish)(void);
+	int (*pre_restore)(void);
+	void (*restore_cleanup)(void);
 };
 
-#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
+#ifdef CONFIG_PM
+#ifdef CONFIG_SOFTWARE_SUSPEND
 /* kernel/power/snapshot.c */
 extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
 static inline void register_nosave_region(unsigned long b, unsigned long e)
@@ -68,16 +73,14 @@
 
 extern void hibernation_set_ops(struct hibernation_ops *ops);
 extern int hibernate(void);
-#else
-static inline void register_nosave_region(unsigned long b, unsigned long e) {}
-static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
+#else /* CONFIG_SOFTWARE_SUSPEND */
 static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
 static inline void swsusp_set_page_free(struct page *p) {}
 static inline void swsusp_unset_page_free(struct page *p) {}
 
 static inline void hibernation_set_ops(struct hibernation_ops *ops) {}
 static inline int hibernate(void) { return -ENOSYS; }
-#endif /* defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) */
+#endif /* CONFIG_SOFTWARE_SUSPEND */
 
 void save_processor_state(void);
 void restore_processor_state(void);
@@ -85,4 +88,43 @@
 void __save_processor_state(struct saved_context *ctxt);
 void __restore_processor_state(struct saved_context *ctxt);
 
+/* kernel/power/main.c */
+extern struct blocking_notifier_head pm_chain_head;
+
+static inline int register_pm_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&pm_chain_head, nb);
+}
+
+static inline int unregister_pm_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&pm_chain_head, nb);
+}
+
+#define pm_notifier(fn, pri) {				\
+	static struct notifier_block fn##_nb =			\
+		{ .notifier_call = fn, .priority = pri };	\
+	register_pm_notifier(&fn##_nb);			\
+}
+#else /* CONFIG_PM */
+
+static inline int register_pm_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int unregister_pm_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+#define pm_notifier(fn, pri)	do { (void)(fn); } while (0)
+#endif /* CONFIG_PM */
+
+#if !defined CONFIG_SOFTWARE_SUSPEND || !defined(CONFIG_PM)
+static inline void register_nosave_region(unsigned long b, unsigned long e)
+{
+}
+#endif
+
 #endif /* _LINUX_SWSUSP_H */
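
register_pm_notifier() and the pm_notifier() convenience macro hang a notifier_block on pm_chain_head so that drivers and subsystems can react to suspend/hibernation transitions; with CONFIG_PM disabled they compile to nothing. A hedged sketch of a notifier; the callback body is illustrative and the event codes come from the PM core:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

/* Hypothetical callback: just log the event broadcast on pm_chain_head.
 * A real user would quiesce or re-arm its hardware here. */
static int example_pm_event(struct notifier_block *nb, unsigned long event,
			    void *unused)
{
	pr_debug("example: pm event %lu\n", event);
	return NOTIFY_DONE;
}

static struct notifier_block example_pm_nb = {
	.notifier_call = example_pm_event,
};

static int __init example_pm_init(void)
{
	return register_pm_notifier(&example_pm_nb);
}
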
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 00686888..665f85f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -188,7 +188,8 @@
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **, gfp_t);
+extern unsigned long try_to_free_pages(struct zone **zones, int order,
+					gfp_t gfp_mask);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 83d0ec1..7a8b1e3 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -610,6 +610,7 @@
 asmlinkage long sys_timerfd(int ufd, int clockid, int flags,
 			    const struct itimerspec __user *utmr);
 asmlinkage long sys_eventfd(unsigned int count);
+asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
 
 int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
 
diff --git a/include/linux/time.h b/include/linux/time.h
index 4bb05a8..ec3b0ce 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -36,7 +36,8 @@
 #define NSEC_PER_SEC	1000000000L
 #define FSEC_PER_SEC	1000000000000000L
 
-static inline int timespec_equal(struct timespec *a, struct timespec *b)
+static inline int timespec_equal(const struct timespec *a,
+                                 const struct timespec *b)
 {
 	return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
 }
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
new file mode 100644
index 0000000..44c28e9
--- /dev/null
+++ b/include/linux/uio_driver.h
@@ -0,0 +1,91 @@
+/*
+ * include/linux/uio_driver.h
+ *
+ * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
+ * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de>
+ * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
+ *
+ * Userspace IO driver.
+ *
+ * Licensed under the GPLv2 only.
+ */
+
+#ifndef _UIO_DRIVER_H_
+#define _UIO_DRIVER_H_
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+
+/**
+ * struct uio_mem - description of a UIO memory region
+ * @kobj:		kobject for this mapping
+ * @addr:		address of the device's memory
+ * @size:		size of IO
+ * @memtype:		type of memory addr points to
+ * @internal_addr:	ioremap-ped version of addr, for driver internal use
+ */
+struct uio_mem {
+	struct kobject		kobj;
+	unsigned long		addr;
+	unsigned long		size;
+	int			memtype;
+	void __iomem		*internal_addr;
+};
+
+#define MAX_UIO_MAPS 	5
+
+struct uio_device;
+
+/**
+ * struct uio_info - UIO device capabilities
+ * @uio_dev:		the UIO device this info belongs to
+ * @name:		device name
+ * @version:		device driver version
+ * @mem:		list of mappable memory regions, size==0 for end of list
+ * @irq:		interrupt number or UIO_IRQ_CUSTOM
+ * @irq_flags:		flags for request_irq()
+ * @priv:		optional private data
+ * @handler:		the device's irq handler
+ * @mmap:		mmap operation for this uio device
+ * @open:		open operation for this uio device
+ * @release:		release operation for this uio device
+ */
+struct uio_info {
+	struct uio_device	*uio_dev;
+	char			*name;
+	char			*version;
+	struct uio_mem		mem[MAX_UIO_MAPS];
+	long			irq;
+	unsigned long		irq_flags;
+	void			*priv;
+	irqreturn_t (*handler)(int irq, struct uio_info *dev_info);
+	int (*mmap)(struct uio_info *info, struct vm_area_struct *vma);
+	int (*open)(struct uio_info *info, struct inode *inode);
+	int (*release)(struct uio_info *info, struct inode *inode);
+};
+
+extern int __must_check
+	__uio_register_device(struct module *owner,
+			      struct device *parent,
+			      struct uio_info *info);
+static inline int __must_check
+	uio_register_device(struct device *parent, struct uio_info *info)
+{
+	return __uio_register_device(THIS_MODULE, parent, info);
+}
+extern void uio_unregister_device(struct uio_info *info);
+extern void uio_event_notify(struct uio_info *info);
+
+/* defines for uio_device->irq */
+#define UIO_IRQ_CUSTOM	-1
+#define UIO_IRQ_NONE	-2
+
+/* defines for uio_device->memtype */
+#define UIO_MEM_NONE	0
+#define UIO_MEM_PHYS	1
+#define UIO_MEM_LOGICAL	2
+#define UIO_MEM_VIRTUAL 3
+
+#endif /* _LINUX_UIO_DRIVER_H_ */
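
A UIO driver fills in a struct uio_info with a name, at least one mappable memory region and an interrupt, then registers it; user space mmap()s the regions and blocks on the UIO device node for interrupt notifications. A minimal, hedged sketch of a platform driver probe (the resource layout, names and omitted error unwinding are assumptions):

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uio_driver.h>

/* Hypothetical handler: device-specific acknowledge omitted; returning
 * IRQ_HANDLED makes the UIO core wake up user space. */
static irqreturn_t example_uio_irq(int irq, struct uio_info *info)
{
	return IRQ_HANDLED;
}

static int example_uio_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct uio_info *info;

	if (!res)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->name = "example_uio";		/* hypothetical name */
	info->version = "0.1";
	info->mem[0].addr = res->start;
	info->mem[0].size = res->end - res->start + 1;
	info->mem[0].memtype = UIO_MEM_PHYS;
	info->irq = platform_get_irq(pdev, 0);
	info->irq_flags = IRQF_SHARED;
	info->handler = example_uio_irq;

	platform_set_drvdata(pdev, info);
	return uio_register_device(&pdev->dev, info);
}
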
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index bb32057..1101b0c 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -49,7 +49,7 @@
 	if (flags & CLONE_NEWUSER)
 		return ERR_PTR(-EINVAL);
 
-	return NULL;
+	return old_ns;
 }
 
 static inline void put_user_ns(struct user_namespace *ns)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4b7ee83..c2b10ca 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -65,9 +65,14 @@
 					  unsigned long flags, int node,
 					  gfp_t gfp_mask);
 extern struct vm_struct *remove_vm_area(void *addr);
+
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
-extern void unmap_vm_area(struct vm_struct *area);
+extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+
+/* Allocate/destroy a 'vmalloc' VM area. */
+extern struct vm_struct *alloc_vm_area(size_t size);
+extern void free_vm_area(struct vm_struct *area);
 
 /*
  *	Internals.  Dont't use..
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index d9325cf..75370ec 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -25,7 +25,7 @@
 #define HIGHMEM_ZONE(xx)
 #endif
 
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		FOR_ALL_ZONES(PGALLOC),
@@ -170,7 +170,8 @@
 #ifdef CONFIG_HIGHMEM
 		zone_page_state(&zones[ZONE_HIGHMEM], item) +
 #endif
-		zone_page_state(&zones[ZONE_NORMAL], item);
+		zone_page_state(&zones[ZONE_NORMAL], item) +
+		zone_page_state(&zones[ZONE_MOVABLE], item);
 }
 
 extern void zone_statistics(struct zonelist *, struct zone *);
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index d961635..699b7e9 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -75,6 +75,8 @@
 int vt_waitactive(int vt);
 void change_console(struct vc_data *new_vc);
 void reset_vc(struct vc_data *vc);
+extern int unbind_con_driver(const struct consw *csw, int first, int last,
+			     int deflt);
 
 /*
  * vc_screen.c shares this temporary buffer with the console write code so that
diff --git a/include/media/saa7146.h b/include/media/saa7146.h
index d3f4f5a..6770324 100644
--- a/include/media/saa7146.h
+++ b/include/media/saa7146.h
@@ -114,7 +114,7 @@
 	struct mutex			lock;
 
 	unsigned char			__iomem *mem;		/* pointer to mapped IO memory */
-	int				revision;	/* chip revision; needed for bug-workarounds*/
+	u32				revision;	/* chip revision; needed for bug-workarounds*/
 
 	/* pci-device & irq stuff*/
 	char				name[32];
@@ -157,8 +157,8 @@
 int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt);
 void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt);
 int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, struct scatterlist *list, int length );
-char *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt);
-void saa7146_vfree_destroy_pgtable(struct pci_dev *pci, char *mem, struct saa7146_pgtable *pt);
+void *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt);
+void saa7146_vfree_destroy_pgtable(struct pci_dev *pci, void *mem, struct saa7146_pgtable *pt);
 void saa7146_setgpio(struct saa7146_dev *dev, int port, u32 data);
 int saa7146_wait_for_debi_done(struct saa7146_dev *dev, int nobusyloop);
 
diff --git a/include/media/tuner.h b/include/media/tuner.h
index 6dcf3c4..160381c 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -23,8 +23,6 @@
 #define _TUNER_H
 
 #include <linux/videodev2.h>
-#include <linux/i2c.h>
-#include <media/tuner-types.h>
 
 extern int tuner_debug;
 
@@ -124,6 +122,7 @@
 #define TUNER_THOMSON_FE6600		72	/* DViCO FusionHDTV DVB-T Hybrid */
 #define TUNER_SAMSUNG_TCPG_6121P30A     73 	/* Hauppauge PVR-500 PAL */
 #define TUNER_TDA9887                   74      /* This tuner should be used only internally */
+#define TUNER_TEA5761			75	/* Only FM Radio Tuner */
 
 /* tv card specific */
 #define TDA9887_PRESENT 		(1<<0)
@@ -182,74 +181,6 @@
 	int (*tuner_callback) (void *dev, int command,int arg);
 };
 
-struct tuner {
-	/* device */
-	struct i2c_client i2c;
-
-	unsigned int type;	/* chip type */
-
-	unsigned int mode;
-	unsigned int mode_mask;	/* Combination of allowable modes */
-
-	unsigned int tv_freq;	/* keep track of the current settings */
-	unsigned int radio_freq;
-	u16 	     last_div;
-	unsigned int audmode;
-	v4l2_std_id  std;
-
-	int          using_v4l2;
-
-	/* used by tda9887 */
-	unsigned int       tda9887_config;
-	unsigned char 	   tda9887_data[4];
-
-	/* used by MT2032 */
-	unsigned int xogc;
-	unsigned int radio_if2;
-
-	/* used by tda8290 */
-	unsigned char tda8290_easy_mode;
-	unsigned char tda827x_lpsel;
-	unsigned char tda827x_addr;
-	unsigned char tda827x_ver;
-	unsigned int sgIF;
-
-	unsigned int config;
-	int (*tuner_callback) (void *dev, int command,int arg);
-
-	/* function ptrs */
-	void (*set_tv_freq)(struct i2c_client *c, unsigned int freq);
-	void (*set_radio_freq)(struct i2c_client *c, unsigned int freq);
-	int  (*has_signal)(struct i2c_client *c);
-	int  (*is_stereo)(struct i2c_client *c);
-	int  (*get_afc)(struct i2c_client *c);
-	void (*tuner_status)(struct i2c_client *c);
-	void (*standby)(struct i2c_client *c);
-};
-
-extern unsigned const int tuner_count;
-
-extern int microtune_init(struct i2c_client *c);
-extern int xc3028_init(struct i2c_client *c);
-extern int tda8290_init(struct i2c_client *c);
-extern int tda8290_probe(struct i2c_client *c);
-extern int tea5767_tuner_init(struct i2c_client *c);
-extern int default_tuner_init(struct i2c_client *c);
-extern int tea5767_autodetection(struct i2c_client *c);
-extern int tda9887_tuner_init(struct i2c_client *c);
-
-#define tuner_warn(fmt, arg...) do {\
-	printk(KERN_WARNING "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \
-			i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
-#define tuner_info(fmt, arg...) do {\
-	printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \
-			i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
-#define tuner_dbg(fmt, arg...) do {\
-	extern int tuner_debug; \
-	if (tuner_debug) \
-		printk(KERN_DEBUG "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \
-			i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
-
 #endif /* __KERNEL__ */
 
 #endif /* _TUNER_H */
diff --git a/include/mtd/ubi-header.h b/include/mtd/ubi-header.h
index fa479c7..74efa77 100644
--- a/include/mtd/ubi-header.h
+++ b/include/mtd/ubi-header.h
@@ -74,42 +74,13 @@
 	UBI_COMPAT_REJECT   = 5
 };
 
-/*
- * ubi16_t/ubi32_t/ubi64_t - 16, 32, and 64-bit integers used in UBI on-flash
- * data structures.
- */
-typedef struct {
-	uint16_t int16;
-} __attribute__ ((packed)) ubi16_t;
-
-typedef struct {
-	uint32_t int32;
-} __attribute__ ((packed)) ubi32_t;
-
-typedef struct {
-	uint64_t int64;
-} __attribute__ ((packed)) ubi64_t;
-
-/*
- * In this implementation of UBI uses the big-endian format for on-flash
- * integers. The below are the corresponding conversion macros.
- */
-#define cpu_to_ubi16(x) ((ubi16_t){__cpu_to_be16(x)})
-#define ubi16_to_cpu(x) ((uint16_t)__be16_to_cpu((x).int16))
-
-#define cpu_to_ubi32(x) ((ubi32_t){__cpu_to_be32(x)})
-#define ubi32_to_cpu(x) ((uint32_t)__be32_to_cpu((x).int32))
-
-#define cpu_to_ubi64(x) ((ubi64_t){__cpu_to_be64(x)})
-#define ubi64_to_cpu(x) ((uint64_t)__be64_to_cpu((x).int64))
-
 /* Sizes of UBI headers */
 #define UBI_EC_HDR_SIZE  sizeof(struct ubi_ec_hdr)
 #define UBI_VID_HDR_SIZE sizeof(struct ubi_vid_hdr)
 
 /* Sizes of UBI headers without the ending CRC */
-#define UBI_EC_HDR_SIZE_CRC  (UBI_EC_HDR_SIZE  - sizeof(ubi32_t))
-#define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(ubi32_t))
+#define UBI_EC_HDR_SIZE_CRC  (UBI_EC_HDR_SIZE  - sizeof(__be32))
+#define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(__be32))
 
 /**
  * struct ubi_ec_hdr - UBI erase counter header.
@@ -137,14 +108,14 @@
  * eraseblocks.
  */
 struct ubi_ec_hdr {
-	ubi32_t magic;
-	uint8_t version;
-	uint8_t padding1[3];
-	ubi64_t ec; /* Warning: the current limit is 31-bit anyway! */
-	ubi32_t vid_hdr_offset;
-	ubi32_t data_offset;
-	uint8_t padding2[36];
-	ubi32_t hdr_crc;
+	__be32  magic;
+	__u8    version;
+	__u8    padding1[3];
+	__be64  ec; /* Warning: the current limit is 31-bit anyway! */
+	__be32  vid_hdr_offset;
+	__be32  data_offset;
+	__u8    padding2[36];
+	__be32  hdr_crc;
 } __attribute__ ((packed));
 
 /**
@@ -262,22 +233,22 @@
  * software (say, cramfs) on top of the UBI volume.
  */
 struct ubi_vid_hdr {
-	ubi32_t magic;
-	uint8_t version;
-	uint8_t vol_type;
-	uint8_t copy_flag;
-	uint8_t compat;
-	ubi32_t vol_id;
-	ubi32_t lnum;
-	ubi32_t leb_ver; /* obsolete, to be removed, don't use */
-	ubi32_t data_size;
-	ubi32_t used_ebs;
-	ubi32_t data_pad;
-	ubi32_t data_crc;
-	uint8_t padding1[4];
-	ubi64_t sqnum;
-	uint8_t padding2[12];
-	ubi32_t hdr_crc;
+	__be32  magic;
+	__u8    version;
+	__u8    vol_type;
+	__u8    copy_flag;
+	__u8    compat;
+	__be32  vol_id;
+	__be32  lnum;
+	__be32  leb_ver; /* obsolete, to be removed, don't use */
+	__be32  data_size;
+	__be32  used_ebs;
+	__be32  data_pad;
+	__be32  data_crc;
+	__u8    padding1[4];
+	__be64  sqnum;
+	__u8    padding2[12];
+	__be32  hdr_crc;
 } __attribute__ ((packed));
 
 /* Internal UBI volumes count */
@@ -306,7 +277,7 @@
 #define UBI_VTBL_RECORD_SIZE sizeof(struct ubi_vtbl_record)
 
 /* Size of the volume table record without the ending CRC */
-#define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(ubi32_t))
+#define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(__be32))
 
 /**
  * struct ubi_vtbl_record - a record in the volume table.
@@ -346,15 +317,15 @@
  * Empty records contain all zeroes and the CRC checksum of those zeroes.
  */
 struct ubi_vtbl_record {
-	ubi32_t reserved_pebs;
-	ubi32_t alignment;
-	ubi32_t data_pad;
-	uint8_t vol_type;
-	uint8_t upd_marker;
-	ubi16_t name_len;
-	uint8_t name[UBI_VOL_NAME_MAX+1];
-	uint8_t padding2[24];
-	ubi32_t crc;
+	__be32  reserved_pebs;
+	__be32  alignment;
+	__be32  data_pad;
+	__u8    vol_type;
+	__u8    upd_marker;
+	__be16  name_len;
+	__u8    name[UBI_VOL_NAME_MAX+1];
+	__u8    padding2[24];
+	__be32  crc;
 } __attribute__ ((packed));
 
 #endif /* !__UBI_HEADER_H__ */
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index b6eaca1..decdda5 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -5,6 +5,22 @@
 #include <net/netlink.h>
 
 /**
+ * struct genl_multicast_group - generic netlink multicast group
+ * @name: name of the multicast group, names are per-family
+ * @id: multicast group ID, assigned by the core, to use with
+ *      genlmsg_multicast().
+ * @list: list entry for linking
+ * @family: pointer to family, need not be set before registering
+ */
+struct genl_multicast_group
+{
+	struct genl_family	*family;	/* private */
+	struct list_head	list;		/* private */
+	char			name[GENL_NAMSIZ];
+	u32			id;
+};
+
+/**
  * struct genl_family - generic netlink family
  * @id: protocol family idenfitier
  * @hdrsize: length of user specific header in bytes
@@ -14,6 +30,7 @@
  * @attrbuf: buffer to store parsed attributes
  * @ops_list: list of all assigned operations
  * @family_list: family list
+ * @mcast_groups: multicast groups list
  */
 struct genl_family
 {
@@ -25,6 +42,7 @@
 	struct nlattr **	attrbuf;	/* private */
 	struct list_head	ops_list;	/* private */
 	struct list_head	family_list;	/* private */
+	struct list_head	mcast_groups;	/* private */
 };
 
 /**
@@ -73,6 +91,10 @@
 extern int genl_unregister_family(struct genl_family *family);
 extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
 extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
+extern int genl_register_mc_group(struct genl_family *family,
+				  struct genl_multicast_group *grp);
+extern void genl_unregister_mc_group(struct genl_family *family,
+				     struct genl_multicast_group *grp);
 
 extern struct sock *genl_sock;
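
Multicast groups are now registered per family: the core assigns grp->id, which is then used with genlmsg_multicast(). A hedged sketch; the family is assumed to be defined and registered elsewhere, and all names are illustrative:

#include <net/genetlink.h>

/* Hypothetical family, registered elsewhere via genl_register_family(). */
extern struct genl_family example_genl_family;

static struct genl_multicast_group example_mc_group = {
	.name = "example_events",		/* per-family group name */
};

static int example_register_events(void)
{
	/* On success the core has filled in example_mc_group.id. */
	return genl_register_mc_group(&example_genl_family,
				      &example_mc_group);
}
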
 
diff --git a/include/net/scm.h b/include/net/scm.h
index 5637d5e..423cb1d 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -8,7 +8,7 @@
 /* Well, we should have at least one descriptor open
  * to accept passed FDs 8)
  */
-#define SCM_MAX_FD	(OPEN_MAX-1)
+#define SCM_MAX_FD	255
 
 struct scm_fp_list
 {
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a8af9ae..8b404b1 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -652,8 +652,7 @@
 	/* lower bound for congestion window (optional) */
 	u32 (*min_cwnd)(const struct sock *sk);
 	/* do new cwnd calculation (required) */
-	void (*cong_avoid)(struct sock *sk, u32 ack,
-			   u32 rtt, u32 in_flight, int good_ack);
+	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight, int good_ack);
 	/* call before changing ca_state (optional) */
 	void (*set_state)(struct sock *sk, u8 new_state);
 	/* call when cwnd event occurs (optional) */
@@ -684,8 +683,7 @@
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
 extern u32 tcp_reno_ssthresh(struct sock *sk);
-extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
-				u32 rtt, u32 in_flight, int flag);
+extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag);
 extern u32 tcp_reno_min_cwnd(const struct sock *sk);
 extern struct tcp_congestion_ops tcp_reno;
 
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index ae959e9..a5f80bf 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -585,7 +585,6 @@
 struct xfrm_dst
 {
 	union {
-		struct xfrm_dst		*next;
 		struct dst_entry	dst;
 		struct rtable		rt;
 		struct rt6_info		rt6;
diff --git a/include/video/tgafb.h b/include/video/tgafb.h
index 03d0dbe..7bc5e2c 100644
--- a/include/video/tgafb.h
+++ b/include/video/tgafb.h
@@ -216,6 +216,7 @@
 	u32 pll_freq;			/* pixclock in mhz */
 	u32 bits_per_pixel;		/* bits per pixel */
 	u32 sync_on_green;		/* set if sync is on green */
+	u32 palette[16];
 };
 
 
diff --git a/include/xen/events.h b/include/xen/events.h
new file mode 100644
index 0000000..2bde54d
--- /dev/null
+++ b/include/xen/events.h
@@ -0,0 +1,48 @@
+#ifndef _XEN_EVENTS_H
+#define _XEN_EVENTS_H
+
+#include <linux/interrupt.h>
+
+#include <xen/interface/event_channel.h>
+#include <asm/xen/hypercall.h>
+
+enum ipi_vector {
+	XEN_RESCHEDULE_VECTOR,
+	XEN_CALL_FUNCTION_VECTOR,
+
+	XEN_NR_IPIS,
+};
+
+int bind_evtchn_to_irq(unsigned int evtchn);
+int bind_evtchn_to_irqhandler(unsigned int evtchn,
+			      irq_handler_t handler,
+			      unsigned long irqflags, const char *devname,
+			      void *dev_id);
+int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
+			    irq_handler_t handler,
+			    unsigned long irqflags, const char *devname,
+			    void *dev_id);
+int bind_ipi_to_irqhandler(enum ipi_vector ipi,
+			   unsigned int cpu,
+			   irq_handler_t handler,
+			   unsigned long irqflags,
+			   const char *devname,
+			   void *dev_id);
+
+/*
+ * Common unbind function for all event sources. Takes IRQ to unbind from.
+ * Automatically closes the underlying event channel (even for bindings
+ * made with bind_evtchn_to_irqhandler()).
+ */
+void unbind_from_irqhandler(unsigned int irq, void *dev_id);
+
+void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
+
+static inline void notify_remote_via_evtchn(int port)
+{
+	struct evtchn_send send = { .port = port };
+	(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
+}
+
+extern void notify_remote_via_irq(int irq);
+#endif	/* _XEN_EVENTS_H */
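
Event channels are multiplexed onto ordinary Linux IRQs, so a frontend usually binds a channel to a handler and later tears the binding down with unbind_from_irqhandler(), which also closes the channel. A sketch built only from the declarations above; the channel number, device name and dev_id are placeholders:

#include <xen/events.h>

static int example_irq = -1;

/* Hypothetical handler for events arriving on the bound channel. */
static irqreturn_t example_evtchn_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_bind(unsigned int evtchn, void *dev_id)
{
	int irq = bind_evtchn_to_irqhandler(evtchn, example_evtchn_interrupt,
					    0, "example-frontend", dev_id);
	if (irq < 0)
		return irq;

	example_irq = irq;
	return 0;
}

static void example_unbind(void *dev_id)
{
	if (example_irq >= 0)
		unbind_from_irqhandler(example_irq, dev_id);
}
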
diff --git a/include/xen/features.h b/include/xen/features.h
new file mode 100644
index 0000000..27292d4
--- /dev/null
+++ b/include/xen/features.h
@@ -0,0 +1,23 @@
+/******************************************************************************
+ * features.h
+ *
+ * Query the features reported by Xen.
+ *
+ * Copyright (c) 2006, Ian Campbell
+ */
+
+#ifndef __XEN_FEATURES_H__
+#define __XEN_FEATURES_H__
+
+#include <xen/interface/features.h>
+
+void xen_setup_features(void);
+
+extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32];
+
+static inline int xen_feature(int flag)
+{
+	return xen_features[flag];
+}
+
+#endif /* __ASM_XEN_FEATURES_H__ */
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
new file mode 100644
index 0000000..761c834
--- /dev/null
+++ b/include/xen/grant_table.h
@@ -0,0 +1,107 @@
+/******************************************************************************
+ * grant_table.h
+ *
+ * Two sets of functionality:
+ * 1. Granting foreign access to our memory reservation.
+ * 2. Accessing others' memory reservations via grant references.
+ * (i.e., mechanisms for both sender and recipient of grant references)
+ *
+ * Copyright (c) 2004-2005, K A Fraser
+ * Copyright (c) 2005, Christopher Clark
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __ASM_GNTTAB_H__
+#define __ASM_GNTTAB_H__
+
+#include <asm/xen/hypervisor.h>
+#include <xen/interface/grant_table.h>
+
+/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
+#define NR_GRANT_FRAMES 4
+
+struct gnttab_free_callback {
+	struct gnttab_free_callback *next;
+	void (*fn)(void *);
+	void *arg;
+	u16 count;
+};
+
+int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
+				int readonly);
+
+/*
+ * End access through the given grant reference, iff the grant entry is no
+ * longer in use.  Return 1 if the grant entry was freed, 0 if it is still in
+ * use.
+ */
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
+
+/*
+ * Eventually end access through the given grant reference, and once that
+ * access has been ended, free the given page too.  Access will be ended
+ * immediately iff the grant entry is not in use, otherwise it will happen
+ * some time later.  page may be 0, in which case no freeing will occur.
+ */
+void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
+			       unsigned long page);
+
+int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
+
+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
+unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
+
+int gnttab_query_foreign_access(grant_ref_t ref);
+
+/*
+ * operations on reserved batches of grant references
+ */
+int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
+
+void gnttab_free_grant_reference(grant_ref_t ref);
+
+void gnttab_free_grant_references(grant_ref_t head);
+
+int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
+
+int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
+
+void gnttab_release_grant_reference(grant_ref_t *private_head,
+				    grant_ref_t release);
+
+void gnttab_request_free_callback(struct gnttab_free_callback *callback,
+				  void (*fn)(void *), void *arg, u16 count);
+void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
+
+void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
+				     unsigned long frame, int readonly);
+
+void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
+				       unsigned long pfn);
+
+#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
+
+#endif /* __ASM_GNTTAB_H__ */
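
The usual grant lifetime is: grant the remote domain access to one of our frames, pass the returned reference across (for instance in a shared ring request), and end the access once the other side is finished with it. A hedged sketch using only the declarations above; the domain id and frame number are supplied by the caller:

#include <xen/grant_table.h>

/* Hypothetical wrappers: grant read-only access to a frame and revoke
 * it again.  Error handling is kept to the bare minimum. */
static int example_grant(domid_t remote_domid, unsigned long frame,
			 grant_ref_t *ref)
{
	int r = gnttab_grant_foreign_access(remote_domid, frame, 1 /* RO */);

	if (r < 0)
		return r;

	*ref = r;
	return 0;
}

static void example_ungrant(grant_ref_t ref)
{
	/* page == 0: end the access but do not free any page here. */
	gnttab_end_foreign_access(ref, 1 /* RO */, 0);
}
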
diff --git a/include/xen/hvc-console.h b/include/xen/hvc-console.h
new file mode 100644
index 0000000..21c0ecf
--- /dev/null
+++ b/include/xen/hvc-console.h
@@ -0,0 +1,6 @@
+#ifndef XEN_HVC_CONSOLE_H
+#define XEN_HVC_CONSOLE_H
+
+extern struct console xenboot_console;
+
+#endif	/* XEN_HVC_CONSOLE_H */
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
new file mode 100644
index 0000000..a64d3df
--- /dev/null
+++ b/include/xen/interface/elfnote.h
@@ -0,0 +1,133 @@
+/******************************************************************************
+ * elfnote.h
+ *
+ * Definitions used for the Xen ELF notes.
+ *
+ * Copyright (c) 2006, Ian Campbell, XenSource Ltd.
+ */
+
+#ifndef __XEN_PUBLIC_ELFNOTE_H__
+#define __XEN_PUBLIC_ELFNOTE_H__
+
+/*
+ * The notes should live in a SHT_NOTE segment and have "Xen" in the
+ * name field.
+ *
+ * Numeric types are either 4 or 8 bytes depending on the content of
+ * the desc field.
+ *
+ * LEGACY indicates the fields in the legacy __xen_guest string which
+ * a note of this type replaces.
+ */
+
+/*
+ * NAME=VALUE pair (string).
+ *
+ * LEGACY: FEATURES and PAE
+ */
+#define XEN_ELFNOTE_INFO           0
+
+/*
+ * The virtual address of the entry point (numeric).
+ *
+ * LEGACY: VIRT_ENTRY
+ */
+#define XEN_ELFNOTE_ENTRY          1
+
+/* The virtual address of the hypercall transfer page (numeric).
+ *
+ * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page
+ * number not a virtual address)
+ */
+#define XEN_ELFNOTE_HYPERCALL_PAGE 2
+
+/* The virtual address where the kernel image should be mapped (numeric).
+ *
+ * Defaults to 0.
+ *
+ * LEGACY: VIRT_BASE
+ */
+#define XEN_ELFNOTE_VIRT_BASE      3
+
+/*
+ * The offset of the ELF paddr field from the actual required
+ * pseudo-physical address (numeric).
+ *
+ * This is used to maintain backwards compatibility with older kernels
+ * which wrote __PAGE_OFFSET into that field. This field defaults to 0
+ * if not present.
+ *
+ * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE)
+ */
+#define XEN_ELFNOTE_PADDR_OFFSET   4
+
+/*
+ * The version of Xen that we work with (string).
+ *
+ * LEGACY: XEN_VER
+ */
+#define XEN_ELFNOTE_XEN_VERSION    5
+
+/*
+ * The name of the guest operating system (string).
+ *
+ * LEGACY: GUEST_OS
+ */
+#define XEN_ELFNOTE_GUEST_OS       6
+
+/*
+ * The version of the guest operating system (string).
+ *
+ * LEGACY: GUEST_VER
+ */
+#define XEN_ELFNOTE_GUEST_VERSION  7
+
+/*
+ * The loader type (string).
+ *
+ * LEGACY: LOADER
+ */
+#define XEN_ELFNOTE_LOADER         8
+
+/*
+ * The kernel supports PAE (x86/32 only, string = "yes" or "no").
+ *
+ * LEGACY: PAE (n.b. The legacy interface included a provision to
+ * indicate 'extended-cr3' support allowing L3 page tables to be
+ * placed above 4G. It is assumed that any kernel new enough to use
+ * these ELF notes will include this and therefore "yes" here is
+ * equivalent to "yes[extended-cr3]" in the __xen_guest interface.)
+ */
+#define XEN_ELFNOTE_PAE_MODE       9
+
+/*
+ * The features supported/required by this kernel (string).
+ *
+ * The string must consist of a list of feature names (as given in
+ * features.h, without the "XENFEAT_" prefix) separated by '|'
+ * characters. If a feature is required for the kernel to function
+ * then the feature name must be preceded by a '!' character.
+ *
+ * LEGACY: FEATURES
+ */
+#define XEN_ELFNOTE_FEATURES      10
+
+/*
+ * The kernel requires the symbol table to be loaded (string = "yes" or "no")
+ * LEGACY: BSD_SYMTAB (n.b. The legacy interface treated the presence or
+ * absence of this string as a boolean flag rather than requiring "yes"
+ * or "no".)
+ */
+#define XEN_ELFNOTE_BSD_SYMTAB    11
+
+#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/xen/interface/event_channel.h b/include/xen/interface/event_channel.h
new file mode 100644
index 0000000..919b5bd
--- /dev/null
+++ b/include/xen/interface/event_channel.h
@@ -0,0 +1,195 @@
+/******************************************************************************
+ * event_channel.h
+ *
+ * Event channels between domains.
+ *
+ * Copyright (c) 2003-2004, K A Fraser.
+ */
+
+#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
+#define __XEN_PUBLIC_EVENT_CHANNEL_H__
+
+typedef uint32_t evtchn_port_t;
+DEFINE_GUEST_HANDLE(evtchn_port_t);
+
+/*
+ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
+ * accepting interdomain bindings from domain <remote_dom>. A fresh port
+ * is allocated in <dom> and returned as <port>.
+ * NOTES:
+ *  1. If the caller is unprivileged then <dom> must be DOMID_SELF.
+ *  2. <rdom> may be DOMID_SELF, allowing loopback connections.
+ */
+#define EVTCHNOP_alloc_unbound	  6
+struct evtchn_alloc_unbound {
+	/* IN parameters */
+	domid_t dom, remote_dom;
+	/* OUT parameters */
+	evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
+ * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
+ * a port that is unbound and marked as accepting bindings from the calling
+ * domain. A fresh port is allocated in the calling domain and returned as
+ * <local_port>.
+ * NOTES:
+ *  2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
+ */
+#define EVTCHNOP_bind_interdomain 0
+struct evtchn_bind_interdomain {
+	/* IN parameters. */
+	domid_t remote_dom;
+	evtchn_port_t remote_port;
+	/* OUT parameters. */
+	evtchn_port_t local_port;
+};
+
+/*
+ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
+ * vcpu.
+ * NOTES:
+ *  1. A virtual IRQ may be bound to at most one event channel per vcpu.
+ *  2. The allocated event channel is bound to the specified vcpu. The binding
+ *     may not be changed.
+ */
+#define EVTCHNOP_bind_virq	  1
+struct evtchn_bind_virq {
+	/* IN parameters. */
+	uint32_t virq;
+	uint32_t vcpu;
+	/* OUT parameters. */
+	evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
+ * NOTES:
+ *  1. A physical IRQ may be bound to at most one event channel per domain.
+ *  2. Only a sufficiently-privileged domain may bind to a physical IRQ.
+ */
+#define EVTCHNOP_bind_pirq	  2
+struct evtchn_bind_pirq {
+	/* IN parameters. */
+	uint32_t pirq;
+#define BIND_PIRQ__WILL_SHARE 1
+	uint32_t flags; /* BIND_PIRQ__* */
+	/* OUT parameters. */
+	evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
+ * NOTES:
+ *  1. The allocated event channel is bound to the specified vcpu. The binding
+ *     may not be changed.
+ */
+#define EVTCHNOP_bind_ipi	  7
+struct evtchn_bind_ipi {
+	uint32_t vcpu;
+	/* OUT parameters. */
+	evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
+ * interdomain then the remote end is placed in the unbound state
+ * (EVTCHNSTAT_unbound), awaiting a new connection.
+ */
+#define EVTCHNOP_close		  3
+struct evtchn_close {
+	/* IN parameters. */
+	evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
+ * endpoint is <port>.
+ */
+#define EVTCHNOP_send		  4
+struct evtchn_send {
+	/* IN parameters. */
+	evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_status: Get the current status of the communication channel which
+ * has an endpoint at <dom, port>.
+ * NOTES:
+ *  1. <dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may obtain the status of an event
+ *     channel for which <dom> is not DOMID_SELF.
+ */
+#define EVTCHNOP_status		  5
+struct evtchn_status {
+	/* IN parameters */
+	domid_t  dom;
+	evtchn_port_t port;
+	/* OUT parameters */
+#define EVTCHNSTAT_closed	0  /* Channel is not in use.		     */
+#define EVTCHNSTAT_unbound	1  /* Channel is waiting interdom connection.*/
+#define EVTCHNSTAT_interdomain	2  /* Channel is connected to remote domain. */
+#define EVTCHNSTAT_pirq		3  /* Channel is bound to a phys IRQ line.   */
+#define EVTCHNSTAT_virq		4  /* Channel is bound to a virtual IRQ line */
+#define EVTCHNSTAT_ipi		5  /* Channel is bound to a virtual IPI line */
+	uint32_t status;
+	uint32_t vcpu;		   /* VCPU to which this channel is bound.   */
+	union {
+		struct {
+			domid_t dom;
+		} unbound; /* EVTCHNSTAT_unbound */
+		struct {
+			domid_t dom;
+			evtchn_port_t port;
+		} interdomain; /* EVTCHNSTAT_interdomain */
+		uint32_t pirq;	    /* EVTCHNSTAT_pirq	      */
+		uint32_t virq;	    /* EVTCHNSTAT_virq	      */
+	} u;
+};
+
+/*
+ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
+ * event is pending.
+ * NOTES:
+ *  1. IPI- and VIRQ-bound channels always notify the vcpu that initialised
+ *     the binding. This binding cannot be changed.
+ *  2. All other channels notify vcpu0 by default. This default is set when
+ *     the channel is allocated (a port that is freed and subsequently reused
+ *     has its binding reset to vcpu0).
+ */
+#define EVTCHNOP_bind_vcpu	  8
+struct evtchn_bind_vcpu {
+	/* IN parameters. */
+	evtchn_port_t port;
+	uint32_t vcpu;
+};
+
+/*
+ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
+ * a notification to the appropriate VCPU if an event is pending.
+ */
+#define EVTCHNOP_unmask		  9
+struct evtchn_unmask {
+	/* IN parameters. */
+	evtchn_port_t port;
+};
+
+struct evtchn_op {
+	uint32_t cmd; /* EVTCHNOP_* */
+	union {
+		struct evtchn_alloc_unbound    alloc_unbound;
+		struct evtchn_bind_interdomain bind_interdomain;
+		struct evtchn_bind_virq	       bind_virq;
+		struct evtchn_bind_pirq	       bind_pirq;
+		struct evtchn_bind_ipi	       bind_ipi;
+		struct evtchn_close	       close;
+		struct evtchn_send	       send;
+		struct evtchn_status	       status;
+		struct evtchn_bind_vcpu	       bind_vcpu;
+		struct evtchn_unmask	       unmask;
+	} u;
+};
+DEFINE_GUEST_HANDLE_STRUCT(evtchn_op);
+
+#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
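
As a rough illustration of how a guest consumes these structures, the sketch
below allocates an unbound port that a remote domain may later bind to. The
two-argument HYPERVISOR_event_channel_op() wrapper and the <xen/interface/xen.h>
location of DOMID_SELF are assumptions about the surrounding hypercall glue,
not something defined by this header.

#include <xen/interface/xen.h>		/* DOMID_SELF (assumed location) */
#include <xen/interface/event_channel.h>

/* Assumed hypercall wrapper provided by the arch-specific glue. */
extern int HYPERVISOR_event_channel_op(int cmd, void *arg);

/* Allocate a port in this domain that <remote_dom> may later bind to. */
static int example_alloc_unbound(domid_t remote_dom, evtchn_port_t *port)
{
	struct evtchn_alloc_unbound op = {
		.dom        = DOMID_SELF,	/* IN */
		.remote_dom = remote_dom,	/* IN */
	};
	int rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);

	if (rc == 0)
		*port = op.port;		/* OUT parameter */
	return rc;
}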
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
new file mode 100644
index 0000000..d73228d
--- /dev/null
+++ b/include/xen/interface/features.h
@@ -0,0 +1,43 @@
+/******************************************************************************
+ * features.h
+ *
+ * Feature flags, reported by XENVER_get_features.
+ *
+ * Copyright (c) 2006, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_FEATURES_H__
+#define __XEN_PUBLIC_FEATURES_H__
+
+/*
+ * If set, the guest does not need to write-protect its pagetables, and can
+ * update them via direct writes.
+ */
+#define XENFEAT_writable_page_tables       0
+
+/*
+ * If set, the guest does not need to write-protect its segment descriptor
+ * tables, and can update them via direct writes.
+ */
+#define XENFEAT_writable_descriptor_tables 1
+
+/*
+ * If set, translation between the guest's 'pseudo-physical' address space
+ * and the host's machine address space is handled by the hypervisor. In this
+ * mode the guest does not need to perform phys-to/from-machine translations
+ * when performing page table operations.
+ */
+#define XENFEAT_auto_translated_physmap    2
+
+/* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */
+#define XENFEAT_supervisor_mode_kernel     3
+
+/*
+ * If set, the guest does not need to allocate x86 PAE page directories
+ * below 4GB. This flag is usually implied by auto_translated_physmap.
+ */
+#define XENFEAT_pae_pgdir_above_4gb        4
+
+#define XENFEAT_NR_SUBMAPS 1
+
+#endif /* __XEN_PUBLIC_FEATURES_H__ */
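
A rough sketch of how these flag numbers are typically consumed:
XENVER_get_features reports XENFEAT_NR_SUBMAPS 32-bit submaps, and each
XENFEAT_* value is a bit index into that map. The xen_features_cache[] array
and the 32-bits-per-submap layout are assumptions here, not part of this header.

#include <linux/types.h>
#include <xen/interface/features.h>

/* Hypothetical cache filled once at boot from XENVER_get_features. */
static u32 xen_features_cache[XENFEAT_NR_SUBMAPS];

static inline int example_xen_feature(unsigned int flag)
{
	/* Each submap carries 32 feature bits (assumed layout). */
	return (xen_features_cache[flag / 32] >> (flag % 32)) & 1;
}

/*
 * Usage: e.g. skip pagetable write-protection when direct writes are allowed:
 *	if (example_xen_feature(XENFEAT_writable_page_tables)) ...
 */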
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
new file mode 100644
index 0000000..2190498
--- /dev/null
+++ b/include/xen/interface/grant_table.h
@@ -0,0 +1,375 @@
+/******************************************************************************
+ * grant_table.h
+ *
+ * Interface for granting foreign access to page frames, and receiving
+ * page-ownership transfers.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
+#define __XEN_PUBLIC_GRANT_TABLE_H__
+
+
+/***********************************
+ * GRANT TABLE REPRESENTATION
+ */
+
+/* Some rough guidelines on accessing and updating grant-table entries
+ * in a concurrency-safe manner. For more information, Linux contains a
+ * reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
+ *
+ * NB. WMB is a no-op on current-generation x86 processors. However, a
+ *     compiler barrier will still be required.
+ *
+ * Introducing a valid entry into the grant table:
+ *  1. Write ent->domid.
+ *  2. Write ent->frame:
+ *      GTF_permit_access:   Frame to which access is permitted.
+ *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
+ *                           frame, or zero if none.
+ *  3. Write memory barrier (WMB).
+ *  4. Write ent->flags, inc. valid type.
+ *
+ * Invalidating an unused GTF_permit_access entry:
+ *  1. flags = ent->flags.
+ *  2. Observe that !(flags & (GTF_reading|GTF_writing)).
+ *  3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
+ *  NB. No need for WMB as reuse of entry is control-dependent on success of
+ *      step 3, and all architectures guarantee ordering of ctrl-dep writes.
+ *
+ * Invalidating an in-use GTF_permit_access entry:
+ *  This cannot be done directly. Request assistance from the domain controller
+ *  which can set a timeout on the use of a grant entry and take necessary
+ *  action. (NB. This is not yet implemented!).
+ *
+ * Invalidating an unused GTF_accept_transfer entry:
+ *  1. flags = ent->flags.
+ *  2. Observe that !(flags & GTF_transfer_committed). [*]
+ *  3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
+ *  NB. No need for WMB as reuse of entry is control-dependent on success of
+ *      step 3, and all architectures guarantee ordering of ctrl-dep writes.
+ *  [*] If GTF_transfer_committed is set then the grant entry is 'committed'.
+ *      The guest must /not/ modify the grant entry until the address of the
+ *      transferred frame is written. It is safe for the guest to spin waiting
+ *      for this to occur (detect by observing GTF_transfer_completed in
+ *      ent->flags).
+ *
+ * Invalidating a committed GTF_accept_transfer entry:
+ *  1. Wait for (ent->flags & GTF_transfer_completed).
+ *
+ * Changing a GTF_permit_access from writable to read-only:
+ *  Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
+ *
+ * Changing a GTF_permit_access from read-only to writable:
+ *  Use SMP-safe bit-setting instruction.
+ */
+
+/*
+ * A grant table comprises a packed array of grant entries in one or more
+ * page frames shared between Xen and a guest.
+ * [XEN]: This field is written by Xen and read by the sharing guest.
+ * [GST]: This field is written by the guest and read by Xen.
+ */
+struct grant_entry {
+    /* GTF_xxx: various type and flag information.  [XEN,GST] */
+    uint16_t flags;
+    /* The domain being granted foreign privileges. [GST] */
+    domid_t  domid;
+    /*
+     * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
+     * GTF_accept_transfer: Frame whose ownership is transferred by @domid. [XEN]
+     */
+    uint32_t frame;
+};
+
+/*
+ * Type of grant entry.
+ *  GTF_invalid: This grant entry grants no privileges.
+ *  GTF_permit_access: Allow @domid to map/access @frame.
+ *  GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
+ *                       to this guest. Xen writes the page number to @frame.
+ */
+#define GTF_invalid         (0U<<0)
+#define GTF_permit_access   (1U<<0)
+#define GTF_accept_transfer (2U<<0)
+#define GTF_type_mask       (3U<<0)
+
+/*
+ * Subflags for GTF_permit_access.
+ *  GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
+ *  GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
+ *  GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
+ */
+#define _GTF_readonly       (2)
+#define GTF_readonly        (1U<<_GTF_readonly)
+#define _GTF_reading        (3)
+#define GTF_reading         (1U<<_GTF_reading)
+#define _GTF_writing        (4)
+#define GTF_writing         (1U<<_GTF_writing)
+
+/*
+ * Subflags for GTF_accept_transfer:
+ *  GTF_transfer_committed: Xen sets this flag to indicate that it is committed
+ *      to transferring ownership of a page frame. When a guest sees this flag
+ *      it must /not/ modify the grant entry until GTF_transfer_completed is
+ *      set by Xen.
+ *  GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
+ *      after reading GTF_transfer_committed. Xen will always write the frame
+ *      address, followed by ORing this flag, in a timely manner.
+ */
+#define _GTF_transfer_committed (2)
+#define GTF_transfer_committed  (1U<<_GTF_transfer_committed)
+#define _GTF_transfer_completed (3)
+#define GTF_transfer_completed  (1U<<_GTF_transfer_completed)
+
+
+/***********************************
+ * GRANT TABLE QUERIES AND USES
+ */
+
+/*
+ * Reference to a grant entry in a specified domain's grant table.
+ */
+typedef uint32_t grant_ref_t;
+
+/*
+ * Handle to track a mapping created via a grant reference.
+ */
+typedef uint32_t grant_handle_t;
+
+/*
+ * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
+ * by devices and/or host CPUs. If successful, <handle> is a tracking number
+ * that must be presented later to destroy the mapping(s). On error, <handle>
+ * is a negative status code.
+ * NOTES:
+ *  1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
+ *     via which I/O devices may access the granted frame.
+ *  2. If GNTMAP_host_map is specified then a mapping will be added at
+ *     either a host virtual address in the current address space, or at
+ *     a PTE at the specified machine address.  The type of mapping to
+ *     perform is selected through the GNTMAP_contains_pte flag, and the
+ *     address is specified in <host_addr>.
+ *  3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
+ *     host mapping is destroyed by other means then it is *NOT* guaranteed
+ *     to be accounted to the correct grant reference!
+ */
+#define GNTTABOP_map_grant_ref        0
+struct gnttab_map_grant_ref {
+    /* IN parameters. */
+    uint64_t host_addr;
+    uint32_t flags;               /* GNTMAP_* */
+    grant_ref_t ref;
+    domid_t  dom;
+    /* OUT parameters. */
+    int16_t  status;              /* GNTST_* */
+    grant_handle_t handle;
+    uint64_t dev_bus_addr;
+};
+
+/*
+ * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
+ * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
+ * field is ignored. If non-zero, it must refer to a device/host mapping
+ * that is tracked by <handle>.
+ * NOTES:
+ *  1. The call may fail in an undefined manner if either mapping is not
+ *     tracked by <handle>.
+ *  2. After executing a batch of unmaps, it is guaranteed that no stale
+ *     mappings will remain in the device or host TLBs.
+ */
+#define GNTTABOP_unmap_grant_ref      1
+struct gnttab_unmap_grant_ref {
+    /* IN parameters. */
+    uint64_t host_addr;
+    uint64_t dev_bus_addr;
+    grant_handle_t handle;
+    /* OUT parameters. */
+    int16_t  status;              /* GNTST_* */
+};
+
+/*
+ * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
+ * <nr_frames> pages. The frame addresses are written to the <frame_list>.
+ * Only <nr_frames> addresses are written, even if the table is larger.
+ * NOTES:
+ *  1. <dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ *  3. Xen may not support more than a single grant-table page per domain.
+ */
+#define GNTTABOP_setup_table          2
+struct gnttab_setup_table {
+    /* IN parameters. */
+    domid_t  dom;
+    uint32_t nr_frames;
+    /* OUT parameters. */
+    int16_t  status;              /* GNTST_* */
+    ulong *frame_list;
+};
+
+/*
+ * GNTTABOP_dump_table: Dump the contents of the grant table to the
+ * xen console. Debugging use only.
+ */
+#define GNTTABOP_dump_table           3
+struct gnttab_dump_table {
+    /* IN parameters. */
+    domid_t dom;
+    /* OUT parameters. */
+    int16_t status;               /* GNTST_* */
+};
+
+/*
+ * GNTTABOP_transfer: Transfer <frame> to a foreign domain. The
+ * foreign domain has previously registered its interest in the transfer via
+ * <domid, ref>.
+ *
+ * Note that, even if the transfer fails, the specified page no longer belongs
+ * to the calling domain *unless* the error is GNTST_bad_page.
+ */
+#define GNTTABOP_transfer                4
+struct gnttab_transfer {
+    /* IN parameters. */
+    unsigned long mfn;
+    domid_t       domid;
+    grant_ref_t   ref;
+    /* OUT parameters. */
+    int16_t       status;
+};
+
+
+/*
+ * GNTTABOP_copy: Hypervisor-based copy.
+ * The source and destination can each be either an MFN or, for foreign
+ * domains, a grant reference; the foreign domain has to grant read/write
+ * access in its grant table.
+ *
+ * The flags specify which type the source and destination are (either MFN
+ * or grant reference).
+ *
+ * Note that this can also be used to copy data between two domains
+ * via a third party if the source and destination domains had previously
+ * granted appropriate access to their pages to the third party.
+ *
+ * source_offset specifies an offset in the source frame, dest_offset
+ * the offset in the target frame, and len specifies the number of
+ * bytes to be copied.
+ */
+
+#define _GNTCOPY_source_gref      (0)
+#define GNTCOPY_source_gref       (1<<_GNTCOPY_source_gref)
+#define _GNTCOPY_dest_gref        (1)
+#define GNTCOPY_dest_gref         (1<<_GNTCOPY_dest_gref)
+
+#define GNTTABOP_copy                 5
+struct gnttab_copy {
+	/* IN parameters. */
+	struct {
+		union {
+			grant_ref_t ref;
+			unsigned long   gmfn;
+		} u;
+		domid_t  domid;
+		uint16_t offset;
+	} source, dest;
+	uint16_t      len;
+	uint16_t      flags;          /* GNTCOPY_* */
+	/* OUT parameters. */
+	int16_t       status;
+};
+
+/*
+ * GNTTABOP_query_size: Query the current and maximum sizes of the shared
+ * grant table.
+ * NOTES:
+ *  1. <dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ */
+#define GNTTABOP_query_size           6
+struct gnttab_query_size {
+    /* IN parameters. */
+    domid_t  dom;
+    /* OUT parameters. */
+    uint32_t nr_frames;
+    uint32_t max_nr_frames;
+    int16_t  status;              /* GNTST_* */
+};
+
+
+/*
+ * Bitfield values for update_pin_status.flags.
+ */
+ /* Map the grant entry for access by I/O devices. */
+#define _GNTMAP_device_map      (0)
+#define GNTMAP_device_map       (1<<_GNTMAP_device_map)
+ /* Map the grant entry for access by host CPUs. */
+#define _GNTMAP_host_map        (1)
+#define GNTMAP_host_map         (1<<_GNTMAP_host_map)
+ /* Accesses to the granted frame will be restricted to read-only access. */
+#define _GNTMAP_readonly        (2)
+#define GNTMAP_readonly         (1<<_GNTMAP_readonly)
+ /*
+  * GNTMAP_host_map subflag:
+  *  0 => The host mapping is usable only by the guest OS.
+  *  1 => The host mapping is usable by guest OS + current application.
+  */
+#define _GNTMAP_application_map (3)
+#define GNTMAP_application_map  (1<<_GNTMAP_application_map)
+
+ /*
+  * GNTMAP_contains_pte subflag:
+  *  0 => This map request contains a host virtual address.
+  *  1 => This map request contains the machine address of the PTE to update.
+  */
+#define _GNTMAP_contains_pte    (4)
+#define GNTMAP_contains_pte     (1<<_GNTMAP_contains_pte)
+
+/*
+ * Values for error status returns. All errors are -ve.
+ */
+#define GNTST_okay             (0)  /* Normal return.                        */
+#define GNTST_general_error    (-1) /* General undefined error.              */
+#define GNTST_bad_domain       (-2) /* Unrecognised domain id.               */
+#define GNTST_bad_gntref       (-3) /* Unrecognised or inappropriate gntref. */
+#define GNTST_bad_handle       (-4) /* Unrecognised or inappropriate handle. */
+#define GNTST_bad_virt_addr    (-5) /* Inappropriate virtual address to map. */
+#define GNTST_bad_dev_addr     (-6) /* Inappropriate device address to unmap.*/
+#define GNTST_no_device_space  (-7) /* Out of space in I/O MMU.              */
+#define GNTST_permission_denied (-8) /* Not enough privilege for operation.  */
+#define GNTST_bad_page         (-9) /* Specified page was invalid for op.    */
+#define GNTST_bad_copy_arg    (-10) /* Copy arguments cross page boundary.   */
+
+#define GNTTABOP_error_msgs {                   \
+    "okay",                                     \
+    "undefined error",                          \
+    "unrecognised domain id",                   \
+    "invalid grant reference",                  \
+    "invalid mapping handle",                   \
+    "invalid virtual address",                  \
+    "invalid device address",                   \
+    "no spare translation slot in the I/O MMU", \
+    "permission denied",                        \
+    "bad page",                                 \
+    "copy arguments cross page boundary"        \
+}
+
+#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
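
The "introducing a valid entry" sequence documented at the top of this header,
written out as a sketch. gnttab_shared (a mapping of the shared grant-table
frames) is hypothetical, and wmb() is the kernel's write barrier; the field
and flag names come straight from the header.

#include <xen/interface/xen.h>		/* domid_t (assumed location) */
#include <xen/interface/grant_table.h>

/* Hypothetical mapping of the shared grant-table page(s). */
static struct grant_entry *gnttab_shared;

static void example_grant_access(grant_ref_t ref, domid_t domid,
				 uint32_t frame, int readonly)
{
	gnttab_shared[ref].domid = domid;	/* step 1 */
	gnttab_shared[ref].frame = frame;	/* step 2 */
	wmb();					/* step 3: barrier before flags */
	gnttab_shared[ref].flags = GTF_permit_access |
				   (readonly ? GTF_readonly : 0);  /* step 4 */
}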
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
new file mode 100644
index 0000000..c2d1fa4
--- /dev/null
+++ b/include/xen/interface/io/blkif.h
@@ -0,0 +1,94 @@
+/******************************************************************************
+ * blkif.h
+ *
+ * Unified block-device I/O interface for Xen guest OSes.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_IO_BLKIF_H__
+#define __XEN_PUBLIC_IO_BLKIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Front->back notifications: When enqueuing a new request, sending a
+ * notification can be made conditional on req_event (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Backends must set
+ * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
+ *
+ * Back->front notifications: When enqueuing a new response, sending a
+ * notification can be made conditional on rsp_event (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Frontends must set
+ * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
+ */
+
+typedef uint16_t blkif_vdev_t;
+typedef uint64_t blkif_sector_t;
+
+/*
+ * REQUEST CODES.
+ */
+#define BLKIF_OP_READ              0
+#define BLKIF_OP_WRITE             1
+/*
+ * Recognised only if "feature-barrier" is present in backend xenbus info.
+ * The "feature-barrier" node contains a boolean indicating whether barrier
+ * requests are likely to succeed or fail. Either way, a barrier request
+ * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
+ * the underlying block-device hardware. The boolean simply indicates whether
+ * or not it is worthwhile for the frontend to attempt barrier requests.
+ * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
+ * create the "feature-barrier" node!
+ */
+#define BLKIF_OP_WRITE_BARRIER     2
+
+/*
+ * Maximum scatter/gather segments per request.
+ * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
+ * NB. This could be 12 if the ring indexes weren't stored in the same page.
+ */
+#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
+
+struct blkif_request {
+	uint8_t        operation;    /* BLKIF_OP_???                         */
+	uint8_t        nr_segments;  /* number of segments                   */
+	blkif_vdev_t   handle;       /* only for read/write requests         */
+	uint64_t       id;           /* private guest value, echoed in resp  */
+	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
+	struct blkif_request_segment {
+		grant_ref_t gref;        /* reference to I/O buffer frame        */
+		/* @first_sect: first sector in frame to transfer (inclusive).   */
+		/* @last_sect: last sector in frame to transfer (inclusive).     */
+		uint8_t     first_sect, last_sect;
+	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+
+struct blkif_response {
+	uint64_t        id;              /* copied from request */
+	uint8_t         operation;       /* copied from request */
+	int16_t         status;          /* BLKIF_RSP_???       */
+};
+
+/*
+ * STATUS RETURN CODES.
+ */
+ /* Operation not supported (only happens on barrier writes). */
+#define BLKIF_RSP_EOPNOTSUPP  -2
+ /* Operation failed for some unspecified reason (-EIO). */
+#define BLKIF_RSP_ERROR       -1
+ /* Operation completed successfully. */
+#define BLKIF_RSP_OKAY         0
+
+/*
+ * Generate blkif ring structures and types.
+ */
+
+DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
+
+#define VDISK_CDROM        0x1
+#define VDISK_REMOVABLE    0x2
+#define VDISK_READONLY     0x4
+
+#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
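
A sketch of filling in a one-segment read request. The grant reference and
the ring slot are assumed to come from elsewhere (see ring.h below), and the
512-byte sector granularity implied by first_sect/last_sect is an assumption
here, not something this header spells out.

#include <xen/interface/xen.h>		/* base types (assumed location) */
#include <xen/interface/io/blkif.h>

static void example_fill_read(struct blkif_request *req, uint64_t id,
			      blkif_vdev_t handle, blkif_sector_t sector,
			      grant_ref_t gref)
{
	req->operation     = BLKIF_OP_READ;
	req->nr_segments   = 1;
	req->handle        = handle;
	req->id            = id;	/* echoed back in blkif_response.id */
	req->sector_number = sector;

	req->seg[0].gref       = gref;	/* granted frame holding the buffer */
	req->seg[0].first_sect = 0;
	req->seg[0].last_sect  = 7;	/* 8 x 512-byte sectors = one 4K page */
}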
diff --git a/include/xen/interface/io/console.h b/include/xen/interface/io/console.h
new file mode 100644
index 0000000..e563de7
--- /dev/null
+++ b/include/xen/interface/io/console.h
@@ -0,0 +1,23 @@
+/******************************************************************************
+ * console.h
+ *
+ * Console I/O interface for Xen guest OSes.
+ *
+ * Copyright (c) 2005, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
+#define __XEN_PUBLIC_IO_CONSOLE_H__
+
+typedef uint32_t XENCONS_RING_IDX;
+
+#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
+
+struct xencons_interface {
+    char in[1024];
+    char out[2048];
+    XENCONS_RING_IDX in_cons, in_prod;
+    XENCONS_RING_IDX out_cons, out_prod;
+};
+
+#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
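
A sketch of the producer side of the "out" ring. intf is the shared
xencons_interface page; kicking the other end over the console event channel,
and the mb()/wmb() barrier placement, follow the usual shared-ring convention
and are assumptions rather than requirements stated in this header.

#include <xen/interface/io/console.h>

static int example_cons_write(struct xencons_interface *intf,
			      const char *data, int len)
{
	XENCONS_RING_IDX cons, prod;
	int sent = 0;

	cons = intf->out_cons;
	prod = intf->out_prod;
	mb();		/* read indexes before writing ring data */

	while (sent < len && (prod - cons) < sizeof(intf->out))
		intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];

	wmb();		/* make the data visible before the index update */
	intf->out_prod = prod;
	/* notify the backend via the console event channel (elsewhere) */
	return sent;
}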
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
new file mode 100644
index 0000000..518481c
--- /dev/null
+++ b/include/xen/interface/io/netif.h
@@ -0,0 +1,158 @@
+/******************************************************************************
+ * netif.h
+ *
+ * Unified network-device I/O interface for Xen guest OSes.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_IO_NETIF_H__
+#define __XEN_PUBLIC_IO_NETIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Notifications after enqueuing any type of message should be conditional on
+ * the appropriate req_event or rsp_event field in the shared ring.
+ * If the client sends notification for rx requests then it should specify
+ * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume
+ * that it cannot safely queue packets (as it may not be kicked to send them).
+ */
+
+/*
+ * This is the 'wire' format for packets:
+ *  Request 1: netif_tx_request -- NETTXF_* (any flags)
+ * [Request 2: netif_tx_extra]  (only if request 1 has NETTXF_extra_info)
+ * [Request 3: netif_tx_extra]  (only if request 2 has XEN_NETIF_EXTRA_MORE)
+ *  Request 4: netif_tx_request -- NETTXF_more_data
+ *  Request 5: netif_tx_request -- NETTXF_more_data
+ *  ...
+ *  Request N: netif_tx_request -- 0
+ */
+
+/* Protocol checksum field is blank in the packet (hardware offload)? */
+#define _NETTXF_csum_blank     (0)
+#define  NETTXF_csum_blank     (1U<<_NETTXF_csum_blank)
+
+/* Packet data has been validated against protocol checksum. */
+#define _NETTXF_data_validated (1)
+#define  NETTXF_data_validated (1U<<_NETTXF_data_validated)
+
+/* Packet continues in the next request descriptor. */
+#define _NETTXF_more_data      (2)
+#define  NETTXF_more_data      (1U<<_NETTXF_more_data)
+
+/* Packet to be followed by extra descriptor(s). */
+#define _NETTXF_extra_info     (3)
+#define  NETTXF_extra_info     (1U<<_NETTXF_extra_info)
+
+struct xen_netif_tx_request {
+    grant_ref_t gref;      /* Reference to buffer page */
+    uint16_t offset;       /* Offset within buffer page */
+    uint16_t flags;        /* NETTXF_* */
+    uint16_t id;           /* Echoed in response message. */
+    uint16_t size;         /* Packet size in bytes.       */
+};
+
+/* Types of netif_extra_info descriptors. */
+#define XEN_NETIF_EXTRA_TYPE_NONE  (0)  /* Never used - invalid */
+#define XEN_NETIF_EXTRA_TYPE_GSO   (1)  /* u.gso */
+#define XEN_NETIF_EXTRA_TYPE_MAX   (2)
+
+/* netif_extra_info flags. */
+#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
+#define XEN_NETIF_EXTRA_FLAG_MORE  (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
+
+/* GSO types - only TCPv4 currently supported. */
+#define XEN_NETIF_GSO_TYPE_TCPV4        (1)
+
+/*
+ * This structure needs to fit within both netif_tx_request and
+ * netif_rx_response for compatibility.
+ */
+struct xen_netif_extra_info {
+	uint8_t type;  /* XEN_NETIF_EXTRA_TYPE_* */
+	uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
+
+	union {
+		struct {
+			/*
+			 * Maximum payload size of each segment. For
+			 * example, for TCP this is just the path MSS.
+			 */
+			uint16_t size;
+
+			/*
+			 * GSO type. This determines the protocol of
+			 * the packet and any extra features required
+			 * to segment the packet properly.
+			 */
+			uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
+
+			/* Future expansion. */
+			uint8_t pad;
+
+			/*
+			 * GSO features. This specifies any extra GSO
+			 * features required to process this packet,
+			 * such as ECN support for TCPv4.
+			 */
+			uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
+		} gso;
+
+		uint16_t pad[3];
+	} u;
+};
+
+struct xen_netif_tx_response {
+	uint16_t id;
+	int16_t  status;       /* NETIF_RSP_* */
+};
+
+struct xen_netif_rx_request {
+	uint16_t    id;        /* Echoed in response message.        */
+	grant_ref_t gref;      /* Reference to incoming granted frame */
+};
+
+/* Packet data has been validated against protocol checksum. */
+#define _NETRXF_data_validated (0)
+#define  NETRXF_data_validated (1U<<_NETRXF_data_validated)
+
+/* Protocol checksum field is blank in the packet (hardware offload)? */
+#define _NETRXF_csum_blank     (1)
+#define  NETRXF_csum_blank     (1U<<_NETRXF_csum_blank)
+
+/* Packet continues in the next request descriptor. */
+#define _NETRXF_more_data      (2)
+#define  NETRXF_more_data      (1U<<_NETRXF_more_data)
+
+/* Packet to be followed by extra descriptor(s). */
+#define _NETRXF_extra_info     (3)
+#define  NETRXF_extra_info     (1U<<_NETRXF_extra_info)
+
+struct xen_netif_rx_response {
+    uint16_t id;
+    uint16_t offset;       /* Offset in page of start of received packet  */
+    uint16_t flags;        /* NETRXF_* */
+    int16_t  status;       /* -ve: NETIF_RSP_* ; +ve: Rx'ed pkt size. */
+};
+
+/*
+ * Generate netif ring structures and types.
+ */
+
+DEFINE_RING_TYPES(xen_netif_tx,
+		  struct xen_netif_tx_request,
+		  struct xen_netif_tx_response);
+DEFINE_RING_TYPES(xen_netif_rx,
+		  struct xen_netif_rx_request,
+		  struct xen_netif_rx_response);
+
+#define NETIF_RSP_DROPPED         -2
+#define NETIF_RSP_ERROR           -1
+#define NETIF_RSP_OKAY             0
+/* No response: used for auxiliary requests (e.g., netif_tx_extra). */
+#define NETIF_RSP_NULL             1
+
+#endif
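
A sketch of a single-slot transmit request using the flags above. Grant setup,
ring handling and the actual notification are assumed to happen elsewhere;
setting both NETTXF_csum_blank and NETTXF_data_validated mirrors the usual
checksum-offload case.

#include <xen/interface/xen.h>		/* base types (assumed location) */
#include <xen/interface/io/netif.h>

static void example_fill_tx(struct xen_netif_tx_request *tx, uint16_t id,
			    grant_ref_t gref, uint16_t offset, uint16_t size)
{
	tx->gref   = gref;		/* granted frame holding the packet */
	tx->offset = offset;		/* start of data within that frame  */
	tx->flags  = NETTXF_csum_blank | NETTXF_data_validated;
	tx->id     = id;		/* echoed in xen_netif_tx_response  */
	tx->size   = size;		/* total packet size in bytes       */
}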
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
new file mode 100644
index 0000000..e8cbf43
--- /dev/null
+++ b/include/xen/interface/io/ring.h
@@ -0,0 +1,260 @@
+/******************************************************************************
+ * ring.h
+ *
+ * Shared producer-consumer ring macros.
+ *
+ * Tim Deegan and Andrew Warfield November 2004.
+ */
+
+#ifndef __XEN_PUBLIC_IO_RING_H__
+#define __XEN_PUBLIC_IO_RING_H__
+
+typedef unsigned int RING_IDX;
+
+/* Round a 32-bit unsigned constant down to the nearest power of two. */
+#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2		       : ((_x) & 0x1))
+#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
+#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
+#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
+#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
+
+/*
+ * Calculate size of a shared ring, given the total available space for the
+ * ring and indexes (_sz), and the name tag of the request/response structure.
+ * A ring contains as many entries as will fit, rounded down to the nearest
+ * power of two (so we can mask with (size-1) to loop around).
+ */
+#define __RING_SIZE(_s, _sz) \
+    (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
+
+/*
+ * Macros to make the correct C datatypes for a new kind of ring.
+ *
+ * To make a new ring datatype, you need to have two message structures,
+ * let's say struct request, and struct response already defined.
+ *
+ * In a header where you want the ring datatype declared, you then do:
+ *
+ *     DEFINE_RING_TYPES(mytag, struct request, struct response);
+ *
+ * These expand out to give you a set of types, as you can see below.
+ * The most important of these are:
+ *
+ *     struct mytag_sring      - The shared ring.
+ *     struct mytag_front_ring - The 'front' half of the ring.
+ *     struct mytag_back_ring  - The 'back' half of the ring.
+ *
+ * To initialize a ring in your code you need to know the location and size
+ * of the shared memory area (PAGE_SIZE, for instance). To initialise
+ * the front half:
+ *
+ *     struct mytag_front_ring front_ring;
+ *     SHARED_RING_INIT((struct mytag_sring *)shared_page);
+ *     FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
+ *		       PAGE_SIZE);
+ *
+ * Initializing the back follows similarly (note that only the front
+ * initializes the shared ring):
+ *
+ *     struct mytag_back_ring back_ring;
+ *     BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
+ *		      PAGE_SIZE);
+ */
+
+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)			\
+									\
+/* Shared ring entry */							\
+union __name##_sring_entry {						\
+    __req_t req;							\
+    __rsp_t rsp;							\
+};									\
+									\
+/* Shared ring page */							\
+struct __name##_sring {							\
+    RING_IDX req_prod, req_event;					\
+    RING_IDX rsp_prod, rsp_event;					\
+    uint8_t  pad[48];							\
+    union __name##_sring_entry ring[1]; /* variable-length */		\
+};									\
+									\
+/* "Front" end's private variables */					\
+struct __name##_front_ring {						\
+    RING_IDX req_prod_pvt;						\
+    RING_IDX rsp_cons;							\
+    unsigned int nr_ents;						\
+    struct __name##_sring *sring;					\
+};									\
+									\
+/* "Back" end's private variables */					\
+struct __name##_back_ring {						\
+    RING_IDX rsp_prod_pvt;						\
+    RING_IDX req_cons;							\
+    unsigned int nr_ents;						\
+    struct __name##_sring *sring;					\
+};
+
+/*
+ * Macros for manipulating rings.
+ *
+ * FRONT_RING_whatever works on the "front end" of a ring: here
+ * requests are pushed on to the ring and responses taken off it.
+ *
+ * BACK_RING_whatever works on the "back end" of a ring: here
+ * requests are taken off the ring and responses put on.
+ *
+ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
+ * This is OK in 1-for-1 request-response situations where the
+ * requestor (front end) never has more than RING_SIZE()-1
+ * outstanding requests.
+ */
+
+/* Initialising empty rings */
+#define SHARED_RING_INIT(_s) do {					\
+    (_s)->req_prod  = (_s)->rsp_prod  = 0;				\
+    (_s)->req_event = (_s)->rsp_event = 1;				\
+    memset((_s)->pad, 0, sizeof((_s)->pad));				\
+} while(0)
+
+#define FRONT_RING_INIT(_r, _s, __size) do {				\
+    (_r)->req_prod_pvt = 0;						\
+    (_r)->rsp_cons = 0;							\
+    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
+    (_r)->sring = (_s);							\
+} while (0)
+
+#define BACK_RING_INIT(_r, _s, __size) do {				\
+    (_r)->rsp_prod_pvt = 0;						\
+    (_r)->req_cons = 0;							\
+    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
+    (_r)->sring = (_s);							\
+} while (0)
+
+/* Initialize to existing shared indexes -- for recovery */
+#define FRONT_RING_ATTACH(_r, _s, __size) do {				\
+    (_r)->sring = (_s);							\
+    (_r)->req_prod_pvt = (_s)->req_prod;				\
+    (_r)->rsp_cons = (_s)->rsp_prod;					\
+    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
+} while (0)
+
+#define BACK_RING_ATTACH(_r, _s, __size) do {				\
+    (_r)->sring = (_s);							\
+    (_r)->rsp_prod_pvt = (_s)->rsp_prod;				\
+    (_r)->req_cons = (_s)->req_prod;					\
+    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
+} while (0)
+
+/* How big is this ring? */
+#define RING_SIZE(_r)							\
+    ((_r)->nr_ents)
+
+/* Number of free requests (for use on front side only). */
+#define RING_FREE_REQUESTS(_r)						\
+    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
+
+/* Test if there is an empty slot available on the front ring.
+ * (This is only meaningful from the front.)
+ */
+#define RING_FULL(_r)							\
+    (RING_FREE_REQUESTS(_r) == 0)
+
+/* Test if there are outstanding messages to be processed on a ring. */
+#define RING_HAS_UNCONSUMED_RESPONSES(_r)				\
+    ((_r)->sring->rsp_prod - (_r)->rsp_cons)
+
+#define RING_HAS_UNCONSUMED_REQUESTS(_r)				\
+    ({									\
+	unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;	\
+	unsigned int rsp = RING_SIZE(_r) -				\
+			   ((_r)->req_cons - (_r)->rsp_prod_pvt);	\
+	req < rsp ? req : rsp;						\
+    })
+
+/* Direct access to individual ring elements, by index. */
+#define RING_GET_REQUEST(_r, _idx)					\
+    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+
+#define RING_GET_RESPONSE(_r, _idx)					\
+    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+
+/* Loop termination condition: Would the specified index overflow the ring? */
+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)				\
+    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
+
+#define RING_PUSH_REQUESTS(_r) do {					\
+    wmb(); /* back sees requests /before/ updated producer index */	\
+    (_r)->sring->req_prod = (_r)->req_prod_pvt;				\
+} while (0)
+
+#define RING_PUSH_RESPONSES(_r) do {					\
+    wmb(); /* front sees responses /before/ updated producer index */	\
+    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;				\
+} while (0)
+
+/*
+ * Notification hold-off (req_event and rsp_event):
+ *
+ * When queueing requests or responses on a shared ring, it may not always be
+ * necessary to notify the remote end. For example, if requests are in flight
+ * in a backend, the front may be able to queue further requests without
+ * notifying the back (if the back checks for new requests when it queues
+ * responses).
+ *
+ * When enqueuing requests or responses:
+ *
+ *  Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
+ *  is a boolean return value. True indicates that the receiver requires an
+ *  asynchronous notification.
+ *
+ * After dequeuing requests or responses (before sleeping the connection):
+ *
+ *  Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
+ *  The second argument is a boolean return value. True indicates that there
+ *  are pending messages on the ring (i.e., the connection should not be put
+ *  to sleep).
+ *
+ *  These macros will set the req_event/rsp_event field to trigger a
+ *  notification on the very next message that is enqueued. If you want to
+ *  create batches of work (i.e., only receive a notification after several
+ *  messages have been enqueued) then you will need to create a customised
+ *  version of the FINAL_CHECK macro in your own code, which sets the event
+ *  field appropriately.
+ */
+
+#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {		\
+    RING_IDX __old = (_r)->sring->req_prod;				\
+    RING_IDX __new = (_r)->req_prod_pvt;				\
+    wmb(); /* back sees requests /before/ updated producer index */	\
+    (_r)->sring->req_prod = __new;					\
+    mb(); /* back sees new requests /before/ we check req_event */	\
+    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <		\
+		 (RING_IDX)(__new - __old));				\
+} while (0)
+
+#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {		\
+    RING_IDX __old = (_r)->sring->rsp_prod;				\
+    RING_IDX __new = (_r)->rsp_prod_pvt;				\
+    wmb(); /* front sees responses /before/ updated producer index */	\
+    (_r)->sring->rsp_prod = __new;					\
+    mb(); /* front sees new responses /before/ we check rsp_event */	\
+    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <		\
+		 (RING_IDX)(__new - __old));				\
+} while (0)
+
+#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {		\
+    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);			\
+    if (_work_to_do) break;						\
+    (_r)->sring->req_event = (_r)->req_cons + 1;			\
+    mb();								\
+    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);			\
+} while (0)
+
+#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {		\
+    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);			\
+    if (_work_to_do) break;						\
+    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;			\
+    mb();								\
+    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);			\
+} while (0)
+
+#endif /* __XEN_PUBLIC_IO_RING_H__ */
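
Putting the macros together, a front-end produce path might look like the
sketch below. struct request/struct response are placeholder message types as
in the DEFINE_RING_TYPES() comment above, and the event-channel kick when
notify is set is assumed to live elsewhere.

#include <xen/interface/io/ring.h>

struct request  { uint64_t id; };		/* placeholder message types */
struct response { uint64_t id; int16_t status; };

DEFINE_RING_TYPES(mytag, struct request, struct response);

static int example_produce(struct mytag_front_ring *front,
			   const struct request *src)
{
	struct request *req;
	int notify;

	if (RING_FULL(front))
		return -1;			/* no free slot right now */

	req = RING_GET_REQUEST(front, front->req_prod_pvt);
	*req = *src;
	front->req_prod_pvt++;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(front, notify);
	if (notify) {
		/* kick the back end, e.g. over an event channel (assumed) */
	}
	return 0;
}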
diff --git a/include/xen/interface/io/xenbus.h b/include/xen/interface/io/xenbus.h
new file mode 100644
index 0000000..46508c7
--- /dev/null
+++ b/include/xen/interface/io/xenbus.h
@@ -0,0 +1,44 @@
+/*****************************************************************************
+ * xenbus.h
+ *
+ * Xenbus protocol details.
+ *
+ * Copyright (C) 2005 XenSource Ltd.
+ */
+
+#ifndef _XEN_PUBLIC_IO_XENBUS_H
+#define _XEN_PUBLIC_IO_XENBUS_H
+
+/* The state of either end of the Xenbus, i.e. the current communication
+   status of initialisation across the bus.  States here imply nothing about
+   the state of the connection between the driver and the kernel's device
+   layers.  */
+enum xenbus_state
+{
+	XenbusStateUnknown      = 0,
+	XenbusStateInitialising = 1,
+	XenbusStateInitWait     = 2,  /* Finished early
+					 initialisation, but waiting
+					 for information from the peer
+					 or hotplug scripts. */
+	XenbusStateInitialised  = 3,  /* Initialised and waiting for a
+					 connection from the peer. */
+	XenbusStateConnected    = 4,
+	XenbusStateClosing      = 5,  /* The device is being closed
+					 due to an error or an unplug
+					 event. */
+	XenbusStateClosed       = 6
+
+};
+
+#endif /* _XEN_PUBLIC_IO_XENBUS_H */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
new file mode 100644
index 0000000..99fcffb
--- /dev/null
+++ b/include/xen/interface/io/xs_wire.h
@@ -0,0 +1,87 @@
+/*
+ * Details of the "wire" protocol between Xen Store Daemon and client
+ * library or guest kernel.
+ * Copyright (C) 2005 Rusty Russell IBM Corporation
+ */
+
+#ifndef _XS_WIRE_H
+#define _XS_WIRE_H
+
+enum xsd_sockmsg_type
+{
+    XS_DEBUG,
+    XS_DIRECTORY,
+    XS_READ,
+    XS_GET_PERMS,
+    XS_WATCH,
+    XS_UNWATCH,
+    XS_TRANSACTION_START,
+    XS_TRANSACTION_END,
+    XS_INTRODUCE,
+    XS_RELEASE,
+    XS_GET_DOMAIN_PATH,
+    XS_WRITE,
+    XS_MKDIR,
+    XS_RM,
+    XS_SET_PERMS,
+    XS_WATCH_EVENT,
+    XS_ERROR,
+    XS_IS_DOMAIN_INTRODUCED
+};
+
+#define XS_WRITE_NONE "NONE"
+#define XS_WRITE_CREATE "CREATE"
+#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
+
+/* We hand errors as strings, for portability. */
+struct xsd_errors
+{
+    int errnum;
+    const char *errstring;
+};
+#define XSD_ERROR(x) { x, #x }
+static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
+    XSD_ERROR(EINVAL),
+    XSD_ERROR(EACCES),
+    XSD_ERROR(EEXIST),
+    XSD_ERROR(EISDIR),
+    XSD_ERROR(ENOENT),
+    XSD_ERROR(ENOMEM),
+    XSD_ERROR(ENOSPC),
+    XSD_ERROR(EIO),
+    XSD_ERROR(ENOTEMPTY),
+    XSD_ERROR(ENOSYS),
+    XSD_ERROR(EROFS),
+    XSD_ERROR(EBUSY),
+    XSD_ERROR(EAGAIN),
+    XSD_ERROR(EISCONN)
+};
+
+struct xsd_sockmsg
+{
+    uint32_t type;  /* XS_??? */
+    uint32_t req_id;/* Request identifier, echoed in daemon's response.  */
+    uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */
+    uint32_t len;   /* Length of data following this. */
+
+    /* Generally followed by nul-terminated string(s). */
+};
+
+enum xs_watch_type
+{
+    XS_WATCH_PATH = 0,
+    XS_WATCH_TOKEN
+};
+
+/* Inter-domain shared memory communications. */
+#define XENSTORE_RING_SIZE 1024
+typedef uint32_t XENSTORE_RING_IDX;
+#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
+struct xenstore_domain_interface {
+    char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
+    char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
+    XENSTORE_RING_IDX req_cons, req_prod;
+    XENSTORE_RING_IDX rsp_cons, rsp_prod;
+};
+
+#endif /* _XS_WIRE_H */
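
A sketch of building the header for an XS_READ request. On the wire the header
is followed by len bytes of payload, here the nul-terminated path; pushing
those bytes through the req ring above is assumed to happen elsewhere.

#include <linux/string.h>
#include <linux/types.h>
#include <xen/interface/io/xs_wire.h>

static void example_xs_read_header(struct xsd_sockmsg *hdr, uint32_t req_id,
				   uint32_t tx_id, const char *path)
{
	hdr->type   = XS_READ;
	hdr->req_id = req_id;		/* echoed in the daemon's reply      */
	hdr->tx_id  = tx_id;		/* 0 when not inside a transaction   */
	hdr->len    = strlen(path) + 1;	/* payload includes the trailing nul */
}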
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
new file mode 100644
index 0000000..af36ead
--- /dev/null
+++ b/include/xen/interface/memory.h
@@ -0,0 +1,145 @@
+/******************************************************************************
+ * memory.h
+ *
+ * Memory reservation and information.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_MEMORY_H__
+#define __XEN_PUBLIC_MEMORY_H__
+
+/*
+ * Increase or decrease the specified domain's memory reservation. Returns a
+ * -ve errcode on failure, or the # extents successfully allocated or freed.
+ * arg == addr of struct xen_memory_reservation.
+ */
+#define XENMEM_increase_reservation 0
+#define XENMEM_decrease_reservation 1
+#define XENMEM_populate_physmap     6
+struct xen_memory_reservation {
+
+    /*
+     * XENMEM_increase_reservation:
+     *   OUT: MFN (*not* GMFN) bases of extents that were allocated
+     * XENMEM_decrease_reservation:
+     *   IN:  GMFN bases of extents to free
+     * XENMEM_populate_physmap:
+     *   IN:  GPFN bases of extents to populate with memory
+     *   OUT: GMFN bases of extents that were allocated
+     *   (NB. This command also updates the mach_to_phys translation table)
+     */
+    GUEST_HANDLE(ulong) extent_start;
+
+    /* Number of extents, and size/alignment of each (2^extent_order pages). */
+    unsigned long  nr_extents;
+    unsigned int   extent_order;
+
+    /*
+     * Maximum # bits addressable by the user of the allocated region (e.g.,
+     * I/O devices often have a 32-bit limitation even in 64-bit systems). If
+     * zero then the user has no addressing restriction.
+     * This field is not used by XENMEM_decrease_reservation.
+     */
+    unsigned int   address_bits;
+
+    /*
+     * Domain whose reservation is being changed.
+     * Unprivileged domains can specify only DOMID_SELF.
+     */
+    domid_t        domid;
+
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation);
+
+/*
+ * Returns the maximum machine frame number of mapped RAM in this system.
+ * This command always succeeds (it never returns an error code).
+ * arg == NULL.
+ */
+#define XENMEM_maximum_ram_page     2
+
+/*
+ * Returns the current or maximum memory reservation, in pages, of the
+ * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
+ * arg == addr of domid_t.
+ */
+#define XENMEM_current_reservation  3
+#define XENMEM_maximum_reservation  4
+
+/*
+ * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
+ * mapping table. Architectures which do not have an m2p table do not implement
+ * this command.
+ * arg == addr of xen_machphys_mfn_list_t.
+ */
+#define XENMEM_machphys_mfn_list    5
+struct xen_machphys_mfn_list {
+    /*
+     * Size of the 'extent_start' array. Fewer entries will be filled if the
+     * machphys table is smaller than max_extents * 2MB.
+     */
+    unsigned int max_extents;
+
+    /*
+     * Pointer to buffer to fill with list of extent starts. If there are
+     * any large discontiguities in the machine address space, 2MB gaps in
+     * the machphys table will be represented by an MFN base of zero.
+     */
+    GUEST_HANDLE(ulong) extent_start;
+
+    /*
+     * Number of extents written to the above array. This will be smaller
+     * than 'max_extents' if the machphys table is smaller than max_e * 2MB.
+     */
+    unsigned int nr_extents;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list);
+
+/*
+ * Sets the GPFN at which a particular page appears in the specified guest's
+ * pseudophysical address space.
+ * arg == addr of xen_add_to_physmap_t.
+ */
+#define XENMEM_add_to_physmap      7
+struct xen_add_to_physmap {
+    /* Which domain to change the mapping for. */
+    domid_t domid;
+
+    /* Source mapping space. */
+#define XENMAPSPACE_shared_info 0 /* shared info page */
+#define XENMAPSPACE_grant_table 1 /* grant table page */
+    unsigned int space;
+
+    /* Index into source mapping space. */
+    unsigned long idx;
+
+    /* GPFN where the source mapping page should appear. */
+    unsigned long gpfn;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
+
+/*
+ * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
+ * code on failure. This call only works for auto-translated guests.
+ */
+#define XENMEM_translate_gpfn_list  8
+struct xen_translate_gpfn_list {
+    /* Which domain to translate for? */
+    domid_t domid;
+
+    /* Length of list. */
+    unsigned long nr_gpfns;
+
+    /* List of GPFNs to translate. */
+    GUEST_HANDLE(ulong) gpfn_list;
+
+    /*
+     * Output list to contain MFN translations. May be the same as the input
+     * list (in which case each input GPFN is overwritten with the output MFN).
+     */
+    GUEST_HANDLE(ulong) mfn_list;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
+
+#endif /* __XEN_PUBLIC_MEMORY_H__ */
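
A balloon-style sketch: handing one page back to Xen with
XENMEM_decrease_reservation. The HYPERVISOR_memory_op() wrapper and the
set_xen_guest_handle() helper for filling the GUEST_HANDLE field are assumed
to come from the arch interface/hypercall glue, not from this header.

#include <xen/interface/xen.h>		/* DOMID_SELF, GUEST_HANDLE (assumed) */
#include <xen/interface/memory.h>

/* Assumed hypercall wrapper provided by the arch-specific glue. */
extern int HYPERVISOR_memory_op(unsigned int cmd, void *arg);

static int example_release_one_page(unsigned long gmfn)
{
	unsigned long frame = gmfn;		/* GMFN base of the extent */
	struct xen_memory_reservation reservation = {
		.nr_extents   = 1,
		.extent_order = 0,		/* a single page */
		.domid        = DOMID_SELF,
	};

	/* assumed helper that fills the GUEST_HANDLE(ulong) field */
	set_xen_guest_handle(reservation.extent_start, &frame);
	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}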
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
new file mode 100644
index 0000000..cd69391
--- /dev/null
+++ b/include/xen/interface/physdev.h
@@ -0,0 +1,145 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PUBLIC_PHYSDEV_H__
+#define __XEN_PUBLIC_PHYSDEV_H__
+
+/*
+ * Prototype for this hypercall is:
+ *  int physdev_op(int cmd, void *args)
+ * @cmd	 == PHYSDEVOP_??? (physdev operation).
+ * @args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Notify end-of-interrupt (EOI) for the specified IRQ.
+ * @arg == pointer to physdev_eoi structure.
+ */
+#define PHYSDEVOP_eoi			12
+struct physdev_eoi {
+	/* IN */
+	uint32_t irq;
+};
+
+/*
+ * Query the status of an IRQ line.
+ * @arg == pointer to physdev_irq_status_query structure.
+ */
+#define PHYSDEVOP_irq_status_query	 5
+struct physdev_irq_status_query {
+	/* IN */
+	uint32_t irq;
+	/* OUT */
+	uint32_t flags; /* XENIRQSTAT_* */
+};
+
+/* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
+#define _XENIRQSTAT_needs_eoi	(0)
+#define	 XENIRQSTAT_needs_eoi	(1U<<_XENIRQSTAT_needs_eoi)
+
+/* IRQ shared by multiple guests? */
+#define _XENIRQSTAT_shared	(1)
+#define	 XENIRQSTAT_shared	(1U<<_XENIRQSTAT_shared)
+
+/*
+ * Set the current VCPU's I/O privilege level.
+ * @arg == pointer to physdev_set_iopl structure.
+ */
+#define PHYSDEVOP_set_iopl		 6
+struct physdev_set_iopl {
+	/* IN */
+	uint32_t iopl;
+};
+
+/*
+ * Set the current VCPU's I/O-port permissions bitmap.
+ * @arg == pointer to physdev_set_iobitmap structure.
+ */
+#define PHYSDEVOP_set_iobitmap		 7
+struct physdev_set_iobitmap {
+	/* IN */
+	uint8_t * bitmap;
+	uint32_t nr_ports;
+};
+
+/*
+ * Read or write an IO-APIC register.
+ * @arg == pointer to physdev_apic structure.
+ */
+#define PHYSDEVOP_apic_read		 8
+#define PHYSDEVOP_apic_write		 9
+struct physdev_apic {
+	/* IN */
+	unsigned long apic_physbase;
+	uint32_t reg;
+	/* IN or OUT */
+	uint32_t value;
+};
+
+/*
+ * Allocate or free a physical upcall vector for the specified IRQ line.
+ * @arg == pointer to physdev_irq structure.
+ */
+#define PHYSDEVOP_alloc_irq_vector	10
+#define PHYSDEVOP_free_irq_vector	11
+struct physdev_irq {
+	/* IN */
+	uint32_t irq;
+	/* IN or OUT */
+	uint32_t vector;
+};
+
+/*
+ * Argument to physdev_op_compat() hypercall. Superseded by new physdev_op()
+ * hypercall since 0x00030202.
+ */
+struct physdev_op {
+	uint32_t cmd;
+	union {
+		struct physdev_irq_status_query	     irq_status_query;
+		struct physdev_set_iopl		     set_iopl;
+		struct physdev_set_iobitmap	     set_iobitmap;
+		struct physdev_apic		     apic_op;
+		struct physdev_irq		     irq_op;
+	} u;
+};
+
+/*
+ * Notify that some PIRQ-bound event channels have been unmasked.
+ * ** This command is obsolete since interface version 0x00030202 and is **
+ * ** unsupported by newer versions of Xen.				 **
+ */
+#define PHYSDEVOP_IRQ_UNMASK_NOTIFY	 4
+
+/*
+ * These all-capitals physdev operation names are superseded by the new names
+ * (defined above) since interface version 0x00030202.
+ */
+#define PHYSDEVOP_IRQ_STATUS_QUERY	 PHYSDEVOP_irq_status_query
+#define PHYSDEVOP_SET_IOPL		 PHYSDEVOP_set_iopl
+#define PHYSDEVOP_SET_IOBITMAP		 PHYSDEVOP_set_iobitmap
+#define PHYSDEVOP_APIC_READ		 PHYSDEVOP_apic_read
+#define PHYSDEVOP_APIC_WRITE		 PHYSDEVOP_apic_write
+#define PHYSDEVOP_ASSIGN_VECTOR		 PHYSDEVOP_alloc_irq_vector
+#define PHYSDEVOP_FREE_VECTOR		 PHYSDEVOP_free_irq_vector
+#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
+#define PHYSDEVOP_IRQ_SHARED		 XENIRQSTAT_shared
+
+#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
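
A sketch against the physdev_op(cmd, args) prototype noted at the top of the
header: query an IRQ line and test whether it needs an explicit EOI. The
HYPERVISOR_physdev_op() wrapper itself is assumed to be provided elsewhere.

#include <xen/interface/physdev.h>

/* Assumed wrapper matching the int physdev_op(int cmd, void *args) prototype. */
extern int HYPERVISOR_physdev_op(int cmd, void *arg);

static int example_irq_needs_eoi(uint32_t irq)
{
	struct physdev_irq_status_query query = { .irq = irq };

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &query) != 0)
		return 0;	/* query failed; assume no EOI needed */

	return !!(query.flags & XENIRQSTAT_needs_eoi);
}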
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
new file mode 100644
index 0000000..5fec575
--- /dev/null
+++ b/include/xen/interface/sched.h
@@ -0,0 +1,77 @@
+/******************************************************************************
+ * sched.h
+ *
+ * Scheduler state interactions
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_SCHED_H__
+#define __XEN_PUBLIC_SCHED_H__
+
+#include "event_channel.h"
+
+/*
+ * The prototype for this hypercall is:
+ *  long sched_op_new(int cmd, void *arg)
+ * @cmd == SCHEDOP_??? (scheduler operation).
+ * @arg == Operation-specific extra argument(s), as described below.
+ *
+ * **NOTE**:
+ * Versions of Xen prior to 3.0.2 provide only the following legacy version
+ * of this hypercall, supporting only the commands yield, block and shutdown:
+ *  long sched_op(int cmd, unsigned long arg)
+ * @cmd == SCHEDOP_??? (scheduler operation).
+ * @arg == 0               (SCHEDOP_yield and SCHEDOP_block)
+ *      == SHUTDOWN_* code (SCHEDOP_shutdown)
+ */
+
+/*
+ * Voluntarily yield the CPU.
+ * @arg == NULL.
+ */
+#define SCHEDOP_yield       0
+
+/*
+ * Block execution of this VCPU until an event is received for processing.
+ * If called with event upcalls masked, this operation will atomically
+ * reenable event delivery and check for pending events before blocking the
+ * VCPU. This avoids a "wakeup waiting" race.
+ * @arg == NULL.
+ */
+#define SCHEDOP_block       1
+
+/*
+ * Halt execution of this domain (all VCPUs) and notify the system controller.
+ * @arg == pointer to sched_shutdown structure.
+ */
+#define SCHEDOP_shutdown    2
+struct sched_shutdown {
+    unsigned int reason; /* SHUTDOWN_* */
+};
+DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown);
+
+/*
+ * Poll a set of event-channel ports. Return when one or more are pending. An
+ * optional timeout may be specified.
+ * @arg == pointer to sched_poll structure.
+ */
+#define SCHEDOP_poll        3
+struct sched_poll {
+    GUEST_HANDLE(evtchn_port_t) ports;
+    unsigned int nr_ports;
+    uint64_t timeout;
+};
+DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
+
+/*
+ * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
+ * software to determine the appropriate action. For the most part, Xen does
+ * not care about the shutdown code.
+ */
+#define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
+#define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
+#define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
+#define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
+
+#endif /* __XEN_PUBLIC_SCHED_H__ */
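
A sketch against the sched_op_new(cmd, arg) prototype quoted above: a clean
poweroff via SCHEDOP_shutdown. The HYPERVISOR_sched_op() wrapper is assumed to
be supplied by the hypercall glue.

#include <xen/interface/xen.h>		/* base types (assumed location) */
#include <xen/interface/sched.h>

/* Assumed wrapper matching long sched_op_new(int cmd, void *arg). */
extern long HYPERVISOR_sched_op(int cmd, void *arg);

static void example_poweroff(void)
{
	struct sched_shutdown arg = { .reason = SHUTDOWN_poweroff };

	HYPERVISOR_sched_op(SCHEDOP_shutdown, &arg);
	/* does not return if the shutdown request is honoured */
}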
diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
new file mode 100644
index 0000000..ff61ea3
--- /dev/null
+++ b/include/xen/interface/vcpu.h
@@ -0,0 +1,167 @@
+/******************************************************************************
+ * vcpu.h
+ *
+ * VCPU initialisation, query, and hotplug.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_VCPU_H__
+#define __XEN_PUBLIC_VCPU_H__
+
+/*
+ * Prototype for this hypercall is:
+ *	int vcpu_op(int cmd, int vcpuid, void *extra_args)
+ * @cmd		   == VCPUOP_??? (VCPU operation).
+ * @vcpuid	   == VCPU to operate on.
+ * @extra_args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Initialise a VCPU. Each VCPU can be initialised only once. A
+ * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
+ *
+ * @extra_arg == pointer to vcpu_guest_context structure containing initial
+ *				 state for the VCPU.
+ */
+#define VCPUOP_initialise			 0
+
+/*
+ * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
+ * if the VCPU has not been initialised (VCPUOP_initialise).
+ */
+#define VCPUOP_up					 1
+
+/*
+ * Bring down a VCPU (i.e., make it non-runnable).
+ * There are a few caveats that callers should observe:
+ *	1. This operation may return, and VCPUOP_is_up may return false, before the
+ *	   VCPU stops running (i.e., the command is asynchronous). It is a good
+ *	   idea to ensure that the VCPU has entered a non-critical loop before
+ *	   bringing it down. Alternatively, this operation is guaranteed
+ *	   synchronous if invoked by the VCPU itself.
+ *	2. After a VCPU is initialised, there is currently no way to drop all its
+ *	   references to domain memory. Even a VCPU that is down still holds
+ *	   memory references via its pagetable base pointer and GDT. It is good
+ *	   practice to move a VCPU onto an 'idle' or default page table, LDT and
+ *	   GDT before bringing it down.
+ */
+#define VCPUOP_down					 2
+
+/* Returns 1 if the given VCPU is up. */
+#define VCPUOP_is_up				 3
+
+/*
+ * Return information about the state and running time of a VCPU.
+ * @extra_arg == pointer to vcpu_runstate_info structure.
+ */
+#define VCPUOP_get_runstate_info	 4
+struct vcpu_runstate_info {
+		/* VCPU's current state (RUNSTATE_*). */
+		int		 state;
+		/* When was current state entered (system time, ns)? */
+		uint64_t state_entry_time;
+		/*
+		 * Time spent in each RUNSTATE_* (ns). The sum of these times is
+		 * guaranteed not to drift from system time.
+		 */
+		uint64_t time[4];
+};
+
+/* VCPU is currently running on a physical CPU. */
+#define RUNSTATE_running  0
+
+/* VCPU is runnable, but not currently scheduled on any physical CPU. */
+#define RUNSTATE_runnable 1
+
+/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
+#define RUNSTATE_blocked  2
+
+/*
+ * VCPU is not runnable, but it is not blocked.
+ * This is a 'catch all' state for things like hotplug and pauses by the
+ * system administrator (or for critical sections in the hypervisor).
+ * RUNSTATE_blocked dominates this state (it is the preferred state).
+ */
+#define RUNSTATE_offline  3
+
+/*
+ * Register a shared memory area from which the guest may obtain its own
+ * runstate information without needing to execute a hypercall.
+ * Notes:
+ *	1. The registered address may be virtual or physical, depending on the
+ *	   platform. The virtual address should be registered on x86 systems.
+ *	2. Only one shared area may be registered per VCPU. The shared area is
+ *	   updated by the hypervisor each time the VCPU is scheduled. Thus
+ *	   runstate.state will always be RUNSTATE_running and
+ *	   runstate.state_entry_time will indicate the system time at which the
+ *	   VCPU was last scheduled to run.
+ * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
+ */
+#define VCPUOP_register_runstate_memory_area 5
+struct vcpu_register_runstate_memory_area {
+		union {
+				struct vcpu_runstate_info *v;
+				uint64_t p;
+		} addr;
+};
+
+/*
+ * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
+ * which can be set via these commands. Periods smaller than one millisecond
+ * may not be supported.
+ */
+#define VCPUOP_set_periodic_timer	 6 /* arg == vcpu_set_periodic_timer_t */
+#define VCPUOP_stop_periodic_timer	 7 /* arg == NULL */
+struct vcpu_set_periodic_timer {
+		uint64_t period_ns;
+};
+
+/*
+ * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
+ * timer which can be set via these commands.
+ */
+#define VCPUOP_set_singleshot_timer	 8 /* arg == vcpu_set_singleshot_timer_t */
+#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
+struct vcpu_set_singleshot_timer {
+		uint64_t timeout_abs_ns;
+		uint32_t flags;			   /* VCPU_SSHOTTMR_??? */
+};
+
+/* Flags to VCPUOP_set_singleshot_timer. */
+ /* Require the timeout to be in the future (return -ETIME if it's passed). */
+#define _VCPU_SSHOTTMR_future (0)
+#define VCPU_SSHOTTMR_future  (1U << _VCPU_SSHOTTMR_future)
+
+/*
+ * Register a memory location in the guest address space for the
+ * vcpu_info structure.  This allows the guest to place the vcpu_info
+ * structure in a convenient place, such as in a per-cpu data area.
+ * The pointer need not be page aligned, but the structure must not
+ * cross a page boundary.
+ */
+#define VCPUOP_register_vcpu_info   10  /* arg == struct vcpu_info */
+struct vcpu_register_vcpu_info {
+    uint32_t mfn;               /* mfn of page to place vcpu_info */
+    uint32_t offset;            /* offset within page */
+};
+
+#endif /* __XEN_PUBLIC_VCPU_H__ */
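
As an illustration of the hypercall prototype above, here is a sketch of
arming a one-shot timer on a given VCPU; it assumes a HYPERVISOR_vcpu_op()
wrapper following the int vcpu_op(int cmd, int vcpuid, void *extra_args)
convention:

	static int set_oneshot_timer_sketch(int cpu, uint64_t abs_ns)
	{
		struct vcpu_set_singleshot_timer single = {
			.timeout_abs_ns = abs_ns,
			.flags		= VCPU_SSHOTTMR_future,
		};

		/* Returns -ETIME if abs_ns is already in the past. */
		return HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
	}

Setting VCPU_SSHOTTMR_future closes the race where the timeout expires between
computing abs_ns and issuing the hypercall.
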
diff --git a/include/xen/interface/version.h b/include/xen/interface/version.h
new file mode 100644
index 0000000..453235e
--- /dev/null
+++ b/include/xen/interface/version.h
@@ -0,0 +1,60 @@
+/******************************************************************************
+ * version.h
+ *
+ * Xen version, type, and compile information.
+ *
+ * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com>
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_VERSION_H__
+#define __XEN_PUBLIC_VERSION_H__
+
+/* NB. All ops return zero on success, except XENVER_version. */
+
+/* arg == NULL; returns major:minor (16:16). */
+#define XENVER_version      0
+
+/* arg == xen_extraversion_t. */
+#define XENVER_extraversion 1
+struct xen_extraversion {
+    char extraversion[16];
+};
+#define XEN_EXTRAVERSION_LEN (sizeof(struct xen_extraversion))
+
+/* arg == xen_compile_info_t. */
+#define XENVER_compile_info 2
+struct xen_compile_info {
+    char compiler[64];
+    char compile_by[16];
+    char compile_domain[32];
+    char compile_date[32];
+};
+
+#define XENVER_capabilities 3
+struct xen_capabilities_info {
+    char info[1024];
+};
+#define XEN_CAPABILITIES_INFO_LEN (sizeof(struct xen_capabilities_info))
+
+#define XENVER_changeset 4
+struct xen_changeset_info {
+    char info[64];
+};
+#define XEN_CHANGESET_INFO_LEN (sizeof(struct xen_changeset_info))
+
+#define XENVER_platform_parameters 5
+struct xen_platform_parameters {
+    unsigned long virt_start;
+};
+
+#define XENVER_get_features 6
+struct xen_feature_info {
+    unsigned int submap_idx;    /* IN: which 32-bit submap to return */
+    uint32_t     submap;        /* OUT: 32-bit submap */
+};
+
+/* Declares the features reported by XENVER_get_features. */
+#include "features.h"
+
+#endif /* __XEN_PUBLIC_VERSION_H__ */
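
The 16:16 encoding returned by XENVER_version can be decoded as in the sketch
below, which assumes a HYPERVISOR_xen_version(cmd, arg) hypercall wrapper:

	static void report_xen_version_sketch(void)
	{
		struct xen_extraversion extra;
		int version = HYPERVISOR_xen_version(XENVER_version, NULL);

		HYPERVISOR_xen_version(XENVER_extraversion, &extra);
		printk(KERN_INFO "Running on Xen %d.%d%s\n",
		       version >> 16, version & 0xffff, extra.extraversion);
	}
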
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
new file mode 100644
index 0000000..518a5bf
--- /dev/null
+++ b/include/xen/interface/xen.h
@@ -0,0 +1,447 @@
+/******************************************************************************
+ * xen.h
+ *
+ * Guest OS interface to Xen.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_XEN_H__
+#define __XEN_PUBLIC_XEN_H__
+
+#include <asm/xen/interface.h>
+
+/*
+ * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS).
+ */
+
+/*
+ * x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5.
+ *         EAX = return value
+ *         (argument registers may be clobbered on return)
+ * x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6.
+ *         RAX = return value
+ *         (argument registers not clobbered on return; RCX, R11 are)
+ */
+#define __HYPERVISOR_set_trap_table        0
+#define __HYPERVISOR_mmu_update            1
+#define __HYPERVISOR_set_gdt               2
+#define __HYPERVISOR_stack_switch          3
+#define __HYPERVISOR_set_callbacks         4
+#define __HYPERVISOR_fpu_taskswitch        5
+#define __HYPERVISOR_sched_op              6
+#define __HYPERVISOR_dom0_op               7
+#define __HYPERVISOR_set_debugreg          8
+#define __HYPERVISOR_get_debugreg          9
+#define __HYPERVISOR_update_descriptor    10
+#define __HYPERVISOR_memory_op            12
+#define __HYPERVISOR_multicall            13
+#define __HYPERVISOR_update_va_mapping    14
+#define __HYPERVISOR_set_timer_op         15
+#define __HYPERVISOR_event_channel_op_compat 16
+#define __HYPERVISOR_xen_version          17
+#define __HYPERVISOR_console_io           18
+#define __HYPERVISOR_physdev_op_compat    19
+#define __HYPERVISOR_grant_table_op       20
+#define __HYPERVISOR_vm_assist            21
+#define __HYPERVISOR_update_va_mapping_otherdomain 22
+#define __HYPERVISOR_iret                 23 /* x86 only */
+#define __HYPERVISOR_vcpu_op              24
+#define __HYPERVISOR_set_segment_base     25 /* x86/64 only */
+#define __HYPERVISOR_mmuext_op            26
+#define __HYPERVISOR_acm_op               27
+#define __HYPERVISOR_nmi_op               28
+#define __HYPERVISOR_sched_op_new         29
+#define __HYPERVISOR_callback_op          30
+#define __HYPERVISOR_xenoprof_op          31
+#define __HYPERVISOR_event_channel_op     32
+#define __HYPERVISOR_physdev_op           33
+#define __HYPERVISOR_hvm_op               34
+
+/*
+ * VIRTUAL INTERRUPTS
+ *
+ * Virtual interrupts that a guest OS may receive from Xen.
+ */
+#define VIRQ_TIMER      0  /* Timebase update, and/or requested timeout.  */
+#define VIRQ_DEBUG      1  /* Request guest to dump debug info.           */
+#define VIRQ_CONSOLE    2  /* (DOM0) Bytes received on emergency console. */
+#define VIRQ_DOM_EXC    3  /* (DOM0) Exceptional event for some domain.   */
+#define VIRQ_DEBUGGER   6  /* (DOM0) A domain has paused for debugging.   */
+#define NR_VIRQS        8
+
+/*
+ * MMU-UPDATE REQUESTS
+ *
+ * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
+ * Where the FD has some effect, it is described below.
+ * ptr[1:0] specifies the appropriate MMU_* command.
+ *
+ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
+ * Updates an entry in a page table. If updating an L1 table, and the new
+ * table entry is valid/present, the mapped frame must belong to the FD, if
+ * an FD has been specified. If attempting to map an I/O page then the
+ * caller assumes the privilege of the FD.
+ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
+ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
+ * ptr[:2]  -- Machine address of the page-table entry to modify.
+ * val      -- Value to write.
+ *
+ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
+ * Updates an entry in the machine->pseudo-physical mapping table.
+ * ptr[:2]  -- Machine address within the frame whose mapping to modify.
+ *             The frame must belong to the FD, if one is specified.
+ * val      -- Value to write into the mapping entry.
+ */
+#define MMU_NORMAL_PT_UPDATE     0 /* checked '*ptr = val'. ptr is MA.       */
+#define MMU_MACHPHYS_UPDATE      1 /* ptr = MA of frame to modify entry for  */
+
+/*
+ * MMU EXTENDED OPERATIONS
+ *
+ * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
+ * Where the FD has some effect, it is described below.
+ *
+ * cmd: MMUEXT_(UN)PIN_*_TABLE
+ * mfn: Machine frame number to be (un)pinned as a p.t. page.
+ *      The frame must belong to the FD, if one is specified.
+ *
+ * cmd: MMUEXT_NEW_BASEPTR
+ * mfn: Machine frame number of new page-table base to install in MMU.
+ *
+ * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
+ * mfn: Machine frame number of new page-table base to install in MMU
+ *      when in user space.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_LOCAL
+ * No additional arguments. Flushes local TLB.
+ *
+ * cmd: MMUEXT_INVLPG_LOCAL
+ * linear_addr: Linear address to be flushed from the local TLB.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_MULTI
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
+ *
+ * cmd: MMUEXT_INVLPG_MULTI
+ * linear_addr: Linear address to be flushed.
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_ALL
+ * No additional arguments. Flushes all VCPUs' TLBs.
+ *
+ * cmd: MMUEXT_INVLPG_ALL
+ * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
+ *
+ * cmd: MMUEXT_FLUSH_CACHE
+ * No additional arguments. Writes back and flushes cache contents.
+ *
+ * cmd: MMUEXT_SET_LDT
+ * linear_addr: Linear address of LDT base (NB. must be page-aligned).
+ * nr_ents: Number of entries in LDT.
+ */
+#define MMUEXT_PIN_L1_TABLE      0
+#define MMUEXT_PIN_L2_TABLE      1
+#define MMUEXT_PIN_L3_TABLE      2
+#define MMUEXT_PIN_L4_TABLE      3
+#define MMUEXT_UNPIN_TABLE       4
+#define MMUEXT_NEW_BASEPTR       5
+#define MMUEXT_TLB_FLUSH_LOCAL   6
+#define MMUEXT_INVLPG_LOCAL      7
+#define MMUEXT_TLB_FLUSH_MULTI   8
+#define MMUEXT_INVLPG_MULTI      9
+#define MMUEXT_TLB_FLUSH_ALL    10
+#define MMUEXT_INVLPG_ALL       11
+#define MMUEXT_FLUSH_CACHE      12
+#define MMUEXT_SET_LDT          13
+#define MMUEXT_NEW_USER_BASEPTR 15
+
+#ifndef __ASSEMBLY__
+struct mmuext_op {
+	unsigned int cmd;
+	union {
+		/* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
+		unsigned long mfn;
+		/* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
+		unsigned long linear_addr;
+	} arg1;
+	union {
+		/* SET_LDT */
+		unsigned int nr_ents;
+		/* TLB_FLUSH_MULTI, INVLPG_MULTI */
+		void *vcpumask;
+	} arg2;
+};
+DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
+#endif
+
+/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
+/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap.   */
+/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer.         */
+#define UVMF_NONE               (0UL<<0) /* No flushing at all.   */
+#define UVMF_TLB_FLUSH          (1UL<<0) /* Flush entire TLB(s).  */
+#define UVMF_INVLPG             (2UL<<0) /* Flush only one entry. */
+#define UVMF_FLUSHTYPE_MASK     (3UL<<0)
+#define UVMF_MULTI              (0UL<<2) /* Flush subset of TLBs. */
+#define UVMF_LOCAL              (0UL<<2) /* Flush local TLB.      */
+#define UVMF_ALL                (1UL<<2) /* Flush all TLBs.       */
+
+/*
+ * Commands to HYPERVISOR_console_io().
+ */
+#define CONSOLEIO_write         0
+#define CONSOLEIO_read          1
+
+/*
+ * Commands to HYPERVISOR_vm_assist().
+ */
+#define VMASST_CMD_enable                0
+#define VMASST_CMD_disable               1
+#define VMASST_TYPE_4gb_segments         0
+#define VMASST_TYPE_4gb_segments_notify  1
+#define VMASST_TYPE_writable_pagetables  2
+#define VMASST_TYPE_pae_extended_cr3     3
+#define MAX_VMASST_TYPE 3
+
+#ifndef __ASSEMBLY__
+
+typedef uint16_t domid_t;
+
+/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
+#define DOMID_FIRST_RESERVED (0x7FF0U)
+
+/* DOMID_SELF is used in certain contexts to refer to oneself. */
+#define DOMID_SELF (0x7FF0U)
+
+/*
+ * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
+ * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
+ * is useful to ensure that no mappings to the OS's own heap are accidentally
+ * installed. (e.g., in Linux this could cause havoc as reference counts
+ * aren't adjusted on the I/O-mapping code path).
+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
+ * be specified by any calling domain.
+ */
+#define DOMID_IO   (0x7FF1U)
+
+/*
+ * DOMID_XEN is used to allow privileged domains to map restricted parts of
+ * Xen's heap space (e.g., the machine_to_phys table).
+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
+ * the caller is privileged.
+ */
+#define DOMID_XEN  (0x7FF2U)
+
+/*
+ * Send an array of these to HYPERVISOR_mmu_update().
+ * NB. The fields are natural pointer/address size for this architecture.
+ */
+struct mmu_update {
+    uint64_t ptr;       /* Machine address of PTE. */
+    uint64_t val;       /* New contents of PTE.    */
+};
+DEFINE_GUEST_HANDLE_STRUCT(mmu_update);
+
+/*
+ * Send an array of these to HYPERVISOR_multicall().
+ * NB. The fields are natural register size for this architecture.
+ */
+struct multicall_entry {
+    unsigned long op;
+    long result;
+    unsigned long args[6];
+};
+DEFINE_GUEST_HANDLE_STRUCT(multicall_entry);
+
+/*
+ * Event channel endpoints per domain:
+ *  1024 if a long is 32 bits; 4096 if a long is 64 bits.
+ */
+#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
+
+struct vcpu_time_info {
+	/*
+	 * Updates to the following values are preceded and followed
+	 * by an increment of 'version'. The guest can therefore
+	 * detect updates by looking for changes to 'version'. If the
+	 * least-significant bit of the version number is set then an
+	 * update is in progress and the guest must wait to read a
+	 * consistent set of values.  The correct way to interact with
+	 * the version number is similar to Linux's seqlock: see the
+	 * implementations of read_seqbegin/read_seqretry.
+	 */
+	uint32_t version;
+	uint32_t pad0;
+	uint64_t tsc_timestamp;   /* TSC at last update of time vals.  */
+	uint64_t system_time;     /* Time, in nanosecs, since boot.    */
+	/*
+	 * Current system time:
+	 *   system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul
+	 * CPU frequency (Hz):
+	 *   ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
+	 */
+	uint32_t tsc_to_system_mul;
+	int8_t   tsc_shift;
+	int8_t   pad1[3];
+}; /* 32 bytes */
+
+struct vcpu_info {
+	/*
+	 * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
+	 * a pending notification for a particular VCPU. It is then cleared
+	 * by the guest OS /before/ checking for pending work, thus avoiding
+	 * a set-and-check race. Note that the mask is only accessed by Xen
+	 * on the CPU that is currently hosting the VCPU. This means that the
+	 * pending and mask flags can be updated by the guest without special
+	 * synchronisation (i.e., no need for the x86 LOCK prefix).
+	 * This may seem suboptimal because if the pending flag is set by
+	 * a different CPU then an IPI may be scheduled even when the mask
+	 * is set. However, note:
+	 *  1. The task of 'interrupt holdoff' is covered by the per-event-
+	 *     channel mask bits. A 'noisy' event that is continually being
+	 *     triggered can be masked at source at this very precise
+	 *     granularity.
+	 *  2. The main purpose of the per-VCPU mask is therefore to restrict
+	 *     reentrant execution: whether for concurrency control, or to
+	 *     prevent unbounded stack usage. Whatever the purpose, we expect
+	 *     that the mask will be asserted only for short periods at a time,
+	 *     and so the likelihood of a 'spurious' IPI is suitably small.
+	 * The mask is read before making an event upcall to the guest: a
+	 * non-zero mask therefore guarantees that the VCPU will not receive
+	 * an upcall activation. The mask is cleared when the VCPU requests
+	 * to block: this avoids wakeup-waiting races.
+	 */
+	uint8_t evtchn_upcall_pending;
+	uint8_t evtchn_upcall_mask;
+	unsigned long evtchn_pending_sel;
+	struct arch_vcpu_info arch;
+	struct vcpu_time_info time;
+}; /* 64 bytes (x86) */
+
+/*
+ * Xen/kernel shared data -- pointer provided in start_info.
+ * NB. We expect that this struct is smaller than a page.
+ */
+struct shared_info {
+	struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
+
+	/*
+	 * A domain can create "event channels" on which it can send and receive
+	 * asynchronous event notifications. There are three classes of event that
+	 * are delivered by this mechanism:
+	 *  1. Bi-directional inter- and intra-domain connections. Domains must
+	 *     arrange out-of-band to set up a connection (usually by allocating
+	 *     an unbound 'listener' port and advertising that via a storage service
+	 *     such as xenstore).
+	 *  2. Physical interrupts. A domain with suitable hardware-access
+	 *     privileges can bind an event-channel port to a physical interrupt
+	 *     source.
+	 *  3. Virtual interrupts ('events'). A domain can bind an event-channel
+	 *     port to a virtual interrupt source, such as the virtual-timer
+	 *     device or the emergency console.
+	 *
+	 * Event channels are addressed by a "port index". Each channel is
+	 * associated with two bits of information:
+	 *  1. PENDING -- notifies the domain that there is a pending notification
+	 *     to be processed. This bit is cleared by the guest.
+	 *  2. MASK -- if this bit is clear then a 0->1 transition of PENDING
+	 *     will cause an asynchronous upcall to be scheduled. This bit is only
+	 *     updated by the guest. It is read-only within Xen. If a channel
+	 *     becomes pending while the channel is masked then the 'edge' is lost
+	 *     (i.e., when the channel is unmasked, the guest must manually handle
+	 *     pending notifications as no upcall will be scheduled by Xen).
+	 *
+	 * To expedite scanning of pending notifications, any 0->1 pending
+	 * transition on an unmasked channel causes a corresponding bit in a
+	 * per-vcpu selector word to be set. Each bit in the selector covers a
+	 * 'C long' in the PENDING bitfield array.
+	 */
+	unsigned long evtchn_pending[sizeof(unsigned long) * 8];
+	unsigned long evtchn_mask[sizeof(unsigned long) * 8];
+
+	/*
+	 * Wallclock time: updated only by control software. Guests should base
+	 * their gettimeofday() syscall on this wallclock-base value.
+	 */
+	uint32_t wc_version;      /* Version counter: see vcpu_time_info_t. */
+	uint32_t wc_sec;          /* Secs  since 00:00:00 UTC, Jan 1, 1970. */
+	uint32_t wc_nsec;         /* Nsecs since 00:00:00 UTC, Jan 1, 1970. */
+
+	struct arch_shared_info arch;
+
+};
+
+/*
+ * Start-of-day memory layout for the initial domain (DOM0):
+ *  1. The domain is started within contiguous virtual-memory region.
+ *  2. The contiguous region begins and ends on an aligned 4MB boundary.
+ *  3. The region start corresponds to the load address of the OS image.
+ *     If the load address is not 4MB aligned then the address is rounded down.
+ *  4. This is the order of bootstrap elements in the initial virtual region:
+ *      a. relocated kernel image
+ *      b. initial ram disk              [mod_start, mod_len]
+ *      c. list of allocated page frames [mfn_list, nr_pages]
+ *      d. start_info_t structure        [register ESI (x86)]
+ *      e. bootstrap page tables         [pt_base, CR3 (x86)]
+ *      f. bootstrap stack               [register ESP (x86)]
+ *  5. Bootstrap elements are packed together, but each is 4kB-aligned.
+ *  6. The initial ram disk may be omitted.
+ *  7. The list of page frames forms a contiguous 'pseudo-physical' memory
+ *     layout for the domain. In particular, the bootstrap virtual-memory
+ *     region is a 1:1 mapping to the first section of the pseudo-physical map.
+ *  8. All bootstrap elements are mapped read-writable for the guest OS. The
+ *     only exception is the bootstrap page table, which is mapped read-only.
+ *  9. There is guaranteed to be at least 512kB padding after the final
+ *     bootstrap element. If necessary, the bootstrap virtual region is
+ *     extended by an extra 4MB to ensure this.
+ */
+
+#define MAX_GUEST_CMDLINE 1024
+struct start_info {
+	/* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME.    */
+	char magic[32];             /* "xen-<version>-<platform>".            */
+	unsigned long nr_pages;     /* Total pages allocated to this domain.  */
+	unsigned long shared_info;  /* MACHINE address of shared info struct. */
+	uint32_t flags;             /* SIF_xxx flags.                         */
+	unsigned long store_mfn;    /* MACHINE page number of shared page.    */
+	uint32_t store_evtchn;      /* Event channel for store communication. */
+	union {
+		struct {
+			unsigned long mfn;  /* MACHINE page number of console page.   */
+			uint32_t  evtchn;   /* Event channel for console page.        */
+		} domU;
+		struct {
+			uint32_t info_off;  /* Offset of console_info struct.         */
+			uint32_t info_size; /* Size of console_info struct from start.*/
+		} dom0;
+	} console;
+	/* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME).     */
+	unsigned long pt_base;      /* VIRTUAL address of page directory.     */
+	unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames.       */
+	unsigned long mfn_list;     /* VIRTUAL address of page-frame list.    */
+	unsigned long mod_start;    /* VIRTUAL address of pre-loaded module.  */
+	unsigned long mod_len;      /* Size (bytes) of pre-loaded module.     */
+	int8_t cmd_line[MAX_GUEST_CMDLINE];
+};
+
+/* These flags are passed in the 'flags' field of start_info_t. */
+#define SIF_PRIVILEGED    (1<<0)  /* Is the domain privileged? */
+#define SIF_INITDOMAIN    (1<<1)  /* Is this the initial control domain? */
+
+typedef uint64_t cpumap_t;
+
+typedef uint8_t xen_domain_handle_t[16];
+
+/* Turn a plain number into a C unsigned long constant. */
+#define __mk_unsigned_long(x) x ## UL
+#define mk_unsigned_long(x) __mk_unsigned_long(x)
+
+#else /* __ASSEMBLY__ */
+
+/* In assembly code we cannot use C numeric constant suffixes. */
+#define mk_unsigned_long(x) x
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __XEN_PUBLIC_XEN_H__ */
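
The vcpu_time_info block is read with the lock-free, seqlock-style protocol
described above.  A minimal sketch of computing the current system time from
it, assuming an x86 TSC read (rdtscll()) and the usual barrier primitives; a
full implementation uses a wider intermediate product so the fixed-point
multiply cannot overflow:

	static u64 read_system_time_sketch(const struct vcpu_time_info *t)
	{
		u32 version, mul;
		u64 tsc, delta, system_time;
		s8 shift;

		do {
			version = t->version;
			rmb();			/* version first, payload second */
			rdtscll(tsc);
			delta	    = tsc - t->tsc_timestamp;
			mul	    = t->tsc_to_system_mul;
			shift	    = t->tsc_shift;
			system_time = t->system_time;
			rmb();			/* payload first, re-check second */
		} while ((version & 1) || version != t->version);

		if (shift >= 0)
			delta <<= shift;
		else
			delta >>= -shift;

		/* tsc_to_system_mul is a 32.32 fixed-point factor. */
		return system_time + ((delta * mul) >> 32);
	}
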
diff --git a/include/xen/page.h b/include/xen/page.h
new file mode 100644
index 0000000..1df6c19
--- /dev/null
+++ b/include/xen/page.h
@@ -0,0 +1,179 @@
+#ifndef __XEN_PAGE_H
+#define __XEN_PAGE_H
+
+#include <linux/pfn.h>
+
+#include <asm/uaccess.h>
+
+#include <xen/features.h>
+
+#ifdef CONFIG_X86_PAE
+/* Xen machine address */
+typedef struct xmaddr {
+	unsigned long long maddr;
+} xmaddr_t;
+
+/* Xen pseudo-physical address */
+typedef struct xpaddr {
+	unsigned long long paddr;
+} xpaddr_t;
+#else
+/* Xen machine address */
+typedef struct xmaddr {
+	unsigned long maddr;
+} xmaddr_t;
+
+/* Xen pseudo-physical address */
+typedef struct xpaddr {
+	unsigned long paddr;
+} xpaddr_t;
+#endif
+
+#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
+#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })
+
+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+#define INVALID_P2M_ENTRY	(~0UL)
+#define FOREIGN_FRAME_BIT	(1UL<<31)
+#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
+
+extern unsigned long *phys_to_machine_mapping;
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return pfn;
+
+	return phys_to_machine_mapping[(unsigned int)(pfn)] &
+		~FOREIGN_FRAME_BIT;
+}
+
+static inline int phys_to_machine_mapping_valid(unsigned long pfn)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return 1;
+
+	return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+	unsigned long pfn;
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return mfn;
+
+#if 0
+	if (unlikely((mfn >> machine_to_phys_order) != 0))
+		return max_mapnr;
+#endif
+
+	pfn = 0;
+	/*
+	 * The array access can fail (e.g., device space beyond end of RAM).
+	 * In such cases it doesn't matter what we return (we return garbage),
+	 * but we must handle the fault without crashing!
+	 */
+	__get_user(pfn, &machine_to_phys_mapping[mfn]);
+
+	return pfn;
+}
+
+static inline xmaddr_t phys_to_machine(xpaddr_t phys)
+{
+	unsigned offset = phys.paddr & ~PAGE_MASK;
+	return XMADDR(PFN_PHYS((u64)pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
+}
+
+static inline xpaddr_t machine_to_phys(xmaddr_t machine)
+{
+	unsigned offset = machine.maddr & ~PAGE_MASK;
+	return XPADDR(PFN_PHYS((u64)mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
+}
+
+/*
+ * We detect special mappings in one of two ways:
+ *  1. If the MFN is an I/O page then Xen will set the m2p entry
+ *     to be outside our maximum possible pseudophys range.
+ *  2. If the MFN belongs to a different domain then we will certainly
+ *     not have MFN in our p2m table. Conversely, if the page is ours,
+ *     then we'll have p2m(m2p(MFN))==MFN.
+ * If we detect a special mapping then it doesn't have a 'struct page'.
+ * We force !pfn_valid() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ *
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ *      require. In all the cases we care about, the FOREIGN_FRAME bit is
+ *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
+ */
+static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
+{
+	extern unsigned long max_mapnr;
+	unsigned long pfn = mfn_to_pfn(mfn);
+	if ((pfn < max_mapnr)
+	    && !xen_feature(XENFEAT_auto_translated_physmap)
+	    && (phys_to_machine_mapping[pfn] != mfn))
+		return max_mapnr; /* force !pfn_valid() */
+	return pfn;
+}
+
+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+		return;
+	}
+	phys_to_machine_mapping[pfn] = mfn;
+}
+
+/* VIRT <-> MACHINE conversion */
+#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
+#define virt_to_mfn(v)		(pfn_to_mfn(PFN_DOWN(__pa(v))))
+#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
+
+#ifdef CONFIG_X86_PAE
+#define pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) |			\
+		       (((_pte).pte_high & 0xfff) << (32-PAGE_SHIFT)))
+
+static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
+{
+	pte_t pte;
+
+	pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) |
+		(pgprot_val(pgprot) >> 32);
+	pte.pte_high &= (__supported_pte_mask >> 32);
+	pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
+	pte.pte_low &= __supported_pte_mask;
+
+	return pte;
+}
+
+static inline unsigned long long pte_val_ma(pte_t x)
+{
+	return ((unsigned long long)x.pte_high << 32) | x.pte_low;
+}
+#define pmd_val_ma(v) ((v).pmd)
+#define pud_val_ma(v) ((v).pgd.pgd)
+#define __pte_ma(x)	((pte_t) { .pte_low = (x), .pte_high = (x)>>32 } )
+#define __pmd_ma(x)	((pmd_t) { (x) } )
+#else  /* !X86_PAE */
+#define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
+#define mfn_pte(pfn, prot)	__pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pte_val_ma(x)	((x).pte_low)
+#define pmd_val_ma(v)	((v).pud.pgd.pgd)
+#define __pte_ma(x)	((pte_t) { (x) } )
+#endif	/* CONFIG_X86_PAE */
+
+#define pgd_val_ma(x)	((x).pgd)
+
+
+xmaddr_t arbitrary_virt_to_machine(unsigned long address);
+void make_lowmem_page_readonly(void *vaddr);
+void make_lowmem_page_readwrite(void *vaddr);
+
+#endif /* __XEN_PAGE_H */
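
A short sketch of how the helpers above compose; the argument must be a
directly-mapped (lowmem) kernel virtual address, and the function name is
purely illustrative:

	static unsigned long kernel_mfn_sketch(void *vaddr)
	{
		unsigned long pfn = PFN_DOWN(__pa(vaddr));
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Our own, non-foreign pages round-trip through p2m/m2p. */
		BUG_ON(!phys_to_machine_mapping_valid(pfn));
		BUG_ON(mfn_to_local_pfn(mfn) != pfn);

		return mfn;	/* same value that virt_to_mfn(vaddr) yields */
	}
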
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
new file mode 100644
index 0000000..6f7c290
--- /dev/null
+++ b/include/xen/xenbus.h
@@ -0,0 +1,234 @@
+/******************************************************************************
+ * xenbus.h
+ *
+ * Talks to Xen Store to figure out what devices we have.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ * Copyright (C) 2005 XenSource Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _XEN_XENBUS_H
+#define _XEN_XENBUS_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/xenbus.h>
+#include <xen/interface/io/xs_wire.h>
+
+/* Register callback to watch this node. */
+struct xenbus_watch
+{
+	struct list_head list;
+
+	/* Path being watched. */
+	const char *node;
+
+	/* Callback (executed in a process context with no locks held). */
+	void (*callback)(struct xenbus_watch *,
+			 const char **vec, unsigned int len);
+};
+
+
+/* A xenbus device. */
+struct xenbus_device {
+	const char *devicetype;
+	const char *nodename;
+	const char *otherend;
+	int otherend_id;
+	struct xenbus_watch otherend_watch;
+	struct device dev;
+	enum xenbus_state state;
+	struct completion down;
+};
+
+static inline struct xenbus_device *to_xenbus_device(struct device *dev)
+{
+	return container_of(dev, struct xenbus_device, dev);
+}
+
+struct xenbus_device_id
+{
+	/* .../device/<device_type>/<identifier> */
+	char devicetype[32]; 	/* General class of device. */
+};
+
+/* A xenbus driver. */
+struct xenbus_driver {
+	char *name;
+	struct module *owner;
+	const struct xenbus_device_id *ids;
+	int (*probe)(struct xenbus_device *dev,
+		     const struct xenbus_device_id *id);
+	void (*otherend_changed)(struct xenbus_device *dev,
+				 enum xenbus_state backend_state);
+	int (*remove)(struct xenbus_device *dev);
+	int (*suspend)(struct xenbus_device *dev);
+	int (*suspend_cancel)(struct xenbus_device *dev);
+	int (*resume)(struct xenbus_device *dev);
+	int (*uevent)(struct xenbus_device *, char **, int, char *, int);
+	struct device_driver driver;
+	int (*read_otherend_details)(struct xenbus_device *dev);
+};
+
+static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
+{
+	return container_of(drv, struct xenbus_driver, driver);
+}
+
+int __must_check __xenbus_register_frontend(struct xenbus_driver *drv,
+					    struct module *owner,
+					    const char *mod_name);
+
+static inline int __must_check
+xenbus_register_frontend(struct xenbus_driver *drv)
+{
+	WARN_ON(drv->owner != THIS_MODULE);
+	return __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
+}
+
+int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
+					   struct module *owner,
+					   const char *mod_name);
+static inline int __must_check
+xenbus_register_backend(struct xenbus_driver *drv)
+{
+	WARN_ON(drv->owner != THIS_MODULE);
+	return __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
+}
+
+void xenbus_unregister_driver(struct xenbus_driver *drv);
+
+struct xenbus_transaction
+{
+	u32 id;
+};
+
+/* Nil transaction ID. */
+#define XBT_NIL ((struct xenbus_transaction) { 0 })
+
+int __init xenbus_dev_init(void);
+
+char **xenbus_directory(struct xenbus_transaction t,
+			const char *dir, const char *node, unsigned int *num);
+void *xenbus_read(struct xenbus_transaction t,
+		  const char *dir, const char *node, unsigned int *len);
+int xenbus_write(struct xenbus_transaction t,
+		 const char *dir, const char *node, const char *string);
+int xenbus_mkdir(struct xenbus_transaction t,
+		 const char *dir, const char *node);
+int xenbus_exists(struct xenbus_transaction t,
+		  const char *dir, const char *node);
+int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node);
+int xenbus_transaction_start(struct xenbus_transaction *t);
+int xenbus_transaction_end(struct xenbus_transaction t, int abort);
+
+/* Single read and scanf: returns -errno or num scanned if > 0. */
+int xenbus_scanf(struct xenbus_transaction t,
+		 const char *dir, const char *node, const char *fmt, ...)
+	__attribute__((format(scanf, 4, 5)));
+
+/* Single printf and write: returns -errno or 0. */
+int xenbus_printf(struct xenbus_transaction t,
+		  const char *dir, const char *node, const char *fmt, ...)
+	__attribute__((format(printf, 4, 5)));
+
+/* Generic read function: NULL-terminated triples of name,
+ * sprintf-style type string, and pointer. Returns 0 or errno.*/
+int xenbus_gather(struct xenbus_transaction t, const char *dir, ...);
+
+/* notifier routines for when the xenstore comes up */
+extern int xenstored_ready;
+int register_xenstore_notifier(struct notifier_block *nb);
+void unregister_xenstore_notifier(struct notifier_block *nb);
+
+int register_xenbus_watch(struct xenbus_watch *watch);
+void unregister_xenbus_watch(struct xenbus_watch *watch);
+void xs_suspend(void);
+void xs_resume(void);
+void xs_suspend_cancel(void);
+
+/* Used by xenbus_dev to borrow kernel's store connection. */
+void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
+
+struct work_struct;
+
+/* Prepare for domain suspend: then resume or cancel the suspend. */
+void xenbus_suspend(void);
+void xenbus_resume(void);
+void xenbus_probe(struct work_struct *);
+void xenbus_suspend_cancel(void);
+
+#define XENBUS_IS_ERR_READ(str) ({			\
+	if (!IS_ERR(str) && strlen(str) == 0) {		\
+		kfree(str);				\
+		str = ERR_PTR(-ERANGE);			\
+	}						\
+	IS_ERR(str);					\
+})
+
+#define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
+
+int xenbus_watch_path(struct xenbus_device *dev, const char *path,
+		      struct xenbus_watch *watch,
+		      void (*callback)(struct xenbus_watch *,
+				       const char **, unsigned int));
+int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
+			 void (*callback)(struct xenbus_watch *,
+					  const char **, unsigned int),
+			 const char *pathfmt, ...)
+	__attribute__ ((format (printf, 4, 5)));
+
+int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
+int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
+int xenbus_map_ring_valloc(struct xenbus_device *dev,
+			   int gnt_ref, void **vaddr);
+int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
+			   grant_handle_t *handle, void *vaddr);
+
+int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);
+int xenbus_unmap_ring(struct xenbus_device *dev,
+		      grant_handle_t handle, void *vaddr);
+
+int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
+int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port);
+int xenbus_free_evtchn(struct xenbus_device *dev, int port);
+
+enum xenbus_state xenbus_read_driver_state(const char *path);
+
+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...);
+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...);
+
+const char *xenbus_strstate(enum xenbus_state state);
+int xenbus_dev_is_online(struct xenbus_device *dev);
+int xenbus_frontend_closed(struct xenbus_device *dev);
+
+#endif /* _XEN_XENBUS_H */
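
A skeleton of how a frontend hooks into this interface; the "vexample" device
type, the "ring-ref" store key and the probe logic are purely illustrative:

	static const struct xenbus_device_id example_ids[] = {
		{ "vexample" },
		{ "" }
	};

	static int example_probe(struct xenbus_device *dev,
				 const struct xenbus_device_id *id)
	{
		int ring_ref, err;

		err = xenbus_scanf(XBT_NIL, dev->nodename, "ring-ref",
				   "%d", &ring_ref);
		if (err < 0)
			return err;

		return xenbus_switch_state(dev, XenbusStateInitialised);
	}

	static struct xenbus_driver example_driver = {
		.name	= "vexample",
		.owner	= THIS_MODULE,
		.ids	= example_ids,
		.probe	= example_probe,
	};

	static int __init example_init(void)
	{
		return xenbus_register_frontend(&example_driver);
	}

A real driver would also supply .otherend_changed to track the backend's
xenbus_state transitions, plus .remove and .resume handlers as needed.
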
diff --git a/init/Kconfig b/init/Kconfig
index 0b0e29e..e205682 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -546,7 +546,7 @@
 
 choice
 	prompt "Choose SLAB allocator"
-	default SLAB
+	default SLUB
 	help
 	   This option allows to select a slab allocator.
 
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index b222ce9..a6b4c0c 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -56,12 +56,9 @@
 	sys_chroot(".");
 
 	pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD);
-	if (pid > 0) {
-		while (pid != sys_wait4(-1, NULL, 0, NULL)) {
-			try_to_freeze();
+	if (pid > 0)
+		while (pid != sys_wait4(-1, NULL, 0, NULL))
 			yield();
-		}
-	}
 
 	/* move initrd to rootfs' /old */
 	sys_fchdir(old_fd);
diff --git a/ipc/msg.c b/ipc/msg.c
index cbd27e5..a03fcb52 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -385,7 +385,7 @@
 asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
 {
 	struct kern_ipc_perm *ipcp;
-	struct msq_setbuf setbuf;
+	struct msq_setbuf uninitialized_var(setbuf);
 	struct msg_queue *msq;
 	int err, version;
 	struct ipc_namespace *ns;
@@ -509,7 +509,7 @@
 	err = audit_ipc_obj(ipcp);
 	if (err)
 		goto out_unlock_up;
-	if (cmd==IPC_SET) {
+	if (cmd == IPC_SET) {
 		err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid,
 					 setbuf.mode);
 		if (err)
diff --git a/ipc/sem.c b/ipc/sem.c
index 89bfdff..b676fef 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -856,7 +856,7 @@
 {
 	struct sem_array *sma;
 	int err;
-	struct sem_setbuf setbuf;
+	struct sem_setbuf uninitialized_var(setbuf);
 	struct kern_ipc_perm *ipcp;
 
 	if(cmd == IPC_SET) {
diff --git a/ipc/shm.c b/ipc/shm.c
index 242c3f6..d0259e3 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -224,13 +224,12 @@
 	mutex_unlock(&shm_ids(ns).mutex);
 }
 
-static struct page *shm_nopage(struct vm_area_struct *vma,
-			       unsigned long address, int *type)
+static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct file *file = vma->vm_file;
 	struct shm_file_data *sfd = shm_file_data(file);
 
-	return sfd->vm_ops->nopage(vma, address, type);
+	return sfd->vm_ops->fault(vma, vmf);
 }
 
 #ifdef CONFIG_NUMA
@@ -269,6 +268,7 @@
 	if (ret != 0)
 		return ret;
 	sfd->vm_ops = vma->vm_ops;
+	BUG_ON(!sfd->vm_ops->fault);
 	vma->vm_ops = &shm_vm_ops;
 	shm_open(vma);
 
@@ -327,7 +327,7 @@
 static struct vm_operations_struct shm_vm_ops = {
 	.open	= shm_open,	/* callback for a new vm-area open */
 	.close	= shm_close,	/* callback for when the vm-area is released */
-	.nopage	= shm_nopage,
+	.fault	= shm_fault,
 #if defined(CONFIG_NUMA)
 	.set_policy = shm_set_policy,
 	.get_policy = shm_get_policy,
diff --git a/kernel/audit.c b/kernel/audit.c
index 5ce8851..eb0f916 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -392,6 +392,7 @@
 {
 	struct sk_buff *skb;
 
+	set_freezable();
 	while (!kthread_should_stop()) {
 		skb = skb_dequeue(&audit_skb_queue);
 		wake_up(&audit_backlog_wait);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index ce61f42..1bf093d 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1210,8 +1210,8 @@
 	struct audit_entry *e;
 	struct audit_field *inode_f = entry->rule.inode_f;
 	struct audit_watch *watch = entry->rule.watch;
-	struct nameidata *ndp, *ndw;
-	int h, err, putnd_needed = 0;
+	struct nameidata *ndp = NULL, *ndw = NULL;
+	int h, err;
 #ifdef CONFIG_AUDITSYSCALL
 	int dont_count = 0;
 
@@ -1239,7 +1239,6 @@
 		err = audit_get_nd(watch->path, &ndp, &ndw);
 		if (err)
 			goto error;
-		putnd_needed = 1;
 	}
 
 	mutex_lock(&audit_filter_mutex);
@@ -1269,14 +1268,11 @@
 #endif
 	mutex_unlock(&audit_filter_mutex);
 
-	if (putnd_needed)
-		audit_put_nd(ndp, ndw);
-
+	audit_put_nd(ndp, ndw);		/* NULL args OK */
  	return 0;
 
 error:
-	if (putnd_needed)
-		audit_put_nd(ndp, ndw);
+	audit_put_nd(ndp, ndw);		/* NULL args OK */
 	if (watch)
 		audit_put_watch(watch); /* tmp watch, matches initial get */
 	return err;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index b7640a5..145cbb7 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -153,7 +153,7 @@
 	struct audit_aux_data	d;
 	int argc;
 	int envc;
-	char mem[0];
+	struct mm_struct *mm;
 };
 
 struct audit_aux_data_socketcall {
@@ -831,6 +831,55 @@
 	return rc;
 }
 
+static void audit_log_execve_info(struct audit_buffer *ab,
+		struct audit_aux_data_execve *axi)
+{
+	int i;
+	long len, ret;
+	const char __user *p = (const char __user *)axi->mm->arg_start;
+	char *buf;
+
+	if (axi->mm != current->mm)
+		return; /* execve failed, no additional info */
+
+	for (i = 0; i < axi->argc; i++, p += len) {
+		len = strnlen_user(p, MAX_ARG_STRLEN);
+		/*
+		 * We just created this mm, if we can't find the strings
+		 * we just copied into it something is _very_ wrong. Similar
+		 * for strings that are too long, we should not have created
+		 * any.
+		 */
+		if (!len || len > MAX_ARG_STRLEN) {
+			WARN_ON(1);
+			send_sig(SIGKILL, current, 0);
+		}
+
+		buf = kmalloc(len, GFP_KERNEL);
+		if (!buf) {
+			audit_panic("out of memory for argv string\n");
+			break;
+		}
+
+		ret = copy_from_user(buf, p, len);
+		/*
+		 * There is no reason for this copy to be short. We just
+		 * copied them here, and the mm hasn't been exposed to user-
+		 * space yet.
+		 */
+		if (!ret) {
+			WARN_ON(1);
+			send_sig(SIGKILL, current, 0);
+		}
+
+		audit_log_format(ab, "a%d=", i);
+		audit_log_untrustedstring(ab, buf);
+		audit_log_format(ab, "\n");
+
+		kfree(buf);
+	}
+}
+
 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 {
 	int i, call_panic = 0;
@@ -971,13 +1020,7 @@
 
 		case AUDIT_EXECVE: {
 			struct audit_aux_data_execve *axi = (void *)aux;
-			int i;
-			const char *p;
-			for (i = 0, p = axi->mem; i < axi->argc; i++) {
-				audit_log_format(ab, "a%d=", i);
-				p = audit_log_untrustedstring(ab, p);
-				audit_log_format(ab, "\n");
-			}
+			audit_log_execve_info(ab, axi);
 			break; }
 
 		case AUDIT_SOCKETCALL: {
@@ -1821,32 +1864,31 @@
 	return 0;
 }
 
+int audit_argv_kb = 32;
+
 int audit_bprm(struct linux_binprm *bprm)
 {
 	struct audit_aux_data_execve *ax;
 	struct audit_context *context = current->audit_context;
-	unsigned long p, next;
-	void *to;
 
 	if (likely(!audit_enabled || !context || context->dummy))
 		return 0;
 
-	ax = kmalloc(sizeof(*ax) + PAGE_SIZE * MAX_ARG_PAGES - bprm->p,
-				GFP_KERNEL);
+	/*
+	 * Even though the stack code doesn't limit the arg+env size any more,
+	 * the audit code requires that _all_ arguments be logged in a single
+	 * netlink skb. Hence cap it :-(
+	 */
+	if (bprm->argv_len > (audit_argv_kb << 10))
+		return -E2BIG;
+
+	ax = kmalloc(sizeof(*ax), GFP_KERNEL);
 	if (!ax)
 		return -ENOMEM;
 
 	ax->argc = bprm->argc;
 	ax->envc = bprm->envc;
-	for (p = bprm->p, to = ax->mem; p < MAX_ARG_PAGES*PAGE_SIZE; p = next) {
-		struct page *page = bprm->page[p / PAGE_SIZE];
-		void *kaddr = kmap(page);
-		next = (p + PAGE_SIZE) & ~(PAGE_SIZE - 1);
-		memcpy(to, kaddr + (p & (PAGE_SIZE - 1)), next - p);
-		to += next - p;
-		kunmap(page);
-	}
-
+	ax->mm = bprm->mm;
 	ax->d.type = AUDIT_EXECVE;
 	ax->d.next = context->aux;
 	context->aux = (void *)ax;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 208cf34..181ae70 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -103,11 +103,19 @@
 	write_unlock_irq(&tasklist_lock);
 }
 
+struct take_cpu_down_param {
+	unsigned long mod;
+	void *hcpu;
+};
+
 /* Take this CPU down. */
-static int take_cpu_down(void *unused)
+static int take_cpu_down(void *_param)
 {
+	struct take_cpu_down_param *param = _param;
 	int err;
 
+	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
+				param->hcpu);
 	/* Ensure this CPU doesn't handle any more interrupts. */
 	err = __cpu_disable();
 	if (err < 0)
@@ -127,6 +135,10 @@
 	cpumask_t old_allowed, tmp;
 	void *hcpu = (void *)(long)cpu;
 	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
+	struct take_cpu_down_param tcd_param = {
+		.mod = mod,
+		.hcpu = hcpu,
+	};
 
 	if (num_online_cpus() == 1)
 		return -EBUSY;
@@ -153,7 +165,7 @@
 	set_cpus_allowed(current, tmp);
 
 	mutex_lock(&cpu_bitmask_lock);
-	p = __stop_machine_run(take_cpu_down, NULL, cpu);
+	p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
 	mutex_unlock(&cpu_bitmask_lock);
 
 	if (IS_ERR(p) || cpu_online(cpu)) {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 824b1c0..57e6448 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -516,7 +516,7 @@
 	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
 	envp[i] = NULL;
 
-	call_usermodehelper(argv[0], argv, envp, 0);
+	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
 	kfree(pathbuf);
 }
 
@@ -2138,6 +2138,9 @@
 static int cpuset_handle_cpuhp(struct notifier_block *nb,
 				unsigned long phase, void *cpu)
 {
+	if (phase == CPU_DYING || phase == CPU_DYING_FROZEN)
+		return NOTIFY_DONE;
+
 	common_cpu_mem_hotplug_unplug();
 	return 0;
 }
diff --git a/kernel/exit.c b/kernel/exit.c
index 5762669..464c2b1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -31,6 +31,7 @@
 #include <linux/mempolicy.h>
 #include <linux/taskstats_kern.h>
 #include <linux/delayacct.h>
+#include <linux/freezer.h>
 #include <linux/cpuset.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
@@ -44,6 +45,7 @@
 #include <linux/resource.h>
 #include <linux/blkdev.h>
 #include <linux/task_io_accounting_ops.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -387,6 +389,11 @@
 	 * they would be locked into memory.
 	 */
 	exit_mm(current);
+	/*
+	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
+	 * or suspend transition begins right now.
+	 */
+	current->flags |= PF_NOFREEZE;
 
 	set_special_pids(1, 1);
 	proc_clear_tty(current);
@@ -588,6 +595,8 @@
 	tsk->mm = NULL;
 	up_read(&mm->mmap_sem);
 	enter_lazy_tlb(mm, current);
+	/* We don't want this task to be frozen prematurely */
+	clear_freeze_flag(tsk);
 	task_unlock(tsk);
 	mmput(mm);
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 7c5c588..4698389 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -334,6 +334,8 @@
 	atomic_set(&mm->mm_count, 1);
 	init_rwsem(&mm->mmap_sem);
 	INIT_LIST_HEAD(&mm->mmlist);
+	mm->flags = (current->mm) ? current->mm->flags
+				  : MMF_DUMP_FILTER_DEFAULT;
 	mm->core_waiters = 0;
 	mm->nr_ptes = 0;
 	set_mm_counter(mm, file_rss, 0);
@@ -923,7 +925,7 @@
 {
 	unsigned long new_flags = p->flags;
 
-	new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
+	new_flags &= ~PF_SUPERPRIV;
 	new_flags |= PF_FORKNOEXEC;
 	if (!(clone_flags & CLONE_PTRACE))
 		p->ptrace = 0;
diff --git a/kernel/futex.c b/kernel/futex.c
index 5c3f45d..a124250 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -346,15 +346,20 @@
 	vma = find_vma(mm, address);
 	if (vma && address >= vma->vm_start &&
 	    (vma->vm_flags & VM_WRITE)) {
-		switch (handle_mm_fault(mm, vma, address, 1)) {
-		case VM_FAULT_MINOR:
+		int fault;
+		fault = handle_mm_fault(mm, vma, address, 1);
+		if (unlikely((fault & VM_FAULT_ERROR))) {
+#if 0
+			/* XXX: let's do this when we verify it is OK */
+			if (ret & VM_FAULT_OOM)
+				ret = -ENOMEM;
+#endif
+		} else {
 			ret = 0;
-			current->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			ret = 0;
-			current->maj_flt++;
-			break;
+			if (fault & VM_FAULT_MAJOR)
+				current->maj_flt++;
+			else
+				current->min_flt++;
 		}
 	}
 	if (!fshared)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 23c03f4..72d0342 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1406,7 +1406,7 @@
 static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 					unsigned long action, void *hcpu)
 {
-	long cpu = (long)hcpu;
+	unsigned int cpu = (long)hcpu;
 
 	switch (action) {
 
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 0d66247..474219a 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -152,7 +152,7 @@
 /* Lookup the address for this symbol. Returns 0 if not found. */
 unsigned long kallsyms_lookup_name(const char *name)
 {
-	char namebuf[KSYM_NAME_LEN+1];
+	char namebuf[KSYM_NAME_LEN];
 	unsigned long i;
 	unsigned int off;
 
@@ -248,7 +248,7 @@
 {
 	const char *msym;
 
-	namebuf[KSYM_NAME_LEN] = 0;
+	namebuf[KSYM_NAME_LEN - 1] = 0;
 	namebuf[0] = 0;
 
 	if (is_ksym_addr(addr)) {
@@ -265,7 +265,7 @@
 	/* see if it's in a module */
 	msym = module_address_lookup(addr, symbolsize, offset, modname);
 	if (msym)
-		return strncpy(namebuf, msym, KSYM_NAME_LEN);
+		return strncpy(namebuf, msym, KSYM_NAME_LEN - 1);
 
 	return NULL;
 }
@@ -273,7 +273,7 @@
 int lookup_symbol_name(unsigned long addr, char *symname)
 {
 	symname[0] = '\0';
-	symname[KSYM_NAME_LEN] = '\0';
+	symname[KSYM_NAME_LEN - 1] = '\0';
 
 	if (is_ksym_addr(addr)) {
 		unsigned long pos;
@@ -291,7 +291,7 @@
 			unsigned long *offset, char *modname, char *name)
 {
 	name[0] = '\0';
-	name[KSYM_NAME_LEN] = '\0';
+	name[KSYM_NAME_LEN - 1] = '\0';
 
 	if (is_ksym_addr(addr)) {
 		unsigned long pos;
@@ -312,7 +312,7 @@
 	char *modname;
 	const char *name;
 	unsigned long offset, size;
-	char namebuf[KSYM_NAME_LEN+1];
+	char namebuf[KSYM_NAME_LEN];
 
 	name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
 	if (!name)
@@ -342,8 +342,8 @@
 	unsigned long value;
 	unsigned int nameoff; /* If iterating in core kernel symbols */
 	char type;
-	char name[KSYM_NAME_LEN+1];
-	char module_name[MODULE_NAME_LEN + 1];
+	char name[KSYM_NAME_LEN];
+	char module_name[MODULE_NAME_LEN];
 	int exported;
 };
 
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 4d32eb0..beedbdc 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -33,6 +33,8 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/resource.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
 #include <asm/uaccess.h>
 
 extern int max_threads;
@@ -119,9 +121,10 @@
 	char **argv;
 	char **envp;
 	struct key *ring;
-	int wait;
+	enum umh_wait wait;
 	int retval;
 	struct file *stdin;
+	void (*cleanup)(char **argv, char **envp);
 };
 
 /*
@@ -180,6 +183,14 @@
 	do_exit(0);
 }
 
+void call_usermodehelper_freeinfo(struct subprocess_info *info)
+{
+	if (info->cleanup)
+		(*info->cleanup)(info->argv, info->envp);
+	kfree(info);
+}
+EXPORT_SYMBOL(call_usermodehelper_freeinfo);
+
 /* Keventd can't block, but this (a child) can. */
 static int wait_for_helper(void *data)
 {
@@ -216,8 +227,8 @@
 			sub_info->retval = ret;
 	}
 
-	if (sub_info->wait < 0)
-		kfree(sub_info);
+	if (sub_info->wait == UMH_NO_WAIT)
+		call_usermodehelper_freeinfo(sub_info);
 	else
 		complete(sub_info->complete);
 	return 0;
@@ -229,102 +240,185 @@
 	struct subprocess_info *sub_info =
 		container_of(work, struct subprocess_info, work);
 	pid_t pid;
-	int wait = sub_info->wait;
+	enum umh_wait wait = sub_info->wait;
 
 	/* CLONE_VFORK: wait until the usermode helper has execve'd
 	 * successfully We need the data structures to stay around
 	 * until that is done.  */
-	if (wait)
+	if (wait == UMH_WAIT_PROC || wait == UMH_NO_WAIT)
 		pid = kernel_thread(wait_for_helper, sub_info,
 				    CLONE_FS | CLONE_FILES | SIGCHLD);
 	else
 		pid = kernel_thread(____call_usermodehelper, sub_info,
 				    CLONE_VFORK | SIGCHLD);
 
-	if (wait < 0)
-		return;
+	switch (wait) {
+	case UMH_NO_WAIT:
+		break;
 
-	if (pid < 0) {
+	case UMH_WAIT_PROC:
+		if (pid > 0)
+			break;
 		sub_info->retval = pid;
+		/* FALLTHROUGH */
+
+	case UMH_WAIT_EXEC:
 		complete(sub_info->complete);
-	} else if (!wait)
-		complete(sub_info->complete);
+	}
 }
 
-/**
- * call_usermodehelper_keys - start a usermode application
- * @path: pathname for the application
- * @argv: null-terminated argument list
- * @envp: null-terminated environment list
- * @session_keyring: session keyring for process (NULL for an empty keyring)
- * @wait: wait for the application to finish and return status.
- *        when -1 don't wait at all, but you get no useful error back when
- *        the program couldn't be exec'ed. This makes it safe to call
- *        from interrupt context.
- *
- * Runs a user-space application.  The application is started
- * asynchronously if wait is not set, and runs as a child of keventd.
- * (ie. it runs with full root capabilities).
- *
- * Must be called from process context.  Returns a negative error code
- * if program was not execed successfully, or 0.
+#ifdef CONFIG_PM
+/*
+ * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
+ * (used for preventing user land processes from being created after the user
+ * land has been frozen during a system-wide hibernation or suspend operation).
  */
-int call_usermodehelper_keys(char *path, char **argv, char **envp,
-			     struct key *session_keyring, int wait)
+static int usermodehelper_disabled;
+
+/* Number of helpers running */
+static atomic_t running_helpers = ATOMIC_INIT(0);
+
+/*
+ * Wait queue head used by usermodehelper_pm_callback() to wait for all running
+ * helpers to finish.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
+
+/*
+ * Time to wait for running_helpers to become zero before the setting of
+ * usermodehelper_disabled in usermodehelper_pm_callback() fails
+ */
+#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)
+
+static int usermodehelper_pm_callback(struct notifier_block *nfb,
+					unsigned long action,
+					void *ignored)
 {
-	DECLARE_COMPLETION_ONSTACK(done);
+	long retval;
+
+	switch (action) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		usermodehelper_disabled = 1;
+		smp_mb();
+		/*
+		 * From now on call_usermodehelper_exec() won't start any new
+		 * helpers, so it is sufficient if running_helpers turns out to
+		 * be zero at one point (it may be increased later, but that
+		 * doesn't matter).
+		 */
+		retval = wait_event_timeout(running_helpers_waitq,
+					atomic_read(&running_helpers) == 0,
+					RUNNING_HELPERS_TIMEOUT);
+		if (retval) {
+			return NOTIFY_OK;
+		} else {
+			usermodehelper_disabled = 0;
+			return NOTIFY_BAD;
+		}
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		usermodehelper_disabled = 0;
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static void helper_lock(void)
+{
+	atomic_inc(&running_helpers);
+	smp_mb__after_atomic_inc();
+}
+
+static void helper_unlock(void)
+{
+	if (atomic_dec_and_test(&running_helpers))
+		wake_up(&running_helpers_waitq);
+}
+
+static void register_pm_notifier_callback(void)
+{
+	pm_notifier(usermodehelper_pm_callback, 0);
+}
+#else /* CONFIG_PM */
+#define usermodehelper_disabled	0
+
+static inline void helper_lock(void) {}
+static inline void helper_unlock(void) {}
+static inline void register_pm_notifier_callback(void) {}
+#endif /* CONFIG_PM */
+
+/**
+ * call_usermodehelper_setup - prepare to call a usermode helper
+ * @path: path to usermode executable
+ * @argv: arg vector for process
+ * @envp: environment for process
+ *
+ * Returns either NULL on allocation failure, or a subprocess_info
+ * structure.  This should be passed to call_usermodehelper_exec to
+ * exec the process and free the structure.
+ */
+struct subprocess_info *call_usermodehelper_setup(char *path,
+						  char **argv, char **envp)
+{
 	struct subprocess_info *sub_info;
-	int retval;
-
-	if (!khelper_wq)
-		return -EBUSY;
-
-	if (path[0] == '\0')
-		return 0;
-
 	sub_info = kzalloc(sizeof(struct subprocess_info),  GFP_ATOMIC);
 	if (!sub_info)
-		return -ENOMEM;
+		goto out;
 
 	INIT_WORK(&sub_info->work, __call_usermodehelper);
-	sub_info->complete = &done;
 	sub_info->path = path;
 	sub_info->argv = argv;
 	sub_info->envp = envp;
-	sub_info->ring = session_keyring;
-	sub_info->wait = wait;
 
-	queue_work(khelper_wq, &sub_info->work);
-	if (wait < 0) /* task has freed sub_info */
-		return 0;
-	wait_for_completion(&done);
-	retval = sub_info->retval;
-	kfree(sub_info);
-	return retval;
+  out:
+	return sub_info;
 }
-EXPORT_SYMBOL(call_usermodehelper_keys);
+EXPORT_SYMBOL(call_usermodehelper_setup);
 
-int call_usermodehelper_pipe(char *path, char **argv, char **envp,
-			     struct file **filp)
+/**
+ * call_usermodehelper_setkeys - set the session keys for usermode helper
+ * @info: a subprocess_info returned by call_usermodehelper_setup
+ * @session_keyring: the session keyring for the process
+ */
+void call_usermodehelper_setkeys(struct subprocess_info *info,
+				 struct key *session_keyring)
 {
-	DECLARE_COMPLETION(done);
-	struct subprocess_info sub_info = {
-		.work		= __WORK_INITIALIZER(sub_info.work,
-						     __call_usermodehelper),
-		.complete	= &done,
-		.path		= path,
-		.argv		= argv,
-		.envp		= envp,
-		.retval		= 0,
-	};
+	info->ring = session_keyring;
+}
+EXPORT_SYMBOL(call_usermodehelper_setkeys);
+
+/**
+ * call_usermodehelper_setcleanup - set a cleanup function
+ * @info: a subprocess_info returned by call_usermodehelper_setup
+ * @cleanup: a cleanup function
+ *
+ * The cleanup function is called just before the subprocess_info is
+ * freed.  This can be used for freeing the argv and envp.  The
+ * function must be runnable in either a process context or the
+ * context in which call_usermodehelper_exec is called.
+ */
+void call_usermodehelper_setcleanup(struct subprocess_info *info,
+				    void (*cleanup)(char **argv, char **envp))
+{
+	info->cleanup = cleanup;
+}
+EXPORT_SYMBOL(call_usermodehelper_setcleanup);
+
+/**
+ * call_usermodehelper_stdinpipe - set up a pipe to be used for stdin
+ * @sub_info: a subprocess_info returned by call_usermodehelper_setup
+ * @filp: set to the write-end of a pipe
+ *
+ * This constructs a pipe, sets the read end to be the stdin of the
+ * subprocess, and returns the write-end in *@filp.
+ */
+int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
+				  struct file **filp)
+{
 	struct file *f;
 
-	if (!khelper_wq)
-		return -EBUSY;
-
-	if (path[0] == '\0')
-		return 0;
-
 	f = create_write_pipe();
 	if (IS_ERR(f))
 		return PTR_ERR(f);
@@ -335,11 +429,87 @@
 		free_write_pipe(*filp);
 		return PTR_ERR(f);
 	}
-	sub_info.stdin = f;
+	sub_info->stdin = f;
 
-	queue_work(khelper_wq, &sub_info.work);
+	return 0;
+}
+EXPORT_SYMBOL(call_usermodehelper_stdinpipe);
+
+/**
+ * call_usermodehelper_exec - start a usermode application
+ * @sub_info: information about the subprocess
+ * @wait: wait for the application to finish and return status.
+ *        when UMH_NO_WAIT don't wait at all, but you get no useful error
+ *        back when the program couldn't be exec'ed. This makes it safe to
+ *        call from interrupt context.
+ *
+ * Runs a user-space application.  The application is started
+ * asynchronously if wait is not set, and runs as a child of keventd
+ * (i.e. it runs with full root capabilities).
+ */
+int call_usermodehelper_exec(struct subprocess_info *sub_info,
+			     enum umh_wait wait)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	int retval;
+
+	helper_lock();
+	if (sub_info->path[0] == '\0') {
+		retval = 0;
+		goto out;
+	}
+
+	if (!khelper_wq || usermodehelper_disabled) {
+		retval = -EBUSY;
+		goto out;
+	}
+
+	sub_info->complete = &done;
+	sub_info->wait = wait;
+
+	queue_work(khelper_wq, &sub_info->work);
+	if (wait == UMH_NO_WAIT) /* task has freed sub_info */
+		return 0;
 	wait_for_completion(&done);
-	return sub_info.retval;
+	retval = sub_info->retval;
+
+  out:
+	call_usermodehelper_freeinfo(sub_info);
+	helper_unlock();
+	return retval;
+}
+EXPORT_SYMBOL(call_usermodehelper_exec);
+
+/**
+ * call_usermodehelper_pipe - call a usermode helper process with a pipe stdin
+ * @path: path to usermode executable
+ * @argv: arg vector for process
+ * @envp: environment for process
+ * @filp: set to the write-end of a pipe
+ *
+ * This is a simple wrapper which executes a usermode-helper function
+ * with a pipe as stdin.  It is implemented entirely in terms of
+ * lower-level call_usermodehelper_* functions.
+ */
+int call_usermodehelper_pipe(char *path, char **argv, char **envp,
+			     struct file **filp)
+{
+	struct subprocess_info *sub_info;
+	int ret;
+
+	sub_info = call_usermodehelper_setup(path, argv, envp);
+	if (sub_info == NULL)
+		return -ENOMEM;
+
+	ret = call_usermodehelper_stdinpipe(sub_info, filp);
+	if (ret < 0)
+		goto out;
+
+	return call_usermodehelper_exec(sub_info, 1);
+
+  out:
+	call_usermodehelper_freeinfo(sub_info);
+	return ret;
 }
 EXPORT_SYMBOL(call_usermodehelper_pipe);
 
@@ -347,4 +517,5 @@
 {
 	khelper_wq = create_singlethread_workqueue("khelper");
 	BUG_ON(!khelper_wq);
+	register_pm_notifier_callback();
 }
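
For reference, a minimal, hedged usage sketch (not part of this patch) of how a
caller would use the new split API in place of call_usermodehelper_keys(); the
helper path, arguments and environment below are illustrative only:

#include <linux/kmod.h>
#include <linux/key.h>
#include <linux/errno.h>

/*
 * Hedged usage sketch -- not part of this patch.  Helper path, argv and
 * envp are made-up values.
 */
static int example_run_helper(struct key *keyring)
{
	char *argv[] = { "/sbin/example-helper", "--flag", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
	struct subprocess_info *info;

	info = call_usermodehelper_setup(argv[0], argv, envp);
	if (info == NULL)
		return -ENOMEM;

	/* optional: give the helper a session keyring before exec */
	call_usermodehelper_setkeys(info, keyring);

	/* queue the helper and wait for it to exit; info is freed for us */
	return call_usermodehelper_exec(info, UMH_WAIT_PROC);
}
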
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9e47d8c..3e9f513 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -675,9 +675,18 @@
 	.priority = 0x7fffffff /* we need to be notified first */
 };
 
+unsigned long __weak arch_deref_entry_point(void *entry)
+{
+	return (unsigned long)entry;
+}
 
 int __kprobes register_jprobe(struct jprobe *jp)
 {
+	unsigned long addr = arch_deref_entry_point(jp->entry);
+
+	if (!kernel_text_address(addr))
+		return -EINVAL;
+
 	/* Todo: Verify probepoint is a function entry point */
 	jp->kp.pre_handler = setjmp_pre_handler;
 	jp->kp.break_handler = longjmp_break_handler;
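
The new __weak hook exists so that architectures whose function pointers refer
to descriptors rather than to code can still have register_jprobe() validate
the real text address.  A hedged sketch of such an override follows; the
descriptor layout is illustrative and does not match any particular ABI:

/*
 * Hedged sketch -- not part of this patch.  An architecture with
 * function descriptors would provide its own arch_deref_entry_point()
 * so kernel_text_address() is checked against the actual entry point.
 */
struct example_func_desc {
	unsigned long entry;		/* address of the code in .text */
	unsigned long toc;		/* TOC/GP pointer */
};

unsigned long arch_deref_entry_point(void *entry)
{
	return ((struct example_func_desc *)entry)->entry;
}
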
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 559deca..2565e1b 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -62,6 +62,28 @@
 KERNEL_ATTR_RO(kexec_crash_loaded);
 #endif /* CONFIG_KEXEC */
 
+/*
+ * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
+ */
+extern const char __start_notes __attribute__((weak));
+extern const char __stop_notes __attribute__((weak));
+#define	notes_size (&__stop_notes - &__start_notes)
+
+static ssize_t notes_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+			  char *buf, loff_t off, size_t count)
+{
+	memcpy(buf, &__start_notes + off, count);
+	return count;
+}
+
+static struct bin_attribute notes_attr = {
+	.attr = {
+		.name = "notes",
+		.mode = S_IRUGO,
+	},
+	.read = &notes_read,
+};
+
 decl_subsys(kernel, NULL, NULL);
 EXPORT_SYMBOL_GPL(kernel_subsys);
 
@@ -88,6 +110,12 @@
 		error = sysfs_create_group(&kernel_subsys.kobj,
 					   &kernel_attr_group);
 
+	if (!error && notes_size > 0) {
+		notes_attr.size = notes_size;
+		error = sysfs_create_bin_file(&kernel_subsys.kobj,
+					      &notes_attr);
+	}
+
 	return error;
 }
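
The new attribute simply mirrors the kernel image's .notes section.  A hedged
userspace sketch (not part of this patch) that dumps the raw ELF note records:

/*
 * Hedged userspace sketch -- not part of this patch: dump the raw ELF
 * note records exported at /sys/kernel/notes.
 */
#include <stdio.h>

int main(void)
{
	unsigned char buf[4096];
	size_t n;
	FILE *f = fopen("/sys/kernel/notes", "rb");

	if (!f) {
		perror("/sys/kernel/notes");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);	/* raw note records, as in vmlinux */
	fclose(f);
	return 0;
}
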
 
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 1a5ff22..734da57 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -5,7 +5,8 @@
  *
  * Started by Ingo Molnar:
  *
- *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  *
  * this code maps all the lock dependencies as they occur in a live kernel
  * and will warn about the following classes of locking bugs:
@@ -37,11 +38,26 @@
 #include <linux/debug_locks.h>
 #include <linux/irqflags.h>
 #include <linux/utsname.h>
+#include <linux/hash.h>
 
 #include <asm/sections.h>
 
 #include "lockdep_internals.h"
 
+#ifdef CONFIG_PROVE_LOCKING
+int prove_locking = 1;
+module_param(prove_locking, int, 0644);
+#else
+#define prove_locking 0
+#endif
+
+#ifdef CONFIG_LOCK_STAT
+int lock_stat = 1;
+module_param(lock_stat, int, 0644);
+#else
+#define lock_stat 0
+#endif
+
 /*
  * lockdep_lock: protects the lockdep graph, the hashes and the
  *               class/list/hash allocators.
@@ -96,23 +112,6 @@
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
 /*
- * Allocate a lockdep entry. (assumes the graph_lock held, returns
- * with NULL on failure)
- */
-static struct lock_list *alloc_list_entry(void)
-{
-	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
-		if (!debug_locks_off_graph_unlock())
-			return NULL;
-
-		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
-		printk("turning off the locking correctness validator.\n");
-		return NULL;
-	}
-	return list_entries + nr_list_entries++;
-}
-
-/*
  * All data structures here are protected by the global debug_lock.
  *
  * Mutex key structs only get allocated, once during bootup, and never
@@ -121,6 +120,117 @@
 unsigned long nr_lock_classes;
 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
+#ifdef CONFIG_LOCK_STAT
+static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+
+static int lock_contention_point(struct lock_class *class, unsigned long ip)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
+		if (class->contention_point[i] == 0) {
+			class->contention_point[i] = ip;
+			break;
+		}
+		if (class->contention_point[i] == ip)
+			break;
+	}
+
+	return i;
+}
+
+static void lock_time_inc(struct lock_time *lt, s64 time)
+{
+	if (time > lt->max)
+		lt->max = time;
+
+	if (time < lt->min || !lt->min)
+		lt->min = time;
+
+	lt->total += time;
+	lt->nr++;
+}
+
+static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
+{
+	dst->min += src->min;
+	dst->max += src->max;
+	dst->total += src->total;
+	dst->nr += src->nr;
+}
+
+struct lock_class_stats lock_stats(struct lock_class *class)
+{
+	struct lock_class_stats stats;
+	int cpu, i;
+
+	memset(&stats, 0, sizeof(struct lock_class_stats));
+	for_each_possible_cpu(cpu) {
+		struct lock_class_stats *pcs =
+			&per_cpu(lock_stats, cpu)[class - lock_classes];
+
+		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
+			stats.contention_point[i] += pcs->contention_point[i];
+
+		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
+		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
+
+		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
+		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
+
+		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
+			stats.bounces[i] += pcs->bounces[i];
+	}
+
+	return stats;
+}
+
+void clear_lock_stats(struct lock_class *class)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct lock_class_stats *cpu_stats =
+			&per_cpu(lock_stats, cpu)[class - lock_classes];
+
+		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
+	}
+	memset(class->contention_point, 0, sizeof(class->contention_point));
+}
+
+static struct lock_class_stats *get_lock_stats(struct lock_class *class)
+{
+	return &get_cpu_var(lock_stats)[class - lock_classes];
+}
+
+static void put_lock_stats(struct lock_class_stats *stats)
+{
+	put_cpu_var(lock_stats);
+}
+
+static void lock_release_holdtime(struct held_lock *hlock)
+{
+	struct lock_class_stats *stats;
+	s64 holdtime;
+
+	if (!lock_stat)
+		return;
+
+	holdtime = sched_clock() - hlock->holdtime_stamp;
+
+	stats = get_lock_stats(hlock->class);
+	if (hlock->read)
+		lock_time_inc(&stats->read_holdtime, holdtime);
+	else
+		lock_time_inc(&stats->write_holdtime, holdtime);
+	put_lock_stats(stats);
+}
+#else
+static inline void lock_release_holdtime(struct held_lock *hlock)
+{
+}
+#endif
+
 /*
  * We keep a global list of all lock classes. The list only grows,
  * never shrinks. The list is only accessed with the lockdep
@@ -133,24 +243,18 @@
  */
 #define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
 #define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
-#define CLASSHASH_MASK		(CLASSHASH_SIZE - 1)
-#define __classhashfn(key)	((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
+#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
 #define classhashentry(key)	(classhash_table + __classhashfn((key)))
 
 static struct list_head classhash_table[CLASSHASH_SIZE];
 
-unsigned long nr_lock_chains;
-static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
-
 /*
  * We put the lock dependency chains into a hash-table as well, to cache
  * their existence:
  */
 #define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
 #define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
-#define CHAINHASH_MASK		(CHAINHASH_SIZE - 1)
-#define __chainhashfn(chain) \
-		(((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
+#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
 #define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
 
 static struct list_head chainhash_table[CHAINHASH_SIZE];
@@ -223,26 +327,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-
-static int hardirq_verbose(struct lock_class *class)
-{
-#if HARDIRQ_VERBOSE
-	return class_filter(class);
-#endif
-	return 0;
-}
-
-static int softirq_verbose(struct lock_class *class)
-{
-#if SOFTIRQ_VERBOSE
-	return class_filter(class);
-#endif
-	return 0;
-}
-
-#endif
-
 /*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the graph_lock.
@@ -291,6 +375,11 @@
  * about it later on, in lockdep_info().
  */
 static int lockdep_init_error;
+static unsigned long lockdep_init_trace_data[20];
+static struct stack_trace lockdep_init_trace = {
+	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
+	.entries = lockdep_init_trace_data,
+};
 
 /*
  * Various lockdep statistics:
@@ -379,7 +468,7 @@
 
 static void print_lock_name(struct lock_class *class)
 {
-	char str[KSYM_NAME_LEN + 1], c1, c2, c3, c4;
+	char str[KSYM_NAME_LEN], c1, c2, c3, c4;
 	const char *name;
 
 	get_usage_chars(class, &c1, &c2, &c3, &c4);
@@ -401,7 +490,7 @@
 static void print_lockdep_cache(struct lockdep_map *lock)
 {
 	const char *name;
-	char str[KSYM_NAME_LEN + 1];
+	char str[KSYM_NAME_LEN];
 
 	name = lock->name;
 	if (!name)
@@ -482,6 +571,262 @@
 	}
 }
 
+static void print_kernel_version(void)
+{
+	printk("%s %.*s\n", init_utsname()->release,
+		(int)strcspn(init_utsname()->version, " "),
+		init_utsname()->version);
+}
+
+static int very_verbose(struct lock_class *class)
+{
+#if VERY_VERBOSE
+	return class_filter(class);
+#endif
+	return 0;
+}
+
+/*
+ * Is this the address of a static object:
+ */
+static int static_obj(void *obj)
+{
+	unsigned long start = (unsigned long) &_stext,
+		      end   = (unsigned long) &_end,
+		      addr  = (unsigned long) obj;
+#ifdef CONFIG_SMP
+	int i;
+#endif
+
+	/*
+	 * static variable?
+	 */
+	if ((addr >= start) && (addr < end))
+		return 1;
+
+#ifdef CONFIG_SMP
+	/*
+	 * percpu var?
+	 */
+	for_each_possible_cpu(i) {
+		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
+		end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
+					+ per_cpu_offset(i);
+
+		if ((addr >= start) && (addr < end))
+			return 1;
+	}
+#endif
+
+	/*
+	 * module var?
+	 */
+	return is_module_address(addr);
+}
+
+/*
+ * To make lock name printouts unique, we calculate a unique
+ * class->name_version generation counter:
+ */
+static int count_matching_names(struct lock_class *new_class)
+{
+	struct lock_class *class;
+	int count = 0;
+
+	if (!new_class->name)
+		return 0;
+
+	list_for_each_entry(class, &all_lock_classes, lock_entry) {
+		if (new_class->key - new_class->subclass == class->key)
+			return class->name_version;
+		if (class->name && !strcmp(class->name, new_class->name))
+			count = max(count, class->name_version);
+	}
+
+	return count + 1;
+}
+
+/*
+ * Register a lock's class in the hash-table, if the class is not present
+ * yet. Otherwise we look it up. We cache the result in the lock object
+ * itself, so actual lookup of the hash should be once per lock object.
+ */
+static inline struct lock_class *
+look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+{
+	struct lockdep_subclass_key *key;
+	struct list_head *hash_head;
+	struct lock_class *class;
+
+#ifdef CONFIG_DEBUG_LOCKDEP
+	/*
+	 * If the architecture calls into lockdep before initializing
+	 * the hashes then we'll warn about it later. (we cannot printk
+	 * right now)
+	 */
+	if (unlikely(!lockdep_initialized)) {
+		lockdep_init();
+		lockdep_init_error = 1;
+		save_stack_trace(&lockdep_init_trace);
+	}
+#endif
+
+	/*
+	 * Static locks do not have their class-keys yet - for them the key
+	 * is the lock object itself:
+	 */
+	if (unlikely(!lock->key))
+		lock->key = (void *)lock;
+
+	/*
+	 * NOTE: the class-key must be unique. For dynamic locks, a static
+	 * lock_class_key variable is passed in through the mutex_init()
+	 * (or spin_lock_init()) call - which acts as the key. For static
+	 * locks we use the lock object itself as the key.
+	 */
+	BUILD_BUG_ON(sizeof(struct lock_class_key) >
+			sizeof(struct lockdep_map));
+
+	key = lock->key->subkeys + subclass;
+
+	hash_head = classhashentry(key);
+
+	/*
+	 * We can walk the hash lockfree, because the hash only
+	 * grows, and we are careful when adding entries to the end:
+	 */
+	list_for_each_entry(class, hash_head, hash_entry) {
+		if (class->key == key) {
+			WARN_ON_ONCE(class->name != lock->name);
+			return class;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Register a lock's class in the hash-table, if the class is not present
+ * yet. Otherwise we look it up. We cache the result in the lock object
+ * itself, so actual lookup of the hash should be once per lock object.
+ */
+static inline struct lock_class *
+register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
+{
+	struct lockdep_subclass_key *key;
+	struct list_head *hash_head;
+	struct lock_class *class;
+	unsigned long flags;
+
+	class = look_up_lock_class(lock, subclass);
+	if (likely(class))
+		return class;
+
+	/*
+	 * Debug-check: all keys must be persistent!
+ 	 */
+	if (!static_obj(lock->key)) {
+		debug_locks_off();
+		printk("INFO: trying to register non-static key.\n");
+		printk("the code is fine but needs lockdep annotation.\n");
+		printk("turning off the locking correctness validator.\n");
+		dump_stack();
+
+		return NULL;
+	}
+
+	key = lock->key->subkeys + subclass;
+	hash_head = classhashentry(key);
+
+	raw_local_irq_save(flags);
+	if (!graph_lock()) {
+		raw_local_irq_restore(flags);
+		return NULL;
+	}
+	/*
+	 * We have to do the hash-walk again, to avoid races
+	 * with another CPU:
+	 */
+	list_for_each_entry(class, hash_head, hash_entry)
+		if (class->key == key)
+			goto out_unlock_set;
+	/*
+	 * Allocate a new key from the static array, and add it to
+	 * the hash:
+	 */
+	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
+		if (!debug_locks_off_graph_unlock()) {
+			raw_local_irq_restore(flags);
+			return NULL;
+		}
+		raw_local_irq_restore(flags);
+
+		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
+		printk("turning off the locking correctness validator.\n");
+		return NULL;
+	}
+	class = lock_classes + nr_lock_classes++;
+	debug_atomic_inc(&nr_unused_locks);
+	class->key = key;
+	class->name = lock->name;
+	class->subclass = subclass;
+	INIT_LIST_HEAD(&class->lock_entry);
+	INIT_LIST_HEAD(&class->locks_before);
+	INIT_LIST_HEAD(&class->locks_after);
+	class->name_version = count_matching_names(class);
+	/*
+	 * We use RCU's safe list-add method to make
+	 * parallel walking of the hash-list safe:
+	 */
+	list_add_tail_rcu(&class->hash_entry, hash_head);
+
+	if (verbose(class)) {
+		graph_unlock();
+		raw_local_irq_restore(flags);
+
+		printk("\nnew class %p: %s", class->key, class->name);
+		if (class->name_version > 1)
+			printk("#%d", class->name_version);
+		printk("\n");
+		dump_stack();
+
+		raw_local_irq_save(flags);
+		if (!graph_lock()) {
+			raw_local_irq_restore(flags);
+			return NULL;
+		}
+	}
+out_unlock_set:
+	graph_unlock();
+	raw_local_irq_restore(flags);
+
+	if (!subclass || force)
+		lock->class_cache = class;
+
+	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
+		return NULL;
+
+	return class;
+}
+
+#ifdef CONFIG_PROVE_LOCKING
+/*
+ * Allocate a lockdep entry. (assumes the graph_lock held, returns
+ * with NULL on failure)
+ */
+static struct lock_list *alloc_list_entry(void)
+{
+	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
+		if (!debug_locks_off_graph_unlock())
+			return NULL;
+
+		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
+		printk("turning off the locking correctness validator.\n");
+		return NULL;
+	}
+	return list_entries + nr_list_entries++;
+}
+
 /*
  * Add a new dependency to the head of the list:
  */
@@ -542,13 +887,6 @@
 	return 0;
 }
 
-static void print_kernel_version(void)
-{
-	printk("%s %.*s\n", init_utsname()->release,
-		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version);
-}
-
 /*
  * When a circular dependency is detected, print the
  * header first:
@@ -640,15 +978,7 @@
 	return 1;
 }
 
-static int very_verbose(struct lock_class *class)
-{
-#if VERY_VERBOSE
-	return class_filter(class);
-#endif
-	return 0;
-}
 #ifdef CONFIG_TRACE_IRQFLAGS
-
 /*
  * Forwards and backwards subgraph searching, for the purposes of
  * proving that two subgraphs can be connected by a new dependency
@@ -821,6 +1151,78 @@
 			bit_backwards, bit_forwards, irqclass);
 }
 
+static int
+check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
+		struct held_lock *next)
+{
+	/*
+	 * Prove that the new dependency does not connect a hardirq-safe
+	 * lock with a hardirq-unsafe lock - to achieve this we search
+	 * the backwards-subgraph starting at <prev>, and the
+	 * forwards-subgraph starting at <next>:
+	 */
+	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
+					LOCK_ENABLED_HARDIRQS, "hard"))
+		return 0;
+
+	/*
+	 * Prove that the new dependency does not connect a hardirq-safe-read
+	 * lock with a hardirq-unsafe lock - to achieve this we search
+	 * the backwards-subgraph starting at <prev>, and the
+	 * forwards-subgraph starting at <next>:
+	 */
+	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
+					LOCK_ENABLED_HARDIRQS, "hard-read"))
+		return 0;
+
+	/*
+	 * Prove that the new dependency does not connect a softirq-safe
+	 * lock with a softirq-unsafe lock - to achieve this we search
+	 * the backwards-subgraph starting at <prev>, and the
+	 * forwards-subgraph starting at <next>:
+	 */
+	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
+					LOCK_ENABLED_SOFTIRQS, "soft"))
+		return 0;
+	/*
+	 * Prove that the new dependency does not connect a softirq-safe-read
+	 * lock with a softirq-unsafe lock - to achieve this we search
+	 * the backwards-subgraph starting at <prev>, and the
+	 * forwards-subgraph starting at <next>:
+	 */
+	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
+					LOCK_ENABLED_SOFTIRQS, "soft"))
+		return 0;
+
+	return 1;
+}
+
+static void inc_chains(void)
+{
+	if (current->hardirq_context)
+		nr_hardirq_chains++;
+	else {
+		if (current->softirq_context)
+			nr_softirq_chains++;
+		else
+			nr_process_chains++;
+	}
+}
+
+#else
+
+static inline int
+check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
+		struct held_lock *next)
+{
+	return 1;
+}
+
+static inline void inc_chains(void)
+{
+	nr_process_chains++;
+}
+
 #endif
 
 static int
@@ -922,47 +1324,10 @@
 	if (!(check_noncircular(next->class, 0)))
 		return print_circular_bug_tail();
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-	/*
-	 * Prove that the new dependency does not connect a hardirq-safe
-	 * lock with a hardirq-unsafe lock - to achieve this we search
-	 * the backwards-subgraph starting at <prev>, and the
-	 * forwards-subgraph starting at <next>:
-	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
-					LOCK_ENABLED_HARDIRQS, "hard"))
+	if (!check_prev_add_irq(curr, prev, next))
 		return 0;
 
 	/*
-	 * Prove that the new dependency does not connect a hardirq-safe-read
-	 * lock with a hardirq-unsafe lock - to achieve this we search
-	 * the backwards-subgraph starting at <prev>, and the
-	 * forwards-subgraph starting at <next>:
-	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
-					LOCK_ENABLED_HARDIRQS, "hard-read"))
-		return 0;
-
-	/*
-	 * Prove that the new dependency does not connect a softirq-safe
-	 * lock with a softirq-unsafe lock - to achieve this we search
-	 * the backwards-subgraph starting at <prev>, and the
-	 * forwards-subgraph starting at <next>:
-	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
-					LOCK_ENABLED_SOFTIRQS, "soft"))
-		return 0;
-	/*
-	 * Prove that the new dependency does not connect a softirq-safe-read
-	 * lock with a softirq-unsafe lock - to achieve this we search
-	 * the backwards-subgraph starting at <prev>, and the
-	 * forwards-subgraph starting at <next>:
-	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
-					LOCK_ENABLED_SOFTIRQS, "soft"))
-		return 0;
-#endif
-	/*
 	 * For recursive read-locks we do all the dependency checks,
 	 * but we don't store read-triggered dependencies (only
 	 * write-triggered dependencies). This ensures that only the
@@ -1088,224 +1453,8 @@
 	return 0;
 }
 
-
-/*
- * Is this the address of a static object:
- */
-static int static_obj(void *obj)
-{
-	unsigned long start = (unsigned long) &_stext,
-		      end   = (unsigned long) &_end,
-		      addr  = (unsigned long) obj;
-#ifdef CONFIG_SMP
-	int i;
-#endif
-
-	/*
-	 * static variable?
-	 */
-	if ((addr >= start) && (addr < end))
-		return 1;
-
-#ifdef CONFIG_SMP
-	/*
-	 * percpu var?
-	 */
-	for_each_possible_cpu(i) {
-		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
-		end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
-					+ per_cpu_offset(i);
-
-		if ((addr >= start) && (addr < end))
-			return 1;
-	}
-#endif
-
-	/*
-	 * module var?
-	 */
-	return is_module_address(addr);
-}
-
-/*
- * To make lock name printouts unique, we calculate a unique
- * class->name_version generation counter:
- */
-static int count_matching_names(struct lock_class *new_class)
-{
-	struct lock_class *class;
-	int count = 0;
-
-	if (!new_class->name)
-		return 0;
-
-	list_for_each_entry(class, &all_lock_classes, lock_entry) {
-		if (new_class->key - new_class->subclass == class->key)
-			return class->name_version;
-		if (class->name && !strcmp(class->name, new_class->name))
-			count = max(count, class->name_version);
-	}
-
-	return count + 1;
-}
-
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
-static inline struct lock_class *
-look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
-{
-	struct lockdep_subclass_key *key;
-	struct list_head *hash_head;
-	struct lock_class *class;
-
-#ifdef CONFIG_DEBUG_LOCKDEP
-	/*
-	 * If the architecture calls into lockdep before initializing
-	 * the hashes then we'll warn about it later. (we cannot printk
-	 * right now)
-	 */
-	if (unlikely(!lockdep_initialized)) {
-		lockdep_init();
-		lockdep_init_error = 1;
-	}
-#endif
-
-	/*
-	 * Static locks do not have their class-keys yet - for them the key
-	 * is the lock object itself:
-	 */
-	if (unlikely(!lock->key))
-		lock->key = (void *)lock;
-
-	/*
-	 * NOTE: the class-key must be unique. For dynamic locks, a static
-	 * lock_class_key variable is passed in through the mutex_init()
-	 * (or spin_lock_init()) call - which acts as the key. For static
-	 * locks we use the lock object itself as the key.
-	 */
-	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
-
-	key = lock->key->subkeys + subclass;
-
-	hash_head = classhashentry(key);
-
-	/*
-	 * We can walk the hash lockfree, because the hash only
-	 * grows, and we are careful when adding entries to the end:
-	 */
-	list_for_each_entry(class, hash_head, hash_entry)
-		if (class->key == key)
-			return class;
-
-	return NULL;
-}
-
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
-static inline struct lock_class *
-register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
-{
-	struct lockdep_subclass_key *key;
-	struct list_head *hash_head;
-	struct lock_class *class;
-	unsigned long flags;
-
-	class = look_up_lock_class(lock, subclass);
-	if (likely(class))
-		return class;
-
-	/*
-	 * Debug-check: all keys must be persistent!
- 	 */
-	if (!static_obj(lock->key)) {
-		debug_locks_off();
-		printk("INFO: trying to register non-static key.\n");
-		printk("the code is fine but needs lockdep annotation.\n");
-		printk("turning off the locking correctness validator.\n");
-		dump_stack();
-
-		return NULL;
-	}
-
-	key = lock->key->subkeys + subclass;
-	hash_head = classhashentry(key);
-
-	raw_local_irq_save(flags);
-	if (!graph_lock()) {
-		raw_local_irq_restore(flags);
-		return NULL;
-	}
-	/*
-	 * We have to do the hash-walk again, to avoid races
-	 * with another CPU:
-	 */
-	list_for_each_entry(class, hash_head, hash_entry)
-		if (class->key == key)
-			goto out_unlock_set;
-	/*
-	 * Allocate a new key from the static array, and add it to
-	 * the hash:
-	 */
-	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
-		if (!debug_locks_off_graph_unlock()) {
-			raw_local_irq_restore(flags);
-			return NULL;
-		}
-		raw_local_irq_restore(flags);
-
-		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
-		printk("turning off the locking correctness validator.\n");
-		return NULL;
-	}
-	class = lock_classes + nr_lock_classes++;
-	debug_atomic_inc(&nr_unused_locks);
-	class->key = key;
-	class->name = lock->name;
-	class->subclass = subclass;
-	INIT_LIST_HEAD(&class->lock_entry);
-	INIT_LIST_HEAD(&class->locks_before);
-	INIT_LIST_HEAD(&class->locks_after);
-	class->name_version = count_matching_names(class);
-	/*
-	 * We use RCU's safe list-add method to make
-	 * parallel walking of the hash-list safe:
-	 */
-	list_add_tail_rcu(&class->hash_entry, hash_head);
-
-	if (verbose(class)) {
-		graph_unlock();
-		raw_local_irq_restore(flags);
-
-		printk("\nnew class %p: %s", class->key, class->name);
-		if (class->name_version > 1)
-			printk("#%d", class->name_version);
-		printk("\n");
-		dump_stack();
-
-		raw_local_irq_save(flags);
-		if (!graph_lock()) {
-			raw_local_irq_restore(flags);
-			return NULL;
-		}
-	}
-out_unlock_set:
-	graph_unlock();
-	raw_local_irq_restore(flags);
-
-	if (!subclass || force)
-		lock->class_cache = class;
-
-	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
-		return NULL;
-
-	return class;
-}
+unsigned long nr_lock_chains;
+static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
 
 /*
  * Look up a dependency chain. If the key is not present yet then
@@ -1366,22 +1515,73 @@
 	chain->chain_key = chain_key;
 	list_add_tail_rcu(&chain->entry, hash_head);
 	debug_atomic_inc(&chain_lookup_misses);
-#ifdef CONFIG_TRACE_IRQFLAGS
-	if (current->hardirq_context)
-		nr_hardirq_chains++;
-	else {
-		if (current->softirq_context)
-			nr_softirq_chains++;
-		else
-			nr_process_chains++;
-	}
-#else
-	nr_process_chains++;
-#endif
+	inc_chains();
 
 	return 1;
 }
 
+static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
+	       	struct held_lock *hlock, int chain_head)
+{
+	/*
+	 * Trylock needs to maintain the stack of held locks, but it
+	 * does not add new dependencies, because trylock can be done
+	 * in any order.
+	 *
+	 * We look up the chain_key and do the O(N^2) check and update of
+	 * the dependencies only if this is a new dependency chain.
+	 * (If lookup_chain_cache() returns with 1 it acquires
+	 * graph_lock for us)
+	 */
+	if (!hlock->trylock && (hlock->check == 2) &&
+			lookup_chain_cache(curr->curr_chain_key, hlock->class)) {
+		/*
+		 * Check whether last held lock:
+		 *
+		 * - is irq-safe, if this lock is irq-unsafe
+		 * - is softirq-safe, if this lock is hardirq-unsafe
+		 *
+		 * And check whether the new lock's dependency graph
+		 * could lead back to the previous lock.
+		 *
+		 * Any of these scenarios could lead to a deadlock.
+		 */
+		int ret = check_deadlock(curr, hlock, lock, hlock->read);
+
+		if (!ret)
+			return 0;
+		/*
+		 * Mark recursive read, as we jump over it when
+		 * building dependencies (just like we jump over
+		 * trylock entries):
+		 */
+		if (ret == 2)
+			hlock->read = 2;
+		/*
+		 * Add dependency only if this lock is not the head
+		 * of the chain, and if it's not a secondary read-lock:
+		 */
+		if (!chain_head && ret != 2)
+			if (!check_prevs_add(curr, hlock))
+				return 0;
+		graph_unlock();
+	} else
+		/* after lookup_chain_cache(): */
+		if (unlikely(!debug_locks))
+			return 0;
+
+	return 1;
+}
+#else
+static inline int validate_chain(struct task_struct *curr,
+	       	struct lockdep_map *lock, struct held_lock *hlock,
+		int chain_head)
+{
+	return 1;
+}
+#endif
+
 /*
  * We are building curr_chain_key incrementally, so double-check
  * it from scratch, to make sure that it's done correctly:
@@ -1425,6 +1625,57 @@
 #endif
 }
 
+static int
+print_usage_bug(struct task_struct *curr, struct held_lock *this,
+		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
+{
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+		return 0;
+
+	printk("\n=================================\n");
+	printk(  "[ INFO: inconsistent lock state ]\n");
+	print_kernel_version();
+	printk(  "---------------------------------\n");
+
+	printk("inconsistent {%s} -> {%s} usage.\n",
+		usage_str[prev_bit], usage_str[new_bit]);
+
+	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
+		curr->comm, curr->pid,
+		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
+		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
+		trace_hardirqs_enabled(curr),
+		trace_softirqs_enabled(curr));
+	print_lock(this);
+
+	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
+	print_stack_trace(this->class->usage_traces + prev_bit, 1);
+
+	print_irqtrace_events(curr);
+	printk("\nother info that might help us debug this:\n");
+	lockdep_print_held_locks(curr);
+
+	printk("\nstack backtrace:\n");
+	dump_stack();
+
+	return 0;
+}
+
+/*
+ * Print out an error if an invalid bit is set:
+ */
+static inline int
+valid_state(struct task_struct *curr, struct held_lock *this,
+	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
+{
+	if (unlikely(this->class->usage_mask & (1 << bad_bit)))
+		return print_usage_bug(curr, this, bad_bit, new_bit);
+	return 1;
+}
+
+static int mark_lock(struct task_struct *curr, struct held_lock *this,
+		     enum lock_usage_bit new_bit);
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 
 /*
@@ -1518,90 +1769,30 @@
 	print_ip_sym(curr->softirq_disable_ip);
 }
 
-#endif
-
-static int
-print_usage_bug(struct task_struct *curr, struct held_lock *this,
-		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
+static int hardirq_verbose(struct lock_class *class)
 {
-	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
-		return 0;
-
-	printk("\n=================================\n");
-	printk(  "[ INFO: inconsistent lock state ]\n");
-	print_kernel_version();
-	printk(  "---------------------------------\n");
-
-	printk("inconsistent {%s} -> {%s} usage.\n",
-		usage_str[prev_bit], usage_str[new_bit]);
-
-	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
-		curr->comm, curr->pid,
-		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
-		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
-		trace_hardirqs_enabled(curr),
-		trace_softirqs_enabled(curr));
-	print_lock(this);
-
-	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
-	print_stack_trace(this->class->usage_traces + prev_bit, 1);
-
-	print_irqtrace_events(curr);
-	printk("\nother info that might help us debug this:\n");
-	lockdep_print_held_locks(curr);
-
-	printk("\nstack backtrace:\n");
-	dump_stack();
-
+#if HARDIRQ_VERBOSE
+	return class_filter(class);
+#endif
 	return 0;
 }
 
-/*
- * Print out an error if an invalid bit is set:
- */
-static inline int
-valid_state(struct task_struct *curr, struct held_lock *this,
-	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
+static int softirq_verbose(struct lock_class *class)
 {
-	if (unlikely(this->class->usage_mask & (1 << bad_bit)))
-		return print_usage_bug(curr, this, bad_bit, new_bit);
-	return 1;
+#if SOFTIRQ_VERBOSE
+	return class_filter(class);
+#endif
+	return 0;
 }
 
 #define STRICT_READ_CHECKS	1
 
-/*
- * Mark a lock with a usage bit, and validate the state transition:
- */
-static int mark_lock(struct task_struct *curr, struct held_lock *this,
-		     enum lock_usage_bit new_bit)
+static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
+		enum lock_usage_bit new_bit)
 {
-	unsigned int new_mask = 1 << new_bit, ret = 1;
+	int ret = 1;
 
-	/*
-	 * If already set then do not dirty the cacheline,
-	 * nor do any checks:
-	 */
-	if (likely(this->class->usage_mask & new_mask))
-		return 1;
-
-	if (!graph_lock())
-		return 0;
-	/*
-	 * Make sure we didnt race:
-	 */
-	if (unlikely(this->class->usage_mask & new_mask)) {
-		graph_unlock();
-		return 1;
-	}
-
-	this->class->usage_mask |= new_mask;
-
-	if (!save_trace(this->class->usage_traces + new_bit))
-		return 0;
-
-	switch (new_bit) {
-#ifdef CONFIG_TRACE_IRQFLAGS
+	switch(new_bit) {
 	case LOCK_USED_IN_HARDIRQ:
 		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
 			return 0;
@@ -1760,37 +1951,14 @@
 		if (softirq_verbose(this->class))
 			ret = 2;
 		break;
-#endif
-	case LOCK_USED:
-		/*
-		 * Add it to the global list of classes:
-		 */
-		list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
-		debug_atomic_dec(&nr_unused_locks);
-		break;
 	default:
-		if (!debug_locks_off_graph_unlock())
-			return 0;
 		WARN_ON(1);
-		return 0;
-	}
-
-	graph_unlock();
-
-	/*
-	 * We must printk outside of the graph_lock:
-	 */
-	if (ret == 2) {
-		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
-		print_lock(this);
-		print_irqtrace_events(curr);
-		dump_stack();
+		break;
 	}
 
 	return ret;
 }
 
-#ifdef CONFIG_TRACE_IRQFLAGS
 /*
  * Mark all held locks with a usage bit:
  */
@@ -1973,9 +2141,176 @@
 		debug_atomic_inc(&redundant_softirqs_off);
 }
 
+static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
+{
+	/*
+	 * If non-trylock use in a hardirq or softirq context, then
+	 * mark the lock as used in these contexts:
+	 */
+	if (!hlock->trylock) {
+		if (hlock->read) {
+			if (curr->hardirq_context)
+				if (!mark_lock(curr, hlock,
+						LOCK_USED_IN_HARDIRQ_READ))
+					return 0;
+			if (curr->softirq_context)
+				if (!mark_lock(curr, hlock,
+						LOCK_USED_IN_SOFTIRQ_READ))
+					return 0;
+		} else {
+			if (curr->hardirq_context)
+				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
+					return 0;
+			if (curr->softirq_context)
+				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
+					return 0;
+		}
+	}
+	if (!hlock->hardirqs_off) {
+		if (hlock->read) {
+			if (!mark_lock(curr, hlock,
+					LOCK_ENABLED_HARDIRQS_READ))
+				return 0;
+			if (curr->softirqs_enabled)
+				if (!mark_lock(curr, hlock,
+						LOCK_ENABLED_SOFTIRQS_READ))
+					return 0;
+		} else {
+			if (!mark_lock(curr, hlock,
+					LOCK_ENABLED_HARDIRQS))
+				return 0;
+			if (curr->softirqs_enabled)
+				if (!mark_lock(curr, hlock,
+						LOCK_ENABLED_SOFTIRQS))
+					return 0;
+		}
+	}
+
+	return 1;
+}
+
+static int separate_irq_context(struct task_struct *curr,
+		struct held_lock *hlock)
+{
+	unsigned int depth = curr->lockdep_depth;
+
+	/*
+	 * Keep track of points where we cross into an interrupt context:
+	 */
+	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
+				curr->softirq_context;
+	if (depth) {
+		struct held_lock *prev_hlock;
+
+		prev_hlock = curr->held_locks + depth-1;
+		/*
+		 * If we cross into another context, reset the
+		 * hash key (this also prevents the checking and the
+		 * adding of the dependency to 'prev'):
+		 */
+		if (prev_hlock->irq_context != hlock->irq_context)
+			return 1;
+	}
+	return 0;
+}
+
+#else
+
+static inline
+int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
+		enum lock_usage_bit new_bit)
+{
+	WARN_ON(1);
+	return 1;
+}
+
+static inline int mark_irqflags(struct task_struct *curr,
+		struct held_lock *hlock)
+{
+	return 1;
+}
+
+static inline int separate_irq_context(struct task_struct *curr,
+		struct held_lock *hlock)
+{
+	return 0;
+}
+
 #endif
 
 /*
+ * Mark a lock with a usage bit, and validate the state transition:
+ */
+static int mark_lock(struct task_struct *curr, struct held_lock *this,
+		     enum lock_usage_bit new_bit)
+{
+	unsigned int new_mask = 1 << new_bit, ret = 1;
+
+	/*
+	 * If already set then do not dirty the cacheline,
+	 * nor do any checks:
+	 */
+	if (likely(this->class->usage_mask & new_mask))
+		return 1;
+
+	if (!graph_lock())
+		return 0;
+	/*
+	 * Make sure we didn't race:
+	 */
+	if (unlikely(this->class->usage_mask & new_mask)) {
+		graph_unlock();
+		return 1;
+	}
+
+	this->class->usage_mask |= new_mask;
+
+	if (!save_trace(this->class->usage_traces + new_bit))
+		return 0;
+
+	switch (new_bit) {
+	case LOCK_USED_IN_HARDIRQ:
+	case LOCK_USED_IN_SOFTIRQ:
+	case LOCK_USED_IN_HARDIRQ_READ:
+	case LOCK_USED_IN_SOFTIRQ_READ:
+	case LOCK_ENABLED_HARDIRQS:
+	case LOCK_ENABLED_SOFTIRQS:
+	case LOCK_ENABLED_HARDIRQS_READ:
+	case LOCK_ENABLED_SOFTIRQS_READ:
+		ret = mark_lock_irq(curr, this, new_bit);
+		if (!ret)
+			return 0;
+		break;
+	case LOCK_USED:
+		/*
+		 * Add it to the global list of classes:
+		 */
+		list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
+		debug_atomic_dec(&nr_unused_locks);
+		break;
+	default:
+		if (!debug_locks_off_graph_unlock())
+			return 0;
+		WARN_ON(1);
+		return 0;
+	}
+
+	graph_unlock();
+
+	/*
+	 * We must printk outside of the graph_lock:
+	 */
+	if (ret == 2) {
+		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
+		print_lock(this);
+		print_irqtrace_events(curr);
+		dump_stack();
+	}
+
+	return ret;
+}
+
+/*
  * Initialize a lock instance's lock-class mapping info:
  */
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
@@ -1999,6 +2334,9 @@
 	lock->name = name;
 	lock->key = key;
 	lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+	lock->cpu = raw_smp_processor_id();
+#endif
 	if (subclass)
 		register_lock_class(lock, subclass, 1);
 }
@@ -2020,6 +2358,9 @@
 	int chain_head = 0;
 	u64 chain_key;
 
+	if (!prove_locking)
+		check = 1;
+
 	if (unlikely(!debug_locks))
 		return 0;
 
@@ -2070,57 +2411,18 @@
 	hlock->read = read;
 	hlock->check = check;
 	hlock->hardirqs_off = hardirqs_off;
-
-	if (check != 2)
-		goto out_calc_hash;
-#ifdef CONFIG_TRACE_IRQFLAGS
-	/*
-	 * If non-trylock use in a hardirq or softirq context, then
-	 * mark the lock as used in these contexts:
-	 */
-	if (!trylock) {
-		if (read) {
-			if (curr->hardirq_context)
-				if (!mark_lock(curr, hlock,
-						LOCK_USED_IN_HARDIRQ_READ))
-					return 0;
-			if (curr->softirq_context)
-				if (!mark_lock(curr, hlock,
-						LOCK_USED_IN_SOFTIRQ_READ))
-					return 0;
-		} else {
-			if (curr->hardirq_context)
-				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
-					return 0;
-			if (curr->softirq_context)
-				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
-					return 0;
-		}
-	}
-	if (!hardirqs_off) {
-		if (read) {
-			if (!mark_lock(curr, hlock,
-					LOCK_ENABLED_HARDIRQS_READ))
-				return 0;
-			if (curr->softirqs_enabled)
-				if (!mark_lock(curr, hlock,
-						LOCK_ENABLED_SOFTIRQS_READ))
-					return 0;
-		} else {
-			if (!mark_lock(curr, hlock,
-					LOCK_ENABLED_HARDIRQS))
-				return 0;
-			if (curr->softirqs_enabled)
-				if (!mark_lock(curr, hlock,
-						LOCK_ENABLED_SOFTIRQS))
-					return 0;
-		}
-	}
+#ifdef CONFIG_LOCK_STAT
+	hlock->waittime_stamp = 0;
+	hlock->holdtime_stamp = sched_clock();
 #endif
+
+	if (check == 2 && !mark_irqflags(curr, hlock))
+		return 0;
+
 	/* mark it as used: */
 	if (!mark_lock(curr, hlock, LOCK_USED))
 		return 0;
-out_calc_hash:
+
 	/*
 	 * Calculate the chain hash: it's the combined hash of all the
 	 * lock keys along the dependency chain. We save the hash value
@@ -2143,77 +2445,15 @@
 	}
 
 	hlock->prev_chain_key = chain_key;
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-	/*
-	 * Keep track of points where we cross into an interrupt context:
-	 */
-	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
-				curr->softirq_context;
-	if (depth) {
-		struct held_lock *prev_hlock;
-
-		prev_hlock = curr->held_locks + depth-1;
-		/*
-		 * If we cross into another context, reset the
-		 * hash key (this also prevents the checking and the
-		 * adding of the dependency to 'prev'):
-		 */
-		if (prev_hlock->irq_context != hlock->irq_context) {
-			chain_key = 0;
-			chain_head = 1;
-		}
+	if (separate_irq_context(curr, hlock)) {
+		chain_key = 0;
+		chain_head = 1;
 	}
-#endif
 	chain_key = iterate_chain_key(chain_key, id);
 	curr->curr_chain_key = chain_key;
 
-	/*
-	 * Trylock needs to maintain the stack of held locks, but it
-	 * does not add new dependencies, because trylock can be done
-	 * in any order.
-	 *
-	 * We look up the chain_key and do the O(N^2) check and update of
-	 * the dependencies only if this is a new dependency chain.
-	 * (If lookup_chain_cache() returns with 1 it acquires
-	 * graph_lock for us)
-	 */
-	if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
-		/*
-		 * Check whether last held lock:
-		 *
-		 * - is irq-safe, if this lock is irq-unsafe
-		 * - is softirq-safe, if this lock is hardirq-unsafe
-		 *
-		 * And check whether the new lock's dependency graph
-		 * could lead back to the previous lock.
-		 *
-		 * any of these scenarios could lead to a deadlock. If
-		 * All validations
-		 */
-		int ret = check_deadlock(curr, hlock, lock, read);
-
-		if (!ret)
-			return 0;
-		/*
-		 * Mark recursive read, as we jump over it when
-		 * building dependencies (just like we jump over
-		 * trylock entries):
-		 */
-		if (ret == 2)
-			hlock->read = 2;
-		/*
-		 * Add dependency only if this lock is not the head
-		 * of the chain, and if it's not a secondary read-lock:
-		 */
-		if (!chain_head && ret != 2)
-			if (!check_prevs_add(curr, hlock))
-				return 0;
-		graph_unlock();
-	} else
-		/* after lookup_chain_cache(): */
-		if (unlikely(!debug_locks))
-			return 0;
+	if (!validate_chain(curr, lock, hlock, chain_head))
+		return 0;
 
 	curr->lockdep_depth++;
 	check_chain_key(curr);
@@ -2315,6 +2555,8 @@
 	return print_unlock_inbalance_bug(curr, lock, ip);
 
 found_it:
+	lock_release_holdtime(hlock);
+
 	/*
 	 * We have the right lock to unlock, 'hlock' points to it.
 	 * Now we remove it from the stack, and add back the other
@@ -2367,6 +2609,8 @@
 
 	curr->curr_chain_key = hlock->prev_chain_key;
 
+	lock_release_holdtime(hlock);
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 	hlock->prev_chain_key = 0;
 	hlock->class = NULL;
@@ -2441,6 +2685,9 @@
 {
 	unsigned long flags;
 
+	if (unlikely(!lock_stat && !prove_locking))
+		return;
+
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -2460,6 +2707,9 @@
 {
 	unsigned long flags;
 
+	if (unlikely(!lock_stat && !prove_locking))
+		return;
+
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -2473,6 +2723,166 @@
 
 EXPORT_SYMBOL_GPL(lock_release);
 
+#ifdef CONFIG_LOCK_STAT
+static int
+print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
+			   unsigned long ip)
+{
+	if (!debug_locks_off())
+		return 0;
+	if (debug_locks_silent)
+		return 0;
+
+	printk("\n=================================\n");
+	printk(  "[ BUG: bad contention detected! ]\n");
+	printk(  "---------------------------------\n");
+	printk("%s/%d is trying to contend lock (",
+		curr->comm, curr->pid);
+	print_lockdep_cache(lock);
+	printk(") at:\n");
+	print_ip_sym(ip);
+	printk("but there are no locks held!\n");
+	printk("\nother info that might help us debug this:\n");
+	lockdep_print_held_locks(curr);
+
+	printk("\nstack backtrace:\n");
+	dump_stack();
+
+	return 0;
+}
+
+static void
+__lock_contended(struct lockdep_map *lock, unsigned long ip)
+{
+	struct task_struct *curr = current;
+	struct held_lock *hlock, *prev_hlock;
+	struct lock_class_stats *stats;
+	unsigned int depth;
+	int i, point;
+
+	depth = curr->lockdep_depth;
+	if (DEBUG_LOCKS_WARN_ON(!depth))
+		return;
+
+	prev_hlock = NULL;
+	for (i = depth-1; i >= 0; i--) {
+		hlock = curr->held_locks + i;
+		/*
+		 * We must not cross into another context:
+		 */
+		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+			break;
+		if (hlock->instance == lock)
+			goto found_it;
+		prev_hlock = hlock;
+	}
+	print_lock_contention_bug(curr, lock, ip);
+	return;
+
+found_it:
+	hlock->waittime_stamp = sched_clock();
+
+	point = lock_contention_point(hlock->class, ip);
+
+	stats = get_lock_stats(hlock->class);
+	if (point < ARRAY_SIZE(stats->contention_point))
+		stats->contention_point[point]++;
+	if (lock->cpu != smp_processor_id())
+		stats->bounces[bounce_contended + !!hlock->read]++;
+	put_lock_stats(stats);
+}
+
+static void
+__lock_acquired(struct lockdep_map *lock)
+{
+	struct task_struct *curr = current;
+	struct held_lock *hlock, *prev_hlock;
+	struct lock_class_stats *stats;
+	unsigned int depth;
+	u64 now;
+	s64 waittime = 0;
+	int i, cpu;
+
+	depth = curr->lockdep_depth;
+	if (DEBUG_LOCKS_WARN_ON(!depth))
+		return;
+
+	prev_hlock = NULL;
+	for (i = depth-1; i >= 0; i--) {
+		hlock = curr->held_locks + i;
+		/*
+		 * We must not cross into another context:
+		 */
+		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+			break;
+		if (hlock->instance == lock)
+			goto found_it;
+		prev_hlock = hlock;
+	}
+	print_lock_contention_bug(curr, lock, _RET_IP_);
+	return;
+
+found_it:
+	cpu = smp_processor_id();
+	if (hlock->waittime_stamp) {
+		now = sched_clock();
+		waittime = now - hlock->waittime_stamp;
+		hlock->holdtime_stamp = now;
+	}
+
+	stats = get_lock_stats(hlock->class);
+	if (waittime) {
+		if (hlock->read)
+			lock_time_inc(&stats->read_waittime, waittime);
+		else
+			lock_time_inc(&stats->write_waittime, waittime);
+	}
+	if (lock->cpu != cpu)
+		stats->bounces[bounce_acquired + !!hlock->read]++;
+	put_lock_stats(stats);
+
+	lock->cpu = cpu;
+}
+
+void lock_contended(struct lockdep_map *lock, unsigned long ip)
+{
+	unsigned long flags;
+
+	if (unlikely(!lock_stat))
+		return;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+	current->lockdep_recursion = 1;
+	__lock_contended(lock, ip);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_contended);
+
+void lock_acquired(struct lockdep_map *lock)
+{
+	unsigned long flags;
+
+	if (unlikely(!lock_stat))
+		return;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+	current->lockdep_recursion = 1;
+	__lock_acquired(lock);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_acquired);
+#endif
+
 /*
  * Used by the testsuite, sanitize the validator state
  * after a simulated failure:
@@ -2636,8 +3046,11 @@
 		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
 
 #ifdef CONFIG_DEBUG_LOCKDEP
-	if (lockdep_init_error)
-		printk("WARNING: lockdep init error! Arch code didnt call lockdep_init() early enough?\n");
+	if (lockdep_init_error) {
+		printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
+		printk("Call stack leading to lockdep invocation was:\n");
+		print_stack_trace(&lockdep_init_trace, 0);
+	}
 #endif
 }
 
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 58f35e5..9f17af4 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -5,7 +5,8 @@
  *
  * Started by Ingo Molnar:
  *
- *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  *
  * Code for /proc/lockdep and /proc/lockdep_stats:
  *
@@ -15,6 +16,10 @@
 #include <linux/seq_file.h>
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
+#include <linux/vmalloc.h>
+#include <linux/sort.h>
+#include <asm/uaccess.h>
+#include <asm/div64.h>
 
 #include "lockdep_internals.h"
 
@@ -271,8 +276,10 @@
 	if (nr_list_entries)
 		factor = sum_forward_deps / nr_list_entries;
 
+#ifdef CONFIG_PROVE_LOCKING
 	seq_printf(m, " dependency chains:             %11lu [max: %lu]\n",
 			nr_lock_chains, MAX_LOCKDEP_CHAINS);
+#endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 	seq_printf(m, " in-hardirq chains:             %11u\n",
@@ -342,6 +349,292 @@
 	.release	= seq_release,
 };
 
+#ifdef CONFIG_LOCK_STAT
+
+struct lock_stat_data {
+	struct lock_class *class;
+	struct lock_class_stats stats;
+};
+
+struct lock_stat_seq {
+	struct lock_stat_data *iter;
+	struct lock_stat_data *iter_end;
+	struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
+};
+
+/*
+ * sort on absolute number of contentions
+ */
+static int lock_stat_cmp(const void *l, const void *r)
+{
+	const struct lock_stat_data *dl = l, *dr = r;
+	unsigned long nl, nr;
+
+	nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
+	nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;
+
+	return nr - nl;
+}
+
+static void seq_line(struct seq_file *m, char c, int offset, int length)
+{
+	int i;
+
+	for (i = 0; i < offset; i++)
+		seq_puts(m, " ");
+	for (i = 0; i < length; i++)
+		seq_printf(m, "%c", c);
+	seq_puts(m, "\n");
+}
+
+static void snprint_time(char *buf, size_t bufsiz, s64 nr)
+{
+	unsigned long rem;
+
+	rem = do_div(nr, 1000); /* XXX: do_div_signed */
+	snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, ((int)rem+5)/10);
+}
+
+static void seq_time(struct seq_file *m, s64 time)
+{
+	char num[15];
+
+	snprint_time(num, sizeof(num), time);
+	seq_printf(m, " %14s", num);
+}
+
+static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
+{
+	seq_printf(m, "%14lu", lt->nr);
+	seq_time(m, lt->min);
+	seq_time(m, lt->max);
+	seq_time(m, lt->total);
+}
+
+static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
+{
+	char name[39];
+	struct lock_class *class;
+	struct lock_class_stats *stats;
+	int i, namelen;
+
+	class = data->class;
+	stats = &data->stats;
+
+	namelen = 38;
+	if (class->name_version > 1)
+		namelen -= 2; /* XXX truncates versions > 9 */
+	if (class->subclass)
+		namelen -= 2;
+
+	if (!class->name) {
+		char str[KSYM_NAME_LEN];
+		const char *key_name;
+
+		key_name = __get_key_name(class->key, str);
+		snprintf(name, namelen, "%s", key_name);
+	} else {
+		snprintf(name, namelen, "%s", class->name);
+	}
+	namelen = strlen(name);
+	if (class->name_version > 1) {
+		snprintf(name+namelen, 3, "#%d", class->name_version);
+		namelen += 2;
+	}
+	if (class->subclass) {
+		snprintf(name+namelen, 3, "/%d", class->subclass);
+		namelen += 2;
+	}
+
+	if (stats->write_holdtime.nr) {
+		if (stats->read_holdtime.nr)
+			seq_printf(m, "%38s-W:", name);
+		else
+			seq_printf(m, "%40s:", name);
+
+		seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
+		seq_lock_time(m, &stats->write_waittime);
+		seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
+		seq_lock_time(m, &stats->write_holdtime);
+		seq_puts(m, "\n");
+	}
+
+	if (stats->read_holdtime.nr) {
+		seq_printf(m, "%38s-R:", name);
+		seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
+		seq_lock_time(m, &stats->read_waittime);
+		seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
+		seq_lock_time(m, &stats->read_holdtime);
+		seq_puts(m, "\n");
+	}
+
+	if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
+		return;
+
+	if (stats->read_holdtime.nr)
+		namelen += 2;
+
+	for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
+		char sym[KSYM_SYMBOL_LEN];
+		char ip[32];
+
+		if (class->contention_point[i] == 0)
+			break;
+
+		if (!i)
+			seq_line(m, '-', 40-namelen, namelen);
+
+		sprint_symbol(sym, class->contention_point[i]);
+		snprintf(ip, sizeof(ip), "[<%p>]",
+				(void *)class->contention_point[i]);
+		seq_printf(m, "%40s %14lu %29s %s\n", name,
+				stats->contention_point[i],
+				ip, sym);
+	}
+	if (i) {
+		seq_puts(m, "\n");
+		seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
+		seq_puts(m, "\n");
+	}
+}
+
+static void seq_header(struct seq_file *m)
+{
+	seq_printf(m, "lock_stat version 0.2\n");
+	seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
+	seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
+			"%14s %14s\n",
+			"class name",
+			"con-bounces",
+			"contentions",
+			"waittime-min",
+			"waittime-max",
+			"waittime-total",
+			"acq-bounces",
+			"acquisitions",
+			"holdtime-min",
+			"holdtime-max",
+			"holdtime-total");
+	seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
+	seq_printf(m, "\n");
+}
+
+static void *ls_start(struct seq_file *m, loff_t *pos)
+{
+	struct lock_stat_seq *data = m->private;
+
+	if (data->iter == data->stats)
+		seq_header(m);
+
+	if (data->iter == data->iter_end)
+		data->iter = NULL;
+
+	return data->iter;
+}
+
+static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct lock_stat_seq *data = m->private;
+
+	(*pos)++;
+
+	data->iter = v;
+	data->iter++;
+	if (data->iter == data->iter_end)
+		data->iter = NULL;
+
+	return data->iter;
+}
+
+static void ls_stop(struct seq_file *m, void *v)
+{
+}
+
+static int ls_show(struct seq_file *m, void *v)
+{
+	struct lock_stat_seq *data = m->private;
+
+	seq_stats(m, data->iter);
+	return 0;
+}
+
+static struct seq_operations lockstat_ops = {
+	.start	= ls_start,
+	.next	= ls_next,
+	.stop	= ls_stop,
+	.show	= ls_show,
+};
+
+static int lock_stat_open(struct inode *inode, struct file *file)
+{
+	int res;
+	struct lock_class *class;
+	struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));
+
+	if (!data)
+		return -ENOMEM;
+
+	res = seq_open(file, &lockstat_ops);
+	if (!res) {
+		struct lock_stat_data *iter = data->stats;
+		struct seq_file *m = file->private_data;
+
+		data->iter = iter;
+		list_for_each_entry(class, &all_lock_classes, lock_entry) {
+			iter->class = class;
+			iter->stats = lock_stats(class);
+			iter++;
+		}
+		data->iter_end = iter;
+
+		sort(data->stats, data->iter_end - data->iter,
+				sizeof(struct lock_stat_data),
+				lock_stat_cmp, NULL);
+
+		m->private = data;
+	} else
+		vfree(data);
+
+	return res;
+}
+
+static ssize_t lock_stat_write(struct file *file, const char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	struct lock_class *class;
+	char c;
+
+	if (count) {
+		if (get_user(c, buf))
+			return -EFAULT;
+
+		if (c != '0')
+			return count;
+
+		list_for_each_entry(class, &all_lock_classes, lock_entry)
+			clear_lock_stats(class);
+	}
+	return count;
+}
+
+static int lock_stat_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+
+	vfree(seq->private);
+	seq->private = NULL;
+	return seq_release(inode, file);
+}
+
+static const struct file_operations proc_lock_stat_operations = {
+	.open		= lock_stat_open,
+	.write		= lock_stat_write,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= lock_stat_release,
+};
+#endif /* CONFIG_LOCK_STAT */
+
 static int __init lockdep_proc_init(void)
 {
 	struct proc_dir_entry *entry;
@@ -354,6 +647,12 @@
 	if (entry)
 		entry->proc_fops = &proc_lockdep_stats_operations;
 
+#ifdef CONFIG_LOCK_STAT
+	entry = create_proc_entry("lock_stat", S_IRUSR, NULL);
+	if (entry)
+		entry->proc_fops = &proc_lock_stat_operations;
+#endif
+
 	return 0;
 }
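The new /proc/lock_stat file prints one block per lock class, sorted by contention count, and writing '0' to it clears all counters (see lock_stat_write() above). A minimal user-space sketch of dumping and then resetting the statistics; this standalone program is illustrative only and not part of the patch:

#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/lock_stat", "r");	/* needs CONFIG_LOCK_STAT=y */

	if (!f) {
		perror("/proc/lock_stat");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);			/* dump the per-class table */
	fclose(f);

	f = fopen("/proc/lock_stat", "w");		/* root only: the entry is S_IRUSR */
	if (f) {
		fputs("0", f);				/* anything other than '0' is ignored */
		fclose(f);
	}
	return 0;
}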
 
diff --git a/kernel/module.c b/kernel/module.c
index 539fed9..33c04ad 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2133,7 +2133,7 @@
 			sym = get_ksymbol(mod, addr, NULL, NULL);
 			if (!sym)
 				goto out;
-			strlcpy(symname, sym, KSYM_NAME_LEN + 1);
+			strlcpy(symname, sym, KSYM_NAME_LEN);
 			mutex_unlock(&module_mutex);
 			return 0;
 		}
@@ -2158,9 +2158,9 @@
 			if (!sym)
 				goto out;
 			if (modname)
-				strlcpy(modname, mod->name, MODULE_NAME_LEN + 1);
+				strlcpy(modname, mod->name, MODULE_NAME_LEN);
 			if (name)
-				strlcpy(name, sym, KSYM_NAME_LEN + 1);
+				strlcpy(name, sym, KSYM_NAME_LEN);
 			mutex_unlock(&module_mutex);
 			return 0;
 		}
@@ -2181,8 +2181,8 @@
 			*value = mod->symtab[symnum].st_value;
 			*type = mod->symtab[symnum].st_info;
 			strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
-				KSYM_NAME_LEN + 1);
-			strlcpy(module_name, mod->name, MODULE_NAME_LEN + 1);
+				KSYM_NAME_LEN);
+			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
 			*exported = is_exported(name, mod);
 			mutex_unlock(&module_mutex);
 			return 0;
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 303eab1..691b865 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -139,6 +139,12 @@
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;
 
+	old_val = atomic_xchg(&lock->count, -1);
+	if (old_val == 1)
+		goto done;
+
+	lock_contended(&lock->dep_map, _RET_IP_);
+
 	for (;;) {
 		/*
 		 * Lets try to take the lock again - this is needed even if
@@ -174,6 +180,8 @@
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
+done:
+	lock_acquired(&lock->dep_map);
 	/* got the lock - rejoice! */
 	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
 	debug_mutex_set_owner(lock, task_thread_info(task));
diff --git a/kernel/panic.c b/kernel/panic.c
index 623d182..f64f4c1 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -159,14 +159,15 @@
 {
 	static char buf[20];
 	if (tainted) {
-		snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c",
+		snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c",
 			tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G',
 			tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
 			tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
 			tainted & TAINT_FORCED_RMMOD ? 'R' : ' ',
  			tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
 			tainted & TAINT_BAD_PAGE ? 'B' : ' ',
-			tainted & TAINT_USER ? 'U' : ' ');
+			tainted & TAINT_USER ? 'U' : ' ',
+			tainted & TAINT_DIE ? 'D' : ' ');
 	}
 	else
 		snprintf(buf, sizeof(buf), "Not tainted");
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 495b7d4..7358609 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -33,13 +33,20 @@
 	bool "Power Management Debug Support"
 	depends on PM
 	---help---
-	This option enables verbose debugging support in the Power Management
-	code. This is helpful when debugging and reporting various PM bugs, 
-	like suspend support.
+	This option enables various debugging support in the Power Management
+	code. This is helpful when debugging and reporting PM bugs, like
+	suspend support.
+
+config PM_VERBOSE
+	bool "Verbose Power Management debugging"
+	depends on PM_DEBUG
+	default n
+	---help---
+	This option enables verbose messages from the Power Management code.
 
 config DISABLE_CONSOLE_SUSPEND
 	bool "Keep console(s) enabled during suspend/resume (DANGEROUS)"
-	depends on PM && PM_DEBUG
+	depends on PM_DEBUG
 	default n
 	---help---
 	This option turns off the console suspend mechanism that prevents
@@ -50,7 +57,7 @@
 
 config PM_TRACE
 	bool "Suspend/resume event tracing"
-	depends on PM && PM_DEBUG && X86_32 && EXPERIMENTAL
+	depends on PM_DEBUG && X86_32 && EXPERIMENTAL
 	default n
 	---help---
 	This enables some cheesy code to save the last PM event point in the
@@ -65,18 +72,6 @@
 	CAUTION: this option will cause your machine's real-time clock to be
 	set to an invalid time after a resume.
 
-config PM_SYSFS_DEPRECATED
-	bool "Driver model /sys/devices/.../power/state files (DEPRECATED)"
-	depends on PM && SYSFS
-	default n
-	help
-	  The driver model started out with a sysfs file intended to provide
-	  a userspace hook for device power management.  This feature has never
-	  worked very well, except for limited testing purposes, and so it will
-	  be removed.   It's not clear that a generic mechanism could really
-	  handle the wide variability of device power states; any replacements
-	  are likely to be bus or driver specific.
-
 config SOFTWARE_SUSPEND
 	bool "Software Suspend (Hibernation)"
 	depends on PM && SWAP && (((X86 || PPC64_SWSUSP) && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP))
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index f445b9c..324ac01 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -45,7 +45,7 @@
 
 static int hibernation_mode = HIBERNATION_SHUTDOWN;
 
-struct hibernation_ops *hibernation_ops;
+static struct hibernation_ops *hibernation_ops;
 
 /**
  * hibernation_set_ops - set the global hibernate operations
@@ -54,7 +54,8 @@
 
 void hibernation_set_ops(struct hibernation_ops *ops)
 {
-	if (ops && !(ops->prepare && ops->enter && ops->finish)) {
+	if (ops && !(ops->prepare && ops->enter && ops->finish
+	    && ops->pre_restore && ops->restore_cleanup)) {
 		WARN_ON(1);
 		return;
 	}
@@ -74,9 +75,9 @@
  *	platform driver if so configured and return an error code if it fails
  */
 
-static int platform_prepare(void)
+static int platform_prepare(int platform_mode)
 {
-	return (hibernation_mode == HIBERNATION_PLATFORM && hibernation_ops) ?
+	return (platform_mode && hibernation_ops) ?
 		hibernation_ops->prepare() : 0;
 }
 
@@ -85,13 +86,145 @@
  *	using the platform driver (must be called after platform_prepare())
  */
 
-static void platform_finish(void)
+static void platform_finish(int platform_mode)
 {
-	if (hibernation_mode == HIBERNATION_PLATFORM && hibernation_ops)
+	if (platform_mode && hibernation_ops)
 		hibernation_ops->finish();
 }
 
 /**
+ *	platform_pre_restore - prepare the platform for the restoration from a
+ *	hibernation image.  If the restore fails after this function has been
+ *	called, platform_restore_cleanup() must be called.
+ */
+
+static int platform_pre_restore(int platform_mode)
+{
+	return (platform_mode && hibernation_ops) ?
+		hibernation_ops->pre_restore() : 0;
+}
+
+/**
+ *	platform_restore_cleanup - switch the platform to the normal mode of
+ *	operation after a failing restore.  If platform_pre_restore() has been
+ *	called before the failing restore, this function must be called too,
+ *	regardless of the result of platform_pre_restore().
+ */
+
+static void platform_restore_cleanup(int platform_mode)
+{
+	if (platform_mode && hibernation_ops)
+		hibernation_ops->restore_cleanup();
+}
+
+/**
+ *	hibernation_snapshot - quiesce devices and create the hibernation
+ *	snapshot image.
+ *	@platform_mode - if set, use the platform driver, if available, to
+ *			 prepare the platform firmware for the power transition.
+ *
+ *	Must be called with pm_mutex held
+ */
+
+int hibernation_snapshot(int platform_mode)
+{
+	int error;
+
+	/* Free memory before shutting down devices. */
+	error = swsusp_shrink_memory();
+	if (error)
+		return error;
+
+	suspend_console();
+	error = device_suspend(PMSG_FREEZE);
+	if (error)
+		goto Resume_console;
+
+	error = platform_prepare(platform_mode);
+	if (error)
+		goto Resume_devices;
+
+	error = disable_nonboot_cpus();
+	if (!error) {
+		if (hibernation_mode != HIBERNATION_TEST) {
+			in_suspend = 1;
+			error = swsusp_suspend();
+			/* Control returns here after successful restore */
+		} else {
+			printk("swsusp debug: Waiting for 5 seconds.\n");
+			mdelay(5000);
+		}
+	}
+	enable_nonboot_cpus();
+ Resume_devices:
+	platform_finish(platform_mode);
+	device_resume();
+ Resume_console:
+	resume_console();
+	return error;
+}
+
+/**
+ *	hibernation_restore - quiesce devices and restore the hibernation
+ *	snapshot image.  If successful, control returns in hibernation_snapshot()
+ *	@platform_mode - if set, use the platform driver, if available, to
+ *			 prepare the platform firmware for the transition.
+ *
+ *	Must be called with pm_mutex held
+ */
+
+int hibernation_restore(int platform_mode)
+{
+	int error;
+
+	pm_prepare_console();
+	suspend_console();
+	error = device_suspend(PMSG_PRETHAW);
+	if (error)
+		goto Finish;
+
+	error = platform_pre_restore(platform_mode);
+	if (!error) {
+		error = disable_nonboot_cpus();
+		if (!error)
+			error = swsusp_resume();
+		enable_nonboot_cpus();
+	}
+	platform_restore_cleanup(platform_mode);
+	device_resume();
+ Finish:
+	resume_console();
+	pm_restore_console();
+	return error;
+}
+
+/**
+ *	hibernation_platform_enter - enter the hibernation state using the
+ *	platform driver (if available)
+ */
+
+int hibernation_platform_enter(void)
+{
+	int error;
+
+	if (hibernation_ops) {
+		kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
+		/*
+		 * We have cancelled the power transition by running
+		 * hibernation_ops->finish() before saving the image, so we
+		 * should let the firmware know that we're going to enter the
+		 * sleep state after all
+		 */
+		error = hibernation_ops->prepare();
+		if (!error)
+			error = hibernation_ops->enter();
+	} else {
+		error = -ENOSYS;
+	}
+	return error;
+}
+
+/**
  *	power_down - Shut the machine down for hibernation.
  *
  *	Use the platform driver, if configured so; otherwise try
@@ -111,11 +244,7 @@
 		kernel_restart(NULL);
 		break;
 	case HIBERNATION_PLATFORM:
-		if (hibernation_ops) {
-			kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
-			hibernation_ops->enter();
-			break;
-		}
+		hibernation_platform_enter();
 	}
 	kernel_halt();
 	/*
@@ -152,9 +281,16 @@
 {
 	int error;
 
+	mutex_lock(&pm_mutex);
 	/* The snapshot device should not be opened while we're running */
-	if (!atomic_add_unless(&snapshot_device_available, -1, 0))
-		return -EBUSY;
+	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
+		error = -EBUSY;
+		goto Unlock;
+	}
+
+	error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
+	if (error)
+		goto Exit;
 
 	/* Allocate memory management structures */
 	error = create_basic_memory_bitmaps();
@@ -165,75 +301,35 @@
 	if (error)
 		goto Finish;
 
-	mutex_lock(&pm_mutex);
 	if (hibernation_mode == HIBERNATION_TESTPROC) {
 		printk("swsusp debug: Waiting for 5 seconds.\n");
 		mdelay(5000);
 		goto Thaw;
 	}
+	error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
+	if (in_suspend && !error) {
+		unsigned int flags = 0;
 
-	/* Free memory before shutting down devices. */
-	error = swsusp_shrink_memory();
-	if (error)
-		goto Thaw;
-
-	error = platform_prepare();
-	if (error)
-		goto Thaw;
-
-	suspend_console();
-	error = device_suspend(PMSG_FREEZE);
-	if (error) {
-		printk(KERN_ERR "PM: Some devices failed to suspend\n");
-		goto Resume_devices;
-	}
-	error = disable_nonboot_cpus();
-	if (error)
-		goto Enable_cpus;
-
-	if (hibernation_mode == HIBERNATION_TEST) {
-		printk("swsusp debug: Waiting for 5 seconds.\n");
-		mdelay(5000);
-		goto Enable_cpus;
-	}
-
-	pr_debug("PM: snapshotting memory.\n");
-	in_suspend = 1;
-	error = swsusp_suspend();
-	if (error)
-		goto Enable_cpus;
-
-	if (in_suspend) {
-		enable_nonboot_cpus();
-		platform_finish();
-		device_resume();
-		resume_console();
+		if (hibernation_mode == HIBERNATION_PLATFORM)
+			flags |= SF_PLATFORM_MODE;
 		pr_debug("PM: writing image.\n");
-		error = swsusp_write();
+		error = swsusp_write(flags);
+		swsusp_free();
 		if (!error)
 			power_down();
-		else {
-			swsusp_free();
-			goto Thaw;
-		}
 	} else {
 		pr_debug("PM: Image restored successfully.\n");
+		swsusp_free();
 	}
-
-	swsusp_free();
- Enable_cpus:
-	enable_nonboot_cpus();
- Resume_devices:
-	platform_finish();
-	device_resume();
-	resume_console();
  Thaw:
-	mutex_unlock(&pm_mutex);
 	unprepare_processes();
  Finish:
 	free_basic_memory_bitmaps();
  Exit:
+	pm_notifier_call_chain(PM_POST_HIBERNATION);
 	atomic_inc(&snapshot_device_available);
+ Unlock:
+	mutex_unlock(&pm_mutex);
 	return error;
 }
 
@@ -253,6 +349,7 @@
 static int software_resume(void)
 {
 	int error;
+	unsigned int flags;
 
 	mutex_lock(&pm_mutex);
 	if (!swsusp_resume_device) {
@@ -300,30 +397,12 @@
 
 	pr_debug("PM: Reading swsusp image.\n");
 
-	error = swsusp_read();
-	if (error) {
-		swsusp_free();
-		goto Thaw;
-	}
-
-	pr_debug("PM: Preparing devices for restore.\n");
-
-	suspend_console();
-	error = device_suspend(PMSG_PRETHAW);
-	if (error)
-		goto Free;
-
-	error = disable_nonboot_cpus();
+	error = swsusp_read(&flags);
 	if (!error)
-		swsusp_resume();
+		hibernation_restore(flags & SF_PLATFORM_MODE);
 
-	enable_nonboot_cpus();
- Free:
-	swsusp_free();
-	device_resume();
-	resume_console();
- Thaw:
 	printk(KERN_ERR "PM: Restore failed, recovering.\n");
+	swsusp_free();
 	unprepare_processes();
  Done:
 	free_basic_memory_bitmaps();
@@ -333,7 +412,7 @@
  Unlock:
 	mutex_unlock(&pm_mutex);
 	pr_debug("PM: Resume from disk failed.\n");
-	return 0;
+	return error;
 }
 
 late_initcall(software_resume);
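hibernation_set_ops() now refuses an ops structure unless all five callbacks are present: prepare(), enter() and finish() as before, plus the new pre_restore() and restore_cleanup() used around image loading. A hedged sketch of what a platform driver's registration could look like under the extended contract; the my_pm_* names are placeholders, not taken from this patch:

static int  my_pm_prepare(void)		{ /* arm the firmware */ return 0; }
static int  my_pm_enter(void)		{ /* enter the sleep state */ return 0; }
static void my_pm_finish(void)		{ /* normal cleanup path */ }
static int  my_pm_pre_restore(void)	{ /* before loading an image */ return 0; }
static void my_pm_restore_cleanup(void)	{ /* after a failed restore */ }

static struct hibernation_ops my_hibernation_ops = {
	.prepare		= my_pm_prepare,
	.enter			= my_pm_enter,
	.finish			= my_pm_finish,
	.pre_restore		= my_pm_pre_restore,
	.restore_cleanup	= my_pm_restore_cleanup,
};

static int __init my_pm_init(void)
{
	/* Omitting any callback now makes hibernation_set_ops() WARN and
	 * ignore the registration, per the check above. */
	hibernation_set_ops(&my_hibernation_ops);
	return 0;
}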
diff --git a/kernel/power/main.c b/kernel/power/main.c
index fc45ed2..32147b5 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -23,6 +23,8 @@
 
 #include "power.h"
 
+BLOCKING_NOTIFIER_HEAD(pm_chain_head);
+
 /*This is just an arbitrary number */
 #define FREE_PAGE_NUMBER (100)
 
@@ -63,14 +65,11 @@
 
 /**
  *	suspend_prepare - Do prep work before entering low-power state.
- *	@state:		State we're entering.
  *
- *	This is common code that is called for each state that we're 
- *	entering. Allocate a console, stop all processes, then make sure
- *	the platform can enter the requested state.
+ *	This is common code that is called for each state that we're entering.
+ *	Run suspend notifiers, allocate a console and stop all processes.
  */
-
-static int suspend_prepare(suspend_state_t state)
+static int suspend_prepare(void)
 {
 	int error;
 	unsigned int free_pages;
@@ -78,6 +77,10 @@
 	if (!pm_ops || !pm_ops->enter)
 		return -EPERM;
 
+	error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
+	if (error)
+		goto Finish;
+
 	pm_prepare_console();
 
 	if (freeze_processes()) {
@@ -85,46 +88,23 @@
 		goto Thaw;
 	}
 
-	if ((free_pages = global_page_state(NR_FREE_PAGES))
-			< FREE_PAGE_NUMBER) {
+	free_pages = global_page_state(NR_FREE_PAGES);
+	if (free_pages < FREE_PAGE_NUMBER) {
 		pr_debug("PM: free some memory\n");
 		shrink_all_memory(FREE_PAGE_NUMBER - free_pages);
 		if (nr_free_pages() < FREE_PAGE_NUMBER) {
 			error = -ENOMEM;
 			printk(KERN_ERR "PM: No enough memory\n");
-			goto Thaw;
 		}
 	}
-
-	if (pm_ops->set_target) {
-		error = pm_ops->set_target(state);
-		if (error)
-			goto Thaw;
-	}
-	suspend_console();
-	error = device_suspend(PMSG_SUSPEND);
-	if (error) {
-		printk(KERN_ERR "Some devices failed to suspend\n");
-		goto Resume_console;
-	}
-	if (pm_ops->prepare) {
-		if ((error = pm_ops->prepare(state)))
-			goto Resume_devices;
-	}
-
-	error = disable_nonboot_cpus();
 	if (!error)
 		return 0;
 
-	enable_nonboot_cpus();
-	pm_finish(state);
- Resume_devices:
-	device_resume();
- Resume_console:
-	resume_console();
  Thaw:
 	thaw_processes();
 	pm_restore_console();
+ Finish:
+	pm_notifier_call_chain(PM_POST_SUSPEND);
 	return error;
 }
 
@@ -140,6 +120,12 @@
 	local_irq_enable();
 }
 
+/**
+ *	suspend_enter - enter the desired system sleep state.
+ *	@state:		state to enter
+ *
+ *	This function should be called after devices have been suspended.
+ */
 int suspend_enter(suspend_state_t state)
 {
 	int error = 0;
@@ -159,23 +145,58 @@
 	return error;
 }
 
+/**
+ *	suspend_devices_and_enter - suspend devices and enter the desired system sleep
+ *			  state.
+ *	@state:		  state to enter
+ */
+int suspend_devices_and_enter(suspend_state_t state)
+{
+	int error;
+
+	if (!pm_ops)
+		return -ENOSYS;
+
+	if (pm_ops->set_target) {
+		error = pm_ops->set_target(state);
+		if (error)
+			return error;
+	}
+	suspend_console();
+	error = device_suspend(PMSG_SUSPEND);
+	if (error) {
+		printk(KERN_ERR "Some devices failed to suspend\n");
+		goto Resume_console;
+	}
+	if (pm_ops->prepare) {
+		error = pm_ops->prepare(state);
+		if (error)
+			goto Resume_devices;
+	}
+	error = disable_nonboot_cpus();
+	if (!error)
+		suspend_enter(state);
+
+	enable_nonboot_cpus();
+	pm_finish(state);
+ Resume_devices:
+	device_resume();
+ Resume_console:
+	resume_console();
+	return error;
+}
 
 /**
  *	suspend_finish - Do final work before exiting suspend sequence.
- *	@state:		State we're coming out of.
  *
  *	Call platform code to clean up, restart processes, and free the 
  *	console that we've allocated. This is not called for suspend-to-disk.
  */
-
-static void suspend_finish(suspend_state_t state)
+static void suspend_finish(void)
 {
-	enable_nonboot_cpus();
-	pm_finish(state);
-	device_resume();
-	resume_console();
 	thaw_processes();
 	pm_restore_console();
+	pm_notifier_call_chain(PM_POST_SUSPEND);
 }
 
 
@@ -207,7 +228,6 @@
  *	Then, do the setup for suspend, enter the state, and cleaup (after
  *	we've woken up).
  */
-
 static int enter_state(suspend_state_t state)
 {
 	int error;
@@ -218,14 +238,14 @@
 		return -EBUSY;
 
 	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
-	if ((error = suspend_prepare(state)))
+	if ((error = suspend_prepare()))
 		goto Unlock;
 
 	pr_debug("PM: Entering %s sleep\n", pm_states[state]);
-	error = suspend_enter(state);
+	error = suspend_devices_and_enter(state);
 
 	pr_debug("PM: Finishing wakeup.\n");
-	suspend_finish(state);
+	suspend_finish();
  Unlock:
 	mutex_unlock(&pm_mutex);
 	return error;
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 5138148..5f24c78 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -25,7 +25,10 @@
  */
 #define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)
 
-extern struct hibernation_ops *hibernation_ops;
+/* kernel/power/disk.c */
+extern int hibernation_snapshot(int platform_mode);
+extern int hibernation_restore(int platform_mode);
+extern int hibernation_platform_enter(void);
 #endif
 
 extern int pfn_is_nosave(unsigned long);
@@ -152,16 +155,34 @@
 extern void free_all_swap_pages(int swap);
 extern int swsusp_swap_in_use(void);
 
+/*
+ * Flags that can be passed from the hibernating kernel to the "boot" kernel in
+ * the image header.
+ */
+#define SF_PLATFORM_MODE	1
+
+/* kernel/power/disk.c */
 extern int swsusp_check(void);
 extern int swsusp_shrink_memory(void);
 extern void swsusp_free(void);
 extern int swsusp_suspend(void);
 extern int swsusp_resume(void);
-extern int swsusp_read(void);
-extern int swsusp_write(void);
+extern int swsusp_read(unsigned int *flags_p);
+extern int swsusp_write(unsigned int flags);
 extern void swsusp_close(void);
-extern int suspend_enter(suspend_state_t state);
 
 struct timeval;
+/* kernel/power/swsusp.c */
 extern void swsusp_show_speed(struct timeval *, struct timeval *,
 				unsigned int, char *);
+
+/* kernel/power/main.c */
+extern int suspend_enter(suspend_state_t state);
+extern int suspend_devices_and_enter(suspend_state_t state);
+extern struct blocking_notifier_head pm_chain_head;
+
+static inline int pm_notifier_call_chain(unsigned long val)
+{
+	return (blocking_notifier_call_chain(&pm_chain_head, val, NULL)
+			== NOTIFY_BAD) ? -EINVAL : 0;
+}
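pm_notifier_call_chain() fans the new PM_SUSPEND_PREPARE / PM_POST_SUSPEND and PM_HIBERNATION_PREPARE / PM_POST_HIBERNATION events out over pm_chain_head; a callback that returns NOTIFY_BAD is turned into -EINVAL for the caller and aborts the transition. A hedged sketch of a subscriber inside kernel/power (the event constants are assumed to come from <linux/notifier.h>; code outside kernel/power would use whatever registration wrapper this series exports rather than touching pm_chain_head directly):

#include <linux/notifier.h>

static int my_pm_event(struct notifier_block *nb, unsigned long event, void *unused)
{
	switch (event) {
	case PM_HIBERNATION_PREPARE:
		/* quiesce before user space is frozen; NOTIFY_BAD would veto */
		return NOTIFY_OK;
	case PM_POST_HIBERNATION:
		/* undo the above after thaw, even if the freeze failed */
		return NOTIFY_OK;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_pm_nb = {
	.notifier_call = my_pm_event,
};

/* at init time: blocking_notifier_chain_register(&pm_chain_head, &my_pm_nb); */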
diff --git a/kernel/power/process.c b/kernel/power/process.c
index e0233d8..3434940 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -40,7 +40,7 @@
 		current->flags |= PF_FROZEN;
 		wmb();
 	}
-	clear_tsk_thread_flag(current, TIF_FREEZE);
+	clear_freeze_flag(current);
 }
 
 /* Refrigerator is place where frozen processes are stored :-). */
@@ -72,20 +72,19 @@
 		schedule();
 	}
 	pr_debug("%s left refrigerator\n", current->comm);
-	current->state = save;
+	__set_current_state(save);
 }
 
-static inline void freeze_process(struct task_struct *p)
+static void freeze_task(struct task_struct *p)
 {
 	unsigned long flags;
 
 	if (!freezing(p)) {
 		rmb();
 		if (!frozen(p)) {
+			set_freeze_flag(p);
 			if (p->state == TASK_STOPPED)
 				force_sig_specific(SIGSTOP, p);
-
-			freeze(p);
 			spin_lock_irqsave(&p->sighand->siglock, flags);
 			signal_wake_up(p, p->state == TASK_STOPPED);
 			spin_unlock_irqrestore(&p->sighand->siglock, flags);
@@ -99,19 +98,14 @@
 
 	if (freezing(p)) {
 		pr_debug("  clean up: %s\n", p->comm);
-		do_not_freeze(p);
+		clear_freeze_flag(p);
 		spin_lock_irqsave(&p->sighand->siglock, flags);
 		recalc_sigpending_and_wake(p);
 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
 	}
 }
 
-static inline int is_user_space(struct task_struct *p)
-{
-	return p->mm && !(p->flags & PF_BORROWED_MM);
-}
-
-static unsigned int try_to_freeze_tasks(int freeze_user_space)
+static int try_to_freeze_tasks(int freeze_user_space)
 {
 	struct task_struct *g, *p;
 	unsigned long end_time;
@@ -122,26 +116,40 @@
 		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (!freezeable(p))
+			if (frozen(p) || !freezeable(p))
 				continue;
 
-			if (frozen(p))
-				continue;
-
-			if (p->state == TASK_TRACED && frozen(p->parent)) {
-				cancel_freezing(p);
-				continue;
+			if (freeze_user_space) {
+				if (p->state == TASK_TRACED &&
+				    frozen(p->parent)) {
+					cancel_freezing(p);
+					continue;
+				}
+				/*
+				 * Kernel threads should not have TIF_FREEZE set
+				 * at this point, so we must ensure that either
+				 * p->mm is not NULL *and* PF_BORROWED_MM is
+				 * unset, or TIF_FREEZE is left unset.
+				 * The task_lock() is necessary to prevent races
+				 * with exit_mm() or use_mm()/unuse_mm() from
+				 * occurring.
+				 */
+				task_lock(p);
+				if (!p->mm || (p->flags & PF_BORROWED_MM)) {
+					task_unlock(p);
+					continue;
+				}
+				freeze_task(p);
+				task_unlock(p);
+			} else {
+				freeze_task(p);
 			}
-			if (freeze_user_space && !is_user_space(p))
-				continue;
-
-			freeze_process(p);
 			if (!freezer_should_skip(p))
 				todo++;
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 		yield();			/* Yield is okay here */
-		if (todo && time_after(jiffies, end_time))
+		if (time_after(jiffies, end_time))
 			break;
 	} while (todo);
 
@@ -152,49 +160,41 @@
 		 * but it cleans up leftover PF_FREEZE requests.
 		 */
 		printk("\n");
-		printk(KERN_ERR "Stopping %s timed out after %d seconds "
+		printk(KERN_ERR "Freezing of %s timed out after %d seconds "
 				"(%d tasks refusing to freeze):\n",
-				freeze_user_space ? "user space processes" :
-					"kernel threads",
+				freeze_user_space ? "user space " : "tasks ",
 				TIMEOUT / HZ, todo);
+		show_state();
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (freeze_user_space && !is_user_space(p))
-				continue;
-
 			task_lock(p);
-			if (freezeable(p) && !frozen(p) &&
-			    !freezer_should_skip(p))
+			if (freezing(p) && !freezer_should_skip(p))
 				printk(KERN_ERR " %s\n", p->comm);
-
 			cancel_freezing(p);
 			task_unlock(p);
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 	}
 
-	return todo;
+	return todo ? -EBUSY : 0;
 }
 
 /**
  *	freeze_processes - tell processes to enter the refrigerator
- *
- *	Returns 0 on success, or the number of processes that didn't freeze,
- *	although they were told to.
  */
 int freeze_processes(void)
 {
-	unsigned int nr_unfrozen;
+	int error;
 
 	printk("Stopping tasks ... ");
-	nr_unfrozen = try_to_freeze_tasks(FREEZER_USER_SPACE);
-	if (nr_unfrozen)
-		return nr_unfrozen;
+	error = try_to_freeze_tasks(FREEZER_USER_SPACE);
+	if (error)
+		return error;
 
 	sys_sync();
-	nr_unfrozen = try_to_freeze_tasks(FREEZER_KERNEL_THREADS);
-	if (nr_unfrozen)
-		return nr_unfrozen;
+	error = try_to_freeze_tasks(FREEZER_KERNEL_THREADS);
+	if (error)
+		return error;
 
 	printk("done.\n");
 	BUG_ON(in_atomic());
@@ -210,7 +210,7 @@
 		if (!freezeable(p))
 			continue;
 
-		if (is_user_space(p) == !thaw_user_space)
+		if (!p->mm == thaw_user_space)
 			continue;
 
 		thaw_process(p);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8b1a1b8..917aba1 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -33,8 +33,9 @@
 #define SWSUSP_SIG	"S1SUSPEND"
 
 struct swsusp_header {
-	char reserved[PAGE_SIZE - 20 - sizeof(sector_t)];
+	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)];
 	sector_t image;
+	unsigned int flags;	/* Flags to pass to the "boot" kernel */
 	char	orig_sig[10];
 	char	sig[10];
 } __attribute__((packed));
@@ -138,7 +139,7 @@
  * Saving part
  */
 
-static int mark_swapfiles(sector_t start)
+static int mark_swapfiles(sector_t start, unsigned int flags)
 {
 	int error;
 
@@ -148,6 +149,7 @@
 		memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
 		memcpy(swsusp_header->sig,SWSUSP_SIG, 10);
 		swsusp_header->image = start;
+		swsusp_header->flags = flags;
 		error = bio_write_page(swsusp_resume_block,
 					swsusp_header, NULL);
 	} else {
@@ -369,6 +371,7 @@
 
 /**
  *	swsusp_write - Write entire image and metadata.
+ *	@flags: flags to pass to the "boot" kernel in the image header
  *
  *	It is important _NOT_ to umount filesystems at this point. We want
  *	them synced (in case something goes wrong) but we DO not want to mark
@@ -376,7 +379,7 @@
  *	correctly, we'll mark system clean, anyway.)
  */
 
-int swsusp_write(void)
+int swsusp_write(unsigned int flags)
 {
 	struct swap_map_handle handle;
 	struct snapshot_handle snapshot;
@@ -415,7 +418,7 @@
 		if (!error) {
 			flush_swap_writer(&handle);
 			printk("S");
-			error = mark_swapfiles(start);
+			error = mark_swapfiles(start, flags);
 			printk("|\n");
 		}
 	}
@@ -540,13 +543,20 @@
 	return error;
 }
 
-int swsusp_read(void)
+/**
+ *	swsusp_read - read the hibernation image.
+ *	@flags_p: flags passed by the "frozen" kernel in the image header should
+ *		  be written into this memory location
+ */
+
+int swsusp_read(unsigned int *flags_p)
 {
 	int error;
 	struct swap_map_handle handle;
 	struct snapshot_handle snapshot;
 	struct swsusp_info *header;
 
+	*flags_p = swsusp_header->flags;
 	if (IS_ERR(resume_bdev)) {
 		pr_debug("swsusp: block device not initialised\n");
 		return PTR_ERR(resume_bdev);
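SF_PLATFORM_MODE travels inside the image header: the hibernating kernel stores it via mark_swapfiles(), and the boot kernel reads it back before deciding whether to run the platform restore callbacks. A condensed sketch of the two sides as wired up in kernel/power/disk.c above (the *_example wrappers are illustrative, error handling trimmed):

static int write_image_example(void)
{
	unsigned int flags = 0;

	if (hibernation_mode == HIBERNATION_PLATFORM)
		flags |= SF_PLATFORM_MODE;
	return swsusp_write(flags);	/* mark_swapfiles() stores flags in the header */
}

static int read_image_example(void)
{
	unsigned int flags;
	int error = swsusp_read(&flags);	/* flags copied back from swsusp_header */

	if (!error)
		error = hibernation_restore(flags & SF_PLATFORM_MODE);
	return error;
}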
diff --git a/kernel/power/user.c b/kernel/power/user.c
index d65305b..bd0723a 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -128,92 +128,6 @@
 	return res;
 }
 
-static inline int platform_prepare(void)
-{
-	int error = 0;
-
-	if (hibernation_ops)
-		error = hibernation_ops->prepare();
-
-	return error;
-}
-
-static inline void platform_finish(void)
-{
-	if (hibernation_ops)
-		hibernation_ops->finish();
-}
-
-static inline int snapshot_suspend(int platform_suspend)
-{
-	int error;
-
-	mutex_lock(&pm_mutex);
-	/* Free memory before shutting down devices. */
-	error = swsusp_shrink_memory();
-	if (error)
-		goto Finish;
-
-	if (platform_suspend) {
-		error = platform_prepare();
-		if (error)
-			goto Finish;
-	}
-	suspend_console();
-	error = device_suspend(PMSG_FREEZE);
-	if (error)
-		goto Resume_devices;
-
-	error = disable_nonboot_cpus();
-	if (!error) {
-		in_suspend = 1;
-		error = swsusp_suspend();
-	}
-	enable_nonboot_cpus();
- Resume_devices:
-	if (platform_suspend)
-		platform_finish();
-
-	device_resume();
-	resume_console();
- Finish:
-	mutex_unlock(&pm_mutex);
-	return error;
-}
-
-static inline int snapshot_restore(int platform_suspend)
-{
-	int error;
-
-	mutex_lock(&pm_mutex);
-	pm_prepare_console();
-	if (platform_suspend) {
-		error = platform_prepare();
-		if (error)
-			goto Finish;
-	}
-	suspend_console();
-	error = device_suspend(PMSG_PRETHAW);
-	if (error)
-		goto Resume_devices;
-
-	error = disable_nonboot_cpus();
-	if (!error)
-		error = swsusp_resume();
-
-	enable_nonboot_cpus();
- Resume_devices:
-	if (platform_suspend)
-		platform_finish();
-
-	device_resume();
-	resume_console();
- Finish:
-	pm_restore_console();
-	mutex_unlock(&pm_mutex);
-	return error;
-}
-
 static int snapshot_ioctl(struct inode *inode, struct file *filp,
                           unsigned int cmd, unsigned long arg)
 {
@@ -237,10 +151,14 @@
 		if (data->frozen)
 			break;
 		mutex_lock(&pm_mutex);
-		if (freeze_processes()) {
-			thaw_processes();
-			error = -EBUSY;
+		error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
+		if (!error) {
+			error = freeze_processes();
+			if (error)
+				thaw_processes();
 		}
+		if (error)
+			pm_notifier_call_chain(PM_POST_HIBERNATION);
 		mutex_unlock(&pm_mutex);
 		if (!error)
 			data->frozen = 1;
@@ -251,6 +169,7 @@
 			break;
 		mutex_lock(&pm_mutex);
 		thaw_processes();
+		pm_notifier_call_chain(PM_POST_HIBERNATION);
 		mutex_unlock(&pm_mutex);
 		data->frozen = 0;
 		break;
@@ -260,7 +179,7 @@
 			error = -EPERM;
 			break;
 		}
-		error = snapshot_suspend(data->platform_suspend);
+		error = hibernation_snapshot(data->platform_suspend);
 		if (!error)
 			error = put_user(in_suspend, (unsigned int __user *)arg);
 		if (!error)
@@ -274,7 +193,7 @@
 			error = -EPERM;
 			break;
 		}
-		error = snapshot_restore(data->platform_suspend);
+		error = hibernation_restore(data->platform_suspend);
 		break;
 
 	case SNAPSHOT_FREE:
@@ -336,47 +255,19 @@
 		break;
 
 	case SNAPSHOT_S2RAM:
-		if (!pm_ops) {
-			error = -ENOSYS;
-			break;
-		}
-
 		if (!data->frozen) {
 			error = -EPERM;
 			break;
 		}
-
 		if (!mutex_trylock(&pm_mutex)) {
 			error = -EBUSY;
 			break;
 		}
-
-		if (pm_ops->prepare) {
-			error = pm_ops->prepare(PM_SUSPEND_MEM);
-			if (error)
-				goto OutS3;
-		}
-
-		/* Put devices to sleep */
-		suspend_console();
-		error = device_suspend(PMSG_SUSPEND);
-		if (error) {
-			printk(KERN_ERR "Failed to suspend some devices.\n");
-		} else {
-			error = disable_nonboot_cpus();
-			if (!error) {
-				/* Enter S3, system is already frozen */
-				suspend_enter(PM_SUSPEND_MEM);
-				enable_nonboot_cpus();
-			}
-			/* Wake up devices */
-			device_resume();
-		}
-		resume_console();
-		if (pm_ops->finish)
-			pm_ops->finish(PM_SUSPEND_MEM);
-
- OutS3:
+		/*
+		 * Tasks are frozen and the notifiers have been called with
+		 * PM_HIBERNATION_PREPARE
+		 */
+		error = suspend_devices_and_enter(PM_SUSPEND_MEM);
 		mutex_unlock(&pm_mutex);
 		break;
 
@@ -386,19 +277,14 @@
 		switch (arg) {
 
 		case PMOPS_PREPARE:
-			if (hibernation_ops) {
-				data->platform_suspend = 1;
-				error = 0;
-			} else {
-				error = -ENOSYS;
-			}
+			data->platform_suspend = 1;
+			error = 0;
 			break;
 
 		case PMOPS_ENTER:
-			if (data->platform_suspend) {
-				kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
-				error = hibernation_ops->enter();
-			}
+			if (data->platform_suspend)
+				error = hibernation_platform_enter();
+
 			break;
 
 		case PMOPS_FINISH:
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index b1d11f1..82a558b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -142,7 +142,7 @@
 		return -EPERM;
 	smp_rmb();
 	if (task->mm)
-		dumpable = task->mm->dumpable;
+		dumpable = get_dumpable(task->mm);
 	if (!dumpable && !capable(CAP_SYS_PTRACE))
 		return -EPERM;
 
@@ -490,3 +490,22 @@
 	return ret;
 }
 #endif /* __ARCH_SYS_PTRACE */
+
+int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
+{
+	unsigned long tmp;
+	int copied;
+
+	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+	if (copied != sizeof(tmp))
+		return -EIO;
+	return put_user(tmp, (unsigned long __user *)data);
+}
+
+int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
+{
+	int copied;
+
+	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
+	return (copied == sizeof(data)) ? 0 : -EIO;
+}
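generic_ptrace_peekdata() and generic_ptrace_pokedata() lift the PTRACE_PEEKDATA / PTRACE_POKEDATA boilerplate out of the per-architecture code, so an arch_ptrace() can delegate to them instead of open-coding access_process_vm(). A schematic sketch of such a call site (not copied from any particular architecture):

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read one word from the tracee and put_user() it at 'data' */
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		/* write the word in 'data' into the tracee's address space */
		return generic_ptrace_pokedata(child, addr, data);
	default:
		return -EIO;	/* real implementations handle many more requests */
	}
}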
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 55ba82a..ddff332 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -40,6 +40,7 @@
 #include <linux/moduleparam.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
+#include <linux/freezer.h>
 #include <linux/cpu.h>
 #include <linux/random.h>
 #include <linux/delay.h>
@@ -518,7 +519,6 @@
 
 	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
 	set_user_nice(current, 19);
-	current->flags |= PF_NOFREEZE;
 
 	do {
 		schedule_timeout_uninterruptible(1);
@@ -558,7 +558,6 @@
 
 	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
 	set_user_nice(current, 19);
-	current->flags |= PF_NOFREEZE;
 
 	do {
 		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
@@ -589,7 +588,6 @@
 
 	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
 	set_user_nice(current, 19);
-	current->flags |= PF_NOFREEZE;
 
 	do {
 		idx = cur_ops->readlock();
diff --git a/kernel/relay.c b/kernel/relay.c
index a615a8f..510fbbd 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -80,7 +80,7 @@
  *
  *	Caller should already have grabbed mmap_sem.
  */
-int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
+static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
 {
 	unsigned long length = vma->vm_end - vma->vm_start;
 	struct file *filp = vma->vm_file;
@@ -145,7 +145,7 @@
  *
  *	Returns channel buffer if successful, %NULL otherwise.
  */
-struct rchan_buf *relay_create_buf(struct rchan *chan)
+static struct rchan_buf *relay_create_buf(struct rchan *chan)
 {
 	struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
 	if (!buf)
@@ -175,7 +175,7 @@
  *
  *	Should only be called from kref_put().
  */
-void relay_destroy_channel(struct kref *kref)
+static void relay_destroy_channel(struct kref *kref)
 {
 	struct rchan *chan = container_of(kref, struct rchan, kref);
 	kfree(chan);
@@ -185,7 +185,7 @@
  *	relay_destroy_buf - destroy an rchan_buf struct and associated buffer
  *	@buf: the buffer struct
  */
-void relay_destroy_buf(struct rchan_buf *buf)
+static void relay_destroy_buf(struct rchan_buf *buf)
 {
 	struct rchan *chan = buf->chan;
 	unsigned int i;
@@ -210,7 +210,7 @@
  *	rchan_buf_struct and the channel buffer.  Should only be called from
  *	kref_put().
  */
-void relay_remove_buf(struct kref *kref)
+static void relay_remove_buf(struct kref *kref)
 {
 	struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
 	buf->chan->cb->remove_buf_file(buf->dentry);
@@ -223,11 +223,10 @@
  *
  *	Returns 1 if the buffer is empty, 0 otherwise.
  */
-int relay_buf_empty(struct rchan_buf *buf)
+static int relay_buf_empty(struct rchan_buf *buf)
 {
 	return (buf->subbufs_produced - buf->subbufs_consumed) ? 0 : 1;
 }
-EXPORT_SYMBOL_GPL(relay_buf_empty);
 
 /**
  *	relay_buf_full - boolean, is the channel buffer full?
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 015fc63..e3055ba6 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -260,6 +260,7 @@
 	int ret;
 
 	current->flags |= PF_MUTEX_TESTER;
+	set_freezable();
 	allow_signal(SIGHUP);
 
 	for(;;) {
diff --git a/kernel/rwsem.c b/kernel/rwsem.c
index 9a87886..1ec620c0 100644
--- a/kernel/rwsem.c
+++ b/kernel/rwsem.c
@@ -20,7 +20,7 @@
 	might_sleep();
 	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 
-	__down_read(sem);
+	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 }
 
 EXPORT_SYMBOL(down_read);
@@ -47,7 +47,7 @@
 	might_sleep();
 	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
 
-	__down_write(sem);
+	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
 }
 
 EXPORT_SYMBOL(down_write);
@@ -111,7 +111,7 @@
 	might_sleep();
 	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
 
-	__down_read(sem);
+	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 }
 
 EXPORT_SYMBOL(down_read_nested);
@@ -130,7 +130,7 @@
 	might_sleep();
 	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
 
-	__down_write_nested(sem, subclass);
+	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
 }
 
 EXPORT_SYMBOL(down_write_nested);
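The down_read()/down_write() family now goes through LOCK_CONTENDED() so the lock-statistics code can see when the uncontended path fails. With CONFIG_LOCK_STAT the macro behaves roughly like the sketch below; without it, it collapses to a plain __down_read(sem) (see <linux/lockdep.h> for the actual definition):

/* roughly: LOCK_CONTENDED(sem, __down_read_trylock, __down_read) */
if (!__down_read_trylock(sem)) {
	lock_contended(&sem->dep_map, _RET_IP_);	/* record where we blocked */
	__down_read(sem);				/* fall back to the slow path */
}
lock_acquired(&sem->dep_map);				/* close out the wait-time sample */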
diff --git a/kernel/sched.c b/kernel/sched.c
index 1c80766..93cf241 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -301,7 +301,7 @@
 	struct lock_class_key rq_lock_key;
 };
 
-static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
@@ -379,6 +379,23 @@
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long long now;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	now = rq_clock(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	return now;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Change a task's ->cfs_rq if it moves across CPUs */
 static inline void set_task_cfs_rq(struct task_struct *p)
@@ -2235,7 +2252,7 @@
 
 			rq = cpu_rq(i);
 
-			if (*sd_idle && !idle_cpu(i))
+			if (*sd_idle && rq->nr_running)
 				*sd_idle = 0;
 
 			/* Bias balancing toward cpus of our domain */
@@ -2257,9 +2274,11 @@
 		/*
 		 * First idle cpu or the first cpu(busiest) in this sched group
 		 * is eligible for doing load balancing at this and above
-		 * domains.
+		 * domains. In the newly idle case, we will allow all the CPUs
+		 * to do the newly idle load balance.
 		 */
-		if (local_group && balance_cpu != this_cpu && balance) {
+		if (idle != CPU_NEWLY_IDLE && local_group &&
+		    balance_cpu != this_cpu && balance) {
 			*balance = 0;
 			goto ret;
 		}
@@ -2677,6 +2696,7 @@
 	unsigned long imbalance;
 	int nr_moved = 0;
 	int sd_idle = 0;
+	int all_pinned = 0;
 	cpumask_t cpus = CPU_MASK_ALL;
 
 	/*
@@ -2715,10 +2735,11 @@
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					minus_1_or_zero(busiest->nr_running),
-					imbalance, sd, CPU_NEWLY_IDLE, NULL);
+					imbalance, sd, CPU_NEWLY_IDLE,
+					&all_pinned);
 		spin_unlock(&busiest->lock);
 
-		if (!nr_moved) {
+		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), cpus);
 			if (!cpus_empty(cpus))
 				goto redo;
@@ -4912,8 +4933,6 @@
 		struct migration_req *req;
 		struct list_head *head;
 
-		try_to_freeze();
-
 		spin_lock_irq(&rq->lock);
 
 		if (cpu_is_offline(cpu)) {
@@ -5147,7 +5166,6 @@
 		p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
 		if (IS_ERR(p))
 			return NOTIFY_BAD;
-		p->flags |= PF_NOFREEZE;
 		kthread_bind(p, cpu);
 		/* Must be high prio: stop_machine expects to yield to it. */
 		rq = task_rq_lock(p, &flags);
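cpu_clock() provides a cheap nanosecond timestamp derived from sched_clock() under the runqueue lock; it is only locally consistent, so deltas should be taken on the same CPU. A hedged sketch of the intended style of use (time_something() and do_something() are stand-ins, not from this patch):

static void time_something(void)
{
	int cpu = get_cpu();		/* pin to one CPU so the delta is meaningful */
	u64 t0 = cpu_clock(cpu);

	do_something();			/* the operation being timed */

	printk(KERN_DEBUG "took %llu ns\n",
	       (unsigned long long)(cpu_clock(cpu) - t0));
	put_cpu();
}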
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8de2677..0f546dd 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -14,6 +14,7 @@
 #include <linux/notifier.h>
 #include <linux/percpu.h>
 #include <linux/cpu.h>
+#include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/rcupdate.h>
 #include <linux/smp.h>
@@ -488,8 +489,6 @@
 
 static int ksoftirqd(void * __bind_cpu)
 {
-	current->flags |= PF_NOFREEZE;
-
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	while (!kthread_should_stop()) {
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 0131e29..708d488 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -10,6 +10,7 @@
 #include <linux/cpu.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/notifier.h>
 #include <linux/module.h>
@@ -116,7 +117,6 @@
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
-	current->flags |= PF_NOFREEZE;
 
 	/* initialize timestamp */
 	touch_softlockup_watchdog();
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 2c6c2bf..cd72424 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -72,7 +72,7 @@
 {
 	preempt_disable();
 	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	_raw_read_lock(lock);
+	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
 }
 EXPORT_SYMBOL(_read_lock);
 
@@ -88,8 +88,8 @@
 	 * _raw_spin_lock_flags() code, because lockdep assumes
 	 * that interrupts are not re-enabled during lock-acquire:
 	 */
-#ifdef CONFIG_PROVE_LOCKING
-	_raw_spin_lock(lock);
+#ifdef CONFIG_LOCKDEP
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 #else
 	_raw_spin_lock_flags(lock, &flags);
 #endif
@@ -102,7 +102,7 @@
 	local_irq_disable();
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	_raw_spin_lock(lock);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
 EXPORT_SYMBOL(_spin_lock_irq);
 
@@ -111,7 +111,7 @@
 	local_bh_disable();
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	_raw_spin_lock(lock);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
 EXPORT_SYMBOL(_spin_lock_bh);
 
@@ -122,7 +122,7 @@
 	local_irq_save(flags);
 	preempt_disable();
 	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	_raw_read_lock(lock);
+	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
 	return flags;
 }
 EXPORT_SYMBOL(_read_lock_irqsave);
@@ -132,7 +132,7 @@
 	local_irq_disable();
 	preempt_disable();
 	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	_raw_read_lock(lock);
+	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
 }
 EXPORT_SYMBOL(_read_lock_irq);
 
@@ -141,7 +141,7 @@
 	local_bh_disable();
 	preempt_disable();
 	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	_raw_read_lock(lock);
+	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
 }
 EXPORT_SYMBOL(_read_lock_bh);
 
@@ -152,7 +152,7 @@
 	local_irq_save(flags);
 	preempt_disable();
 	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	_raw_write_lock(lock);
+	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
 	return flags;
 }
 EXPORT_SYMBOL(_write_lock_irqsave);
@@ -162,7 +162,7 @@
 	local_irq_disable();
 	preempt_disable();
 	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	_raw_write_lock(lock);
+	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
 }
 EXPORT_SYMBOL(_write_lock_irq);
 
@@ -171,7 +171,7 @@
 	local_bh_disable();
 	preempt_disable();
 	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	_raw_write_lock(lock);
+	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
 }
 EXPORT_SYMBOL(_write_lock_bh);
 
@@ -179,7 +179,7 @@
 {
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	_raw_spin_lock(lock);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
 
 EXPORT_SYMBOL(_spin_lock);
@@ -188,7 +188,7 @@
 {
 	preempt_disable();
 	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	_raw_write_lock(lock);
+	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
 }
 
 EXPORT_SYMBOL(_write_lock);
@@ -289,7 +289,7 @@
 {
 	preempt_disable();
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	_raw_spin_lock(lock);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
 
 EXPORT_SYMBOL(_spin_lock_nested);
@@ -305,8 +305,8 @@
 	 * _raw_spin_lock_flags() code, because lockdep assumes
 	 * that interrupts are not re-enabled during lock-acquire:
 	 */
-#ifdef CONFIG_PROVE_SPIN_LOCKING
-	_raw_spin_lock(lock);
+#ifdef CONFIG_LOCKDEP
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 #else
 	_raw_spin_lock_flags(lock, &flags);
 #endif
diff --git a/kernel/sys.c b/kernel/sys.c
index 4d141ae..08562f4 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -100,6 +100,13 @@
 EXPORT_SYMBOL(cad_pid);
 
 /*
+ * If set, this is used for preparing the system to power off.
+ */
+
+void (*pm_power_off_prepare)(void);
+EXPORT_SYMBOL(pm_power_off_prepare);
+
+/*
  *	Notifier list for kernel code which wants to be called
  *	at shutdown. This is used to stop any idling DMA operations
  *	and the like. 
@@ -867,6 +874,8 @@
 void kernel_power_off(void)
 {
 	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
+	if (pm_power_off_prepare)
+		pm_power_off_prepare();
 	printk(KERN_EMERG "Power down.\n");
 	machine_power_off();
 }
@@ -1027,7 +1036,7 @@
 			return -EPERM;
 	}
 	if (new_egid != old_egid) {
-		current->mm->dumpable = suid_dumpable;
+		set_dumpable(current->mm, suid_dumpable);
 		smp_wmb();
 	}
 	if (rgid != (gid_t) -1 ||
@@ -1057,13 +1066,13 @@
 
 	if (capable(CAP_SETGID)) {
 		if (old_egid != gid) {
-			current->mm->dumpable = suid_dumpable;
+			set_dumpable(current->mm, suid_dumpable);
 			smp_wmb();
 		}
 		current->gid = current->egid = current->sgid = current->fsgid = gid;
 	} else if ((gid == current->gid) || (gid == current->sgid)) {
 		if (old_egid != gid) {
-			current->mm->dumpable = suid_dumpable;
+			set_dumpable(current->mm, suid_dumpable);
 			smp_wmb();
 		}
 		current->egid = current->fsgid = gid;
@@ -1094,7 +1103,7 @@
 	switch_uid(new_user);
 
 	if (dumpclear) {
-		current->mm->dumpable = suid_dumpable;
+		set_dumpable(current->mm, suid_dumpable);
 		smp_wmb();
 	}
 	current->uid = new_ruid;
@@ -1150,7 +1159,7 @@
 		return -EAGAIN;
 
 	if (new_euid != old_euid) {
-		current->mm->dumpable = suid_dumpable;
+		set_dumpable(current->mm, suid_dumpable);
 		smp_wmb();
 	}
 	current->fsuid = current->euid = new_euid;
@@ -1200,7 +1209,7 @@
 		return -EPERM;
 
 	if (old_euid != uid) {
-		current->mm->dumpable = suid_dumpable;
+		set_dumpable(current->mm, suid_dumpable);
 		smp_wmb();
 	}
 	current->fsuid = current->euid = uid;
@@ -1245,7 +1254,7 @@
 	}
 	if (euid != (uid_t) -1) {
 		if (euid != current->euid) {
-			current->mm->dumpable = suid_dumpable;
+			set_dumpable(current->mm, suid_dumpable);
 			smp_wmb();
 		}
 		current->euid = euid;
@@ -1295,7 +1304,7 @@
 	}
 	if (egid != (gid_t) -1) {
 		if (egid != current->egid) {
-			current->mm->dumpable = suid_dumpable;
+			set_dumpable(current->mm, suid_dumpable);
 			smp_wmb();
 		}
 		current->egid = egid;
@@ -1341,7 +1350,7 @@
 	    uid == current->suid || uid == current->fsuid || 
 	    capable(CAP_SETUID)) {
 		if (uid != old_fsuid) {
-			current->mm->dumpable = suid_dumpable;
+			set_dumpable(current->mm, suid_dumpable);
 			smp_wmb();
 		}
 		current->fsuid = uid;
@@ -1370,7 +1379,7 @@
 	    gid == current->sgid || gid == current->fsgid || 
 	    capable(CAP_SETGID)) {
 		if (gid != old_fsgid) {
-			current->mm->dumpable = suid_dumpable;
+			set_dumpable(current->mm, suid_dumpable);
 			smp_wmb();
 		}
 		current->fsgid = gid;
@@ -2167,14 +2176,14 @@
 			error = put_user(current->pdeath_signal, (int __user *)arg2);
 			break;
 		case PR_GET_DUMPABLE:
-			error = current->mm->dumpable;
+			error = get_dumpable(current->mm);
 			break;
 		case PR_SET_DUMPABLE:
 			if (arg2 < 0 || arg2 > 1) {
 				error = -EINVAL;
 				break;
 			}
-			current->mm->dumpable = arg2;
+			set_dumpable(current->mm, arg2);
 			break;
 
 		case PR_SET_UNALIGN:
@@ -2286,3 +2295,61 @@
 	}
 	return err ? -EFAULT : 0;
 }
+
+char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
+
+static void argv_cleanup(char **argv, char **envp)
+{
+	argv_free(argv);
+}
+
+/**
+ * orderly_poweroff - Trigger an orderly system poweroff
+ * @force: force poweroff if command execution fails
+ *
+ * This may be called from any context to trigger a system shutdown.
+ * If the orderly shutdown fails, it will force an immediate shutdown.
+ */
+int orderly_poweroff(bool force)
+{
+	int argc;
+	char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
+	static char *envp[] = {
+		"HOME=/",
+		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
+		NULL
+	};
+	int ret = -ENOMEM;
+	struct subprocess_info *info;
+
+	if (argv == NULL) {
+		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
+		       __func__, poweroff_cmd);
+		goto out;
+	}
+
+	info = call_usermodehelper_setup(argv[0], argv, envp);
+	if (info == NULL) {
+		argv_free(argv);
+		goto out;
+	}
+
+	call_usermodehelper_setcleanup(info, argv_cleanup);
+
+	ret = call_usermodehelper_exec(info, UMH_NO_WAIT);
+
+  out:
+	if (ret && force) {
+		printk(KERN_WARNING "Failed to start orderly shutdown: "
+		       "forcing the issue\n");
+
+		/* I guess this should try to kick off some daemon to
+		   sync and poweroff asap.  Or not even bother syncing
+		   if we're doing an emergency shutdown? */
+		emergency_sync();
+		kernel_power_off();
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(orderly_poweroff);
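orderly_poweroff() gives kernel code a safe way to ask user space to shut the machine down by running the helper named in poweroff_cmd (made tunable through sysctl in the kernel/sysctl.c hunk below), with an optional hard fallback to kernel_power_off(). A hedged sketch of a caller; the overtemperature context is invented for illustration:

static void my_critical_temp_handler(void)
{
	printk(KERN_CRIT "critical temperature reached, shutting down\n");
	/* Launch /sbin/poweroff (or whatever kernel.poweroff_cmd names);
	 * if the helper cannot even be started, force the power-off. */
	orderly_poweroff(true);
}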
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2ce7acf..2222998 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -29,6 +29,7 @@
 #include <linux/utsname.h>
 #include <linux/capability.h>
 #include <linux/smp_lock.h>
+#include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/kobject.h>
@@ -45,13 +46,11 @@
 #include <linux/syscalls.h>
 #include <linux/nfs_fs.h>
 #include <linux/acpi.h>
+#include <linux/reboot.h>
 
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 
-extern int proc_nr_files(ctl_table *table, int write, struct file *filp,
-                     void __user *buffer, size_t *lenp, loff_t *ppos);
-
 #ifdef CONFIG_X86
 #include <asm/nmi.h>
 #include <asm/stacktrace.h>
@@ -79,6 +78,7 @@
 extern int compat_log;
 extern int maps_protect;
 extern int sysctl_stat_interval;
+extern int audit_argv_kb;
 
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
@@ -161,6 +161,8 @@
 int sysctl_legacy_va_layout;
 #endif
 
+extern int prove_locking;
+extern int lock_stat;
 
 /* The default sysctl tables: */
 
@@ -282,6 +284,26 @@
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
+#ifdef CONFIG_PROVE_LOCKING
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "prove_locking",
+		.data		= &prove_locking,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_LOCK_STAT
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "lock_stat",
+		.data		= &lock_stat,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
 	{
 		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "sched_features",
@@ -307,6 +329,16 @@
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
+#ifdef CONFIG_AUDITSYSCALL
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "audit_argv_kb",
+		.data		= &audit_argv_kb,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
 	{
 		.ctl_name	= KERN_CORE_PATTERN,
 		.procname	= "core_pattern",
@@ -661,7 +693,7 @@
 	{
 		.ctl_name	= KERN_ACPI_VIDEO_FLAGS,
 		.procname	= "acpi_video_flags",
-		.data		= &acpi_video_flags,
+		.data		= &acpi_realmode_flags,
 		.maxlen		= sizeof (unsigned long),
 		.mode		= 0644,
 		.proc_handler	= &proc_doulongvec_minmax,
@@ -707,13 +739,26 @@
 		.proc_handler   = &proc_dointvec,
 	},
 #endif
-
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "poweroff_cmd",
+		.data		= &poweroff_cmd,
+		.maxlen		= POWEROFF_CMD_PATH_LEN,
+		.mode		= 0644,
+		.proc_handler	= &proc_dostring,
+		.strategy	= &sysctl_string,
+	},
+/*
+ * NOTE: do not add new entries to this table unless you have read
+ * Documentation/sysctl/ctl_unnumbered.txt
+ */
 	{ .ctl_name = 0 }
 };
 
 /* Constants for minimum and maximum testing in vm_table.
    We use these as one-element integer vectors. */
 static int zero;
+static int two = 2;
 static int one_hundred = 100;
 
 
@@ -826,6 +871,14 @@
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	 },
+	 {
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "hugepages_treat_as_movable",
+		.data		= &hugepages_treat_as_movable,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &hugetlb_treat_movable_handler,
+	},
 #endif
 	{
 		.ctl_name	= VM_LOWMEM_RESERVE_RATIO,
@@ -1096,7 +1149,10 @@
 		.data		= &lease_break_time,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &zero,
+		.extra2		= &two,
 	},
 	{
 		.ctl_name	= FS_AIO_NR,
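The new entries are registered with CTL_UNNUMBERED, so they are reachable only under /proc/sys and have no binary sysctl number. A small hedged user-space sketch of flipping the lockdep knobs at run time (standalone program, illustrative only; needs CONFIG_PROVE_LOCKING / CONFIG_LOCK_STAT and root):

#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	write_sysctl("/proc/sys/kernel/lock_stat", "1");	/* start collecting stats */
	write_sysctl("/proc/sys/kernel/prove_locking", "0");	/* disable proving */
	return 0;
}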
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 728cedf..8969877 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -401,7 +401,7 @@
  * this is optimized for the most common adjustments of -1,0,1,
  * for other values we can do a bit more work.
  */
-static void clocksource_adjust(struct clocksource *clock, s64 offset)
+static void clocksource_adjust(s64 offset)
 {
 	s64 error, interval = clock->cycle_interval;
 	int adj;
@@ -476,7 +476,7 @@
 	}
 
 	/* correct the clock when NTP error is too big */
-	clocksource_adjust(clock, offset);
+	clocksource_adjust(offset);
 
 	/* store full nanoseconds into xtime */
 	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 8bbcfb7..e5edc3a 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -38,7 +38,7 @@
 
 static void print_name_offset(struct seq_file *m, void *sym)
 {
-	char symname[KSYM_NAME_LEN+1];
+	char symname[KSYM_NAME_LEN];
 
 	if (lookup_symbol_name((unsigned long)sym, symname) < 0)
 		SEQ_printf(m, "<%p>", sym);
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 9b8a826..8ed62fd 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -269,7 +269,7 @@
 
 static void print_name_offset(struct seq_file *m, unsigned long addr)
 {
-	char symname[KSYM_NAME_LEN+1];
+	char symname[KSYM_NAME_LEN];
 
 	if (lookup_symbol_name(addr, symname) < 0)
 		seq_printf(m, "<%p>", (void *)addr);
diff --git a/kernel/timer.c b/kernel/timer.c
index 1258371..d1e8b97 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -103,14 +103,14 @@
 static inline void timer_set_deferrable(struct timer_list *timer)
 {
 	timer->base = ((tvec_base_t *)((unsigned long)(timer->base) |
-	                               TBASE_DEFERRABLE_FLAG));
+				       TBASE_DEFERRABLE_FLAG));
 }
 
 static inline void
 timer_set_base(struct timer_list *timer, tvec_base_t *new_base)
 {
 	timer->base = (tvec_base_t *)((unsigned long)(new_base) |
-	                              tbase_get_deferrable(timer->base));
+				      tbase_get_deferrable(timer->base));
 }
 
 /**
@@ -445,10 +445,10 @@
 void add_timer_on(struct timer_list *timer, int cpu)
 {
 	tvec_base_t *base = per_cpu(tvec_bases, cpu);
-  	unsigned long flags;
+	unsigned long flags;
 
 	timer_stats_timer_set_start_info(timer);
-  	BUG_ON(timer_pending(timer) || !timer->function);
+	BUG_ON(timer_pending(timer) || !timer->function);
 	spin_lock_irqsave(&base->lock, flags);
 	timer_set_base(timer, base);
 	internal_add_timer(base, timer);
@@ -627,7 +627,7 @@
 	while (time_after_eq(jiffies, base->timer_jiffies)) {
 		struct list_head work_list;
 		struct list_head *head = &work_list;
- 		int index = base->timer_jiffies & TVR_MASK;
+		int index = base->timer_jiffies & TVR_MASK;
 
 		/*
 		 * Cascade timers:
@@ -644,8 +644,8 @@
 			unsigned long data;
 
 			timer = list_first_entry(head, struct timer_list,entry);
- 			fn = timer->function;
- 			data = timer->data;
+			fn = timer->function;
+			data = timer->data;
 
 			timer_stats_account_timer(timer);
 
@@ -689,8 +689,8 @@
 	index = slot = timer_jiffies & TVR_MASK;
 	do {
 		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
- 			if (tbase_get_deferrable(nte->base))
- 				continue;
+			if (tbase_get_deferrable(nte->base))
+				continue;
 
 			found = 1;
 			expires = nte->expires;
@@ -834,7 +834,7 @@
 	if (rcu_pending(cpu))
 		rcu_check_callbacks(cpu, user_tick);
 	scheduler_tick();
- 	run_posix_cpu_timers(p);
+	run_posix_cpu_timers(p);
 }
 
 /*
@@ -909,7 +909,7 @@
 	update_wall_time();
 	calc_load(ticks);
 }
-  
+
 /*
  * The 64-bit jiffies value is not atomic - you MUST NOT read it
  * without sampling the sequence number in xtime_lock.
@@ -1105,7 +1105,7 @@
 /**
  * do_sysinfo - fill in sysinfo struct
  * @info: pointer to buffer to fill
- */ 
+ */
 int do_sysinfo(struct sysinfo *info)
 {
 	unsigned long mem_total, sav_total;
@@ -1221,7 +1221,8 @@
 			/*
 			 * The APs use this path later in boot
 			 */
-			base = kmalloc_node(sizeof(*base), GFP_KERNEL,
+			base = kmalloc_node(sizeof(*base),
+						GFP_KERNEL | __GFP_ZERO,
 						cpu_to_node(cpu));
 			if (!base)
 				return -ENOMEM;
@@ -1232,7 +1233,6 @@
 				kfree(base);
 				return -ENOMEM;
 			}
-			memset(base, 0, sizeof(*base));
 			per_cpu(tvec_bases, cpu) = base;
 		} else {
 			/*
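
This hunk (and the genalloc.c and allocpercpu.c hunks further down) folds the open-coded memset() into the allocation by passing __GFP_ZERO. A minimal sketch of the idiom, using a made-up structure for illustration:

struct foo *f;

/* before: allocate, then zero by hand */
f = kmalloc_node(sizeof(*f), GFP_KERNEL, cpu_to_node(cpu));
if (f)
	memset(f, 0, sizeof(*f));

/* after: the allocator zeroes the memory for us */
f = kmalloc_node(sizeof(*f), GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
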
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d7d3fa3..58e5c15 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -282,8 +282,8 @@
 	struct cpu_workqueue_struct *cwq = __cwq;
 	DEFINE_WAIT(wait);
 
-	if (!cwq->wq->freezeable)
-		current->flags |= PF_NOFREEZE;
+	if (cwq->wq->freezeable)
+		set_freezable();
 
 	set_user_nice(current, -5);
 
@@ -752,18 +752,17 @@
 	if (cwq->thread == NULL)
 		return;
 
+	flush_cpu_workqueue(cwq);
 	/*
-	 * If the caller is CPU_DEAD the single flush_cpu_workqueue()
-	 * is not enough, a concurrent flush_workqueue() can insert a
-	 * barrier after us.
+	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
+	 * a concurrent flush_workqueue() can insert a barrier after us.
+	 * However, in that case run_workqueue() won't return and check
+	 * kthread_should_stop() until it flushes all work_struct's.
 	 * When ->worklist becomes empty it is safe to exit because no
 	 * more work_structs can be queued on this cwq: flush_workqueue
 	 * checks list_empty(), and a "normal" queue_work() can't use
 	 * a dead CPU.
 	 */
-	while (flush_cpu_workqueue(cwq))
-		;
-
 	kthread_stop(cwq->thread);
 	cwq->thread = NULL;
 }
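
The worker thread now opts in to the freezer with set_freezable() rather than clearing PF_NOFREEZE, following the convention that kernel threads start out non-freezable. A hedged sketch of that convention in a generic kthread (the loop body is illustrative only):

static int my_worker(void *data)
{
	set_freezable();			/* opt in to the freezer */

	while (!kthread_should_stop()) {
		try_to_freeze();		/* park here across suspend/resume */
		/* ... process pending work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
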
diff --git a/lib/Kconfig b/lib/Kconfig
index 3eb29d5..e5c2c51 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -41,6 +41,14 @@
 	  kernel tree does. Such modules that use library CRC32 functions
 	  require M here.
 
+config CRC7
+	tristate "CRC7 functions"
+	help
+	  This option is provided for the case where no in-kernel-tree
+	  modules require CRC7 functions, but a module built outside
+	  the kernel tree does. Such modules that use library CRC7
+	  functions require M here.
+
 config LIBCRC32C
 	tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
 	help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6408440..f3e0c2a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -283,6 +283,17 @@
 	select KALLSYMS
 	select KALLSYMS_ALL
 
+config LOCK_STAT
+	bool "Lock usage statistics"
+	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	select LOCKDEP
+	select DEBUG_SPINLOCK
+	select DEBUG_MUTEXES
+	select DEBUG_LOCK_ALLOC
+	default n
+	help
+	 This feature enables tracking lock contention points.
+
 config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
 	depends on DEBUG_KERNEL && LOCKDEP
diff --git a/lib/Makefile b/lib/Makefile
index d7a93ff..6149663 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -5,7 +5,7 @@
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o \
 	 idr.o int_sqrt.o bitmap.o extable.o prio_tree.o \
-	 sha1.o irq_regs.o reciprocal_div.o
+	 sha1.o irq_regs.o reciprocal_div.o argv_split.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -13,7 +13,7 @@
 lib-y	+= kobject.o kref.o kobject_uevent.o klist.o
 
 obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
-	 bust_spinlocks.o hexdump.o check_signature.o
+	 bust_spinlocks.o hexdump.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -21,7 +21,7 @@
 endif
 
 obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
-obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
+obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
@@ -43,6 +43,7 @@
 obj-$(CONFIG_CRC16)	+= crc16.o
 obj-$(CONFIG_CRC_ITU_T)	+= crc-itu-t.o
 obj-$(CONFIG_CRC32)	+= crc32.o
+obj-$(CONFIG_CRC7)	+= crc7.o
 obj-$(CONFIG_LIBCRC32C)	+= libcrc32c.o
 obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
 
diff --git a/lib/argv_split.c b/lib/argv_split.c
new file mode 100644
index 0000000..4096ed4
--- /dev/null
+++ b/lib/argv_split.c
@@ -0,0 +1,105 @@
+/*
+ * Helper function for splitting a string into an argv-like array.
+ */
+
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/bug.h>
+
+static const char *skip_sep(const char *cp)
+{
+	while (*cp && isspace(*cp))
+		cp++;
+
+	return cp;
+}
+
+static const char *skip_arg(const char *cp)
+{
+	while (*cp && !isspace(*cp))
+		cp++;
+
+	return cp;
+}
+
+static int count_argc(const char *str)
+{
+	int count = 0;
+
+	while (*str) {
+		str = skip_sep(str);
+		if (*str) {
+			count++;
+			str = skip_arg(str);
+		}
+	}
+
+	return count;
+}
+
+/**
+ * argv_free - free an argv
+ * @argv: the argument vector to be freed
+ *
+ * Frees an argv and the strings it points to.
+ */
+void argv_free(char **argv)
+{
+	char **p;
+	for (p = argv; *p; p++)
+		kfree(*p);
+
+	kfree(argv);
+}
+EXPORT_SYMBOL(argv_free);
+
+/**
+ * argv_split - split a string at whitespace, returning an argv
+ * @gfp: the GFP mask used to allocate memory
+ * @str: the string to be split
+ * @argcp: returned argument count
+ *
+ * Returns an array of pointers to strings which are split out from
+ * @str.  This is performed by strictly splitting on white-space; no
+ * quote processing is performed.  Multiple whitespace characters are
+ * considered to be a single argument separator.  The returned array
+ * is always NULL-terminated.  Returns NULL on memory allocation
+ * failure.
+ */
+char **argv_split(gfp_t gfp, const char *str, int *argcp)
+{
+	int argc = count_argc(str);
+	char **argv = kzalloc(sizeof(*argv) * (argc+1), gfp);
+	char **argvp;
+
+	if (argv == NULL)
+		goto out;
+
+	*argcp = argc;
+	argvp = argv;
+
+	while (*str) {
+		str = skip_sep(str);
+
+		if (*str) {
+			const char *p = str;
+			char *t;
+
+			str = skip_arg(str);
+
+			t = kstrndup(p, str-p, gfp);
+			if (t == NULL)
+				goto fail;
+			*argvp++ = t;
+		}
+	}
+	*argvp = NULL;
+
+  out:
+	return argv;
+
+  fail:
+	argv_free(argv);
+	return NULL;
+}
+EXPORT_SYMBOL(argv_split);
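
A minimal usage sketch for the new helpers; the command string and the pr_debug() call are illustrative, not taken from a real caller:

int argc;
char **argv;

argv = argv_split(GFP_KERNEL, "/sbin/poweroff -f", &argc);
if (!argv)
	return -ENOMEM;

/* argv[0] .. argv[argc - 1] hold the words, argv[argc] is NULL */
pr_debug("argc=%d argv[0]=%s\n", argc, argv[0]);

argv_free(argv);
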
diff --git a/lib/crc7.c b/lib/crc7.c
new file mode 100644
index 0000000..f1c3a14
--- /dev/null
+++ b/lib/crc7.c
@@ -0,0 +1,68 @@
+/*
+ *      crc7.c
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc7.h>
+
+
+/* Table for CRC-7 (polynomial x^7 + x^3 + 1) */
+const u8 crc7_syndrome_table[256] = {
+	0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f,
+	0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77,
+	0x19, 0x10, 0x0b, 0x02, 0x3d, 0x34, 0x2f, 0x26,
+	0x51, 0x58, 0x43, 0x4a, 0x75, 0x7c, 0x67, 0x6e,
+	0x32, 0x3b, 0x20, 0x29, 0x16, 0x1f, 0x04, 0x0d,
+	0x7a, 0x73, 0x68, 0x61, 0x5e, 0x57, 0x4c, 0x45,
+	0x2b, 0x22, 0x39, 0x30, 0x0f, 0x06, 0x1d, 0x14,
+	0x63, 0x6a, 0x71, 0x78, 0x47, 0x4e, 0x55, 0x5c,
+	0x64, 0x6d, 0x76, 0x7f, 0x40, 0x49, 0x52, 0x5b,
+	0x2c, 0x25, 0x3e, 0x37, 0x08, 0x01, 0x1a, 0x13,
+	0x7d, 0x74, 0x6f, 0x66, 0x59, 0x50, 0x4b, 0x42,
+	0x35, 0x3c, 0x27, 0x2e, 0x11, 0x18, 0x03, 0x0a,
+	0x56, 0x5f, 0x44, 0x4d, 0x72, 0x7b, 0x60, 0x69,
+	0x1e, 0x17, 0x0c, 0x05, 0x3a, 0x33, 0x28, 0x21,
+	0x4f, 0x46, 0x5d, 0x54, 0x6b, 0x62, 0x79, 0x70,
+	0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31, 0x38,
+	0x41, 0x48, 0x53, 0x5a, 0x65, 0x6c, 0x77, 0x7e,
+	0x09, 0x00, 0x1b, 0x12, 0x2d, 0x24, 0x3f, 0x36,
+	0x58, 0x51, 0x4a, 0x43, 0x7c, 0x75, 0x6e, 0x67,
+	0x10, 0x19, 0x02, 0x0b, 0x34, 0x3d, 0x26, 0x2f,
+	0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c,
+	0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04,
+	0x6a, 0x63, 0x78, 0x71, 0x4e, 0x47, 0x5c, 0x55,
+	0x22, 0x2b, 0x30, 0x39, 0x06, 0x0f, 0x14, 0x1d,
+	0x25, 0x2c, 0x37, 0x3e, 0x01, 0x08, 0x13, 0x1a,
+	0x6d, 0x64, 0x7f, 0x76, 0x49, 0x40, 0x5b, 0x52,
+	0x3c, 0x35, 0x2e, 0x27, 0x18, 0x11, 0x0a, 0x03,
+	0x74, 0x7d, 0x66, 0x6f, 0x50, 0x59, 0x42, 0x4b,
+	0x17, 0x1e, 0x05, 0x0c, 0x33, 0x3a, 0x21, 0x28,
+	0x5f, 0x56, 0x4d, 0x44, 0x7b, 0x72, 0x69, 0x60,
+	0x0e, 0x07, 0x1c, 0x15, 0x2a, 0x23, 0x38, 0x31,
+	0x46, 0x4f, 0x54, 0x5d, 0x62, 0x6b, 0x70, 0x79
+};
+EXPORT_SYMBOL(crc7_syndrome_table);
+
+/**
+ * crc7 - update the CRC7 for the data buffer
+ * @crc:     previous CRC7 value
+ * @buffer:  data pointer
+ * @len:     number of bytes in the buffer
+ * Context: any
+ *
+ * Returns the updated CRC7 value.
+ */
+u8 crc7(u8 crc, const u8 *buffer, size_t len)
+{
+	while (len--)
+		crc = crc7_byte(crc, *buffer++);
+	return crc;
+}
+EXPORT_SYMBOL(crc7);
+
+MODULE_DESCRIPTION("CRC7 calculations");
+MODULE_LICENSE("GPL");
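
crc7() is a running update: start from 0 and feed the data in as many chunks as convenient. A usage sketch (the buffers are assumptions, and the final shift-plus-end-bit is how SD/MMC-style framing typically transmits a 7-bit CRC, not something this file requires):

u8 crc = 0;
u8 framed;

crc = crc7(crc, cmd_buf, cmd_len);	/* may be called repeatedly */
crc = crc7(crc, arg_buf, arg_len);

framed = (crc << 1) | 0x01;		/* 7-bit CRC followed by an end bit */
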
diff --git a/lib/genalloc.c b/lib/genalloc.c
index eb7c2ba..f6d276d 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -54,11 +54,10 @@
 	int nbytes = sizeof(struct gen_pool_chunk) +
 				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
 
-	chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
 	if (unlikely(chunk == NULL))
 		return -1;
 
-	memset(chunk, 0, nbytes);
 	spin_lock_init(&chunk->lock);
 	chunk->start_addr = addr;
 	chunk->end_addr = addr + size;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 12e311d..6a80c78 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -33,25 +33,15 @@
 static struct sock *uevent_sock;
 #endif
 
-static char *action_to_string(enum kobject_action action)
-{
-	switch (action) {
-	case KOBJ_ADD:
-		return "add";
-	case KOBJ_REMOVE:
-		return "remove";
-	case KOBJ_CHANGE:
-		return "change";
-	case KOBJ_OFFLINE:
-		return "offline";
-	case KOBJ_ONLINE:
-		return "online";
-	case KOBJ_MOVE:
-		return "move";
-	default:
-		return NULL;
-	}
-}
+/* the strings here must match the enum in include/linux/kobject.h */
+const char *kobject_actions[] = {
+	"add",
+	"remove",
+	"change",
+	"move",
+	"online",
+	"offline",
+};
 
 /**
  * kobject_uevent_env - send an uevent with environmental data
@@ -83,7 +73,7 @@
 
 	pr_debug("%s\n", __FUNCTION__);
 
-	action_string = action_to_string(action);
+	action_string = kobject_actions[action];
 	if (!action_string) {
 		pr_debug("kobject attempted to send uevent without action_string!\n");
 		return -EINVAL;
@@ -208,7 +198,7 @@
 		argv [0] = uevent_helper;
 		argv [1] = (char *)subsystem;
 		argv [2] = NULL;
-		call_usermodehelper (argv[0], argv, envp, 0);
+		call_usermodehelper (argv[0], argv, envp, UMH_WAIT_EXEC);
 	}
 
 exit:
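
With the switch statement gone, the action string is found by indexing kobject_actions[] directly, so the array order must stay in lockstep with enum kobject_action. A sketch of a defensively bounded lookup (the range check is an assumption; the code above relies on callers passing valid enum values):

const char *action_string = NULL;

if (action < ARRAY_SIZE(kobject_actions))
	action_string = kobject_actions[action];
if (!action_string)
	return -EINVAL;
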
diff --git a/mm/Kconfig b/mm/Kconfig
index 086af70..8618722 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -163,6 +163,10 @@
 	default "0" if !ZONE_DMA
 	default "1"
 
+config BOUNCE
+	def_bool y
+	depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM)
+
 config NR_QUICK
 	int
 	depends on QUICKLIST
diff --git a/mm/Makefile b/mm/Makefile
index a9148ea..245e33a 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -13,9 +13,7 @@
 			   prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
 			   $(mmu-y)
 
-ifeq ($(CONFIG_MMU)$(CONFIG_BLOCK),yy)
-obj-y			+= bounce.o
-endif
+obj-$(CONFIG_BOUNCE)	+= bounce.o
 obj-$(CONFIG_SWAP)	+= page_io.o swap_state.o swapfile.o thrash.o
 obj-$(CONFIG_HUGETLBFS)	+= hugetlb.o
 obj-$(CONFIG_NUMA) 	+= mempolicy.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index b2486cf..00b0262 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -53,12 +53,9 @@
 	int node = cpu_to_node(cpu);
 
 	BUG_ON(pdata->ptrs[cpu]);
-	if (node_online(node)) {
-		/* FIXME: kzalloc_node(size, gfp, node) */
-		pdata->ptrs[cpu] = kmalloc_node(size, gfp, node);
-		if (pdata->ptrs[cpu])
-			memset(pdata->ptrs[cpu], 0, size);
-	} else
+	if (node_online(node))
+		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
+	else
 		pdata->ptrs[cpu] = kzalloc(size, gfp);
 	return pdata->ptrs[cpu];
 }
diff --git a/mm/filemap.c b/mm/filemap.c
index 100b99c..49a6fe3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -867,13 +867,11 @@
 {
 	struct inode *inode = mapping->host;
 	unsigned long index;
-	unsigned long end_index;
 	unsigned long offset;
 	unsigned long last_index;
 	unsigned long next_index;
 	unsigned long prev_index;
 	unsigned int prev_offset;
-	loff_t isize;
 	struct page *cached_page;
 	int error;
 	struct file_ra_state ra = *_ra;
@@ -886,41 +884,57 @@
 	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
 
-	isize = i_size_read(inode);
-	if (!isize)
-		goto out;
-
-	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
 	for (;;) {
 		struct page *page;
+		unsigned long end_index;
+		loff_t isize;
 		unsigned long nr, ret;
 
-		/* nr is the maximum number of bytes to copy from this page */
-		nr = PAGE_CACHE_SIZE;
-		if (index >= end_index) {
-			if (index > end_index)
-				goto out;
-			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
-			if (nr <= offset) {
-				goto out;
-			}
-		}
-		nr = nr - offset;
-
 		cond_resched();
-		if (index == next_index)
-			next_index = page_cache_readahead(mapping, &ra, filp,
-					index, last_index - index);
-
 find_page:
 		page = find_get_page(mapping, index);
-		if (unlikely(page == NULL)) {
-			handle_ra_miss(mapping, &ra, index);
-			goto no_cached_page;
+		if (!page) {
+			page_cache_sync_readahead(mapping,
+					&ra, filp,
+					index, last_index - index);
+			page = find_get_page(mapping, index);
+			if (unlikely(page == NULL))
+				goto no_cached_page;
+		}
+		if (PageReadahead(page)) {
+			page_cache_async_readahead(mapping,
+					&ra, filp, page,
+					index, last_index - index);
 		}
 		if (!PageUptodate(page))
 			goto page_not_up_to_date;
 page_ok:
+		/*
+		 * i_size must be checked after we know the page is Uptodate.
+		 *
+		 * Checking i_size after that check allows us to calculate
+		 * the correct value for "nr", which means the zero-filled
+		 * part of the page is not copied back to userspace (unless
+		 * another truncate extends the file - this is desired though).
+		 */
+
+		isize = i_size_read(inode);
+		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+		if (unlikely(!isize || index > end_index)) {
+			page_cache_release(page);
+			goto out;
+		}
+
+		/* nr is the maximum number of bytes to copy from this page */
+		nr = PAGE_CACHE_SIZE;
+		if (index == end_index) {
+			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+			if (nr <= offset) {
+				page_cache_release(page);
+				goto out;
+			}
+		}
+		nr = nr - offset;
 
 		/* If users can be writing to this page using arbitrary
 		 * virtual addresses, take care about potential aliasing
@@ -1007,31 +1021,6 @@
 			unlock_page(page);
 		}
 
-		/*
-		 * i_size must be checked after we have done ->readpage.
-		 *
-		 * Checking i_size after the readpage allows us to calculate
-		 * the correct value for "nr", which means the zero-filled
-		 * part of the page is not copied back to userspace (unless
-		 * another truncate extends the file - this is desired though).
-		 */
-		isize = i_size_read(inode);
-		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
-		if (unlikely(!isize || index > end_index)) {
-			page_cache_release(page);
-			goto out;
-		}
-
-		/* nr is the maximum number of bytes to copy from this page */
-		nr = PAGE_CACHE_SIZE;
-		if (index == end_index) {
-			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
-			if (nr <= offset) {
-				page_cache_release(page);
-				goto out;
-			}
-		}
-		nr = nr - offset;
 		goto page_ok;
 
 readpage_error:
@@ -1067,6 +1056,7 @@
 
 out:
 	*_ra = ra;
+	_ra->prev_index = prev_index;
 
 	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
 	if (cached_page)
@@ -1317,62 +1307,62 @@
 #define MMAP_LOTSAMISS  (100)
 
 /**
- * filemap_nopage - read in file data for page fault handling
- * @area:	the applicable vm_area
- * @address:	target address to read in
- * @type:	returned with VM_FAULT_{MINOR,MAJOR} if not %NULL
+ * filemap_fault - read in file data for page fault handling
+ * @vma:	vma in which the fault was taken
+ * @vmf:	struct vm_fault containing details of the fault
  *
- * filemap_nopage() is invoked via the vma operations vector for a
+ * filemap_fault() is invoked via the vma operations vector for a
  * mapped memory region to read in file data during a page fault.
  *
  * The goto's are kind of ugly, but this streamlines the normal case of having
  * it in the page cache, and handles the special cases reasonably without
  * having a lot of duplicated code.
  */
-struct page *filemap_nopage(struct vm_area_struct *area,
-				unsigned long address, int *type)
+int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	int error;
-	struct file *file = area->vm_file;
+	struct file *file = vma->vm_file;
 	struct address_space *mapping = file->f_mapping;
 	struct file_ra_state *ra = &file->f_ra;
 	struct inode *inode = mapping->host;
 	struct page *page;
-	unsigned long size, pgoff;
-	int did_readaround = 0, majmin = VM_FAULT_MINOR;
+	unsigned long size;
+	int did_readaround = 0;
+	int ret = 0;
 
-	pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
-
-retry_all:
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	if (pgoff >= size)
+	if (vmf->pgoff >= size)
 		goto outside_data_content;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (VM_RandomReadHint(area))
+	if (VM_RandomReadHint(vma))
 		goto no_cached_page;
 
 	/*
-	 * The readahead code wants to be told about each and every page
-	 * so it can build and shrink its windows appropriately
-	 *
-	 * For sequential accesses, we use the generic readahead logic.
-	 */
-	if (VM_SequentialReadHint(area))
-		page_cache_readahead(mapping, ra, file, pgoff, 1);
-
-	/*
 	 * Do we have something in the page cache already?
 	 */
 retry_find:
-	page = find_get_page(mapping, pgoff);
+	page = find_lock_page(mapping, vmf->pgoff);
+	/*
+	 * For sequential accesses, we use the generic readahead logic.
+	 */
+	if (VM_SequentialReadHint(vma)) {
+		if (!page) {
+			page_cache_sync_readahead(mapping, ra, file,
+							   vmf->pgoff, 1);
+			page = find_lock_page(mapping, vmf->pgoff);
+			if (!page)
+				goto no_cached_page;
+		}
+		if (PageReadahead(page)) {
+			page_cache_async_readahead(mapping, ra, file, page,
+							   vmf->pgoff, 1);
+		}
+	}
+
 	if (!page) {
 		unsigned long ra_pages;
 
-		if (VM_SequentialReadHint(area)) {
-			handle_ra_miss(mapping, ra, pgoff);
-			goto no_cached_page;
-		}
 		ra->mmap_miss++;
 
 		/*
@@ -1387,7 +1377,7 @@
 		 * check did_readaround, as this is an inner loop.
 		 */
 		if (!did_readaround) {
-			majmin = VM_FAULT_MAJOR;
+			ret = VM_FAULT_MAJOR;
 			count_vm_event(PGMAJFAULT);
 		}
 		did_readaround = 1;
@@ -1395,11 +1385,11 @@
 		if (ra_pages) {
 			pgoff_t start = 0;
 
-			if (pgoff > ra_pages / 2)
-				start = pgoff - ra_pages / 2;
+			if (vmf->pgoff > ra_pages / 2)
+				start = vmf->pgoff - ra_pages / 2;
 			do_page_cache_readahead(mapping, file, start, ra_pages);
 		}
-		page = find_get_page(mapping, pgoff);
+		page = find_lock_page(mapping, vmf->pgoff);
 		if (!page)
 			goto no_cached_page;
 	}
@@ -1408,35 +1398,42 @@
 		ra->mmap_hit++;
 
 	/*
-	 * Ok, found a page in the page cache, now we need to check
-	 * that it's up-to-date.
+	 * We have a locked page in the page cache, now we need to check
+	 * that it's up-to-date. If not, it is going to be due to an error.
 	 */
-	if (!PageUptodate(page))
+	if (unlikely(!PageUptodate(page)))
 		goto page_not_uptodate;
 
-success:
+	/* Must recheck i_size under page lock */
+	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	if (unlikely(vmf->pgoff >= size)) {
+		unlock_page(page);
+		goto outside_data_content;
+	}
+
 	/*
 	 * Found the page and have a reference on it.
 	 */
 	mark_page_accessed(page);
-	if (type)
-		*type = majmin;
-	return page;
+	ra->prev_index = page->index;
+	vmf->page = page;
+	return ret | VM_FAULT_LOCKED;
 
 outside_data_content:
 	/*
 	 * An external ptracer can access pages that normally aren't
 	 * accessible..
 	 */
-	if (area->vm_mm == current->mm)
-		return NOPAGE_SIGBUS;
+	if (vma->vm_mm == current->mm)
+		return VM_FAULT_SIGBUS;
+
 	/* Fall through to the non-read-ahead case */
 no_cached_page:
 	/*
 	 * We're only likely to ever get here if MADV_RANDOM is in
 	 * effect.
 	 */
-	error = page_cache_read(file, pgoff);
+	error = page_cache_read(file, vmf->pgoff);
 
 	/*
 	 * The page we want has now been added to the page cache.
@@ -1452,12 +1449,13 @@
 	 * to schedule I/O.
 	 */
 	if (error == -ENOMEM)
-		return NOPAGE_OOM;
-	return NOPAGE_SIGBUS;
+		return VM_FAULT_OOM;
+	return VM_FAULT_SIGBUS;
 
 page_not_uptodate:
+	/* IO error path */
 	if (!did_readaround) {
-		majmin = VM_FAULT_MAJOR;
+		ret = VM_FAULT_MAJOR;
 		count_vm_event(PGMAJFAULT);
 	}
 
@@ -1467,217 +1465,21 @@
 	 * because there really aren't any performance issues here
 	 * and we need to check for errors.
 	 */
-	lock_page(page);
-
-	/* Somebody truncated the page on us? */
-	if (!page->mapping) {
-		unlock_page(page);
-		page_cache_release(page);
-		goto retry_all;
-	}
-
-	/* Somebody else successfully read it in? */
-	if (PageUptodate(page)) {
-		unlock_page(page);
-		goto success;
-	}
 	ClearPageError(page);
 	error = mapping->a_ops->readpage(file, page);
-	if (!error) {
-		wait_on_page_locked(page);
-		if (PageUptodate(page))
-			goto success;
-	} else if (error == AOP_TRUNCATED_PAGE) {
-		page_cache_release(page);
-		goto retry_find;
-	}
+	page_cache_release(page);
 
-	/*
-	 * Things didn't work out. Return zero to tell the
-	 * mm layer so, possibly freeing the page cache page first.
-	 */
+	if (!error || error == AOP_TRUNCATED_PAGE)
+		goto retry_find;
+
+	/* Things didn't work out. Return zero to tell the mm layer so. */
 	shrink_readahead_size_eio(file, ra);
-	page_cache_release(page);
-	return NOPAGE_SIGBUS;
+	return VM_FAULT_SIGBUS;
 }
-EXPORT_SYMBOL(filemap_nopage);
-
-static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
-					int nonblock)
-{
-	struct address_space *mapping = file->f_mapping;
-	struct page *page;
-	int error;
-
-	/*
-	 * Do we have something in the page cache already?
-	 */
-retry_find:
-	page = find_get_page(mapping, pgoff);
-	if (!page) {
-		if (nonblock)
-			return NULL;
-		goto no_cached_page;
-	}
-
-	/*
-	 * Ok, found a page in the page cache, now we need to check
-	 * that it's up-to-date.
-	 */
-	if (!PageUptodate(page)) {
-		if (nonblock) {
-			page_cache_release(page);
-			return NULL;
-		}
-		goto page_not_uptodate;
-	}
-
-success:
-	/*
-	 * Found the page and have a reference on it.
-	 */
-	mark_page_accessed(page);
-	return page;
-
-no_cached_page:
-	error = page_cache_read(file, pgoff);
-
-	/*
-	 * The page we want has now been added to the page cache.
-	 * In the unlikely event that someone removed it in the
-	 * meantime, we'll just come back here and read it again.
-	 */
-	if (error >= 0)
-		goto retry_find;
-
-	/*
-	 * An error return from page_cache_read can result if the
-	 * system is low on memory, or a problem occurs while trying
-	 * to schedule I/O.
-	 */
-	return NULL;
-
-page_not_uptodate:
-	lock_page(page);
-
-	/* Did it get truncated while we waited for it? */
-	if (!page->mapping) {
-		unlock_page(page);
-		goto err;
-	}
-
-	/* Did somebody else get it up-to-date? */
-	if (PageUptodate(page)) {
-		unlock_page(page);
-		goto success;
-	}
-
-	error = mapping->a_ops->readpage(file, page);
-	if (!error) {
-		wait_on_page_locked(page);
-		if (PageUptodate(page))
-			goto success;
-	} else if (error == AOP_TRUNCATED_PAGE) {
-		page_cache_release(page);
-		goto retry_find;
-	}
-
-	/*
-	 * Umm, take care of errors if the page isn't up-to-date.
-	 * Try to re-read it _once_. We do this synchronously,
-	 * because there really aren't any performance issues here
-	 * and we need to check for errors.
-	 */
-	lock_page(page);
-
-	/* Somebody truncated the page on us? */
-	if (!page->mapping) {
-		unlock_page(page);
-		goto err;
-	}
-	/* Somebody else successfully read it in? */
-	if (PageUptodate(page)) {
-		unlock_page(page);
-		goto success;
-	}
-
-	ClearPageError(page);
-	error = mapping->a_ops->readpage(file, page);
-	if (!error) {
-		wait_on_page_locked(page);
-		if (PageUptodate(page))
-			goto success;
-	} else if (error == AOP_TRUNCATED_PAGE) {
-		page_cache_release(page);
-		goto retry_find;
-	}
-
-	/*
-	 * Things didn't work out. Return zero to tell the
-	 * mm layer so, possibly freeing the page cache page first.
-	 */
-err:
-	page_cache_release(page);
-
-	return NULL;
-}
-
-int filemap_populate(struct vm_area_struct *vma, unsigned long addr,
-		unsigned long len, pgprot_t prot, unsigned long pgoff,
-		int nonblock)
-{
-	struct file *file = vma->vm_file;
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	unsigned long size;
-	struct mm_struct *mm = vma->vm_mm;
-	struct page *page;
-	int err;
-
-	if (!nonblock)
-		force_page_cache_readahead(mapping, vma->vm_file,
-					pgoff, len >> PAGE_CACHE_SHIFT);
-
-repeat:
-	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	if (pgoff + (len >> PAGE_CACHE_SHIFT) > size)
-		return -EINVAL;
-
-	page = filemap_getpage(file, pgoff, nonblock);
-
-	/* XXX: This is wrong, a filesystem I/O error may have happened. Fix that as
-	 * done in shmem_populate calling shmem_getpage */
-	if (!page && !nonblock)
-		return -ENOMEM;
-
-	if (page) {
-		err = install_page(mm, vma, addr, page, prot);
-		if (err) {
-			page_cache_release(page);
-			return err;
-		}
-	} else if (vma->vm_flags & VM_NONLINEAR) {
-		/* No page was found just because we can't read it in now (being
-		 * here implies nonblock != 0), but the page may exist, so set
-		 * the PTE to fault it in later. */
-		err = install_file_pte(mm, vma, addr, pgoff, prot);
-		if (err)
-			return err;
-	}
-
-	len -= PAGE_SIZE;
-	addr += PAGE_SIZE;
-	pgoff++;
-	if (len)
-		goto repeat;
-
-	return 0;
-}
-EXPORT_SYMBOL(filemap_populate);
+EXPORT_SYMBOL(filemap_fault);
 
 struct vm_operations_struct generic_file_vm_ops = {
-	.nopage		= filemap_nopage,
-	.populate	= filemap_populate,
+	.fault		= filemap_fault,
 };
 
 /* This is used for a general mmap of a disk file */
@@ -1690,6 +1492,7 @@
 		return -ENOEXEC;
 	file_accessed(file);
 	vma->vm_ops = &generic_file_vm_ops;
+	vma->vm_flags |= VM_CAN_NONLINEAR;
 	return 0;
 }
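
Under the new ->fault interface, a handler fills in vmf->page and returns VM_FAULT_* bits instead of returning the page (or NOPAGE_*) directly. A minimal hedged sketch of such a handler; my_get_page() is a hypothetical lookup helper assumed to return the page with a reference held for the caller:

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	page = my_get_page(vma->vm_file, vmf->pgoff);	/* hypothetical */
	if (!page)
		return VM_FAULT_SIGBUS;

	vmf->page = page;
	return 0;	/* or VM_FAULT_LOCKED if the page is returned locked */
}

static struct vm_operations_struct my_vm_ops = {
	.fault	= my_fault,
};
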
 
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 65ffc32..53ee6a2 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -205,62 +205,58 @@
 }
 
 /*
- * xip_nopage() is invoked via the vma operations vector for a
+ * xip_fault() is invoked via the vma operations vector for a
  * mapped memory region to read in file data during a page fault.
  *
- * This function is derived from filemap_nopage, but used for execute in place
+ * This function is derived from filemap_fault, but used for execute in place
  */
-static struct page *
-xip_file_nopage(struct vm_area_struct * area,
-		   unsigned long address,
-		   int *type)
+static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 {
 	struct file *file = area->vm_file;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
 	struct page *page;
-	unsigned long size, pgoff, endoff;
+	pgoff_t size;
 
-	pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT)
-		+ area->vm_pgoff;
-	endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT)
-		+ area->vm_pgoff;
+	/* XXX: are VM_FAULT_ codes OK? */
 
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	if (pgoff >= size)
-		return NOPAGE_SIGBUS;
+	if (vmf->pgoff >= size)
+		return VM_FAULT_SIGBUS;
 
-	page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
+	page = mapping->a_ops->get_xip_page(mapping,
+					vmf->pgoff*(PAGE_SIZE/512), 0);
 	if (!IS_ERR(page))
 		goto out;
 	if (PTR_ERR(page) != -ENODATA)
-		return NOPAGE_SIGBUS;
+		return VM_FAULT_OOM;
 
 	/* sparse block */
 	if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
 	    (area->vm_flags & (VM_SHARED| VM_MAYSHARE)) &&
 	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
 		/* maybe shared writable, allocate new block */
-		page = mapping->a_ops->get_xip_page (mapping,
-			pgoff*(PAGE_SIZE/512), 1);
+		page = mapping->a_ops->get_xip_page(mapping,
+					vmf->pgoff*(PAGE_SIZE/512), 1);
 		if (IS_ERR(page))
-			return NOPAGE_SIGBUS;
+			return VM_FAULT_SIGBUS;
 		/* unmap page at pgoff from all other vmas */
-		__xip_unmap(mapping, pgoff);
+		__xip_unmap(mapping, vmf->pgoff);
 	} else {
 		/* not shared and writable, use xip_sparse_page() */
 		page = xip_sparse_page();
 		if (!page)
-			return NOPAGE_OOM;
+			return VM_FAULT_OOM;
 	}
 
 out:
 	page_cache_get(page);
-	return page;
+	vmf->page = page;
+	return 0;
 }
 
 static struct vm_operations_struct xip_file_vm_ops = {
-	.nopage         = xip_file_nopage,
+	.fault	= xip_file_fault,
 };
 
 int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
@@ -269,6 +265,7 @@
 
 	file_accessed(file);
 	vma->vm_ops = &xip_file_vm_ops;
+	vma->vm_flags |= VM_CAN_NONLINEAR;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xip_file_mmap);
diff --git a/mm/fremap.c b/mm/fremap.c
index 4e3f53d..c395b1a 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -20,13 +20,14 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, pte_t *ptep)
 {
 	pte_t pte = *ptep;
-	struct page *page = NULL;
 
 	if (pte_present(pte)) {
+		struct page *page;
+
 		flush_cache_page(vma, addr, pte_pfn(pte));
 		pte = ptep_clear_flush(vma, addr, ptep);
 		page = vm_normal_page(vma, addr, pte);
@@ -35,68 +36,21 @@
 				set_page_dirty(page);
 			page_remove_rmap(page, vma);
 			page_cache_release(page);
+			update_hiwater_rss(mm);
+			dec_mm_counter(mm, file_rss);
 		}
 	} else {
 		if (!pte_file(pte))
 			free_swap_and_cache(pte_to_swp_entry(pte));
 		pte_clear_not_present_full(mm, addr, ptep, 0);
 	}
-	return !!page;
 }
 
 /*
- * Install a file page to a given virtual memory address, release any
- * previously existing mapping.
- */
-int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long addr, struct page *page, pgprot_t prot)
-{
-	struct inode *inode;
-	pgoff_t size;
-	int err = -ENOMEM;
-	pte_t *pte;
-	pte_t pte_val;
-	spinlock_t *ptl;
-
-	pte = get_locked_pte(mm, addr, &ptl);
-	if (!pte)
-		goto out;
-
-	/*
-	 * This page may have been truncated. Tell the
-	 * caller about it.
-	 */
-	err = -EINVAL;
-	inode = vma->vm_file->f_mapping->host;
-	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	if (!page->mapping || page->index >= size)
-		goto unlock;
-	err = -ENOMEM;
-	if (page_mapcount(page) > INT_MAX/2)
-		goto unlock;
-
-	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
-		inc_mm_counter(mm, file_rss);
-
-	flush_icache_page(vma, page);
-	pte_val = mk_pte(page, prot);
-	set_pte_at(mm, addr, pte, pte_val);
-	page_add_file_rmap(page);
-	update_mmu_cache(vma, addr, pte_val);
-	lazy_mmu_prot_update(pte_val);
-	err = 0;
-unlock:
-	pte_unmap_unlock(pte, ptl);
-out:
-	return err;
-}
-EXPORT_SYMBOL(install_page);
-
-/*
  * Install a file pte to a given virtual memory address, release any
  * previously existing mapping.
  */
-int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long addr, unsigned long pgoff, pgprot_t prot)
 {
 	int err = -ENOMEM;
@@ -107,10 +61,8 @@
 	if (!pte)
 		goto out;
 
-	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
-		update_hiwater_rss(mm);
-		dec_mm_counter(mm, file_rss);
-	}
+	if (!pte_none(*pte))
+		zap_pte(mm, vma, addr, pte);
 
 	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
 	/*
@@ -126,6 +78,25 @@
 	return err;
 }
 
+static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long addr, unsigned long size, pgoff_t pgoff)
+{
+	int err;
+
+	do {
+		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
+		if (err)
+			return err;
+
+		size -= PAGE_SIZE;
+		addr += PAGE_SIZE;
+		pgoff++;
+	} while (size);
+
+	return 0;
+
+}
+
 /***
  * sys_remap_file_pages - remap arbitrary pages of a shared backing store
  *                        file within an existing vma.
@@ -183,41 +154,77 @@
 	 * the single existing vma.  vm_private_data is used as a
 	 * swapout cursor in a VM_NONLINEAR vma.
 	 */
-	if (vma && (vma->vm_flags & VM_SHARED) &&
-		(!vma->vm_private_data || (vma->vm_flags & VM_NONLINEAR)) &&
-		vma->vm_ops && vma->vm_ops->populate &&
-			end > start && start >= vma->vm_start &&
-				end <= vma->vm_end) {
+	if (!vma || !(vma->vm_flags & VM_SHARED))
+		goto out;
 
-		/* Must set VM_NONLINEAR before any pages are populated. */
-		if (pgoff != linear_page_index(vma, start) &&
-		    !(vma->vm_flags & VM_NONLINEAR)) {
-			if (!has_write_lock) {
-				up_read(&mm->mmap_sem);
-				down_write(&mm->mmap_sem);
-				has_write_lock = 1;
-				goto retry;
-			}
-			mapping = vma->vm_file->f_mapping;
-			spin_lock(&mapping->i_mmap_lock);
-			flush_dcache_mmap_lock(mapping);
-			vma->vm_flags |= VM_NONLINEAR;
-			vma_prio_tree_remove(vma, &mapping->i_mmap);
-			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
-			flush_dcache_mmap_unlock(mapping);
-			spin_unlock(&mapping->i_mmap_lock);
+	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
+		goto out;
+
+	if (!(vma->vm_flags & VM_CAN_NONLINEAR))
+		goto out;
+
+	if (end <= start || start < vma->vm_start || end > vma->vm_end)
+		goto out;
+
+	/* Must set VM_NONLINEAR before any pages are populated. */
+	if (!(vma->vm_flags & VM_NONLINEAR)) {
+		/* Don't need a nonlinear mapping, exit success */
+		if (pgoff == linear_page_index(vma, start)) {
+			err = 0;
+			goto out;
 		}
 
-		err = vma->vm_ops->populate(vma, start, size,
-					    vma->vm_page_prot,
-					    pgoff, flags & MAP_NONBLOCK);
-
+		if (!has_write_lock) {
+			up_read(&mm->mmap_sem);
+			down_write(&mm->mmap_sem);
+			has_write_lock = 1;
+			goto retry;
+		}
+		mapping = vma->vm_file->f_mapping;
 		/*
-		 * We can't clear VM_NONLINEAR because we'd have to do
-		 * it after ->populate completes, and that would prevent
-		 * downgrading the lock.  (Locks can't be upgraded).
+		 * page_mkclean doesn't work on nonlinear vmas, so if
+		 * dirty pages need to be accounted, emulate with linear
+		 * vmas.
 		 */
+		if (mapping_cap_account_dirty(mapping)) {
+			unsigned long addr;
+
+			flags &= MAP_NONBLOCK;
+			addr = mmap_region(vma->vm_file, start, size,
+					flags, vma->vm_flags, pgoff, 1);
+			if (IS_ERR_VALUE(addr)) {
+				err = addr;
+			} else {
+				BUG_ON(addr != start);
+				err = 0;
+			}
+			goto out;
+		}
+		spin_lock(&mapping->i_mmap_lock);
+		flush_dcache_mmap_lock(mapping);
+		vma->vm_flags |= VM_NONLINEAR;
+		vma_prio_tree_remove(vma, &mapping->i_mmap);
+		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
+		flush_dcache_mmap_unlock(mapping);
+		spin_unlock(&mapping->i_mmap_lock);
 	}
+
+	err = populate_range(mm, vma, start, size, pgoff);
+	if (!err && !(flags & MAP_NONBLOCK)) {
+		if (unlikely(has_write_lock)) {
+			downgrade_write(&mm->mmap_sem);
+			has_write_lock = 0;
+		}
+		make_pages_present(start, start+size);
+	}
+
+	/*
+	 * We can't clear VM_NONLINEAR because we'd have to do
+	 * it after ->populate completes, and that would prevent
+	 * downgrading the lock.  (Locks can't be upgraded).
+	 */
+
+out:
 	if (likely(!has_write_lock))
 		up_read(&mm->mmap_sem);
 	else
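
For reference, the nonlinear mappings served by this code are set up from userspace roughly as follows (a sketch; fd, page_size and the page offsets are illustrative):

#define _GNU_SOURCE
#include <sys/mman.h>

/* 'fd' is an open file, 'page_size' == sysconf(_SC_PAGESIZE) */
void *addr = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
		  MAP_SHARED, fd, 0);

/* place file page 3 at the first page of the window, out of linear order */
if (remap_file_pages(addr, page_size, 0, 3, 0) != 0)
	perror("remap_file_pages");
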
diff --git a/mm/highmem.c b/mm/highmem.c
index be8f8d3..7a967bc 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -46,9 +46,14 @@
 	pg_data_t *pgdat;
 	unsigned int pages = 0;
 
-	for_each_online_pgdat(pgdat)
+	for_each_online_pgdat(pgdat) {
 		pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
 			NR_FREE_PAGES);
+		if (zone_movable_is_highmem())
+			pages += zone_page_state(
+					&pgdat->node_zones[ZONE_MOVABLE],
+					NR_FREE_PAGES);
+	}
 
 	return pages;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index acc0fb3..f127940 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -27,6 +27,9 @@
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
+static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
+unsigned long hugepages_treat_as_movable;
+
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
  */
@@ -68,22 +71,20 @@
 {
 	int nid;
 	struct page *page = NULL;
-	struct zonelist *zonelist = huge_zonelist(vma, address);
+	struct zonelist *zonelist = huge_zonelist(vma, address,
+						htlb_alloc_mask);
 	struct zone **z;
 
 	for (z = zonelist->zones; *z; z++) {
 		nid = zone_to_nid(*z);
-		if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) &&
-		    !list_empty(&hugepage_freelists[nid]))
-			break;
-	}
-
-	if (*z) {
-		page = list_entry(hugepage_freelists[nid].next,
-				  struct page, lru);
-		list_del(&page->lru);
-		free_huge_pages--;
-		free_huge_pages_node[nid]--;
+		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
+		    !list_empty(&hugepage_freelists[nid])) {
+			page = list_entry(hugepage_freelists[nid].next,
+					  struct page, lru);
+			list_del(&page->lru);
+			free_huge_pages--;
+			free_huge_pages_node[nid]--;
+		}
 	}
 	return page;
 }
@@ -103,17 +104,21 @@
 {
 	static int prev_nid;
 	struct page *page;
-	static DEFINE_SPINLOCK(nid_lock);
 	int nid;
 
-	spin_lock(&nid_lock);
+	/*
+	 * Copy static prev_nid to local nid, work on that, then copy it
+	 * back to prev_nid afterwards: otherwise there's a window in which
+	 * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
+	 * But we don't need to use a spin_lock here: it really doesn't
+	 * matter if occasionally a racer chooses the same nid as we do.
+	 */
 	nid = next_node(prev_nid, node_online_map);
 	if (nid == MAX_NUMNODES)
 		nid = first_node(node_online_map);
 	prev_nid = nid;
-	spin_unlock(&nid_lock);
 
-	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
+	page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
 					HUGETLB_PAGE_ORDER);
 	if (page) {
 		set_compound_page_dtor(page, free_huge_page);
@@ -203,7 +208,7 @@
 				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
 				1 << PG_private | 1<< PG_writeback);
 	}
-	page[1].lru.next = NULL;
+	set_compound_page_dtor(page, NULL);
 	set_page_refcounted(page);
 	__free_pages(page, HUGETLB_PAGE_ORDER);
 }
@@ -263,6 +268,19 @@
 	max_huge_pages = set_max_huge_pages(max_huge_pages);
 	return 0;
 }
+
+int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
+			struct file *file, void __user *buffer,
+			size_t *length, loff_t *ppos)
+{
+	proc_dointvec(table, write, file, buffer, length, ppos);
+	if (hugepages_treat_as_movable)
+		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
+	else
+		htlb_alloc_mask = GFP_HIGHUSER;
+	return 0;
+}
+
 #endif /* CONFIG_SYSCTL */
 
 int hugetlb_report_meminfo(char *buf)
@@ -299,15 +317,14 @@
  * hugegpage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
  * this far.
  */
-static struct page *hugetlb_nopage(struct vm_area_struct *vma,
-				unsigned long address, int *unused)
+static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	BUG();
-	return NULL;
+	return 0;
 }
 
 struct vm_operations_struct hugetlb_vm_ops = {
-	.nopage = hugetlb_nopage,
+	.fault = hugetlb_vm_op_fault,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
@@ -453,7 +470,7 @@
 	avoidcopy = (page_count(old_page) == 1);
 	if (avoidcopy) {
 		set_huge_ptep_writable(vma, address, ptep);
-		return VM_FAULT_MINOR;
+		return 0;
 	}
 
 	page_cache_get(old_page);
@@ -478,10 +495,10 @@
 	}
 	page_cache_release(new_page);
 	page_cache_release(old_page);
-	return VM_FAULT_MINOR;
+	return 0;
 }
 
-int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, pte_t *ptep, int write_access)
 {
 	int ret = VM_FAULT_SIGBUS;
@@ -535,7 +552,7 @@
 	if (idx >= size)
 		goto backout;
 
-	ret = VM_FAULT_MINOR;
+	ret = 0;
 	if (!pte_none(*ptep))
 		goto backout;
 
@@ -586,7 +603,7 @@
 		return ret;
 	}
 
-	ret = VM_FAULT_MINOR;
+	ret = 0;
 
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
@@ -625,7 +642,7 @@
 			spin_unlock(&mm->page_table_lock);
 			ret = hugetlb_fault(mm, vma, vaddr, 0);
 			spin_lock(&mm->page_table_lock);
-			if (ret == VM_FAULT_MINOR)
+			if (!(ret & VM_FAULT_MAJOR))
 				continue;
 
 			remainder = 0;
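
The new sysctl handler simply re-derives the huge page allocation mask after the write, so later allocations follow the knob. Restated as a one-line sketch of what the handler above computes:

gfp_t mask = hugepages_treat_as_movable ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;

which alloc_fresh_huge_page() then uses as mask | __GFP_COMP | __GFP_NOWARN when allocating HUGETLB_PAGE_ORDER pages.
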
diff --git a/mm/memory.c b/mm/memory.c
index b3d73bb..8aace3db 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1047,7 +1047,8 @@
 		if (pages)
 			foll_flags |= FOLL_GET;
 		if (!write && !(vma->vm_flags & VM_LOCKED) &&
-		    (!vma->vm_ops || !vma->vm_ops->nopage))
+		    (!vma->vm_ops || (!vma->vm_ops->nopage &&
+					!vma->vm_ops->fault)))
 			foll_flags |= FOLL_ANON;
 
 		do {
@@ -1067,31 +1068,30 @@
 			cond_resched();
 			while (!(page = follow_page(vma, start, foll_flags))) {
 				int ret;
-				ret = __handle_mm_fault(mm, vma, start,
+				ret = handle_mm_fault(mm, vma, start,
 						foll_flags & FOLL_WRITE);
+				if (ret & VM_FAULT_ERROR) {
+					if (ret & VM_FAULT_OOM)
+						return i ? i : -ENOMEM;
+					else if (ret & VM_FAULT_SIGBUS)
+						return i ? i : -EFAULT;
+					BUG();
+				}
+				if (ret & VM_FAULT_MAJOR)
+					tsk->maj_flt++;
+				else
+					tsk->min_flt++;
+
 				/*
-				 * The VM_FAULT_WRITE bit tells us that do_wp_page has
-				 * broken COW when necessary, even if maybe_mkwrite
-				 * decided not to set pte_write. We can thus safely do
-				 * subsequent page lookups as if they were reads.
+				 * The VM_FAULT_WRITE bit tells us that
+				 * do_wp_page has broken COW when necessary,
+				 * even if maybe_mkwrite decided not to set
+				 * pte_write. We can thus safely do subsequent
+				 * page lookups as if they were reads.
 				 */
 				if (ret & VM_FAULT_WRITE)
 					foll_flags &= ~FOLL_WRITE;
-				
-				switch (ret & ~VM_FAULT_WRITE) {
-				case VM_FAULT_MINOR:
-					tsk->min_flt++;
-					break;
-				case VM_FAULT_MAJOR:
-					tsk->maj_flt++;
-					break;
-				case VM_FAULT_SIGBUS:
-					return i ? i : -EFAULT;
-				case VM_FAULT_OOM:
-					return i ? i : -ENOMEM;
-				default:
-					BUG();
-				}
+
 				cond_resched();
 			}
 			if (pages) {
@@ -1638,7 +1638,7 @@
 {
 	struct page *old_page, *new_page;
 	pte_t entry;
-	int reuse = 0, ret = VM_FAULT_MINOR;
+	int reuse = 0, ret = 0;
 	struct page *dirty_page = NULL;
 
 	old_page = vm_normal_page(vma, address, orig_pte);
@@ -1715,11 +1715,11 @@
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 	if (old_page == ZERO_PAGE(address)) {
-		new_page = alloc_zeroed_user_highpage(vma, address);
+		new_page = alloc_zeroed_user_highpage_movable(vma, address);
 		if (!new_page)
 			goto oom;
 	} else {
-		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 		if (!new_page)
 			goto oom;
 		cow_user_page(new_page, old_page, address, vma);
@@ -1765,6 +1765,15 @@
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	if (dirty_page) {
+		/*
+		 * Yes, Virginia, this is actually required to prevent a race
+		 * with clear_page_dirty_for_io() from clearing the page dirty
+		 * bit after it clears all dirty ptes, but before a racing
+		 * do_wp_page installs a dirty pte.
+		 *
+		 * do_no_page is protected similarly.
+		 */
+		wait_on_page_locked(dirty_page);
 		set_page_dirty_balance(dirty_page);
 		put_page(dirty_page);
 	}
@@ -1831,6 +1840,13 @@
 	unsigned long restart_addr;
 	int need_break;
 
+	/*
+	 * files that support invalidating or truncating portions of the
+	 * file from under mmaped areas must have their ->fault function
+	 * return a locked page (and set VM_FAULT_LOCKED in the return).
+	 * This provides synchronisation against concurrent unmapping here.
+	 */
+
 again:
 	restart_addr = vma->vm_truncate_count;
 	if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
@@ -1959,17 +1975,8 @@
 
 	spin_lock(&mapping->i_mmap_lock);
 
-	/* serialize i_size write against truncate_count write */
-	smp_wmb();
-	/* Protect against page faults, and endless unmapping loops */
+	/* Protect against endless unmapping loops */
 	mapping->truncate_count++;
-	/*
-	 * For archs where spin_lock has inclusive semantics like ia64
-	 * this smp_mb() will prevent to read pagetable contents
-	 * before the truncate_count increment is visible to
-	 * other cpus.
-	 */
-	smp_mb();
 	if (unlikely(is_restart_addr(mapping->truncate_count))) {
 		if (mapping->truncate_count == 0)
 			reset_vma_truncate_counts(mapping);
@@ -2008,8 +2015,18 @@
 	if (IS_SWAPFILE(inode))
 		goto out_busy;
 	i_size_write(inode, offset);
+
+	/*
+	 * unmap_mapping_range is called twice, first simply for efficiency
+	 * so that truncate_inode_pages does fewer single-page unmaps. However
+	 * after this first call, and before truncate_inode_pages finishes,
+	 * it is possible for private pages to be COWed, which remain after
+	 * truncate_inode_pages finishes, hence the second unmap_mapping_range
+	 * call must be made for correctness.
+	 */
 	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
 	truncate_inode_pages(mapping, offset);
+	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
 	goto out_truncate;
 
 do_expand:
@@ -2049,6 +2066,7 @@
 	down_write(&inode->i_alloc_sem);
 	unmap_mapping_range(mapping, offset, (end - offset), 1);
 	truncate_inode_pages_range(mapping, offset, end);
+	unmap_mapping_range(mapping, offset, (end - offset), 1);
 	inode->i_op->truncate_range(inode, offset, end);
 	up_write(&inode->i_alloc_sem);
 	mutex_unlock(&inode->i_mutex);
@@ -2130,7 +2148,7 @@
 	struct page *page;
 	swp_entry_t entry;
 	pte_t pte;
-	int ret = VM_FAULT_MINOR;
+	int ret = 0;
 
 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
 		goto out;
@@ -2198,15 +2216,15 @@
 	unlock_page(page);
 
 	if (write_access) {
+		/* XXX: We could OR the do_wp_page code with this one? */
 		if (do_wp_page(mm, vma, address,
-				page_table, pmd, ptl, pte) == VM_FAULT_OOM)
+				page_table, pmd, ptl, pte) & VM_FAULT_OOM)
 			ret = VM_FAULT_OOM;
 		goto out;
 	}
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, address, pte);
-	lazy_mmu_prot_update(pte);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 out:
@@ -2237,7 +2255,7 @@
 
 		if (unlikely(anon_vma_prepare(vma)))
 			goto oom;
-		page = alloc_zeroed_user_highpage(vma, address);
+		page = alloc_zeroed_user_highpage_movable(vma, address);
 		if (!page)
 			goto oom;
 
@@ -2271,7 +2289,7 @@
 	lazy_mmu_prot_update(entry);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
-	return VM_FAULT_MINOR;
+	return 0;
 release:
 	page_cache_release(page);
 	goto unlock;
@@ -2280,10 +2298,10 @@
 }
 
 /*
- * do_no_page() tries to create a new page mapping. It aggressively
+ * __do_fault() tries to create a new page mapping. It aggressively
  * tries to share with existing pages, but makes a separate copy if
- * the "write_access" parameter is true in order to avoid the next
- * page fault.
+ * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid
+ * the next page fault.
  *
  * As this is called only for pages that do not currently exist, we
  * do not need to flush old virtual caches or the TLB.
@@ -2292,89 +2310,100 @@
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
-static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access)
+		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
 {
 	spinlock_t *ptl;
-	struct page *new_page;
-	struct address_space *mapping = NULL;
+	struct page *page;
 	pte_t entry;
-	unsigned int sequence = 0;
-	int ret = VM_FAULT_MINOR;
 	int anon = 0;
 	struct page *dirty_page = NULL;
+	struct vm_fault vmf;
+	int ret;
+
+	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
+	vmf.pgoff = pgoff;
+	vmf.flags = flags;
+	vmf.page = NULL;
 
 	pte_unmap(page_table);
 	BUG_ON(vma->vm_flags & VM_PFNMAP);
 
-	if (vma->vm_file) {
-		mapping = vma->vm_file->f_mapping;
-		sequence = mapping->truncate_count;
-		smp_rmb(); /* serializes i_size against truncate_count */
+	if (likely(vma->vm_ops->fault)) {
+		ret = vma->vm_ops->fault(vma, &vmf);
+		if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+			return ret;
+	} else {
+		/* Legacy ->nopage path */
+		ret = 0;
+		vmf.page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
+		/* no page was available -- either SIGBUS or OOM */
+		if (unlikely(vmf.page == NOPAGE_SIGBUS))
+			return VM_FAULT_SIGBUS;
+		else if (unlikely(vmf.page == NOPAGE_OOM))
+			return VM_FAULT_OOM;
 	}
-retry:
-	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
-	/*
-	 * No smp_rmb is needed here as long as there's a full
-	 * spin_lock/unlock sequence inside the ->nopage callback
-	 * (for the pagecache lookup) that acts as an implicit
-	 * smp_mb() and prevents the i_size read to happen
-	 * after the next truncate_count read.
-	 */
 
-	/* no page was available -- either SIGBUS, OOM or REFAULT */
-	if (unlikely(new_page == NOPAGE_SIGBUS))
-		return VM_FAULT_SIGBUS;
-	else if (unlikely(new_page == NOPAGE_OOM))
-		return VM_FAULT_OOM;
-	else if (unlikely(new_page == NOPAGE_REFAULT))
-		return VM_FAULT_MINOR;
+	/*
+	 * For consistency in subsequent calls, make the faulted page always
+	 * locked.
+	 */
+	if (unlikely(!(ret & VM_FAULT_LOCKED)))
+		lock_page(vmf.page);
+	else
+		VM_BUG_ON(!PageLocked(vmf.page));
 
 	/*
 	 * Should we do an early C-O-W break?
 	 */
-	if (write_access) {
+	page = vmf.page;
+	if (flags & FAULT_FLAG_WRITE) {
 		if (!(vma->vm_flags & VM_SHARED)) {
-			struct page *page;
-
-			if (unlikely(anon_vma_prepare(vma)))
-				goto oom;
-			page = alloc_page_vma(GFP_HIGHUSER, vma, address);
-			if (!page)
-				goto oom;
-			copy_user_highpage(page, new_page, address, vma);
-			page_cache_release(new_page);
-			new_page = page;
 			anon = 1;
-
+			if (unlikely(anon_vma_prepare(vma))) {
+				ret = VM_FAULT_OOM;
+				goto out;
+			}
+			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
+						vma, address);
+			if (!page) {
+				ret = VM_FAULT_OOM;
+				goto out;
+			}
+			copy_user_highpage(page, vmf.page, address, vma);
 		} else {
-			/* if the page will be shareable, see if the backing
+			/*
+			 * If the page will be shareable, see if the backing
 			 * address space wants to know that the page is about
-			 * to become writable */
-			if (vma->vm_ops->page_mkwrite &&
-			    vma->vm_ops->page_mkwrite(vma, new_page) < 0
-			    ) {
-				page_cache_release(new_page);
-				return VM_FAULT_SIGBUS;
+			 * to become writable
+			 */
+			if (vma->vm_ops->page_mkwrite) {
+				unlock_page(page);
+				if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
+					ret = VM_FAULT_SIGBUS;
+					anon = 1; /* no anon but release vmf.page */
+					goto out_unlocked;
+				}
+				lock_page(page);
+				/*
+				 * XXX: this is not quite right (racy vs
+				 * invalidate) to unlock and relock the page
+				 * like this, however a better fix requires
+				 * reworking page_mkwrite locking API, which
+				 * is better done later.
+				 */
+				if (!page->mapping) {
+					ret = 0;
+					anon = 1; /* no anon but release vmf.page */
+					goto out;
+				}
 			}
 		}
+
 	}
 
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
-	/*
-	 * For a file-backed vma, someone could have truncated or otherwise
-	 * invalidated this page.  If unmap_mapping_range got called,
-	 * retry getting the page.
-	 */
-	if (mapping && unlikely(sequence != mapping->truncate_count)) {
-		pte_unmap_unlock(page_table, ptl);
-		page_cache_release(new_page);
-		cond_resched();
-		sequence = mapping->truncate_count;
-		smp_rmb();
-		goto retry;
-	}
 
 	/*
 	 * This silly early PAGE_DIRTY setting removes a race
@@ -2387,45 +2416,63 @@
 	 * handle that later.
 	 */
 	/* Only go through if we didn't race with anybody else... */
-	if (pte_none(*page_table)) {
-		flush_icache_page(vma, new_page);
-		entry = mk_pte(new_page, vma->vm_page_prot);
-		if (write_access)
+	if (likely(pte_same(*page_table, orig_pte))) {
+		flush_icache_page(vma, page);
+		entry = mk_pte(page, vma->vm_page_prot);
+		if (flags & FAULT_FLAG_WRITE)
 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		set_pte_at(mm, address, page_table, entry);
 		if (anon) {
-			inc_mm_counter(mm, anon_rss);
-			lru_cache_add_active(new_page);
-			page_add_new_anon_rmap(new_page, vma, address);
+			inc_mm_counter(mm, anon_rss);
+			lru_cache_add_active(page);
+			page_add_new_anon_rmap(page, vma, address);
 		} else {
 			inc_mm_counter(mm, file_rss);
-			page_add_file_rmap(new_page);
-			if (write_access) {
-				dirty_page = new_page;
+			page_add_file_rmap(page);
+			if (flags & FAULT_FLAG_WRITE) {
+				dirty_page = page;
 				get_page(dirty_page);
 			}
 		}
+
+		/* no need to invalidate: a not-present page won't be cached */
+		update_mmu_cache(vma, address, entry);
+		lazy_mmu_prot_update(entry);
 	} else {
-		/* One of our sibling threads was faster, back out. */
-		page_cache_release(new_page);
-		goto unlock;
+		if (anon)
+			page_cache_release(page);
+		else
+			anon = 1; /* no anon but release faulted_page */
 	}
 
-	/* no need to invalidate: a not-present page shouldn't be cached */
-	update_mmu_cache(vma, address, entry);
-	lazy_mmu_prot_update(entry);
-unlock:
 	pte_unmap_unlock(page_table, ptl);
-	if (dirty_page) {
+
+out:
+	unlock_page(vmf.page);
+out_unlocked:
+	if (anon)
+		page_cache_release(vmf.page);
+	else if (dirty_page) {
 		set_page_dirty_balance(dirty_page);
 		put_page(dirty_page);
 	}
+
 	return ret;
-oom:
-	page_cache_release(new_page);
-	return VM_FAULT_OOM;
 }
 
+static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+		unsigned long address, pte_t *page_table, pmd_t *pmd,
+		int write_access, pte_t orig_pte)
+{
+	pgoff_t pgoff = (((address & PAGE_MASK)
+			- vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
+	unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
+
+	return __do_fault(mm, vma, address, page_table, pmd, pgoff,
+							flags, orig_pte);
+}
+
+
 /*
  * do_no_pfn() tries to create a new page mapping for a page without
  * a struct_page backing it
@@ -2449,7 +2496,6 @@
 	spinlock_t *ptl;
 	pte_t entry;
 	unsigned long pfn;
-	int ret = VM_FAULT_MINOR;
 
 	pte_unmap(page_table);
 	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
@@ -2461,7 +2507,7 @@
 	else if (unlikely(pfn == NOPFN_SIGBUS))
 		return VM_FAULT_SIGBUS;
 	else if (unlikely(pfn == NOPFN_REFAULT))
-		return VM_FAULT_MINOR;
+		return 0;
 
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 
@@ -2473,7 +2519,7 @@
 		set_pte_at(mm, address, page_table, entry);
 	}
 	pte_unmap_unlock(page_table, ptl);
-	return ret;
+	return 0;
 }
 
 /*
@@ -2485,33 +2531,30 @@
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
-static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
+static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
 		int write_access, pte_t orig_pte)
 {
+	unsigned int flags = FAULT_FLAG_NONLINEAR |
+				(write_access ? FAULT_FLAG_WRITE : 0);
 	pgoff_t pgoff;
-	int err;
 
 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
-		return VM_FAULT_MINOR;
+		return 0;
 
-	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
+	if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
+			!(vma->vm_flags & VM_CAN_NONLINEAR))) {
 		/*
 		 * Page table corrupted: show pte and kill process.
 		 */
 		print_bad_pte(vma, orig_pte, address);
 		return VM_FAULT_OOM;
 	}
-	/* We can then assume vm->vm_ops && vma->vm_ops->populate */
 
 	pgoff = pte_to_pgoff(orig_pte);
-	err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
-					vma->vm_page_prot, pgoff, 0);
-	if (err == -ENOMEM)
-		return VM_FAULT_OOM;
-	if (err)
-		return VM_FAULT_SIGBUS;
-	return VM_FAULT_MAJOR;
+
+	return __do_fault(mm, vma, address, page_table, pmd, pgoff,
+							flags, orig_pte);
 }
 
 /*
@@ -2538,10 +2581,9 @@
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
 			if (vma->vm_ops) {
-				if (vma->vm_ops->nopage)
-					return do_no_page(mm, vma, address,
-							  pte, pmd,
-							  write_access);
+				if (vma->vm_ops->fault || vma->vm_ops->nopage)
+					return do_linear_fault(mm, vma, address,
+						pte, pmd, write_access, entry);
 				if (unlikely(vma->vm_ops->nopfn))
 					return do_no_pfn(mm, vma, address, pte,
 							 pmd, write_access);
@@ -2550,7 +2592,7 @@
 						 pte, pmd, write_access);
 		}
 		if (pte_file(entry))
-			return do_file_page(mm, vma, address,
+			return do_nonlinear_fault(mm, vma, address,
 					pte, pmd, write_access, entry);
 		return do_swap_page(mm, vma, address,
 					pte, pmd, write_access, entry);
@@ -2582,13 +2624,13 @@
 	}
 unlock:
 	pte_unmap_unlock(pte, ptl);
-	return VM_FAULT_MINOR;
+	return 0;
 }
 
 /*
  * By the time we get here, we already hold the mm semaphore
  */
-int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, int write_access)
 {
 	pgd_t *pgd;
@@ -2617,7 +2659,7 @@
 	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
 }
 
-EXPORT_SYMBOL_GPL(__handle_mm_fault);
+EXPORT_SYMBOL_GPL(handle_mm_fault);
 
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
@@ -2823,3 +2865,4 @@
 
 	return buf - old_buf;
 }
+EXPORT_SYMBOL_GPL(access_process_vm);
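
The memory.c hunks above retire the old ->nopage entry point in favour of the
unified ->fault path: the core VM fills in a struct vm_fault (pgoff,
virtual_address, flags), and the handler returns a VM_FAULT_* code, handing
back the target page in vmf->page. As a rough, hedged sketch only (my_dev_fault,
struct my_dev and its fields are invented here, not part of this patch), a
driver-side handler might now look like:

static int my_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct my_dev *dev = vma->vm_private_data;	/* hypothetical driver state */
	struct page *page;

	if (vmf->pgoff >= dev->nr_pages)		/* out of range: SIGBUS */
		return VM_FAULT_SIGBUS;

	page = dev->pages[vmf->pgoff];			/* pages set up at init time */
	get_page(page);					/* reference handed to the VM */
	vmf->page = page;				/* core code installs the pte */
	return 0;					/* plain minor fault */
}

static struct vm_operations_struct my_dev_vm_ops = {
	.fault	= my_dev_fault,				/* replaces the old .nopage */
};
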
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 188f8d9..9f4e9b9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -594,7 +594,7 @@
 
 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
 {
-	return alloc_pages_node(node, GFP_HIGHUSER, 0);
+	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
 }
 
 /*
@@ -710,7 +710,8 @@
 {
 	struct vm_area_struct *vma = (struct vm_area_struct *)private;
 
-	return alloc_page_vma(GFP_HIGHUSER, vma, page_address_in_vma(page, vma));
+	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
+					page_address_in_vma(page, vma));
 }
 #else
 
@@ -1202,7 +1203,8 @@
 
 #ifdef CONFIG_HUGETLBFS
 /* Return a zonelist suitable for a huge page allocation. */
-struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
+struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
+							gfp_t gfp_flags)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 
@@ -1210,7 +1212,7 @@
 		unsigned nid;
 
 		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
-		return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER);
+		return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
 	}
 	return zonelist_policy(GFP_HIGHUSER, pol);
 }
diff --git a/mm/mempool.c b/mm/mempool.c
index 3e8f1fe..02d5ec3 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -62,10 +62,9 @@
 			mempool_free_t *free_fn, void *pool_data, int node_id)
 {
 	mempool_t *pool;
-	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
+	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
 	if (!pool)
 		return NULL;
-	memset(pool, 0, sizeof(*pool));
 	pool->elements = kmalloc_node(min_nr * sizeof(void *),
 					GFP_KERNEL, node_id);
 	if (!pool->elements) {
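
The mempool change above relies on the __GFP_ZERO support the slab allocators
gain later in this series (mm/slab.c, mm/slob.c, mm/slub.c): the allocator
clears the object, so the separate memset() can go. A minimal sketch of the
same pattern, where struct my_ctx and my_ctx_alloc() are invented for
illustration:

struct my_ctx {
	int	users;
	void	*buf;
};

static struct my_ctx *my_ctx_alloc(int node)
{
	/* one call replaces the old kmalloc_node() + memset() pair */
	return kmalloc_node(sizeof(struct my_ctx), GFP_KERNEL | __GFP_ZERO, node);
}
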
diff --git a/mm/migrate.c b/mm/migrate.c
index a91ca00..34d8ada 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -761,7 +761,8 @@
 
 	*result = &pm->status;
 
-	return alloc_pages_node(pm->node, GFP_HIGHUSER | GFP_THISNODE, 0);
+	return alloc_pages_node(pm->node,
+				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
 }
 
 /*
diff --git a/mm/mmap.c b/mm/mmap.c
index 144b4a2..7afc7a7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1165,12 +1165,8 @@
 		mm->locked_vm += len >> PAGE_SHIFT;
 		make_pages_present(addr, addr + len);
 	}
-	if (flags & MAP_POPULATE) {
-		up_write(&mm->mmap_sem);
-		sys_remap_file_pages(addr, len, 0,
-					pgoff, flags & MAP_NONBLOCK);
-		down_write(&mm->mmap_sem);
-	}
+	if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
+		make_pages_present(addr, addr + len);
 	return addr;
 
 unmap_and_free_vma:
@@ -1575,33 +1571,11 @@
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
 
-#ifdef CONFIG_STACK_GROWSUP
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
-{
-	return expand_upwards(vma, address);
-}
-
-struct vm_area_struct *
-find_extend_vma(struct mm_struct *mm, unsigned long addr)
-{
-	struct vm_area_struct *vma, *prev;
-
-	addr &= PAGE_MASK;
-	vma = find_vma_prev(mm, addr, &prev);
-	if (vma && (vma->vm_start <= addr))
-		return vma;
-	if (!prev || expand_stack(prev, addr))
-		return NULL;
-	if (prev->vm_flags & VM_LOCKED) {
-		make_pages_present(addr, prev->vm_end);
-	}
-	return prev;
-}
-#else
 /*
  * vma is the first one with address < vma->vm_start.  Have to extend vma.
  */
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
+static inline int expand_downwards(struct vm_area_struct *vma,
+				   unsigned long address)
 {
 	int error;
 
@@ -1638,6 +1612,38 @@
 	return error;
 }
 
+int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address)
+{
+	return expand_downwards(vma, address);
+}
+
+#ifdef CONFIG_STACK_GROWSUP
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+	return expand_upwards(vma, address);
+}
+
+struct vm_area_struct *
+find_extend_vma(struct mm_struct *mm, unsigned long addr)
+{
+	struct vm_area_struct *vma, *prev;
+
+	addr &= PAGE_MASK;
+	vma = find_vma_prev(mm, addr, &prev);
+	if (vma && (vma->vm_start <= addr))
+		return vma;
+	if (!prev || expand_stack(prev, addr))
+		return NULL;
+	if (prev->vm_flags & VM_LOCKED)
+		make_pages_present(addr, prev->vm_end);
+	return prev;
+}
+#else
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+	return expand_downwards(vma, address);
+}
+
 struct vm_area_struct *
 find_extend_vma(struct mm_struct * mm, unsigned long addr)
 {
@@ -1655,9 +1661,8 @@
 	start = vma->vm_start;
 	if (expand_stack(vma, addr))
 		return NULL;
-	if (vma->vm_flags & VM_LOCKED) {
+	if (vma->vm_flags & VM_LOCKED)
 		make_pages_present(addr, start);
-	}
 	return vma;
 }
 #endif
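
With the mmap.c change above, MAP_POPULATE no longer drops mmap_sem and
round-trips through sys_remap_file_pages(); the range is simply pre-faulted
with make_pages_present() unless MAP_NONBLOCK is also given. Nothing changes
in how user space asks for it; a hedged illustration (map_whole_file() is an
invented helper, error handling trimmed):

#include <sys/mman.h>
#include <stddef.h>

/* Map a file read-only and ask the kernel to pre-fault every page. */
static void *map_whole_file(int fd, size_t len)
{
	void *p = mmap(NULL, len, PROT_READ,
		       MAP_PRIVATE | MAP_POPULATE, fd, 0);

	return p == MAP_FAILED ? NULL : p;
}
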
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 3b8f3c0..e8346c3 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -128,7 +128,7 @@
 	flush_tlb_range(vma, start, end);
 }
 
-static int
+int
 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	unsigned long start, unsigned long end, unsigned long newflags)
 {
diff --git a/mm/mremap.c b/mm/mremap.c
index bc7c52e..8ea5c24 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -120,7 +120,7 @@
 
 #define LATENCY_LIMIT	(64 * PAGE_SIZE)
 
-static unsigned long move_page_tables(struct vm_area_struct *vma,
+unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len)
 {
diff --git a/mm/nommu.c b/mm/nommu.c
index 8bbbf14..1b105d2 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1341,11 +1341,10 @@
 	return 0;
 }
 
-struct page *filemap_nopage(struct vm_area_struct *area,
-			unsigned long address, int *type)
+int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	BUG();
-	return NULL;
+	return 0;
 }
 
 /*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ea9da3b..63512a9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -824,6 +824,7 @@
 		mapping2 = page_mapping(page);
 		if (mapping2) { /* Race with truncate? */
 			BUG_ON(mapping2 != mapping);
+			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
 			if (mapping_cap_account_dirty(mapping)) {
 				__inc_zone_page_state(page, NR_FILE_DIRTY);
 				task_io_account_write(PAGE_CACHE_SIZE);
@@ -917,6 +918,9 @@
 {
 	struct address_space *mapping = page_mapping(page);
 
+	BUG_ON(!PageLocked(page));
+
+	ClearPageReclaim(page);
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		/*
 		 * Yes, Virginia, this is indeed insane.
@@ -942,14 +946,19 @@
 		 * We basically use the page "master dirty bit"
 		 * as a serialization point for all the different
 		 * threads doing their things.
-		 *
-		 * FIXME! We still have a race here: if somebody
-		 * adds the page back to the page tables in
-		 * between the "page_mkclean()" and the "TestClearPageDirty()",
-		 * we might have it mapped without the dirty bit set.
 		 */
 		if (page_mkclean(page))
 			set_page_dirty(page);
+		/*
+		 * We carefully synchronise fault handlers against
+		 * installing a dirty pte and marking the page dirty
+		 * at this point. We do this by having them hold the
+		 * page lock at some point after installing their
+		 * pte, but before marking the page dirty.
+		 * Pages are always locked coming in here, so we get
+		 * the desired exclusion. See mm/memory.c:do_wp_page()
+		 * for more comments.
+		 */
 		if (TestClearPageDirty(page)) {
 			dec_zone_page_state(page, NR_FILE_DIRTY);
 			return 1;
@@ -978,6 +987,8 @@
 	} else {
 		ret = TestClearPageWriteback(page);
 	}
+	if (ret)
+		dec_zone_page_state(page, NR_WRITEBACK);
 	return ret;
 }
 
@@ -1003,6 +1014,8 @@
 	} else {
 		ret = TestSetPageWriteback(page);
 	}
+	if (!ret)
+		inc_zone_page_state(page, NR_WRITEBACK);
 	return ret;
 
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f9e4e64..43cb3b3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -80,8 +80,9 @@
 	 256,
 #endif
 #ifdef CONFIG_HIGHMEM
-	 32
+	 32,
 #endif
+	 32,
 };
 
 EXPORT_SYMBOL(totalram_pages);
@@ -95,8 +96,9 @@
 #endif
 	 "Normal",
 #ifdef CONFIG_HIGHMEM
-	 "HighMem"
+	 "HighMem",
 #endif
+	 "Movable",
 };
 
 int min_free_kbytes = 1024;
@@ -134,6 +136,13 @@
   static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
   static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
 #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
+  unsigned long __initdata required_kernelcore;
+  unsigned long __initdata required_movablecore;
+  unsigned long __initdata zone_movable_pfn[MAX_NUMNODES];
+
+  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
+  int movable_zone;
+  EXPORT_SYMBOL(movable_zone);
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
 #if MAX_NUMNODES > 1
@@ -444,12 +453,6 @@
 			1 << PG_reserved |
 			1 << PG_buddy ))))
 		bad_page(page);
-	/*
-	 * PageReclaim == PageTail. It is only an error
-	 * for PageReclaim to be set if PageCompound is clear.
-	 */
-	if (unlikely(!PageCompound(page) && PageReclaim(page)))
-		bad_page(page);
 	if (PageDirty(page))
 		__ClearPageDirty(page);
 	/*
@@ -593,7 +596,6 @@
 			1 << PG_locked	|
 			1 << PG_active	|
 			1 << PG_dirty	|
-			1 << PG_reclaim	|
 			1 << PG_slab    |
 			1 << PG_swapcache |
 			1 << PG_writeback |
@@ -608,7 +610,7 @@
 	if (PageReserved(page))
 		return 1;
 
-	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
+	page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead |
 			1 << PG_referenced | 1 << PG_arch_1 |
 			1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
 	set_page_private(page, 0);
@@ -1324,7 +1326,7 @@
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
-	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);
+	did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
 
 	p->reclaim_state = NULL;
 	p->flags &= ~PF_MEMALLOC;
@@ -1361,7 +1363,8 @@
 	 */
 	do_retry = 0;
 	if (!(gfp_mask & __GFP_NORETRY)) {
-		if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
+		if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
+						(gfp_mask & __GFP_REPEAT))
 			do_retry = 1;
 		if (gfp_mask & __GFP_NOFAIL)
 			do_retry = 1;
@@ -1474,13 +1477,14 @@
 {
 	return nr_free_zone_pages(gfp_zone(GFP_USER));
 }
+EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
 
 /*
  * Amount of free RAM allocatable within all zones
  */
 unsigned int nr_free_pagecache_pages(void)
 {
-	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
+	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
 }
 
 static inline void show_node(struct zone *zone)
@@ -2667,6 +2671,63 @@
 }
 
 /*
+ * This finds a zone that can be used for ZONE_MOVABLE pages. The
+ * assumption is made that zones within a node are ordered in monotonically
+ * increasing memory addresses so that the "highest" populated zone is used
+ */
+void __init find_usable_zone_for_movable(void)
+{
+	int zone_index;
+	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
+		if (zone_index == ZONE_MOVABLE)
+			continue;
+
+		if (arch_zone_highest_possible_pfn[zone_index] >
+				arch_zone_lowest_possible_pfn[zone_index])
+			break;
+	}
+
+	VM_BUG_ON(zone_index == -1);
+	movable_zone = zone_index;
+}
+
+/*
+ * The zone ranges provided by the architecture do not include ZONE_MOVABLE
+ * because it is sized independently of architecture. Unlike the other zones,
+ * the starting point for ZONE_MOVABLE is not fixed. It may be different
+ * in each node depending on the size of each node and how evenly kernelcore
+ * is distributed. This helper function adjusts the zone ranges
+ * provided by the architecture for a given node by using the end of the
+ * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
+ * zones within a node are in order of monotonically increasing memory addresses
+ */
+void __meminit adjust_zone_range_for_zone_movable(int nid,
+					unsigned long zone_type,
+					unsigned long node_start_pfn,
+					unsigned long node_end_pfn,
+					unsigned long *zone_start_pfn,
+					unsigned long *zone_end_pfn)
+{
+	/* Only adjust if ZONE_MOVABLE is on this node */
+	if (zone_movable_pfn[nid]) {
+		/* Size ZONE_MOVABLE */
+		if (zone_type == ZONE_MOVABLE) {
+			*zone_start_pfn = zone_movable_pfn[nid];
+			*zone_end_pfn = min(node_end_pfn,
+				arch_zone_highest_possible_pfn[movable_zone]);
+
+		/* Adjust for ZONE_MOVABLE starting within this range */
+		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
+				*zone_end_pfn > zone_movable_pfn[nid]) {
+			*zone_end_pfn = zone_movable_pfn[nid];
+
+		/* Check if this whole range is within ZONE_MOVABLE */
+		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
+			*zone_start_pfn = *zone_end_pfn;
+	}
+}
+
+/*
  * Return the number of pages a zone spans in a node, including holes
  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  */
@@ -2681,6 +2742,9 @@
 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
 	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
 	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
+	adjust_zone_range_for_zone_movable(nid, zone_type,
+				node_start_pfn, node_end_pfn,
+				&zone_start_pfn, &zone_end_pfn);
 
 	/* Check that this node has pages within the zone's required range */
 	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
@@ -2771,6 +2835,9 @@
 	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
 							node_end_pfn);
 
+	adjust_zone_range_for_zone_movable(nid, zone_type,
+			node_start_pfn, node_end_pfn,
+			&zone_start_pfn, &zone_end_pfn);
 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
 }
 
@@ -3148,6 +3215,157 @@
 	return max_pfn;
 }
 
+unsigned long __init early_calculate_totalpages(void)
+{
+	int i;
+	unsigned long totalpages = 0;
+
+	for (i = 0; i < nr_nodemap_entries; i++)
+		totalpages += early_node_map[i].end_pfn -
+						early_node_map[i].start_pfn;
+
+	return totalpages;
+}
+
+/*
+ * Find the PFN at which the Movable zone begins in each node. Kernel memory
+ * is spread evenly between nodes as long as the nodes have enough
+ * memory. When they don't, some nodes will have more kernelcore than
+ * others
+ */
+void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+{
+	int i, nid;
+	unsigned long usable_startpfn;
+	unsigned long kernelcore_node, kernelcore_remaining;
+	int usable_nodes = num_online_nodes();
+
+	/*
+	 * If movablecore was specified, calculate the size of
+	 * kernelcore that it corresponds to so that memory usable for
+	 * any allocation type is evenly spread. If both kernelcore
+	 * and movablecore are specified, then the value of kernelcore
+	 * will be used for required_kernelcore if it's greater than
+	 * what movablecore would have allowed.
+	 */
+	if (required_movablecore) {
+		unsigned long totalpages = early_calculate_totalpages();
+		unsigned long corepages;
+
+		/*
+		 * Round-up so that ZONE_MOVABLE is at least as large as what
+		 * was requested by the user
+		 */
+		required_movablecore =
+			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
+		corepages = totalpages - required_movablecore;
+
+		required_kernelcore = max(required_kernelcore, corepages);
+	}
+
+	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
+	if (!required_kernelcore)
+		return;
+
+	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
+	find_usable_zone_for_movable();
+	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
+
+restart:
+	/* Spread kernelcore memory as evenly as possible throughout nodes */
+	kernelcore_node = required_kernelcore / usable_nodes;
+	for_each_online_node(nid) {
+		/*
+		 * Recalculate kernelcore_node if the division per node
+		 * now exceeds what is necessary to satisfy the requested
+		 * amount of memory for the kernel
+		 */
+		if (required_kernelcore < kernelcore_node)
+			kernelcore_node = required_kernelcore / usable_nodes;
+
+		/*
+		 * As the map is walked, we track how much memory is usable
+		 * by the kernel using kernelcore_remaining. When it is
+		 * 0, the rest of the node is usable by ZONE_MOVABLE
+		 */
+		kernelcore_remaining = kernelcore_node;
+
+		/* Go through each range of PFNs within this node */
+		for_each_active_range_index_in_nid(i, nid) {
+			unsigned long start_pfn, end_pfn;
+			unsigned long size_pages;
+
+			start_pfn = max(early_node_map[i].start_pfn,
+						zone_movable_pfn[nid]);
+			end_pfn = early_node_map[i].end_pfn;
+			if (start_pfn >= end_pfn)
+				continue;
+
+			/* Account for what is only usable for kernelcore */
+			if (start_pfn < usable_startpfn) {
+				unsigned long kernel_pages;
+				kernel_pages = min(end_pfn, usable_startpfn)
+								- start_pfn;
+
+				kernelcore_remaining -= min(kernel_pages,
+							kernelcore_remaining);
+				required_kernelcore -= min(kernel_pages,
+							required_kernelcore);
+
+				/* Continue if range is now fully accounted */
+				if (end_pfn <= usable_startpfn) {
+
+					/*
+					 * Push zone_movable_pfn to the end so
+					 * that if we have to rebalance
+					 * kernelcore across nodes, we will
+					 * not double account here
+					 */
+					zone_movable_pfn[nid] = end_pfn;
+					continue;
+				}
+				start_pfn = usable_startpfn;
+			}
+
+			/*
+			 * The usable PFN range for ZONE_MOVABLE is from
+			 * start_pfn->end_pfn. Calculate size_pages as the
+			 * number of pages used as kernelcore
+			 */
+			size_pages = end_pfn - start_pfn;
+			if (size_pages > kernelcore_remaining)
+				size_pages = kernelcore_remaining;
+			zone_movable_pfn[nid] = start_pfn + size_pages;
+
+			/*
+			 * Some kernelcore has been met, update counts and
+			 * break if the kernelcore for this node has been
+			 * satisfied
+			 */
+			required_kernelcore -= min(required_kernelcore,
+								size_pages);
+			kernelcore_remaining -= size_pages;
+			if (!kernelcore_remaining)
+				break;
+		}
+	}
+
+	/*
+	 * If there is still required_kernelcore, we do another pass with one
+	 * less node in the count. This will push zone_movable_pfn[nid] further
+	 * along on the nodes that still have memory until kernelcore is
+ * satisfied
+	 */
+	usable_nodes--;
+	if (usable_nodes && required_kernelcore > usable_nodes)
+		goto restart;
+
+	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
+	for (nid = 0; nid < MAX_NUMNODES; nid++)
+		zone_movable_pfn[nid] =
+			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+}
+
 /**
  * free_area_init_nodes - Initialise all pg_data_t and zone data
  * @max_zone_pfn: an array of max PFNs for each zone
@@ -3177,19 +3395,37 @@
 	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
 	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
 	for (i = 1; i < MAX_NR_ZONES; i++) {
+		if (i == ZONE_MOVABLE)
+			continue;
 		arch_zone_lowest_possible_pfn[i] =
 			arch_zone_highest_possible_pfn[i-1];
 		arch_zone_highest_possible_pfn[i] =
 			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
 	}
+	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
+	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
+
+	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
+	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
+	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
 
 	/* Print out the zone ranges */
 	printk("Zone PFN ranges:\n");
-	for (i = 0; i < MAX_NR_ZONES; i++)
+	for (i = 0; i < MAX_NR_ZONES; i++) {
+		if (i == ZONE_MOVABLE)
+			continue;
 		printk("  %-8s %8lu -> %8lu\n",
 				zone_names[i],
 				arch_zone_lowest_possible_pfn[i],
 				arch_zone_highest_possible_pfn[i]);
+	}
+
+	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
+	printk("Movable zone start PFN for each node\n");
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		if (zone_movable_pfn[i])
+			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
+	}
 
 	/* Print out the early_node_map[] */
 	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
@@ -3206,6 +3442,43 @@
 				find_min_pfn_for_node(nid), NULL);
 	}
 }
+
+static int __init cmdline_parse_core(char *p, unsigned long *core)
+{
+	unsigned long long coremem;
+	if (!p)
+		return -EINVAL;
+
+	coremem = memparse(p, &p);
+	*core = coremem >> PAGE_SHIFT;
+
+	/* Paranoid check that UL is enough for the coremem value */
+	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
+
+	return 0;
+}
+
+/*
+ * kernelcore=size sets the amount of memory usable for allocations that
+ * cannot be reclaimed or migrated.
+ */
+static int __init cmdline_parse_kernelcore(char *p)
+{
+	return cmdline_parse_core(p, &required_kernelcore);
+}
+
+/*
+ * movablecore=size sets the amount of memory usable for allocations that
+ * can be reclaimed or migrated.
+ */
+static int __init cmdline_parse_movablecore(char *p)
+{
+	return cmdline_parse_core(p, &required_movablecore);
+}
+
+early_param("kernelcore", cmdline_parse_kernelcore);
+early_param("movablecore", cmdline_parse_movablecore);
+
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
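
Both new parameters are parsed with memparse(), so the usual K/M/G suffixes
apply, and find_zone_movable_pfns_for_nodes() above then spreads the
kernel-usable portion evenly over the online nodes, rounding the start of
ZONE_MOVABLE to MAX_ORDER_NR_PAGES. Hypothetical boot lines, for illustration
only (the exact split depends on the node layout):

	kernelcore=512M		roughly 512M in total, spread across nodes, stays
				usable for unmovable kernel allocations; the rest
				of memory becomes ZONE_MOVABLE
	movablecore=1G		the complement: roughly 1G ends up in ZONE_MOVABLE
				and whatever remains is treated as kernelcore

If both are given, the larger resulting kernelcore wins, as the comment in
find_zone_movable_pfns_for_nodes() notes.
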
 
 /**
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 8ce0900..8f6ee07 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -92,6 +92,7 @@
 static int __pdflush(struct pdflush_work *my_work)
 {
 	current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+	set_freezable();
 	my_work->fn = NULL;
 	my_work->who = current;
 	INIT_LIST_HEAD(&my_work->list);
diff --git a/mm/readahead.c b/mm/readahead.c
index 9861e88..39bf45d 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -21,8 +21,16 @@
 }
 EXPORT_SYMBOL(default_unplug_io_fn);
 
+/*
+ * Convenient macros for min/max read-ahead pages.
+ * Note that MAX_RA_PAGES is rounded down, while MIN_RA_PAGES is rounded up.
+ * The latter is necessary for systems with a large page size (i.e. 64k).
+ */
+#define MAX_RA_PAGES	(VM_MAX_READAHEAD*1024 / PAGE_CACHE_SIZE)
+#define MIN_RA_PAGES	DIV_ROUND_UP(VM_MIN_READAHEAD*1024, PAGE_CACHE_SIZE)
+
 struct backing_dev_info default_backing_dev_info = {
-	.ra_pages	= (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
+	.ra_pages	= MAX_RA_PAGES,
 	.state		= 0,
 	.capabilities	= BDI_CAP_MAP_COPY,
 	.unplug_io_fn	= default_unplug_io_fn,
@@ -41,82 +49,6 @@
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
 
-/*
- * Return max readahead size for this inode in number-of-pages.
- */
-static inline unsigned long get_max_readahead(struct file_ra_state *ra)
-{
-	return ra->ra_pages;
-}
-
-static inline unsigned long get_min_readahead(struct file_ra_state *ra)
-{
-	return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
-}
-
-static inline void reset_ahead_window(struct file_ra_state *ra)
-{
-	/*
-	 * ... but preserve ahead_start + ahead_size value,
-	 * see 'recheck:' label in page_cache_readahead().
-	 * Note: We never use ->ahead_size as rvalue without
-	 * checking ->ahead_start != 0 first.
-	 */
-	ra->ahead_size += ra->ahead_start;
-	ra->ahead_start = 0;
-}
-
-static inline void ra_off(struct file_ra_state *ra)
-{
-	ra->start = 0;
-	ra->flags = 0;
-	ra->size = 0;
-	reset_ahead_window(ra);
-	return;
-}
-
-/*
- * Set the initial window size, round to next power of 2 and square
- * for small size, x 4 for medium, and x 2 for large
- * for 128k (32 page) max ra
- * 1-8 page = 32k initial, > 8 page = 128k initial
- */
-static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
-{
-	unsigned long newsize = roundup_pow_of_two(size);
-
-	if (newsize <= max / 32)
-		newsize = newsize * 4;
-	else if (newsize <= max / 4)
-		newsize = newsize * 2;
-	else
-		newsize = max;
-	return newsize;
-}
-
-/*
- * Set the new window size, this is called only when I/O is to be submitted,
- * not for each call to readahead.  If a cache miss occured, reduce next I/O
- * size, else increase depending on how close to max we are.
- */
-static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
-{
-	unsigned long max = get_max_readahead(ra);
-	unsigned long min = get_min_readahead(ra);
-	unsigned long cur = ra->size;
-	unsigned long newsize;
-
-	if (ra->flags & RA_FLAG_MISS) {
-		ra->flags &= ~RA_FLAG_MISS;
-		newsize = max((cur - 2), min);
-	} else if (cur < max / 16) {
-		newsize = 4 * cur;
-	} else {
-		newsize = 2 * cur;
-	}
-	return min(newsize, max);
-}
-
 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
 
 /**
@@ -193,66 +125,6 @@
 }
 
 /*
- * Readahead design.
- *
- * The fields in struct file_ra_state represent the most-recently-executed
- * readahead attempt:
- *
- * start:	Page index at which we started the readahead
- * size:	Number of pages in that read
- *              Together, these form the "current window".
- *              Together, start and size represent the `readahead window'.
- * prev_index:  The page which the readahead algorithm most-recently inspected.
- *              It is mainly used to detect sequential file reading.
- *              If page_cache_readahead sees that it is again being called for
- *              a page which it just looked at, it can return immediately without
- *              making any state changes.
- * offset:      Offset in the prev_index where the last read ended - used for
- *              detection of sequential file reading.
- * ahead_start,
- * ahead_size:  Together, these form the "ahead window".
- * ra_pages:	The externally controlled max readahead for this fd.
- *
- * When readahead is in the off state (size == 0), readahead is disabled.
- * In this state, prev_index is used to detect the resumption of sequential I/O.
- *
- * The readahead code manages two windows - the "current" and the "ahead"
- * windows.  The intent is that while the application is walking the pages
- * in the current window, I/O is underway on the ahead window.  When the
- * current window is fully traversed, it is replaced by the ahead window
- * and the ahead window is invalidated.  When this copying happens, the
- * new current window's pages are probably still locked.  So
- * we submit a new batch of I/O immediately, creating a new ahead window.
- *
- * So:
- *
- *   ----|----------------|----------------|-----
- *       ^start           ^start+size
- *                        ^ahead_start     ^ahead_start+ahead_size
- *
- *         ^ When this page is read, we submit I/O for the
- *           ahead window.
- *
- * A `readahead hit' occurs when a read request is made against a page which is
- * the next sequential page. Ahead window calculations are done only when it
- * is time to submit a new IO.  The code ramps up the size agressively at first,
- * but slow down as it approaches max_readhead.
- *
- * Any seek/ramdom IO will result in readahead being turned off.  It will resume
- * at the first sequential access.
- *
- * There is a special-case: if the first page which the application tries to
- * read happens to be the first page of the file, it is assumed that a linear
- * read is about to happen and the window is immediately set to the initial size
- * based on I/O request size and the max_readahead.
- *
- * This function is to be called for every read request, rather than when
- * it is time to perform readahead.  It is called only once for the entire I/O
- * regardless of size unless readahead is unable to start enough I/O to satisfy
- * the request (I/O request > max_readahead).
- */
-
-/*
  * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
  * the pages first, then submits them all for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
@@ -265,7 +137,8 @@
  */
 static int
 __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read)
+			pgoff_t offset, unsigned long nr_to_read,
+			unsigned long lookahead_size)
 {
 	struct inode *inode = mapping->host;
 	struct page *page;
@@ -278,7 +151,7 @@
 	if (isize == 0)
 		goto out;
 
- 	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
+	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
 
 	/*
 	 * Preallocate as many pages as we will need.
@@ -286,7 +159,7 @@
 	read_lock_irq(&mapping->tree_lock);
 	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
 		pgoff_t page_offset = offset + page_idx;
-		
+
 		if (page_offset > end_index)
 			break;
 
@@ -301,6 +174,8 @@
 			break;
 		page->index = page_offset;
 		list_add(&page->lru, &page_pool);
+		if (page_idx == nr_to_read - lookahead_size)
+			SetPageReadahead(page);
 		ret++;
 	}
 	read_unlock_irq(&mapping->tree_lock);
@@ -337,7 +212,7 @@
 		if (this_chunk > nr_to_read)
 			this_chunk = nr_to_read;
 		err = __do_page_cache_readahead(mapping, filp,
-						offset, this_chunk);
+						offset, this_chunk, 0);
 		if (err < 0) {
 			ret = err;
 			break;
@@ -350,28 +225,6 @@
 }
 
 /*
- * Check how effective readahead is being.  If the amount of started IO is
- * less than expected then the file is partly or fully in pagecache and
- * readahead isn't helping.
- *
- */
-static inline int check_ra_success(struct file_ra_state *ra,
-			unsigned long nr_to_read, unsigned long actual)
-{
-	if (actual == 0) {
-		ra->cache_hit += nr_to_read;
-		if (ra->cache_hit >= VM_MAX_CACHE_HIT) {
-			ra_off(ra);
-			ra->flags |= RA_FLAG_INCACHE;
-			return 0;
-		}
-	} else {
-		ra->cache_hit=0;
-	}
-	return 1;
-}
-
-/*
  * This version skips the IO if the queue is read-congested, and will tell the
  * block layer to abandon the readahead if request allocation would block.
  *
@@ -384,192 +237,7 @@
 	if (bdi_read_congested(mapping->backing_dev_info))
 		return -1;
 
-	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
-}
-
-/*
- * Read 'nr_to_read' pages starting at page 'offset'. If the flag 'block'
- * is set wait till the read completes.  Otherwise attempt to read without
- * blocking.
- * Returns 1 meaning 'success' if read is successful without switching off
- * readahead mode. Otherwise return failure.
- */
-static int
-blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read,
-			struct file_ra_state *ra, int block)
-{
-	int actual;
-
-	if (!block && bdi_read_congested(mapping->backing_dev_info))
-		return 0;
-
-	actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
-
-	return check_ra_success(ra, nr_to_read, actual);
-}
-
-static int make_ahead_window(struct address_space *mapping, struct file *filp,
-				struct file_ra_state *ra, int force)
-{
-	int block, ret;
-
-	ra->ahead_size = get_next_ra_size(ra);
-	ra->ahead_start = ra->start + ra->size;
-
-	block = force || (ra->prev_index >= ra->ahead_start);
-	ret = blockable_page_cache_readahead(mapping, filp,
-			ra->ahead_start, ra->ahead_size, ra, block);
-
-	if (!ret && !force) {
-		/* A read failure in blocking mode, implies pages are
-		 * all cached. So we can safely assume we have taken
-		 * care of all the pages requested in this call.
-		 * A read failure in non-blocking mode, implies we are
-		 * reading more pages than requested in this call.  So
-		 * we safely assume we have taken care of all the pages
-		 * requested in this call.
-		 *
-		 * Just reset the ahead window in case we failed due to
-		 * congestion.  The ahead window will any way be closed
-		 * in case we failed due to excessive page cache hits.
-		 */
-		reset_ahead_window(ra);
-	}
-
-	return ret;
-}
-
-/**
- * page_cache_readahead - generic adaptive readahead
- * @mapping: address_space which holds the pagecache and I/O vectors
- * @ra: file_ra_state which holds the readahead state
- * @filp: passed on to ->readpage() and ->readpages()
- * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
- * @req_size: hint: total size of the read which the caller is performing in
- *            PAGE_CACHE_SIZE units
- *
- * page_cache_readahead() is the main function.  If performs the adaptive
- * readahead window size management and submits the readahead I/O.
- *
- * Note that @filp is purely used for passing on to the ->readpage[s]()
- * handler: it may refer to a different file from @mapping (so we may not use
- * @filp->f_mapping or @filp->f_path.dentry->d_inode here).
- * Also, @ra may not be equal to &@filp->f_ra.
- *
- */
-unsigned long
-page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
-		     struct file *filp, pgoff_t offset, unsigned long req_size)
-{
-	unsigned long max, newsize;
-	int sequential;
-
-	/*
-	 * We avoid doing extra work and bogusly perturbing the readahead
-	 * window expansion logic.
-	 */
-	if (offset == ra->prev_index && --req_size)
-		++offset;
-
-	/* Note that prev_index == -1 if it is a first read */
-	sequential = (offset == ra->prev_index + 1);
-	ra->prev_index = offset;
-	ra->prev_offset = 0;
-
-	max = get_max_readahead(ra);
-	newsize = min(req_size, max);
-
-	/* No readahead or sub-page sized read or file already in cache */
-	if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE))
-		goto out;
-
-	ra->prev_index += newsize - 1;
-
-	/*
-	 * Special case - first read at start of file. We'll assume it's
-	 * a whole-file read and grow the window fast.  Or detect first
-	 * sequential access
-	 */
-	if (sequential && ra->size == 0) {
-		ra->size = get_init_ra_size(newsize, max);
-		ra->start = offset;
-		if (!blockable_page_cache_readahead(mapping, filp, offset,
-							 ra->size, ra, 1))
-			goto out;
-
-		/*
-		 * If the request size is larger than our max readahead, we
-		 * at least want to be sure that we get 2 IOs in flight and
-		 * we know that we will definitly need the new I/O.
-		 * once we do this, subsequent calls should be able to overlap
-		 * IOs,* thus preventing stalls. so issue the ahead window
-		 * immediately.
-		 */
-		if (req_size >= max)
-			make_ahead_window(mapping, filp, ra, 1);
-
-		goto out;
-	}
-
-	/*
-	 * Now handle the random case:
-	 * partial page reads and first access were handled above,
-	 * so this must be the next page otherwise it is random
-	 */
-	if (!sequential) {
-		ra_off(ra);
-		blockable_page_cache_readahead(mapping, filp, offset,
-				 newsize, ra, 1);
-		goto out;
-	}
-
-	/*
-	 * If we get here we are doing sequential IO and this was not the first
-	 * occurence (ie we have an existing window)
-	 */
-	if (ra->ahead_start == 0) {	 /* no ahead window yet */
-		if (!make_ahead_window(mapping, filp, ra, 0))
-			goto recheck;
-	}
-
-	/*
-	 * Already have an ahead window, check if we crossed into it.
-	 * If so, shift windows and issue a new ahead window.
-	 * Only return the #pages that are in the current window, so that
-	 * we get called back on the first page of the ahead window which
-	 * will allow us to submit more IO.
-	 */
-	if (ra->prev_index >= ra->ahead_start) {
-		ra->start = ra->ahead_start;
-		ra->size = ra->ahead_size;
-		make_ahead_window(mapping, filp, ra, 0);
-recheck:
-		/* prev_index shouldn't overrun the ahead window */
-		ra->prev_index = min(ra->prev_index,
-			ra->ahead_start + ra->ahead_size - 1);
-	}
-
-out:
-	return ra->prev_index + 1;
-}
-EXPORT_SYMBOL_GPL(page_cache_readahead);
-
-/*
- * handle_ra_miss() is called when it is known that a page which should have
- * been present in the pagecache (we just did some readahead there) was in fact
- * not found.  This will happen if it was evicted by the VM (readahead
- * thrashing)
- *
- * Turn on the cache miss flag in the RA struct, this will cause the RA code
- * to reduce the RA size on the next read.
- */
-void handle_ra_miss(struct address_space *mapping,
-		struct file_ra_state *ra, pgoff_t offset)
-{
-	ra->flags |= RA_FLAG_MISS;
-	ra->flags &= ~RA_FLAG_INCACHE;
-	ra->cache_hit = 0;
+	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
 }
 
 /*
@@ -581,3 +249,225 @@
 	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE)
 		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
 }
+
+/*
+ * Submit IO for the read-ahead request in file_ra_state.
+ */
+static unsigned long ra_submit(struct file_ra_state *ra,
+		       struct address_space *mapping, struct file *filp)
+{
+	int actual;
+
+	actual = __do_page_cache_readahead(mapping, filp,
+					ra->start, ra->size, ra->async_size);
+
+	return actual;
+}
+
+/*
+ * Set the initial window size, round to next power of 2 and square
+ * for small size, x 4 for medium, and x 2 for large
+ * for 128k (32 page) max ra
+ * 1-8 page = 32k initial, > 8 page = 128k initial
+ */
+static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
+{
+	unsigned long newsize = roundup_pow_of_two(size);
+
+	if (newsize <= max / 32)
+		newsize = newsize * 4;
+	else if (newsize <= max / 4)
+		newsize = newsize * 2;
+	else
+		newsize = max;
+
+	return newsize;
+}
+
+/*
+ *  Get the previous window size, ramp it up, and
+ *  return it as the new window size.
+ */
+static unsigned long get_next_ra_size(struct file_ra_state *ra,
+						unsigned long max)
+{
+	unsigned long cur = ra->size;
+	unsigned long newsize;
+
+	if (cur < max / 16)
+		newsize = 4 * cur;
+	else
+		newsize = 2 * cur;
+
+	return min(newsize, max);
+}
+
+/*
+ * On-demand readahead design.
+ *
+ * The fields in struct file_ra_state represent the most-recently-executed
+ * readahead attempt:
+ *
+ *                        |<----- async_size ---------|
+ *     |------------------- size -------------------->|
+ *     |==================#===========================|
+ *     ^start             ^page marked with PG_readahead
+ *
+ * To overlap application thinking time and disk I/O time, we do
+ * `readahead pipelining': Do not wait until the application consumed all
+ * readahead pages and stalled on the missing page at readahead_index;
+ * Instead, submit an asynchronous readahead I/O as soon as there are
+ * only async_size pages left in the readahead window. Normally async_size
+ * will be equal to size, for maximum pipelining.
+ *
+ * In interleaved sequential reads, concurrent streams on the same fd can
+ * be invalidating each other's readahead state. So we flag the new readahead
+ * page at (start+size-async_size) with PG_readahead, and use it as readahead
+ * indicator. The flag won't be set on already cached pages, to avoid the
+ * readahead-for-nothing fuss, saving pointless page cache lookups.
+ *
+ * prev_index tracks the last visited page in the _previous_ read request.
+ * It should be maintained by the caller, and will be used for detecting
+ * small random reads. Note that the readahead algorithm checks loosely
+ * for sequential patterns. Hence interleaved reads might be served as
+ * sequential ones.
+ *
+ * There is a special-case: if the first page which the application tries to
+ * read happens to be the first page of the file, it is assumed that a linear
+ * read is about to happen and the window is immediately set to the initial size
+ * based on I/O request size and the max_readahead.
+ *
+ * The code ramps up the readahead size aggressively at first, but slows down
+ * as it approaches max_readahead.
+ */
+
+/*
+ * A minimal readahead algorithm for trivial sequential/random reads.
+ */
+static unsigned long
+ondemand_readahead(struct address_space *mapping,
+		   struct file_ra_state *ra, struct file *filp,
+		   bool hit_readahead_marker, pgoff_t offset,
+		   unsigned long req_size)
+{
+	unsigned long max;	/* max readahead pages */
+	int sequential;
+
+	max = ra->ra_pages;
+	sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
+
+	/*
+	 * It's the expected callback offset, assume sequential access.
+	 * Ramp up sizes, and push forward the readahead window.
+	 */
+	if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
+			offset == (ra->start + ra->size))) {
+		ra->start += ra->size;
+		ra->size = get_next_ra_size(ra, max);
+		ra->async_size = ra->size;
+		goto readit;
+	}
+
+	/*
+	 * Standalone, small read.
+	 * Read as is, and do not pollute the readahead state.
+	 */
+	if (!hit_readahead_marker && !sequential) {
+		return __do_page_cache_readahead(mapping, filp,
+						offset, req_size, 0);
+	}
+
+	/*
+	 * It may be one of
+	 * 	- first read on start of file
+	 * 	- sequential cache miss
+	 * 	- oversize random read
+	 * Start readahead for it.
+	 */
+	ra->start = offset;
+	ra->size = get_init_ra_size(req_size, max);
+	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
+
+	/*
+	 * Hit on a marked page without valid readahead state.
+	 * E.g. interleaved reads.
+	 * Not knowing its readahead pos/size, bet on the minimal possible one.
+	 */
+	if (hit_readahead_marker) {
+		ra->start++;
+		ra->size = get_next_ra_size(ra, max);
+	}
+
+readit:
+	return ra_submit(ra, mapping, filp);
+}
+
+/**
+ * page_cache_sync_readahead - generic file readahead
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @filp: passed on to ->readpage() and ->readpages()
+ * @offset: start offset into @mapping, in pagecache page-sized units
+ * @req_size: hint: total size of the read which the caller is performing in
+ *            pagecache pages
+ *
+ * page_cache_sync_readahead() should be called when a cache miss happened:
+ * it will submit the read.  The readahead logic may decide to piggyback more
+ * pages onto the read request if access patterns suggest it will improve
+ * performance.
+ */
+void page_cache_sync_readahead(struct address_space *mapping,
+			       struct file_ra_state *ra, struct file *filp,
+			       pgoff_t offset, unsigned long req_size)
+{
+	/* no read-ahead */
+	if (!ra->ra_pages)
+		return;
+
+	/* do read-ahead */
+	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
+}
+EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
+
+/**
+ * page_cache_async_readahead - file readahead for marked pages
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @filp: passed on to ->readpage() and ->readpages()
+ * @page: the page at @offset which has the PG_readahead flag set
+ * @offset: start offset into @mapping, in pagecache page-sized units
+ * @req_size: hint: total size of the read which the caller is performing in
+ *            pagecache pages
+ *
+ * page_cache_async_readahead() should be called when a page is used which
+ * has the PG_readahead flag: this is a marker to suggest that the application
+ * has used up enough of the readahead window that we should start pulling in
+ * more pages. */
+void
+page_cache_async_readahead(struct address_space *mapping,
+			   struct file_ra_state *ra, struct file *filp,
+			   struct page *page, pgoff_t offset,
+			   unsigned long req_size)
+{
+	/* no read-ahead */
+	if (!ra->ra_pages)
+		return;
+
+	/*
+	 * Same bit is used for PG_readahead and PG_reclaim.
+	 */
+	if (PageWriteback(page))
+		return;
+
+	ClearPageReadahead(page);
+
+	/*
+	 * Defer asynchronous read-ahead on IO congestion.
+	 */
+	if (bdi_read_congested(mapping->backing_dev_info))
+		return;
+
+	/* do read-ahead */
+	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
+}
+EXPORT_SYMBOL_GPL(page_cache_async_readahead);
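
The old page_cache_readahead()/handle_ra_miss() pair is gone; read paths are
expected to call the two entry points above instead: the sync variant on a
cache miss, the async variant when they touch a page that carries the
PG_readahead marker. A rough sketch of a caller (this is not the actual
mm/filemap.c code; my_file_read_page() and its simplifications are invented):

/* Look up one page of @mapping at @index, kicking off readahead as needed. */
static struct page *my_file_read_page(struct address_space *mapping,
				      struct file *filp, pgoff_t index)
{
	struct file_ra_state *ra = &filp->f_ra;
	struct page *page;

	page = find_get_page(mapping, index);
	if (!page) {
		/* cache miss: synchronous readahead, then retry the lookup */
		page_cache_sync_readahead(mapping, ra, filp, index, 1);
		page = find_get_page(mapping, index);
	} else if (PageReadahead(page)) {
		/* hit the PG_readahead marker: pipeline the next window */
		page_cache_async_readahead(mapping, ra, filp, page, index, 1);
	}
	return page;	/* may still be NULL or !PageUptodate() */
}
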
diff --git a/mm/rmap.c b/mm/rmap.c
index 61e4925..fede5c7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -621,8 +621,10 @@
 			printk (KERN_EMERG "  page->count = %x\n", page_count(page));
 			printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
 			print_symbol (KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
-			if (vma->vm_ops)
+			if (vma->vm_ops) {
 				print_symbol (KERN_EMERG "  vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
+				print_symbol (KERN_EMERG "  vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault);
+			}
 			if (vma->vm_file && vma->vm_file->f_op)
 				print_symbol (KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
 			BUG();
diff --git a/mm/shmem.c b/mm/shmem.c
index 0493e4d..ad155c7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -27,6 +27,7 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/xattr.h>
+#include <linux/exportfs.h>
 #include <linux/generic_acl.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
@@ -82,6 +83,7 @@
 	SGP_READ,	/* don't exceed i_size, don't allocate page */
 	SGP_CACHE,	/* don't exceed i_size, may allocate page */
 	SGP_WRITE,	/* may exceed i_size, may allocate page */
+	SGP_FAULT,	/* same as SGP_CACHE, return with page locked */
 };
 
 static int shmem_getpage(struct inode *inode, unsigned long idx,
@@ -93,8 +95,11 @@
 	 * The above definition of ENTRIES_PER_PAGE, and the use of
 	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
 	 * might be reconsidered if it ever diverges from PAGE_SIZE.
+	 *
+	 * __GFP_MOVABLE is masked out as swap vectors cannot move
 	 */
-	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
+	return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+				PAGE_CACHE_SHIFT-PAGE_SHIFT);
 }
 
 static inline void shmem_dir_free(struct page *page)
@@ -372,7 +377,7 @@
 		}
 
 		spin_unlock(&info->lock);
-		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
+		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
 		if (page)
 			set_page_private(page, 0);
 		spin_lock(&info->lock);
@@ -1096,6 +1101,10 @@
 
 	if (idx >= SHMEM_MAX_INDEX)
 		return -EFBIG;
+
+	if (type)
+		*type = 0;
+
 	/*
 	 * Normally, filepage is NULL on entry, and either found
 	 * uptodate immediately, or allocated and zeroed, or read
@@ -1129,9 +1138,9 @@
 		if (!swappage) {
 			shmem_swp_unmap(entry);
 			/* here we actually do the io */
-			if (type && *type == VM_FAULT_MINOR) {
+			if (type && !(*type & VM_FAULT_MAJOR)) {
 				__count_vm_event(PGMAJFAULT);
-				*type = VM_FAULT_MAJOR;
+				*type |= VM_FAULT_MAJOR;
 			}
 			spin_unlock(&info->lock);
 			swappage = shmem_swapin(info, swap, idx);
@@ -1285,8 +1294,10 @@
 	}
 done:
 	if (*pagep != filepage) {
-		unlock_page(filepage);
 		*pagep = filepage;
+		if (sgp != SGP_FAULT)
+			unlock_page(filepage);
+
 	}
 	return 0;
 
@@ -1298,72 +1309,21 @@
 	return error;
 }
 
-static struct page *shmem_nopage(struct vm_area_struct *vma,
-				 unsigned long address, int *type)
+static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
-	struct page *page = NULL;
-	unsigned long idx;
 	int error;
+	int ret;
 
-	idx = (address - vma->vm_start) >> PAGE_SHIFT;
-	idx += vma->vm_pgoff;
-	idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
-	if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
-		return NOPAGE_SIGBUS;
+	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
+		return VM_FAULT_SIGBUS;
 
-	error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
+	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_FAULT, &ret);
 	if (error)
-		return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;
+		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
 
-	mark_page_accessed(page);
-	return page;
-}
-
-static int shmem_populate(struct vm_area_struct *vma,
-	unsigned long addr, unsigned long len,
-	pgprot_t prot, unsigned long pgoff, int nonblock)
-{
-	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
-	struct mm_struct *mm = vma->vm_mm;
-	enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
-	unsigned long size;
-
-	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
-		return -EINVAL;
-
-	while ((long) len > 0) {
-		struct page *page = NULL;
-		int err;
-		/*
-		 * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
-		 */
-		err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
-		if (err)
-			return err;
-		/* Page may still be null, but only if nonblock was set. */
-		if (page) {
-			mark_page_accessed(page);
-			err = install_page(mm, vma, addr, page, prot);
-			if (err) {
-				page_cache_release(page);
-				return err;
-			}
-		} else if (vma->vm_flags & VM_NONLINEAR) {
-			/* No page was found just because we can't read it in
-			 * now (being here implies nonblock != 0), but the page
-			 * may exist, so set the PTE to fault it in later. */
-    			err = install_file_pte(mm, vma, addr, pgoff, prot);
-			if (err)
-	    			return err;
-		}
-
-		len -= PAGE_SIZE;
-		addr += PAGE_SIZE;
-		pgoff++;
-	}
-	return 0;
+	mark_page_accessed(vmf->page);
+	return ret | VM_FAULT_LOCKED;
 }
 
 #ifdef CONFIG_NUMA
@@ -1410,6 +1370,7 @@
 {
 	file_accessed(file);
 	vma->vm_ops = &shmem_vm_ops;
+	vma->vm_flags |= VM_CAN_NONLINEAR;
 	return 0;
 }
 
@@ -2455,8 +2416,7 @@
 };
 
 static struct vm_operations_struct shmem_vm_ops = {
-	.nopage		= shmem_nopage,
-	.populate	= shmem_populate,
+	.fault		= shmem_fault,
 #ifdef CONFIG_NUMA
 	.set_policy     = shmem_set_policy,
 	.get_policy     = shmem_get_policy,
diff --git a/mm/slab.c b/mm/slab.c
index a453383..c3feeaa 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -775,6 +775,9 @@
 	 */
 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
+	if (!size)
+		return ZERO_SIZE_PTR;
+
 	while (size > csizep->cs_size)
 		csizep++;
 
@@ -1160,7 +1163,7 @@
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
-	int memsize = sizeof(struct kmem_list3);
+	const int memsize = sizeof(struct kmem_list3);
 
 	switch (action) {
 	case CPU_LOCK_ACQUIRE:
@@ -2351,7 +2354,7 @@
 		 * this should not happen at all.
 		 * But leave a BUG_ON for some lucky dude.
 		 */
-		BUG_ON(!cachep->slabp_cache);
+		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
 	cachep->ctor = ctor;
 	cachep->name = name;
@@ -2743,7 +2746,7 @@
 	 * Be lazy and only check for valid flags here,  keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
 
 	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
@@ -3389,6 +3392,9 @@
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 
+	if (unlikely((flags & __GFP_ZERO) && ptr))
+		memset(ptr, 0, obj_size(cachep));
+
 	return ptr;
 }
 
@@ -3440,6 +3446,9 @@
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
 	prefetchw(objp);
 
+	if (unlikely((flags & __GFP_ZERO) && objp))
+		memset(objp, 0, obj_size(cachep));
+
 	return objp;
 }
 
@@ -3581,23 +3590,6 @@
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 /**
- * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
- * @cache: The cache to allocate from.
- * @flags: See kmalloc().
- *
- * Allocate an object from this cache and set the allocated memory to zero.
- * The flags are only relevant if the cache has no available objects.
- */
-void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
-{
-	void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
-	if (ret)
-		memset(ret, 0, obj_size(cache));
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_zalloc);
-
-/**
  * kmem_ptr_validate - check if an untrusted pointer might
  *	be a slab entry.
  * @cachep: the cache we're checking against
@@ -3653,8 +3645,8 @@
 	struct kmem_cache *cachep;
 
 	cachep = kmem_find_general_cachep(size, flags);
-	if (unlikely(cachep == NULL))
-		return NULL;
+	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
+		return cachep;
 	return kmem_cache_alloc_node(cachep, flags, node);
 }
 
@@ -3698,8 +3690,8 @@
 	 * functions.
 	 */
 	cachep = __find_general_cachep(size, flags);
-	if (unlikely(cachep == NULL))
-		return NULL;
+	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
+		return cachep;
 	return __cache_alloc(cachep, flags, caller);
 }
 
@@ -3726,52 +3718,6 @@
 #endif
 
 /**
- * krealloc - reallocate memory. The contents will remain unchanged.
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * The contents of the object pointed to are preserved up to the
- * lesser of the new and old sizes.  If @p is %NULL, krealloc()
- * behaves exactly like kmalloc().  If @size is 0 and @p is not a
- * %NULL pointer, the object pointed to is freed.
- */
-void *krealloc(const void *p, size_t new_size, gfp_t flags)
-{
-	struct kmem_cache *cache, *new_cache;
-	void *ret;
-
-	if (unlikely(!p))
-		return kmalloc_track_caller(new_size, flags);
-
-	if (unlikely(!new_size)) {
-		kfree(p);
-		return NULL;
-	}
-
-	cache = virt_to_cache(p);
-	new_cache = __find_general_cachep(new_size, flags);
-
-	/*
- 	 * If new size fits in the current cache, bail out.
- 	 */
-	if (likely(cache == new_cache))
-		return (void *)p;
-
-	/*
- 	 * We are on the slow-path here so do not use __cache_alloc
- 	 * because it bloats kernel text.
- 	 */
-	ret = kmalloc_track_caller(new_size, flags);
-	if (ret) {
-		memcpy(ret, p, min(new_size, ksize(p)));
-		kfree(p);
-	}
-	return ret;
-}
-EXPORT_SYMBOL(krealloc);
-
-/**
  * kmem_cache_free - Deallocate an object
  * @cachep: The cache the allocation was from.
  * @objp: The previously allocated object.
@@ -3806,7 +3752,7 @@
 	struct kmem_cache *c;
 	unsigned long flags;
 
-	if (unlikely(!objp))
+	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return;
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
@@ -4398,7 +4344,7 @@
 {
 #ifdef CONFIG_KALLSYMS
 	unsigned long offset, size;
-	char modname[MODULE_NAME_LEN + 1], name[KSYM_NAME_LEN + 1];
+	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
 
 	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
 		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
@@ -4493,7 +4439,7 @@
  */
 size_t ksize(const void *objp)
 {
-	if (unlikely(objp == NULL))
+	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return 0;
 
 	return obj_size(virt_to_cache(objp));
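
The slab changes above (matched by the slob and slub hunks below) add two
related conventions: __GFP_ZERO asks the allocator to clear the object, and a
zero-length kmalloc() now returns the ZERO_SIZE_PTR cookie instead of NULL,
which kfree() and ksize() recognise via ZERO_OR_NULL_PTR(). A hedged sketch of
what a caller may now rely on (foo_cachep is hypothetical; the macro
definitions themselves live in the linux/slab.h part of this series, not shown
here):

static void zero_alloc_demo(struct kmem_cache *foo_cachep)
{
	void *p = kmalloc(0, GFP_KERNEL);	/* ZERO_SIZE_PTR, not NULL */
	void *obj;

	if (ZERO_OR_NULL_PTR(p))		/* true for NULL and ZERO_SIZE_PTR */
		pr_debug("zero-length allocation\n");
	kfree(p);				/* kfree() now accepts ZERO_SIZE_PTR */

	/* __GFP_ZERO replaces an explicit memset() after allocation */
	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL | __GFP_ZERO);
	if (obj)
		kmem_cache_free(foo_cachep, obj);
}
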
diff --git a/mm/slob.c b/mm/slob.c
index b489907..c89ef11 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -334,6 +334,8 @@
 		BUG_ON(!b);
 		spin_unlock_irqrestore(&slob_lock, flags);
 	}
+	if (unlikely((gfp & __GFP_ZERO) && b))
+		memset(b, 0, size);
 	return b;
 }
 
@@ -347,7 +349,7 @@
 	slobidx_t units;
 	unsigned long flags;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return;
 	BUG_ON(!size);
 
@@ -424,10 +426,13 @@
 
 void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
+	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 
 	if (size < PAGE_SIZE - align) {
-		unsigned int *m;
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 		m = slob_alloc(size + align, gfp, align, node);
 		if (m)
 			*m = size;
@@ -446,44 +451,11 @@
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
-/**
- * krealloc - reallocate memory. The contents will remain unchanged.
- *
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * The contents of the object pointed to are preserved up to the
- * lesser of the new and old sizes.  If @p is %NULL, krealloc()
- * behaves exactly like kmalloc().  If @size is 0 and @p is not a
- * %NULL pointer, the object pointed to is freed.
- */
-void *krealloc(const void *p, size_t new_size, gfp_t flags)
-{
-	void *ret;
-
-	if (unlikely(!p))
-		return kmalloc_track_caller(new_size, flags);
-
-	if (unlikely(!new_size)) {
-		kfree(p);
-		return NULL;
-	}
-
-	ret = kmalloc_track_caller(new_size, flags);
-	if (ret) {
-		memcpy(ret, p, min(new_size, ksize(p)));
-		kfree(p);
-	}
-	return ret;
-}
-EXPORT_SYMBOL(krealloc);
-
 void kfree(const void *block)
 {
 	struct slob_page *sp;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return;
 
 	sp = (struct slob_page *)virt_to_page(block);
@@ -501,7 +473,7 @@
 {
 	struct slob_page *sp;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return 0;
 
 	sp = (struct slob_page *)virt_to_page(block);
@@ -571,16 +543,6 @@
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
-{
-	void *ret = kmem_cache_alloc(c, flags);
-	if (ret)
-		memset(ret, 0, c->size);
-
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_zalloc);
-
 static void __kmem_cache_free(void *b, int size)
 {
 	if (size < PAGE_SIZE)
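
With __GFP_ZERO handled in the allocator core (see the slob_alloc() hunk above and the matching slab_alloc() change in mm/slub.c below), the zeroing wrappers no longer need per-allocator implementations.  A sketch of what the <linux/slab.h> replacements amount to (approximate; the exact inlines live in the header):

	static inline void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
	{
		return kmem_cache_alloc(c, flags | __GFP_ZERO);
	}

	static inline void *kzalloc(size_t size, gfp_t flags)
	{
		return kmalloc(size, flags | __GFP_ZERO);
	}
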
diff --git a/mm/slub.c b/mm/slub.c
index 6aea489..322f3a5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -205,6 +205,11 @@
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
+/*
+ * The page->inuse field is 16 bits wide, thus we have this limitation
+ */
+#define MAX_OBJECTS_PER_SLAB 65535
+
 /* Internal SLUB flags */
 #define __OBJECT_POISON 0x80000000	/* Poison object */
 
@@ -228,7 +233,7 @@
 
 /* A list of all slab caches on the system */
 static DECLARE_RWSEM(slub_lock);
-LIST_HEAD(slab_caches);
+static LIST_HEAD(slab_caches);
 
 /*
  * Tracking user of a slab.
@@ -247,9 +252,10 @@
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
 static void sysfs_slab_remove(struct kmem_cache *);
 #else
-static int sysfs_slab_add(struct kmem_cache *s) { return 0; }
-static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
-static void sysfs_slab_remove(struct kmem_cache *s) {}
+static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
+static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
+							{ return 0; }
+static inline void sysfs_slab_remove(struct kmem_cache *s) {}
 #endif
 
 /********************************************************************
@@ -344,7 +350,7 @@
 
 	for (i = 0; i < length; i++) {
 		if (newline) {
-			printk(KERN_ERR "%10s 0x%p: ", text, addr + i);
+			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
 			newline = 0;
 		}
 		printk(" %02x", addr[i]);
@@ -401,10 +407,11 @@
 
 static void init_tracking(struct kmem_cache *s, void *object)
 {
-	if (s->flags & SLAB_STORE_USER) {
-		set_track(s, object, TRACK_FREE, NULL);
-		set_track(s, object, TRACK_ALLOC, NULL);
-	}
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	set_track(s, object, TRACK_FREE, NULL);
+	set_track(s, object, TRACK_ALLOC, NULL);
 }
 
 static void print_track(const char *s, struct track *t)
@@ -412,65 +419,106 @@
 	if (!t->addr)
 		return;
 
-	printk(KERN_ERR "%s: ", s);
+	printk(KERN_ERR "INFO: %s in ", s);
 	__print_symbol("%s", (unsigned long)t->addr);
-	printk(" jiffies_ago=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
+	printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
 }
 
-static void print_trailer(struct kmem_cache *s, u8 *p)
+static void print_tracking(struct kmem_cache *s, void *object)
+{
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
+	print_track("Freed", get_track(s, object, TRACK_FREE));
+}
+
+static void print_page_info(struct page *page)
+{
+	printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
+		page, page->inuse, page->freelist, page->flags);
+
+}
+
+static void slab_bug(struct kmem_cache *s, char *fmt, ...)
+{
+	va_list args;
+	char buf[100];
+
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, args);
+	va_end(args);
+	printk(KERN_ERR "========================================"
+			"=====================================\n");
+	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
+	printk(KERN_ERR "----------------------------------------"
+			"-------------------------------------\n\n");
+}
+
+static void slab_fix(struct kmem_cache *s, char *fmt, ...)
+{
+	va_list args;
+	char buf[100];
+
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, args);
+	va_end(args);
+	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
+}
+
+static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 {
 	unsigned int off;	/* Offset of last byte */
+	u8 *addr = page_address(page);
+
+	print_tracking(s, p);
+
+	print_page_info(page);
+
+	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
+			p, p - addr, get_freepointer(s, p));
+
+	if (p > addr + 16)
+		print_section("Bytes b4", p - 16, 16);
+
+	print_section("Object", p, min(s->objsize, 128));
 
 	if (s->flags & SLAB_RED_ZONE)
 		print_section("Redzone", p + s->objsize,
 			s->inuse - s->objsize);
 
-	printk(KERN_ERR "FreePointer 0x%p -> 0x%p\n",
-			p + s->offset,
-			get_freepointer(s, p));
-
 	if (s->offset)
 		off = s->offset + sizeof(void *);
 	else
 		off = s->inuse;
 
-	if (s->flags & SLAB_STORE_USER) {
-		print_track("Last alloc", get_track(s, p, TRACK_ALLOC));
-		print_track("Last free ", get_track(s, p, TRACK_FREE));
+	if (s->flags & SLAB_STORE_USER)
 		off += 2 * sizeof(struct track);
-	}
 
 	if (off != s->size)
 		/* Beginning of the filler is the free pointer */
-		print_section("Filler", p + off, s->size - off);
+		print_section("Padding", p + off, s->size - off);
+
+	dump_stack();
 }
 
 static void object_err(struct kmem_cache *s, struct page *page,
 			u8 *object, char *reason)
 {
-	u8 *addr = page_address(page);
-
-	printk(KERN_ERR "*** SLUB %s: %s@0x%p slab 0x%p\n",
-			s->name, reason, object, page);
-	printk(KERN_ERR "    offset=%tu flags=0x%04lx inuse=%u freelist=0x%p\n",
-		object - addr, page->flags, page->inuse, page->freelist);
-	if (object > addr + 16)
-		print_section("Bytes b4", object - 16, 16);
-	print_section("Object", object, min(s->objsize, 128));
-	print_trailer(s, object);
-	dump_stack();
+	slab_bug(s, reason);
+	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, char *reason, ...)
+static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
 
-	va_start(args, reason);
-	vsnprintf(buf, sizeof(buf), reason, args);
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);
-	printk(KERN_ERR "*** SLUB %s: %s in slab @0x%p\n", s->name, buf,
-		page);
+	slab_bug(s, fmt);
+	print_page_info(page);
 	dump_stack();
 }
 
@@ -489,15 +537,46 @@
 			s->inuse - s->objsize);
 }
 
-static int check_bytes(u8 *start, unsigned int value, unsigned int bytes)
+static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
 {
 	while (bytes) {
 		if (*start != (u8)value)
-			return 0;
+			return start;
 		start++;
 		bytes--;
 	}
-	return 1;
+	return NULL;
+}
+
+static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
+						void *from, void *to)
+{
+	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
+	memset(from, data, to - from);
+}
+
+static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
+			u8 *object, char *what,
+			u8* start, unsigned int value, unsigned int bytes)
+{
+	u8 *fault;
+	u8 *end;
+
+	fault = check_bytes(start, value, bytes);
+	if (!fault)
+		return 1;
+
+	end = start + bytes;
+	while (end > fault && end[-1] == value)
+		end--;
+
+	slab_bug(s, "%s overwritten", what);
+	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
+					fault, end - 1, fault[0], value);
+	print_trailer(s, page, object);
+
+	restore_bytes(s, what, value, fault, end);
+	return 0;
 }
 
 /*
@@ -538,14 +617,6 @@
  * may be used with merged slabcaches.
  */
 
-static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
-						void *from, void *to)
-{
-	printk(KERN_ERR "@@@ SLUB %s: Restoring %s (0x%x) from 0x%p-0x%p\n",
-		s->name, message, data, from, to - 1);
-	memset(from, data, to - from);
-}
-
 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 {
 	unsigned long off = s->inuse;	/* The end of info */
@@ -561,39 +632,39 @@
 	if (s->size == off)
 		return 1;
 
-	if (check_bytes(p + off, POISON_INUSE, s->size - off))
-		return 1;
-
-	object_err(s, page, p, "Object padding check fails");
-
-	/*
-	 * Restore padding
-	 */
-	restore_bytes(s, "object padding", POISON_INUSE, p + off, p + s->size);
-	return 0;
+	return check_bytes_and_report(s, page, p, "Object padding",
+				p + off, POISON_INUSE, s->size - off);
 }
 
 static int slab_pad_check(struct kmem_cache *s, struct page *page)
 {
-	u8 *p;
-	int length, remainder;
+	u8 *start;
+	u8 *fault;
+	u8 *end;
+	int length;
+	int remainder;
 
 	if (!(s->flags & SLAB_POISON))
 		return 1;
 
-	p = page_address(page);
+	start = page_address(page);
+	end = start + (PAGE_SIZE << s->order);
 	length = s->objects * s->size;
-	remainder = (PAGE_SIZE << s->order) - length;
+	remainder = end - (start + length);
 	if (!remainder)
 		return 1;
 
-	if (!check_bytes(p + length, POISON_INUSE, remainder)) {
-		slab_err(s, page, "Padding check failed");
-		restore_bytes(s, "slab padding", POISON_INUSE, p + length,
-			p + length + remainder);
-		return 0;
-	}
-	return 1;
+	fault = check_bytes(start + length, POISON_INUSE, remainder);
+	if (!fault)
+		return 1;
+	while (end > fault && end[-1] == POISON_INUSE)
+		end--;
+
+	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
+	print_section("Padding", start, length);
+
+	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
+	return 0;
 }
 
 static int check_object(struct kmem_cache *s, struct page *page,
@@ -606,41 +677,22 @@
 		unsigned int red =
 			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
 
-		if (!check_bytes(endobject, red, s->inuse - s->objsize)) {
-			object_err(s, page, object,
-			active ? "Redzone Active" : "Redzone Inactive");
-			restore_bytes(s, "redzone", red,
-				endobject, object + s->inuse);
+		if (!check_bytes_and_report(s, page, object, "Redzone",
+			endobject, red, s->inuse - s->objsize))
 			return 0;
-		}
 	} else {
-		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse &&
-			!check_bytes(endobject, POISON_INUSE,
-					s->inuse - s->objsize)) {
-		object_err(s, page, p, "Alignment padding check fails");
-		/*
-		 * Fix it so that there will not be another report.
-		 *
-		 * Hmmm... We may be corrupting an object that now expects
-		 * to be longer than allowed.
-		 */
-		restore_bytes(s, "alignment padding", POISON_INUSE,
-			endobject, object + s->inuse);
-		}
+		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse)
+			check_bytes_and_report(s, page, p, "Alignment padding", endobject,
+				POISON_INUSE, s->inuse - s->objsize);
 	}
 
 	if (s->flags & SLAB_POISON) {
 		if (!active && (s->flags & __OBJECT_POISON) &&
-			(!check_bytes(p, POISON_FREE, s->objsize - 1) ||
-				p[s->objsize - 1] != POISON_END)) {
-
-			object_err(s, page, p, "Poison check failed");
-			restore_bytes(s, "Poison", POISON_FREE,
-						p, p + s->objsize -1);
-			restore_bytes(s, "Poison", POISON_END,
-					p + s->objsize - 1, p + s->objsize);
+			(!check_bytes_and_report(s, page, p, "Poison", p,
+					POISON_FREE, s->objsize - 1) ||
+			 !check_bytes_and_report(s, page, p, "Poison",
+			 	p + s->objsize -1, POISON_END, 1)))
 			return 0;
-		}
 		/*
 		 * check_pad_bytes cleans up on its own.
 		 */
@@ -673,25 +725,17 @@
 	VM_BUG_ON(!irqs_disabled());
 
 	if (!PageSlab(page)) {
-		slab_err(s, page, "Not a valid slab page flags=%lx "
-			"mapping=0x%p count=%d", page->flags, page->mapping,
-			page_count(page));
+		slab_err(s, page, "Not a valid slab page");
 		return 0;
 	}
 	if (page->offset * sizeof(void *) != s->offset) {
-		slab_err(s, page, "Corrupted offset %lu flags=0x%lx "
-			"mapping=0x%p count=%d",
-			(unsigned long)(page->offset * sizeof(void *)),
-			page->flags,
-			page->mapping,
-			page_count(page));
+		slab_err(s, page, "Corrupted offset %lu",
+			(unsigned long)(page->offset * sizeof(void *)));
 		return 0;
 	}
 	if (page->inuse > s->objects) {
-		slab_err(s, page, "inuse %u > max %u @0x%p flags=%lx "
-			"mapping=0x%p count=%d",
-			s->name, page->inuse, s->objects, page->flags,
-			page->mapping, page_count(page));
+		slab_err(s, page, "inuse %u > max %u",
+			s->name, page->inuse, s->objects);
 		return 0;
 	}
 	/* Slab_pad_check fixes things up after itself */
@@ -719,13 +763,10 @@
 				set_freepointer(s, object, NULL);
 				break;
 			} else {
-				slab_err(s, page, "Freepointer 0x%p corrupt",
-									fp);
+				slab_err(s, page, "Freepointer corrupt");
 				page->freelist = NULL;
 				page->inuse = s->objects;
-				printk(KERN_ERR "@@@ SLUB %s: Freelist "
-					"cleared. Slab 0x%p\n",
-					s->name, page);
+				slab_fix(s, "Freelist cleared");
 				return 0;
 			}
 			break;
@@ -737,11 +778,9 @@
 
 	if (page->inuse != s->objects - nr) {
 		slab_err(s, page, "Wrong object count. Counter is %d but "
-			"counted were %d", s, page, page->inuse,
-							s->objects - nr);
+			"counted were %d", page->inuse, s->objects - nr);
 		page->inuse = s->objects - nr;
-		printk(KERN_ERR "@@@ SLUB %s: Object count adjusted. "
-			"Slab @0x%p\n", s->name, page);
+		slab_fix(s, "Object count adjusted.");
 	}
 	return search == NULL;
 }
@@ -803,7 +842,7 @@
 		goto bad;
 
 	if (object && !on_freelist(s, page, object)) {
-		slab_err(s, page, "Object 0x%p already allocated", object);
+		object_err(s, page, object, "Object already allocated");
 		goto bad;
 	}
 
@@ -829,8 +868,7 @@
 		 * to avoid issues in the future. Marking all objects
 		 * as used avoids touching the remaining objects.
 		 */
-		printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. Marking all objects used.\n",
-			s->name, page);
+		slab_fix(s, "Marking all objects used");
 		page->inuse = s->objects;
 		page->freelist = NULL;
 		/* Fix up fields that may be corrupted */
@@ -851,7 +889,7 @@
 	}
 
 	if (on_freelist(s, page, object)) {
-		slab_err(s, page, "Object 0x%p already free", object);
+		object_err(s, page, object, "Object already free");
 		goto fail;
 	}
 
@@ -870,8 +908,8 @@
 			dump_stack();
 		}
 		else
-			slab_err(s, page, "object at 0x%p belongs "
-				"to slab %s", object, page->slab->name);
+			object_err(s, page, object,
+					"page slab pointer corrupt.");
 		goto fail;
 	}
 
@@ -885,8 +923,7 @@
 	return 1;
 
 fail:
-	printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
-		s->name, page, object);
+	slab_fix(s, "Object at 0x%p not freed", object);
 	return 0;
 }
 
@@ -1041,7 +1078,7 @@
 	void *last;
 	void *p;
 
-	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
 
 	if (flags & __GFP_WAIT)
 		local_irq_enable();
@@ -1359,7 +1396,7 @@
 	unfreeze_slab(s, page);
 }
 
-static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
+static inline void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
 {
 	slab_lock(page);
 	deactivate_slab(s, page, cpu);
@@ -1369,7 +1406,7 @@
  * Flush cpu slab.
  * Called from IPI handler with interrupts disabled.
  */
-static void __flush_cpu_slab(struct kmem_cache *s, int cpu)
+static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 {
 	struct page *page = s->cpu_slab[cpu];
 
@@ -1504,7 +1541,7 @@
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static void __always_inline *slab_alloc(struct kmem_cache *s,
-				gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node, void *addr)
 {
 	struct page *page;
 	void **object;
@@ -1522,6 +1559,10 @@
 		page->lockless_freelist = object[page->offset];
 	}
 	local_irq_restore(flags);
+
+	if (unlikely((gfpflags & __GFP_ZERO) && object))
+		memset(object, 0, s->objsize);
+
 	return object;
 }
 
@@ -1705,8 +1746,17 @@
 {
 	int order;
 	int rem;
+	int min_order = slub_min_order;
 
-	for (order = max(slub_min_order,
+	/*
+	 * If we would create too many objects per slab then reduce
+	 * the slab order even if it goes below slub_min_order.
+	 */
+	while (min_order > 0 &&
+		(PAGE_SIZE << min_order) >= MAX_OBJECTS_PER_SLAB * size)
+			min_order--;
+
+	for (order = max(min_order,
 				fls(min_objects * size - 1) - PAGE_SHIFT);
 			order <= max_order; order++) {
 
@@ -1720,6 +1770,9 @@
 		if (rem <= slab_size / fract_leftover)
 			break;
 
+		/* If the next size is too high then exit now */
+		if (slab_size * 2 >= MAX_OBJECTS_PER_SLAB * size)
+			break;
 	}
 
 	return order;
@@ -1800,7 +1853,9 @@
 	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
+#ifdef CONFIG_SLUB_DEBUG
 	INIT_LIST_HEAD(&n->full);
+#endif
 }
 
 #ifdef CONFIG_NUMA
@@ -1828,7 +1883,10 @@
 	page->freelist = get_freepointer(kmalloc_caches, n);
 	page->inuse++;
 	kmalloc_caches->node[node] = n;
-	setup_object_debug(kmalloc_caches, page, n);
+#ifdef CONFIG_SLUB_DEBUG
+	init_object(kmalloc_caches, n, 1);
+	init_tracking(kmalloc_caches, n);
+#endif
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
 	add_partial(n, page);
@@ -2006,7 +2064,7 @@
 	 * The page->inuse field is only 16 bit wide! So we cannot have
 	 * more than 64k objects per slab.
 	 */
-	if (!s->objects || s->objects > 65535)
+	if (!s->objects || s->objects > MAX_OBJECTS_PER_SLAB)
 		return 0;
 	return 1;
 
@@ -2110,7 +2168,7 @@
 /*
  * Release all resources used by a slab cache.
  */
-static int kmem_cache_close(struct kmem_cache *s)
+static inline int kmem_cache_close(struct kmem_cache *s)
 {
 	int node;
 
@@ -2138,12 +2196,13 @@
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
+		up_write(&slub_lock);
 		if (kmem_cache_close(s))
 			WARN_ON(1);
 		sysfs_slab_remove(s);
 		kfree(s);
-	}
-	up_write(&slub_lock);
+	} else
+		up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
@@ -2216,47 +2275,92 @@
 	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
 }
 
-static struct kmem_cache *get_slab(size_t size, gfp_t flags)
-{
-	int index = kmalloc_index(size);
-
-	if (!index)
-		return NULL;
-
-	/* Allocation too large? */
-	BUG_ON(index < 0);
-
 #ifdef CONFIG_ZONE_DMA
-	if ((flags & SLUB_DMA)) {
-		struct kmem_cache *s;
-		struct kmem_cache *x;
-		char *text;
-		size_t realsize;
+static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
+{
+	struct kmem_cache *s;
+	struct kmem_cache *x;
+	char *text;
+	size_t realsize;
 
-		s = kmalloc_caches_dma[index];
-		if (s)
-			return s;
+	s = kmalloc_caches_dma[index];
+	if (s)
+		return s;
 
-		/* Dynamically create dma cache */
-		x = kmalloc(kmem_size, flags & ~SLUB_DMA);
-		if (!x)
-			panic("Unable to allocate memory for dma cache\n");
+	/* Dynamically create dma cache */
+	x = kmalloc(kmem_size, flags & ~SLUB_DMA);
+	if (!x)
+		panic("Unable to allocate memory for dma cache\n");
 
-		if (index <= KMALLOC_SHIFT_HIGH)
-			realsize = 1 << index;
-		else {
-			if (index == 1)
-				realsize = 96;
-			else
-				realsize = 192;
-		}
-
-		text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
-				(unsigned int)realsize);
-		s = create_kmalloc_cache(x, text, realsize, flags);
+	realsize = kmalloc_caches[index].objsize;
+	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
+			(unsigned int)realsize);
+	s = create_kmalloc_cache(x, text, realsize, flags);
+	down_write(&slub_lock);
+	if (!kmalloc_caches_dma[index]) {
 		kmalloc_caches_dma[index] = s;
+		up_write(&slub_lock);
 		return s;
 	}
+	up_write(&slub_lock);
+	kmem_cache_destroy(s);
+	return kmalloc_caches_dma[index];
+}
+#endif
+
+/*
+ * Conversion table for small slabs sizes / 8 to the index in the
+ * kmalloc array. This is necessary for slabs < 192 since we have non power
+ * of two cache sizes there. The size of larger slabs can be determined using
+ * fls.
+ */
+static s8 size_index[24] = {
+	3,	/* 8 */
+	4,	/* 16 */
+	5,	/* 24 */
+	5,	/* 32 */
+	6,	/* 40 */
+	6,	/* 48 */
+	6,	/* 56 */
+	6,	/* 64 */
+	1,	/* 72 */
+	1,	/* 80 */
+	1,	/* 88 */
+	1,	/* 96 */
+	7,	/* 104 */
+	7,	/* 112 */
+	7,	/* 120 */
+	7,	/* 128 */
+	2,	/* 136 */
+	2,	/* 144 */
+	2,	/* 152 */
+	2,	/* 160 */
+	2,	/* 168 */
+	2,	/* 176 */
+	2,	/* 184 */
+	2	/* 192 */
+};
+
+static struct kmem_cache *get_slab(size_t size, gfp_t flags)
+{
+	int index;
+
+	if (size <= 192) {
+		if (!size)
+			return ZERO_SIZE_PTR;
+
+		index = size_index[(size - 1) / 8];
+	} else {
+		if (size > KMALLOC_MAX_SIZE)
+			return NULL;
+
+		index = fls(size - 1);
+	}
+
+#ifdef CONFIG_ZONE_DMA
+	if (unlikely((flags & SLUB_DMA)))
+		return dma_kmalloc_cache(index, flags);
+
 #endif
 	return &kmalloc_caches[index];
 }
@@ -2265,9 +2369,10 @@
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (s)
-		return slab_alloc(s, flags, -1, __builtin_return_address(0));
-	return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
+
+	return slab_alloc(s, flags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2276,9 +2381,10 @@
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (s)
-		return slab_alloc(s, flags, node, __builtin_return_address(0));
-	return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
+
+	return slab_alloc(s, flags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2288,7 +2394,7 @@
 	struct page *page;
 	struct kmem_cache *s;
 
-	if (object == ZERO_SIZE_PTR)
+	if (ZERO_OR_NULL_PTR(object))
 		return 0;
 
 	page = get_object_page(object);
@@ -2329,7 +2435,7 @@
 	 * this comparison would be true for all "negative" pointers
 	 * (which would cover the whole upper half of the address space).
 	 */
-	if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
+	if (ZERO_OR_NULL_PTR(x))
 		return;
 
 	page = virt_to_head_page(x);
@@ -2418,43 +2524,6 @@
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-/**
- * krealloc - reallocate memory. The contents will remain unchanged.
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * The contents of the object pointed to are preserved up to the
- * lesser of the new and old sizes.  If @p is %NULL, krealloc()
- * behaves exactly like kmalloc().  If @size is 0 and @p is not a
- * %NULL pointer, the object pointed to is freed.
- */
-void *krealloc(const void *p, size_t new_size, gfp_t flags)
-{
-	void *ret;
-	size_t ks;
-
-	if (unlikely(!p || p == ZERO_SIZE_PTR))
-		return kmalloc(new_size, flags);
-
-	if (unlikely(!new_size)) {
-		kfree(p);
-		return ZERO_SIZE_PTR;
-	}
-
-	ks = ksize(p);
-	if (ks >= new_size)
-		return (void *)p;
-
-	ret = kmalloc(new_size, flags);
-	if (ret) {
-		memcpy(ret, p, min(new_size, ks));
-		kfree(p);
-	}
-	return ret;
-}
-EXPORT_SYMBOL(krealloc);
-
 /********************************************************************
  *			Basic setup of slabs
  *******************************************************************/
@@ -2497,6 +2566,24 @@
 		caches++;
 	}
 
+
+	/*
+	 * Patch up the size_index table if we have strange large alignment
+	 * requirements for the kmalloc array. This seems to be the case
+	 * only for MIPS. The standard arches will not generate any code here.
+	 *
+	 * Largest permitted alignment is 256 bytes due to the way we
+	 * handle the index determination for the smaller caches.
+	 *
+	 * Make sure that nothing crazy happens if someone starts tinkering
+	 * around with ARCH_KMALLOC_MINALIGN
+	 */
+	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
+		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
+
+	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
+		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
@@ -2542,7 +2629,7 @@
 		size_t align, unsigned long flags,
 		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
-	struct list_head *h;
+	struct kmem_cache *s;
 
 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
 		return NULL;
@@ -2554,10 +2641,7 @@
 	align = calculate_alignment(flags, align, size);
 	size = ALIGN(size, align);
 
-	list_for_each(h, &slab_caches) {
-		struct kmem_cache *s =
-			container_of(h, struct kmem_cache, list);
-
+	list_for_each_entry(s, &slab_caches, list) {
 		if (slab_unmergeable(s))
 			continue;
 
@@ -2600,25 +2684,26 @@
 		 */
 		s->objsize = max(s->objsize, (int)size);
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
+		up_write(&slub_lock);
 		if (sysfs_slab_alias(s, name))
 			goto err;
-	} else {
-		s = kmalloc(kmem_size, GFP_KERNEL);
-		if (s && kmem_cache_open(s, GFP_KERNEL, name,
+		return s;
+	}
+	s = kmalloc(kmem_size, GFP_KERNEL);
+	if (s) {
+		if (kmem_cache_open(s, GFP_KERNEL, name,
 				size, align, flags, ctor)) {
-			if (sysfs_slab_add(s)) {
-				kfree(s);
-				goto err;
-			}
 			list_add(&s->list, &slab_caches);
-		} else
-			kfree(s);
+			up_write(&slub_lock);
+			if (sysfs_slab_add(s))
+				goto err;
+			return s;
+		}
+		kfree(s);
 	}
 	up_write(&slub_lock);
-	return s;
 
 err:
-	up_write(&slub_lock);
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slabcache %s\n", name);
 	else
@@ -2627,45 +2712,7 @@
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
-void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
-{
-	void *x;
-
-	x = slab_alloc(s, flags, -1, __builtin_return_address(0));
-	if (x)
-		memset(x, 0, s->objsize);
-	return x;
-}
-EXPORT_SYMBOL(kmem_cache_zalloc);
-
 #ifdef CONFIG_SMP
-static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
-{
-	struct list_head *h;
-
-	down_read(&slub_lock);
-	list_for_each(h, &slab_caches) {
-		struct kmem_cache *s =
-			container_of(h, struct kmem_cache, list);
-
-		func(s, cpu);
-	}
-	up_read(&slub_lock);
-}
-
-/*
- * Version of __flush_cpu_slab for the case that interrupts
- * are enabled.
- */
-static void cpu_slab_flush(struct kmem_cache *s, int cpu)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__flush_cpu_slab(s, cpu);
-	local_irq_restore(flags);
-}
-
 /*
  * Use the cpu notifier to insure that the cpu slabs are flushed when
  * necessary.
@@ -2674,13 +2721,21 @@
 		unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
+	struct kmem_cache *s;
+	unsigned long flags;
 
 	switch (action) {
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		for_all_slabs(cpu_slab_flush, cpu);
+		down_read(&slub_lock);
+		list_for_each_entry(s, &slab_caches, list) {
+			local_irq_save(flags);
+			__flush_cpu_slab(s, cpu);
+			local_irq_restore(flags);
+		}
+		up_read(&slub_lock);
 		break;
 	default:
 		break;
@@ -2697,8 +2752,8 @@
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (!s)
-		return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
 
 	return slab_alloc(s, gfpflags, -1, caller);
 }
@@ -2708,18 +2763,18 @@
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (!s)
-		return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
 
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
-static int validate_slab(struct kmem_cache *s, struct page *page)
+static int validate_slab(struct kmem_cache *s, struct page *page,
+						unsigned long *map)
 {
 	void *p;
 	void *addr = page_address(page);
-	DECLARE_BITMAP(map, s->objects);
 
 	if (!check_slab(s, page) ||
 			!on_freelist(s, page, NULL))
@@ -2741,10 +2796,11 @@
 	return 1;
 }
 
-static void validate_slab_slab(struct kmem_cache *s, struct page *page)
+static void validate_slab_slab(struct kmem_cache *s, struct page *page,
+						unsigned long *map)
 {
 	if (slab_trylock(page)) {
-		validate_slab(s, page);
+		validate_slab(s, page, map);
 		slab_unlock(page);
 	} else
 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
@@ -2761,7 +2817,8 @@
 	}
 }
 
-static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
+static int validate_slab_node(struct kmem_cache *s,
+		struct kmem_cache_node *n, unsigned long *map)
 {
 	unsigned long count = 0;
 	struct page *page;
@@ -2770,7 +2827,7 @@
 	spin_lock_irqsave(&n->list_lock, flags);
 
 	list_for_each_entry(page, &n->partial, lru) {
-		validate_slab_slab(s, page);
+		validate_slab_slab(s, page, map);
 		count++;
 	}
 	if (count != n->nr_partial)
@@ -2781,7 +2838,7 @@
 		goto out;
 
 	list_for_each_entry(page, &n->full, lru) {
-		validate_slab_slab(s, page);
+		validate_slab_slab(s, page, map);
 		count++;
 	}
 	if (count != atomic_long_read(&n->nr_slabs))
@@ -2794,17 +2851,23 @@
 	return count;
 }
 
-static unsigned long validate_slab_cache(struct kmem_cache *s)
+static long validate_slab_cache(struct kmem_cache *s)
 {
 	int node;
 	unsigned long count = 0;
+	unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
+				sizeof(unsigned long), GFP_KERNEL);
+
+	if (!map)
+		return -ENOMEM;
 
 	flush_all(s);
 	for_each_online_node(node) {
 		struct kmem_cache_node *n = get_node(s, node);
 
-		count += validate_slab_node(s, n);
+		count += validate_slab_node(s, n, map);
 	}
+	kfree(map);
 	return count;
 }
 
@@ -2893,18 +2956,14 @@
 			get_order(sizeof(struct location) * t->max));
 }
 
-static int alloc_loc_track(struct loc_track *t, unsigned long max)
+static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
 {
 	struct location *l;
 	int order;
 
-	if (!max)
-		max = PAGE_SIZE / sizeof(struct location);
-
 	order = get_order(sizeof(struct location) * max);
 
-	l = (void *)__get_free_pages(GFP_ATOMIC, order);
-
+	l = (void *)__get_free_pages(flags, order);
 	if (!l)
 		return 0;
 
@@ -2970,7 +3029,7 @@
 	/*
 	 * Not found. Insert new tracking element.
 	 */
-	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max))
+	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
 		return 0;
 
 	l = t->loc + pos;
@@ -3013,11 +3072,12 @@
 {
 	int n = 0;
 	unsigned long i;
-	struct loc_track t;
+	struct loc_track t = { 0, 0, NULL };
 	int node;
 
-	t.count = 0;
-	t.max = 0;
+	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
+			GFP_KERNEL))
+		return sprintf(buf, "Out of memory\n");
 
 	/* Push back cpu slabs */
 	flush_all(s);
@@ -3421,11 +3481,14 @@
 static ssize_t validate_store(struct kmem_cache *s,
 			const char *buf, size_t length)
 {
-	if (buf[0] == '1')
-		validate_slab_cache(s);
-	else
-		return -EINVAL;
-	return length;
+	int ret = -EINVAL;
+
+	if (buf[0] == '1') {
+		ret = validate_slab_cache(s);
+		if (ret >= 0)
+			ret = length;
+	}
+	return ret;
 }
 SLAB_ATTR(validate);
 
@@ -3579,7 +3642,7 @@
 	.filter = uevent_filter,
 };
 
-decl_subsys(slab, &slab_ktype, &slab_uevent_ops);
+static decl_subsys(slab, &slab_ktype, &slab_uevent_ops);
 
 #define ID_STR_LENGTH 64
 
@@ -3677,7 +3740,7 @@
 	struct saved_alias *next;
 };
 
-struct saved_alias *alias_list;
+static struct saved_alias *alias_list;
 
 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
 {
@@ -3705,7 +3768,7 @@
 
 static int __init slab_sysfs_init(void)
 {
-	struct list_head *h;
+	struct kmem_cache *s;
 	int err;
 
 	err = subsystem_register(&slab_subsys);
@@ -3716,10 +3779,7 @@
 
 	slab_state = SYSFS;
 
-	list_for_each(h, &slab_caches) {
-		struct kmem_cache *s =
-			container_of(h, struct kmem_cache, list);
-
+	list_for_each_entry(s, &slab_caches, list) {
 		err = sysfs_slab_add(s);
 		BUG_ON(err);
 	}
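
A few worked examples of the size_index[]/fls() lookup that get_slab() now performs, using the usual kmalloc cache numbering (index 1 is the 96-byte cache, index 2 the 192-byte cache, index n >= 3 the 2^n-byte cache):

	size =  70:  (70 - 1) / 8  =  8,  size_index[8]  = 1  ->  96-byte cache
	size = 100:  (100 - 1) / 8 = 12,  size_index[12] = 7  -> 128-byte cache
	size = 300:  size > 192,  fls(300 - 1) = 9            -> 512-byte cache
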
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 925d5c5..67daecb 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -334,7 +334,8 @@
 		 * Get a new page to read into from swap.
 		 */
 		if (!new_page) {
-			new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+			new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
+								vma, addr);
 			if (!new_page)
 				break;		/* Out of memory */
 		}
diff --git a/mm/truncate.c b/mm/truncate.c
index 7c994f2..5cdfbc1 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -82,7 +82,7 @@
 /*
  * If truncate cannot remove the fs-private metadata from the page, the page
  * becomes anonymous.  It will be left on the LRU and may even be mapped into
- * user pagetables if we're racing with filemap_nopage().
+ * user pagetables if we're racing with filemap_fault().
  *
  * We need to bale out if page->mapping is no longer equal to the original
  * mapping.  This happens a) when the VM reclaimed the page while we waited on
@@ -100,9 +100,9 @@
 	if (PagePrivate(page))
 		do_invalidatepage(page, 0);
 
+	remove_from_page_cache(page);
 	ClearPageUptodate(page);
 	ClearPageMappedToDisk(page);
-	remove_from_page_cache(page);
 	page_cache_release(page);	/* pagecache ref */
 }
 
@@ -192,6 +192,11 @@
 				unlock_page(page);
 				continue;
 			}
+			if (page_mapped(page)) {
+				unmap_mapping_range(mapping,
+				  (loff_t)page_index<<PAGE_CACHE_SHIFT,
+				  PAGE_CACHE_SIZE, 0);
+			}
 			truncate_complete_page(mapping, page);
 			unlock_page(page);
 		}
@@ -229,6 +234,11 @@
 				break;
 			lock_page(page);
 			wait_on_page_writeback(page);
+			if (page_mapped(page)) {
+				unmap_mapping_range(mapping,
+				  (loff_t)page->index<<PAGE_CACHE_SHIFT,
+				  PAGE_CACHE_SIZE, 0);
+			}
 			if (page->index > next)
 				next = page->index;
 			next++;
@@ -405,7 +415,7 @@
 				break;
 			}
 			wait_on_page_writeback(page);
-			while (page_mapped(page)) {
+			if (page_mapped(page)) {
 				if (!did_range_unmap) {
 					/*
 					 * Zap the rest of the file in one hit.
@@ -425,6 +435,7 @@
 					  PAGE_CACHE_SIZE, 0);
 				}
 			}
+			BUG_ON(page_mapped(page));
 			ret = do_launder_page(mapping, page);
 			if (ret == 0 && !invalidate_complete_page2(mapping, page))
 				ret = -EIO;
diff --git a/mm/util.c b/mm/util.c
index ace2aea..bf340d8 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -5,22 +5,7 @@
 #include <asm/uaccess.h>
 
 /**
- * __kzalloc - allocate memory. The memory is set to zero.
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- */
-void *__kzalloc(size_t size, gfp_t flags)
-{
-	void *ret = kmalloc_track_caller(size, flags);
-	if (ret)
-		memset(ret, 0, size);
-	return ret;
-}
-EXPORT_SYMBOL(__kzalloc);
-
-/*
  * kstrdup - allocate space for and copy an existing string
- *
  * @s: the string to duplicate
  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
  */
@@ -41,6 +26,30 @@
 EXPORT_SYMBOL(kstrdup);
 
 /**
+ * kstrndup - allocate space for and copy an existing string
+ * @s: the string to duplicate
+ * @max: read at most @max chars from @s
+ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ */
+char *kstrndup(const char *s, size_t max, gfp_t gfp)
+{
+	size_t len;
+	char *buf;
+
+	if (!s)
+		return NULL;
+
+	len = strnlen(s, max);
+	buf = kmalloc_track_caller(len+1, gfp);
+	if (buf) {
+		memcpy(buf, s, len);
+		buf[len] = '\0';
+	}
+	return buf;
+}
+EXPORT_SYMBOL(kstrndup);
+
+/**
  * kmemdup - duplicate region of memory
  *
  * @src: memory region to duplicate
@@ -58,9 +67,42 @@
 }
 EXPORT_SYMBOL(kmemdup);
 
+/**
+ * krealloc - reallocate memory. The contents will remain unchanged.
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * The contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.  If @p is %NULL, krealloc()
+ * behaves exactly like kmalloc().  If @size is 0 and @p is not a
+ * %NULL pointer, the object pointed to is freed.
+ */
+void *krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+	void *ret;
+	size_t ks;
+
+	if (unlikely(!new_size)) {
+		kfree(p);
+		return ZERO_SIZE_PTR;
+	}
+
+	ks = ksize(p);
+	if (ks >= new_size)
+		return (void *)p;
+
+	ret = kmalloc_track_caller(new_size, flags);
+	if (ret) {
+		memcpy(ret, p, min(new_size, ks));
+		kfree(p);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(krealloc);
+
 /*
  * strndup_user - duplicate an existing string from user space
- *
  * @s: The string to duplicate
  * @n: Maximum number of bytes to copy, including the trailing NUL.
  */
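
A short usage sketch for the helpers consolidated in mm/util.c.  Note that krealloc() leaves the old buffer valid on failure, so assigning its result straight back to the same pointer can leak; the names below are illustrative:

	char *label;
	void *tmp;

	label = kstrndup(src, 16, GFP_KERNEL);	/* copies at most 16 chars plus NUL */
	if (!label)
		return -ENOMEM;

	tmp = krealloc(buf, new_len, GFP_KERNEL);
	if (!tmp) {
		kfree(buf);		/* old buffer is still valid here */
		return -ENOMEM;
	}
	buf = tmp;			/* on success the old buffer was freed or reused */
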
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d3a9c53..3cee76a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -68,12 +68,12 @@
 	} while (pud++, addr = next, addr != end);
 }
 
-void unmap_vm_area(struct vm_struct *area)
+void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
 	pgd_t *pgd;
 	unsigned long next;
-	unsigned long addr = (unsigned long) area->addr;
-	unsigned long end = addr + area->size;
+	unsigned long start = addr;
+	unsigned long end = addr + size;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
@@ -84,7 +84,12 @@
 			continue;
 		vunmap_pud_range(pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
-	flush_tlb_kernel_range((unsigned long) area->addr, end);
+	flush_tlb_kernel_range(start, end);
+}
+
+static void unmap_vm_area(struct vm_struct *area)
+{
+	unmap_kernel_range((unsigned long)area->addr, area->size);
 }
 
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
@@ -159,6 +164,7 @@
 	flush_cache_vmap((unsigned long) area->addr, end);
 	return err;
 }
+EXPORT_SYMBOL_GPL(map_vm_area);
 
 static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
 					    unsigned long start, unsigned long end,
@@ -237,6 +243,7 @@
 {
 	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
 }
+EXPORT_SYMBOL_GPL(__get_vm_area);
 
 /**
  *	get_vm_area  -  reserve a contingous kernel virtual area
@@ -427,11 +434,12 @@
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
+		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+					PAGE_KERNEL, node);
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size,
-				(gfp_mask & GFP_LEVEL_MASK),
+				(gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
 				node);
 	}
 	area->pages = pages;
@@ -440,7 +448,6 @@
 		kfree(area);
 		return NULL;
 	}
-	memset(area->pages, 0, array_size);
 
 	for (i = 0; i < area->nr_pages; i++) {
 		if (node < 0)
@@ -578,9 +585,9 @@
 }
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
-#define GFP_VMALLOC32 GFP_DMA32
+#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
-#define GFP_VMALLOC32 GFP_DMA
+#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
 #else
 #define GFP_VMALLOC32 GFP_KERNEL
 #endif
@@ -762,3 +769,56 @@
 void  __attribute__((weak)) vmalloc_sync_all(void)
 {
 }
+
+
+static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+{
+	/* apply_to_page_range() does all the hard work. */
+	return 0;
+}
+
+/**
+ *	alloc_vm_area - allocate a range of kernel address space
+ *	@size:		size of the area
+ *	@returns:	NULL on failure, vm_struct on success
+ *
+ *	This function reserves a range of kernel address space, and
+ *	allocates pagetables to map that range.  No actual mappings
+ *	are created.  If the kernel address space is not shared
+ *	between processes, it syncs the pagetable across all
+ *	processes.
+ */
+struct vm_struct *alloc_vm_area(size_t size)
+{
+	struct vm_struct *area;
+
+	area = get_vm_area(size, VM_IOREMAP);
+	if (area == NULL)
+		return NULL;
+
+	/*
+	 * This ensures that page tables are constructed for this region
+	 * of kernel virtual address space and mapped into init_mm.
+	 */
+	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
+				area->size, f, NULL)) {
+		free_vm_area(area);
+		return NULL;
+	}
+
+	/* Make sure the pagetables are constructed in process kernel
+	   mappings */
+	vmalloc_sync_all();
+
+	return area;
+}
+EXPORT_SYMBOL_GPL(alloc_vm_area);
+
+void free_vm_area(struct vm_struct *area)
+{
+	struct vm_struct *ret;
+	ret = remove_vm_area(area->addr);
+	BUG_ON(ret != area);
+	kfree(area);
+}
+EXPORT_SYMBOL_GPL(free_vm_area);
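
A sketch of how a caller is expected to use the newly added alloc_vm_area()/free_vm_area() pair; the typical consumer is hypervisor-style code that installs its own mappings into the reserved range, which is only indicated by a comment here:

	struct vm_struct *area;

	area = alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	/*
	 * area->addr now has page tables allocated but no pages mapped.
	 * The caller installs its own PTEs (e.g. via a hypercall), tears
	 * them down again when done, and then releases the range:
	 */
	free_vm_area(area);
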
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1be5a63..d419e10 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -66,17 +66,8 @@
 	int swappiness;
 
 	int all_unreclaimable;
-};
 
-/*
- * The list of shrinker callbacks used by to apply pressure to
- * ageable caches.
- */
-struct shrinker {
-	shrinker_t		shrinker;
-	struct list_head	list;
-	int			seeks;	/* seeks to recreate an obj */
-	long			nr;	/* objs pending delete */
+	int order;
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -121,34 +112,25 @@
 /*
  * Add a shrinker callback to be called from the vm
  */
-struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
+void register_shrinker(struct shrinker *shrinker)
 {
-        struct shrinker *shrinker;
-
-        shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
-        if (shrinker) {
-	        shrinker->shrinker = theshrinker;
-	        shrinker->seeks = seeks;
-	        shrinker->nr = 0;
-	        down_write(&shrinker_rwsem);
-	        list_add_tail(&shrinker->list, &shrinker_list);
-	        up_write(&shrinker_rwsem);
-	}
-	return shrinker;
+	shrinker->nr = 0;
+	down_write(&shrinker_rwsem);
+	list_add_tail(&shrinker->list, &shrinker_list);
+	up_write(&shrinker_rwsem);
 }
-EXPORT_SYMBOL(set_shrinker);
+EXPORT_SYMBOL(register_shrinker);
 
 /*
  * Remove one
  */
-void remove_shrinker(struct shrinker *shrinker)
+void unregister_shrinker(struct shrinker *shrinker)
 {
 	down_write(&shrinker_rwsem);
 	list_del(&shrinker->list);
 	up_write(&shrinker_rwsem);
-	kfree(shrinker);
 }
-EXPORT_SYMBOL(remove_shrinker);
+EXPORT_SYMBOL(unregister_shrinker);
 
 #define SHRINK_BATCH 128
 /*
@@ -185,7 +167,7 @@
 	list_for_each_entry(shrinker, &shrinker_list, list) {
 		unsigned long long delta;
 		unsigned long total_scan;
-		unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
+		unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
 
 		delta = (4 * scanned) / shrinker->seeks;
 		delta *= max_pass;
@@ -213,8 +195,8 @@
 			int shrink_ret;
 			int nr_before;
 
-			nr_before = (*shrinker->shrinker)(0, gfp_mask);
-			shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
+			nr_before = (*shrinker->shrink)(0, gfp_mask);
+			shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
 			if (shrink_ret == -1)
 				break;
 			if (shrink_ret < nr_before)
@@ -481,7 +463,8 @@
 
 		referenced = page_referenced(page, 1);
 		/* In active use or really unfreeable?  Activate it. */
-		if (referenced && page_mapping_inuse(page))
+		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
+					referenced && page_mapping_inuse(page))
 			goto activate_locked;
 
 #ifdef CONFIG_SWAP
@@ -514,7 +497,7 @@
 		}
 
 		if (PageDirty(page)) {
-			if (referenced)
+			if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
 				goto keep_locked;
 			if (!may_enter_fs)
 				goto keep_locked;
@@ -598,6 +581,51 @@
 	return nr_reclaimed;
 }
 
+/* LRU Isolation modes. */
+#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
+#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
+#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */
+
+/*
+ * Attempt to remove the specified page from its LRU.  Only take this page
+ * if it is of the appropriate PageActive status.  Pages which are being
+ * freed elsewhere are also ignored.
+ *
+ * page:	page to consider
+ * mode:	one of the LRU isolation modes defined above
+ *
+ * returns 0 on success, -ve errno on failure.
+ */
+static int __isolate_lru_page(struct page *page, int mode)
+{
+	int ret = -EINVAL;
+
+	/* Only take pages on the LRU. */
+	if (!PageLRU(page))
+		return ret;
+
+	/*
+	 * When checking the active state, we need to be sure we are
+	 * dealing with comparible boolean values.  Take the logical not
+	 * of each.
+	 */
+	if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
+		return ret;
+
+	ret = -EBUSY;
+	if (likely(get_page_unless_zero(page))) {
+		/*
+		 * Be careful not to clear PageLRU until after we're
+		 * sure the page is not being freed elsewhere -- the
+		 * page release code relies on it.
+		 */
+		ClearPageLRU(page);
+		ret = 0;
+	}
+
+	return ret;
+}
+
 /*
  * zone->lru_lock is heavily contended.  Some of the functions that
  * shrink the lists perform better by taking out a batch of pages
@@ -612,38 +640,90 @@
  * @src:	The LRU list to pull pages off.
  * @dst:	The temp list to put pages on to.
  * @scanned:	The number of pages that were scanned.
+ * @order:	The caller's attempted allocation order
+ * @mode:	One of the LRU isolation modes
  *
  * returns how many pages were moved onto *@dst.
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		struct list_head *src, struct list_head *dst,
-		unsigned long *scanned)
+		unsigned long *scanned, int order, int mode)
 {
 	unsigned long nr_taken = 0;
-	struct page *page;
 	unsigned long scan;
 
 	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
-		struct list_head *target;
+		struct page *page;
+		unsigned long pfn;
+		unsigned long end_pfn;
+		unsigned long page_pfn;
+		int zone_id;
+
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
 		VM_BUG_ON(!PageLRU(page));
 
-		list_del(&page->lru);
-		target = src;
-		if (likely(get_page_unless_zero(page))) {
-			/*
-			 * Be careful not to clear PageLRU until after we're
-			 * sure the page is not being freed elsewhere -- the
-			 * page release code relies on it.
-			 */
-			ClearPageLRU(page);
-			target = dst;
+		switch (__isolate_lru_page(page, mode)) {
+		case 0:
+			list_move(&page->lru, dst);
 			nr_taken++;
-		} /* else it is being freed elsewhere */
+			break;
 
-		list_add(&page->lru, target);
+		case -EBUSY:
+			/* else it is being freed elsewhere */
+			list_move(&page->lru, src);
+			continue;
+
+		default:
+			BUG();
+		}
+
+		if (!order)
+			continue;
+
+		/*
+		 * Attempt to take all pages in the order aligned region
+		 * surrounding the tag page.  Only take those pages of
+		 * the same active state as that tag page.  We may safely
+		 * round the target page pfn down to the requested order
+		 * as the mem_map is guaranteed valid out to MAX_ORDER.
+		 * Where that page is in a different zone we will detect
+		 * it from its zone id and abort this block scan.
+		 */
+		zone_id = page_zone_id(page);
+		page_pfn = page_to_pfn(page);
+		pfn = page_pfn & ~((1 << order) - 1);
+		end_pfn = pfn + (1 << order);
+		for (; pfn < end_pfn; pfn++) {
+			struct page *cursor_page;
+
+			/* The target page is in the block, ignore it. */
+			if (unlikely(pfn == page_pfn))
+				continue;
+
+			/* Avoid holes within the zone. */
+			if (unlikely(!pfn_valid_within(pfn)))
+				break;
+
+			cursor_page = pfn_to_page(pfn);
+			/* Check that we have not crossed a zone boundary. */
+			if (unlikely(page_zone_id(cursor_page) != zone_id))
+				continue;
+			switch (__isolate_lru_page(cursor_page, mode)) {
+			case 0:
+				list_move(&cursor_page->lru, dst);
+				nr_taken++;
+				scan++;
+				break;
+
+			case -EBUSY:
+				/* else it is being freed elsewhere */
+				list_move(&cursor_page->lru, src);
+			default:
+				break;
+			}
+		}
 	}
 
 	*scanned = scan;
@@ -651,6 +731,24 @@
 }
 
 /*
+ * clear_active_flags() is a helper for shrink_active_list(), clearing
+ * any active bits from the pages in the list.
+ */
+static unsigned long clear_active_flags(struct list_head *page_list)
+{
+	int nr_active = 0;
+	struct page *page;
+
+	list_for_each_entry(page, page_list, lru)
+		if (PageActive(page)) {
+			ClearPageActive(page);
+			nr_active++;
+		}
+
+	return nr_active;
+}
+
+/*
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
  */
@@ -671,11 +769,18 @@
 		unsigned long nr_taken;
 		unsigned long nr_scan;
 		unsigned long nr_freed;
+		unsigned long nr_active;
 
 		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
-					     &zone->inactive_list,
-					     &page_list, &nr_scan);
-		__mod_zone_page_state(zone, NR_INACTIVE, -nr_taken);
+			     &zone->inactive_list,
+			     &page_list, &nr_scan, sc->order,
+			     (sc->order > PAGE_ALLOC_COSTLY_ORDER)?
+					     ISOLATE_BOTH : ISOLATE_INACTIVE);
+		nr_active = clear_active_flags(&page_list);
+
+		__mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
+		__mod_zone_page_state(zone, NR_INACTIVE,
+						-(nr_taken - nr_active));
 		zone->pages_scanned += nr_scan;
 		spin_unlock_irq(&zone->lru_lock);
 
@@ -820,7 +925,7 @@
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
 	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
-				    &l_hold, &pgscanned);
+			    &l_hold, &pgscanned, sc->order, ISOLATE_ACTIVE);
 	zone->pages_scanned += pgscanned;
 	__mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
 	spin_unlock_irq(&zone->lru_lock);
@@ -1011,7 +1116,7 @@
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
 {
 	int priority;
 	int ret = 0;
@@ -1026,6 +1131,7 @@
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.may_swap = 1,
 		.swappiness = vm_swappiness,
+		.order = order,
 	};
 
 	count_vm_event(ALLOCSTALL);
@@ -1131,6 +1237,7 @@
 		.may_swap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.swappiness = vm_swappiness,
+		.order = order,
 	};
 	/*
 	 * temp_priority is used to remember the scanning priority at which
@@ -1314,6 +1421,7 @@
 	 * trying to free the first piece of memory in the first place).
 	 */
 	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+	set_freezable();
 
 	order = 0;
 	for ( ; ; ) {
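
For users of the shrinker interface, the conversion above replaces set_shrinker()/remove_shrinker() with a caller-owned struct shrinker.  A minimal sketch of the new registration pattern; prune_my_cache() and my_cache_count() are hypothetical helpers standing in for the cache being shrunk:

	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
	{
		if (nr_to_scan)
			prune_my_cache(nr_to_scan);	/* drop some objects */
		return my_cache_count();		/* objects still cached */
	}

	static struct shrinker my_shrinker = {
		.shrink	= my_cache_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

	static int __init my_init(void)
	{
		register_shrinker(&my_shrinker);
		return 0;
	}

	static void __exit my_exit(void)
	{
		unregister_shrinker(&my_shrinker);
	}
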
diff --git a/mm/vmstat.c b/mm/vmstat.c
index eceaf49..fadf791 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -472,7 +472,7 @@
 #endif
 
 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
-					TEXT_FOR_HIGHMEM(xx)
+					TEXT_FOR_HIGHMEM(xx) xx "_movable",
 
 static const char * const vmstat_text[] = {
 	/* Zoned VM counters */
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index faa6aaf..c0f6861 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -460,11 +460,7 @@
 	skb_pull(skb, plen);
 	skb_set_mac_header(skb, -ETH_HLEN);
 	skb->pkt_type = PACKET_HOST;
-#ifdef CONFIG_BR2684_FAST_TRANS
-	skb->protocol = ((u16 *) skb->data)[-1];
-#else				/* some protocols might require this: */
 	skb->protocol = br_type_trans(skb, net_dev);
-#endif /* CONFIG_BR2684_FAST_TRANS */
 #else
 	skb_pull(skb, plen - ETH_HLEN);
 	skb->protocol = eth_type_trans(skb, net_dev);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index c83cf843..dae2a42 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1262,7 +1262,7 @@
 
 		for (;;) {
 			prepare_to_wait(sk->sk_sleep, &wait,
-			                TASK_INTERRUPTIBLE);
+					TASK_INTERRUPTIBLE);
 			if (sk->sk_state != TCP_SYN_SENT)
 				break;
 			if (!signal_pending(current)) {
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 1c8f4a0..1f78c3e 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -36,6 +36,7 @@
 #include <linux/signal.h>
 #include <linux/init.h>
 #include <linux/wait.h>
+#include <linux/freezer.h>
 #include <linux/errno.h>
 #include <linux/net.h>
 #include <net/sock.h>
@@ -474,7 +475,6 @@
 
 	daemonize("kbnepd %s", dev->name);
 	set_user_nice(current, -15);
-	current->flags |= PF_NOFREEZE;
 
 	init_waitqueue_entry(&wait, current);
 	add_wait_queue(sk->sk_sleep, &wait);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 66bef1c..ca60a451 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 #include <linux/poll.h>
 #include <linux/fcntl.h>
+#include <linux/freezer.h>
 #include <linux/skbuff.h>
 #include <linux/socket.h>
 #include <linux/ioctl.h>
@@ -287,7 +288,6 @@
 
 	daemonize("kcmtpd_ctr_%d", session->num);
 	set_user_nice(current, -15);
-	current->flags |= PF_NOFREEZE;
 
 	init_waitqueue_entry(&wait, current);
 	add_wait_queue(sk->sk_sleep, &wait);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index f6d867e..63caa41 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -982,7 +982,7 @@
 
 			skb->dev = (void *) hdev;
 			bt_cb(skb)->pkt_type = type;
-	
+
 			__reassembly(hdev, type) = skb;
 
 			scb = (void *) skb->cb;
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 450eb02..64d89ca 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -28,6 +28,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/poll.h>
+#include <linux/freezer.h>
 #include <linux/fcntl.h>
 #include <linux/skbuff.h>
 #include <linux/socket.h>
@@ -547,7 +548,6 @@
 
 	daemonize("khidpd_%04x%04x", vendor, product);
 	set_user_nice(current, -15);
-	current->flags |= PF_NOFREEZE;
 
 	init_waitqueue_entry(&ctrl_wait, current);
 	init_waitqueue_entry(&intr_wait, current);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 52e04df..bb722077 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -33,6 +33,7 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/init.h>
+#include <linux/freezer.h>
 #include <linux/wait.h>
 #include <linux/device.h>
 #include <linux/net.h>
@@ -1940,7 +1941,6 @@
 
 	daemonize("krfcommd");
 	set_user_nice(current, -10);
-	current->flags |= PF_NOFREEZE;
 
 	BT_DBG("");
 
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index a786e78..1ea2f86 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -125,7 +125,7 @@
 	char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
 	char *envp[] = { NULL };
 
-	r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
+	r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
 	if (r == 0) {
 		br->stp_enabled = BR_USER_STP;
 		printk(KERN_INFO "%s: userspace STP started\n", br->dev->name);
diff --git a/net/core/dev.c b/net/core/dev.c
index 13a0d9f..38212c3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2629,7 +2629,7 @@
 		return;
 
 	if (!netif_device_present(dev))
-	        return;
+		return;
 
 	if (dev->set_rx_mode)
 		dev->set_rx_mode(dev);
@@ -2715,20 +2715,6 @@
 	return 0;
 }
 
-void __dev_addr_discard(struct dev_addr_list **list)
-{
-	struct dev_addr_list *tmp;
-
-	while (*list != NULL) {
-		tmp = *list;
-		*list = tmp->next;
-		if (tmp->da_users > tmp->da_gusers)
-			printk("__dev_addr_discard: address leakage! "
-			       "da_users=%d\n", tmp->da_users);
-		kfree(tmp);
-	}
-}
-
 /**
  *	dev_unicast_delete	- Release secondary unicast address.
  *	@dev: device
@@ -2777,11 +2763,30 @@
 }
 EXPORT_SYMBOL(dev_unicast_add);
 
-static void dev_unicast_discard(struct net_device *dev)
+static void __dev_addr_discard(struct dev_addr_list **list)
+{
+	struct dev_addr_list *tmp;
+
+	while (*list != NULL) {
+		tmp = *list;
+		*list = tmp->next;
+		if (tmp->da_users > tmp->da_gusers)
+			printk("__dev_addr_discard: address leakage! "
+			       "da_users=%d\n", tmp->da_users);
+		kfree(tmp);
+	}
+}
+
+static void dev_addr_discard(struct net_device *dev)
 {
 	netif_tx_lock_bh(dev);
+
 	__dev_addr_discard(&dev->uc_list);
 	dev->uc_count = 0;
+
+	__dev_addr_discard(&dev->mc_list);
+	dev->mc_count = 0;
+
 	netif_tx_unlock_bh(dev);
 }
 
@@ -3739,8 +3744,7 @@
 	/*
 	 *	Flush the unicast and multicast chains
 	 */
-	dev_unicast_discard(dev);
-	dev_mc_discard(dev);
+	dev_addr_discard(dev);
 
 	if (dev->uninit)
 		dev->uninit(dev);
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 235a2a8..99aece1 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -177,18 +177,6 @@
 }
 EXPORT_SYMBOL(dev_mc_unsync);
 
-/*
- *	Discard multicast list when a device is downed
- */
-
-void dev_mc_discard(struct net_device *dev)
-{
-	netif_tx_lock_bh(dev);
-	__dev_addr_discard(&dev->mc_list);
-	dev->mc_count = 0;
-	netif_tx_unlock_bh(dev);
-}
-
 #ifdef CONFIG_PROC_FS
 static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos)
 {
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cc84d8d..590a767 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -79,27 +79,27 @@
 
 struct gen_estimator
 {
-	struct gen_estimator	*next;
+	struct list_head	list;
 	struct gnet_stats_basic	*bstats;
 	struct gnet_stats_rate_est	*rate_est;
 	spinlock_t		*stats_lock;
-	unsigned		interval;
 	int			ewma_log;
 	u64			last_bytes;
 	u32			last_packets;
 	u32			avpps;
 	u32			avbps;
+	struct rcu_head		e_rcu;
 };
 
 struct gen_estimator_head
 {
 	struct timer_list	timer;
-	struct gen_estimator	*list;
+	struct list_head	list;
 };
 
 static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
 
-/* Estimator array lock */
+/* Protects against NULL dereference */
 static DEFINE_RWLOCK(est_lock);
 
 static void est_timer(unsigned long arg)
@@ -107,13 +107,17 @@
 	int idx = (int)arg;
 	struct gen_estimator *e;
 
-	read_lock(&est_lock);
-	for (e = elist[idx].list; e; e = e->next) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(e, &elist[idx].list, list) {
 		u64 nbytes;
 		u32 npackets;
 		u32 rate;
 
 		spin_lock(e->stats_lock);
+		read_lock(&est_lock);
+		if (e->bstats == NULL)
+			goto skip;
+
 		nbytes = e->bstats->bytes;
 		npackets = e->bstats->packets;
 		rate = (nbytes - e->last_bytes)<<(7 - idx);
@@ -125,12 +129,14 @@
 		e->last_packets = npackets;
 		e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
 		e->rate_est->pps = (e->avpps+0x1FF)>>10;
+skip:
+		read_unlock(&est_lock);
 		spin_unlock(e->stats_lock);
 	}
 
-	if (elist[idx].list != NULL)
+	if (!list_empty(&elist[idx].list))
 		mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
-	read_unlock(&est_lock);
+	rcu_read_unlock();
 }
 
 /**
@@ -147,12 +153,17 @@
  * &rate_est with the statistics lock grabed during this period.
  *
  * Returns 0 on success or a negative error code.
+ *
+ * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic *bstats,
-	struct gnet_stats_rate_est *rate_est, spinlock_t *stats_lock, struct rtattr *opt)
+		      struct gnet_stats_rate_est *rate_est,
+		      spinlock_t *stats_lock,
+		      struct rtattr *opt)
 {
 	struct gen_estimator *est;
 	struct gnet_estimator *parm = RTA_DATA(opt);
+	int idx;
 
 	if (RTA_PAYLOAD(opt) < sizeof(*parm))
 		return -EINVAL;
@@ -164,7 +175,7 @@
 	if (est == NULL)
 		return -ENOBUFS;
 
-	est->interval = parm->interval + 2;
+	idx = parm->interval + 2;
 	est->bstats = bstats;
 	est->rate_est = rate_est;
 	est->stats_lock = stats_lock;
@@ -174,20 +185,25 @@
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
-	est->next = elist[est->interval].list;
-	if (est->next == NULL) {
-		init_timer(&elist[est->interval].timer);
-		elist[est->interval].timer.data = est->interval;
-		elist[est->interval].timer.expires = jiffies + ((HZ<<est->interval)/4);
-		elist[est->interval].timer.function = est_timer;
-		add_timer(&elist[est->interval].timer);
+	if (!elist[idx].timer.function) {
+		INIT_LIST_HEAD(&elist[idx].list);
+		setup_timer(&elist[idx].timer, est_timer, idx);
 	}
-	write_lock_bh(&est_lock);
-	elist[est->interval].list = est;
-	write_unlock_bh(&est_lock);
+
+	if (list_empty(&elist[idx].list))
+		mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
+
+	list_add_rcu(&est->list, &elist[idx].list);
 	return 0;
 }
 
+static void __gen_kill_estimator(struct rcu_head *head)
+{
+	struct gen_estimator *e = container_of(head,
+					struct gen_estimator, e_rcu);
+	kfree(e);
+}
+
 /**
  * gen_kill_estimator - remove a rate estimator
  * @bstats: basic statistics
@@ -195,31 +211,32 @@
  *
  * Removes the rate estimator specified by &bstats and &rate_est
  * and deletes the timer.
+ *
+ * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic *bstats,
 	struct gnet_stats_rate_est *rate_est)
 {
 	int idx;
-	struct gen_estimator *est, **pest;
+	struct gen_estimator *e, *n;
 
 	for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
-		int killed = 0;
-		pest = &elist[idx].list;
-		while ((est=*pest) != NULL) {
-			if (est->rate_est != rate_est || est->bstats != bstats) {
-				pest = &est->next;
+
+		/* Skip non initialized indexes */
+		if (!elist[idx].timer.function)
+			continue;
+
+		list_for_each_entry_safe(e, n, &elist[idx].list, list) {
+			if (e->rate_est != rate_est || e->bstats != bstats)
 				continue;
-			}
 
 			write_lock_bh(&est_lock);
-			*pest = est->next;
+			e->bstats = NULL;
 			write_unlock_bh(&est_lock);
 
-			kfree(est);
-			killed++;
+			list_del_rcu(&e->list);
+			call_rcu(&e->e_rcu, __gen_kill_estimator);
 		}
-		if (killed && elist[idx].list == NULL)
-			del_timer(&elist[idx].timer);
 	}
 }
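
The gen_estimator conversion above replaces the hand-rolled singly linked list and its rwlock with an RCU-protected list, keeping est_lock only to guard the bstats pointer against concurrent teardown. Below is a minimal, illustrative sketch of that reader/writer idiom; the my_est names and the assumption that writers are serialized externally (e.g. by the RTNL) are mine, not part of the patch.

	#include <linux/kernel.h>
	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct my_est {
		struct list_head	list;
		struct rcu_head		rcu;
		int			value;
	};

	static LIST_HEAD(my_est_list);

	/* Reader side: walk the list under rcu_read_lock(). */
	static int my_est_sum(void)
	{
		struct my_est *e;
		int sum = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(e, &my_est_list, list)
			sum += e->value;
		rcu_read_unlock();
		return sum;
	}

	/* RCU callback: free the entry once all readers are done with it. */
	static void my_est_free(struct rcu_head *head)
	{
		kfree(container_of(head, struct my_est, rcu));
	}

	/* Writer side, serialized externally: unlink now, free later. */
	static void my_est_del(struct my_est *e)
	{
		list_del_rcu(&e->list);
		call_rcu(&e->rcu, my_est_free);
	}
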
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index d1264e9..de1b26a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -124,13 +124,6 @@
 	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
 	    npinfo->poll_owner != smp_processor_id() &&
 	    spin_trylock(&npinfo->poll_lock)) {
-		/* When calling dev->poll from poll_napi, we may end up in
-		 * netif_rx_complete. However, only the CPU to which the
-		 * device was queued is allowed to remove it from poll_list.
-		 * Setting POLL_LIST_FROZEN tells netif_rx_complete
-		 * to leave the NAPI state alone.
-		 */
-		set_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
 		npinfo->rx_flags |= NETPOLL_RX_DROP;
 		atomic_inc(&trapped);
 
@@ -138,7 +131,6 @@
 
 		atomic_dec(&trapped);
 		npinfo->rx_flags &= ~NETPOLL_RX_DROP;
-		clear_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
 		spin_unlock(&npinfo->poll_lock);
 	}
 }
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 7521533..bca787f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3465,6 +3465,8 @@
 
 	set_current_state(TASK_INTERRUPTIBLE);
 
+	set_freezable();
+
 	while (!kthread_should_stop()) {
 		pkt_dev = next_to_run(t);
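
The freezer-related hunks in this merge (dropping PF_NOFREEZE in krfcommd above, adding set_freezable() here in pktgen) follow the convention that kernel threads now opt in to freezing explicitly. A rough sketch of a freezable kthread loop, assuming the usual <linux/freezer.h> helpers; my_thread is purely illustrative:

	#include <linux/freezer.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	/* Illustrative freezable kernel thread: opt in with set_freezable()
	 * and park in try_to_freeze() whenever the freezer asks. */
	static int my_thread(void *data)
	{
		set_freezable();

		while (!kthread_should_stop()) {
			try_to_freeze();
			/* ... do one unit of work, then sleep ... */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}
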
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 864cbdf..06eccca 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -98,7 +98,7 @@
 }
 
 int __rtattr_parse_nested_compat(struct rtattr *tb[], int maxattr,
-			         struct rtattr *rta, int len)
+				 struct rtattr *rta, int len)
 {
 	if (RTA_PAYLOAD(rta) < len)
 		return -1;
diff --git a/net/core/sock.c b/net/core/sock.c
index 091032a..239a08a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -171,6 +171,19 @@
   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
   "slock-AF_RXRPC" , "slock-AF_MAX"
 };
+static const char *af_family_clock_key_strings[AF_MAX+1] = {
+  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
+  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
+  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
+  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
+  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
+  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
+  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
+  "clock-21"       , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
+  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
+  "clock-27"       , "clock-28"          , "clock-29"          ,
+  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_MAX"
+};
 #endif
 
 /*
@@ -217,7 +230,7 @@
 			warned++;
 			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
 			       "tries to set negative timeout\n",
-			        current->comm, current->pid);
+				current->comm, current->pid);
 		return 0;
 	}
 	*timeo_p = MAX_SCHEDULE_TIMEOUT;
@@ -941,8 +954,9 @@
 
 		rwlock_init(&newsk->sk_dst_lock);
 		rwlock_init(&newsk->sk_callback_lock);
-		lockdep_set_class(&newsk->sk_callback_lock,
-				   af_callback_keys + newsk->sk_family);
+		lockdep_set_class_and_name(&newsk->sk_callback_lock,
+				af_callback_keys + newsk->sk_family,
+				af_family_clock_key_strings[newsk->sk_family]);
 
 		newsk->sk_dst_cache	= NULL;
 		newsk->sk_wmem_queued	= 0;
@@ -1530,8 +1544,9 @@
 
 	rwlock_init(&sk->sk_dst_lock);
 	rwlock_init(&sk->sk_callback_lock);
-	lockdep_set_class(&sk->sk_callback_lock,
-			   af_callback_keys + sk->sk_family);
+	lockdep_set_class_and_name(&sk->sk_callback_lock,
+			af_callback_keys + sk->sk_family,
+			af_family_clock_key_strings[sk->sk_family]);
 
 	sk->sk_state_change	=	sock_def_wakeup;
 	sk->sk_data_ready	=	sock_def_readable;
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 515225f..dd0fc99 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -227,7 +227,7 @@
 		       struct list_head *li_hist_list,
 		       struct list_head *hist_list,
 		       struct timeval *last_feedback, u16 s, u32 bytes_recv,
-                       u32 previous_x_recv, u64 seq_loss, u8 win_loss)
+		       u32 previous_x_recv, u64 seq_loss, u8 win_loss)
 {
 	struct dccp_li_hist_entry *head;
 	u64 seq_temp;
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 43a3adb..bae10b0 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -112,7 +112,7 @@
 	.kp	= {
 		.symbol_name = "dccp_sendmsg",
 	},
-	.entry	= JPROBE_ENTRY(jdccp_sendmsg),
+	.entry	= jdccp_sendmsg,
 };
 
 static int dccpprobe_open(struct inode *inode, struct file *file)
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index 523a137..465b73d 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -90,14 +90,11 @@
 	}
 
 	/* Add channel and frequency */
+	/* Note : userspace automatically computes channel using iwrange */
 	iwe.cmd = SIOCGIWFREQ;
-	iwe.u.freq.m = network->channel;
-	iwe.u.freq.e = 0;
-	iwe.u.freq.i = 0;
-	start = iwe_stream_add_event(start, stop, &iwe, IW_EV_FREQ_LEN);
-
 	iwe.u.freq.m = ieee80211_channel_to_freq(ieee, network->channel);
 	iwe.u.freq.e = 6;
+	iwe.u.freq.i = 0;
 	start = iwe_stream_add_event(start, stop, &iwe, IW_EV_FREQ_LEN);
 
 	/* Add encryption capability */
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index cc8110b..afb6c66 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -271,8 +271,11 @@
 			 */
 			dprintk(KERN_INFO PFX "Associate: Scanning for networks first.\n");
 			ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL);
-			if (ieee80211softmac_start_scan(mac))
+			if (ieee80211softmac_start_scan(mac)) {
 				dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n");
+				mac->associnfo.associating = 0;
+				mac->associnfo.associated = 0;
+			}
 			goto out;
 		} else {
 			mac->associnfo.associating = 0;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 2eb909b..eff6bce 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -817,7 +817,7 @@
 static void nl_fib_lookup_init(void)
 {
       netlink_kernel_create(NETLINK_FIB_LOOKUP, 0, nl_fib_input, NULL,
-      			    THIS_MODULE);
+			    THIS_MODULE);
 }
 
 static void fib_disable_ip(struct net_device *dev, int force)
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 9cb04df..8c95cf0 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -86,7 +86,7 @@
 		goto sr_failed;
 
 	if (unlikely(skb->len > dst_mtu(&rt->u.dst) &&
-	             (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
+		     (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
 		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 			  htonl(dst_mtu(&rt->u.dst)));
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 3da9d73..27c7918 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -177,7 +177,7 @@
 	struct ct_iter_state *st;
 	int ret;
 
-	st = kmalloc(sizeof(struct ct_iter_state), GFP_KERNEL);
+	st = kzalloc(sizeof(struct ct_iter_state), GFP_KERNEL);
 	if (st == NULL)
 		return -ENOMEM;
 	ret = seq_open(file, &ct_seq_ops);
@@ -185,7 +185,6 @@
 		goto out_free;
 	seq          = file->private_data;
 	seq->private = st;
-	memset(st, 0, sizeof(struct ct_iter_state));
 	return ret;
 out_free:
 	kfree(st);
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index dd9ef65..519de09 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -137,7 +137,7 @@
 }
 
 static void bictcp_cong_avoid(struct sock *sk, u32 ack,
-			      u32 seq_rtt, u32 in_flight, int data_acked)
+			      u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1260e52..55fca18 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -324,8 +324,7 @@
 /* This is Jacobson's slow start and congestion avoidance.
  * SIGCOMM '88, p. 328.
  */
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
-			 int flag)
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index ebfaac2..d17da30 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -270,7 +270,7 @@
 }
 
 static void bictcp_cong_avoid(struct sock *sk, u32 ack,
-			      u32 seq_rtt, u32 in_flight, int data_acked)
+			      u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 43d624e5..14a073d 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -109,7 +109,7 @@
 	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
 }
 
-static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
+static void hstcp_cong_avoid(struct sock *sk, u32 adk,
 			     u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 4ba4a7a..632c05a 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -225,7 +225,7 @@
 	return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
 }
 
-static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void htcp_cong_avoid(struct sock *sk, u32 ack, s32 rtt,
 			    u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index e5be351..b3e55cf 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -85,7 +85,7 @@
  *     o Give cwnd a new value based on the model proposed
  *     o remember increments <1
  */
-static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void hybla_cong_avoid(struct sock *sk, u32 ack,
 			    u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -103,7 +103,7 @@
 		return;
 
 	if (!ca->hybla_en)
-		return tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
+		return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
 	if (ca->rho == 0)
 		hybla_recalc_param(sk);
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index b2b2256..cc5de6f 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -258,7 +258,7 @@
 /*
  * Increase window in response to successful acknowledgment.
  */
-static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack,
 				    u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4e5884a..fec8a7a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2323,11 +2323,11 @@
 		tcp_ack_no_tstamp(sk, seq_rtt, flag);
 }
 
-static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_cong_avoid(struct sock *sk, u32 ack,
 			   u32 in_flight, int good)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
+	icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight, good);
 	tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -2826,11 +2826,11 @@
 		/* Advance CWND, if state allows this. */
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
 		    tcp_may_raise_cwnd(sk, flag))
-			tcp_cong_avoid(sk, ack,  seq_rtt, prior_in_flight, 0);
+			tcp_cong_avoid(sk, ack, prior_in_flight, 0);
 		tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
 	} else {
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
-			tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
+			tcp_cong_avoid(sk, ack, prior_in_flight, 1);
 	}
 
 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index e49836c..80e140e 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -115,13 +115,12 @@
  * Will only call newReno CA when away from inference.
  * From TCP-LP's paper, this will be handled in additive increasement.
  */
-static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
-			      int flag)
+static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
 {
 	struct lp *lp = inet_csk_ca(sk);
 
 	if (!(lp->flag & LP_WITHIN_INF))
-		tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
+		tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 }
 
 /**
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 20aea15..666d8a5 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1615,7 +1615,7 @@
 		if (window <= free_space - mss || window > free_space)
 			window = (free_space/mss)*mss;
 		else if (mss == full_space &&
-		         free_space > window + full_space/2)
+			 free_space > window + full_space/2)
 			window = free_space;
 	}
 
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index f37d592..b76398d 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -130,7 +130,7 @@
 	.kp = {
 		.symbol_name	= "tcp_rcv_established",
 	},
-	.entry	= JPROBE_ENTRY(jtcp_rcv_established),
+	.entry	= jtcp_rcv_established,
 };
 
 static int tcpprobe_open(struct inode * inode, struct file * file)
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 4624501..be27a33 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -15,7 +15,7 @@
 #define TCP_SCALABLE_AI_CNT	50U
 #define TCP_SCALABLE_MD_SCALE	3
 
-static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack,
 				    u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index e218a51..914e030 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -163,13 +163,13 @@
 EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
 
 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
-				 u32 seq_rtt, u32 in_flight, int flag)
+				 u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct vegas *vegas = inet_csk_ca(sk);
 
 	if (!vegas->doing_vegas_now)
-		return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+		return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
 	/* The key players are v_beg_snd_una and v_beg_snd_nxt.
 	 *
@@ -228,7 +228,7 @@
 			/* We don't have enough RTT samples to do the Vegas
 			 * calculation, so we'll behave like Reno.
 			 */
-			tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+			tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 		} else {
 			u32 rtt, target_cwnd, diff;
 
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index ec854cc..7a55ddf 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -115,13 +115,13 @@
 }
 
 static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
-				u32 seq_rtt, u32 in_flight, int flag)
+				u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct veno *veno = inet_csk_ca(sk);
 
 	if (!veno->doing_veno_now)
-		return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+		return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
 	/* limited by applications */
 	if (!tcp_is_cwnd_limited(sk, in_flight))
@@ -132,7 +132,7 @@
 		/* We don't have enough rtt samples to do the Veno
 		 * calculation, so we'll behave like Reno.
 		 */
-		tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+		tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 	} else {
 		u32 rtt, target_cwnd;
 
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 545ed23..c04b7c6 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -70,7 +70,7 @@
 }
 
 static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
-				u32 seq_rtt, u32 in_flight, int flag)
+				u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct yeah *yeah = inet_csk_ca(sk);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index dcd7e32..4c670cf 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2567,7 +2567,7 @@
  *    Remove IrDA protocol
  *
  */
-void __exit irsock_cleanup(void)
+void irsock_cleanup(void)
 {
 	sock_unregister(PF_IRDA);
 	proto_unregister(&irda_proto);
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 7b5def1..435b563 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -95,14 +95,14 @@
 	return 0;
 }
 
-static void __exit leftover_dongle(void *arg)
+static void leftover_dongle(void *arg)
 {
 	struct dongle_reg *reg = arg;
 	IRDA_WARNING("IrDA: Dongle type %x not unregistered\n",
 		     reg->type);
 }
 
-void __exit irda_device_cleanup(void)
+void irda_device_cleanup(void)
 {
 	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
 
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 774eb70..ee3889f 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -153,7 +153,7 @@
  *    Initializes the IrIAP layer, called by the module cleanup code in
  *    irmod.c
  */
-void __exit iriap_cleanup(void)
+void iriap_cleanup(void)
 {
 	irlmp_unregister_service(service_handle);
 
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index 4adaae2..cf30245 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -36,39 +36,6 @@
  */
 struct ias_value irias_missing = { IAS_MISSING, 0, 0, 0, {0}};
 
-/*
- * Function strndup (str, max)
- *
- *    My own kernel version of strndup!
- *
- * Faster, check boundary... Jean II
- */
-static char *strndup(char *str, size_t max)
-{
-	char *new_str;
-	int len;
-
-	/* Check string */
-	if (str == NULL)
-		return NULL;
-	/* Check length, truncate */
-	len = strlen(str);
-	if(len > max)
-		len = max;
-
-	/* Allocate new string */
-	new_str = kmalloc(len + 1, GFP_ATOMIC);
-	if (new_str == NULL) {
-		IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
-		return NULL;
-	}
-
-	/* Copy and truncate */
-	memcpy(new_str, str, len);
-	new_str[len] = '\0';
-
-	return new_str;
-}
 
 /*
  * Function ias_new_object (name, id)
@@ -90,7 +57,7 @@
 	}
 
 	obj->magic = IAS_OBJECT_MAGIC;
-	obj->name = strndup(name, IAS_MAX_CLASSNAME);
+	obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC);
 	if (!obj->name) {
 		IRDA_WARNING("%s(), Unable to allocate name!\n",
 			     __FUNCTION__);
@@ -360,7 +327,7 @@
 	}
 
 	attrib->magic = IAS_ATTRIB_MAGIC;
-	attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
+	attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
 
 	/* Insert value */
 	attrib->value = irias_new_integer_value(value);
@@ -404,7 +371,7 @@
 	}
 
 	attrib->magic = IAS_ATTRIB_MAGIC;
-	attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
+	attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
 
 	attrib->value = irias_new_octseq_value( octets, len);
 	if (!attrib->name || !attrib->value) {
@@ -446,7 +413,7 @@
 	}
 
 	attrib->magic = IAS_ATTRIB_MAGIC;
-	attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
+	attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
 
 	attrib->value = irias_new_string_value(value);
 	if (!attrib->name || !attrib->value) {
@@ -506,7 +473,7 @@
 
 	value->type = IAS_STRING;
 	value->charset = CS_ASCII;
-	value->t.string = strndup(string, IAS_MAX_STRING);
+	value->t.string = kstrndup(string, IAS_MAX_STRING, GFP_ATOMIC);
 	if (!value->t.string) {
 		IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
 		kfree(value);
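
The irias_object changes above drop the file-local strndup() in favour of the generic kstrndup(), which bounds the copy, NUL-terminates, and takes an explicit GFP flag. A hedged usage sketch (dup_attrib_name is illustrative only):

	#include <linux/slab.h>
	#include <linux/string.h>

	/* Illustrative: duplicate at most 'max' bytes of an attribute name.
	 * The result is NUL-terminated and must be released with kfree(). */
	static char *dup_attrib_name(const char *name, size_t max)
	{
		if (!name)
			return NULL;
		return kstrndup(name, max, GFP_ATOMIC);
	}
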
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 2fc9f51..3d76aaf 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -95,7 +95,7 @@
 	return 0;
 }
 
-void __exit irlap_cleanup(void)
+void irlap_cleanup(void)
 {
 	IRDA_ASSERT(irlap != NULL, return;);
 
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 24a5e3f..7efa930 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -116,7 +116,7 @@
  *    Remove IrLMP layer
  *
  */
-void __exit irlmp_cleanup(void)
+void irlmp_cleanup(void)
 {
 	/* Check for main structure */
 	IRDA_ASSERT(irlmp != NULL, return;);
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index d6f9aba..181cb51 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -84,7 +84,7 @@
  *    Unregister irda entry in /proc file system
  *
  */
-void __exit irda_proc_unregister(void)
+void irda_proc_unregister(void)
 {
 	int i;
 
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 2e968e7..957e04f 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -287,7 +287,7 @@
  *    Unregister our sysctl interface
  *
  */
-void __exit irda_sysctl_unregister(void)
+void irda_sysctl_unregister(void)
 {
 	unregister_sysctl_table(irda_table_header);
 }
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 7f50832a..3d7ab03 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -109,7 +109,7 @@
  *    Called by module destruction/cleanup code
  *
  */
-void __exit irttp_cleanup(void)
+void irttp_cleanup(void)
 {
 	/* Check for main structure */
 	IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index e9738da..a9c2d07 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -13,6 +13,7 @@
 	ieee80211_iface.o \
 	ieee80211_rate.o \
 	michael.o \
+	regdomain.o \
 	tkip.o \
 	aes_ccm.o \
 	wme.o \
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index a3e01d7..799a920 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -397,6 +397,8 @@
 			 void *ndev)
 {
 	struct net_device *dev = ndev;
+	struct dentry *dir;
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	char buf[10+IFNAMSIZ];
 
 	if (state != NETDEV_CHANGENAME)
@@ -408,10 +410,11 @@
 	if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
 		return 0;
 
-	/* TODO
 	sprintf(buf, "netdev:%s", dev->name);
-	debugfs_rename(IEEE80211_DEV_TO_SUB_IF(dev)->debugfsdir, buf);
-	*/
+	dir = sdata->debugfsdir;
+	if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf))
+		printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs "
+		       "dir to %s\n", buf);
 
 	return 0;
 }
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
index 2ddf4ef..c944b17 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/ieee80211.c
@@ -4986,8 +4986,8 @@
 	 * and we need some headroom for passing the frame to monitor
 	 * interfaces, but never both at the same time.
 	 */
-	local->tx_headroom = max(local->hw.extra_tx_headroom,
-				 sizeof(struct ieee80211_tx_status_rtap_hdr));
+	local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom,
+				   sizeof(struct ieee80211_tx_status_rtap_hdr));
 
 	debugfs_hw_add(local);
 
@@ -5095,7 +5095,7 @@
 	}
 
 	if (!(hw->flags & IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED))
-		ieee80211_init_client(local->mdev);
+		ieee80211_set_default_regdomain(mode);
 
 	return 0;
 }
@@ -5246,6 +5246,7 @@
 	}
 
 	ieee80211_debugfs_netdev_init();
+	ieee80211_regdomain_init();
 
 	return 0;
 }
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 055a2a9..6f7bae7 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -759,7 +759,6 @@
 /* ieee80211_ioctl.c */
 int ieee80211_set_compression(struct ieee80211_local *local,
 			      struct net_device *dev, struct sta_info *sta);
-int ieee80211_init_client(struct net_device *dev);
 int ieee80211_set_channel(struct ieee80211_local *local, int channel, int freq);
 /* ieee80211_sta.c */
 void ieee80211_sta_timer(unsigned long data);
@@ -798,6 +797,10 @@
 int ieee80211_if_add_mgmt(struct ieee80211_local *local);
 void ieee80211_if_del_mgmt(struct ieee80211_local *local);
 
+/* regdomain.c */
+void ieee80211_regdomain_init(void);
+void ieee80211_set_default_regdomain(struct ieee80211_hw_mode *mode);
+
 /* for wiphy privid */
 extern void *mac80211_wiphy_privid;
 
diff --git a/net/mac80211/ieee80211_ioctl.c b/net/mac80211/ieee80211_ioctl.c
index 5918dd0..d0e1ab5 100644
--- a/net/mac80211/ieee80211_ioctl.c
+++ b/net/mac80211/ieee80211_ioctl.c
@@ -27,20 +27,6 @@
 #include "aes_ccm.h"
 #include "debugfs_key.h"
 
-static int ieee80211_regdom = 0x10; /* FCC */
-module_param(ieee80211_regdom, int, 0444);
-MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain; 64=MKK");
-
-/*
- * If firmware is upgraded by the vendor, additional channels can be used based
- * on the new Japanese regulatory rules. This is indicated by setting
- * ieee80211_japan_5ghz module parameter to one when loading the 80211 kernel
- * module.
- */
-static int ieee80211_japan_5ghz /* = 0 */;
-module_param(ieee80211_japan_5ghz, int, 0444);
-MODULE_PARM_DESC(ieee80211_japan_5ghz, "Vendor-updated firmware for 5 GHz");
-
 static void ieee80211_set_hw_encryption(struct net_device *dev,
 					struct sta_info *sta, u8 addr[ETH_ALEN],
 					struct ieee80211_key *key)
@@ -412,125 +398,6 @@
 }
 
 
-struct ieee80211_channel_range {
-	short start_freq;
-	short end_freq;
-	unsigned char power_level;
-	unsigned char antenna_max;
-};
-
-static const struct ieee80211_channel_range ieee80211_fcc_channels[] = {
-	{ 2412, 2462, 27, 6 } /* IEEE 802.11b/g, channels 1..11 */,
-	{ 5180, 5240, 17, 6 } /* IEEE 802.11a, channels 36..48 */,
-	{ 5260, 5320, 23, 6 } /* IEEE 802.11a, channels 52..64 */,
-	{ 5745, 5825, 30, 6 } /* IEEE 802.11a, channels 149..165, outdoor */,
-	{ 0 }
-};
-
-static const struct ieee80211_channel_range ieee80211_mkk_channels[] = {
-	{ 2412, 2472, 20, 6 } /* IEEE 802.11b/g, channels 1..13 */,
-	{ 5170, 5240, 20, 6 } /* IEEE 802.11a, channels 34..48 */,
-	{ 5260, 5320, 20, 6 } /* IEEE 802.11a, channels 52..64 */,
-	{ 0 }
-};
-
-
-static const struct ieee80211_channel_range *channel_range =
-	ieee80211_fcc_channels;
-
-
-static void ieee80211_unmask_channel(struct net_device *dev, int mode,
-				     struct ieee80211_channel *chan)
-{
-	int i;
-
-	chan->flag = 0;
-
-	if (ieee80211_regdom == 64 &&
-	    (mode == MODE_ATHEROS_TURBO || mode == MODE_ATHEROS_TURBOG)) {
-		/* Do not allow Turbo modes in Japan. */
-		return;
-	}
-
-	for (i = 0; channel_range[i].start_freq; i++) {
-		const struct ieee80211_channel_range *r = &channel_range[i];
-		if (r->start_freq <= chan->freq && r->end_freq >= chan->freq) {
-			if (ieee80211_regdom == 64 && !ieee80211_japan_5ghz &&
-			    chan->freq >= 5260 && chan->freq <= 5320) {
-				/*
-				 * Skip new channels in Japan since the
-				 * firmware was not marked having been upgraded
-				 * by the vendor.
-				 */
-				continue;
-			}
-
-			if (ieee80211_regdom == 0x10 &&
-			    (chan->freq == 5190 || chan->freq == 5210 ||
-			     chan->freq == 5230)) {
-				    /* Skip MKK channels when in FCC domain. */
-				    continue;
-			}
-
-			chan->flag |= IEEE80211_CHAN_W_SCAN |
-				IEEE80211_CHAN_W_ACTIVE_SCAN |
-				IEEE80211_CHAN_W_IBSS;
-			chan->power_level = r->power_level;
-			chan->antenna_max = r->antenna_max;
-
-			if (ieee80211_regdom == 64 &&
-			    (chan->freq == 5170 || chan->freq == 5190 ||
-			     chan->freq == 5210 || chan->freq == 5230)) {
-				/*
-				 * New regulatory rules in Japan have backwards
-				 * compatibility with old channels in 5.15-5.25
-				 * GHz band, but the station is not allowed to
-				 * use active scan on these old channels.
-				 */
-				chan->flag &= ~IEEE80211_CHAN_W_ACTIVE_SCAN;
-			}
-
-			if (ieee80211_regdom == 64 &&
-			    (chan->freq == 5260 || chan->freq == 5280 ||
-			     chan->freq == 5300 || chan->freq == 5320)) {
-				/*
-				 * IBSS is not allowed on 5.25-5.35 GHz band
-				 * due to radar detection requirements.
-				 */
-				chan->flag &= ~IEEE80211_CHAN_W_IBSS;
-			}
-
-			break;
-		}
-	}
-}
-
-
-static int ieee80211_unmask_channels(struct net_device *dev)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_hw_mode *mode;
-	int c;
-
-	list_for_each_entry(mode, &local->modes_list, list) {
-		for (c = 0; c < mode->num_channels; c++) {
-			ieee80211_unmask_channel(dev, mode->mode,
-						 &mode->channels[c]);
-		}
-	}
-	return 0;
-}
-
-
-int ieee80211_init_client(struct net_device *dev)
-{
-	if (ieee80211_regdom == 0x40)
-		channel_range = ieee80211_mkk_channels;
-	ieee80211_unmask_channels(dev);
-	return 0;
-}
-
-
 static int ieee80211_ioctl_siwmode(struct net_device *dev,
 				   struct iw_request_info *info,
 				   __u32 *mode, char *extra)
diff --git a/net/mac80211/ieee80211_rate.c b/net/mac80211/ieee80211_rate.c
index 16e8508..2118de0 100644
--- a/net/mac80211/ieee80211_rate.c
+++ b/net/mac80211/ieee80211_rate.c
@@ -24,11 +24,10 @@
 {
 	struct rate_control_alg *alg;
 
-	alg = kmalloc(sizeof(*alg), GFP_KERNEL);
+	alg = kzalloc(sizeof(*alg), GFP_KERNEL);
 	if (alg == NULL) {
 		return -ENOMEM;
 	}
-	memset(alg, 0, sizeof(*alg));
 	alg->ops = ops;
 
 	mutex_lock(&rate_ctrl_mutex);
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c
index ba2bf8f..7ba352e 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/ieee80211_sta.c
@@ -25,7 +25,6 @@
 #include <linux/wireless.h>
 #include <linux/random.h>
 #include <linux/etherdevice.h>
-#include <linux/rtnetlink.h>
 #include <net/iw_handler.h>
 #include <asm/types.h>
 
@@ -1327,10 +1326,9 @@
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct ieee80211_sta_bss *bss;
 
-	bss = kmalloc(sizeof(*bss), GFP_ATOMIC);
+	bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
 	if (!bss)
 		return NULL;
-	memset(bss, 0, sizeof(*bss));
 	atomic_inc(&bss->users);
 	atomic_inc(&bss->users);
 	memcpy(bss->bssid, bssid, ETH_ALEN);
@@ -2107,12 +2105,9 @@
 	struct ieee80211_sta_bss *bss, *selected = NULL;
 	int top_rssi = 0, freq;
 
-	rtnl_lock();
-
 	if (!ifsta->auto_channel_sel && !ifsta->auto_bssid_sel &&
 	    !ifsta->auto_ssid_sel) {
 		ifsta->state = IEEE80211_AUTHENTICATE;
-		rtnl_unlock();
 		ieee80211_sta_reset_auth(dev, ifsta);
 		return 0;
 	}
@@ -2155,7 +2150,6 @@
 		ieee80211_sta_set_bssid(dev, selected->bssid);
 		ieee80211_rx_bss_put(dev, selected);
 		ifsta->state = IEEE80211_AUTHENTICATE;
-		rtnl_unlock();
 		ieee80211_sta_reset_auth(dev, ifsta);
 		return 0;
 	} else {
@@ -2166,7 +2160,6 @@
 		} else
 			ifsta->state = IEEE80211_DISABLED;
 	}
-	rtnl_unlock();
 	return -1;
 }
 
diff --git a/net/mac80211/regdomain.c b/net/mac80211/regdomain.c
new file mode 100644
index 0000000..b697a2a
--- /dev/null
+++ b/net/mac80211/regdomain.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2002-2005, Instant802 Networks, Inc.
+ * Copyright 2005-2006, Devicescape Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This regulatory domain control implementation is known to be incomplete
+ * and confusing. mac80211 regulatory domain control will be significantly
+ * reworked in the not-too-distant future.
+ *
+ * For now, drivers wishing to control which channels are and aren't available
+ * are advised as follows:
+ *  - set the IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED flag
+ *  - continue to include *ALL* possible channels in the modes registered
+ *    through ieee80211_register_hwmode()
+ *  - for each allowable ieee80211_channel structure registered in the above
+ *    call, set the flag member to some meaningful value such as
+ *    IEEE80211_CHAN_W_SCAN | IEEE80211_CHAN_W_ACTIVE_SCAN |
+ *    IEEE80211_CHAN_W_IBSS.
+ *  - leave flag as 0 for non-allowable channels
+ *
+ * The usual implementation is for a driver to read a device EEPROM to
+ * determine which regulatory domain it should be operating under, then
+ * looking up the allowable channels in a driver-local table, then performing
+ * the above.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+
+static int ieee80211_regdom = 0x10; /* FCC */
+module_param(ieee80211_regdom, int, 0444);
+MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain; 64=MKK");
+
+/*
+ * If firmware is upgraded by the vendor, additional channels can be used based
+ * on the new Japanese regulatory rules. This is indicated by setting
+ * ieee80211_japan_5ghz module parameter to one when loading the 80211 kernel
+ * module.
+ */
+static int ieee80211_japan_5ghz /* = 0 */;
+module_param(ieee80211_japan_5ghz, int, 0444);
+MODULE_PARM_DESC(ieee80211_japan_5ghz, "Vendor-updated firmware for 5 GHz");
+
+
+struct ieee80211_channel_range {
+	short start_freq;
+	short end_freq;
+	unsigned char power_level;
+	unsigned char antenna_max;
+};
+
+static const struct ieee80211_channel_range ieee80211_fcc_channels[] = {
+	{ 2412, 2462, 27, 6 } /* IEEE 802.11b/g, channels 1..11 */,
+	{ 5180, 5240, 17, 6 } /* IEEE 802.11a, channels 36..48 */,
+	{ 5260, 5320, 23, 6 } /* IEEE 802.11a, channels 52..64 */,
+	{ 5745, 5825, 30, 6 } /* IEEE 802.11a, channels 149..165, outdoor */,
+	{ 0 }
+};
+
+static const struct ieee80211_channel_range ieee80211_mkk_channels[] = {
+	{ 2412, 2472, 20, 6 } /* IEEE 802.11b/g, channels 1..13 */,
+	{ 5170, 5240, 20, 6 } /* IEEE 802.11a, channels 34..48 */,
+	{ 5260, 5320, 20, 6 } /* IEEE 802.11a, channels 52..64 */,
+	{ 0 }
+};
+
+
+static const struct ieee80211_channel_range *channel_range =
+	ieee80211_fcc_channels;
+
+
+static void ieee80211_unmask_channel(int mode, struct ieee80211_channel *chan)
+{
+	int i;
+
+	chan->flag = 0;
+
+	if (ieee80211_regdom == 64 &&
+	    (mode == MODE_ATHEROS_TURBO || mode == MODE_ATHEROS_TURBOG)) {
+		/* Do not allow Turbo modes in Japan. */
+		return;
+	}
+
+	for (i = 0; channel_range[i].start_freq; i++) {
+		const struct ieee80211_channel_range *r = &channel_range[i];
+		if (r->start_freq <= chan->freq && r->end_freq >= chan->freq) {
+			if (ieee80211_regdom == 64 && !ieee80211_japan_5ghz &&
+			    chan->freq >= 5260 && chan->freq <= 5320) {
+				/*
+				 * Skip new channels in Japan since the
+				 * firmware was not marked having been upgraded
+				 * by the vendor.
+				 */
+				continue;
+			}
+
+			if (ieee80211_regdom == 0x10 &&
+			    (chan->freq == 5190 || chan->freq == 5210 ||
+			     chan->freq == 5230)) {
+				    /* Skip MKK channels when in FCC domain. */
+				    continue;
+			}
+
+			chan->flag |= IEEE80211_CHAN_W_SCAN |
+				IEEE80211_CHAN_W_ACTIVE_SCAN |
+				IEEE80211_CHAN_W_IBSS;
+			chan->power_level = r->power_level;
+			chan->antenna_max = r->antenna_max;
+
+			if (ieee80211_regdom == 64 &&
+			    (chan->freq == 5170 || chan->freq == 5190 ||
+			     chan->freq == 5210 || chan->freq == 5230)) {
+				/*
+				 * New regulatory rules in Japan have backwards
+				 * compatibility with old channels in 5.15-5.25
+				 * GHz band, but the station is not allowed to
+				 * use active scan on these old channels.
+				 */
+				chan->flag &= ~IEEE80211_CHAN_W_ACTIVE_SCAN;
+			}
+
+			if (ieee80211_regdom == 64 &&
+			    (chan->freq == 5260 || chan->freq == 5280 ||
+			     chan->freq == 5300 || chan->freq == 5320)) {
+				/*
+				 * IBSS is not allowed on 5.25-5.35 GHz band
+				 * due to radar detection requirements.
+				 */
+				chan->flag &= ~IEEE80211_CHAN_W_IBSS;
+			}
+
+			break;
+		}
+	}
+}
+
+
+void ieee80211_set_default_regdomain(struct ieee80211_hw_mode *mode)
+{
+	int c;
+	for (c = 0; c < mode->num_channels; c++)
+		ieee80211_unmask_channel(mode->mode, &mode->channels[c]);
+}
+
+
+void ieee80211_regdomain_init(void)
+{
+	if (ieee80211_regdom == 0x40)
+		channel_range = ieee80211_mkk_channels;
+}
+
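
The header comment in the new regdomain.c asks drivers to register every possible channel and to mark only the allowable ones via the flag member. A rough driver-side sketch of that advice; mydrv_channel_allowed() stands in for whatever EEPROM or regulatory lookup a real driver would do and is purely hypothetical:

	#include <net/mac80211.h>

	/* Hypothetical lookup: would consult the device EEPROM / local table. */
	static int mydrv_channel_allowed(int freq);

	/* Flag the channels this device may use, leaving disallowed ones
	 * with flag == 0, as the regdomain.c comment above suggests. */
	static void mydrv_apply_regdomain(struct ieee80211_hw_mode *mode)
	{
		int i;

		for (i = 0; i < mode->num_channels; i++) {
			struct ieee80211_channel *chan = &mode->channels[i];

			if (mydrv_channel_allowed(chan->freq))
				chan->flag = IEEE80211_CHAN_W_SCAN |
					     IEEE80211_CHAN_W_ACTIVE_SCAN |
					     IEEE80211_CHAN_W_IBSS;
			else
				chan->flag = 0;
		}
	}
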
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 3ac39f1..3599770 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -436,6 +436,7 @@
 config NETFILTER_XT_MATCH_CONNLIMIT
 	tristate '"connlimit" match support"'
 	depends on NETFILTER_XTABLES
+	depends on NF_CONNTRACK
 	---help---
 	  This match allows you to match against the number of parallel
 	  connections to a server per client IP address (or address block).
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index ffb6ff8..a4ce5e8 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -181,7 +181,7 @@
 
 	if (seq_printf(s, "use=%u\n", atomic_read(&conntrack->ct_general.use)))
 		return -ENOSPC;
-	
+
 	return 0;
 }
 
@@ -198,7 +198,7 @@
 	struct ct_iter_state *st;
 	int ret;
 
-	st = kmalloc(sizeof(struct ct_iter_state), GFP_KERNEL);
+	st = kzalloc(sizeof(struct ct_iter_state), GFP_KERNEL);
 	if (st == NULL)
 		return -ENOMEM;
 	ret = seq_open(file, &ct_seq_ops);
@@ -206,7 +206,6 @@
 		goto out_free;
 	seq          = file->private_data;
 	seq->private = st;
-	memset(st, 0, sizeof(struct ct_iter_state));
 	return ret;
 out_free:
 	kfree(st);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 9498579..d67c4fb 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -9,7 +9,7 @@
 
 #include "nf_internals.h"
 
-/* Internal logging interface, which relies on the real 
+/* Internal logging interface, which relies on the real
    LOG target modules */
 
 #define NF_LOG_PREFIXLEN		128
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a3c8e69..5681ce3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -62,6 +62,7 @@
 #include <net/netlink.h>
 
 #define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
+#define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
 
 struct netlink_sock {
 	/* struct sock has to be the first member of netlink_sock */
@@ -314,10 +315,12 @@
 	unsigned long mask;
 	unsigned int i;
 
-	for (i = 0; i < NLGRPSZ(tbl->groups)/sizeof(unsigned long); i++) {
+	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
 		mask = 0;
-		sk_for_each_bound(sk, node, &tbl->mc_list)
-			mask |= nlk_sk(sk)->groups[i];
+		sk_for_each_bound(sk, node, &tbl->mc_list) {
+			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
+				mask |= nlk_sk(sk)->groups[i];
+		}
 		tbl->listeners[i] = mask;
 	}
 	/* this function is only called with the netlink table "grabbed", which
@@ -555,26 +558,37 @@
 	nlk->subscriptions = subscriptions;
 }
 
-static int netlink_alloc_groups(struct sock *sk)
+static int netlink_realloc_groups(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 	unsigned int groups;
+	unsigned long *new_groups;
 	int err = 0;
 
-	netlink_lock_table();
+	netlink_table_grab();
+
 	groups = nl_table[sk->sk_protocol].groups;
-	if (!nl_table[sk->sk_protocol].registered)
+	if (!nl_table[sk->sk_protocol].registered) {
 		err = -ENOENT;
-	netlink_unlock_table();
+		goto out_unlock;
+	}
 
-	if (err)
-		return err;
+	if (nlk->ngroups >= groups)
+		goto out_unlock;
 
-	nlk->groups = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
-	if (nlk->groups == NULL)
-		return -ENOMEM;
+	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
+	if (new_groups == NULL) {
+		err = -ENOMEM;
+		goto out_unlock;
+	}
+	memset((char*)new_groups + NLGRPSZ(nlk->ngroups), 0,
+	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
+
+	nlk->groups = new_groups;
 	nlk->ngroups = groups;
-	return 0;
+ out_unlock:
+	netlink_table_ungrab();
+	return err;
 }
 
 static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
@@ -591,11 +605,9 @@
 	if (nladdr->nl_groups) {
 		if (!netlink_capable(sock, NL_NONROOT_RECV))
 			return -EPERM;
-		if (nlk->groups == NULL) {
-			err = netlink_alloc_groups(sk);
-			if (err)
-				return err;
-		}
+		err = netlink_realloc_groups(sk);
+		if (err)
+			return err;
 	}
 
 	if (nlk->pid) {
@@ -839,10 +851,18 @@
 int netlink_has_listeners(struct sock *sk, unsigned int group)
 {
 	int res = 0;
+	unsigned long *listeners;
 
 	BUG_ON(!(nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET));
+
+	rcu_read_lock();
+	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
+
 	if (group - 1 < nl_table[sk->sk_protocol].groups)
-		res = test_bit(group - 1, nl_table[sk->sk_protocol].listeners);
+		res = test_bit(group - 1, listeners);
+
+	rcu_read_unlock();
+
 	return res;
 }
 EXPORT_SYMBOL_GPL(netlink_has_listeners);
@@ -1007,18 +1027,36 @@
 	read_unlock(&nl_table_lock);
 }
 
+/* must be called with netlink table grabbed */
+static void netlink_update_socket_mc(struct netlink_sock *nlk,
+				     unsigned int group,
+				     int is_new)
+{
+	int old, new = !!is_new, subscriptions;
+
+	old = test_bit(group - 1, nlk->groups);
+	subscriptions = nlk->subscriptions - old + new;
+	if (new)
+		__set_bit(group - 1, nlk->groups);
+	else
+		__clear_bit(group - 1, nlk->groups);
+	netlink_update_subscriptions(&nlk->sk, subscriptions);
+	netlink_update_listeners(&nlk->sk);
+}
+
 static int netlink_setsockopt(struct socket *sock, int level, int optname,
 			      char __user *optval, int optlen)
 {
 	struct sock *sk = sock->sk;
 	struct netlink_sock *nlk = nlk_sk(sk);
-	int val = 0, err;
+	unsigned int val = 0;
+	int err;
 
 	if (level != SOL_NETLINK)
 		return -ENOPROTOOPT;
 
 	if (optlen >= sizeof(int) &&
-	    get_user(val, (int __user *)optval))
+	    get_user(val, (unsigned int __user *)optval))
 		return -EFAULT;
 
 	switch (optname) {
@@ -1031,27 +1069,16 @@
 		break;
 	case NETLINK_ADD_MEMBERSHIP:
 	case NETLINK_DROP_MEMBERSHIP: {
-		unsigned int subscriptions;
-		int old, new = optname == NETLINK_ADD_MEMBERSHIP ? 1 : 0;
-
 		if (!netlink_capable(sock, NL_NONROOT_RECV))
 			return -EPERM;
-		if (nlk->groups == NULL) {
-			err = netlink_alloc_groups(sk);
-			if (err)
-				return err;
-		}
+		err = netlink_realloc_groups(sk);
+		if (err)
+			return err;
 		if (!val || val - 1 >= nlk->ngroups)
 			return -EINVAL;
 		netlink_table_grab();
-		old = test_bit(val - 1, nlk->groups);
-		subscriptions = nlk->subscriptions - old + new;
-		if (new)
-			__set_bit(val - 1, nlk->groups);
-		else
-			__clear_bit(val - 1, nlk->groups);
-		netlink_update_subscriptions(sk, subscriptions);
-		netlink_update_listeners(sk);
+		netlink_update_socket_mc(nlk, val,
+					 optname == NETLINK_ADD_MEMBERSHIP);
 		netlink_table_ungrab();
 		err = 0;
 		break;
@@ -1327,6 +1354,71 @@
 	return NULL;
 }
 
+/**
+ * netlink_change_ngroups - change number of multicast groups
+ *
+ * This changes the number of multicast groups that are available
+ * on a certain netlink family. Note that it is not possible to
+ * change the number of groups to below 32. Also note that it does
+ * not implicitly call netlink_clear_multicast_users() when the
+ * number of groups is reduced.
+ *
+ * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
+ * @groups: The new number of groups.
+ */
+int netlink_change_ngroups(struct sock *sk, unsigned int groups)
+{
+	unsigned long *listeners, *old = NULL;
+	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
+	int err = 0;
+
+	if (groups < 32)
+		groups = 32;
+
+	netlink_table_grab();
+	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
+		listeners = kzalloc(NLGRPSZ(groups), GFP_ATOMIC);
+		if (!listeners) {
+			err = -ENOMEM;
+			goto out_ungrab;
+		}
+		old = tbl->listeners;
+		memcpy(listeners, old, NLGRPSZ(tbl->groups));
+		rcu_assign_pointer(tbl->listeners, listeners);
+	}
+	tbl->groups = groups;
+
+ out_ungrab:
+	netlink_table_ungrab();
+	synchronize_rcu();
+	kfree(old);
+	return err;
+}
+EXPORT_SYMBOL(netlink_change_ngroups);
+
+/**
+ * netlink_clear_multicast_users - kick off multicast listeners
+ *
+ * This function removes all listeners from the given group.
+ * @ksk: The kernel netlink socket, as returned by
+ *	netlink_kernel_create().
+ * @group: The multicast group to clear.
+ */
+void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
+{
+	struct sock *sk;
+	struct hlist_node *node;
+	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
+
+	netlink_table_grab();
+
+	sk_for_each_bound(sk, node, &tbl->mc_list)
+		netlink_update_socket_mc(nlk_sk(sk), group, 0);
+
+	netlink_table_ungrab();
+}
+EXPORT_SYMBOL(netlink_clear_multicast_users);
+
 void netlink_set_nonroot(int protocol, unsigned int flags)
 {
 	if ((unsigned int)protocol < MAX_LINKS)
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index b9ab62f..e146531 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -3,6 +3,7 @@
  *
  * 		Authors:	Jamal Hadi Salim
  * 				Thomas Graf <tgraf@suug.ch>
+ *				Johannes Berg <johannes@sipsolutions.net>
  */
 
 #include <linux/module.h>
@@ -13,6 +14,7 @@
 #include <linux/string.h>
 #include <linux/skbuff.h>
 #include <linux/mutex.h>
+#include <linux/bitmap.h>
 #include <net/sock.h>
 #include <net/genetlink.h>
 
@@ -42,6 +44,16 @@
 #define GENL_FAM_TAB_MASK	(GENL_FAM_TAB_SIZE - 1)
 
 static struct list_head family_ht[GENL_FAM_TAB_SIZE];
+/*
+ * Bitmap of multicast groups that are currently in use.
+ *
+ * To avoid an allocation at boot of just one unsigned long,
+ * declare it global instead.
+ * Bit 0 is marked as already used since group 0 is invalid.
+ */
+static unsigned long mc_group_start = 0x1;
+static unsigned long *mc_groups = &mc_group_start;
+static unsigned long mc_groups_longs = 1;
 
 static int genl_ctrl_event(int event, void *data);
 
@@ -116,6 +128,114 @@
 	return id_gen_idx;
 }
 
+static struct genl_multicast_group notify_grp;
+
+/**
+ * genl_register_mc_group - register a multicast group
+ *
+ * Registers the specified multicast group and notifies userspace
+ * about the new group.
+ *
+ * Returns 0 on success or a negative error code.
+ *
+ * @family: The generic netlink family the group shall be registered for.
+ * @grp: The group to register, must have a name.
+ */
+int genl_register_mc_group(struct genl_family *family,
+			   struct genl_multicast_group *grp)
+{
+	int id;
+	unsigned long *new_groups;
+	int err;
+
+	BUG_ON(grp->name[0] == '\0');
+
+	genl_lock();
+
+	/* special-case our own group */
+	if (grp == &notify_grp)
+		id = GENL_ID_CTRL;
+	else
+		id = find_first_zero_bit(mc_groups,
+					 mc_groups_longs * BITS_PER_LONG);
+
+
+	if (id >= mc_groups_longs * BITS_PER_LONG) {
+		size_t nlen = (mc_groups_longs + 1) * sizeof(unsigned long);
+
+		if (mc_groups == &mc_group_start) {
+			new_groups = kzalloc(nlen, GFP_KERNEL);
+			if (!new_groups) {
+				err = -ENOMEM;
+				goto out;
+			}
+			mc_groups = new_groups;
+			*mc_groups = mc_group_start;
+		} else {
+			new_groups = krealloc(mc_groups, nlen, GFP_KERNEL);
+			if (!new_groups) {
+				err = -ENOMEM;
+				goto out;
+			}
+			mc_groups = new_groups;
+			mc_groups[mc_groups_longs] = 0;
+		}
+		mc_groups_longs++;
+	}
+
+	err = netlink_change_ngroups(genl_sock,
+				     sizeof(unsigned long) * NETLINK_GENERIC);
+	if (err)
+		goto out;
+
+	grp->id = id;
+	set_bit(id, mc_groups);
+	list_add_tail(&grp->list, &family->mcast_groups);
+	grp->family = family;
+
+	genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, grp);
+ out:
+	genl_unlock();
+	return 0;
+}
+EXPORT_SYMBOL(genl_register_mc_group);
+
+/**
+ * genl_unregister_mc_group - unregister a multicast group
+ *
+ * Unregisters the specified multicast group and notifies userspace
+ * about it. All current listeners on the group are removed.
+ *
+ * Note: It is not necessary to unregister all multicast groups before
+ *       unregistering the family, unregistering the family will cause
+ *       all assigned multicast groups to be unregistered automatically.
+ *
+ * @family: Generic netlink family the group belongs to.
+ * @grp: The group to unregister, must have been registered successfully
+ *	 previously.
+ */
+void genl_unregister_mc_group(struct genl_family *family,
+			      struct genl_multicast_group *grp)
+{
+	BUG_ON(grp->family != family);
+	genl_lock();
+	netlink_clear_multicast_users(genl_sock, grp->id);
+	clear_bit(grp->id, mc_groups);
+	list_del(&grp->list);
+	genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp);
+	grp->id = 0;
+	grp->family = NULL;
+	genl_unlock();
+}
+
+static void genl_unregister_mc_groups(struct genl_family *family)
+{
+	struct genl_multicast_group *grp, *tmp;
+
+	list_for_each_entry_safe(grp, tmp, &family->mcast_groups, list)
+		genl_unregister_mc_group(family, grp);
+}
+
 /**
  * genl_register_ops - register generic netlink operations
  * @family: generic netlink family
@@ -216,6 +336,7 @@
 		goto errout;
 
 	INIT_LIST_HEAD(&family->ops_list);
+	INIT_LIST_HEAD(&family->mcast_groups);
 
 	genl_lock();
 
@@ -275,6 +396,8 @@
 {
 	struct genl_family *rc;
 
+	genl_unregister_mc_groups(family);
+
 	genl_lock();
 
 	list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
@@ -410,6 +533,67 @@
 		nla_nest_end(skb, nla_ops);
 	}
 
+	if (!list_empty(&family->mcast_groups)) {
+		struct genl_multicast_group *grp;
+		struct nlattr *nla_grps;
+		int idx = 1;
+
+		nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
+		if (nla_grps == NULL)
+			goto nla_put_failure;
+
+		list_for_each_entry(grp, &family->mcast_groups, list) {
+			struct nlattr *nest;
+
+			nest = nla_nest_start(skb, idx++);
+			if (nest == NULL)
+				goto nla_put_failure;
+
+			NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id);
+			NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME,
+				       grp->name);
+
+			nla_nest_end(skb, nest);
+		}
+		nla_nest_end(skb, nla_grps);
+	}
+
+	return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+	return genlmsg_cancel(skb, hdr);
+}
+
+static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
+				u32 seq, u32 flags, struct sk_buff *skb,
+				u8 cmd)
+{
+	void *hdr;
+	struct nlattr *nla_grps;
+	struct nlattr *nest;
+
+	hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd);
+	if (hdr == NULL)
+		return -1;
+
+	NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name);
+	NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id);
+
+	nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
+	if (nla_grps == NULL)
+		goto nla_put_failure;
+
+	nest = nla_nest_start(skb, 1);
+	if (nest == NULL)
+		goto nla_put_failure;
+
+	NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id);
+	NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME,
+		       grp->name);
+
+	nla_nest_end(skb, nest);
+	nla_nest_end(skb, nla_grps);
+
 	return genlmsg_end(skb, hdr);
 
 nla_put_failure:
@@ -453,8 +637,8 @@
 	return skb->len;
 }
 
-static struct sk_buff *ctrl_build_msg(struct genl_family *family, u32 pid,
-				      int seq, u8 cmd)
+static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
+					     u32 pid, int seq, u8 cmd)
 {
 	struct sk_buff *skb;
 	int err;
@@ -472,6 +656,25 @@
 	return skb;
 }
 
+static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp,
+					    u32 pid, int seq, u8 cmd)
+{
+	struct sk_buff *skb;
+	int err;
+
+	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (skb == NULL)
+		return ERR_PTR(-ENOBUFS);
+
+	err = ctrl_fill_mcgrp_info(grp, pid, seq, 0, skb, cmd);
+	if (err < 0) {
+		nlmsg_free(skb);
+		return ERR_PTR(err);
+	}
+
+	return skb;
+}
+
 static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
 	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
 	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
@@ -501,8 +704,8 @@
 		goto errout;
 	}
 
-	msg = ctrl_build_msg(res, info->snd_pid, info->snd_seq,
-			     CTRL_CMD_NEWFAMILY);
+	msg = ctrl_build_family_msg(res, info->snd_pid, info->snd_seq,
+				    CTRL_CMD_NEWFAMILY);
 	if (IS_ERR(msg)) {
 		err = PTR_ERR(msg);
 		goto errout;
@@ -523,7 +726,15 @@
 	switch (event) {
 	case CTRL_CMD_NEWFAMILY:
 	case CTRL_CMD_DELFAMILY:
-		msg = ctrl_build_msg(data, 0, 0, event);
+		msg = ctrl_build_family_msg(data, 0, 0, event);
+		if (IS_ERR(msg))
+			return PTR_ERR(msg);
+
+		genlmsg_multicast(msg, 0, GENL_ID_CTRL, GFP_KERNEL);
+		break;
+	case CTRL_CMD_NEWMCAST_GRP:
+	case CTRL_CMD_DELMCAST_GRP:
+		msg = ctrl_build_mcgrp_msg(data, 0, 0, event);
 		if (IS_ERR(msg))
 			return PTR_ERR(msg);
 
@@ -541,6 +752,10 @@
 	.policy		= ctrl_policy,
 };
 
+static struct genl_multicast_group notify_grp = {
+	.name		= "notify",
+};
+
 static int __init genl_init(void)
 {
 	int i, err;
@@ -557,11 +772,17 @@
 		goto errout_register;
 
 	netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV);
-	genl_sock = netlink_kernel_create(NETLINK_GENERIC, GENL_MAX_ID,
-					  genl_rcv, NULL, THIS_MODULE);
+
+	/* we'll bump the group number right afterwards */
+	genl_sock = netlink_kernel_create(NETLINK_GENERIC, 0, genl_rcv,
+					  NULL, THIS_MODULE);
 	if (genl_sock == NULL)
 		panic("GENL: Cannot initialize generic netlink\n");
 
+	err = genl_register_mc_group(&genl_ctrl, &notify_grp);
+	if (err < 0)
+		goto errout_register;
+
 	return 0;
 
 errout_register:
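
genl_register_mc_group() above hands out group ids from a one-word bitmap that is grown with krealloc() once find_first_zero_bit() runs out of space. A condensed sketch of that allocator, with illustrative id_* names and external locking assumed (genetlink itself relies on genl_lock()):

	#include <linux/bitops.h>
	#include <linux/slab.h>

	static unsigned long first_word = 0x1;	/* id 0 reserved, as in genetlink */
	static unsigned long *id_map = &first_word;
	static unsigned long id_words = 1;

	/* Allocate the lowest free id, growing the bitmap by one word when
	 * it is exhausted. Caller provides the locking. */
	static int id_alloc(void)
	{
		int id = find_first_zero_bit(id_map, id_words * BITS_PER_LONG);

		if (id >= id_words * BITS_PER_LONG) {
			size_t nlen = (id_words + 1) * sizeof(unsigned long);
			unsigned long *new;

			if (id_map == &first_word) {
				new = kzalloc(nlen, GFP_KERNEL);
				if (new)
					*new = first_word;
			} else {
				new = krealloc(id_map, nlen, GFP_KERNEL);
				if (new)
					new[id_words] = 0;
			}
			if (!new)
				return -ENOMEM;
			id_map = new;
			id_words++;
			/* 'id' is now the first bit of the fresh, zeroed word. */
		}
		set_bit(id, id_map);
		return id;
	}

	static void id_free(int id)
	{
		clear_bit(id, id_map);
	}
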
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 5d66490..dc927329 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -720,7 +720,7 @@
 
 		for (;;) {
 			prepare_to_wait(sk->sk_sleep, &wait,
-			                TASK_INTERRUPTIBLE);
+					TASK_INTERRUPTIBLE);
 			if (sk->sk_state != TCP_SYN_SENT)
 				break;
 			if (!signal_pending(current)) {
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 7c27bd3..1322d62 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -108,7 +108,7 @@
 Incoming, dev->hard_header==NULL
    mac_header -> UNKNOWN position. It is very likely, that it points to ll
 		 header.  PPP makes it, that is wrong, because introduce
-                 assymetry between rx and tx paths.
+		 asymmetry between rx and tx paths.
    data       -> data
 
 Outgoing, dev->hard_header==NULL
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
index 230e35c..9f746be 100644
--- a/net/rfkill/rfkill-input.c
+++ b/net/rfkill/rfkill-input.c
@@ -83,7 +83,7 @@
 static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH);
 
 static void rfkill_event(struct input_handle *handle, unsigned int type,
-		        unsigned int code, int down)
+			unsigned int code, int down)
 {
 	if (type == EV_KEY && down == 1) {
 		switch (code) {
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index f3986d4..db3395b 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -187,7 +187,7 @@
 static struct device_attribute rfkill_dev_attrs[] = {
 	__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
 	__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
-	__ATTR(state, S_IRUGO, rfkill_state_show, rfkill_state_store),
+	__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
 	__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
 	__ATTR_NULL
 };
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index f4d3aba..976c3cc 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -816,7 +816,7 @@
 
 		for (;;) {
 			prepare_to_wait(sk->sk_sleep, &wait,
-			                TASK_INTERRUPTIBLE);
+					TASK_INTERRUPTIBLE);
 			if (sk->sk_state != TCP_SYN_SENT)
 				break;
 			if (!signal_pending(current)) {
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 2c57df9..46f6d57 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -805,26 +805,26 @@
 	}
 
 	ret = proto_register(&rxrpc_proto, 1);
-        if (ret < 0) {
-                printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
+	if (ret < 0) {
+		printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
 		goto error_proto;
 	}
 
 	ret = sock_register(&rxrpc_family_ops);
 	if (ret < 0) {
-                printk(KERN_CRIT "RxRPC: Cannot register socket family\n");
+		printk(KERN_CRIT "RxRPC: Cannot register socket family\n");
 		goto error_sock;
 	}
 
 	ret = register_key_type(&key_type_rxrpc);
 	if (ret < 0) {
-                printk(KERN_CRIT "RxRPC: Cannot register client key type\n");
+		printk(KERN_CRIT "RxRPC: Cannot register client key type\n");
 		goto error_key_type;
 	}
 
 	ret = register_key_type(&key_type_rxrpc_s);
 	if (ret < 0) {
-                printk(KERN_CRIT "RxRPC: Cannot register server key type\n");
+		printk(KERN_CRIT "RxRPC: Cannot register server key type\n");
 		goto error_key_type_s;
 	}
 
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index d3f7c3f..8a74cac 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -97,7 +97,7 @@
 	  select classes of this queuing discipline.  Each class maps
 	  the flow(s) it is handling to a given virtual circuit.
 
-	  See the top of <file:net/sched/sch_atm.c>) for more details.
+	  See the top of <file:net/sched/sch_atm.c> for more details.
 
 	  To compile this code as a module, choose M here: the
 	  module will be called sch_atm.
@@ -137,7 +137,7 @@
 	tristate "Stochastic Fairness Queueing (SFQ)"
 	---help---
 	  Say Y here if you want to use the Stochastic Fairness Queueing (SFQ)
-	  packet scheduling algorithm .
+	  packet scheduling algorithm.
 
 	  See the top of <file:net/sched/sch_sfq.c> for more details.
 
@@ -306,7 +306,7 @@
 	  is important for real time data such as streaming sound or video.
 
 	  Say Y here if you want to be able to classify outgoing packets based
-	  on their RSVP requests and you are using the IPv6.
+	  on their RSVP requests and you are using the IPv6 protocol.
 
 	  To compile this code as a module, choose M here: the
 	  module will be called cls_rsvp6.
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 417ec8f..ddc4f2c 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -292,13 +292,12 @@
 		}
 	}
 	DPRINTK("atm_tc_change: new id %x\n", classid);
-	flow = kmalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
+	flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
 	DPRINTK("atm_tc_change: flow %p\n", flow);
 	if (!flow) {
 		error = -ENOBUFS;
 		goto err_out;
 	}
-	memset(flow, 0, sizeof(*flow));
 	flow->filter_list = NULL;
 	if (!(flow->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid)))
 		flow->q = &noop_qdisc;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index f02ce3d..fd2dfdd 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1779,7 +1779,7 @@
 					     SCTP_COMM_UP, 0,
 					     asoc->c.sinit_num_ostreams,
 					     asoc->c.sinit_max_instreams,
-                                             NULL, GFP_ATOMIC);
+					     NULL, GFP_ATOMIC);
 		if (!ev)
 			goto nomem;
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b1917f6..ee88f2e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4803,7 +4803,7 @@
 						  char __user *optval,
 						  int __user *optlen)
 {
-        u32 val;
+	u32 val;
 
 	if (len < sizeof(u32))
 		return -EINVAL;
@@ -4827,7 +4827,7 @@
 				    char __user *optval,
 				    int __user *optlen)
 {
-        int val;
+	int val;
 
 	if (len < sizeof(int))
 		return -EINVAL;
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index aa55d0a..29a8ecc 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -543,17 +543,18 @@
 		test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
 }
 
-
-static struct shrinker *rpc_cred_shrinker;
+static struct shrinker rpc_cred_shrinker = {
+	.shrink = rpcauth_cache_shrinker,
+	.seeks = DEFAULT_SEEKS,
+};
 
 void __init rpcauth_init_module(void)
 {
 	rpc_init_authunix();
-	rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker);
+	register_shrinker(&rpc_cred_shrinker);
 }
 
 void __exit rpcauth_remove_module(void)
 {
-	if (rpc_cred_shrinker != NULL)
-		remove_shrinker(rpc_cred_shrinker);
+	unregister_shrinker(&rpc_cred_shrinker);
 }
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index f441aa0..bfb6a29 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -67,7 +67,7 @@
 
 	if (crypto_blkcipher_ivsize(tfm) > 16) {
 		dprintk("RPC:       gss_k5encrypt: tfm iv size to large %d\n",
-		         crypto_blkcipher_ivsize(tfm));
+			 crypto_blkcipher_ivsize(tfm));
 		goto out;
 	}
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 71b9dae..9843eac 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -231,6 +231,7 @@
 static struct gss_api_mech gss_kerberos_mech = {
 	.gm_name	= "krb5",
 	.gm_owner	= THIS_MODULE,
+	.gm_oid		= {9, (void *)"\x2a\x86\x48\x86\xf7\x12\x01\x02\x02"},
 	.gm_ops		= &gss_kerberos_ops,
 	.gm_pf_num	= ARRAY_SIZE(gss_kerberos_pfs),
 	.gm_pfs		= gss_kerberos_pfs,
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 2687251..61801a0 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -194,6 +194,20 @@
 EXPORT_SYMBOL(gss_mech_get_by_pseudoflavor);
 
 u32
+gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service)
+{
+	int i;
+
+	for (i = 0; i < gm->gm_pf_num; i++) {
+		if (gm->gm_pfs[i].service == service) {
+			return gm->gm_pfs[i].pseudoflavor;
+		}
+	}
+	return RPC_AUTH_MAXFLAVOR; /* illegal value */
+}
+EXPORT_SYMBOL(gss_svc_to_pseudoflavor);
+
+u32
 gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor)
 {
 	int i;
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 577d590..5deb4b6 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -217,6 +217,7 @@
 static struct gss_api_mech gss_spkm3_mech = {
 	.gm_name	= "spkm3",
 	.gm_owner	= THIS_MODULE,
+	.gm_oid		= {7, "\053\006\001\005\005\001\003"},
 	.gm_ops		= &gss_spkm3_ops,
 	.gm_pf_num	= ARRAY_SIZE(gss_spkm3_pfs),
 	.gm_pfs		= gss_spkm3_pfs,
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index c094583..4906975 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -743,6 +743,15 @@
 
 static struct auth_ops svcauthops_gss;
 
+u32 svcauth_gss_flavor(struct auth_domain *dom)
+{
+	struct gss_domain *gd = container_of(dom, struct gss_domain, h);
+
+	return gd->pseudoflavor;
+}
+
+EXPORT_SYMBOL(svcauth_gss_flavor);
+
 int
 svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
 {
@@ -913,10 +922,23 @@
 	struct gss_svc_data *svcdata = rqstp->rq_auth_data;
 	struct rsc *rsci = svcdata->rsci;
 	struct rpc_gss_wire_cred *gc = &svcdata->clcred;
+	int stat;
 
-	rqstp->rq_client = find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
-	if (rqstp->rq_client == NULL)
+	/*
+	 * A gss export can be specified either by:
+	 * 	export	*(sec=krb5,rw)
+	 * or by
+	 * 	export gss/krb5(rw)
+	 * The latter is deprecated, but for backwards compatibility reasons
+	 * the nfsd code will still fall back on trying it if the former
+	 * doesn't work, so we try to make both available to nfsd, below.
+	 */
+	rqstp->rq_gssclient = find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
+	if (rqstp->rq_gssclient == NULL)
 		return SVC_DENIED;
+	stat = svcauth_unix_set_client(rqstp);
+	if (stat == SVC_DROP)
+		return stat;
 	return SVC_OK;
 }
 
@@ -1088,7 +1110,6 @@
 			svc_putnl(resv, GSS_SEQ_WIN);
 			if (svc_safe_putnetobj(resv, &rsip->out_token))
 				goto drop;
-			rqstp->rq_client = NULL;
 		}
 		goto complete;
 	case RPC_GSS_PROC_DESTROY:
@@ -1131,6 +1152,8 @@
 		}
 		svcdata->rsci = rsci;
 		cache_get(&rsci->h);
+		rqstp->rq_flavor = gss_svc_to_pseudoflavor(
+					rsci->mechctx->mech_type, gc->gc_svc);
 		ret = SVC_OK;
 		goto out;
 	}
@@ -1317,6 +1340,9 @@
 	if (rqstp->rq_client)
 		auth_domain_put(rqstp->rq_client);
 	rqstp->rq_client = NULL;
+	if (rqstp->rq_gssclient)
+		auth_domain_put(rqstp->rq_gssclient);
+	rqstp->rq_gssclient = NULL;
 	if (rqstp->rq_cred.cr_group_info)
 		put_group_info(rqstp->rq_cred.cr_group_info);
 	rqstp->rq_cred.cr_group_info = NULL;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index e787b6a..5b2b6fb 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -460,21 +460,19 @@
 static int
 rpc_lookup_parent(char *path, struct nameidata *nd)
 {
+	struct vfsmount *mnt;
+
 	if (path[0] == '\0')
 		return -ENOENT;
-	nd->mnt = rpc_get_mount();
-	if (IS_ERR(nd->mnt)) {
+
+	mnt = rpc_get_mount();
+	if (IS_ERR(mnt)) {
 		printk(KERN_WARNING "%s: %s failed to mount "
 			       "pseudofilesystem \n", __FILE__, __FUNCTION__);
-		return PTR_ERR(nd->mnt);
+		return PTR_ERR(mnt);
 	}
-	mntget(nd->mnt);
-	nd->dentry = dget(rpc_mount->mnt_root);
-	nd->last_type = LAST_ROOT;
-	nd->flags = LOOKUP_PARENT;
-	nd->depth = 0;
 
-	if (path_walk(path, nd)) {
+	if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) {
 		printk(KERN_WARNING "%s: %s failed to find path %s\n",
 				__FILE__, __FUNCTION__, path);
 		rpc_put_mount();
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 07dcd20..41147941 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -5,6 +5,7 @@
 #include <linux/sunrpc/xdr.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/svcauth.h>
+#include <linux/sunrpc/gss_api.h>
 #include <linux/err.h>
 #include <linux/seq_file.h>
 #include <linux/hash.h>
@@ -637,7 +638,7 @@
 	}
 }
 
-static int
+int
 svcauth_unix_set_client(struct svc_rqst *rqstp)
 {
 	struct sockaddr_in *sin = svc_addr_in(rqstp);
@@ -672,6 +673,8 @@
 	return SVC_OK;
 }
 
+EXPORT_SYMBOL(svcauth_unix_set_client);
+
 static int
 svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
 {
@@ -707,6 +710,7 @@
 	svc_putnl(resv, RPC_AUTH_NULL);
 	svc_putnl(resv, 0);
 
+	rqstp->rq_flavor = RPC_AUTH_NULL;
 	return SVC_OK;
 }
 
@@ -784,6 +788,7 @@
 	svc_putnl(resv, RPC_AUTH_NULL);
 	svc_putnl(resv, 0);
 
+	rqstp->rq_flavor = RPC_AUTH_UNIX;
 	return SVC_OK;
 
 badcred:
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 9dfc912..d8473ee 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -1052,12 +1052,11 @@
 {
 	int array_size = sizeof(struct hlist_head) * tipc_nametbl_size;
 
-	table.types = kmalloc(array_size, GFP_ATOMIC);
+	table.types = kzalloc(array_size, GFP_ATOMIC);
 	if (!table.types)
 		return -ENOMEM;
 
 	write_lock_bh(&tipc_nametbl_lock);
-	memset(table.types, 0, array_size);
 	table.local_publ_count = 0;
 	write_unlock_bh(&tipc_nametbl_lock);
 	return 0;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 4a8f37f..8411017 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1629,8 +1629,8 @@
 	.getsockopt	= getsockopt,
 	.sendmsg	= send_msg,
 	.recvmsg	= recv_msg,
-        .mmap		= sock_no_mmap,
-        .sendpage	= sock_no_sendpage
+	.mmap		= sock_no_mmap,
+	.sendpage	= sock_no_sendpage
 };
 
 static struct proto_ops packet_ops = {
@@ -1650,8 +1650,8 @@
 	.getsockopt	= getsockopt,
 	.sendmsg	= send_packet,
 	.recvmsg	= recv_msg,
-        .mmap		= sock_no_mmap,
-        .sendpage	= sock_no_sendpage
+	.mmap		= sock_no_mmap,
+	.sendpage	= sock_no_sendpage
 };
 
 static struct proto_ops stream_ops = {
@@ -1671,8 +1671,8 @@
 	.getsockopt	= getsockopt,
 	.sendmsg	= send_stream,
 	.recvmsg	= recv_stream,
-        .mmap		= sock_no_mmap,
-        .sendpage	= sock_no_sendpage
+	.mmap		= sock_no_mmap,
+	.sendpage	= sock_no_sendpage
 };
 
 static struct net_proto_family tipc_family_ops = {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 157bfbd..cfaf17c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -857,7 +857,7 @@
 					       pol, NULL);
 				return err;
 			}
-                }
+		}
 		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
 			hlist_for_each_entry(pol, entry,
 					     xfrm_policy_bydst[dir].table + i,
@@ -2141,7 +2141,7 @@
 		if (last == first)
 			break;
 
-		last = last->u.next;
+		last = (struct xfrm_dst *)last->u.dst.next;
 		last->child_mtu_cached = mtu;
 	}
 
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index e070c3f..38f90ca 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -407,7 +407,7 @@
 				xfrm_audit_log(audit_info->loginuid,
 					       audit_info->secid,
 					       AUDIT_MAC_IPSEC_DELSA,
-                                               0, NULL, x);
+					       0, NULL, x);
 
 				return err;
 			}
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 25e20a2..73751ab 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -9,7 +9,7 @@
 my $P = $0;
 $P =~ s@.*/@@g;
 
-my $V = '0.07';
+my $V = '0.08';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 
@@ -47,16 +47,14 @@
 if ($tree && -f $removal) {
 	open(REMOVE, "<$removal") || die "$P: $removal: open failed - $!\n";
 	while (<REMOVE>) {
-		if (/^Files:\s+(.*\S)/) {
-			for my $file (split(/[, ]+/, $1)) {
-				if ($file =~ m@include/(.*)@) {
+		if (/^Check:\s+(.*\S)/) {
+			for my $entry (split(/[, ]+/, $1)) {
+				if ($entry =~ m@include/(.*)@) {
 					push(@dep_includes, $1);
-				}
-			}
 
-		} elsif (/^Funcs:\s+(.*\S)/) {
-			for my $func (split(/[, ]+/, $1)) {
-				push(@dep_functions, $func);
+				} elsif ($entry !~ m@/@) {
+					push(@dep_functions, $entry);
+				}
 			}
 		}
 	}
@@ -153,7 +151,7 @@
 }
 
 sub ctx_block_get {
-	my ($linenr, $remain, $outer, $open, $close) = @_;
+	my ($linenr, $remain, $outer, $open, $close, $off) = @_;
 	my $line;
 	my $start = $linenr - 1;
 	my $blk = '';
@@ -161,38 +159,58 @@
 	my @c;
 	my @res = ();
 
+	my $level = 0;
 	for ($line = $start; $remain > 0; $line++) {
 		next if ($rawlines[$line] =~ /^-/);
 		$remain--;
 
 		$blk .= $rawlines[$line];
+		foreach my $c (split(//, $rawlines[$line])) {
+			##print "C<$c>L<$level><$open$close>O<$off>\n";
+			if ($off > 0) {
+				$off--;
+				next;
+			}
 
-		@o = ($blk =~ /$open/g);
-		@c = ($blk =~ /$close/g);
+			if ($c eq $close && $level > 0) {
+				$level--;
+				last if ($level == 0);
+			} elsif ($c eq $open) {
+				$level++;
+			}
+		}
 
-		if (!$outer || (scalar(@o) - scalar(@c)) == 1) {
+		if (!$outer || $level <= 1) {
 			push(@res, $rawlines[$line]);
 		}
 
-		last if (scalar(@o) == scalar(@c));
+		last if ($level == 0);
 	}
 
-	return @res;
+	return ($level, @res);
 }
 sub ctx_block_outer {
 	my ($linenr, $remain) = @_;
 
-	return ctx_block_get($linenr, $remain, 1, '\{', '\}');
+	my ($level, @r) = ctx_block_get($linenr, $remain, 1, '{', '}', 0);
+	return @r;
 }
 sub ctx_block {
 	my ($linenr, $remain) = @_;
 
-	return ctx_block_get($linenr, $remain, 0, '\{', '\}');
+	my ($level, @r) = ctx_block_get($linenr, $remain, 0, '{', '}', 0);
+	return @r;
 }
 sub ctx_statement {
+	my ($linenr, $remain, $off) = @_;
+
+	my ($level, @r) = ctx_block_get($linenr, $remain, 0, '(', ')', $off);
+	return @r;
+}
+sub ctx_block_level {
 	my ($linenr, $remain) = @_;
 
-	return ctx_block_get($linenr, $remain, 0, '\(', '\)');
+	return ctx_block_get($linenr, $remain, 0, '{', '}', 0);
 }
 
 sub ctx_locate_comment {
@@ -246,16 +264,23 @@
 	return $vet;
 }
 
+my @report = ();
+sub report {
+	push(@report, $_[0]);
+}
+sub report_dump {
+	@report;
+}
 sub ERROR {
-	print "ERROR: $_[0]\n";
+	report("ERROR: $_[0]\n");
 	our $clean = 0;
 }
 sub WARN {
-	print "WARNING: $_[0]\n";
+	report("WARNING: $_[0]\n");
 	our $clean = 0;
 }
 sub CHK {
-	print "CHECK: $_[0]\n";
+	report("CHECK: $_[0]\n");
 	our $clean = 0;
 }
 
@@ -318,7 +343,10 @@
 				(?:\s*\*+\s*const|\s*\*+)?
 			  }x;
 	my $Declare	= qr{(?:$Storage\s+)?$Type};
-	my $Attribute	= qr{__read_mostly|__init|__initdata};
+	my $Attribute	= qr{const|__read_mostly|__init|__initdata|__meminit};
+
+	my $Member	= qr{->$Ident|\.$Ident|\[[^]]*\]};
+	my $Lval	= qr{$Ident(?:$Member)*};
 
 	# Pre-scan the patch looking for any __setup documentation.
 	my @setup_docs = ();
@@ -509,7 +537,7 @@
 # if/while/etc brace do not go on next line, unless defining a do while loop,
 # or if that brace on the next line is for something else
 		if ($line =~ /\b(?:(if|while|for|switch)\s*\(|do\b|else\b)/ && $line !~ /^.#/) {
-			my @ctx = ctx_statement($linenr, $realcnt);
+			my @ctx = ctx_statement($linenr, $realcnt, 0);
 			my $ctx_ln = $linenr + $#ctx + 1;
 			my $ctx_cnt = $realcnt - $#ctx - 1;
 			my $ctx = join("\n", @ctx);
@@ -521,7 +549,7 @@
 			##warn "line<$line>\nctx<$ctx>\nnext<$lines[$ctx_ln - 1]>";
 
 			if ($ctx !~ /{\s*/ && $ctx_cnt > 0 && $lines[$ctx_ln - 1] =~ /^\+\s*{/) {
-				ERROR("That { should be on the previous line\n" .
+				ERROR("That open brace { should be on the previous line\n" .
 					"$here\n$ctx\n$lines[$ctx_ln - 1]");
 			}
 		}
@@ -535,6 +563,12 @@
 			next;
 		}
 
+# check for aggregate initialisation with the open brace on the next line
+		if ($prevline =~ /$Declare\s*$Ident\s*=\s*$/ &&
+		    $line =~ /^.\s*{/) {
+			ERROR("That open brace { should be on the previous line\n" . $hereprev);
+		}
+
 #
 # Checks which are anchored on the added line.
 #
@@ -570,8 +604,13 @@
 			}
 		}
 
+# check for external initialisers.
+		if ($line =~ /^.$Type\s*$Ident\s*=\s*(0|NULL);/) {
+			ERROR("do not initialise externals to 0 or NULL\n" .
+				$herecurr);
+		}
 # check for static initialisers.
-		if ($line=~/\s*static\s.*=\s+(0|NULL);/) {
+		if ($line =~ /\s*static\s.*=\s*(0|NULL);/) {
 			ERROR("do not initialise statics to 0 or NULL\n" .
 				$herecurr);
 		}
@@ -593,11 +632,11 @@
 			ERROR("\"(foo $1 )\" should be \"(foo $1)\"\n" .
 				$herecurr);
 
-		} elsif ($line =~ m{$NonptrType(\*+)(?:\s+const)?\s+[A-Za-z\d_]+}) {
+		} elsif ($line =~ m{$NonptrType(\*+)(?:\s+$Attribute)?\s+[A-Za-z\d_]+}) {
 			ERROR("\"foo$1 bar\" should be \"foo $1bar\"\n" .
 				$herecurr);
 
-		} elsif ($line =~ m{$NonptrType\s+(\*+)(?!\s+const)\s+[A-Za-z\d_]+}) {
+		} elsif ($line =~ m{$NonptrType\s+(\*+)(?!\s+$Attribute)\s+[A-Za-z\d_]+}) {
 			ERROR("\"foo $1 bar\" should be \"foo $1bar\"\n" .
 				$herecurr);
 		}
@@ -614,7 +653,7 @@
 # to try and find and validate the current printk.  In summary the current
 # printk includes all preceeding printk's which have no newline on the end.
 # we assume the first bad printk is the one to report.
-		if ($line =~ /\bprintk\((?!KERN_)/) {
+		if ($line =~ /\bprintk\((?!KERN_)\s*"/) {
 			my $ok = 0;
 			for (my $ln = $linenr - 1; $ln >= $first_line; $ln--) {
 				#print "CHECK<$lines[$ln - 1]\n";
@@ -639,6 +678,12 @@
 			ERROR("open brace '{' following function declarations go on the next line\n" . $herecurr);
 		}
 
+# check for spaces between functions and their parentheses.
+		if ($line =~ /($Ident)\s+\(/ &&
+		    $1 !~ /^(?:if|for|while|switch|return|volatile)$/ &&
+		    $line !~ /$Type\s+\(/ && $line !~ /^.\#\s*define\b/) {
+			ERROR("no space between function name and open parenthesis '('\n" . $herecurr);
+		}
 # Check operator spacing.
 		# Note we expand the line with the leading + as the real
 		# line will be displayed with the leading + and the tabs
@@ -647,7 +692,7 @@
 		$opline = expand_tabs($opline);
 		$opline =~ s/^./ /;
 		if (!($line=~/\#\s*include/)) {
-			my @elements = split(/(<<=|>>=|<=|>=|==|!=|\+=|-=|\*=|\/=|%=|\^=|\|=|&=|->|<<|>>|<|>|=|!|~|&&|\|\||,|\^|\+\+|--|;|&|\||\+|-|\*|\/\/|\/)/, $opline);
+			my @elements = split(/(<<=|>>=|<=|>=|==|!=|\+=|-=|\*=|\/=|%=|\^=|\|=|&=|=>|->|<<|>>|<|>|=|!|~|&&|\|\||,|\^|\+\+|--|;|&|\||\+|-|\*|\/\/|\/)/, $opline);
 			my $off = 0;
 			for (my $n = 0; $n < $#elements; $n += 2) {
 				$off += length($elements[$n]);
@@ -773,6 +818,18 @@
 			}
 		}
 
+# check for multiple assignments
+		if ($line =~ /^.\s*$Lval\s*=\s*$Lval\s*=(?!=)/) {
+			WARN("multiple assignments should be avoided\n" . $herecurr);
+		}
+
+# check for multiple declarations, allowing for a function declaration
+# continuation.
+		if ($line =~ /^.\s*$Type\s+$Ident(?:\s*=[^,{]*)?\s*,\s*$Ident.*/ &&
+		    $line !~ /^.\s*$Type\s+$Ident(?:\s*=[^,{]*)?\s*,\s*$Type\s*$Ident.*/) {
+			WARN("declaring multiple variables together should be avoided\n" . $herecurr);
+		}
+
 #need space before brace following if, while, etc
 		if ($line =~ /\(.*\){/ || $line =~ /do{/) {
 			ERROR("need a space before the open brace '{'\n" . $herecurr);
@@ -847,13 +904,18 @@
 			# or the one below.
 			my $ln = $linenr;
 			my $cnt = $realcnt;
+			my $off = 0;
 
-			# If the macro starts on the define line start there.
-			if ($prevline !~ m{^.#\s*define\s*$Ident(?:\([^\)]*\))?\s*\\\s*$}) {
+			# If the macro starts on the define line, start
+			# grabbing the statement after the identifier
+			$prevline =~ m{^(.#\s*define\s*$Ident(?:\([^\)]*\))?\s*)(.*)\\\s*$};
+			##print "1<$1> 2<$2>\n";
+			if ($2 ne '') {
+				$off = length($1);
 				$ln--;
 				$cnt++;
 			}
-			my @ctx = ctx_statement($ln, $cnt);
+			my @ctx = ctx_statement($ln, $cnt, $off);
 			my $ctx_ln = $ln + $#ctx + 1;
 			my $ctx = join("\n", @ctx);
 
@@ -873,6 +935,44 @@
 			}
 		}
 
+# check for redundant bracing round if etc
+		if ($line =~ /\b(if|while|for|else)\b/) {
+			# Locate the end of the opening statement.
+			my @control = ctx_statement($linenr, $realcnt, 0);
+			my $nr = $linenr + (scalar(@control) - 1);
+			my $cnt = $realcnt - (scalar(@control) - 1);
+
+			my $off = $realcnt - $cnt;
+			#print "$off: line<$line>end<" . $lines[$nr - 1] . ">\n";
+
+			# If this is a braced statement group, check it
+			if ($lines[$nr - 1] =~ /{\s*$/) {
+				my ($lvl, @block) = ctx_block_level($nr, $cnt);
+
+				my $stmt = join(' ', @block);
+				$stmt =~ s/^[^{]*{//;
+				$stmt =~ s/}[^}]*$//;
+
+				#print "block<" . join(' ', @block) . "><" . scalar(@block) . ">\n";
+				#print "stmt<$stmt>\n\n";
+
+				# Count the ;'s: if there are fewer than two
+				# then there can only be one statement;
+				# if there is a brace inside we cannot
+				# trivially detect whether it is one statement.
+				# Also, nested if's often require braces to
+				# disambiguate the else binding, so stay quiet there.
+				my @semi = ($stmt =~ /;/g);
+				##print "semi<" . scalar(@semi) . ">\n";
+				if ($lvl == 0 && scalar(@semi) < 2 &&
+				    $stmt !~ /{/ && $stmt !~ /\bif\b/) {
+				    	my $herectx = "$here\n" . join("\n", @control, @block[1 .. $#block]) . "\n";
+				    	shift(@block);
+					ERROR("braces {} are not necessary for single statement blocks\n" . $herectx);
+				}
+			}
+		}
+
 # don't include deprecated include files (uses RAW line)
 		for my $inc (@dep_includes) {
 			if ($rawline =~ m@\#\s*include\s*\<$inc>@) {
@@ -898,6 +998,14 @@
 				$herecurr);
 		}
 
+# check for needless kfree() checks
+		if ($prevline =~ /\bif\s*\(([^\)]*)\)/) {
+			my $expr = $1;
+			if ($line =~ /\bkfree\(\Q$expr\E\);/) {
+				WARN("kfree(NULL) is safe; this check is probably not required\n" . $hereprev);
+			}
+		}
+
 # warn about #ifdefs in C files
 #		if ($line =~ /^.#\s*if(|n)def/ && ($realfile =~ /\.c$/)) {
 #			print "#ifdef in C files should be avoided\n";
@@ -952,6 +1060,9 @@
 		ERROR("Missing Signed-off-by: line(s)\n");
 	}
 
+	if ($clean == 0 && ($chk_patch || $is_patch)) {
+		print report_dump();
+	}
 	if ($clean == 1 && $quiet == 0) {
 		print "Your patch has no obvious style problems and is ready for submission.\n"
 	}
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index caf4c86..1f11d84 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -29,7 +29,7 @@
 #include <string.h>
 #include <ctype.h>
 
-#define KSYM_NAME_LEN		127
+#define KSYM_NAME_LEN		128
 
 
 struct sym_entry {
@@ -252,7 +252,7 @@
 	unsigned int i, k, off;
 	unsigned int best_idx[256];
 	unsigned int *markers;
-	char buf[KSYM_NAME_LEN+1];
+	char buf[KSYM_NAME_LEN];
 
 	printf("#include <asm/types.h>\n");
 	printf("#if BITS_PER_LONG == 64\n");
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index e5bf649..1f58351 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -154,6 +154,7 @@
 
 my $errors = 0;
 my $warnings = 0;
+my $anon_struct_union = 0;
 
 # match expressions used to find embedded type information
 my $type_constant = '\%([-_\w]+)';
@@ -403,7 +404,11 @@
 	    print $lineprefix, $blankline;
 	} else {
 	    $line =~ s/\\\\\\/\&/g;
-	    print $lineprefix, $line;
+	    if ($output_mode eq "man" && substr($line, 0, 1) eq ".") {
+		print "\\&$line";
+	    } else {
+		print $lineprefix, $line;
+	    }
 	}
 	print "\n";
     }
@@ -718,6 +723,7 @@
 	    # pointer-to-function
 	    print "  $1 $parameter) ($2);\n";
 	} elsif ($type =~ m/^(.*?)\s*(:.*)/) {
+	    # bitfield
 	    print "  $1 $parameter$2;\n";
 	} else {
 	    print "  ".$type." ".$parameter.";\n";
@@ -1260,6 +1266,7 @@
 	    # pointer-to-function
 	    print "\t$1 $parameter) ($2);\n";
 	} elsif ($type =~ m/^(.*?)\s*(:.*)/) {
+	    # bitfield
 	    print "\t$1 $parameter$2;\n";
 	} else {
 	    print "\t".$type." ".$parameter.";\n";
@@ -1510,8 +1517,13 @@
 	my $param = shift;
 	my $type = shift;
 	my $file = shift;
-	my $anon = 0;
 
+	if (($anon_struct_union == 1) && ($type eq "") &&
+	    ($param eq "}")) {
+		return;		# ignore the ending }; from anon. struct/union
+	}
+
+	$anon_struct_union = 0;
 	my $param_name = $param;
 	$param_name =~ s/\[.*//;
 
@@ -1530,16 +1542,16 @@
 	# handle unnamed (anonymous) union or struct:
 	{
 		$type = $param;
-		$param = "{unnamed_" . $param. "}";
+		$param = "{unnamed_" . $param . "}";
 		$parameterdescs{$param} = "anonymous\n";
-		$anon = 1;
+		$anon_struct_union = 1;
 	}
 
 	# warn if parameter has no description
 	# (but ignore ones starting with # as these are not parameters
 	# but inline preprocessor statements);
 	# also ignore unnamed structs/unions;
-	if (!$anon) {
+	if (!$anon_struct_union) {
 	if (!defined $parameterdescs{$param_name} && $param_name !~ /^#/) {
 
 	    $parameterdescs{$param_name} = $undescribed;
@@ -1691,6 +1703,8 @@
     my $x = shift;
     my $file = shift;
 
+    $x =~ s@\/\/.*$@@gos; # strip C99-style comments to end of line
+
     if ($x =~ m#\s*/\*\s+MACDOC\s*#io || ($x =~ /^#/ && $x !~ /^#define/)) {
 	# do nothing
     }
@@ -1713,6 +1727,8 @@
     $x =~ s@[\r\n]+@ @gos; # strip newlines/cr's.
     $x =~ s@^\s+@@gos; # strip leading spaces
     $x =~ s@\s+$@@gos; # strip trailing spaces
+    $x =~ s@\/\/.*$@@gos; # strip C99-style comments to end of line
+
     if ($x =~ /^#/) {
 	# To distinguish preprocessor directive from regular declaration later.
 	$x .= ";";
@@ -1796,7 +1812,7 @@
 
 		$state = 2;
 		if (/-(.*)/) {
-		    # strip leading/trailing/multiple spaces #RDD:T:
+		    # strip leading/trailing/multiple spaces
 		    $descr= $1;
 		    $descr =~ s/^\s*//;
 		    $descr =~ s/\s*$//;
diff --git a/security/commoncap.c b/security/commoncap.c
index 384379e..338606e 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -148,7 +148,7 @@
 
 	if (bprm->e_uid != current->uid || bprm->e_gid != current->gid ||
 	    !cap_issubset (new_permitted, current->cap_permitted)) {
-		current->mm->dumpable = suid_dumpable;
+		set_dumpable(current->mm, suid_dumpable);
 
 		if (unsafe & ~LSM_UNSAFE_PTRACE_CAP) {
 			if (!capable(CAP_SETUID)) {
diff --git a/security/dummy.c b/security/dummy.c
index d6a112c..19d813d 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -130,7 +130,7 @@
 static void dummy_bprm_apply_creds (struct linux_binprm *bprm, int unsafe)
 {
 	if (bprm->e_uid != current->uid || bprm->e_gid != current->gid) {
-		current->mm->dumpable = suid_dumpable;
+		set_dumpable(current->mm, suid_dumpable);
 
 		if ((unsafe & ~LSM_UNSAFE_PTRACE_CAP) && !capable(CAP_SETUID)) {
 			bprm->e_uid = current->uid;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index f573ac1..5575001 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -108,7 +108,8 @@
 	argv[i] = NULL;
 
 	/* do it */
-	ret = call_usermodehelper_keys(argv[0], argv, envp, keyring, 1);
+	ret = call_usermodehelper_keys(argv[0], argv, envp, keyring,
+				       UMH_WAIT_PROC);
 
 error_link:
 	key_put(keyring);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 78c3f98..520b999 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2318,7 +2318,7 @@
 	if (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)
 		return -EOPNOTSUPP;
 
-	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+	if (!is_owner_or_cap(inode))
 		return -EPERM;
 
 	AVC_AUDIT_DATA_INIT(&ad,FS);
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 061a7c6..e11790f 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -363,7 +363,7 @@
 	if (rdev->client >= 0)
 		return 0;
 
-	pinfo = kmalloc(sizeof(*pinfo), GFP_KERNEL);
+	pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL);
 	if (!pinfo) {
 		err = -ENOMEM;
 		goto __error;
@@ -380,7 +380,6 @@
 	rdev->client = client;
 
 	/* create a port */
-	memset(pinfo, 0, sizeof(*pinfo));
 	pinfo->addr.client = client;
 	sprintf(pinfo->name, "VirMIDI %d-%d", rdev->card->number, rdev->device);
 	/* set all capabilities */
diff --git a/sound/core/sound.c b/sound/core/sound.c
index 70600df..8dc7a3b 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -446,8 +446,7 @@
 {
 	snd_info_minor_unregister();
 	snd_info_done();
-	if (unregister_chrdev(major, "alsa") != 0)
-		snd_printk(KERN_ERR "unable to unregister major device number %d\n", major);
+	unregister_chrdev(major, "alsa");
 }
 
 module_init(alsa_sound_init)
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index 3144779..866d4de 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -5,35 +5,6 @@
 #
 # Prompt user for primary drivers.
 
-config OSS_OBSOLETE
-	bool "Obsolete OSS drivers"
-	depends on SOUND_PRIME
-	help
-	  This option enables support for obsolete OSS drivers that
-	  are scheduled for removal in the near future.
-
-	  Please contact Adrian Bunk <bunk@stusta.de> if you had to
-	  say Y here because your hardware is not properly supported
-	  by ALSA.
-
-	  If unsure, say N.
-
-config SOUND_BT878
-	tristate "BT878 audio dma"
-	depends on SOUND_PRIME && PCI && OSS_OBSOLETE
-	---help---
-	  Audio DMA support for bt878 based grabber boards.  As you might have
-	  already noticed, bt878 is listed with two functions in /proc/pci.
-	  Function 0 does the video stuff (bt848 compatible), function 1 does
-	  the same for audio data.  This is a driver for the audio part of
-	  the chip.  If you say 'Y' here you get a oss-compatible dsp device
-	  where you can record from.  If you want just watch TV you probably
-	  don't need this driver as most TV cards handle sound with a short
-	  cable from the TV card to your sound card's line-in.
-
-	  To compile this driver as a module, choose M here: the module will
-	  be called btaudio.
-
 config SOUND_BCM_CS4297A
 	tristate "Crystal Sound CS4297a (for Swarm)"
 	depends on SOUND_PRIME && SIBYTE_SWARM
@@ -44,13 +15,6 @@
 	  note that CONFIG_KGDB should not be enabled at the same
 	  time, since it also attempts to use this UART port.
 
-config SOUND_ICH
-	tristate "Intel ICH (i8xx) audio support"
-	depends on SOUND_PRIME && PCI && OSS_OBSOLETE
-	help
-	  Support for integral audio in Intel's I/O Controller Hub (ICH)
-	  chipset, as used on the 810/820/840 motherboards.
-
 config SOUND_VWSND
 	tristate "SGI Visual Workstation Sound"
 	depends on SOUND_PRIME && X86_VISWS
@@ -346,26 +310,6 @@
 	  and Pinnacle). Larger values reduce the chance of data overruns at
 	  the expense of overall latency. If unsure, use the default.
 
-config SOUND_VIA82CXXX
-	tristate "VIA 82C686 Audio Codec"
-	depends on SOUND_PRIME && PCI && OSS_OBSOLETE && VIRT_TO_BUS
-	help
-	  Say Y here to include support for the audio codec found on VIA
-	  82Cxxx-based chips. Typically these are built into a motherboard.
-
-	  DO NOT select Sound Blaster or Adlib with this driver, unless
-	  you have a Sound Blaster or Adlib card in addition to your VIA
-	  audio chip.
-
-config MIDI_VIA82CXXX
-	bool "VIA 82C686 MIDI"
-	depends on SOUND_VIA82CXXX && ISA_DMA_API
-	help
-	  Answer Y to use the MIDI interface of the Via686. You may need to
-	  enable this in the BIOS before it will work. This is for connection
-	  to external MIDI hardware, and is not required for software playback
-	  of MIDI files.
-
 config SOUND_OSS
 	tristate "OSS sound modules"
 	depends on SOUND_PRIME && ISA_DMA_API && VIRT_TO_BUS
@@ -400,20 +344,6 @@
 
 	  Say Y unless you have 16MB or more RAM or a PCI sound card.
 
-config SOUND_CS4232
-	tristate "Crystal CS4232 based (PnP) cards"
-	depends on SOUND_OSS && OSS_OBSOLETE
-	help
-	  Say Y here if you have a card based on the Crystal CS4232 chip set,
-	  which uses its own Plug and Play protocol.
-
-	  If you compile the driver into the kernel, you have to add
-	  "cs4232=<io>,<irq>,<dma>,<dma2>,<mpuio>,<mpuirq>" to the kernel
-	  command line.
-
-	  See <file:Documentation/sound/oss/CS4232> for more information on
-	  configuring this card.
-
 config SOUND_SSCAPE
 	tristate "Ensoniq SoundScape support"
 	depends on SOUND_OSS
@@ -720,13 +650,6 @@
 	  Say Y here to include support for the Rockwell WaveArtist sound
 	  system.  This driver is mainly for the NetWinder.
 
-config SOUND_TVMIXER
-	tristate "TV card (bt848) mixer support"
-	depends on SOUND_PRIME && I2C && VIDEO_V4L1 && OSS_OBSOLETE
-	help
-	  Support for audio mixer facilities on the BT848 TV frame-grabber
-	  card.
-
 config SOUND_KAHLUA
 	tristate "XpressAudio Sound Blaster emulation"
 	depends on SOUND_SB
diff --git a/sound/oss/trident.c b/sound/oss/trident.c
index 3bc1f6e..96adc47 100644
--- a/sound/oss/trident.c
+++ b/sound/oss/trident.c
@@ -11,12 +11,12 @@
  *  Built from:
  *	Low level code: <audio@tridentmicro.com> from ALSA
  *	Framework: Thomas Sailer <sailer@ife.ee.ethz.ch>
- *	Extended by: Zach Brown <zab@redhat.com>  
+ *	Extended by: Zach Brown <zab@redhat.com>
  *
  *  Hacked up by:
  *	Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
  *	Ollie Lho <ollie@sis.com.tw> SiS 7018 Audio Core Support
- *	Ching-Ling Lee <cling-li@ali.com.tw> ALi 5451 Audio Core Support 
+ *	Ching-Ling Lee <cling-li@ali.com.tw> ALi 5451 Audio Core Support
  *	Matt Wu <mattwu@acersoftech.com.cn> ALi 5451 Audio Core Support
  *	Peter Wächtler <pwaechtler@loewe-komp.de> CyberPro5050 support
  *      Muli Ben-Yehuda <mulix@mulix.org>
@@ -54,33 +54,33 @@
  *	adapt to new pci joystick attachment interface
  *  v0.14.10f
  *      July 24 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
- *      patch from Eric Lemar (via Ian Soboroff): in suspend and resume, 
- *      fix wrong cast from pci_dev* to struct trident_card*. 
+ *      patch from Eric Lemar (via Ian Soboroff): in suspend and resume,
+ *      fix wrong cast from pci_dev* to struct trident_card*.
  *  v0.14.10e
  *      July 19 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
- *      rewrite the DMA buffer allocation/deallcoation functions, to make it 
- *      modular and fix a bug where we would call free_pages on memory 
- *      obtained with pci_alloc_consistent. Also remove unnecessary #ifdef 
+ *      rewrite the DMA buffer allocation/deallocation functions, to make it
+ *      modular and fix a bug where we would call free_pages on memory
+ *      obtained with pci_alloc_consistent. Also remove unnecessary #ifdef
  *      CONFIG_PROC_FS and various other cleanups.
  *  v0.14.10d
  *      July 19 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
  *      made several printk(KERN_NOTICE...) into TRDBG(...), to avoid spamming
- *      my syslog with hundreds of messages. 
+ *      my syslog with hundreds of messages.
  *  v0.14.10c
  *      July 16 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
  *      Cleaned up Lei Hu's 0.4.10 driver to conform to Documentation/CodingStyle
- *      and the coding style used in the rest of the file. 
+ *      and the coding style used in the rest of the file.
  *  v0.14.10b
  *      June 23 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
- *      add a missing unlock_set_fmt, remove a superflous lock/unlock pair 
- *      with nothing in between. 
+ *      add a missing unlock_set_fmt, remove a superfluous lock/unlock pair
+ *      with nothing in between.
  *  v0.14.10a
- *      June 21 2002 Muli Ben-Yehuda <mulix@actcom.co.il> 
- *      use a debug macro instead of #ifdef CONFIG_DEBUG, trim to 80 columns 
- *      per line, use 'do {} while (0)' in statement macros. 
+ *      June 21 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
+ *      use a debug macro instead of #ifdef CONFIG_DEBUG, trim to 80 columns
+ *      per line, use 'do {} while (0)' in statement macros.
  *  v0.14.10
  *      June 6 2002 Lei Hu <Lei_hu@ali.com.tw>
- *      rewrite the part to read/write registers of audio codec for Ali5451 
+ *      rewrite the part to read/write registers of audio codec for Ali5451
  *  v0.14.9e
  *      January 2 2002 Vojtech Pavlik <vojtech@ucw.cz> added gameport
  *      support to avoid resource conflict with pcigame.c
@@ -111,7 +111,7 @@
  *	Set EBUF1 and EBUF2 to still mode
  *	Add dc97/ac97 reset function
  *	Fix power management: ali_restore_regs
- *  unreleased 
+ *  unreleased
  *	Mar 09 2001 Matt Wu
  *	Add cache for ac97 access
  *  v0.14.7
@@ -120,7 +120,7 @@
  *	Fix bug: an extra tail will be played when playing
  *	Jan 05 2001 Matt Wu
  *	Implement multi-channels and S/PDIF in support for ALi 1535+
- *  v0.14.6 
+ *  v0.14.6
  *	Nov 1 2000 Ching-Ling Lee
  *	Fix the bug of memory leak when switching 5.1-channels to 2 channels.
  *	Add lock protection into dynamic changing format of data.
@@ -138,7 +138,7 @@
  *  v0.14.3 May 10 2000 Ollie Lho
  *	fixed a small bug in trident_update_ptr, xmms 1.0.1 no longer uses 100% CPU
  *  v0.14.2 Mar 29 2000 Ching-Ling Lee
- *	Add clear to silence advance in trident_update_ptr 
+ *	Add clear to silence advance in trident_update_ptr
  *	fix invalid data of the end of the sound
  *  v0.14.1 Mar 24 2000 Ching-Ling Lee
  *	ALi 5451 support added, playback and recording O.K.
@@ -178,7 +178,7 @@
  *	SiS 7018 support added, playback O.K.
  *  v0.01 Alan Cox et. al.
  *	Initial Release in kernel 2.3.30, does not work
- * 
+ *
  *  ToDo
  *	Clean up of low level channel register access code. (done)
  *	Fix the bug on dma buffer management in update_ptr, read/write, drain_dac (done)
@@ -326,7 +326,7 @@
 
 		unsigned error;	/* number of over/underruns */
                 /* put process on wait queue when no more space in buffer */
-		wait_queue_head_t wait;	
+		wait_queue_head_t wait;
 
 		/* redundant, but makes calculations easier */
 		unsigned fragsize;
@@ -358,7 +358,7 @@
 struct trident_channel {
 	int num; /* channel number */
 	u32 lba; /* Loop Begine Address, where dma buffer starts */
-	u32 eso; /* End Sample Offset, wehre dma buffer ends */ 
+	u32 eso; /* End Sample Offset, where dma buffer ends */
 	         /* (in the unit of samples) */
 	u32 delta; /* delta value, sample rate / 48k for playback, */
 	           /* 48k/sample rate for recording */
@@ -417,7 +417,7 @@
 	/* soundcore stuff */
 	int dev_audio;
 
-	/* structures for abstraction of hardware facilities, codecs, */ 
+	/* structures for abstraction of hardware facilities, codecs, */
 	/* banks and channels */
 	struct ac97_codec *ac97_codec[NR_AC97];
 	struct trident_pcm_bank banks[NR_BANKS];
@@ -479,7 +479,7 @@
 static u16 trident_ac97_get(struct ac97_codec *codec, u8 reg);
 
 static int trident_open_mixdev(struct inode *inode, struct file *file);
-static int trident_ioctl_mixdev(struct inode *inode, struct file *file, 
+static int trident_ioctl_mixdev(struct inode *inode, struct file *file,
 				unsigned int cmd, unsigned long arg);
 
 static void ali_ac97_set(struct trident_card *card, int secondary, u8 reg, u16 val);
@@ -496,10 +496,10 @@
 static void ali_disable_special_channel(struct trident_card *card, int ch);
 static void ali_setup_spdif_out(struct trident_card *card, int flag);
 static int ali_write_5_1(struct trident_state *state,
-			 const char __user *buffer, 
-			 int cnt_for_multi_channel, unsigned int *copy_count, 
+			 const char __user *buffer,
+			 int cnt_for_multi_channel, unsigned int *copy_count,
 			 unsigned int *state_cnt);
-static int ali_allocate_other_states_resources(struct trident_state *state, 
+static int ali_allocate_other_states_resources(struct trident_state *state,
 					       int chan_nums);
 static void ali_free_other_states_resources(struct trident_state *state);
 
@@ -722,7 +722,7 @@
 	if (channel < 31 || channel > 63)
 		return;
 
-	if (card->pci_id == PCI_DEVICE_ID_TRIDENT_4DWAVE_DX || 
+	if (card->pci_id == PCI_DEVICE_ID_TRIDENT_4DWAVE_DX ||
 	    card->pci_id == PCI_DEVICE_ID_TRIDENT_4DWAVE_NX) {
 		b = inb(TRID_REG(card, T4D_REC_CH));
 		if ((b & ~0x80) == channel)
@@ -742,7 +742,7 @@
 	int idx;
 
 	/* The cyberpro 5050 has only 32 voices and one bank */
-	/* .. at least they are not documented (if you want to call that 
+	/* .. at least they are not documented (if you want to call that
 	 * crap documentation), perhaps broken ? */
 
 	bank = &card->banks[BANK_A];
@@ -802,7 +802,7 @@
 	/* enable, if it was disabled */
 	if ((portDat & CYBER_BMSK_AUENZ) != CYBER_BMSK_AUENZ_ENABLE) {
 		printk(KERN_INFO "cyberpro5050: enabling audio controller\n");
-		cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_AUDIO_ENABLE, 
+		cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_AUDIO_ENABLE,
 			     portDat | CYBER_BMSK_AUENZ_ENABLE);
 		/* check again if hardware is enabled now */
 		portDat = cyber_inidx(CYBER_PORT_AUDIO, CYBER_IDX_AUDIO_ENABLE);
@@ -811,7 +811,7 @@
 		printk(KERN_ERR "cyberpro5050: initAudioAccess: no success\n");
 		ret = -1;
 	} else {
-		cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_IRQ_ENABLE, 
+		cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_IRQ_ENABLE,
 			     CYBER_BMSK_AUDIO_INT_ENABLE);
 		cyber_outidx(CYBER_PORT_AUDIO, 0xbf, 0x01);
 		cyber_outidx(CYBER_PORT_AUDIO, 0xba, 0x20);
@@ -827,7 +827,7 @@
 /*  called with spin lock held */
 
 static int
-trident_load_channel_registers(struct trident_card *card, u32 * data, 
+trident_load_channel_registers(struct trident_card *card, u32 * data,
 			       unsigned int channel)
 {
 	int i;
@@ -845,7 +845,7 @@
 			continue;
 		outl(data[i], TRID_REG(card, CHANNEL_START + 4 * i));
 	}
-	if (card->pci_id == PCI_DEVICE_ID_ALI_5451 || 
+	if (card->pci_id == PCI_DEVICE_ID_ALI_5451 ||
 	    card->pci_id == PCI_DEVICE_ID_INTERG_5050) {
 		outl(ALI_EMOD_Still, TRID_REG(card, ALI_EBUF1));
 		outl(ALI_EMOD_Still, TRID_REG(card, ALI_EBUF2));
@@ -884,7 +884,7 @@
 		break;
 	case PCI_DEVICE_ID_TRIDENT_4DWAVE_NX:
 		data[0] = (channel->delta << 24);
-		data[2] = ((channel->delta << 16) & 0xff000000) | 
+		data[2] = ((channel->delta << 16) & 0xff000000) |
 			(channel->eso & 0x00ffffff);
 		data[3] = channel->fm_vol & 0xffff;
 		break;
@@ -989,13 +989,13 @@
 	if (state->card->pci_id != PCI_DEVICE_ID_SI_7018) {
 		channel->attribute = 0;
 		if (state->card->pci_id == PCI_DEVICE_ID_ALI_5451) {
-			if ((channel->num == ALI_SPDIF_IN_CHANNEL) || 
+			if ((channel->num == ALI_SPDIF_IN_CHANNEL) ||
 			    (channel->num == ALI_PCM_IN_CHANNEL))
 				ali_disable_special_channel(state->card, channel->num);
-			else if ((inl(TRID_REG(state->card, ALI_GLOBAL_CONTROL)) 
+			else if ((inl(TRID_REG(state->card, ALI_GLOBAL_CONTROL))
 				  & ALI_SPDIF_OUT_CH_ENABLE)
 				 && (channel->num == ALI_SPDIF_OUT_CHANNEL)) {
-				ali_set_spdif_out_rate(state->card, 
+				ali_set_spdif_out_rate(state->card,
 						       state->dmabuf.rate);
 				state->dmabuf.channel->delta = 0x1000;
 			}
@@ -1063,7 +1063,7 @@
 
 	channel->lba = dmabuf->dma_handle;
 	channel->delta = compute_rate_rec(dmabuf->rate);
-	if ((card->pci_id == PCI_DEVICE_ID_ALI_5451) && 
+	if ((card->pci_id == PCI_DEVICE_ID_ALI_5451) &&
 	    (channel->num == ALI_SPDIF_IN_CHANNEL)) {
 		rate = ali_get_spdif_in_rate(card);
 		if (rate == 0) {
@@ -1180,8 +1180,8 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&card->lock, flags);
-	if ((dmabuf->mapped || 
-	     dmabuf->count < (signed) dmabuf->dmasize) && 
+	if ((dmabuf->mapped ||
+	     dmabuf->count < (signed) dmabuf->dmasize) &&
 	    dmabuf->ready) {
 		dmabuf->enable |= ADC_RUNNING;
 		trident_enable_voice_irq(card, chan_num);
@@ -1261,7 +1261,7 @@
 	void *rawbuf = NULL;
 	struct page *page, *pend;
 
-	if (!(rawbuf = pci_alloc_consistent(pci_dev, PAGE_SIZE << order, 
+	if (!(rawbuf = pci_alloc_consistent(pci_dev, PAGE_SIZE << order,
 					    &dmabuf->dma_handle)))
 		return -ENOMEM;
 
@@ -1272,7 +1272,7 @@
 	dmabuf->rawbuf = rawbuf;
 	dmabuf->buforder = order;
 
-	/* now mark the pages as reserved; otherwise */ 
+	/* now mark the pages as reserved; otherwise */
 	/* remap_pfn_range doesn't do what we want */
 	pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
 	for (page = virt_to_page(rawbuf); page <= pend; page++)
@@ -1310,7 +1310,7 @@
 		pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
 		for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++)
 			ClearPageReserved(page);
-		pci_free_consistent(pci_dev, PAGE_SIZE << dmabuf->buforder, 
+		pci_free_consistent(pci_dev, PAGE_SIZE << dmabuf->buforder,
 				    dmabuf->rawbuf, dmabuf->dma_handle);
 		dmabuf->rawbuf = NULL;
 	}
@@ -1368,7 +1368,7 @@
 					dealloc_dmabuf(&state->dmabuf, state->card->pci_dev);
 					/* release the auxiliary DMA buffers */
 					for (i -= 2; i >= 0; i--)
-						dealloc_dmabuf(&state->other_states[i]->dmabuf, 
+						dealloc_dmabuf(&state->other_states[i]->dmabuf,
 							       state->card->pci_dev);
 					unlock_set_fmt(state);
 					return ret;
@@ -1398,7 +1398,7 @@
 		dmabuf->fragsamples = dmabuf->fragsize >> sample_shift[dmabuf->fmt];
 		dmabuf->dmasize = dmabuf->numfrag << dmabuf->fragshift;
 
-		memset(dmabuf->rawbuf, (dmabuf->fmt & TRIDENT_FMT_16BIT) ? 0 : 0x80, 
+		memset(dmabuf->rawbuf, (dmabuf->fmt & TRIDENT_FMT_16BIT) ? 0 : 0x80,
 		       dmabuf->dmasize);
 
 		spin_lock_irqsave(&s->card->lock, flags);
@@ -1453,7 +1453,7 @@
 	swptr = dmabuf->swptr;
 	spin_unlock_irqrestore(&state->card->lock, flags);
 
-	if (swptr == 0 || swptr == dmabuf->dmasize / 2 || 
+	if (swptr == 0 || swptr == dmabuf->dmasize / 2 ||
 	    swptr == dmabuf->dmasize)
 		return;
 
@@ -1511,7 +1511,7 @@
 
 		/* No matter how much data is left in the buffer, we have to wait until
 		   CSO == ESO/2 or CSO == ESO when address engine interrupts */
-		if (state->card->pci_id == PCI_DEVICE_ID_ALI_5451 || 
+		if (state->card->pci_id == PCI_DEVICE_ID_ALI_5451 ||
 		    state->card->pci_id == PCI_DEVICE_ID_INTERG_5050) {
 			diff = dmabuf->swptr - trident_get_dma_addr(state) + dmabuf->dmasize;
 			diff = diff % (dmabuf->dmasize);
@@ -1532,7 +1532,7 @@
 	return 0;
 }
 
-/* update buffer manangement pointers, especially, */ 
+/* update buffer management pointers, especially, */
 /* dmabuf->count and dmabuf->hwptr */
 static void
 trident_update_ptr(struct trident_state *state)
@@ -1559,11 +1559,11 @@
 		} else {
 			dmabuf->count += diff;
 
-			if (dmabuf->count < 0 || 
+			if (dmabuf->count < 0 ||
 			    dmabuf->count > dmabuf->dmasize) {
-				/* buffer underrun or buffer overrun, */ 
-				/* we have no way to recover it here, just */ 
-				/* stop the machine and let the process */ 
+				/* buffer underrun or buffer overrun, */
+				/* we have no way to recover it here, just */
+				/* stop the machine and let the process */
 				/* force hwptr and swptr to sync */
 				__stop_adc(state);
 				dmabuf->error++;
@@ -1582,7 +1582,7 @@
 		} else {
 			dmabuf->count -= diff;
 
-			if (dmabuf->count < 0 || 
+			if (dmabuf->count < 0 ||
 			    dmabuf->count > dmabuf->dmasize) {
 				/* buffer underrun or buffer overrun, we have no way to recover
 				   it here, just stop the machine and let the process force hwptr
@@ -1608,13 +1608,13 @@
 						if (state->chans_num == 6) {
 							clear_cnt = clear_cnt / 2;
 							swptr = swptr / 2;
-							memset(state->other_states[0]->dmabuf.rawbuf + swptr, 
+							memset(state->other_states[0]->dmabuf.rawbuf + swptr,
 							       silence, clear_cnt);
-							memset(state->other_states[1]->dmabuf.rawbuf + swptr, 
+							memset(state->other_states[1]->dmabuf.rawbuf + swptr,
 							       silence, clear_cnt);
-							memset(state->other_states[2]->dmabuf.rawbuf + swptr, 
+							memset(state->other_states[2]->dmabuf.rawbuf + swptr,
 							       silence, clear_cnt);
-							memset(state->other_states[3]->dmabuf.rawbuf + swptr, 
+							memset(state->other_states[3]->dmabuf.rawbuf + swptr,
 							       silence, clear_cnt);
 						}
 						dmabuf->endcleared = 1;
@@ -1627,13 +1627,13 @@
 					if (state->chans_num == 6) {
 						clear_cnt = clear_cnt / 2;
 						swptr = swptr / 2;
-						memset(state->other_states[0]->dmabuf.rawbuf + swptr, 
+						memset(state->other_states[0]->dmabuf.rawbuf + swptr,
 						       silence, clear_cnt);
-						memset(state->other_states[1]->dmabuf.rawbuf + swptr, 
+						memset(state->other_states[1]->dmabuf.rawbuf + swptr,
 						       silence, clear_cnt);
-						memset(state->other_states[2]->dmabuf.rawbuf + swptr, 
+						memset(state->other_states[2]->dmabuf.rawbuf + swptr,
 						       silence, clear_cnt);
-						memset(state->other_states[3]->dmabuf.rawbuf + swptr, 
+						memset(state->other_states[3]->dmabuf.rawbuf + swptr,
 						       silence, clear_cnt);
 					}
 					dmabuf->endcleared = 1;
@@ -1665,7 +1665,7 @@
 			if ((state = card->states[i]) != NULL) {
 				trident_update_ptr(state);
 			} else {
-				printk(KERN_WARNING "trident: spurious channel " 
+				printk(KERN_WARNING "trident: spurious channel "
 				       "irq %d.\n", channel);
 				trident_stop_voice(card, channel);
 				trident_disable_voice_irq(card, channel);
@@ -1694,7 +1694,7 @@
 
 	if (opt == 1) {		// MUTE
 		dwTemp ^= 0x8000;
-		ali_ac97_write(card->ac97_codec[0], 
+		ali_ac97_write(card->ac97_codec[0],
 			       0x02, dwTemp);
 	} else if (opt == 2) {	// Down
 		if (mute)
@@ -1706,7 +1706,7 @@
 		dwTemp &= 0xe0e0;
 		dwTemp |= (volume[0]) | (volume[1] << 8);
 		ali_ac97_write(card->ac97_codec[0], 0x02, dwTemp);
-		card->ac97_codec[0]->mixer_state[0] = ((32 - volume[0]) * 25 / 8) | 
+		card->ac97_codec[0]->mixer_state[0] = ((32 - volume[0]) * 25 / 8) |
 			(((32 - volume[1]) * 25 / 8) << 8);
 	} else if (opt == 4) {	// Up
 		if (mute)
@@ -1718,7 +1718,7 @@
 		dwTemp &= 0xe0e0;
 		dwTemp |= (volume[0]) | (volume[1] << 8);
 		ali_ac97_write(card->ac97_codec[0], 0x02, dwTemp);
-		card->ac97_codec[0]->mixer_state[0] = ((32 - volume[0]) * 25 / 8) | 
+		card->ac97_codec[0]->mixer_state[0] = ((32 - volume[0]) * 25 / 8) |
 			(((32 - volume[1]) * 25 / 8) << 8);
 	} else {
 		/* Nothing needs doing */
@@ -1801,7 +1801,7 @@
 			if ((state = card->states[i]) != NULL) {
 				trident_update_ptr(state);
 			} else {
-				printk(KERN_WARNING "cyber5050: spurious " 
+				printk(KERN_WARNING "cyber5050: spurious "
 				       "channel irq %d.\n", channel);
 				trident_stop_voice(card, channel);
 				trident_disable_voice_irq(card, channel);
@@ -1836,21 +1836,21 @@
 				ali_queue_task(card, gpio & 0x07);
 		}
 		event = inl(TRID_REG(card, T4D_MISCINT));
-		outl(event | (ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW), 
+		outl(event | (ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW),
 		     TRID_REG(card, T4D_MISCINT));
 		spin_unlock(&card->lock);
 		return IRQ_HANDLED;
 	}
 
 	/* manually clear interrupt status, bad hardware design, blame T^2 */
-	outl((ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW), 
+	outl((ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW),
 	     TRID_REG(card, T4D_MISCINT));
 	spin_unlock(&card->lock);
 	return IRQ_HANDLED;
 }
 
-/* in this loop, dmabuf.count signifies the amount of data that is waiting */ 
-/* to be copied to the user's buffer.  it is filled by the dma machine and */ 
+/* in this loop, dmabuf.count signifies the amount of data that is waiting */
+/* to be copied to the user's buffer.  it is filled by the dma machine and */
 /* drained by this loop. */
 static ssize_t
 trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
@@ -1878,8 +1878,8 @@
 	while (count > 0) {
 		spin_lock_irqsave(&state->card->lock, flags);
 		if (dmabuf->count > (signed) dmabuf->dmasize) {
-			/* buffer overrun, we are recovering from */ 
-			/* sleep_on_timeout, resync hwptr and swptr, */ 
+			/* buffer overrun, we are recovering from */
+			/* sleep_on_timeout, resync hwptr and swptr, */
 			/* make process flush the buffer */
 			dmabuf->count = dmabuf->dmasize;
 			dmabuf->swptr = dmabuf->hwptr;
@@ -1894,7 +1894,7 @@
 			cnt = count;
 		if (cnt <= 0) {
 			unsigned long tmo;
-			/* buffer is empty, start the dma machine and */ 
+			/* buffer is empty, start the dma machine and */
 			/* wait for data to be recorded */
 			start_adc(state);
 			if (file->f_flags & O_NONBLOCK) {
@@ -1904,8 +1904,8 @@
 			}
 
 			mutex_unlock(&state->sem);
-			/* No matter how much space left in the buffer, */ 
-			/* we have to wait until CSO == ESO/2 or CSO == ESO */ 
+			/* No matter how much space left in the buffer, */
+			/* we have to wait until CSO == ESO/2 or CSO == ESO */
 			/* when address engine interrupts */
 			tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2);
 			tmo >>= sample_shift[dmabuf->fmt];
@@ -2005,7 +2005,7 @@
 	while (count > 0) {
 		spin_lock_irqsave(&state->card->lock, flags);
 		if (dmabuf->count < 0) {
-			/* buffer underrun, we are recovering from */ 
+			/* buffer underrun, we are recovering from */
 			/* sleep_on_timeout, resync hwptr and swptr */
 			dmabuf->count = 0;
 			dmabuf->swptr = dmabuf->hwptr;
@@ -2020,7 +2020,7 @@
 			cnt = count;
 		if (cnt <= 0) {
 			unsigned long tmo;
-			/* buffer is full, start the dma machine and */ 
+			/* buffer is full, start the dma machine and */
 			/* wait for data to be played */
 			start_dac(state);
 			if (file->f_flags & O_NONBLOCK) {
@@ -2028,8 +2028,8 @@
 					ret = -EAGAIN;
 				goto out;
 			}
-			/* No matter how much data left in the buffer, */ 
-			/* we have to wait until CSO == ESO/2 or CSO == ESO */ 
+			/* No matter how much data left in the buffer, */
+			/* we have to wait until CSO == ESO/2 or CSO == ESO */
 			/* when address engine interrupts */
 			lock_set_fmt(state);
 			tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2);
@@ -2037,15 +2037,15 @@
 			unlock_set_fmt(state);
 			mutex_unlock(&state->sem);
 
-			/* There are two situations when sleep_on_timeout */ 
-			/* returns, one is when the interrupt is serviced */ 
-			/* correctly and the process is waked up by ISR */ 
-			/* ON TIME. Another is when timeout is expired, which */ 
-			/* means that either interrupt is NOT serviced */ 
-			/* correctly (pending interrupt) or it is TOO LATE */ 
-			/* for the process to be scheduled to run */ 
-			/* (scheduler latency) which results in a (potential) */ 
-			/* buffer underrun. And worse, there is NOTHING we */ 
+			/* There are two situations when sleep_on_timeout */
+			/* returns, one is when the interrupt is serviced */
+			/* correctly and the process is waked up by ISR */
+			/* ON TIME. Another is when timeout is expired, which */
+			/* means that either interrupt is NOT serviced */
+			/* correctly (pending interrupt) or it is TOO LATE */
+			/* for the process to be scheduled to run */
+			/* (scheduler latency) which results in a (potential) */
+			/* buffer underrun. And worse, there is NOTHING we */
 			/* can do to prevent it. */
 			if (!interruptible_sleep_on_timeout(&dmabuf->wait, tmo)) {
 				pr_debug(KERN_ERR "trident: playback schedule "
@@ -2054,8 +2054,8 @@
 					 dmabuf->fragsize, dmabuf->count,
 					 dmabuf->hwptr, dmabuf->swptr);
 
-				/* a buffer underrun, we delay the recovery */ 
-				/* until next time the while loop begin and */ 
+				/* a buffer underrun, we delay the recovery */
+				/* until next time the while loop begin and */
 				/* we REALLY have data to play */
 			}
 			if (signal_pending(current)) {
@@ -2079,7 +2079,7 @@
 		if (state->chans_num == 6) {
 			copy_count = 0;
 			state_cnt = 0;
-			if (ali_write_5_1(state, buffer, cnt, &copy_count, 
+			if (ali_write_5_1(state, buffer, cnt, &copy_count,
 					  &state_cnt) == -EFAULT) {
 				if (state_cnt) {
 					swptr = (swptr + state_cnt) % dmabuf->dmasize;
@@ -2096,7 +2096,7 @@
 				goto out;
 			}
 		} else {
-			if (copy_from_user(dmabuf->rawbuf + swptr, 
+			if (copy_from_user(dmabuf->rawbuf + swptr,
 					   buffer, cnt)) {
 				if (!ret)
 					ret = -EFAULT;
@@ -2172,7 +2172,7 @@
 			if (dmabuf->count >= (signed) dmabuf->fragsize)
 				mask |= POLLOUT | POLLWRNORM;
 		} else {
-			if ((signed) dmabuf->dmasize >= dmabuf->count + 
+			if ((signed) dmabuf->dmasize >= dmabuf->count +
 			    (signed) dmabuf->fragsize)
 				mask |= POLLOUT | POLLWRNORM;
 		}
@@ -2227,7 +2227,7 @@
 }
 
 static int
-trident_ioctl(struct inode *inode, struct file *file, 
+trident_ioctl(struct inode *inode, struct file *file,
 	      unsigned int cmd, unsigned long arg)
 {
 	struct trident_state *state = (struct trident_state *)file->private_data;
@@ -2348,7 +2348,7 @@
 
 
 	case SNDCTL_DSP_GETFMTS: /* Returns a mask of supported sample format */
-		ret = put_user(AFMT_S16_LE | AFMT_U16_LE | AFMT_S8 | 
+		ret = put_user(AFMT_S16_LE | AFMT_U16_LE | AFMT_S8 |
 			       AFMT_U8, p);
 		break;
 
@@ -2379,7 +2379,7 @@
 			}
 		}
 		unlock_set_fmt(state);
-		ret = put_user((dmabuf->fmt & TRIDENT_FMT_16BIT) ? AFMT_S16_LE : 
+		ret = put_user((dmabuf->fmt & TRIDENT_FMT_16BIT) ? AFMT_S16_LE :
 			       AFMT_U8, p);
 		break;
 
@@ -2438,7 +2438,7 @@
 				stop_adc(state);
 				dmabuf->ready = 0;
 				if (val >= 2) {
-					if (!((file->f_mode & FMODE_WRITE) && 
+					if (!((file->f_mode & FMODE_WRITE) &&
 					      (val == 6)))
 						val = 2;
 					dmabuf->fmt |= TRIDENT_FMT_STEREO;
@@ -2504,7 +2504,7 @@
 		abinfo.fragstotal = dmabuf->numfrag;
 		abinfo.fragments = abinfo.bytes >> dmabuf->fragshift;
 		spin_unlock_irqrestore(&state->card->lock, flags);
-		ret = copy_to_user(argp, &abinfo, sizeof (abinfo)) ? 
+		ret = copy_to_user(argp, &abinfo, sizeof (abinfo)) ?
 			-EFAULT : 0;
 		break;
 
@@ -2524,7 +2524,7 @@
 		abinfo.fragstotal = dmabuf->numfrag;
 		abinfo.fragments = abinfo.bytes >> dmabuf->fragshift;
 		spin_unlock_irqrestore(&state->card->lock, flags);
-		ret = copy_to_user(argp, &abinfo, sizeof (abinfo)) ? 
+		ret = copy_to_user(argp, &abinfo, sizeof (abinfo)) ?
 			-EFAULT : 0;
 		break;
 
@@ -2533,7 +2533,7 @@
 		break;
 
 	case SNDCTL_DSP_GETCAPS:
-		ret = put_user(DSP_CAP_REALTIME | DSP_CAP_TRIGGER | 
+		ret = put_user(DSP_CAP_REALTIME | DSP_CAP_TRIGGER |
 			       DSP_CAP_MMAP | DSP_CAP_BIND, p);
 		break;
 
@@ -2553,7 +2553,7 @@
 		}
 		if (file->f_mode & FMODE_READ) {
 			if (val & PCM_ENABLE_INPUT) {
-				if (!dmabuf->ready && 
+				if (!dmabuf->ready &&
 				    (ret = prog_dmabuf_record(state)))
 					break;
 				start_adc(state);
@@ -2562,7 +2562,7 @@
 		}
 		if (file->f_mode & FMODE_WRITE) {
 			if (val & PCM_ENABLE_OUTPUT) {
-				if (!dmabuf->ready && 
+				if (!dmabuf->ready &&
 				    (ret = prog_dmabuf_playback(state)))
 					break;
 				start_dac(state);
@@ -2589,7 +2589,7 @@
 		if (dmabuf->mapped)
 			dmabuf->count &= dmabuf->fragsize - 1;
 		spin_unlock_irqrestore(&state->card->lock, flags);
-		ret = copy_to_user(argp, &cinfo, sizeof (cinfo)) ? 
+		ret = copy_to_user(argp, &cinfo, sizeof (cinfo)) ?
 			-EFAULT : 0;
 		break;
 
@@ -2612,7 +2612,7 @@
 		if (dmabuf->mapped)
 			dmabuf->count &= dmabuf->fragsize - 1;
 		spin_unlock_irqrestore(&state->card->lock, flags);
-		ret = copy_to_user(argp, &cinfo, sizeof (cinfo)) ? 
+		ret = copy_to_user(argp, &cinfo, sizeof (cinfo)) ?
 			-EFAULT : 0;
 		break;
 
@@ -2641,17 +2641,17 @@
 		break;
 
 	case SOUND_PCM_READ_CHANNELS:
-		ret = put_user((dmabuf->fmt & TRIDENT_FMT_STEREO) ? 2 : 1, 
+		ret = put_user((dmabuf->fmt & TRIDENT_FMT_STEREO) ? 2 : 1,
 			       p);
 		break;
 
 	case SOUND_PCM_READ_BITS:
-		ret = put_user((dmabuf->fmt & TRIDENT_FMT_16BIT) ? AFMT_S16_LE : 
+		ret = put_user((dmabuf->fmt & TRIDENT_FMT_16BIT) ? AFMT_S16_LE :
 			       AFMT_U8, p);
 		break;
 
 	case SNDCTL_DSP_GETCHANNELMASK:
-		ret = put_user(DSP_BIND_FRONT | DSP_BIND_SURR | 
+		ret = put_user(DSP_BIND_FRONT | DSP_BIND_SURR |
 			       DSP_BIND_CENTER_LFE,  p);
 		break;
 
@@ -2671,10 +2671,10 @@
 		} else {
 			dmabuf->ready = 0;
 			if (file->f_mode & FMODE_READ)
-				dmabuf->channel->attribute = (CHANNEL_REC | 
+				dmabuf->channel->attribute = (CHANNEL_REC |
 							      SRC_ENABLE);
 			if (file->f_mode & FMODE_WRITE)
-				dmabuf->channel->attribute = (CHANNEL_SPC_PB | 
+				dmabuf->channel->attribute = (CHANNEL_SPC_PB |
 							      SRC_ENABLE);
 			dmabuf->channel->attribute |= mask2attr[ffs(val)];
 		}
@@ -2702,6 +2702,7 @@
 	struct trident_card *card = devs;
 	struct trident_state *state = NULL;
 	struct dmabuf *dmabuf = NULL;
+	unsigned long flags;
 
 	/* Added by Matt Wu 01-05-2001 */
 	/* TODO: there's some redundacy here wrt the check below */
@@ -2765,8 +2766,8 @@
 	init_waitqueue_head(&dmabuf->wait);
 	file->private_data = state;
 
-	/* set default sample format. According to OSS Programmer's */ 
-	/* Guide  /dev/dsp should be default to unsigned 8-bits, mono, */ 
+	/* set default sample format. According to OSS Programmer's */
+	/* Guide  /dev/dsp should be default to unsigned 8-bits, mono, */
 	/* with sample rate 8kHz and /dev/dspW will accept 16-bits sample */
 	if (file->f_mode & FMODE_WRITE) {
 		dmabuf->fmt &= ~TRIDENT_FMT_MASK;
@@ -2779,11 +2780,13 @@
 			/* set default channel attribute to normal playback */
 			dmabuf->channel->attribute = CHANNEL_PB;
 		}
+		spin_lock_irqsave(&card->lock, flags);
 		trident_set_dac_rate(state, 8000);
+		spin_unlock_irqrestore(&card->lock, flags);
 	}
 
 	if (file->f_mode & FMODE_READ) {
-		/* FIXME: Trident 4d can only record in signed 16-bits stereo, */ 
+		/* FIXME: Trident 4d can only record in signed 16-bits stereo, */
 		/* 48kHz sample, to be dealed with in trident_set_adc_rate() ?? */
 		dmabuf->fmt &= ~TRIDENT_FMT_MASK;
 		if ((minor & 0x0f) == SND_DEV_DSP16)
@@ -2794,10 +2797,12 @@
 		if (card->pci_id == PCI_DEVICE_ID_SI_7018) {
 			/* set default channel attribute to 0x8a80, record from
 			   PCM L/R FIFO and mono = (left + right + 1)/2 */
-			dmabuf->channel->attribute = (CHANNEL_REC | PCM_LR | 
+			dmabuf->channel->attribute = (CHANNEL_REC | PCM_LR |
 						      MONO_MIX);
 		}
+		spin_lock_irqsave(&card->lock, flags);
 		trident_set_adc_rate(state, 8000);
+		spin_unlock_irqrestore(&card->lock, flags);
 
 		/* Added by Matt Wu 01-05-2001 */
 		if (card->pci_id == PCI_DEVICE_ID_ALI_5451)
@@ -3020,7 +3025,7 @@
 			break;
 		if (wsemabits == 0) {
 		      unlock:
-			outl(((u32) (wcontrol & 0x1eff) | 0x00004000), 
+			outl(((u32) (wcontrol & 0x1eff) | 0x00004000),
 			     TRID_REG(card, ALI_AC97_WRITE));
 			continue;
 		}
@@ -3104,7 +3109,7 @@
 	ncount = 10;
 
 	while (1) {
-		if ((inw(TRID_REG(card, ALI_AC97_WRITE)) & ALI_AC97_BUSY_READ) 
+		if ((inw(TRID_REG(card, ALI_AC97_WRITE)) & ALI_AC97_BUSY_READ)
 		    != 0)
 			break;
 		if (ncount <= 0)
@@ -3112,7 +3117,7 @@
 		if (ncount-- == 1) {
 			pr_debug("ali_ac97_read :try clear busy flag\n");
 			aud_reg = inl(TRID_REG(card, ALI_AC97_WRITE));
-			outl((aud_reg & 0xffff7fff), 
+			outl((aud_reg & 0xffff7fff),
 			     TRID_REG(card, ALI_AC97_WRITE));
 		}
 		udelay(10);
@@ -3159,7 +3164,7 @@
 
 	wcontrol = inw(TRID_REG(card, ALI_AC97_WRITE));
 	wcontrol &= 0xff00;
-	wcontrol |= (0x8100 | reg); /* bit 8=1: (ali1535 )reserved/ */ 
+	wcontrol |= (0x8100 | reg); /* bit 8=1: (ali1535 )reserved/ */
 	                            /* ali1535+ write */
 	outl((data | wcontrol), TRID_REG(card, ALI_AC97_WRITE));
 
@@ -3177,7 +3182,7 @@
 			break;
 		if (ncount-- == 1) {
 			pr_debug("ali_ac97_set :try clear busy flag!!\n");
-			outw(wcontrol & 0x7fff, 
+			outw(wcontrol & 0x7fff,
 			     TRID_REG(card, ALI_AC97_WRITE));
 		}
 		udelay(10);
@@ -3382,7 +3387,7 @@
 	bval |= 0x1F;
 	outb(bval, TRID_REG(card, ALI_SPDIF_CTRL + 1));
 
-	while (((R1 < 0x0B) || (R1 > 0x0E)) && (R1 != 0x12) && 
+	while (((R1 < 0x0B) || (R1 > 0x0E)) && (R1 != 0x12) &&
 	       count <= 50000) {
 		count++;
 
@@ -3669,14 +3674,14 @@
 	spin_lock_irqsave(&card->lock, flags);
 
 	ali_registers.global_regs[0x2c] = inl(TRID_REG(card, T4D_MISCINT));
-	//ali_registers.global_regs[0x20] = inl(TRID_REG(card,T4D_START_A));    
+	//ali_registers.global_regs[0x20] = inl(TRID_REG(card,T4D_START_A));
 	ali_registers.global_regs[0x21] = inl(TRID_REG(card, T4D_STOP_A));
 
 	//disable all IRQ bits
 	outl(ALI_DISABLE_ALL_IRQ, TRID_REG(card, T4D_MISCINT));
 
 	for (i = 1; i < ALI_MIXER_REGS; i++)
-		ali_registers.mixer_regs[i] = ali_ac97_read(card->ac97_codec[0], 
+		ali_registers.mixer_regs[i] = ali_ac97_read(card->ac97_codec[0],
 							    i * 2);
 
 	for (i = 0; i < ALI_GLOBAL_REGS; i++) {
@@ -3688,7 +3693,7 @@
 	for (i = 0; i < ALI_CHANNELS; i++) {
 		outb(i, TRID_REG(card, T4D_LFO_GC_CIR));
 		for (j = 0; j < ALI_CHANNEL_REGS; j++)
-			ali_registers.channel_regs[i][j] = inl(TRID_REG(card, 
+			ali_registers.channel_regs[i][j] = inl(TRID_REG(card,
 									j * 4 + 0xe0));
 	}
 
@@ -3707,18 +3712,18 @@
 	spin_lock_irqsave(&card->lock, flags);
 
 	for (i = 1; i < ALI_MIXER_REGS; i++)
-		ali_ac97_write(card->ac97_codec[0], i * 2, 
+		ali_ac97_write(card->ac97_codec[0], i * 2,
 			       ali_registers.mixer_regs[i]);
 
 	for (i = 0; i < ALI_CHANNELS; i++) {
 		outb(i, TRID_REG(card, T4D_LFO_GC_CIR));
 		for (j = 0; j < ALI_CHANNEL_REGS; j++)
-			outl(ali_registers.channel_regs[i][j], 
+			outl(ali_registers.channel_regs[i][j],
 			     TRID_REG(card, j * 4 + 0xe0));
 	}
 
 	for (i = 0; i < ALI_GLOBAL_REGS; i++) {
-		if ((i * 4 == T4D_MISCINT) || (i * 4 == T4D_STOP_A) || 
+		if ((i * 4 == T4D_MISCINT) || (i * 4 == T4D_STOP_A) ||
 		    (i * 4 == T4D_START_A))
 			continue;
 		outl(ali_registers.global_regs[i], TRID_REG(card, i * 4));
@@ -3763,7 +3768,7 @@
 
 	bank = &card->banks[BANK_A];
 
-	if (inl(TRID_REG(card, ALI_GLOBAL_CONTROL)) & 
+	if (inl(TRID_REG(card, ALI_GLOBAL_CONTROL)) &
 	    (ALI_SPDIF_OUT_CH_ENABLE)) {
 		idx = ALI_SPDIF_OUT_CHANNEL;
 		if (!(bank->bitmap & (1 << idx))) {
@@ -3774,7 +3779,7 @@
 		}
 	}
 
-	for (idx = ALI_PCM_OUT_CHANNEL_FIRST; idx <= ALI_PCM_OUT_CHANNEL_LAST; 
+	for (idx = ALI_PCM_OUT_CHANNEL_FIRST; idx <= ALI_PCM_OUT_CHANNEL_LAST;
 	     idx++) {
 		if (!(bank->bitmap & (1 << idx))) {
 			struct trident_channel *channel = &bank->channels[idx];
@@ -3785,9 +3790,9 @@
 	}
 
 	/* no more free channels avaliable */
-#if 0 
+#if 0
 	printk(KERN_ERR "ali: no more channels available on Bank A.\n");
-#endif /* 0 */ 
+#endif /* 0 */
 	return NULL;
 }
 
@@ -3812,9 +3817,9 @@
 	}
 
 	/* no free recordable channels avaliable */
-#if 0 
+#if 0
 	printk(KERN_ERR "ali: no recordable channels available on Bank A.\n");
-#endif /* 0 */ 
+#endif /* 0 */
 	return NULL;
 }
 
@@ -3837,14 +3842,14 @@
 		break;
 	}
 
-	/* select spdif_out */ 
+	/* select spdif_out */
 	ch_st_sel = inb(TRID_REG(card, ALI_SPDIF_CTRL)) & ALI_SPDIF_OUT_CH_STATUS;
 
-	ch_st_sel |= 0x80;	/* select right */ 
+	ch_st_sel |= 0x80;	/* select right */
 	outb(ch_st_sel, TRID_REG(card, ALI_SPDIF_CTRL));
 	outb(status_rate | 0x20, TRID_REG(card, ALI_SPDIF_CS + 2));
 
-	ch_st_sel &= (~0x80);	/* select left */ 
+	ch_st_sel &= (~0x80);	/* select left */
 	outb(ch_st_sel, TRID_REG(card, ALI_SPDIF_CTRL));
 	outw(status_rate | 0x10, TRID_REG(card, ALI_SPDIF_CS + 2));
 }
@@ -3881,14 +3886,14 @@
 	}
 }
 
-/* Updating the values of counters of other_states' DMAs without lock 
+/* Updating the values of counters of other_states' DMAs without lock
 protection is no harm because all DMAs of multi-channels and interrupt
 depend on a master state's DMA, and changing the counters of the master
 state DMA is protected by a spinlock.
 */
 static int
-ali_write_5_1(struct trident_state *state, const char __user *buf, 
-	      int cnt_for_multi_channel, unsigned int *copy_count, 
+ali_write_5_1(struct trident_state *state, const char __user *buf,
+	      int cnt_for_multi_channel, unsigned int *copy_count,
 	      unsigned int *state_cnt)
 {
 
@@ -3904,10 +3909,10 @@
 
 	if ((i = state->multi_channels_adjust_count) > 0) {
 		if (i == 1) {
-			if (copy_from_user(dmabuf->rawbuf + swptr, 
+			if (copy_from_user(dmabuf->rawbuf + swptr,
 					   buffer, sample_s))
 				return -EFAULT;
-			seek_offset(swptr, buffer, cnt_for_multi_channel, 
+			seek_offset(swptr, buffer, cnt_for_multi_channel,
 				    sample_s, *copy_count);
 			i--;
 			(*state_cnt) += sample_s;
@@ -3916,10 +3921,10 @@
 			i = i - (state->chans_num - other_dma_nums);
 		for (; (i < other_dma_nums) && (cnt_for_multi_channel > 0); i++) {
 			dmabuf_temp = &state->other_states[i]->dmabuf;
-			if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr, 
+			if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
 					   buffer, sample_s))
 				return -EFAULT;
-			seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel, 
+			seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
 				    sample_s, *copy_count);
 		}
 		if (cnt_for_multi_channel == 0)
@@ -3928,39 +3933,39 @@
 	if (cnt_for_multi_channel > 0) {
 		loop = cnt_for_multi_channel / (state->chans_num * sample_s);
 		for (i = 0; i < loop; i++) {
-			if (copy_from_user(dmabuf->rawbuf + swptr, buffer, 
+			if (copy_from_user(dmabuf->rawbuf + swptr, buffer,
 					   sample_s * 2))
 				return -EFAULT;
-			seek_offset(swptr, buffer, cnt_for_multi_channel, 
+			seek_offset(swptr, buffer, cnt_for_multi_channel,
 				    sample_s * 2, *copy_count);
 			(*state_cnt) += (sample_s * 2);
 
 			dmabuf_temp = &state->other_states[0]->dmabuf;
-			if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr, 
+			if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
 					   buffer, sample_s))
 				return -EFAULT;
-			seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel, 
+			seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
 				    sample_s, *copy_count);
 
 			dmabuf_temp = &state->other_states[1]->dmabuf;
-			if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr, 
+			if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
 					   buffer, sample_s))
 				return -EFAULT;
-			seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel, 
+			seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
 				    sample_s, *copy_count);
 
 			dmabuf_temp = &state->other_states[2]->dmabuf;
-			if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr, 
+			if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
 					   buffer, sample_s))
 				return -EFAULT;
-			seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel, 
+			seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
 				    sample_s, *copy_count);
 
 			dmabuf_temp = &state->other_states[3]->dmabuf;
-			if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr, 
+			if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
 					   buffer, sample_s))
 				return -EFAULT;
-			seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel, 
+			seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
 				    sample_s, *copy_count);
 		}
 
@@ -3969,15 +3974,15 @@
 
 			if (copy_from_user(dmabuf->rawbuf + swptr, buffer, sample_s))
 				return -EFAULT;
-			seek_offset(swptr, buffer, cnt_for_multi_channel, 
+			seek_offset(swptr, buffer, cnt_for_multi_channel,
 				    sample_s, *copy_count);
 			(*state_cnt) += sample_s;
 
 			if (cnt_for_multi_channel > 0) {
-				if (copy_from_user(dmabuf->rawbuf + swptr, 
+				if (copy_from_user(dmabuf->rawbuf + swptr,
 						   buffer, sample_s))
 					return -EFAULT;
-				seek_offset(swptr, buffer, cnt_for_multi_channel, 
+				seek_offset(swptr, buffer, cnt_for_multi_channel,
 					    sample_s, *copy_count);
 				(*state_cnt) += sample_s;
 
@@ -3986,12 +3991,12 @@
 					loop = state->multi_channels_adjust_count - diff;
 					for (i = 0; i < loop; i++) {
 						dmabuf_temp = &state->other_states[i]->dmabuf;
-						if (copy_from_user(dmabuf_temp->rawbuf + 
-								   dmabuf_temp->swptr, 
+						if (copy_from_user(dmabuf_temp->rawbuf +
+								   dmabuf_temp->swptr,
 								   buffer, sample_s))
 							return -EFAULT;
-						seek_offset(dmabuf_temp->swptr, buffer, 
-							    cnt_for_multi_channel, 
+						seek_offset(dmabuf_temp->swptr, buffer,
+							    cnt_for_multi_channel,
 							    sample_s, *copy_count);
 					}
 				}
@@ -4048,11 +4053,11 @@
 		ali_disable_special_channel(card, ALI_SPDIF_OUT_CHANNEL);
 		break;
 	case '1':
-		ali_setup_spdif_out(card, ALI_SPDIF_OUT_TO_SPDIF_OUT | 
+		ali_setup_spdif_out(card, ALI_SPDIF_OUT_TO_SPDIF_OUT |
 				    ALI_SPDIF_OUT_PCM);
 		break;
 	case '2':
-		ali_setup_spdif_out(card, ALI_SPDIF_OUT_TO_SPDIF_OUT | 
+		ali_setup_spdif_out(card, ALI_SPDIF_OUT_TO_SPDIF_OUT |
 				    ALI_SPDIF_OUT_NON_PCM);
 		break;
 	case '3':
@@ -4077,7 +4082,7 @@
 
 	for (card = devs; card != NULL; card = card->next)
 		for (i = 0; i < NR_AC97; i++)
-			if (card->ac97_codec[i] != NULL && 
+			if (card->ac97_codec[i] != NULL &&
 			    card->ac97_codec[i]->dev_mixer == minor)
 				goto match;
 
@@ -4091,7 +4096,7 @@
 }
 
 static int
-trident_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd, 
+trident_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd,
 		     unsigned long arg)
 {
 	struct ac97_codec *codec = (struct ac97_codec *) file->private_data;
@@ -4185,9 +4190,9 @@
 		/* disable AC97 GPIO interrupt */
 		outl(0x00, TRID_REG(card, SI_AC97_GPIO));
 		/* when power up the AC link is in cold reset mode so stop it */
-		outl(PCMOUT | SURROUT | CENTEROUT | LFEOUT | SECONDARY_ID, 
+		outl(PCMOUT | SURROUT | CENTEROUT | LFEOUT | SECONDARY_ID,
 		     TRID_REG(card, SI_SERIAL_INTF_CTRL));
-		/* it take a long time to recover from a cold reset */ 
+		/* it take a long time to recover from a cold reset */
 		/* (especially when you have more than one codec) */
 		udelay(2000);
 		ready_2nd = inl(TRID_REG(card, SI_SERIAL_INTF_CTRL));
@@ -4207,9 +4212,9 @@
 		/* disable AC97 GPIO interrupt */
 		outl(0x00, TRID_REG(card, SI_AC97_GPIO));
 		/* when power up, the AC link is in cold reset mode, so stop it */
-		outl(PCMOUT | SURROUT | CENTEROUT | LFEOUT, 
+		outl(PCMOUT | SURROUT | CENTEROUT | LFEOUT,
 		     TRID_REG(card, SI_SERIAL_INTF_CTRL));
-		/* it take a long time to recover from a cold reset (especially */ 
+		/* it take a long time to recover from a cold reset (especially */
 		/* when you have more than one codec) */
 		udelay(2000);
 		ready_2nd = inl(TRID_REG(card, SI_SERIAL_INTF_CTRL));
@@ -4221,7 +4226,7 @@
 		if ((codec = ac97_alloc_codec()) == NULL)
 			return -ENOMEM;
 
-		/* initialize some basic codec information, other fields */ 
+		/* initialize some basic codec information, other fields */
 		/* will be filled in ac97_probe_codec */
 		codec->private_data = card;
 		codec->id = num_ac97;
@@ -4352,8 +4357,8 @@
 static inline void trident_unregister_gameport(struct trident_card *card) { }
 #endif /* SUPPORT_JOYSTICK */
 
-/* install the driver, we do not allocate hardware channel nor DMA buffer */ 
-/* now, they are defered until "ACCESS" time (in prog_dmabuf called by */ 
+/* install the driver, we do not allocate hardware channel nor DMA buffer */
+/* now, they are defered until "ACCESS" time (in prog_dmabuf called by */
 /* open/read/write/ioctl/mmap) */
 static int __devinit
 trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
@@ -4376,9 +4381,9 @@
 	else
 		dma_mask = TRIDENT_DMA_MASK;
 	if (pci_set_dma_mask(pci_dev, dma_mask)) {
-		printk(KERN_ERR "trident: architecture does not support" 
-		       " %s PCI busmaster DMA\n", 
-		       pci_dev->device == PCI_DEVICE_ID_ALI_5451 ? 
+		printk(KERN_ERR "trident: architecture does not support"
+		       " %s PCI busmaster DMA\n",
+		       pci_dev->device == PCI_DEVICE_ID_ALI_5451 ?
 		       "32-bit" : "30-bit");
 		goto out;
 	}
@@ -4422,7 +4427,7 @@
 
 	pci_set_master(pci_dev);
 
-	printk(KERN_INFO "trident: %s found at IO 0x%04lx, IRQ %d\n", 
+	printk(KERN_INFO "trident: %s found at IO 0x%04lx, IRQ %d\n",
 	       card_names[pci_id->driver_data], card->iobase, card->irq);
 
 	if (card->pci_id == PCI_DEVICE_ID_ALI_5451) {
@@ -4449,9 +4454,9 @@
 
 		/* Add H/W Volume Control By Matt Wu Jul. 06, 2001 */
 		card->hwvolctl = 0;
-		pci_dev_m1533 = pci_find_device(PCI_VENDOR_ID_AL, 
-						PCI_DEVICE_ID_AL_M1533, 
-						pci_dev_m1533);
+		pci_dev_m1533 = pci_get_device(PCI_VENDOR_ID_AL,
+					       PCI_DEVICE_ID_AL_M1533,
+					       pci_dev_m1533);
 		rc = -ENODEV;
 		if (pci_dev_m1533 == NULL)
 			goto out_proc_fs;
@@ -4465,6 +4470,8 @@
 			bits &= 0xbf;	/*clear bit 6 */
 			pci_write_config_byte(pci_dev_m1533, 0x7b, bits);
 		}
+		pci_dev_put(pci_dev_m1533);
+
 	} else if (card->pci_id == PCI_DEVICE_ID_INTERG_5050) {
 		card->alloc_pcm_channel = cyber_alloc_pcm_channel;
 		card->alloc_rec_pcm_channel = cyber_alloc_pcm_channel;
@@ -4482,7 +4489,7 @@
 	rc = -ENODEV;
 	if (request_irq(card->irq, &trident_interrupt, IRQF_SHARED,
 			card_names[pci_id->driver_data], card)) {
-		printk(KERN_ERR "trident: unable to allocate irq %d\n", 
+		printk(KERN_ERR "trident: unable to allocate irq %d\n",
 		       card->irq);
 		goto out_proc_fs;
 	}
@@ -4533,7 +4540,7 @@
 				printk(KERN_INFO "trident: Running on Alpha system "
 				       "type Nautilus\n");
 				ac97_data = ali_ac97_get(card, 0, AC97_POWER_CONTROL);
-				ali_ac97_set(card, 0, AC97_POWER_CONTROL, 
+				ali_ac97_set(card, 0, AC97_POWER_CONTROL,
 					     ac97_data | ALI_EAPD_POWER_DOWN);
 			}
 		}
@@ -4566,7 +4573,7 @@
 	devs = NULL;
 out_release_region:
 	release_region(iobase, 256);
-	return rc; 
+	return rc;
 }
 
 static void __devexit
@@ -4634,8 +4641,8 @@
 static int __init
 trident_init_module(void)
 {
-	printk(KERN_INFO "Trident 4DWave/SiS 7018/ALi 5451,Tvia CyberPro " 
-	       "5050 PCI Audio, version " DRIVER_VERSION ", " __TIME__ " " 
+	printk(KERN_INFO "Trident 4DWave/SiS 7018/ALi 5451,Tvia CyberPro "
+	       "5050 PCI Audio, version " DRIVER_VERSION ", " __TIME__ " "
 	       __DATE__ "\n");
 
 	return pci_register_driver(&trident_pci_driver);
diff --git a/sound/pci/mixart/mixart_hwdep.c b/sound/pci/mixart/mixart_hwdep.c
index 1d9232d..170781a 100644
--- a/sound/pci/mixart/mixart_hwdep.c
+++ b/sound/pci/mixart/mixart_hwdep.c
@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/firmware.h>
+#include <linux/vmalloc.h>
 #include <asm/io.h>
 #include <sound/core.h>
 #include "mixart.h"